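# The lines below are the tail of a truncated VSOP87-style trigonometric
# series: each term adds amplitude * cos(phase + frequency * x) to a running
# sum (R1, R2, ...), where x is the time argument and `mu.cost` is presumably
# the cosine helper of the (unshown) `mu` module.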
R1 += 0.00000000048 * mu.cost(2.95878690387 + 11527.12508919240 * x)
R1 += 0.00000000052 * mu.cost(0.01971915447 + 8226.57883637840 * x)
R1 += 0.00000000045 * mu.cost(5.07966377852 + 3318.76159737340 * x)
R1 += 0.00000000043 * mu.cost(1.23879381294 + 7218.02936549500 * x)
R1 += 0.00000000058 * mu.cost(5.58121433163 + 6643.09181776180 * x)
R1 += 0.00000000048 * mu.cost(5.02446939402 + 6645.19698672220 * x)
R1 += 0.00000000043 * mu.cost(0.69492704598 + 20995.39296644940 * x)
R1 += 0.00000000044 * mu.cost(4.02272101657 + 9389.05354078620 * x)
R1 += 0.00000000055 * mu.cost(4.38138154697 + 1478.86657406440 * x)
R1 += 0.00000000051 * mu.cost(4.24292455428 + 792.77488846740 * x)
R1 += 0.00000000042 * mu.cost(2.74826708762 + 14577.18472611980 * x)
R1 += 0.00000000044 * mu.cost(4.18397905503 + 8535.85715903420 * x)
R1 += 0.00000000047 * mu.cost(1.33588473182 + 632.78373931320 * x)
R1 += 0.00000000042 * mu.cost(5.05676915852 + 3397.51080163540 * x)
R1 += 0.00000000042 * mu.cost(0.28204510006 + 10001.48196070061 * x)
R1 += 0.00000000042 * mu.cost(0.75310918544 + 6357.71943674220 * x)
R1 += 0.00000000042 * mu.cost(4.94532732982 + 18052.92954315780 * x)
R1 += 0.00000000052 * mu.cost(4.09912687749 + 5835.14201864840 * x)
R1 += 0.00000000054 * mu.cost(2.46533302314 + 8186.51266249260 * x)
R1 += 0.00000000043 * mu.cost(4.77713978044 + 32124.36905223359 * x)
R1 += 0.00000000053 * mu.cost(6.08293348275 + 3377.21779200400 * x)
R1 += 0.00000000041 * mu.cost(2.51168269556 + 4186.69526145100 * x)
R1 += 0.00000000041 * mu.cost(1.24482327948 + 3212.59358336240 * x)
R1 += 0.00000000041 * mu.cost(5.42003026893 + 685.04405422600 * x)
R1 += 0.00000000041 * mu.cost(5.04768364997 + 6571.01853218020 * x)
R1 += 0.00000000042 * mu.cost(2.08904552145 + 13363.43002786740 * x)
R1 += 0.00000000048 * mu.cost(4.82888746454 + 1835.32461760680 * x)
R1 += 0.00000000043 * mu.cost(4.17203713456 + 43340.65334255760 * x)
R1 += 0.00000000046 * mu.cost(0.81640935106 + 45884.96776244100 * x)
R1 += 0.00000000042 * mu.cost(2.26773068307 + 12729.66596748600 * x)
R1 += 0.00000000039 * mu.cost(5.85791936573 + 846.08283475120 * x)
R1 += 0.00000000043 * mu.cost(2.90976420757 + 6872.67311951120 * x)
R1 += 0.00000000045 * mu.cost(1.98725045807 + 1861.74585263540 * x)
R1 += 0.00000000045 * mu.cost(0.50053853542 + 14128.24277124560 * x)
R1 += 0.00000000046 * mu.cost(2.86512929328 + 38650.17350619900 * x)
R1 += 0.00000000038 * mu.cost(3.65846461938 + 29698.28751133580 * x)
R1 += 0.00000000039 * mu.cost(4.57679716458 + 6901.63749583840 * x)
R1 += 0.00000000039 * mu.cost(3.85504465583 + 9945.57120882380 * x)
R1 += 0.00000000040 * mu.cost(0.06127203284 + 9947.05568153210 * x)
R1 += 0.00000000043 * mu.cost(5.28854105201 + 3274.12501778540 * x)
R1 += 0.00000000047 * mu.cost(6.25707790441 + 24606.13555322000 * x)
R1 += 0.00000000037 * mu.cost(5.02115296017 + 11128.97608578420 * x)
R1 += 0.00000000039 * mu.cost(1.71421919870 + 7696.88787128380 * x)
R1 += 0.00000000037 * mu.cost(4.34652985120 + 3283.71405176420 * x)
R1 += 0.00000000037 * mu.cost(0.05572748092 + 21150.81336588360 * x)
R1 += 0.00000000042 * mu.cost(4.97872041460 + 13575.74880223720 * x)
R1 += 0.00000000050 * mu.cost(4.24170332288 + 7747.72033058960 * x)
R1 += 0.00000000037 * mu.cost(4.07496312186 + 8646.06348025360 * x)
R1 += 0.00000000038 * mu.cost(0.44080908793 + 24491.42579258340 * x)
R1 += 0.00000000036 * mu.cost(1.73681874925 + 3468.63127003720 * x)
R1 += 0.00000000041 * mu.cost(5.69294900686 + 26087.90314157420 * x)
R1 += 0.00000000036 * mu.cost(1.80256389689 + 8756.26980147300 * x)
R1 += 0.00000000036 * mu.cost(3.37374689465 + 48429.28218232440 * x)
R1 += 0.00000000035 * mu.cost(0.10555289345 + 8742.04270747140 * x)
R1 += 0.00000000041 * mu.cost(4.26832466355 + 21000.91589075680 * x)
R1 += 0.00000000038 * mu.cost(0.73199792046 + 26084.02180621620 * x)
R2: float = 0
R2 += 0.00044242247 * mu.cost(0.47930603943 + 3340.61242669980 * x)
R2 += 0.00008138042 * mu.cost(0.86998398093 + 6681.22485339960 * x)
R2 += 0.00001274915 * mu.cost(1.22594050809 + 10021.83728009940 * x)
R2 += 0.00000187387 * mu.cost(1.57298991982 + 13362.44970679920 * x)
R2 += 0.00000040744 * mu.cost(1.97080175060 + 3344.13554504880 * x)
R2 -= 0.00000052396
R2 += 0.00000026616 * mu.cost(1.91665615762 + 16703.06213349900 * x)
R2 += 0.00000017825 * mu.cost(4.43499505333 + 2281.23049651060 * x)
R2 += 0.00000011713 * mu.cost(4.52510453730 + 3185.19202726560 * x)
R2 += 0.00000010209 * mu.cost(5.39143469548 + 1059.38193018920 * x)
R2 += 0.00000009950 * mu.cost(0.41870577185 + 796.29800681640 * x)
R2 += 0.00000009237 * mu.cost(4.53579272961 + 2146.16541647520 * x)
R2 += 0.00000007299 * mu.cost(3.14218509183 + 2544.31441988340 * x)
R2 += 0.00000007217 * mu.cost(2.29300859074 + 6684.74797174860 * x)
R2 += 0.00000006808 * mu.cost(5.26702580055 + 155.42039943420 * x)
R2 += 0.00000006528 * mu.cost(2.30781369329 + 3738.76143010800 * x)
R2 += 0.00000007785 * mu.cost(5.93369079547 + 1748.01641306700 * x)
R2 += 0.00000005840 * mu.cost(1.05191350362 + 1349.86740965880 * x)
R2 += 0.00000006749 * mu.cost(5.30194395749 + 1194.44701022460 * x)
R2 += 0.00000004695 * mu.cost(0.76880586144 + 3097.88382272579 * x)
R2 += 0.00000005391 * mu.cost(1.00223256041 + 3149.16416058820 * x)
R2 += 0.00000004406 * mu.cost(2.45556303355 + 951.71840625060 * x)
R2 += 0.00000004286 * mu.cost(3.89643520638 + 1592.59601363280 * x)
R2 += 0.00000003514 * mu.cost(1.85168391963 + 398.14900340820 * x)
R2 += 0.00000003699 * mu.cost(2.26043707772 + 20043.67456019880 * x)
R2 += 0.00000003377 * mu.cost(3.81683532672 + 1751.53953141600 * x)
R2 += 0.00000004585 * mu.cost(0.80787441740 + 4136.91043351620 * x)
R2 += 0.00000003201 * mu.cost(2.11657635165 + 5614.72937620960 * x)
R2 += 0.00000003622 * mu.cost(1.32395191387 + 3333.49887969900 * x)
R2 += 0.00000002916 * mu.cost(1.19337460559 + 529.69096509460 * x)
R2 += 0.00000002979 * mu.cost(2.86481008776 + 6151.53388830500 * x)
R2 += 0.00000003057 * mu.cost(4.55276793064 + 5628.95647021120 * x)
R2 += 0.00000002906 * mu.cost(1.20295377623 + 3894.18182954220 * x)
R2 += 0.00000003850 * mu.cost(3.86055626689 + 553.56940284240 * x)
R2 += 0.00000002820 * mu.cost(2.48683324916 + 1990.74501704100 * x)
R2 += 0.00000002657 * mu.cost(6.07411629964 + 4292.33083295040 * x)
R2 += 0.00000002700 * mu.cost(2.92139773350 + 3496.03282613400 * x)
R2 += 0.00000002395 * mu.cost(5.94175921617 + 2787.04302385740 * x)
R2 += 0.00000002264 * mu.cost(2.56219866409 + 191.44826611160 * x)
R2 += 0.00000002167 * mu.cost(5.36812435483 + 8962.45534991020 * x)
R2 += 0.00000002149 * mu.cost(2.74950075397 + 242.72860397400 * x)
R2 += 0.00000002217 * mu.cost(1.85265984462 + 3337.08930835080 * x)
R2 += 0.00000001996 * mu.cost(5.76429928131 + 3341.59274776800 * x)
R2 += 0.00000001999 * mu.cost(3.82349238481 + 2914.01423582380 * x)
R2 += 0.00000001835 * mu.cost(5.68592723044 + 1589.07289528380 * x)
R2 += 0.00000001812 * mu.cost(3.32042068028 + 5088.62883976680 * x)
R2 += 0.00000002413 * mu.cost(4.68291336853 + 4690.47983635860 * x)
R2 += 0.00000001970 * mu.cost(4.17480610904 + 3340.59517304760 * x)
R2 += 0.00000001970 * mu.cost(6.20643855008 + 3340.62968035200 * x)
R2 += 0.00000001627 * mu.cost(5.67733051452 + 4535.05943692440 * x)
R2 += 0.00000002160 * mu.cost(1.07452599834 + 2388.89402044920 * x)
R2 += 0.00000001964 * mu.cost(3.10805316088 + 3583.34103067380 * x)
R2 += 0.00000001985 * mu.cost(5.75850351840 + 4399.99435688900 * x)
R2 += 0.00000001507 * mu.cost(4.95936409838 + 382.89653222320 * x)
R2 += 0.00000001278 * mu.cost(4.82232889938 + 2957.71589447660 * x)
R2 += 0.00000001475 * mu.cost(2.22707926559 + 3723.50895892300 * x)
R2 += 0.00000001196 * mu.cost(3.26724458920 + 9492.14631500480 * x)
R2 += 0.00000001349 * mu.cost(4.87573224485 + 6525.80445396540 * x)
R2 += 0.00000001433 * mu.cost(2.69734916443 + 7079.37385680780 * x)
R2 += 0.00000001224 * mu.cost(2.62012336714 + 10025.36039844840 * x)
R2 += 0.00000001404 * mu.cost(5.19056026479 + 2700.71514038580 * x)
R2 += 0.00000001202 * mu.cost(0.93472783088 + 2810.92146160520 * x)
R2 += 0.00000000869 * mu.cost(5.81340811635 + 12303.06777661000 * x)
R2 += 0.00000000867 * mu.cost(2.20046640409 + 2699.73481931760 * x)
R2 += 0.00000000830 * mu.cost(2.01484544773 + 5092.15195811580 * x)
R2 += 0.00000000855 * mu.cost(5.96220147975 + 426.59819087600 * x)
R2 += 0.00000000848 * mu.cost(2.26407047301 + 6283.07584999140 * x)
R2 += 0.00000000917 * mu.cost(1.40295785881 + 6489.77658728800 * x)
R2 += 0.00000000833 * mu.cost(1.17384197174 + 7477.52286021600 * x)
R2 += 0.00000001041 * mu.cost(6.27171470048 + 3347.72597370060 * x)
R2 += 0.00000000965 * mu.cost(3.39855816541 + 5621.84292321040 * x)
R2 += 0.00000000722 * mu.cost(4.26304776331 + 4933.20844033260 * x)
R2 += 0.00000000706 * mu.cost(2.34131594714 + 7.11354700080 * x)
R2 += 0.00000000768 * mu.cost(2.06208352904 + 5486.77784317500 * x)
R2 += 0.00000000953 * mu.cost(2.11123497948 + 3870.30339179440 * x)
R2 += 0.00000000844 * mu.cost(2.23931963240 + 3553.91152213780 * x)
R2 += 0.00000000646 * mu.cost(2.24669034469 + 3340.54511639700 * x)
R2 += 0.00000000653 * mu.cost(3.99043329363 + 6677.70173505060 * x)
R2 += 0.00000000714 * mu.cost(0.29739480601 + 6681.24210705180 * x)
R2 += 0.00000000828 * mu.cost(0.22863617670 + 3532.06069281140 * x)
R2 += 0.00000000612 * mu.cost(1.55388376751 + 7234.79425624200 * x)
R2 += 0.00000000714 * mu.cost(4.54969883976 + 6681.20759974740 * x)
R2 += 0.00000000586 * mu.cost(3.30118433303 + 1221.84856632140 * x)
R2 += 0.00000000646 * mu.cost(1.83853693231 + 3340.67973700260 * x)
R2 += 0.00000000560 * mu.cost(5.05848353328 + 8031.09226305840 * x)
R2 += 0.00000000651 * mu.cost(0.15897472160 + 7632.94325965020 * x)
R2 += 0.00000000488 * mu.cost(3.08086378649 +
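# A minimal evaluation sketch (not from the original file), assuming each term
# of the series is an (amplitude, phase, frequency) triple; the two sample
# terms below are copied from the R2 series above.
import math

_SAMPLE_TERMS = [
    (0.00044242247, 0.47930603943, 3340.61242669980),
    (0.00008138042, 0.86998398093, 6681.22485339960),
]

def evaluate_series(x, terms=_SAMPLE_TERMS):
    # Sum amplitude * cos(phase + frequency * x) over all terms.
    return sum(a * math.cos(p + f * x) for a, p, f in terms)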
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import datetime
from collections import defaultdict
from itertools import groupby
from odoo import api, fields, models, _
from odoo.exceptions import AccessError, UserError
from odoo.tools import date_utils, float_compare, float_round, float_is_zero
class MrpProduction(models.Model):
""" Manufacturing Orders """
_name = 'mrp.production'
_description = 'Production Order'
_date_name = 'date_planned_start'
_inherit = ['mail.thread', 'mail.activity.mixin']
_order = 'date_planned_start asc,id'
@api.model
def _get_default_picking_type(self):
company_id = self.env.context.get('default_company_id', self.env.company.id)
return self.env['stock.picking.type'].search([
('code', '=', 'mrp_operation'),
('warehouse_id.company_id', '=', company_id),
], limit=1).id
@api.model
def _get_default_location_src_id(self):
location = False
company_id = self.env.context.get('default_company_id', self.env.company.id)
if self.env.context.get('default_picking_type_id'):
location = self.env['stock.picking.type'].browse(self.env.context['default_picking_type_id']).default_location_src_id
if not location:
location = self.env['stock.warehouse'].search([('company_id', '=', company_id)], limit=1).lot_stock_id
return location and location.id or False
@api.model
def _get_default_location_dest_id(self):
location = False
company_id = self.env.context.get('default_company_id', self.env.company.id)
if self._context.get('default_picking_type_id'):
location = self.env['stock.picking.type'].browse(self.env.context['default_picking_type_id']).default_location_dest_id
if not location:
location = self.env['stock.warehouse'].search([('company_id', '=', company_id)], limit=1).lot_stock_id
return location and location.id or False
@api.model
def _get_default_date_planned_finished(self):
if self.env.context.get('default_date_planned_start'):
return fields.Datetime.to_datetime(self.env.context.get('default_date_planned_start')) + datetime.timedelta(hours=1)
return datetime.datetime.now() + datetime.timedelta(hours=1)
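    # Hedged usage sketch (not part of this model): when a manufacturing order
    # is created without explicit values, the _get_default_* methods above
    # resolve the picking type and the source/destination locations from the
    # company found in the context. `product` is an assumed product.product
    # record:
    #
    #   production = env['mrp.production'].with_context(
    #       default_company_id=env.company.id,
    #   ).create({
    #       'product_id': product.id,
    #       'product_uom_id': product.uom_id.id,
    #       'product_qty': 5.0,
    #   })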
name = fields.Char(
'Reference', copy=False, readonly=True, default=lambda x: _('New'))
origin = fields.Char(
'Source', copy=False,
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
help="Reference of the document that generated this production order request.")
product_id = fields.Many2one(
'product.product', 'Product',
domain="[('bom_ids', '!=', False), ('bom_ids.active', '=', True), ('bom_ids.type', '=', 'normal'), ('type', 'in', ['product', 'consu']), '|', ('company_id', '=', False), ('company_id', '=', company_id)]",
readonly=True, required=True, check_company=True,
states={'draft': [('readonly', False)]})
product_tmpl_id = fields.Many2one('product.template', 'Product Template', related='product_id.product_tmpl_id')
product_qty = fields.Float(
'Quantity To Produce',
default=1.0, digits='Product Unit of Measure',
readonly=True, required=True, tracking=True,
states={'draft': [('readonly', False)]})
product_uom_id = fields.Many2one(
'uom.uom', 'Product Unit of Measure',
readonly=True, required=True,
states={'draft': [('readonly', False)]})
product_uom_qty = fields.Float(string='Total Quantity', compute='_compute_product_uom_qty', store=True)
picking_type_id = fields.Many2one(
'stock.picking.type', 'Operation Type',
domain="[('code', '=', 'mrp_operation'), ('company_id', '=', company_id)]",
default=_get_default_picking_type, required=True, check_company=True)
location_src_id = fields.Many2one(
'stock.location', 'Components Location',
default=_get_default_location_src_id,
readonly=True, required=True,
domain="[('usage','=','internal'), '|', ('company_id', '=', False), ('company_id', '=', company_id)]",
states={'draft': [('readonly', False)]}, check_company=True,
help="Location where the system will look for components.")
location_dest_id = fields.Many2one(
'stock.location', 'Finished Products Location',
default=_get_default_location_dest_id,
readonly=True, required=True,
domain="[('usage','=','internal'), '|', ('company_id', '=', False), ('company_id', '=', company_id)]",
states={'draft': [('readonly', False)]}, check_company=True,
help="Location where the system will stock the finished products.")
date_planned_start = fields.Datetime(
'Planned Date', copy=False, default=fields.Datetime.now,
help="Date at which you plan to start the production.",
index=True, required=True, store=True)
date_planned_finished = fields.Datetime(
'Planned End Date',
default=_get_default_date_planned_finished,
help="Date at which you plan to finish the production.",
copy=False, store=True)
date_deadline = fields.Datetime(
'Deadline', copy=False, index=True,
help="Informative date allowing to define when the manufacturing order should be processed at the latest to fulfill delivery on time.")
date_start = fields.Datetime('Start Date', copy=False, index=True, readonly=True)
date_finished = fields.Datetime('End Date', copy=False, index=True, readonly=True)
    date_start_wo = fields.Datetime(
        'Plan From', copy=False, readonly=True,
        help="Work orders will be planned based on the availability of the work "
             "centers starting from this date. If empty, the work orders will "
             "be planned as soon as possible.",
    )
bom_id = fields.Many2one(
'mrp.bom', 'Bill of Material',
readonly=True, states={'draft': [('readonly', False)]},
domain="""[
'&',
'|',
('company_id', '=', False),
('company_id', '=', company_id),
'&',
'|',
('product_id','=',product_id),
'&',
('product_tmpl_id.product_variant_ids','=',product_id),
('product_id','=',False),
('type', '=', 'normal')]""",
check_company=True,
help="Bill of Materials allow you to define the list of required components to make a finished product.")
routing_id = fields.Many2one(
'mrp.routing', 'Routing',
readonly=True, compute='_compute_routing', store=True,
help="The list of operations (list of work centers) to produce the finished product. The routing "
"is mainly used to compute work center costs during operations and to plan future loads on "
"work centers based on production planning.")
state = fields.Selection([
('draft', 'Draft'),
('confirmed', 'Confirmed'),
('planned', 'Planned'),
('progress', 'In Progress'),
('to_close', 'To Close'),
('done', 'Done'),
('cancel', 'Cancelled')], string='State',
compute='_compute_state', copy=False, index=True, readonly=True,
store=True, tracking=True,
help=" * Draft: The MO is not confirmed yet.\n"
" * Confirmed: The MO is confirmed, the stock rules and the reordering of the components are trigerred.\n"
" * Planned: The WO are planned.\n"
" * In Progress: The production has started (on the MO or on the WO).\n"
" * To Close: The production is done, the MO has to be closed.\n"
" * Done: The MO is closed, the stock moves are posted. \n"
" * Cancelled: The MO has been cancelled, can't be confirmed anymore.")
reservation_state = fields.Selection([
('confirmed', 'Waiting'),
('assigned', 'Ready'),
('waiting', 'Waiting Another Operation')],
string='Material Availability',
compute='_compute_state', copy=False, index=True, readonly=True,
store=True, tracking=True,
help=" * Ready: The material is available to start the production.\n\
* Waiting: The material is not available to start the production.\n\
The material availability is impacted by the manufacturing readiness\
defined on the BoM.")
move_raw_ids = fields.One2many(
'stock.move', 'raw_material_production_id', 'Components',
copy=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
domain=[('scrapped', '=', False)])
move_finished_ids = fields.One2many(
'stock.move', 'production_id', 'Finished Products',
copy=False, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
domain=[('scrapped', '=', False)])
finished_move_line_ids = fields.One2many(
'stock.move.line', compute='_compute_lines', inverse='_inverse_lines', string="Finished Product"
)
workorder_ids = fields.One2many(
'mrp.workorder', 'production_id', 'Work Orders',
copy=False, readonly=True)
workorder_count = fields.Integer('# Work Orders', compute='_compute_workorder_count')
workorder_done_count = fields.Integer('# Done Work Orders', compute='_compute_workorder_done_count')
move_dest_ids = fields.One2many('stock.move', 'created_production_id',
string="Stock Movements of Produced Goods")
unreserve_visible = fields.Boolean(
'Allowed to Unreserve Inventory', compute='_compute_unreserve_visible',
help='Technical field to check when we can unreserve')
post_visible = fields.Boolean(
'Allowed to Post Inventory', compute='_compute_post_visible',
help='Technical field to check when we can post')
user_id = fields.Many2one(
'res.users', 'Responsible', default=lambda self: self.env.user,
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
domain=lambda self: [('groups_id', 'in', self.env.ref('mrp.group_mrp_user').id)])
company_id = fields.Many2one(
'res.company', 'Company', default=lambda self: self.env.company,
index=True, required=True)
qty_produced = fields.Float(compute="_get_produced_qty", string="Quantity Produced")
procurement_group_id = fields.Many2one(
'procurement.group', 'Procurement Group',
copy=False)
orderpoint_id = fields.Many2one('stock.warehouse.orderpoint', 'Orderpoint')
propagate_cancel = fields.Boolean(
'Propagate cancel and split',
        help='If checked, when the preceding move (generated by a subsequent procurement) is cancelled or split, the move generated by this one is cancelled or split as well.')
propagate_date = fields.Boolean(string="Propagate Rescheduling",
help='The rescheduling is propagated to the next move.')
propagate_date_minimum_delta = fields.Integer(string='Reschedule if Higher Than',
help='The change must be higher than this value to be propagated')
scrap_ids = fields.One2many('stock.scrap', 'production_id', 'Scraps')
scrap_count = fields.Integer(compute='_compute_scrap_move_count', string='Scrap Move')
priority = fields.Selection([('0', 'Not urgent'), ('1', 'Normal'), ('2', 'Urgent'), ('3', 'Very Urgent')], 'Priority',
readonly=True, states={'draft': [('readonly', False)]}, default='1')
is_locked = fields.Boolean('Is Locked', default=True, copy=False)
show_final_lots = fields.Boolean('Show Final Lots', compute='_compute_show_lots')
production_location_id = fields.Many2one('stock.location', "Production Location", related='product_id.property_stock_production', readonly=False, related_sudo=False)
    picking_ids = fields.Many2many('stock.picking', compute='_compute_picking_ids', string='Pickings associated with this manufacturing order')
delivery_count = fields.Integer(string='Delivery Orders', compute='_compute_picking_ids')
confirm_cancel = fields.Boolean(compute='_compute_confirm_cancel')
@api.depends('move_raw_ids.state', 'move_finished_ids.state')
def _compute_confirm_cancel(self):
""" If the manufacturing order contains some done move (via an intermediate
post inventory), the user has to confirm the cancellation.
"""
domain = [
('state', '=', 'done'),
'|',
('production_id', 'in', self.ids),
('raw_material_production_id', 'in', self.ids)
]
res = self.env['stock.move'].read_group(domain, ['state', 'production_id', 'raw_material_production_id'], ['production_id', 'raw_material_production_id'], lazy=False)
productions_with_done_move = {}
for rec in res:
production_record = rec['production_id'] or rec['raw_material_production_id']
if production_record:
productions_with_done_move[production_record[0]] = True
for production in self:
production.confirm_cancel = productions_with_done_move.get(production.id, False)
@api.depends('procurement_group_id')
def _compute_picking_ids(self):
for order in self:
order.picking_ids = self.env['stock.picking'].search([
('group_id', '=', order.procurement_group_id.id), ('group_id', '!=', False),
])
order.delivery_count = len(order.picking_ids)
def action_view_mo_delivery(self):
""" This function returns an action that display picking related to
manufacturing order orders. It can either be a in a list or in a form
view, if there is only one picking to show.
"""
self.ensure_one()
action = self.env.ref('stock.action_picking_tree_all').read()[0]
pickings = self.mapped('picking_ids')
if len(pickings) > 1:
action['domain'] = [('id', 'in', pickings.ids)]
elif pickings:
form_view = [(self.env.ref('stock.view_picking_form').id, 'form')]
if 'views' in action:
action['views'] = form_view + [(state,view) for state,view in action['views'] if view != 'form']
else:
action['views'] = form_view
action['res_id'] = pickings.id
action['context'] = dict(self._context, default_origin=self.name, create=False)
return action
@api.depends('product_uom_id', 'product_qty', 'product_id.uom_id')
def _compute_product_uom_qty(self):
for production in self:
if production.product_id.uom_id != production.product_uom_id:
production.product_uom_qty = production.product_uom_id._compute_quantity(production.product_qty, production.product_id.uom_id)
else:
production.product_uom_qty = production.product_qty
@api.depends('product_id.tracking')
def _compute_show_lots(self):
for production in self:
production.show_final_lots = production.product_id.tracking != 'none'
def _inverse_lines(self):
""" Little hack to make sure that when you change something on these objects, it gets saved"""
pass
@api.depends('move_finished_ids.move_line_ids')
def _compute_lines(self):
for production in self:
production.finished_move_line_ids = production.move_finished_ids.mapped('move_line_ids')
@api.depends('bom_id.routing_id', 'bom_id.routing_id.operation_ids')
def _compute_routing(self):
for production in self:
if production.bom_id.routing_id.operation_ids:
production.routing_id = production.bom_id.routing_id.id
else:
production.routing_id = False
@api.depends('workorder_ids')
def _compute_workorder_count(self):
data = self.env['mrp.workorder'].read_group([('production_id', 'in', self.ids)], ['production_id'], ['production_id'])
count_data = dict((item['production_id'][0], item['production_id_count']) for item in data)
for production in self:
production.workorder_count = count_data.get(production.id, 0)
@api.depends('workorder_ids.state')
    def _compute_workorder_done_count(self):
        data = self.env['mrp.workorder'].read_group(
            [('production_id', 'in', self.ids), ('state', '=', 'done')],
            ['production_id'], ['production_id'])
        count_data = dict((item['production_id'][0], item['production_id_count']) for item in data)
        for production in self:
            production.workorder_done_count = count_data.get(production.id, 0)
# A structure representing the standard USB device descriptor. This
# descriptor is documented in section 9.6.1 of the USB 2.0 specification.
# All multiple-byte fields are represented in host-endian format.
class libusb_device_descriptor(Structure):
_fields_ = [
# Size of this descriptor (in bytes)
('bLength', c_uint8),
# Descriptor type. Will have value LIBUSB_DT_DEVICE in this
# context.
('bDescriptorType', c_uint8),
# USB specification release number in binary-coded decimal. A
# value of 0x0200 indicates USB 2.0, 0x0110 indicates USB 1.1,
# etc.
('bcdUSB', c_uint16),
# USB-IF class code for the device. See libusb_class_code.
('bDeviceClass', c_uint8),
# USB-IF subclass code for the device, qualified by the
# bDeviceClass value
('bDeviceSubClass', c_uint8),
# USB-IF protocol code for the device, qualified by the
# bDeviceClass and bDeviceSubClass values
('bDeviceProtocol', c_uint8),
# Maximum packet size for endpoint 0
('bMaxPacketSize0', c_uint8),
# USB-IF vendor ID
('idVendor', c_uint16),
# USB-IF product ID
('idProduct', c_uint16),
# Device release number in binary-coded decimal
('bcdDevice', c_uint16),
# Index of string descriptor describing manufacturer
('iManufacturer', c_uint8),
# Index of string descriptor describing product
('iProduct', c_uint8),
# Index of string descriptor containing device serial number
('iSerialNumber', c_uint8),
# Number of possible configurations
('bNumConfigurations', c_uint8)]
libusb_device_descriptor_p = POINTER(libusb_device_descriptor)
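# Hedged helper (not part of the original bindings): decode the BCD-encoded
# bcdUSB / bcdDevice fields described above, e.g. 0x0210 -> '2.10'.
def decode_bcd(value):
    return '%x.%02x' % (value >> 8, value & 0xff)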
class libusb_endpoint_descriptor(Structure):
_fields_ = [
('bLength', c_uint8),
('bDescriptorType', c_uint8),
('bEndpointAddress', c_uint8),
('bmAttributes', c_uint8),
('wMaxPacketSize', c_uint16),
('bInterval', c_uint8),
('bRefresh', c_uint8),
('bSynchAddress', c_uint8),
('extra', c_void_p),
('extra_length', c_int)]
libusb_endpoint_descriptor_p = POINTER(libusb_endpoint_descriptor)
class libusb_interface_descriptor(Structure):
_fields_ = [
('bLength', c_uint8),
('bDescriptorType', c_uint8),
('bInterfaceNumber', c_uint8),
('bAlternateSetting', c_uint8),
('bNumEndpoints', c_uint8),
('bInterfaceClass', c_uint8),
('bInterfaceSubClass', c_uint8),
('bInterfaceProtocol', c_uint8),
('iInterface', c_uint8),
('endpoint', libusb_endpoint_descriptor_p),
('extra', c_void_p),
('extra_length', c_int)]
libusb_interface_descriptor_p = POINTER(libusb_interface_descriptor)
class libusb_interface(Structure):
_fields_ = [('altsetting', libusb_interface_descriptor_p),
('num_altsetting', c_int)]
libusb_interface_p = POINTER(libusb_interface)
class libusb_config_descriptor(Structure):
_fields_ = [
('bLength', c_uint8),
('bDescriptorType', c_uint8),
('wTotalLength', c_uint16),
('bNumInterfaces', c_uint8),
('bConfigurationValue', c_uint8),
('iConfiguration', c_uint8),
('bmAttributes', c_uint8),
('MaxPower', c_uint8),
('interface', libusb_interface_p),
('extra', c_void_p),
('extra_length', c_int)]
libusb_config_descriptor_p = POINTER(libusb_config_descriptor)
libusb_config_descriptor_p_p = POINTER(libusb_config_descriptor_p)
class libusb_control_setup(Structure):
_fields_ = [
('bmRequestType', c_uint8),
('bRequest', c_uint8),
('wValue', c_uint16),
('wIndex', c_uint16),
('wLength', c_uint16)]
libusb_control_setup_p = POINTER(libusb_control_setup)
LIBUSB_CONTROL_SETUP_SIZE = sizeof(libusb_control_setup)
# Structure representing a libusb session. The concept of individual libusb
# sessions allows your program to use two libraries (or dynamically load
# two modules) which both independently use libusb. This will prevent
# interference between the individual libusb users - for example
# libusb_set_debug() will not affect the other user of the library, and
# libusb_exit() will not destroy resources that the other user is still
# using.
#
# Sessions are created by libusb_init() and destroyed through libusb_exit().
# If your application is guaranteed to only ever include a single libusb
# user (i.e. you), you do not have to worry about contexts: pass NULL in
# every function call where a context is required. The default context
# will be used.
#
# For more information, see \ref contexts.
class libusb_context(Structure):
pass
libusb_context_p = POINTER(libusb_context)
libusb_context_p_p = POINTER(libusb_context_p)
# Structure representing a USB device detected on the system. This is an
# opaque type for which you are only ever provided with a pointer, usually
# originating from libusb_get_device_list().
#
# Certain operations can be performed on a device, but in order to do any
# I/O you will have to first obtain a device handle using libusb_open().
#
# Devices are reference counted with libusb_device_ref() and
# libusb_device_unref(), and are freed when the reference count reaches 0.
# New devices presented by libusb_get_device_list() have a reference count of
# 1, and libusb_free_device_list() can optionally decrease the reference count
# on all devices in the list. libusb_open() adds another reference which is
# later destroyed by libusb_close().
class libusb_device(Structure):
pass
libusb_device_p = POINTER(libusb_device)
libusb_device_p_p = POINTER(libusb_device_p)
libusb_device_p_p_p = POINTER(libusb_device_p_p)
# Structure representing a handle on a USB device. This is an opaque type for
# which you are only ever provided with a pointer, usually originating from
# libusb_open().
#
# A device handle is used to perform I/O and other operations. When finished
# with a device handle, you should call libusb_close().
class libusb_device_handle(Structure):
pass
libusb_device_handle_p = POINTER(libusb_device_handle)
libusb_device_handle_p_p = POINTER(libusb_device_handle_p)
class libusb_version(Structure):
_fields_ = [
('major', c_uint16),
('minor', c_uint16),
('micro', c_uint16),
('nano', c_uint16),
('rc', c_char_p),
('describe', c_char_p),
]
libusb_speed = Enum({
# The OS doesn't report or know the device speed.
'LIBUSB_SPEED_UNKNOWN': 0,
# The device is operating at low speed (1.5MBit/s).
'LIBUSB_SPEED_LOW': 1,
# The device is operating at full speed (12MBit/s).
'LIBUSB_SPEED_FULL': 2,
# The device is operating at high speed (480MBit/s).
'LIBUSB_SPEED_HIGH': 3,
# The device is operating at super speed (5000MBit/s).
'LIBUSB_SPEED_SUPER': 4,
})
libusb_supported_speed = Enum({
# Low speed operation supported (1.5MBit/s).
'LIBUSB_LOW_SPEED_OPERATION': 1,
# Full speed operation supported (12MBit/s).
'LIBUSB_FULL_SPEED_OPERATION': 2,
# High speed operation supported (480MBit/s).
'LIBUSB_HIGH_SPEED_OPERATION': 4,
# Superspeed operation supported (5000MBit/s).
'LIBUSB_5GBPS_OPERATION': 8,
})
# Error codes. Most libusb functions return 0 on success or one of these
# codes on failure.
libusb_error = Enum({
# Success (no error)
'LIBUSB_SUCCESS': 0,
# Input/output error
'LIBUSB_ERROR_IO': -1,
# Invalid parameter
'LIBUSB_ERROR_INVALID_PARAM': -2,
# Access denied (insufficient permissions)
'LIBUSB_ERROR_ACCESS': -3,
# No such device (it may have been disconnected)
'LIBUSB_ERROR_NO_DEVICE': -4,
# Entity not found
'LIBUSB_ERROR_NOT_FOUND': -5,
# Resource busy
'LIBUSB_ERROR_BUSY': -6,
# Operation timed out
'LIBUSB_ERROR_TIMEOUT': -7,
# Overflow
'LIBUSB_ERROR_OVERFLOW': -8,
# Pipe error
'LIBUSB_ERROR_PIPE': -9,
# System call interrupted (perhaps due to signal)
'LIBUSB_ERROR_INTERRUPTED': -10,
# Insufficient memory
'LIBUSB_ERROR_NO_MEM': -11,
# Operation not supported or unimplemented on this platform
'LIBUSB_ERROR_NOT_SUPPORTED': -12,
# Other error
'LIBUSB_ERROR_OTHER': -99,
})
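# Hedged convenience sketch (not part of the original bindings): most libusb
# calls return 0 on success or one of the negative codes above. Assumes the
# Enum wrapper supports reverse lookup via `get`, as noted near
# libusb_error_name below.
def check_error(result):
    if result < 0:
        raise RuntimeError('libusb call failed: %s' % libusb_error.get(result))
    return result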
# Transfer status codes
libusb_transfer_status = Enum({
# Transfer completed without error. Note that this does not indicate
# that the entire amount of requested data was transferred.
'LIBUSB_TRANSFER_COMPLETED': 0,
# Transfer failed
'LIBUSB_TRANSFER_ERROR': 1,
# Transfer timed out
'LIBUSB_TRANSFER_TIMED_OUT': 2,
# Transfer was cancelled
'LIBUSB_TRANSFER_CANCELLED': 3,
# For bulk/interrupt endpoints: halt condition detected (endpoint
# stalled). For control endpoints: control request not supported.
'LIBUSB_TRANSFER_STALL': 4,
# Device was disconnected
'LIBUSB_TRANSFER_NO_DEVICE': 5,
# Device sent more data than requested
'LIBUSB_TRANSFER_OVERFLOW': 6,
})
# libusb_transfer.flags values
libusb_transfer_flags = Enum({
# Report short frames as errors
'LIBUSB_TRANSFER_SHORT_NOT_OK': 1 << 0,
# Automatically free() transfer buffer during libusb_free_transfer()
'LIBUSB_TRANSFER_FREE_BUFFER': 1 << 1,
# Automatically call libusb_free_transfer() after callback returns.
# If this flag is set, it is illegal to call libusb_free_transfer()
# from your transfer callback, as this will result in a double-free
# when this flag is acted upon.
'LIBUSB_TRANSFER_FREE_TRANSFER': 1 << 2,
# Terminate transfers that are a multiple of the endpoint's
# wMaxPacketSize with an extra zero length packet.
'LIBUSB_TRANSFER_ADD_ZERO_PACKET': 1 << 3,
})
# Isochronous packet descriptor.
class libusb_iso_packet_descriptor(Structure):
_fields_ = [('length', c_uint),
('actual_length', c_uint),
('status', c_int)] # enum libusb_transfer_status
libusb_iso_packet_descriptor_p = POINTER(libusb_iso_packet_descriptor)
class libusb_transfer(Structure):
pass
libusb_transfer_p = POINTER(libusb_transfer)
libusb_transfer_cb_fn_p = CFUNCTYPE(None, libusb_transfer_p)
_libusb_transfer_fields = [
('dev_handle', libusb_device_handle_p),
('flags', c_uint8),
('endpoint', c_uchar),
('type', c_uchar),
('timeout', c_uint),
('status', c_int), # enum libusb_transfer_status
('length', c_int),
('actual_length', c_int),
('callback', libusb_transfer_cb_fn_p),
('user_data', c_void_p),
('buffer', c_void_p),
('num_iso_packets', c_int),
('iso_packet_desc', libusb_iso_packet_descriptor)
]
if 'FreeBSD' in platform.system() and getattr(
libusb, 'libusb_get_string_descriptor', None
) is None:
    # Old FreeBSD versions have a slight ABI incompatibility.
    # Work around it unless libusb_get_string_descriptor is available, as that
    # symbol is only present in the fixed versions.
assert _libusb_transfer_fields[2][0] == 'endpoint'
_libusb_transfer_fields[2] = ('endpoint', c_uint32)
assert _libusb_transfer_fields[11][0] == 'num_iso_packets'
_libusb_transfer_fields.insert(11, ('os_priv', c_void_p))
# pylint: disable=protected-access
libusb_transfer._fields_ = _libusb_transfer_fields
# pylint: enable=protected-access
libusb_capability = Enum({
# The libusb_has_capability() API is available.
'LIBUSB_CAP_HAS_CAPABILITY': 0x0000,
# Hotplug support is available.
'LIBUSB_CAP_HAS_HOTPLUG': 0x0001,
# The library can access HID devices without requiring user intervention.
'LIBUSB_CAP_HAS_HID_ACCESS': 0x0100,
# The library supports detaching of the default USB driver.
'LIBUSB_CAP_SUPPORTS_DETACH_KERNEL_DRIVER': 0x0101,
})
libusb_log_level = Enum({
'LIBUSB_LOG_LEVEL_NONE': 0,
'LIBUSB_LOG_LEVEL_ERROR': 1,
'LIBUSB_LOG_LEVEL_WARNING': 2,
'LIBUSB_LOG_LEVEL_INFO': 3,
'LIBUSB_LOG_LEVEL_DEBUG': 4,
})
#int libusb_init(libusb_context **ctx);
libusb_init = libusb.libusb_init
libusb_init.argtypes = [libusb_context_p_p]
#void libusb_exit(libusb_context *ctx);
libusb_exit = libusb.libusb_exit
libusb_exit.argtypes = [libusb_context_p]
libusb_exit.restype = None
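# Minimal usage sketch for the two bindings above (assumes the shared library
# behind `libusb` loaded correctly; `byref` comes from ctypes):
#
#   ctx = libusb_context_p()
#   if libusb_init(byref(ctx)):    # non-zero return means failure
#       raise RuntimeError('libusb_init failed')
#   try:
#       ...  # use ctx, or pass None for the default context
#   finally:
#       libusb_exit(ctx)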
#void libusb_set_debug(libusb_context *ctx, int level);
libusb_set_debug = libusb.libusb_set_debug
libusb_set_debug.argtypes = [libusb_context_p, c_int]
libusb_set_debug.restype = None
#const struct libusb_version * libusb_get_version(void);
try:
libusb_get_version = libusb.libusb_get_version
except AttributeError:
_dummy_version = libusb_version(0, 0, 0, 0, _empty_char_p, _empty_char_p)
_dummy_version_p = pointer(_dummy_version)
def libusb_get_version():
return _dummy_version_p
else:
libusb_get_version.argtypes = []
libusb_get_version.restype = POINTER(libusb_version)
#int libusb_has_capability(uint32_t capability);
try:
libusb_has_capability = libusb.libusb_has_capability
except AttributeError:
def libusb_has_capability(_):
return 0
else:
libusb_has_capability.argtypes = [c_uint32]
libusb_has_capability.restype = c_int
try:
# Note: Should be equivalent to libusb_error.get (except libusb_error.get
# one raises on unknown values).
#char *libusb_error_name(int errcode);
libusb_error_name = libusb.libusb_error_name
except AttributeError:
# pylint: disable=unused-argument
def libusb_error_name(errcode):
return None
# pylint: enable=unused-argument
else:
libusb_error_name.argtypes = [c_int]
libusb_error_name.restype = c_char_p
# Note on libusb_strerror, libusb_setlocale and future functions in the
# same spirit:
# I do not think end-user-facing messages belong in a technical library.
# Such features bring a new, non-essential set of problems and are a luxury
# I do not want to spend time
class ClusterAddonsConfigGcePersistentDiskCsiDriverConfigArray(object):
    @classmethod
    def from_proto(self, resources):
        return [
            ClusterAddonsConfigGcePersistentDiskCsiDriverConfig.from_proto(i)
            for i in resources
        ]
class ClusterNodePools(object):
def __init__(
self,
name: str = None,
config: dict = None,
initial_node_count: int = None,
locations: list = None,
self_link: str = None,
version: str = None,
instance_group_urls: list = None,
status: str = None,
status_message: str = None,
autoscaling: dict = None,
management: dict = None,
max_pods_constraint: dict = None,
conditions: list = None,
pod_ipv4_cidr_size: int = None,
upgrade_settings: dict = None,
):
self.name = name
self.config = config
self.initial_node_count = initial_node_count
self.locations = locations
self.self_link = self_link
self.version = version
self.instance_group_urls = instance_group_urls
self.status = status
self.status_message = status_message
self.autoscaling = autoscaling
self.management = management
self.max_pods_constraint = max_pods_constraint
self.conditions = conditions
self.pod_ipv4_cidr_size = pod_ipv4_cidr_size
self.upgrade_settings = upgrade_settings
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = cluster_pb2.ContainerClusterNodePools()
if Primitive.to_proto(resource.name):
res.name = Primitive.to_proto(resource.name)
if ClusterNodePoolsConfig.to_proto(resource.config):
res.config.CopyFrom(ClusterNodePoolsConfig.to_proto(resource.config))
else:
res.ClearField("config")
if Primitive.to_proto(resource.initial_node_count):
res.initial_node_count = Primitive.to_proto(resource.initial_node_count)
if Primitive.to_proto(resource.locations):
res.locations.extend(Primitive.to_proto(resource.locations))
if Primitive.to_proto(resource.self_link):
res.self_link = Primitive.to_proto(resource.self_link)
if Primitive.to_proto(resource.version):
res.version = Primitive.to_proto(resource.version)
if Primitive.to_proto(resource.instance_group_urls):
res.instance_group_urls.extend(
Primitive.to_proto(resource.instance_group_urls)
)
if ClusterNodePoolsStatusEnum.to_proto(resource.status):
res.status = ClusterNodePoolsStatusEnum.to_proto(resource.status)
if Primitive.to_proto(resource.status_message):
res.status_message = Primitive.to_proto(resource.status_message)
if ClusterNodePoolsAutoscaling.to_proto(resource.autoscaling):
res.autoscaling.CopyFrom(
ClusterNodePoolsAutoscaling.to_proto(resource.autoscaling)
)
else:
res.ClearField("autoscaling")
if ClusterNodePoolsManagement.to_proto(resource.management):
res.management.CopyFrom(
ClusterNodePoolsManagement.to_proto(resource.management)
)
else:
res.ClearField("management")
if ClusterNodePoolsMaxPodsConstraint.to_proto(resource.max_pods_constraint):
res.max_pods_constraint.CopyFrom(
ClusterNodePoolsMaxPodsConstraint.to_proto(resource.max_pods_constraint)
)
else:
res.ClearField("max_pods_constraint")
if ClusterNodePoolsConditionsArray.to_proto(resource.conditions):
res.conditions.extend(
ClusterNodePoolsConditionsArray.to_proto(resource.conditions)
)
if Primitive.to_proto(resource.pod_ipv4_cidr_size):
res.pod_ipv4_cidr_size = Primitive.to_proto(resource.pod_ipv4_cidr_size)
if ClusterNodePoolsUpgradeSettings.to_proto(resource.upgrade_settings):
res.upgrade_settings.CopyFrom(
ClusterNodePoolsUpgradeSettings.to_proto(resource.upgrade_settings)
)
else:
res.ClearField("upgrade_settings")
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return ClusterNodePools(
name=Primitive.from_proto(resource.name),
config=ClusterNodePoolsConfig.from_proto(resource.config),
initial_node_count=Primitive.from_proto(resource.initial_node_count),
locations=Primitive.from_proto(resource.locations),
self_link=Primitive.from_proto(resource.self_link),
version=Primitive.from_proto(resource.version),
instance_group_urls=Primitive.from_proto(resource.instance_group_urls),
status=ClusterNodePoolsStatusEnum.from_proto(resource.status),
status_message=Primitive.from_proto(resource.status_message),
autoscaling=ClusterNodePoolsAutoscaling.from_proto(resource.autoscaling),
management=ClusterNodePoolsManagement.from_proto(resource.management),
max_pods_constraint=ClusterNodePoolsMaxPodsConstraint.from_proto(
resource.max_pods_constraint
),
conditions=ClusterNodePoolsConditionsArray.from_proto(resource.conditions),
pod_ipv4_cidr_size=Primitive.from_proto(resource.pod_ipv4_cidr_size),
upgrade_settings=ClusterNodePoolsUpgradeSettings.from_proto(
resource.upgrade_settings
),
)
class ClusterNodePoolsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [ClusterNodePools.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [ClusterNodePools.from_proto(i) for i in resources]
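# Hedged round-trip sketch (not part of the generated code): the converters
# above translate between plain Python objects and `cluster_pb2` messages.
# `_example_round_trip` is illustrative only; field values are made up.
def _example_round_trip():
    pool = ClusterNodePools(name="default-pool", initial_node_count=3)
    msg = ClusterNodePools.to_proto(pool)        # Python object -> proto message
    restored = ClusterNodePools.from_proto(msg)  # proto message -> Python object
    assert restored.name == pool.name
    return restored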
class ClusterNodePoolsConfig(object):
def __init__(
self,
machine_type: str = None,
disk_size_gb: int = None,
oauth_scopes: list = None,
service_account: str = None,
metadata: dict = None,
image_type: str = None,
labels: dict = None,
local_ssd_count: int = None,
tags: list = None,
preemptible: bool = None,
accelerators: list = None,
disk_type: str = None,
min_cpu_platform: str = None,
workload_metadata_config: dict = None,
taints: list = None,
sandbox_config: dict = None,
node_group: str = None,
reservation_affinity: dict = None,
shielded_instance_config: dict = None,
linux_node_config: dict = None,
kubelet_config: dict = None,
boot_disk_kms_key: str = None,
):
self.machine_type = machine_type
self.disk_size_gb = disk_size_gb
self.oauth_scopes = oauth_scopes
self.service_account = service_account
self.metadata = metadata
self.image_type = image_type
self.labels = labels
self.local_ssd_count = local_ssd_count
self.tags = tags
self.preemptible = preemptible
self.accelerators = accelerators
self.disk_type = disk_type
self.min_cpu_platform = min_cpu_platform
self.workload_metadata_config = workload_metadata_config
self.taints = taints
self.sandbox_config = sandbox_config
self.node_group = node_group
self.reservation_affinity = reservation_affinity
self.shielded_instance_config = shielded_instance_config
self.linux_node_config = linux_node_config
self.kubelet_config = kubelet_config
self.boot_disk_kms_key = boot_disk_kms_key
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = cluster_pb2.ContainerClusterNodePoolsConfig()
if Primitive.to_proto(resource.machine_type):
res.machine_type = Primitive.to_proto(resource.machine_type)
if Primitive.to_proto(resource.disk_size_gb):
res.disk_size_gb = Primitive.to_proto(resource.disk_size_gb)
if Primitive.to_proto(resource.oauth_scopes):
res.oauth_scopes.extend(Primitive.to_proto(resource.oauth_scopes))
if Primitive.to_proto(resource.service_account):
res.service_account = Primitive.to_proto(resource.service_account)
if Primitive.to_proto(resource.metadata):
res.metadata = Primitive.to_proto(resource.metadata)
if Primitive.to_proto(resource.image_type):
res.image_type = Primitive.to_proto(resource.image_type)
if Primitive.to_proto(resource.labels):
res.labels = Primitive.to_proto(resource.labels)
if Primitive.to_proto(resource.local_ssd_count):
res.local_ssd_count = Primitive.to_proto(resource.local_ssd_count)
if Primitive.to_proto(resource.tags):
res.tags.extend(Primitive.to_proto(resource.tags))
if Primitive.to_proto(resource.preemptible):
res.preemptible = Primitive.to_proto(resource.preemptible)
if ClusterNodePoolsConfigAcceleratorsArray.to_proto(resource.accelerators):
res.accelerators.extend(
ClusterNodePoolsConfigAcceleratorsArray.to_proto(resource.accelerators)
)
if Primitive.to_proto(resource.disk_type):
res.disk_type = Primitive.to_proto(resource.disk_type)
if Primitive.to_proto(resource.min_cpu_platform):
res.min_cpu_platform = Primitive.to_proto(resource.min_cpu_platform)
if ClusterNodePoolsConfigWorkloadMetadataConfig.to_proto(
resource.workload_metadata_config
):
res.workload_metadata_config.CopyFrom(
ClusterNodePoolsConfigWorkloadMetadataConfig.to_proto(
resource.workload_metadata_config
)
)
else:
res.ClearField("workload_metadata_config")
if ClusterNodePoolsConfigTaintsArray.to_proto(resource.taints):
res.taints.extend(
ClusterNodePoolsConfigTaintsArray.to_proto(resource.taints)
)
if ClusterNodePoolsConfigSandboxConfig.to_proto(resource.sandbox_config):
res.sandbox_config.CopyFrom(
ClusterNodePoolsConfigSandboxConfig.to_proto(resource.sandbox_config)
)
else:
res.ClearField("sandbox_config")
if Primitive.to_proto(resource.node_group):
res.node_group = Primitive.to_proto(resource.node_group)
if ClusterNodePoolsConfigReservationAffinity.to_proto(
resource.reservation_affinity
):
res.reservation_affinity.CopyFrom(
ClusterNodePoolsConfigReservationAffinity.to_proto(
resource.reservation_affinity
)
)
else:
res.ClearField("reservation_affinity")
if ClusterNodePoolsConfigShieldedInstanceConfig.to_proto(
resource.shielded_instance_config
):
res.shielded_instance_config.CopyFrom(
ClusterNodePoolsConfigShieldedInstanceConfig.to_proto(
resource.shielded_instance_config
)
)
else:
res.ClearField("shielded_instance_config")
if ClusterNodePoolsConfigLinuxNodeConfig.to_proto(resource.linux_node_config):
res.linux_node_config.CopyFrom(
ClusterNodePoolsConfigLinuxNodeConfig.to_proto(
resource.linux_node_config
)
)
else:
res.ClearField("linux_node_config")
if ClusterNodePoolsConfigKubeletConfig.to_proto(resource.kubelet_config):
res.kubelet_config.CopyFrom(
ClusterNodePoolsConfigKubeletConfig.to_proto(resource.kubelet_config)
)
else:
res.ClearField("kubelet_config")
if Primitive.to_proto(resource.boot_disk_kms_key):
res.boot_disk_kms_key = Primitive.to_proto(resource.boot_disk_kms_key)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return ClusterNodePoolsConfig(
machine_type=Primitive.from_proto(resource.machine_type),
disk_size_gb=Primitive.from_proto(resource.disk_size_gb),
oauth_scopes=Primitive.from_proto(resource.oauth_scopes),
service_account=Primitive.from_proto(resource.service_account),
metadata=Primitive.from_proto(resource.metadata),
image_type=Primitive.from_proto(resource.image_type),
labels=Primitive.from_proto(resource.labels),
local_ssd_count=Primitive.from_proto(resource.local_ssd_count),
tags=Primitive.from_proto(resource.tags),
preemptible=Primitive.from_proto(resource.preemptible),
accelerators=ClusterNodePoolsConfigAcceleratorsArray.from_proto(
resource.accelerators
),
disk_type=Primitive.from_proto(resource.disk_type),
min_cpu_platform=Primitive.from_proto(resource.min_cpu_platform),
workload_metadata_config=ClusterNodePoolsConfigWorkloadMetadataConfig.from_proto(
resource.workload_metadata_config
),
taints=ClusterNodePoolsConfigTaintsArray.from_proto(resource.taints),
sandbox_config=ClusterNodePoolsConfigSandboxConfig.from_proto(
resource.sandbox_config
),
node_group=Primitive.from_proto(resource.node_group),
reservation_affinity=ClusterNodePoolsConfigReservationAffinity.from_proto(
resource.reservation_affinity
),
shielded_instance_config=ClusterNodePoolsConfigShieldedInstanceConfig.from_proto(
resource.shielded_instance_config
),
linux_node_config=ClusterNodePoolsConfigLinuxNodeConfig.from_proto(
resource.linux_node_config
),
kubelet_config=ClusterNodePoolsConfigKubeletConfig.from_proto(
resource.kubelet_config
),
boot_disk_kms_key=Primitive.from_proto(resource.boot_disk_kms_key),
)
class ClusterNodePoolsConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [ClusterNodePoolsConfig.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [ClusterNodePoolsConfig.from_proto(i) for i in resources]
class ClusterNodePoolsConfigAccelerators(object):
def __init__(self, accelerator_count: int = None, accelerator_type: str = None):
self.accelerator_count = accelerator_count
self.accelerator_type = accelerator_type
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = cluster_pb2.ContainerClusterNodePoolsConfigAccelerators()
if Primitive.to_proto(resource.accelerator_count):
res.accelerator_count = Primitive.to_proto(resource.accelerator_count)
if Primitive.to_proto(resource.accelerator_type):
res.accelerator_type = Primitive.to_proto(resource.accelerator_type)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return ClusterNodePoolsConfigAccelerators(
accelerator_count=Primitive.from_proto(resource.accelerator_count),
accelerator_type=Primitive.from_proto(resource.accelerator_type),
)
class ClusterNodePoolsConfigAcceleratorsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [ClusterNodePoolsConfigAccelerators.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [ClusterNodePoolsConfigAccelerators.from_proto(i) for i in resources]
class ClusterNodePoolsConfigWorkloadMetadataConfig(object):
def __init__(self, mode: str = None):
self.mode = mode
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = cluster_pb2.ContainerClusterNodePoolsConfigWorkloadMetadataConfig()
if ClusterNodePoolsConfigWorkloadMetadataConfigModeEnum.to_proto(resource.mode):
res.mode = ClusterNodePoolsConfigWorkloadMetadataConfigModeEnum.to_proto(
resource.mode
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return ClusterNodePoolsConfigWorkloadMetadataConfig(
mode=ClusterNodePoolsConfigWorkloadMetadataConfigModeEnum.from_proto(
resource.mode
),
)
class ClusterNodePoolsConfigWorkloadMetadataConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
ClusterNodePoolsConfigWorkloadMetadataConfig.to_proto(i) for i in resources
]
@classmethod
def from_proto(self, resources):
return [
ClusterNodePoolsConfigWorkloadMetadataConfig.from_proto(i)
for i in resources
]
class ClusterNodePoolsConfigTaints(object):
def __init__(self, key: str = None, value: str = None, effect: str = None):
self.key = key
self.value = value
self.effect = effect
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = cluster_pb2.ContainerClusterNodePoolsConfigTaints()
if Primitive.to_proto(resource.key):
res.key = Primitive.to_proto(resource.key)
if Primitive.to_proto(resource.value):
res.value = Primitive.to_proto(resource.value)
if ClusterNodePoolsConfigTaintsEffectEnum.to_proto(resource.effect):
res.effect = ClusterNodePoolsConfigTaintsEffectEnum.to_proto(
resource.effect
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return ClusterNodePoolsConfigTaints(
key=Primitive.from_proto(resource.key),
value=Primitive.from_proto(resource.value),
effect=ClusterNodePoolsConfigTaintsEffectEnum.from_proto(resource.effect),
)
class ClusterNodePoolsConfigTaintsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [ClusterNodePoolsConfigTaints.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [ClusterNodePoolsConfigTaints.from_proto(i) for i in resources]
class ClusterNodePoolsConfigSandboxConfig(object):
def __init__(self, type: str = None):
self.type = type
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = cluster_pb2.ContainerClusterNodePoolsConfigSandboxConfig()
if ClusterNodePoolsConfigSandboxConfigTypeEnum.to_proto(resource.type):
res.type = ClusterNodePoolsConfigSandboxConfigTypeEnum.to_proto(
resource.type
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return ClusterNodePoolsConfigSandboxConfig(
type=ClusterNodePoolsConfigSandboxConfigTypeEnum.from_proto(resource.type),
)
class ClusterNodePoolsConfigSandboxConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [ClusterNodePoolsConfigSandboxConfig.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [ClusterNodePoolsConfigSandboxConfig.from_proto(i) for i in resources]
class ClusterNodePoolsConfigReservationAffinity(object):
def __init__(
self, consume_reservation_type: str = None, key: str = None, values: list = None
):
self.consume_reservation_type = consume_reservation_type
self.key = key
self.values = values
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = cluster_pb2.ContainerClusterNodePoolsConfigReservationAffinity()
if ClusterNodePoolsConfigReservationAffinityConsumeReservationTypeEnum.to_proto(
resource.consume_reservation_type
):
res.consume_reservation_type = ClusterNodePoolsConfigReservationAffinityConsumeReservationTypeEnum.to_proto(
resource.consume_reservation_type
)
if Primitive.to_proto(resource.key):
res.key = Primitive.to_proto(resource.key)
if Primitive.to_proto(resource.values):
res.values.extend(Primitive.to_proto(resource.values))
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return ClusterNodePoolsConfigReservationAffinity(
consume_reservation_type=ClusterNodePoolsConfigReservationAffinityConsumeReservationTypeEnum.from_proto(
resource.consume_reservation_type
),
key=Primitive.from_proto(resource.key),
values=Primitive.from_proto(resource.values),
)
class ClusterNodePoolsConfigReservationAffinityArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
ClusterNodePoolsConfigReservationAffinity.to_proto(i) for i in resources
]
@classmethod
def from_proto(self, resources):
return [
ClusterNodePoolsConfigReservationAffinity.from_proto(i) for i in resources
]
class ClusterNodePoolsConfigShieldedInstanceConfig(object):
def __init__(
self, enable_secure_boot: bool = None, enable_integrity_monitoring: bool = None
):
self.enable_secure_boot = enable_secure_boot
self.enable_integrity_monitoring = enable_integrity_monitoring
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = cluster_pb2.ContainerClusterNodePoolsConfigShieldedInstanceConfig()
if Primitive.to_proto(resource.enable_secure_boot):
res.enable_secure_boot = Primitive.to_proto(resource.enable_secure_boot)
if Primitive.to_proto(resource.enable_integrity_monitoring):
res.enable_integrity_monitoring = Primitive.to_proto(
resource.enable_integrity_monitoring
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return ClusterNodePoolsConfigShieldedInstanceConfig(
enable_secure_boot=Primitive.from_proto(resource.enable_secure_boot),
enable_integrity_monitoring=Primitive.from_proto(
resource.enable_integrity_monitoring
),
)
class ClusterNodePoolsConfigShieldedInstanceConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
ClusterNodePoolsConfigShieldedInstanceConfig.to_proto(i) for i in resources
]
@classmethod
def from_proto(self, resources):
return [
ClusterNodePoolsConfigShieldedInstanceConfig.from_proto(i)
for i in resources
]
class ClusterNodePoolsConfigLinuxNodeConfig(object):
def __init__(self, sysctls: dict = None):
self.sysctls = sysctls
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = cluster_pb2.ContainerClusterNodePoolsConfigLinuxNodeConfig()
if Primitive.to_proto(resource.sysctls):
res.sysctls = Primitive.to_proto(resource.sysctls)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return ClusterNodePoolsConfigLinuxNodeConfig(
sysctls=Primitive.from_proto(resource.sysctls),
)
class ClusterNodePoolsConfigLinuxNodeConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [ClusterNodePoolsConfigLinuxNodeConfig.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [ClusterNodePoolsConfigLinuxNodeConfig.from_proto(i) for i in resources]
class ClusterNodePoolsConfigKubeletConfig(object):
def __init__(
self,
cpu_manager_policy: str = None,
cpu_cfs_quota: bool = None,
cpu_cfs_quota_period: str = None,
):
self.cpu_manager_policy = cpu_manager_policy
self.cpu_cfs_quota = cpu_cfs_quota
self.cpu_cfs_quota_period = cpu_cfs_quota_period
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = cluster_pb2.ContainerClusterNodePoolsConfigKubeletConfig()
if Primitive.to_proto(resource.cpu_manager_policy):
res.cpu_manager_policy = Primitive.to_proto(resource.cpu_manager_policy)
if Primitive.to_proto(resource.cpu_cfs_quota):
            res.cpu_cfs_quota = Primitive.to_proto(resource.cpu_cfs_quota)
        if Primitive.to_proto(resource.cpu_cfs_quota_period):
            res.cpu_cfs_quota_period = Primitive.to_proto(resource.cpu_cfs_quota_period)
        return res
from autogoal.exceptions import InterfaceIncompatibleError
# import types
import inspect
# import pprint
from typing import Mapping
from autogoal.grammar import Symbol, Union, Empty, Subset
# from scipy.sparse.base import spmatrix
# from numpy import ndarray
# def algorithm(input_type, output_type):
# def run_method(self, input: input_type) -> output_type:
# pass
# def body(ns):
# ns["run"] = run_method
# return types.new_class(
# name="Algorithm[%s, %s]" % (input_type, output_type),
# bases=(Interface,),
# exec_body=body,
# )
class Interface:
@classmethod
def is_compatible(cls, other_cls):
own_methods = _get_annotations(cls, ignore=["generate_cfg", "is_compatible"])
if not inspect.isclass(other_cls):
return False
if issubclass(other_cls, Interface):
return False
type_methods = _get_annotations(other_cls)
return _compatible_annotations(own_methods, type_methods)
@classmethod
def generate_cfg(cls, grammar, head):
symbol = head or Symbol(cls.__name__)
compatible = []
for _, other_cls in grammar.namespace.items():
if cls.is_compatible(other_cls):
compatible.append(other_cls)
if not compatible:
raise InterfaceIncompatibleError(cls.__name__)
return Union(symbol.name, *compatible).generate_cfg(grammar, symbol)
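# Compatibility sketch (hypothetical classes, mirroring the doctest style used
# in `_get_annotations` below): an interface matches any class whose public
# methods have conforming signatures, without requiring inheritance.
#
#     class SupportsRun(Interface):
#         def run(self, input: int) -> float:
#             pass
#
#     class Doubler:
#         def run(self, input: int) -> float:
#             return input * 2.0
#
#     SupportsRun.is_compatible(Doubler)  # True  (structural match)
#     SupportsRun.is_compatible(int)      # False (no conforming `run`)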
class Distinct:
def __init__(self, interface: Interface, exceptions=None):
self.interface = interface
        self.exceptions = exceptions or []
def generate_cfg(self, grammar, head):
symbol = head or Symbol(self.__class__.__name__)
compatible = []
for _, other_cls in grammar.namespace.items():
if other_cls in self.exceptions:
continue
if hasattr(other_cls, "__name__") and other_cls.__name__ in self.exceptions:
continue
if self.interface.is_compatible(other_cls):
compatible.append(other_cls)
if not compatible:
raise ValueError(
"Cannot find compatible implementations for <class %s>" % self.interface
)
return Subset(symbol.name, *compatible).generate_cfg(grammar, symbol)
def conforms(type1, type2):
    if inspect.isclass(type1) and inspect.isclass(type2):
        return issubclass(type1, type2)
    if hasattr(type1, "__conforms__") and type1.__conforms__(type2):
        return True
    if hasattr(type2, "__rconforms__") and type2.__rconforms__(type1):
        return True
    return False
def _compatible_annotations(
methods_if: Mapping[str, inspect.Signature],
methods_im: Mapping[str, inspect.Signature],
):
for name, mif in methods_if.items():
        if name not in methods_im:
return False
mim = methods_im[name]
for name, param_if in mif.parameters.items():
            if name not in mim.parameters:
return False
param_im = mim.parameters[name]
ann_if = param_if.annotation
if ann_if == inspect.Parameter.empty:
continue
ann_im = param_im.annotation
if not conforms(ann_if, ann_im):
return False
return_if = mif.return_annotation
if return_if == inspect.Parameter.empty:
continue
return_im = mim.return_annotation
if not conforms(return_im, return_if):
return False
return True
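# Note: parameter annotations are checked contravariantly (`conforms(ann_if, ann_im)`,
# i.e. the implementation must accept at least what the interface declares), while
# return annotations are checked covariantly (`conforms(return_im, return_if)`, i.e.
# the implementation may return something more specific).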
def _get_annotations(clss, ignore=()):
"""
Computes the annotations of all public methods in type `clss`.
##### Examples
```python
>>> class A:
... def f(self, input: int) -> float:
... pass
>>> _get_annotations(A)
{'f': <Signature (self, input:int) -> float>}
```
"""
methods = inspect.getmembers(
clss, lambda m: inspect.ismethod(m) or inspect.isfunction(m)
)
signatures = {
name: inspect.signature(method)
for name, method in methods
if not name.startswith("_")
}
for name in ignore:
signatures.pop(name, None)
return signatures
# # def make_list_wrapper(algorithm):
# # from autogoal.kb._algorithm import _get_annotations
# # input_type, output_type = _get_annotations(algorithm)
# # name = f"List[{algorithm.__name__}]"
# # def wrap_list(types):
# # if isinstance(types, Tuple):
# # return Tuple(*(List(t) for t in types.inner))
# # return List(types)
# # def init_method(self, inner: algorithm):
# # self.inner = inner
# # def run_method(self, input: wrap_list(input_type)) -> wrap_list(output_type):
# # return [self.inner.run(x) for x in xs]
# # def repr_method(self):
# # return f"{name}(inner={repr(self.inner)})"
# # def getattr_method(self, attr):
# # return getattr(self.inner, attr)
# # def body(ns):
# # ns["__init__"] = init_method
# # ns["run"] = run_method
# # ns["__repr__"] = repr_method
# # ns["__getattr__"] = getattr_method
# # return types.new_class(name=name, bases=(), exec_body=body)
# # def build_composite_list(input_type, output_type, depth=1):
# # def wrap(t, d):
# # if d == 0:
# # return t
# # return List(wrap(t, d - 1))
# # input_wrapper = wrap(input_type, depth)
# # output_wrapper = wrap(output_type, depth)
# # # name = "ListAlgorithm" # % (input_wrapper, output_wrapper)
# # name = "ListAlgorithm" #[%s, %s]" % (input_wrapper, output_wrapper)
# # def init_method(self, inner: algorithm(input_type, output_type)):
# # self.inner = inner
# # def run_method(self, input: input_wrapper) -> output_wrapper:
# # def wrap_run(xs, d):
# # if d == 0:
# # return self.inner.run(xs)
# # return [wrap_run(x, d - 1) for x in xs]
# # return wrap_run(input, depth)
# # def repr_method(self):
# # return f"{name}(inner={repr(self.inner)})"
# # def getattr_method(self, attr):
# # return getattr(self.inner, attr)
# # def reduce_method(self):
# # return (
# # build_composite_list_instance,
# # (input_type, output_type, self.inner)
# # )
# # def body(ns):
# # ns["__init__"] = init_method
# # ns["run"] = run_method
# # ns["__repr__"] = repr_method
# # ns["__getattr__"] = getattr_method
# # ns["__reduce__"] = reduce_method
# # return types.new_class(name=name, bases=(), exec_body=body)
# # def build_composite_list_instance(input_type, output_type, inner_algorithm):
# # """
# # Build a ListAlgorithm[...] type and instantiate it directly on a given algorithm.
# # """
# # return build_composite_list(input_type, output_type)(inner_algorithm)
# # def build_composite_tuple(index, input_type: "Tuple", output_type: "Tuple"):
# # """
# # Dynamically generate a class `TupleAlgorithm` that wraps
# # another algorithm to receive a Tuple but pass only one of the
# # parameters to the internal algorithm.
# # """
# # internal_input = input_type.inner[index]
# # internal_output = output_type.inner[index]
# # name = "TupleAlgorithm" #[%s, %s]" % (input_type, output_type)
# # def init_method(self, inner: algorithm(internal_input, internal_output)):
# # self.inner = inner
# # def run_method(self, input: input_type) -> output_type:
# # elements = list(input)
# # elements[index] = self.inner.run(elements[index])
# # return tuple(elements)
# # def repr_method(self):
# # return f"{name}(inner={repr(self.inner)})"
# # def getattr_method(self, attr):
# # return getattr(self.inner, attr)
# # def reduce_method(self):
# # return (
# # build_composite_tuple_instance,
# # (index, input_type, output_type, self.inner)
# # )
# # def body(ns):
# # ns["__init__"] = init_method
# # ns["run"] = run_method
# # ns["__repr__"] = repr_method
# # ns["__getattr__"] = getattr_method
# # ns["__reduce__"] = reduce_method
# # return types.new_class(name=name, bases=(), exec_body=body)
# # def build_composite_tuple_instance(index, input_type, output_type, inner_algorithm):
# # """
# # Build a TupleAlgorithm[...] type and instantiate it directly on a given algorithm.
# # """
# # return build_composite_tuple(index, input_type, output_type)(inner_algorithm)
# # class DataType:
# # def __init__(self, **tags):
# # self.tags = tags
# # def get_tag(self, tag):
# # return self.tags.get(tag, None)
# # def __repr__(self):
# # # tags = ", ".join(
# # # f"{key}={value}"
# # # for key, value in sorted(self.tags.items(), key=lambda t: t[0])
# # # )
# # return f"{self.__class__.__name__}()" #({tags})"
# # def __eq__(self, other):
# # return repr(self) == repr(other)
# # def __hash__(self):
# # return hash(repr(self))
# # @property
# # def __name__(self):
# # return self.__class__.__name__
# # def __conforms__(self, other):
# # return issubclass(self.__class__, other.__class__)
# # def infer_type(obj):
# # """
# # Attempts to automatically infer the most precise semantic type for `obj`.
# # ##### Parameters
# # * `obj`: Object to detect its semantic type.
# # ##### Raises
# # * `TypeError`: if no valid semantic type was found that matched `obj`.
# # ##### Examples
# # * Natural language
# # ```python
# # >>> infer_type("hello")
# # Word()
# # >>> infer_type("hello world")
# # Sentence()
# # >>> infer_type("Hello Word. It is raining.")
# # Document()
# # ```
# # * Vectors
# # ```
# # >>> import numpy as np
# # >>> infer_type(np.asarray(["A", "B", "C", "D"]))
# # CategoricalVector()
# # >>> infer_type(np.asarray([0.0, 1.1, 2.1, 0.2]))
# # ContinuousVector()
# # >>> infer_type(np.asarray([0, 1, 1, 0]))
# # DiscreteVector()
# # ```
# # * Matrices
# # ```
# # >>> import numpy as np
# # >>> infer_type(np.random.randn(10,10))
# # MatrixContinuousDense()
# # >>> import scipy.sparse as sp
# # >>> infer_type(sp.coo_matrix((10,10)))
# # MatrixContinuousSparse()
# # ```
# # """
# # if isinstance(obj, str):
# # if " " not in obj:
# # return Word()
# # if "." not in obj:
# # return Sentence()
# # return Document()
# # if isinstance(obj, list):
# # internal_types = set([infer_type(x) for x in obj])
# # for test_type in [Document(), Sentence(), Word()]:
# # if test_type in internal_types:
# # return List(test_type)
# # if hasattr(obj, "shape"):
# # if len(obj.shape) == 1:
# # if isinstance(obj, ndarray):
# # if obj.dtype.kind == "U":
# # return CategoricalVector()
# # if obj.dtype.kind == "i":
# # return DiscreteVector()
# # if obj.dtype.kind == "f":
# # return ContinuousVector()
# # if len(obj.shape) == 2:
# # if isinstance(obj, spmatrix):
# # return MatrixContinuousSparse()
# # if isinstance(obj, ndarray):
# # if obj.dtype.kind == "O":
# # return MatrixCategorical()
# # else:
# # return MatrixContinuousDense()
# # raise TypeError("Cannot infer type for %r" % obj)
# # class Text(DataType):
# # pass
# # class Word(Text):
# # pass
# # class Stem(DataType):
# # pass
# # class Sentence(Text):
# # pass
# # class Document(Text):
# # pass
# # class Category(DataType):
# # pass
# # class Vector(DataType):
# # pass
# # class Matrix(DataType):
# # pass
# # class DenseMatrix(Matrix):
# # pass
# # class SparseMatrix(Matrix):
# # pass
# # class ContinuousVector(Vector):
# # pass
# # class DiscreteVector(Vector):
# # pass
# # class CategoricalVector(Vector):
# # pass
# # class MatrixContinuous(Matrix):
# # pass
# # class MatrixCategorical(Matrix):
# # pass
# # class MatrixContinuousDense(MatrixContinuous, DenseMatrix):
# # pass
# # class MatrixContinuousSparse(MatrixContinuous, SparseMatrix):
# # pass
# # class Entity(DataType):
# # pass
# # class Summary(Document):
# # pass
# # class Sentiment(DataType):
# # pass
# # class Synset(DataType):
# # pass
# # class Postag(DataType):
# # pass
# # class Chunktag(DataType):
# # pass
# # class Tensor3(DataType):
# # pass
# # class Tensor4(DataType):
# # pass
# # class Flags(DataType):
# # pass
# # class List(DataType):
# # def __init__(self, inner):
# # self.inner = inner
# # # super().__init__(**inner.tags)
# # def depth(self):
# TODO: Add description to the integration in <root>/Packs/Coralogix/Integrations/Coralogix/Coralogix_description.md
from datetime import datetime, timezone
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import json
import requests
import urllib3
import dateutil.parser
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' CONSTANTS '''
''' SUPPLEMENTAL FUNCTIONS '''
def strip_or_empty(o):
    """Return `o` stripped of surrounding whitespace, or '' if it is None or has no strip()."""
    try:
        return o.strip() if o is not None else ''
    except AttributeError:
        return ''
def flatten_json(y):
"""
This supplemental method flattens a JSON by renaming subfields using dots as delimiters between levels
Args:
y - the JSON to flatten
Returns:
        A flattened dict whose keys are the dot-delimited paths of the original nested values
"""
out = {}
def flatten(x, name=''):
# If the Nested key-value
# pair is of dict type
if type(x) is dict:
for a in x:
flatten(x[a], name + a + '.')
# If the Nested key-value
# pair is of list type
elif type(x) is list:
i = 0
for a in x:
flatten(a, name + str(i) + '.')
i += 1
else:
out[name[:-1]] = x
flatten(y)
return out
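# For example, nested dicts and lists are flattened into dot-delimited keys:
#     flatten_json({"a": {"b": 1}, "c": [10, 20]})
#     -> {"a.b": 1, "c.0": 10, "c.1": 20}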
def elastic_output_as_table(raw_elastic_json_obj):
"""
This supplemental method converts an Elasticsearch output JSON object to a Demisto table
Args:
raw_elastic_json_obj - The raw Elasticsearch output as received from the Elasticsearch API
Returns:
An object that represents a Demisto table with the data from the Elasticsearch output
"""
# fill in the table and the raw contents objects
table = []
for raw_row in raw_elastic_json_obj["hits"]["hits"]:
raw_row_flattened = flatten_json(raw_row["_source"])
table.append(raw_row_flattened)
return table
''' COMMANDS '''
def coralogix_search_command(
cgx_private_key,
cgx_endpoint_url,
query,
columns_to_include_in_human_readable,
application_name,
subsystem_name,
severity,
since_timestamp=None,
to_timestamp="now",
max_items_to_retrieve=-1):
"""
This method handles the cgx_search command which allows the user to search for data on a Coralogix account
Args:
cgx_private_key - String, Coralogix Private Key (From the integration's settings page)
cgx_endpoint_url - Coralogix ES-API base URL (From the integration's settings page)
query - The Lucene query to send to Coralogix
columns_to_include_in_human_readable - Strings Array, Columns to include in the human readable output
application_name - String, The Coralogix application name to look for (optional)
subsystem_name - String, The Coralogix subsystem name to look for (optional)
        severity - String, The Coralogix severity to look for (optional)
        since_timestamp - The earliest timestamp to search from, e.g. Elasticsearch date math such as "now-3d" (optional)
        to_timestamp - The latest timestamp to search to; defaults to "now" and is only valid together with since_timestamp
        max_items_to_retrieve - Int, The maximum number of documents to return; -1 leaves the Elasticsearch default
    Returns:
        A demisto CommandResults object
"""
http = urllib3.PoolManager()
    # Build the metadata pre-query from the optional filters, joining only the
    # clauses that are actually set so the resulting Lucene query stays valid.
    pre_query_clauses = []
    if len(strip_or_empty(application_name)) > 0:
        pre_query_clauses.append('coralogix.metadata.applicationName:"' + application_name + '"')
    if len(strip_or_empty(subsystem_name)) > 0:
        pre_query_clauses.append('coralogix.metadata.subsystemName:"' + subsystem_name + '"')
    if len(strip_or_empty(severity)) > 0:
        pre_query_clauses.append('coralogix.severity_str:"' + severity + '"')
    if pre_query_clauses:
        query = '(' + ' AND '.join(pre_query_clauses) + ') AND (' + query + ')'
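    # For example, application_name="prod" and severity="ERROR" combined with
    # query='status:500' produce:
    #   (coralogix.metadata.applicationName:"prod" AND coralogix.severity_str:"ERROR") AND (status:500)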
request_data = {
"query": {
"bool": {
"must": [
{
"query_string": {
"query": query
}
}
]
}
}
}
if since_timestamp is not None:
request_data["query"]["bool"]["must"].append({
"range": {
"coralogix.timestamp": {
"gt": since_timestamp,
"lt": to_timestamp
}
}
})
if since_timestamp is None and to_timestamp != "now":
raise ValueError("to_timestamp can only be set together with since_timestamp")
if max_items_to_retrieve > 0:
request_data["size"] = max_items_to_retrieve
encoded_data = json.dumps(request_data).encode('utf-8')
demisto.info('Calling `' + cgx_endpoint_url + '` with these args ' + json.dumps(request_data) + ' ...')
r = http.request(
'POST',
cgx_endpoint_url,
body=encoded_data,
headers={
"token": cgx_private_key,
"Content-type": "application/json"
})
results_raw = json.loads(r.data.decode('utf-8'))
if 'hits' in results_raw and 'hits' in results_raw['hits']:
formatted_data = elastic_output_as_table(raw_elastic_json_obj=results_raw)
return CommandResults(
raw_response=results_raw,
readable_output=tableToMarkdown(
'Coralogix Search Results',
formatted_data,
columns_to_include_in_human_readable,
removeNull=True),
outputs_key_field="_id",
outputs_prefix='Coralogix.SearchResults',
outputs=results_raw['hits']['hits']
)
else:
return CommandResults(
raw_response=results_raw,
readable_output=tableToMarkdown(
'Coralogix Search Results',
[],
columns_to_include_in_human_readable,
removeNull=True),
outputs_key_field="_id",
outputs_prefix='Coralogix.SearchResults',
outputs=[]
)
def coralogix_tag_command(
private_key,
coralogix_url,
application_name,
subsystem_name,
tag_name,
tag_timestamp="",
tag_icon_url=""):
"""
This method handles the cgx_tag command which allows the user to tag a certain timestamp on Coralogix (for example to mark
the time at which an incident has occurred)
Args:
private_key - String, Coralogix Private Key (From the integration's settings page)
coralogix_url - String, Coralogix Web-API base URL (From the integration's settings page)
application_name - String, The application name that will be associated with the tag (Would probably be
something like 'Demisto')
subsystem_name - String, The subsystem name that will be associated with the tag (Would probably be
something like 'Demisto')
tag_name - String, The name of the tag that will be created
tag_timestamp - String, The date at which to put the tag in Coralogix. (Optional, if not set,
the current time will be used by Coralogix)
tag_icon_url - String, A URL for an image that will be used in the tag. Cannot exceed 50KB. (Optional, if not set,
Coralogix will automatically choose an image)
Returns:
The raw response from Coralogix WebAPI. If it is JSON parsable it will return it as an object, otherwise - as a string.
"""
query_string_params = {
'key': private_key,
'application': application_name,
'subsystem': subsystem_name,
'name': tag_name
}
if len(tag_timestamp) > 0:
query_string_params["timestamp"] = tag_timestamp
if len(tag_icon_url) > 0:
query_string_params["iconUrl"] = tag_icon_url
    demisto.info('Calling `' + coralogix_url + '` using POST with these args ' + json.dumps(query_string_params) + ' ...')
response = requests.post(coralogix_url, params=query_string_params)
try:
results_obj = json.loads(response.text)
if "tag_status" in results_obj and results_obj["tag_status"] == "SUCCESSFUL":
return CommandResults(
raw_response=results_obj,
outputs_prefix='Coralogix.TagResults',
readable_output='Tag added successfully',
outputs=['Tag was successfully created at ' + tag_timestamp + ' under the name ' + tag_name]
)
else:
return CommandResults(
raw_response=results_obj,
outputs_prefix='Coralogix.TagResults',
readable_output='Failed to add the requested tag',
outputs=['Failed to tag the following timestamp ' + tag_timestamp + ' under the name ' + tag_name]
)
except json.JSONDecodeError:
raise ValueError('Failed to tag the following timestamp ' + tag_timestamp + ' under the name ' + tag_name
+ '. This is the raw response:\n' + response.text)
def test_module(cgx_private_key, cgx_endpoint_url):
"""
Returning 'ok' indicates that the integration works like it is supposed to. Connection to the service is successful.
Args:
cgx_private_key: Coralogix Private Key
cgx_endpoint_url: Coralogix Cluster Base URL for the Coralogix ES-API
Returns:
'ok' if test passed, anything else will fail the test.
"""
request_data = {
"query": {
"bool": {
"must": [
{
"range": {
"coralogix.timestamp": {
"gte": "now-15m",
"lt": "now"
}
}
}
]
}
}
}
try:
encoded_data = json.dumps(request_data).encode('utf-8')
demisto.info('Calling `' + cgx_endpoint_url + '` with these args ' + json.dumps(request_data) + ' ...')
http = urllib3.PoolManager()
r = http.request(
'GET',
cgx_endpoint_url,
body=encoded_data,
headers={
"token": cgx_private_key,
"Content-type": "application/json"
})
results_raw = json.loads(r.data.decode('utf-8'))
except Exception as ex:
return 'Test failed (' + type(ex).__name__ + ')'
if results_raw is not None and 'hits' in results_raw:
return 'ok'
else:
return 'Test failed (No result was received from Coralogix or the response was unexpected `' + \
json.dumps(results_raw) + '`)'
def fetch_incidents(
cgx_private_key,
cgx_endpoint_url,
incidents_base_query,
application_name,
subsystem_name,
severity,
incidents_name_field,
incidents_first_fetch_range,
incidents_max_fetch,
columns_to_include_in_human_readable):
"""
This method handles querying Coralogix for incidents by using the configured query and parameters
Args:
cgx_private_key - String, Coralogix Private Key
cgx_endpoint_url - String, Coralogix Cluster Base URL for the Coralogix ES-API
        incidents_base_query - String, The base Lucene query used to search for incidents
        application_name - String, The Coralogix application name to look for (optional)
        subsystem_name - String, The Coralogix subsystem name to look for (optional)
        severity - String, The Coralogix severity to look for (optional)
        incidents_name_field - String, The document field whose value will be used as the incident name
        incidents_first_fetch_range - Int, Number of days back to search on the first fetch
        incidents_max_fetch - Int, Maximum number of incidents to fetch per run
        columns_to_include_in_human_readable - Strings Array, Columns to include in the human readable output
Returns:
Returns the incidents found in Coralogix
"""
last_run_timestamp = "now-" + str(incidents_first_fetch_range) + "d"
if "last_run_timestamp" in demisto.getLastRun():
last_run_timestamp = demisto.getLastRun().get('last_run_timestamp', last_run_timestamp)
raw_data = coralogix_search_command(
cgx_private_key=cgx_private_key,
cgx_endpoint_url=cgx_endpoint_url,
query=incidents_base_query,
application_name=application_name,
subsystem_name=subsystem_name,
severity=severity,
columns_to_include_in_human_readable=columns_to_include_in_human_readable,
since_timestamp=last_run_timestamp,
max_items_to_retrieve=incidents_max_fetch
).raw_response
newest_incident_date_obj = datetime(year=1970, month=1, day=1)
incidents = []
if "hits" in raw_data and "hits" in raw_data["hits"]:
for document in raw_data["hits"]["hits"]:
flattened_document = flatten_json(document["_source"])
incident_date = flattened_document['coralogix.timestamp']
incident = {
'name': flattened_document[incidents_name_field],
'occurred': incident_date + "Z",
'rawJSON': json.dumps(flattened_document)
}
incidents.append(incident)
incident_date_obj = dateutil.parser.parse(incident_date)
if incident_date_obj.replace(tzinfo=timezone.utc).timestamp() > \
newest_incident_date_obj.replace(tzinfo=timezone.utc).timestamp():
newest_incident_date_obj = incident_date_obj
demisto.setLastRun({"last_run_timestamp": newest_incident_date_obj.replace(tzinfo=timezone.utc).timestamp()})
return incidents
def main():
# CONSTANTS
columns_to_include_in_human_readable = [
'coralogix.timestamp',
'coralogix.severity_str',
'coralogix.metadata.applicationName',
'coralogix.metadata.subsystemName',
'security.source_ip',
'security.destination_ip',
'security.event_type',
'security.source_port',
'security.destination_port',
'security.connection_state_description',
'security.protocol',
'security.local_orig',
'security.local_respond',
'security.total_bytes',
'security.query',
'security.query_type_name',
'security.rcode_name',
'security.ra',
'security.rd',
'awsRegion',
'eventName',
'eventSource',
'sourceIPAddress',
'userIdentity.sessionContext.sessionIssuer.userName',
'userIdentity.type',
'recipientAccountId',
'source_ip',
'destination_ip',
'source_port',
'destination_port',
'protocol'
]
default_incidents_query = "alert_type_id:\"53d222e2-e7b2-4fa6-80d4-9935425d47dd\""
default_incidents_first_fetch_range = '3'
default_incidents_max_fetch = '50'
default_incident_description_field = "alert_name"
default_max_items_to_retrieve = "50"
# PARSE AND VALIDATE INTEGRATION PARAMS
private_key = strip_or_empty(demisto.params().get('privatekey', ''))
# get the service API url
tags_api_url = urljoin(strip_or_empty(demisto.params().get('webapi_url', '')), '/api/v1/addTagPost')
search_api_url = urljoin(strip_or_empty(demisto.params().get('esapi_url', '')), '/*/_search')
# Get other parameters
    tag_application_name =
<reponame>arevi9176/Lightweight-Covid-Dashboard
#!/usr/bin/env python3
#
# LCD - Lightweight Covid Dashboard
# 28.03.20 (v0.1.0) - initial version
# 02.04.20 (v0.2.0) - added dataframe cache and view 'Average percentage increase in the last seven days'
# 03.04.20 (v0.2.1) - changed 'Average percentage increase' to 'Average growth rate'
# 15.04.20 (v0.3.0) - totally refactored and some bug fixes
# 19.04.20 (v0.4.0) - added trend in view 'Average growth rate in the last seven days'
# 21.06.20 (v0.5.0) - added continents based on country values; adjusted threshold values (days, min cases)
# 02.07.20 (v0.6.0) - new: 'force' url param to force data reload
#
__version__ = "v0.6.0"
import pandas
import urllib.request
from flask import Flask, render_template, request
from time import time, localtime, strftime
from lcd_country_data import get_country_population, get_country_continent
from math import log10, isinf, isnan
CONFIRMED_GLOBAL_URL = "https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv"
CONFIRMED_GLOBAL_FILE = "time_series_covid19_confirmed_global.csv"
DEATHS_GLOBAL_URL = "https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv"
DEATHS_GLOBAL_FILE = "time_series_covid19_deaths_global.csv"
RECOVERED_GLOBAL_URL = "https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv"
RECOVERED_GLOBAL_FILE = "time_series_covid19_recovered_global.csv"
class CovidData:
def __init__(self, filepath, url):
self.filepath = filepath
self.url = url
self.df = None
self.df_abs = None
self.df_dif = None
self.last_update_time = 0.0
def _insert_pop_column(self, df):
        '''Insert population column (in millions) at pos 0 in dataframe.'''
population = []
countries = list(df.index.values)
for country in countries:
population.append(get_country_population(country))
df.insert(0, 'Population', population)
return df
def _insert_continents_column(self, df):
'''Insert continent column at pos 0 in dataframe.'''
countries = list(df["Country/Region"])
continents = [get_country_continent(country) for country in countries]
df.insert(0, 'Continent', continents)
return df
def _insert_continent_rows(self, df):
        '''Compute values for continents based on their countries and insert them as rows.'''
df_tmp = df.copy()
df_tmp = self._insert_continents_column(df_tmp)
date_columns = list(df_tmp.columns)[2:]
col_dict = {col:'sum' for col in date_columns}
df_tmp = df_tmp.groupby("Continent").agg(col_dict)
df_tmp.loc['[World]'] = df_tmp.sum() # insert 'World' row
df_tmp.index.name = 'Country/Region'
df_tmp = df_tmp.reset_index()
        df_result = pandas.concat([df, df_tmp])  # DataFrame.append was removed in pandas 2.0
return df_result.sort_values(df_result.columns[-1], ascending = False)
def _insert_residents_per_case_column(self, df):
'''
Insert residents per case column at pos 1 in dataframe.
Population column must exist!
'''
residents_per_case = []
populations = list(df['Population'])
latest_cases = list(df[df.columns[-1]])
for po, lc in zip(populations, latest_cases):
try:
residents_per_case.append(round(po * 1000000 / lc))
except ZeroDivisionError:
residents_per_case.append(0)
df.insert(1, 'Residents per case', residents_per_case)
return df
def _insert_trend_column(self, df, distance):
'''
Insert trend column at pos 2 in dataframe.
        Trend is the percentage change between the case increases of two consecutive <distance>-day periods.
'''
diff1 = df[df.columns[-1]] - df[df.columns[-(1 + distance)]]
diff2 = df[df.columns[-(1 + distance)]] - df[df.columns[-(1 + 2 * distance)]]
quot = diff1 / diff2
quot = -(1 - quot) * 100
ql = [round(x) if not (isinf(x) or isnan(x)) else "n/a at this time" for x in list(quot)]
df.insert(2, 'Trend', ql)
return df
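    # Worked example: if cases grew by 700 over the most recent <distance>-day window
    # and by 500 over the window before that, quot = 700 / 500 = 1.4 and the trend is
    # -(1 - 1.4) * 100 = +40, i.e. the increase accelerated by 40 %.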
    def _drop_rows_based_on_unique_indices(self, df_source, df_target):
        '''Drop every row in df_target whose row index is not contained in df_source.'''
        set_of_source_indices = set(df_source.index)
        set_of_target_indices = set(df_target.index)
        indices_in_target_but_not_in_source = [i for i in set_of_target_indices if i not in set_of_source_indices]
        df = df_target.drop(indices_in_target_but_not_in_source)
return df
def _convert_all_floats_to_ints(self, df):
'''Convert all float numbers to int numbers in a given dataframe.'''
float_col = df.select_dtypes(include=['float64']) # this will select float columns only
for col in float_col.columns.values:
df[col] = df[col].astype('int64')
return df
def _condense_and_sort_dataframe(self, df):
'''Condense dataframe to only one country per row and sort by last column.'''
date_columns = list(df.columns)[1:]
        col_dict = {col: 'sum' for col in date_columns}
df = df.groupby("Country/Region").agg(col_dict)
df = df.sort_values(df.columns[-1], ascending = False)
return df
def _compute_rel_dataframe(self, df):
'''
Compute dataframe with relative numbers (%) from dataframe with absolute numbers.
Relative numbers (percent values) are used for length of bargraphs.
Negative numbers are set to zero!
'''
residents_per_case = list(df['Residents per case']) # save column
population = list(df['Population']) # save column
trend = list(df['Trend']) # save column
df_tmp = df.drop(['Population','Residents per case', 'Trend'], axis=1) # delete columns from dataframe
max_value = df_tmp.max().max() # compute max value
df_rel = df_tmp.div(max_value).multiply(100).round(1) # compute relative values
df_rel[df_rel < 0] = 0 # set negative numbers to 0
df_rel.insert(0, 'Population', population) # restore column
df_rel.insert(1, 'Residents per case', residents_per_case) # restore column
df_rel.insert(2, 'Trend', trend) # restore column
return df_rel
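    # For example, with a maximum cell value of 50,000 a cell of 12,500 becomes 25.0,
    # i.e. a bar a quarter of the maximum length.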
def _compute_abs_dataframe(self):
df_abs = pandas.read_csv(self.filepath)
df_abs = df_abs.drop(["Province/State", "Lat", "Long"], axis=1)
df_abs = self._insert_continent_rows(df_abs)
df_abs = self._condense_and_sort_dataframe(df_abs)
#df_abs.to_csv('abs.csv', encoding='utf-8')
return df_abs
def _compute_dif_dataframe(self):
'''
        Compute differential dataframe by subtracting each column from the previous one.
First col becomes NaN and is therefore dropped.
Sort by last column.
'''
df_dif = self.df_abs.diff(axis=1)
df_dif = df_dif.sort_values(df_dif.columns[-1], ascending = False)
df_dif = df_dif.drop(df_dif.columns[0], axis=1)
df_dif = self._convert_all_floats_to_ints(df_dif)
return df_dif
def _compute_agr_dataframe(self, df):
'''Compute dataframe with average growth rate, sorted by last column.'''
residents_per_case = list(df['Residents per case']) # save column
population = list(df['Population']) # save column
trend = list(df['Trend']) # save column
df_tmp = df.drop(['Population','Residents per case', 'Trend'], axis=1) # delete columns from dataframe
df_agr = df_tmp.pct_change(axis=1) # compute percentage change
df_agr = df_agr.drop(df_agr.columns[0], axis=1) # first column becomes NaN -> drop
si_pct = df_agr.mean(axis=1) # compute mean values (returns series, not dataframe!)
si_pct = si_pct.multiply(100).round(1) # adjust 0.1234 -> 12.3
df_agr.insert(0, 'Population', population) # restore column
df_agr.insert(1, 'Residents per case', residents_per_case) # restore column
df_agr.insert(2, 'Trend', trend) # restore column
df_agr.insert(3, 'AGR (mean value)', list(si_pct)) # save mean values
        df_agr = df_agr.iloc[:, list(range(4))]                                 # drop everything after column 'AGR (mean value)'
        df_agr = df_agr.sort_values(df_agr.columns[-1], ascending = False)      # sort by 'AGR (mean value)'
return df_agr
def is_dirty(self, secs):
'''
Returns True if last update exceeds specified amount of secs
'''
return time() - self.last_update_time > secs
def _check_for_update(self, force_reload=False):
'''
        Download data from GitHub and parse it into dataframes if the last update is older
        than the specified number of seconds or force_reload is True.
'''
if self.is_dirty(4 * 3600) or force_reload:
urllib.request.urlretrieve(self.url, self.filepath)
self.df_abs = self._compute_abs_dataframe()
self.df_dif = self._compute_dif_dataframe()
# Insert population column at pos 0 in dataframe
self.df_abs = self._insert_pop_column(self.df_abs)
self.df_dif = self._insert_pop_column(self.df_dif)
# Insert residents per case column at pos 1 in dataframe
self.df_abs = self._insert_residents_per_case_column(self.df_abs)
self.df_dif = self._insert_residents_per_case_column(self.df_dif)
# Insert trend column at pos 2 in dataframe
self.df_abs = self._insert_trend_column(self.df_abs, 6)
self.df_dif = self._insert_trend_column(self.df_dif, 6)
self.last_update_time = time()
print("data reloaded...")
def _prune_dataframe(self, df, cases, days):
'''
        Keep only the three leading meta columns plus the last <days> date columns, and
        drop all rows whose last-column value is below <cases> (only applied if cases > 0).
'''
df = df.iloc[:, [0, 1, 2] + list(range(-days, 0))]
if cases > 0:
last_column = list(df.columns)[-1]
df = df[df[last_column] >= cases]
return df
def _shrink_dataframe(self, df, country):
'''Discard all rows before <country>'''
try:
row_index = df.index.get_loc(country)
except KeyError:
if country != "":
print(country, 'not found in row index')
else:
df = df.iloc[row_index:]
return df
def _append_double_rates_to_df_agr(self, df_agr):
'''
        Compute doubling rates (in days) from the average growth rate and append them as a new column.
        The 'AGR (mean value)' column of <df_agr> contains the percentage increase (e.g. 9.2).
'''
def f(x):
try:
fx = round(log10(2) / log10(1 + x / 100), 1)
            except (ZeroDivisionError, ValueError):  # x == 0, or x <= -100 makes log10 undefined
fx = 0
return fx
double_rates = [f(x) for x in df_agr["AGR (mean value)"]]
df_agr['double_rates'] = double_rates
return df_agr
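    # Worked example: an average growth rate of 10 % per day gives
    # log10(2) / log10(1.1) ~= 7.3, i.e. cases double roughly every 7.3 days.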
def compute_abs_values(self, days, cases, country="", force_reload=False):
self._check_for_update(force_reload)
df_abs_tmp = self._prune_dataframe(self.df_abs, cases, days)
df_abs_tmp = self._shrink_dataframe(df_abs_tmp, country)
df_rel_tmp = self._compute_rel_dataframe(df_abs_tmp)
abs_values = df_abs_tmp.reset_index().values.tolist()
rel_values = df_rel_tmp.reset_index().values.tolist()
dat_start = df_abs_tmp.columns[3]
dat_end = df_abs_tmp.columns[-1]
return abs_values, rel_values, dat_start, dat_end
def compute_dif_values(self, days, cases, country="", force_reload=False):
self._check_for_update(force_reload)
df_abs_tmp = self._prune_dataframe(self.df_abs, cases, days) # Create corresponding df_abs for <cases>.
        df_dif_tmp = self._drop_rows_based_on_unique_indices(df_abs_tmp, self.df_dif)  # Drop countries which are not in corresponding df_abs.
df_dif_tmp = self._prune_dataframe(df_dif_tmp, 0, days) # Only look at last <days>. Leave cases as is.
df_dif_tmp = self._shrink_dataframe(df_dif_tmp, country) # Put country on top of the list if applicable.
df_rel_tmp = self._compute_rel_dataframe(df_dif_tmp)
dif_values = df_dif_tmp.reset_index().values.tolist()
rel_values = df_rel_tmp.reset_index().values.tolist()
dat_start = df_abs_tmp.columns[3]
dat_end = df_abs_tmp.columns[-1]
return dif_values, rel_values, dat_start, dat_end
def compute_agr_values(self, cases, force_reload=False):
self._check_for_update(force_reload)
df_abs_tmp = self._prune_dataframe(self.df_abs, cases, 8)
df_agr_tmp = self._compute_agr_dataframe(df_abs_tmp)
df_rel_tmp = self._compute_rel_dataframe(df_agr_tmp)
df_agr_tmp = self._append_double_rates_to_df_agr(df_agr_tmp)
agr_values = df_agr_tmp.reset_index().values.tolist()
rel_values = df_rel_tmp.reset_index().values.tolist()
dat_start = df_abs_tmp.columns[3]
dat_end = df_abs_tmp.columns[-1]
return agr_values, rel_values, dat_start, dat_end
coviddata_confirmed = CovidData(CONFIRMED_GLOBAL_FILE, CONFIRMED_GLOBAL_URL)
coviddata_deaths = CovidData(DEATHS_GLOBAL_FILE, DEATHS_GLOBAL_URL)
coviddata_recovered = CovidData(RECOVERED_GLOBAL_FILE, RECOVERED_GLOBAL_URL)
app = Flask(__name__)
@app.route("/confirmed/")
def confirmed():
days = request.args.get('days', default=7, type=int)
cases = request.args.get('cases', default=32000, type=int)
country = request.args.get('country', default="", type=str)
    force_reload = request.args.get('force', default="", type=str) == 'yes'
absval, relval, dat_start, dat_end = coviddata_confirmed.compute_abs_values(days, cases, country, force_reload)
    header = "Total Confirmed Cases (" +
<reponame>homebysix/grahampugh-recipes
#!/usr/local/autopkg/python
"""
JamfPackageUploader processor for AutoPkg
by <NAME>
Developed from an idea posted at
https://www.jamf.com/jamf-nation/discussions/27869#responseChild166021
"""
import os
import re
import hashlib
import json
import base64
import subprocess
import uuid
import xml.etree.ElementTree as ElementTree
from collections import namedtuple
from time import sleep
from zipfile import ZipFile, ZIP_DEFLATED
from shutil import copyfile, rmtree
from urllib.parse import urlparse, quote
from xml.sax.saxutils import escape
from autopkglib import Processor, ProcessorError # pylint: disable=import-error
class JamfPackageUploader(Processor):
"""A processor for AutoPkg that will upload a package to a JCDS or
File Share Distribution Point.
Can be run as a post-processor for a pkg recipe or in a child recipe.
The pkg recipe must output pkg_path or this will fail."""
input_variables = {
"pkg_name": {
"required": False,
"description": "Package name. If supplied, will rename the package supplied "
"in the pkg_path key when uploading it to the fileshare.",
"default": "",
},
"pkg_path": {
"required": False,
"description": "Path to a pkg or dmg to import - provided by "
"previous pkg recipe/processor.",
"default": "",
},
"version": {
"required": False,
"description": "Version string - provided by "
"previous pkg recipe/processor.",
"default": "",
},
"pkg_category": {
"required": False,
"description": "Package category",
"default": "",
},
"pkg_info": {
"required": False,
"description": "Package info field",
"default": "",
},
"pkg_notes": {
"required": False,
"description": "Package notes field",
"default": "",
},
"pkg_priority": {
"required": False,
"description": "Package priority. Default=10",
"default": "10",
},
"reboot_required": {
"required": False,
"description": (
"Whether a package requires a reboot after installation. "
"Default='False'"
),
"default": "",
},
"os_requirement": {
"required": False,
"description": "Package OS requirement",
"default": "",
},
"required_processor": {
"required": False,
"description": "Package required processor. Acceptable values are 'x86' or 'None'",
"default": "None",
},
"send_notification": {
"required": False,
"description": (
"Whether to send a notification when a package is installed. "
"Default='False'"
),
"default": "",
},
"replace_pkg": {
"required": False,
"description": "Overwrite an existing package if True.",
"default": "False",
},
"replace_pkg_metadata": {
"required": False,
"description": "Overwrite existing package metadata and continue if True, "
"even if the package object is not re-uploaded.",
"default": "False",
},
"JSS_URL": {
"required": True,
"description": "URL to a Jamf Pro server that the API user has write access "
"to, optionally set as a key in the com.github.autopkg "
"preference file.",
},
"API_USERNAME": {
"required": True,
"description": "Username of account with appropriate access to "
"jss, optionally set as a key in the com.github.autopkg "
"preference file.",
},
"API_PASSWORD": {
"required": True,
"description": "Password of api user, optionally set as a key in "
"the com.github.autopkg preference file.",
        },
        "SMB_URL": {
            "required": False,
            "description": "URL to a Jamf Pro fileshare distribution point, "
            "which should be in the form smb://server, optionally set as a key "
            "in the com.github.autopkg preference file.",
"default": "",
},
"SMB_USERNAME": {
"required": False,
"description": "Username of account with appropriate access to "
"jss, optionally set as a key in the com.github.autopkg "
"preference file.",
"default": "",
},
"SMB_PASSWORD": {
"required": False,
"description": "Password of api user, optionally set as a key in "
"the com.github.autopkg preference file.",
"default": "",
},
}
output_variables = {
"pkg_path": {
"description": "The path of the package as provided from the parent recipe.",
},
"pkg_name": {"description": "The name of the uploaded package."},
"pkg_uploaded": {
"description": "True/False depending if a package was uploaded or not.",
},
"jamfpackageuploader_summary_result": {
"description": "Description of interesting results.",
},
}
description = __doc__
# do not edit directly - copy from template
def write_json_file(self, data, tmp_dir="/tmp/jamf_upload"):
"""dump some json to a temporary file"""
self.make_tmp_dir(tmp_dir)
tf = os.path.join(tmp_dir, f"jamf_upload_{str(uuid.uuid4())}.json")
with open(tf, "w") as fp:
json.dump(data, fp)
return tf
# do not edit directly - copy from template
def write_temp_file(self, data, tmp_dir="/tmp/jamf_upload"):
"""dump some text to a temporary file"""
self.make_tmp_dir(tmp_dir)
tf = os.path.join(tmp_dir, f"jamf_upload_{str(uuid.uuid4())}.txt")
with open(tf, "w") as fp:
fp.write(data)
return tf
# do not edit directly - copy from template
def make_tmp_dir(self, tmp_dir="/tmp/jamf_upload"):
"""make the tmp directory"""
if not os.path.exists(tmp_dir):
os.mkdir(tmp_dir)
return tmp_dir
# do not edit directly - copy from template
def clear_tmp_dir(self, tmp_dir="/tmp/jamf_upload"):
"""remove the tmp directory"""
if os.path.exists(tmp_dir):
rmtree(tmp_dir)
return tmp_dir
# do not edit directly - copy from template
def curl(self, method, url, auth, data="", additional_headers=""):
"""
build a curl command based on method (GET, PUT, POST, DELETE)
If the URL contains 'uapi' then token should be passed to the auth variable,
otherwise the enc_creds variable should be passed to the auth variable
"""
tmp_dir = self.make_tmp_dir()
headers_file = os.path.join(tmp_dir, "curl_headers_from_jamf_upload.txt")
output_file = os.path.join(tmp_dir, "curl_output_from_jamf_upload.txt")
cookie_jar = os.path.join(tmp_dir, "curl_cookies_from_jamf_upload.txt")
# build the curl command
curl_cmd = [
"/usr/bin/curl",
"--silent",
"--show-error",
"-X",
method,
"-D",
headers_file,
"--output",
output_file,
url,
]
# authorisation if using Jamf Pro API or Classic API
# if using uapi and we already have a token then we use the token for authorization
if "uapi" in url and "tokens" not in url:
curl_cmd.extend(["--header", f"authorization: Bearer {auth}"])
# basic auth to obtain a token, or for classic API
elif "uapi" in url or "JSSResource" in url or "dbfileupload" in url:
curl_cmd.extend(["--header", f"authorization: Basic {auth}"])
# set either Accept or Content-Type depending on method
if method == "GET" or method == "DELETE":
curl_cmd.extend(["--header", "Accept: application/json"])
# icon upload requires special method
elif method == "POST" and "fileuploads" in url:
curl_cmd.extend(["--header", "Content-type: multipart/form-data"])
curl_cmd.extend(["--form", f"name=@{data}"])
elif method == "POST" or method == "PUT":
if data:
if "uapi" in url or "JSSResource" in url or "dbfileupload" in url:
# jamf data upload requires upload-file argument
curl_cmd.extend(["--upload-file", data])
else:
# slack requires data argument
curl_cmd.extend(["--data", data])
# uapi and slack accepts json, classic API only accepts xml
if "JSSResource" in url:
curl_cmd.extend(["--header", "Content-type: application/xml"])
else:
curl_cmd.extend(["--header", "Content-type: application/json"])
else:
self.output(f"WARNING: HTTP method {method} not supported")
# write session for jamf requests
if "uapi" in url or "JSSResource" in url or "dbfileupload" in url:
try:
with open(headers_file, "r") as file:
headers = file.readlines()
existing_headers = [x.strip() for x in headers]
for header in existing_headers:
if "APBALANCEID" in header or "AWSALB" in header:
with open(cookie_jar, "w") as fp:
fp.write(header)
except IOError:
pass
# look for existing session
try:
with open(cookie_jar, "r") as file:
headers = file.readlines()
existing_headers = [x.strip() for x in headers]
for header in existing_headers:
if "APBALANCEID" in header or "AWSALB" in header:
cookie = header.split()[1].rstrip(";")
self.output(f"Existing cookie found: {cookie}", verbose_level=2)
curl_cmd.extend(["--cookie", cookie])
except IOError:
self.output(
"No existing cookie found - starting new session", verbose_level=2
)
# additional headers for advanced requests
if additional_headers:
curl_cmd.extend(additional_headers)
self.output(f"curl command: {' '.join(curl_cmd)}", verbose_level=3)
# now subprocess the curl command and build the r tuple which contains the
# headers, status code and outputted data
subprocess.check_output(curl_cmd)
r = namedtuple(
"r", ["headers", "status_code", "output"], defaults=(None, None, None)
)
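        # Note: the field values are assigned to `r` as class attributes below,
        # replacing the namedtuple descriptors, so the instance returned by `r()`
        # exposes the collected headers, status code and output.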
try:
with open(headers_file, "r") as file:
headers = file.readlines()
r.headers = [x.strip() for x in headers]
for header in r.headers: # pylint: disable=not-an-iterable
if re.match(r"HTTP/(1.1|2)", header) and "Continue" not in header:
r.status_code = int(header.split()[1])
except IOError:
raise ProcessorError(f"WARNING: {headers_file} not found")
if os.path.exists(output_file) and os.path.getsize(output_file) > 0:
with open(output_file, "rb") as file:
if "uapi" in url:
r.output = json.load(file)
else:
r.output = file.read()
else:
self.output(f"No output from request ({output_file} not found or empty)")
return r()
# do not edit directly - copy from template
def status_check(self, r, endpoint_type, obj_name):
"""Return a message dependent on the HTTP response"""
if r.status_code == 200 or r.status_code == 201:
self.output(f"{endpoint_type} '{obj_name}' uploaded successfully")
return "break"
elif r.status_code == 409:
self.output(r.output, verbose_level=2)
raise ProcessorError(
f"WARNING: {endpoint_type} '{obj_name}' upload failed due to a conflict"
)
elif r.status_code == 401:
raise ProcessorError(
f"ERROR: {endpoint_type} '{obj_name}' upload failed due to permissions error"
)
else:
self.output(f"WARNING: {endpoint_type} '{obj_name}' upload failed")
self.output(r.output, verbose_level=2)
def sha512sum(self, filename):
"""calculate the SHA512 hash of the package
(see https://stackoverflow.com/a/44873382)"""
h = hashlib.sha512()
b = bytearray(128 * 1024)
mv = memoryview(b)
with open(filename, "rb", buffering=0) as f:
for n in iter(lambda: f.readinto(mv), 0):
h.update(mv[:n])
return h.hexdigest()
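    # (sha512sum reads the file in 128 KiB chunks through a reusable memoryview,
    # so large packages are hashed without being loaded fully into memory.)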
def mount_smb(self, mount_share, mount_user, mount_pass):
"""Mount distribution point."""
mount_cmd = [
"/usr/bin/osascript",
"-e",
(
f'mount volume "{mount_share}" as user name "{mount_user}" '
f'with password "{mount_pass}"'
),
]
self.output(
f"Mount command: {' '.join(mount_cmd)}", verbose_level=3,
)
<filename>ionoscloud/api/nat_gateways_api.py
from __future__ import absolute_import
import re # noqa: F401
import six
from ionoscloud.api_client import ApiClient
from ionoscloud.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class NATGatewaysApi(object):
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def datacenters_natgateways_delete(self, datacenter_id, nat_gateway_id, **kwargs): # noqa: E501
"""Delete NAT Gateways # noqa: E501
Remove the specified NAT Gateway from the data center. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_natgateways_delete(datacenter_id, nat_gateway_id, async_req=True)
>>> result = thread.get()
:param datacenter_id: The unique ID of the data center. (required)
:type datacenter_id: str
:param nat_gateway_id: The unique ID of the NAT Gateway. (required)
:type nat_gateway_id: str
:param pretty: Controls whether the response is pretty-printed (with indentations and new lines).
:type pretty: bool
:param depth: Controls the detail depth of the response objects. GET /datacenters/[ID] - depth=0: Only direct properties are included; children (servers and other elements) are not included. - depth=1: Direct properties and children references are included. - depth=2: Direct properties and children properties are included. - depth=3: Direct properties and children properties and children's children are included. - depth=... and so on
:type depth: int
:param x_contract_number: Users with multiple contracts must provide the contract number, for which all API requests are to be executed.
:type x_contract_number: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
return self.datacenters_natgateways_delete_with_http_info(datacenter_id, nat_gateway_id, **kwargs) # noqa: E501
def datacenters_natgateways_delete_with_http_info(self, datacenter_id, nat_gateway_id, **kwargs): # noqa: E501
"""Delete NAT Gateways # noqa: E501
Remove the specified NAT Gateway from the data center. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_natgateways_delete_with_http_info(datacenter_id, nat_gateway_id, async_req=True)
>>> result = thread.get()
:param datacenter_id: The unique ID of the data center. (required)
:type datacenter_id: str
:param nat_gateway_id: The unique ID of the NAT Gateway. (required)
:type nat_gateway_id: str
:param pretty: Controls whether the response is pretty-printed (with indentations and new lines).
:type pretty: bool
:param depth: Controls the detail depth of the response objects. GET /datacenters/[ID] - depth=0: Only direct properties are included; children (servers and other elements) are not included. - depth=1: Direct properties and children references are included. - depth=2: Direct properties and children properties are included. - depth=3: Direct properties and children properties and children's children are included. - depth=... and so on
:type depth: int
:param x_contract_number: Users with multiple contracts must provide the contract number, for which all API requests are to be executed.
:type x_contract_number: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
local_var_params = locals()
all_params = [
'datacenter_id',
'nat_gateway_id',
'pretty',
'depth',
'x_contract_number'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth',
'response_type',
'query_params'
]
)
for local_var_params_key, local_var_params_val in six.iteritems(local_var_params['kwargs']):
if local_var_params_key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method datacenters_natgateways_delete" % local_var_params_key
)
local_var_params[local_var_params_key] = local_var_params_val
del local_var_params['kwargs']
# verify the required parameter 'datacenter_id' is set
if self.api_client.client_side_validation and ('datacenter_id' not in local_var_params or # noqa: E501
local_var_params['datacenter_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `datacenter_id` when calling `datacenters_natgateways_delete`") # noqa: E501
# verify the required parameter 'nat_gateway_id' is set
if self.api_client.client_side_validation and ('nat_gateway_id' not in local_var_params or # noqa: E501
local_var_params['nat_gateway_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `nat_gateway_id` when calling `datacenters_natgateways_delete`") # noqa: E501
if self.api_client.client_side_validation and 'depth' in local_var_params and local_var_params['depth'] > 10: # noqa: E501
raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_natgateways_delete`, must be a value less than or equal to `10`") # noqa: E501
if self.api_client.client_side_validation and 'depth' in local_var_params and local_var_params['depth'] < 0: # noqa: E501
raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_natgateways_delete`, must be a value greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
if 'datacenter_id' in local_var_params:
path_params['datacenterId'] = local_var_params['datacenter_id'] # noqa: E501
if 'nat_gateway_id' in local_var_params:
path_params['natGatewayId'] = local_var_params['nat_gateway_id'] # noqa: E501
query_params = list(local_var_params.get('query_params', {}).items())
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'depth' in local_var_params and local_var_params['depth'] is not None: # noqa: E501
query_params.append(('depth', local_var_params['depth'])) # noqa: E501
header_params = {}
if 'x_contract_number' in local_var_params:
header_params['X-Contract-Number'] = local_var_params['x_contract_number'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Basic Authentication', 'Token Authentication'] # noqa: E501
response_type = None
if 'response_type' in kwargs:
response_type = kwargs['response_type']
return self.api_client.call_api(
'/datacenters/{datacenterId}/natgateways/{natGatewayId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=response_type, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
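    # Usage sketch (hypothetical IDs; `Configuration` and `ApiClient` are the
    # standard entry points of the ionoscloud package):
    #
    #     import ionoscloud
    #     configuration = ionoscloud.Configuration(username='user', password='password')
    #     with ionoscloud.ApiClient(configuration) as api_client:
    #         api = NATGatewaysApi(api_client)
    #         api.datacenters_natgateways_delete('dc-uuid', 'natgw-uuid', depth=0)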
def datacenters_natgateways_find_by_nat_gateway_id(self, datacenter_id, nat_gateway_id, **kwargs): # noqa: E501
"""Retrieve NAT Gateways # noqa: E501
Retrieve the properties of the specified NAT Gateway within the data center. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_natgateways_find_by_nat_gateway_id(datacenter_id, nat_gateway_id, async_req=True)
>>> result = thread.get()
:param datacenter_id: The unique ID of the data center. (required)
:type datacenter_id: str
:param nat_gateway_id: The unique ID of the NAT Gateway. (required)
:type nat_gateway_id: str
:param pretty: Controls whether the response is pretty-printed (with indentations and new lines).
:type pretty: bool
:param depth: Controls the detail depth of the response objects. GET /datacenters/[ID] - depth=0: Only direct properties are included; children (servers and other elements) are not included. - depth=1: Direct properties and children references are included. - depth=2: Direct properties and children properties are included. - depth=3: Direct properties and children properties and children's children are included. - depth=... and so on
:type depth: int
:param x_contract_number: Users with multiple contracts must provide the contract number, for which all API requests are to be executed.
:type x_contract_number: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: NatGateway
"""
kwargs['_return_http_data_only'] = True
return self.datacenters_natgateways_find_by_nat_gateway_id_with_http_info(datacenter_id, nat_gateway_id, **kwargs) # noqa: E501
def datacenters_natgateways_find_by_nat_gateway_id_with_http_info(self, datacenter_id, nat_gateway_id, **kwargs): # noqa: E501
"""Retrieve NAT Gateways # noqa: E501
Retrieve the properties of the specified NAT Gateway within the data center. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
        >>> thread = api.datacenters_natgateways_find_by_nat_gateway_id_with_http_info(datacenter_id,
# Copyright 2020-2022 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.utils.tensorboard import SummaryWriter
from PIL import Image as PILImage
import numpy as np
import pickle
import cv2
import onnxruntime as ort
from tqdm import tqdm
import os
import json
import shutil
from urllib.request import urlretrieve
from opendr.engine.learners import Learner
from opendr.engine.data import Image
from opendr.engine.target import Category
from opendr.engine.constants import OPENDR_SERVER_URL
from opendr.perception.face_recognition.algorithm.backbone.model_resnet import ResNet_50, ResNet_101, ResNet_152
from opendr.perception.face_recognition.algorithm.backbone.model_irse import IR_50, IR_101, IR_152, IR_SE_50, IR_SE_101, \
IR_SE_152
from opendr.perception.face_recognition.algorithm.backbone.model_mobilenet import MobileFaceNet
from opendr.perception.face_recognition.algorithm.head.losses import ArcFace, CosFace, SphereFace, AMSoftmax, Classifier
from opendr.perception.face_recognition.algorithm.loss.focal import FocalLoss
from opendr.perception.face_recognition.algorithm.util.utils import make_weights_for_balanced_classes, get_val_data, \
separate_irse_bn_paras, separate_mobilenet_bn_paras, l2_norm, \
separate_resnet_bn_paras, warm_up_lr, schedule_lr, perform_val, perform_val_imagefolder, buffer_val, AverageMeter, \
accuracy
from opendr.perception.face_recognition.algorithm.align.align import face_align
class FaceRecognitionLearner(Learner):
def __init__(self, lr=0.1, iters=120, batch_size=128, optimizer='sgd', device='cuda', threshold=0.0,
backbone='ir_50', network_head='arcface', loss='focal',
temp_path='./temp', mode='backbone_only',
checkpoint_after_iter=0, checkpoint_load_iter=0, val_after=0,
input_size=[112, 112], rgb_mean=[0.5, 0.5, 0.5], rgb_std=[0.5, 0.5, 0.5], embedding_size=512,
weight_decay=5e-4, momentum=0.9, drop_last=True, stages=[35, 65, 95],
pin_memory=True, num_workers=4,
seed=123):
super(FaceRecognitionLearner, self).__init__(lr=lr, iters=iters, batch_size=batch_size, optimizer=optimizer,
backbone=backbone, network_head=network_head, temp_path=temp_path,
checkpoint_after_iter=checkpoint_after_iter,
checkpoint_load_iter=checkpoint_load_iter,
device=device, threshold=threshold)
if input_size is None:
input_size = [112, 112]
if rgb_mean is None:
rgb_mean = [0.5, 0.5, 0.5]
if rgb_std is None:
rgb_std = [0.5, 0.5, 0.5]
if stages is None:
stages = [35, 65, 95]
self.seed = seed
self.loss = loss
self.mode = mode
self.input_size = input_size
self.rgb_mean = rgb_mean
self.rgb_std = rgb_std
if self.backbone == 'mobilefacenet':
self.embedding_size = 128
else:
self.embedding_size = embedding_size
self.weight_decay = weight_decay
self.momentum = momentum
self.drop_last = drop_last
self.stages = stages
self.pin_memory = pin_memory
self.num_workers = num_workers
torch.manual_seed(self.seed)
self.network_head_model = None
self.backbone_model = None
self.epoch = checkpoint_load_iter
self._model = None
self.writer = None
self.logging = False
self.database = None
self.num_class = 0
self.classes = None
self.opt = None
self.val_after = val_after
self.data = None
self.pairs = None
self.ort_backbone_session = None # ONNX runtime inference session for backbone
self.ort_head_session = None # ONNX runtime inference session for head
self.temp_path = temp_path
def __create_model(self, num_class=0):
# Create the backbone architecture
self.num_class = num_class
if self.backbone_model is None:
backbone_dict = {'resnet_50': ResNet_50(self.input_size),
'resnet_101': ResNet_101(self.input_size),
'resnet_152': ResNet_152(self.input_size),
'ir_50': IR_50(self.input_size),
'ir_101': IR_101(self.input_size),
'ir_152': IR_152(self.input_size),
'ir_se_50': IR_SE_50(self.input_size),
'ir_se_101': IR_SE_101(self.input_size),
'ir_se_152': IR_SE_152(self.input_size),
'mobilefacenet': MobileFaceNet()}
backbone = backbone_dict[self.backbone]
self.backbone_model = backbone.to(self.device)
# Create the head architecture
if self.mode != 'backbone_only':
head_dict = {
'arcface': ArcFace(in_features=self.embedding_size, out_features=self.num_class, device=self.device),
'cosface': CosFace(in_features=self.embedding_size, out_features=self.num_class, device=self.device),
'sphereface': SphereFace(in_features=self.embedding_size, out_features=self.num_class,
device=self.device),
'am_softmax': AMSoftmax(in_features=self.embedding_size, out_features=self.num_class,
device=self.device),
'classifier': Classifier(in_features=self.embedding_size, out_features=self.num_class,
device=self.device)}
head = head_dict[self.network_head]
self.network_head_model = head.to(self.device)
else:
self.network_head_model = None
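# Note: the backbone/head pair is resolved by a plain dict lookup, so e.g.
# backbone='ir_50' with network_head='arcface' instantiates IR_50 + ArcFace,
# while mode='backbone_only' skips head creation entirely. This method is
# called internally (e.g. by fit()) once the number of classes is known.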
def align(self, data='', dest='./temp/aligned', crop_size=112, silent=False):
"""
This method is used for aligning the faces in an imagefolder dataset.
:param data: The folder containing the images to be aligned
:type data: str
:param dest: destination folder to save the aligned images, defaults to './temp/aligned'
:type dest: str, optional
:param crop_size: output size (in pixels) of the square aligned face crops, defaults to 112
:type crop_size: int, optional
:param silent: if set to True, disables printing training progress reports to STDOUT
:type silent: bool, optional
"""
face_align(data, dest, crop_size)
if not silent:
print('Face align finished')
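# Usage sketch (hypothetical paths): align every image found under
# './data/imgs' and write 112x112 crops to './data/imgs/aligned':
#
#   learner = FaceRecognitionLearner(device='cpu')
#   learner.align(data='./data/imgs', dest='./data/imgs/aligned', crop_size=112)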
def fit(self, dataset, val_dataset=None, logging_path='', silent=False, verbose=True):
"""
This method is used for training the algorithm on a train dataset and
validating on a val dataset.
Can be parameterized based on the learner attributes and custom hyperparameters
added by the implementation and returns stats regarding training and validation.
:param dataset: Object that holds the training dataset
:type dataset: Dataset class type
:param val_dataset: Object that holds the validation dataset
:type val_dataset: Dataset class type, optional
:param verbose: if set to True, enables the maximum logging verbosity (depends on the actual algorithm)
:type verbose: bool, optional
:param silent: if set to True, disables printing training progress reports to STDOUT
:type silent: bool, optional
:param logging_path: path to save tensorboard log files. If set to None or '', tensorboard logging is disabled
:type logging_path: str, optional
:return: Returns stats regarding training and validation
:rtype: dict
"""
loss_list = []
acc_list = []
eval_results = []
# Tensorboard logging
if logging_path != '':
self.logging = True
self.writer = SummaryWriter(logging_path)
else:
self.logging = False
train_transform = transforms.Compose([
transforms.Resize([int(128 * self.input_size[0] / 112), int(128 * self.input_size[0] / 112)]),
transforms.RandomCrop([self.input_size[0], self.input_size[1]]),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=self.rgb_mean,
std=self.rgb_std),
])
if dataset.dataset_type != 'imagefolder':
raise UserWarning('dataset should be of type imagefolder')
dataset_train = datasets.ImageFolder(dataset.path, train_transform)
# Create a weighted random sampler to process imbalanced data
weights = make_weights_for_balanced_classes(dataset_train.imgs, len(dataset_train.classes))
weights = torch.DoubleTensor(weights)
sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights))
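# Each image is drawn with probability proportional to 1 / |its class|, so
# over an epoch every class contributes roughly the same number of samples
# despite any class imbalance in the raw image folder.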
train_loader = torch.utils.data.DataLoader(
dataset_train, batch_size=self.batch_size, sampler=sampler, pin_memory=self.pin_memory,
num_workers=self.num_workers, drop_last=self.drop_last
)
loss_dict = {'focal': FocalLoss(),
'softmax': nn.CrossEntropyLoss()}
criterion = loss_dict[self.loss]
self.classes = train_loader.dataset.classes
self.num_class = len(train_loader.dataset.classes)
self.__create_model(self.num_class)
self._model = {self.backbone_model, self.network_head_model}
if self.checkpoint_load_iter != 0:
if self.mode == 'head_only':
if os.path.exists(
os.path.join(self.temp_path, 'checkpoints', 'head_{}_iter_{}'.format(
self.network_head, self.epoch))):
head_info = torch.load(os.path.join(self.temp_path, 'checkpoints', 'head_{}_iter_{}'.format(
self.network_head, self.epoch)))
self.network_head_model.load_state_dict(head_info['head_state_dict'])
else:
raise UserWarning('No checkpoint head_{}_iter_{} found'.format(
self.network_head, self.epoch))
else:
if os.path.exists(os.path.join(self.temp_path, 'checkpoints', 'backbone_{}_iter_{}'.format(
self.backbone, self.epoch))) and os.path.exists(
os.path.join(self.temp_path, 'checkpoints', 'head_{}_iter_{}'.format(
self.network_head, self.epoch))):
backbone_info = torch.load(os.path.join(self.temp_path, 'checkpoints', 'backbone_{}_iter_{}'.format(
self.backbone, self.epoch)))
self.backbone_model.load_state_dict(backbone_info['backbone_state_dict'])
head_info = torch.load(os.path.join(self.temp_path, 'checkpoints', 'head_{}_iter_{}'.format(
self.network_head, self.epoch)))
self.network_head_model.load_state_dict(head_info['head_state_dict'])
else:
raise UserWarning('No correct checkpoint files found')
# Separate batch_norm parameters from others
# Do not do weight decay for batch_norm parameters to improve the generalizability
if self.backbone.find("ir") >= 0:
backbone_paras_only_bn, backbone_paras_wo_bn = separate_irse_bn_paras(
self.backbone_model)
_, head_paras_wo_bn = separate_irse_bn_paras(self.network_head_model)
elif self.backbone == 'mobilefacenet':
_, head_paras_wo_bn = separate_mobilenet_bn_paras(self.network_head_model)
else:
backbone_paras_only_bn, backbone_paras_wo_bn = separate_resnet_bn_paras(
self.backbone_model)
_, head_paras_wo_bn = separate_resnet_bn_paras(self.network_head_model)
# ======= Train & validation & save checkpoint =======#
disp_freq = len(train_loader) // 100 # Frequency to display training loss & acc
if disp_freq < 1:
disp_freq = 1000  # fewer than 100 batches per epoch: effectively disables intra-epoch reporting
num_epoch_warm_up = self.iters // 25
num_batch_warm_up = len(train_loader) * num_epoch_warm_up
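# Warm-up covers the first iters // 25 epochs: warm_up_lr() below ramps the
# learning rate linearly from ~0 up to self.lr over those batches, after
# which the staged schedule (self.stages + schedule_lr) takes over.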
if self.mode == 'head_only':
optimizer = optim.SGD([{'params': head_paras_wo_bn, 'weight_decay': self.weight_decay}], lr=self.lr,
momentum=self.momentum)
else:
if self.backbone == 'mobilefacenet':
optimizer = optim.SGD([{'params': list(self.backbone_model.parameters()) + head_paras_wo_bn,
'weight_decay': self.weight_decay}],
lr=self.lr, momentum=self.momentum)
else:
optimizer = optim.SGD(
[{'params': backbone_paras_wo_bn + head_paras_wo_bn, 'weight_decay': self.weight_decay},
{'params': backbone_paras_only_bn}], lr=self.lr, momentum=self.momentum)
self.opt = optimizer
if self.checkpoint_load_iter != 0:
self.opt.load_state_dict(head_info['optimizer_state_dict'])
for epoch in range(self.iters): # Start training process
if self.epoch == self.stages[0]: # Adjust LR for each training stage after warm up
schedule_lr(self.opt)
if self.epoch == self.stages[1]:
schedule_lr(self.opt)
if self.epoch == self.stages[2]:
schedule_lr(self.opt)
if self.mode == 'head_only':
self.backbone_model.eval()
else:
self.backbone_model.train()
self.network_head_model.train()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
self.network_head_model = self.network_head_model.to(self.device)
batch = 0 # batch index
for inputs, labels in tqdm(iter(train_loader), disable=(not verbose or silent)):
if (epoch + 1 <= num_epoch_warm_up) and (
batch + 1 <= num_batch_warm_up): # Adjust LR for each training batch during warm up
warm_up_lr(batch + 1, num_batch_warm_up, self.lr, optimizer)
# Compute output
inputs = inputs.to(self.device)
labels = labels.to(self.device).long()
features = self.backbone_model(inputs)
outputs = self.network_head_model(features, labels)
loss = criterion(outputs, labels)
# sdk/python/pulumi_github/outputs.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from . import _utilities, _tables
from . import outputs
__all__ = [
'BranchProtectionRequiredPullRequestReview',
'BranchProtectionRequiredStatusCheck',
'BranchProtectionV3RequiredPullRequestReviews',
'BranchProtectionV3RequiredStatusChecks',
'BranchProtectionV3Restrictions',
'OrganizationWebhookConfiguration',
'RepositoryPages',
'RepositoryPagesSource',
'RepositoryTemplate',
'RepositoryWebhookConfiguration',
'TeamSyncGroupMappingGroup',
'GetCollaboratorsCollaboratorResult',
'GetOrganizationTeamSyncGroupsGroupResult',
'GetRepositoryPageResult',
'GetRepositoryPageSourceResult',
]
@pulumi.output_type
class BranchProtectionRequiredPullRequestReview(dict):
def __init__(__self__, *,
dismiss_stale_reviews: Optional[bool] = None,
dismissal_restrictions: Optional[Sequence[str]] = None,
require_code_owner_reviews: Optional[bool] = None,
required_approving_review_count: Optional[int] = None):
if dismiss_stale_reviews is not None:
pulumi.set(__self__, "dismiss_stale_reviews", dismiss_stale_reviews)
if dismissal_restrictions is not None:
pulumi.set(__self__, "dismissal_restrictions", dismissal_restrictions)
if require_code_owner_reviews is not None:
pulumi.set(__self__, "require_code_owner_reviews", require_code_owner_reviews)
if required_approving_review_count is not None:
pulumi.set(__self__, "required_approving_review_count", required_approving_review_count)
@property
@pulumi.getter(name="dismissStaleReviews")
def dismiss_stale_reviews(self) -> Optional[bool]:
return pulumi.get(self, "dismiss_stale_reviews")
@property
@pulumi.getter(name="dismissalRestrictions")
def dismissal_restrictions(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "dismissal_restrictions")
@property
@pulumi.getter(name="requireCodeOwnerReviews")
def require_code_owner_reviews(self) -> Optional[bool]:
return pulumi.get(self, "require_code_owner_reviews")
@property
@pulumi.getter(name="requiredApprovingReviewCount")
def required_approving_review_count(self) -> Optional[int]:
return pulumi.get(self, "required_approving_review_count")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
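# Sketch: the pulumi runtime uses _translate_property to map the provider's
# camelCase keys onto the snake_case attributes above, e.g.
# "dismissStaleReviews" -> "dismiss_stale_reviews", falling back to the key
# itself when no table entry exists. The same hook is repeated on every
# output type below.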
@pulumi.output_type
class BranchProtectionRequiredStatusCheck(dict):
def __init__(__self__, *,
contexts: Optional[Sequence[str]] = None,
strict: Optional[bool] = None):
if contexts is not None:
pulumi.set(__self__, "contexts", contexts)
if strict is not None:
pulumi.set(__self__, "strict", strict)
@property
@pulumi.getter
def contexts(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "contexts")
@property
@pulumi.getter
def strict(self) -> Optional[bool]:
return pulumi.get(self, "strict")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class BranchProtectionV3RequiredPullRequestReviews(dict):
def __init__(__self__, *,
dismiss_stale_reviews: Optional[bool] = None,
dismissal_teams: Optional[Sequence[str]] = None,
dismissal_users: Optional[Sequence[str]] = None,
include_admins: Optional[bool] = None,
require_code_owner_reviews: Optional[bool] = None,
required_approving_review_count: Optional[int] = None):
if dismiss_stale_reviews is not None:
pulumi.set(__self__, "dismiss_stale_reviews", dismiss_stale_reviews)
if dismissal_teams is not None:
pulumi.set(__self__, "dismissal_teams", dismissal_teams)
if dismissal_users is not None:
pulumi.set(__self__, "dismissal_users", dismissal_users)
if include_admins is not None:
pulumi.set(__self__, "include_admins", include_admins)
if require_code_owner_reviews is not None:
pulumi.set(__self__, "require_code_owner_reviews", require_code_owner_reviews)
if required_approving_review_count is not None:
pulumi.set(__self__, "required_approving_review_count", required_approving_review_count)
@property
@pulumi.getter(name="dismissStaleReviews")
def dismiss_stale_reviews(self) -> Optional[bool]:
return pulumi.get(self, "dismiss_stale_reviews")
@property
@pulumi.getter(name="dismissalTeams")
def dismissal_teams(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "dismissal_teams")
@property
@pulumi.getter(name="dismissalUsers")
def dismissal_users(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "dismissal_users")
@property
@pulumi.getter(name="includeAdmins")
def include_admins(self) -> Optional[bool]:
return pulumi.get(self, "include_admins")
@property
@pulumi.getter(name="requireCodeOwnerReviews")
def require_code_owner_reviews(self) -> Optional[bool]:
return pulumi.get(self, "require_code_owner_reviews")
@property
@pulumi.getter(name="requiredApprovingReviewCount")
def required_approving_review_count(self) -> Optional[int]:
return pulumi.get(self, "required_approving_review_count")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class BranchProtectionV3RequiredStatusChecks(dict):
def __init__(__self__, *,
contexts: Optional[Sequence[str]] = None,
include_admins: Optional[bool] = None,
strict: Optional[bool] = None):
if contexts is not None:
pulumi.set(__self__, "contexts", contexts)
if include_admins is not None:
pulumi.set(__self__, "include_admins", include_admins)
if strict is not None:
pulumi.set(__self__, "strict", strict)
@property
@pulumi.getter
def contexts(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "contexts")
@property
@pulumi.getter(name="includeAdmins")
def include_admins(self) -> Optional[bool]:
return pulumi.get(self, "include_admins")
@property
@pulumi.getter
def strict(self) -> Optional[bool]:
return pulumi.get(self, "strict")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class BranchProtectionV3Restrictions(dict):
def __init__(__self__, *,
apps: Optional[Sequence[str]] = None,
teams: Optional[Sequence[str]] = None,
users: Optional[Sequence[str]] = None):
if apps is not None:
pulumi.set(__self__, "apps", apps)
if teams is not None:
pulumi.set(__self__, "teams", teams)
if users is not None:
pulumi.set(__self__, "users", users)
@property
@pulumi.getter
def apps(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "apps")
@property
@pulumi.getter
def teams(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "teams")
@property
@pulumi.getter
def users(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "users")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class OrganizationWebhookConfiguration(dict):
def __init__(__self__, *,
url: str,
content_type: Optional[str] = None,
insecure_ssl: Optional[bool] = None,
secret: Optional[str] = None):
"""
:param str url: URL of the webhook
"""
pulumi.set(__self__, "url", url)
if content_type is not None:
pulumi.set(__self__, "content_type", content_type)
if insecure_ssl is not None:
pulumi.set(__self__, "insecure_ssl", insecure_ssl)
if secret is not None:
pulumi.set(__self__, "secret", secret)
@property
@pulumi.getter
def url(self) -> str:
"""
URL of the webhook
"""
return pulumi.get(self, "url")
@property
@pulumi.getter(name="contentType")
def content_type(self) -> Optional[str]:
return pulumi.get(self, "content_type")
@property
@pulumi.getter(name="insecureSsl")
def insecure_ssl(self) -> Optional[bool]:
return pulumi.get(self, "insecure_ssl")
@property
@pulumi.getter
def secret(self) -> Optional[str]:
return pulumi.get(self, "secret")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class RepositoryPages(dict):
def __init__(__self__, *,
source: 'outputs.RepositoryPagesSource',
cname: Optional[str] = None,
custom404: Optional[bool] = None,
html_url: Optional[str] = None,
status: Optional[str] = None,
url: Optional[str] = None):
"""
:param 'RepositoryPagesSourceArgs' source: The source branch and directory for the rendered Pages site. See GitHub Pages Source below for details.
:param str cname: The custom domain for the repository. This can only be set after the repository has been created.
:param bool custom404: Whether the rendered GitHub Pages site has a custom 404 page.
:param str html_url: The absolute URL (including scheme) of the rendered GitHub Pages site e.g. `https://username.github.io`.
:param str status: The GitHub Pages site's build status e.g. `building` or `built`.
"""
pulumi.set(__self__, "source", source)
if cname is not None:
pulumi.set(__self__, "cname", cname)
if custom404 is not None:
pulumi.set(__self__, "custom404", custom404)
if html_url is not None:
pulumi.set(__self__, "html_url", html_url)
if status is not None:
pulumi.set(__self__, "status", status)
if url is not None:
pulumi.set(__self__, "url", url)
@property
@pulumi.getter
def source(self) -> 'outputs.RepositoryPagesSource':
"""
The source branch and directory for the rendered Pages site. See GitHub Pages Source below for details.
"""
return pulumi.get(self, "source")
@property
@pulumi.getter
def cname(self) -> Optional[str]:
"""
The custom domain for the repository. This can only be set after the repository has been created.
"""
return pulumi.get(self, "cname")
@property
@pulumi.getter
def custom404(self) -> Optional[bool]:
"""
Whether the rendered GitHub Pages site has a custom 404 page.
"""
return pulumi.get(self, "custom404")
@property
@pulumi.getter(name="htmlUrl")
def html_url(self) -> Optional[str]:
"""
The absolute URL (including scheme) of the rendered GitHub Pages site e.g. `https://username.github.io`.
"""
return pulumi.get(self, "html_url")
@property
@pulumi.getter
def status(self) -> Optional[str]:
"""
The GitHub Pages site's build status e.g. `building` or `built`.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def url(self) -> Optional[str]:
return pulumi.get(self, "url")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class RepositoryPagesSource(dict):
def __init__(__self__, *,
branch: str,
path: Optional[str] = None):
"""
:param str branch: The repository branch used to publish the site's source files (e.g. `main` or `gh-pages`).
:param str path: The repository directory from which the site publishes (Default: `/`).
"""
pulumi.set(__self__, "branch", branch)
if path is not None:
pulumi.set(__self__, "path", path)
@property
@pulumi.getter
def branch(self) -> str:
"""
The repository branch used to publish the site's source files (e.g. `main` or `gh-pages`).
"""
return pulumi.get(self, "branch")
@property
@pulumi.getter
def path(self) -> Optional[str]:
"""
The repository directory from which the site publishes (Default: `/`).
"""
return pulumi.get(self, "path")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class RepositoryTemplate(dict):
def __init__(__self__, *,
owner: str,
repository: str):
pulumi.set(__self__, "owner", owner)
pulumi.set(__self__, "repository", repository)
@property
@pulumi.getter
def owner(self) -> str:
return pulumi.get(self, "owner")
@property
@pulumi.getter
def repository(self) -> str:
return pulumi.get(self, "repository")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class RepositoryWebhookConfiguration(dict):
def __init__(__self__, *,
url: str,
content_type: Optional[str] = None,
insecure_ssl: Optional[bool] = None,
secret: Optional[str] = None):
"""
:param str url: URL of the webhook. This is a sensitive attribute because it may include basic auth credentials.
"""
pulumi.set(__self__, "url", url)
if content_type is not None:
pulumi.set(__self__, "content_type", content_type)
if insecure_ssl is not None:
pulumi.set(__self__, "insecure_ssl", insecure_ssl)
if secret is not None:
pulumi.set(__self__, "secret", secret)
@property
@pulumi.getter
def url(self) -> str:
"""
URL of the webhook. This is a sensitive attribute because it may include basic auth credentials.
"""
return pulumi.get(self, "url")
@property
@pulumi.getter(name="contentType")
def content_type(self) -> Optional[str]:
return pulumi.get(self, "content_type")
@property
@pulumi.getter(name="insecureSsl")
def insecure_ssl(self) -> Optional[bool]:
return pulumi.get(self, "insecure_ssl")
@property
@pulumi.getter
def secret(self) -> Optional[str]:
return pulumi.get(self, "secret")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class TeamSyncGroupMappingGroup(dict):
def __init__(__self__, *,
group_description: str,
group_id: str,
group_name: str):
"""
:param str group_description: The description of the IdP group.
:param str group_id: The ID of the IdP group.
:param str group_name: The name of the IdP group.
"""
pulumi.set(__self__, "group_description", group_description)
pulumi.set(__self__, "group_id", group_id)
pulumi.set(__self__, "group_name", group_name)
@property
@pulumi.getter(name="groupDescription")
def group_description(self) -> str:
"""
The description of the IdP group.
"""
return pulumi.get(self, "group_description")
@property
@pulumi.getter(name="groupId")
def group_id(self) -> str:
"""
The ID of the IdP group.
"""
return pulumi.get(self, "group_id")
@property
@pulumi.getter(name="groupName")
def group_name(self) -> str:
"""
The name of the IdP group.
"""
return pulumi.get(self, "group_name")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class GetCollaboratorsCollaboratorResult(dict):
def __init__(__self__, *,
events_url: str,
followers_url:
# clone any git repositories referenced by the manifest from the source project,
# otherwise we won't be able to find them
for repoView in manifest.repositories.values():
repoSpec = repoView.repository
if repoSpec.name == "self":
continue
repo = sourceProject.find_git_repo_from_repository(repoSpec)
if repo:
targetProject.find_or_clone(repo)
def _create_ensemble_repo(manifest, repo):
destDir = os.path.dirname(manifest.manifest.path)
if not repo:
repo = _create_repo(destDir)
elif not os.path.isdir(destDir):
os.makedirs(destDir)
manifest.metadata["uri"] = repo.get_url_with_path(manifest.manifest.path)
with open(manifest.manifest.path, "w") as f:
manifest.dump(f)
repo.repo.index.add([manifest.manifest.path])
repo.repo.index.commit("Default ensemble repository boilerplate")
return repo
def _looks_like(path, name):
# in case path is a directory:
if os.path.isfile(os.path.join(path, name)):
return path, name
if path.endswith(name): # name is explicit so don't need to check if file exists
return os.path.split(path)
return None
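# Illustrative behavior (assuming the files exist as described):
#   _looks_like('/proj', 'ensemble-template.yaml')
#       -> ('/proj', 'ensemble-template.yaml')    # file found inside the dir
#   _looks_like('/proj/ensemble-template.yaml', 'ensemble-template.yaml')
#       -> ('/proj', 'ensemble-template.yaml')    # name given explicitly
#   anything else -> None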
def _get_ensemble_paths(sourcePath, sourceProject):
"""
Returns either a pointer to the ensemble to clone
or a dict of variables to pass to an ensemble template to create a new one
if sourcePath doesn't exist, return {}
look for an ensemble given sourcePath (unless sourcePath looks like a service template)
if that fails look for (ensemble-template, service-template) if sourcePath is a directory
otherwise
return {}
"""
template = None
relPath = sourcePath or "."
if not os.path.exists(relPath):
raise UnfurlError(
f'Given clone source "{os.path.abspath(relPath)}" does not exist.'
)
# we only support cloning TOSCA service templates if their names end in "service-template.yaml"
isServiceTemplate = sourcePath.endswith(DefaultNames.ServiceTemplate)
if not isServiceTemplate:
try:
localEnv = LocalEnv(relPath, project=sourceProject)
sourceDir = sourceProject.get_relative_path(
os.path.dirname(localEnv.manifestPath)
)
# note: if sourceDir.startswith("..") then the ensemble lives in another project's repo
return dict(sourceDir=sourceDir, localEnv=localEnv)
except UnfurlError:
# XXX if UnfurlError is "could not find external project", reraise
pass
# didn't find the specified file (or the default ensemble if none was specified)
# so if sourcePath was a directory try for one of the default template files
if isServiceTemplate or os.path.isdir(relPath):
# look for an ensemble-template or service-template in source path
if os.path.isdir(os.path.join(sourcePath, DefaultNames.ProjectDirectory)):
sourcePath = os.path.join(sourcePath, DefaultNames.ProjectDirectory)
template = _looks_like(sourcePath, DefaultNames.EnsembleTemplate)
if template:
sourceDir = sourceProject.get_relative_path(template[0])
return dict(sourceDir=sourceDir, ensembleTemplate=template[1])
template = _looks_like(sourcePath, DefaultNames.ServiceTemplate)
if template:
sourceDir = sourceProject.get_relative_path(template[0])
return dict(sourceDir=sourceDir, serviceTemplate=template[1])
# nothing valid found
return {}
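# e.g. a source directory containing an ensemble-template.yaml yields
# {'sourceDir': ..., 'ensembleTemplate': 'ensemble-template.yaml'}, while an
# existing ensemble yields {'sourceDir': ..., 'localEnv': <LocalEnv>}.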
def _create_ensemble_from_template(templateVars, project, destDir, manifestName):
from unfurl import yamlmanifest
assert project
sourceDir = os.path.normpath(
os.path.join(project.projectRoot, templateVars["sourceDir"])
)
specRepo, relPath, revision, bare = project.find_path_in_repos(sourceDir)
if not specRepo:
raise UnfurlError(
'"%s" is not in a git repository. Cloning from plain file directories not yet supported'
% os.path.abspath(sourceDir)
)
manifestPath = write_ensemble_manifest(
os.path.join(project.projectRoot, destDir),
manifestName,
specRepo,
sourceDir,
templateVars,
)
localEnv = LocalEnv(manifestPath, project=project)
manifest = yamlmanifest.ReadOnlyManifest(localEnv=localEnv)
return localEnv, manifest
def find_project(source, home_path):
sourceRoot = Project.find_path(source)
if sourceRoot:
if home_path:
return Project(sourceRoot, Project(home_path))
return Project(sourceRoot)
return None
def _get_context_and_shared_repo(project, options):
# when creating ensemble, get the default project for the given context if set
# XXX if not --new-repository
shared_repo = None
shared = options.get("shared_repository")
context = options.get("use_environment")
if not context:
context = project.get_default_context()
if not shared and context:
shared = project.get_default_project_path(context)
if shared:
shared_repo = Repo.find_containing_repo(shared)
if not shared_repo:
raise UnfurlError("can not find shared repository " + shared)
return context, shared_repo
class EnsembleBuilder:
def __init__(self, source: str, ensemble_name: str, options: dict):
# user specified url or path
self.input_source = source
self.options = options
self.ensemble_name = ensemble_name
self.mono = options.get("mono") or options.get("existing")
self.home_path = get_home_config_path(options.get("home"))
self.source_project = None # step 1
self.source_path = None # step 1 relative path in source_project
self.templateVars = None # step 2
self.environment = None # step 2 environment name
self.shared_repo = None # step 2
self.dest_project = None # step 3
self.dest_path = None # step 3 relative path in dest_project
self.manifest = None # final step
def create_project_from_ensemble(self, dest):
# XXX create a new project from scratch for the ensemble
# if os.path.exists(dest) and os.listdir(dest):
# raise UnfurlError(
# 'Can not create a project in "%s": folder is not empty' % dest
# )
# newHome, projectConfigPath, repo = createProject(
# dest, empty=True, **options
# )
# return Project(projectConfigPath)
raise UnfurlError(
f"Can't clone \"{self.input_source}\": it isn't in an Unfurl project or repository"
)
def configure(self):
assert not self.templateVars
# source is a path into the project relative to the current directory
source_path = os.path.join(self.source_project.projectRoot, self.source_path)
self.templateVars = _get_ensemble_paths(
source_path,
self.source_project,
)
(self.environment, self.shared_repo) = _get_context_and_shared_repo(
self.source_project, self.options
)
@staticmethod
def _get_ensemble_dir(targetPath):
assert not os.path.isabs(targetPath)
if not targetPath or targetPath == ".":
destDir, manifestName = (
DefaultNames.EnsembleDirectory,
DefaultNames.Ensemble,
)
elif targetPath.endswith(".yaml") or targetPath.endswith(".yml"):
destDir, manifestName = os.path.split(targetPath)
else:
destDir = targetPath
manifestName = DefaultNames.Ensemble
return destDir, manifestName
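# Illustrative results (assuming the stock DefaultNames values):
#   ""               -> (DefaultNames.EnsembleDirectory, DefaultNames.Ensemble)
#   "prod/app.yaml"  -> ("prod", "app.yaml")
#   "prod"           -> ("prod", DefaultNames.Ensemble)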
def create_new_ensemble(self):
"""
If "localEnv" is in templateVars, clone that ensemble;
otherwise create one from a template with templateVars
"""
from unfurl import yamlmanifest
if self.shared_repo:
destProject = find_project(self.shared_repo.working_dir, self.home_path)
assert destProject
else:
destProject = self.dest_project
assert destProject
assert self.templateVars
assert not self.manifest
assert self.dest_path is not None
destDir, manifestName = self._get_ensemble_dir(self.dest_path)
# choose a destDir that doesn't conflict with an existing folder
# (i.e. if default ensemble already exists)
destDir = destProject.get_unique_path(destDir)
# destDir is now absolute
targetPath = os.path.normpath(os.path.join(destDir, manifestName))
assert (not self.shared_repo) or targetPath.startswith(
self.shared_repo.working_dir
), (
targetPath,
self.shared_repo.working_dir,
)
templateVars = self.templateVars
if "localEnv" not in templateVars:
# we found a template file to clone
localEnv, manifest = _create_ensemble_from_template(
self.templateVars, destProject, destDir, manifestName
)
else:
# didn't find a template file
# look for an ensemble at the given path or use the source project's default
localEnv = templateVars["localEnv"]
manifest = yamlmanifest.clone(localEnv, targetPath)
_create_ensemble_repo(
manifest,
self.shared_repo or self.mono and self.dest_project.project_repoview.repo,
)
if destProject.projectRoot != self.dest_project.projectRoot:
# cross reference each other
destProject.register_ensemble(
manifest.path, managedBy=self.dest_project, context=self.environment
)
self.dest_project.register_ensemble(
manifest.path, project=destProject, context=self.environment
)
else:
destProject.register_ensemble(manifest.path, context=self.environment)
self.manifest = manifest
return destDir
def clone_local_project(self, sourceProject, dest_dir):
# clone the source project's git repo
self.source_path = sourceProject.get_relative_path(self.input_source)
assert not self.source_path.startswith(
".."
), f"{self.source_path} should be inside the project"
newrepo = sourceProject.project_repoview.repo.clone(dest_dir)
search = os.path.join(
dest_dir, sourceProject.project_repoview.path, self.source_path
)
self.source_project = find_project(search, self.home_path)
assert (
self.source_project
), f"project not found in {search}, cloned to {newrepo.working_dir}"
return self.source_project
def clone_remote_project(self, destDir):
# check if source is a git url
repoURL, filePath, revision = split_git_url(self.input_source)
if os.path.exists(destDir) and os.listdir(destDir):
raise UnfurlError(
f'Can not clone project into "{destDir}": folder is not empty'
)
# clone the remote repo to destDir
Repo.create_working_dir(repoURL, destDir, revision)
targetDir = os.path.join(destDir, filePath)
sourceRoot = Project.find_path(targetDir)
if not sourceRoot:
raise UnfurlError(
f'Error: cloned "{self.input_source}" to "{destDir}" but couldn\'t find an Unfurl project'
)
self.source_project = find_project(sourceRoot, self.home_path)
# set source to point to the cloned project
self.source_path = self.source_project.get_relative_path(targetDir)
return self.source_project
def set_dest_project_and_path(
self, existingSourceProject, existingDestProject, dest
):
assert self.dest_project is None
new_project = self.source_project is not existingSourceProject
if existingDestProject:
# set that as the dest_project
self.dest_project = existingDestProject
if existingSourceProject is not existingDestProject and new_project:
# we cloned a new source project inside of an existing project
# add the cloned project's repo to the currentProject so we can find it later
# to set it as the ensemble's spec repository
existingDestProject.workingDirs[
self.source_project.projectRoot
] = self.source_project.project_repoview
# path from dest to source
else:
# otherwise set source_project as the dest_project
self.dest_project = self.source_project
if new_project:
# finishing creating the new project
# create local/unfurl.yaml in the new project
_create_local_config(self.source_project)
# set "" as dest because we already "consumed" dest by cloning the project to that location
dest = ""
if os.path.isabs(dest):
relDestDir = self.dest_project.get_relative_path(dest)
assert not relDestDir.startswith(".."), relDestDir
else:
relDestDir = dest.lstrip(".")
if (
self.ensemble_name
and self.ensemble_name != DefaultNames.EnsembleDirectory
or relDestDir == "."
or not relDestDir
):
relDestDir = self.ensemble_name
self.dest_path = relDestDir
def set_existing_ensemble(self, sourceProject):
from unfurl import yamlmanifest
if self.source_project is not sourceProject and not self.shared_repo:
if "localEnv" in self.templateVars and os.path.exists(
Path(self.dest_project.projectRoot) / self.dest_path
):
# the ensemble is already part of the source project repository or a submodule
localEnv = self.templateVars["localEnv"]
self.manifest = yamlmanifest.ReadOnlyManifest(localEnv=localEnv)
return self.manifest
return None
def set_source(self, sourceProject):
self.source_project = sourceProject
# make source relative to the source project
source_path = sourceProject.get_relative_path(self.input_source)
assert not source_path.startswith("..")
self.source_path = source_path
def set_ensemble(self, isRemote, existingSourceProject, existingDestProject):
sourceWasCloned = self.source_project is not existingSourceProject
destIsNew = not existingDestProject
if destIsNew and self.set_existing_ensemble(existingSourceProject):
# if dest_project is new (we just cloned it)
# check if we cloned the ensemble
async def validate_v1_path(self, path, **kwargs):
"""Take a string path from the v1 url and attempt to map it to an entity within this project.
If the entity is found, returns a FigsharePath object with the entity identifiers included.
Otherwise throws a 404 Not Found. Will also assert that the entity type inferred from the
path matches the type of the entity at that url.
:param str path: entity path from the v1 API
:rtype FigsharePath:
"""
if path == '/':
return FigsharePath('/', _ids=('', ), folder=True, is_public=False)
path_parts = self._path_split(path)
if len(path_parts) not in (2, 3):
raise exceptions.InvalidPathError('{} is not a valid Figshare path.'.format(path))
article_id = path_parts[1]
file_id = path_parts[2] if len(path_parts) == 3 else None
articles = await self._get_all_articles()
# TODO: need better way to get public/private
# This call's return value is currently busted at figshare for collections. Figshare always
# returns private-looking urls.
is_public = False
for item in articles:
if '/articles/' + article_id in item['url']:
article_name = item['title']
if settings.PRIVATE_IDENTIFIER not in item['url']:
is_public = True
article_segments = (*self.root_path_parts, 'articles', article_id)
if file_id:
file_response = await self.make_request(
'GET',
self.build_url(is_public, *article_segments, 'files', file_id),
expects=(200, ),
)
file_json = await file_response.json()
file_name = file_json['name']
if path[-1] == '/':
raise exceptions.NotFoundError('File paths must not end with "/". '
'{} not found.'.format(path))
return FigsharePath('/' + article_name + '/' + file_name,
_ids=(self.container_id, article_id, file_id),
folder=False,
is_public=is_public)
article_response = await self.make_request(
'GET',
self.build_url(is_public, *article_segments),
expects=(200, ),
)
article_json = await article_response.json()
if article_json['defined_type'] in settings.FOLDER_TYPES:
if not path[-1] == '/':
raise exceptions.NotFoundError('Folder paths must end with "/". {} not found.'.format(path))
return FigsharePath('/' + article_name + '/', _ids=(self.container_id, article_id),
folder=True, is_public=is_public)
raise exceptions.NotFoundError('This article is not configured as a folder defined_type. '
'{} not found.'.format(path))
async def validate_path(self, path, **kwargs):
"""Take a string path from the url and attempt to map it to an entity within this project.
If the entity is found, returns a FigsharePath object with the entity identifiers included.
Otherwise returns a FigsharePath with empty identifiers.
:param str path: identifier_path URN as passed through the v0 API
:rtype FigsharePath:
Quirks:
* v0 may pass an identifier_path whose last part is a name and not an identifier, in the
case of file/folder creation calls.
* validate_path validates parent and returns a FigsharePath as accurately as possible.
"""
if path == '/':
return FigsharePath('/', _ids=('', ), folder=True, is_public=False)
path_parts = self._path_split(path)
if len(path_parts) not in (2, 3):
raise exceptions.InvalidPathError('{} is not a valid Figshare path.'.format(path))
article_id = path_parts[1]
file_id = path_parts[2] if len(path_parts) == 3 else None
articles = await self._get_all_articles()
# TODO: need better way to get public/private
# This call's return value is currently busted at figshare for collections. Figshare always
# returns private-looking urls.
is_public = False
for item in articles:
if '/articles/' + article_id in item['url']:
article_name = item['title']
if settings.PRIVATE_IDENTIFIER not in item['url']:
is_public = True
article_segments = (*self.root_path_parts, 'articles', article_id)
if file_id:
file_response = await self.make_request(
'GET',
self.build_url(is_public, *article_segments, 'files', file_id),
expects=(200, 404, ),
)
if file_response.status == 200:
file_response_json = await file_response.json()
file_name = file_response_json['name']
return FigsharePath('/' + article_name + '/' + file_name,
_ids=(self.container_id, article_id, file_id),
folder=False,
is_public=is_public)
await file_response.release()
article_response = await self.make_request(
'GET',
self.build_url(is_public, *article_segments),
expects=(200, 404, ),
)
if article_response.status == 200:
article_json = await article_response.json()
if article_json['defined_type'] in settings.FOLDER_TYPES:
# Case of v0 file creation
if file_id:
ids = ('', article_id, '')
folder = False
path_urn = '/' + article_name + '/' + file_id
else:
ids = ('', article_id)
folder = True
path_urn = '/' + article_name + '/'
return FigsharePath(path_urn, _ids=ids, folder=folder, is_public=is_public)
else:
await article_response.release()
if file_id:
# Catch for if neither file nor article exist
raise exceptions.NotFoundError(path)
# Return for v0 folder creation
return FigsharePath(path, _ids=('', ''), folder=True, is_public=False)
async def revalidate_path(self, parent_path, child_name, folder):
"""Look for file or folder named ``child_name`` under ``parent_path``. If it finds a match,
it returns a FigsharePath object with the appropriate ids set. Otherwise, it returns a
FigsharePath where the ids are set to ``None``.
Due to the fact that figshare allows duplicate titles/names for
articles/files, revalidate_path cannot be relied on to always return
the correct id of an existing child_name. It will return the first id that
matches the folder and child_name arguments, or ``None`` if there is no match.
:param FigsharePath parent_path: Path of parent
:param str child_name: Name of child
:param bool folder: ``True`` if child is folder
:rtype: ``FigsharePath``
:return: a FigsharePath object, with ids set if a match was found
"""
parent_is_folder = False
urn_parts = (*self.root_path_parts, 'articles')
child_id = None
if not parent_path.is_root: # parent is fileset or article
if not folder: # child is article/file
list_children_response = await self.make_request(
'GET',
self.build_url(False, *urn_parts, parent_path.identifier),
expects=(200, ),
)
article_json = await list_children_response.json()
for file in article_json['files']:
if file['name'] == child_name:
child_id = str(file['id'])
break
return parent_path.child(child_name, _id=child_id, folder=folder,
parent_is_folder=parent_is_folder)
# parent is root
children = await self._get_all_articles()
articles = await asyncio.gather(*[
self._get_url_super(article_json['url'])
for article_json in children
])
for article in articles:
is_folder = article['defined_type'] in settings.FOLDER_TYPES
article_id = str(article['id'])
article_name = str(article['title'])
if folder != is_folder:
continue
elif folder:
if article_name == child_name:
child_id = article_id
break
else:
parent_is_folder = False
for file in article['files']:
if file['name'] == child_name:
parent_path = parent_path.child(article_name, _id=article_id, folder=False)
child_id = str(file['id'])
break
return parent_path.child(child_name, _id=child_id, folder=folder,
parent_is_folder=parent_is_folder)
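# Sketch: revalidate_path(root_path, 'data.csv', folder=False) walks the
# project's articles and returns a child FigsharePath whose _id is the first
# matching file's id, or None when no article/file with that name exists
# (duplicate titles simply return the first hit).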
async def upload(self, stream, path, conflict='replace', **kwargs):
"""Upload a file to provider root or to an article whose defined_type is
configured to represent a folder.
:param asyncio.StreamReader stream: stream to upload
:param FigsharePath path: FigsharePath to upload the file to.
:param dict \*\*kwargs: Will be passed to returned metadata object
"""
if path.identifier and conflict == 'replace':
raise exceptions.UnsupportedOperationError('Files in Figshare cannot be updated')
path, exists = await self.handle_name_conflict(path, conflict=conflict)
if not path.parent.is_root:
parent_resp = await self.make_request(
'GET',
self.build_url(False, *self.root_path_parts, 'articles', path.parent.identifier),
expects=(200, ),
)
parent_json = await parent_resp.json()
if not parent_json['defined_type'] in settings.FOLDER_TYPES:
del path._parts[1]
# Create article or retrieve article_id from existing article
if not path.parent.is_root:
article_id = path.parent.identifier
else:
article_name = json.dumps({'title': path.name})
if self.container_type == 'project':
article_id = await self._create_article(article_name)
elif self.container_type == 'collection':
# TODO don't think this is correct. Probably should POST to /accounts/articles
article_id = await self._create_article(article_name)
article_list = json.dumps({'articles': [article_id]})
await self.make_request(
'POST',
self.build_url(False, *self.root_path_parts, 'articles'),
data=article_list,
expects=(201, ),
)
file_id = await self._upload_file(article_id, path.name, stream)
# Build new file path and return metadata
path = FigsharePath('/' + article_id + '/' + file_id,
_ids=(self.container_id, article_id, file_id),
folder=False,
is_public=False)
return (await self.metadata(path, **kwargs)), True
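# Flow sketch (hypothetical paths): uploading '/myfolder/data.csv' reuses the
# parent fileset article as article_id, while uploading '/data.csv' at the
# provider root first creates a new article titled 'data.csv' and then
# attaches the uploaded file to it.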
async def create_folder(self, path, **kwargs):
"""Create a folder at ``path``. Returns a `FigshareFolderMetadata` object if successful.
:param FigsharePath path: FigsharePath representing the folder to create
:rtype: :class:`waterbutler.core.metadata.FigshareFolderMetadata`
:raises: :class:`waterbutler.core.exceptions.CreateFolderError`
"""
if (len(path.parts) == 2) and path.is_folder:
article_name = path.parts[-1].value
else:
raise exceptions.CreateFolderError(
'Only projects and collections may contain folders. Unable to create '
'"{}/"'.format(path.name),
code=400,
)
article_data = json.dumps({'title': article_name, 'defined_type': 'fileset'})
article_id = await self._create_article(article_data)
get_article_response = await self.make_request(
'GET',
self.build_url(False, *self.root_path_parts, 'articles', article_id),
expects=(200, ),
throws=exceptions.NotFoundError,
)
article_json = await get_article_response.json()
return metadata.FigshareFolderMetadata(article_json)
async def delete(self, path, confirm_delete=0, **kwargs):
"""Delete the entity at ``path``.
:param FigsharePath path: Path to be deleted
:param int confirm_delete: Must be 1 to confirm root folder delete
:rtype: None
:raises: :class:`waterbutler.core.exceptions.NotFoundError`
:raises: :class:`waterbutler.core.exceptions.DeleteError`
Quirks:
* If the FigsharePath given is for the provider root path, then the contents of the
provider root path will be deleted, but not the provider root itself.
"""
if not path.identifier:
raise exceptions.NotFoundError(str(path))
if path.is_root:
if confirm_delete == 1:
return await self._delete_container_contents()
raise exceptions.DeleteError(
'confirm_delete=1 is required for deleting root provider folder',
code=400
)
if len(path.parts) == 2:
if not path.is_folder:
raise exceptions.NotFoundError(str(path))
delete_path = (*self.root_path_parts, 'articles', path.parts[1]._id)
elif len(path.parts) == 3:
if path.is_folder:
raise exceptions.NotFoundError(str(path))
article_response = await self.make_request(
'GET',
self.build_url(False, *self.root_path_parts, 'articles', path.parts[1]._id),
expects=(200, ),
)
article_json = await article_response.json()
if article_json['defined_type'] in settings.FOLDER_TYPES:
delete_path = ('articles', path.parts[1]._id, 'files', path.parts[2]._id)
else:
delete_path = (*self.root_path_parts, 'articles', path.parts[1]._id)
delete_article_response = await self.make_request(
'DELETE',
self.build_url(False, *delete_path),
expects=(204, ),
)
def to_map(self):
result = {}
result['auth_token'] = self.auth_token
result['product_instance_id'] = self.product_instance_id
result['region_name'] = self.region_name
result['mobile'] = self.mobile
return result
def from_map(self, map={}):
self.auth_token = map.get('auth_token')
self.product_instance_id = map.get('product_instance_id')
self.region_name = map.get('region_name')
self.mobile = map.get('mobile')
return self
class QueryBaasPlusYdpacprotEcpacResponse(TeaModel):
def __init__(self, req_msg_id=None, result_code=None, result_msg=None, decision=None, passed=None, score=None,
strategies=None):
self.req_msg_id = req_msg_id
self.result_code = result_code
self.result_msg = result_msg
self.decision = decision
self.passed = passed
self.score = score
self.strategies = strategies
def validate(self):
pass
def to_map(self):
result = {}
result['req_msg_id'] = self.req_msg_id
result['result_code'] = self.result_code
result['result_msg'] = self.result_msg
result['decision'] = self.decision
result['passed'] = self.passed
result['score'] = self.score
result['strategies'] = []
if self.strategies is not None:
for k in self.strategies:
result['strategies'].append(k)
else:
result['strategies'] = None
return result
def from_map(self, map={}):
self.req_msg_id = map.get('req_msg_id')
self.result_code = map.get('result_code')
self.result_msg = map.get('result_msg')
self.decision = map.get('decision')
self.passed = map.get('passed')
self.score = map.get('score')
self.strategies = []
if map.get('strategies') is not None:
for k in map.get('strategies'):
self.strategies.append(k)
else:
self.strategies = None
return self
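# Round-trip sketch (hypothetical values): for these generated models,
# m.from_map(m.to_map()) restores the same fields, e.g.
#   resp = QueryBaasPlusYdpacprotEcpacResponse()
#   resp.from_map({'result_code': 'OK', 'strategies': ['s1']})
#   assert resp.result_code == 'OK' and resp.strategies == ['s1']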
class QueryBaasPlusYdauthprotTwometaRequest(TeaModel):
def __init__(self, auth_token=None, product_instance_id=None, region_name=None, cert_no=None, user_name=None):
self.auth_token = auth_token
self.product_instance_id = product_instance_id
self.region_name = region_name
self.cert_no = cert_no
self.user_name = user_name
def validate(self):
pass
def to_map(self):
result = {}
result['auth_token'] = self.auth_token
result['product_instance_id'] = self.product_instance_id
result['region_name'] = self.region_name
result['cert_no'] = self.cert_no
result['user_name'] = self.user_name
return result
def from_map(self, map={}):
self.auth_token = map.get('auth_token')
self.product_instance_id = map.get('product_instance_id')
self.region_name = map.get('region_name')
self.cert_no = map.get('cert_no')
self.user_name = map.get('user_name')
return self
class QueryBaasPlusYdauthprotTwometaResponse(TeaModel):
def __init__(self, req_msg_id=None, result_code=None, result_msg=None, decision=None, passed=None, score=None,
strategies=None):
self.req_msg_id = req_msg_id
self.result_code = result_code
self.result_msg = result_msg
self.decision = decision
self.passed = passed
self.score = score
self.strategies = strategies
def validate(self):
pass
def to_map(self):
result = {}
result['req_msg_id'] = self.req_msg_id
result['result_code'] = self.result_code
result['result_msg'] = self.result_msg
result['decision'] = self.decision
result['passed'] = self.passed
result['score'] = self.score
result['strategies'] = []
if self.strategies is not None:
for k in self.strategies:
result['strategies'].append(k)
else:
result['strategies'] = None
return result
def from_map(self, map={}):
self.req_msg_id = map.get('req_msg_id')
self.result_code = map.get('result_code')
self.result_msg = map.get('result_msg')
self.decision = map.get('decision')
self.passed = map.get('passed')
self.score = map.get('score')
self.strategies = []
if map.get('strategies') is not None:
for k in map.get('strategies'):
self.strategies.append(k)
else:
self.strategies = None
return self
class QueryBaasPlusYdauthprotThreemetaRequest(TeaModel):
def __init__(self, auth_token=None, product_instance_id=None, region_name=None, cert_no=None, mobile=None,
user_name=None):
self.auth_token = auth_token
self.product_instance_id = product_instance_id
self.region_name = region_name
self.cert_no = cert_no
self.mobile = mobile
self.user_name = user_name
def validate(self):
pass
def to_map(self):
result = {}
result['auth_token'] = self.auth_token
result['product_instance_id'] = self.product_instance_id
result['region_name'] = self.region_name
result['cert_no'] = self.cert_no
result['mobile'] = self.mobile
result['user_name'] = self.user_name
return result
def from_map(self, map={}):
self.auth_token = map.get('auth_token')
self.product_instance_id = map.get('product_instance_id')
self.region_name = map.get('region_name')
self.cert_no = map.get('cert_no')
self.mobile = map.get('mobile')
self.user_name = map.get('user_name')
return self
class QueryBaasPlusYdauthprotThreemetaResponse(TeaModel):
def __init__(self, req_msg_id=None, result_code=None, result_msg=None, decision=None, passed=None, score=None,
strategies=None):
self.req_msg_id = req_msg_id
self.result_code = result_code
self.result_msg = result_msg
self.decision = decision
self.passed = passed
self.score = score
self.strategies = strategies
def validate(self):
pass
def to_map(self):
result = {}
result['req_msg_id'] = self.req_msg_id
result['result_code'] = self.result_code
result['result_msg'] = self.result_msg
result['decision'] = self.decision
result['passed'] = self.passed
result['score'] = self.score
result['strategies'] = []
if self.strategies is not None:
for k in self.strategies:
result['strategies'].append(k)
else:
result['strategies'] = None
return result
def from_map(self, map={}):
self.req_msg_id = map.get('req_msg_id')
self.result_code = map.get('result_code')
self.result_msg = map.get('result_msg')
self.decision = map.get('decision')
self.passed = map.get('passed')
self.score = map.get('score')
self.strategies = []
if map.get('strategies') is not None:
for k in map.get('strategies'):
self.strategies.append(k)
else:
self.strategies = None
return self
class QueryBaasPlusYdauthprotFourmetaRequest(TeaModel):
def __init__(self, auth_token=None, product_instance_id=None, region_name=None, card_no=None, cert_no=None,
mobile=None, user_name=None):
self.auth_token = auth_token
self.product_instance_id = product_instance_id
self.region_name = region_name
self.card_no = card_no
self.cert_no = cert_no
self.mobile = mobile
self.user_name = user_name
def validate(self):
pass
def to_map(self):
result = {}
result['auth_token'] = self.auth_token
result['product_instance_id'] = self.product_instance_id
result['region_name'] = self.region_name
result['card_no'] = self.card_no
result['cert_no'] = self.cert_no
result['mobile'] = self.mobile
result['user_name'] = self.user_name
return result
def from_map(self, map={}):
self.auth_token = map.get('auth_token')
self.product_instance_id = map.get('product_instance_id')
self.region_name = map.get('region_name')
self.card_no = map.get('card_no')
self.cert_no = map.get('cert_no')
self.mobile = map.get('mobile')
self.user_name = map.get('user_name')
return self
class QueryBaasPlusYdauthprotFourmetaResponse(TeaModel):
def __init__(self, req_msg_id=None, result_code=None, result_msg=None, decision=None, passed=None, score=None,
strategies=None):
self.req_msg_id = req_msg_id
self.result_code = result_code
self.result_msg = result_msg
self.decision = decision
self.passed = passed
self.score = score
self.strategies = strategies
def validate(self):
pass
def to_map(self):
result = {}
result['req_msg_id'] = self.req_msg_id
result['result_code'] = self.result_code
result['result_msg'] = self.result_msg
result['decision'] = self.decision
result['passed'] = self.passed
result['score'] = self.score
result['strategies'] = []
if self.strategies is not None:
for k in self.strategies:
result['strategies'].append(k)
else:
result['strategies'] = None
return result
def from_map(self, map={}):
self.req_msg_id = map.get('req_msg_id')
self.result_code = map.get('result_code')
self.result_msg = map.get('result_msg')
self.decision = map.get('decision')
self.passed = map.get('passed')
self.score = map.get('score')
self.strategies = []
if map.get('strategies') is not None:
for k in map.get('strategies'):
self.strategies.append(k)
else:
self.strategies = None
return self
class QueryBaasPlusYdmktprotEcmarketcampaignRequest(TeaModel):
def __init__(self, auth_token=None, product_instance_id=None, region_name=None, mobile=None):
self.auth_token = auth_token
self.product_instance_id = product_instance_id
self.region_name = region_name
self.mobile = mobile
def validate(self):
pass
def to_map(self):
result = {}
result['auth_token'] = self.auth_token
result['product_instance_id'] = self.product_instance_id
result['region_name'] = self.region_name
result['mobile'] = self.mobile
return result
def from_map(self, map={}):
self.auth_token = map.get('auth_token')
self.product_instance_id = map.get('product_instance_id')
self.region_name = map.get('region_name')
self.mobile = map.get('mobile')
return self
class QueryBaasPlusYdmktprotEcmarketcampaignResponse(TeaModel):
def __init__(self, req_msg_id=None, result_code=None, result_msg=None, decision=None, passed=None, score=None,
strategies=None):
self.req_msg_id = req_msg_id
self.result_code = result_code
self.result_msg = result_msg
self.decision = decision
self.passed = passed
self.score = score
self.strategies = strategies
def validate(self):
pass
def to_map(self):
result = {}
result['req_msg_id'] = self.req_msg_id
result['result_code'] = self.result_code
result['result_msg'] = self.result_msg
result['decision'] = self.decision
result['passed'] = self.passed
result['score'] = self.score
result['strategies'] = []
if self.strategies is not None:
for k in self.strategies:
result['strategies'].append(k)
else:
result['strategies'] = None
return result
def from_map(self, map={}):
self.req_msg_id = map.get('req_msg_id')
self.result_code = map.get('result_code')
self.result_msg = map.get('result_msg')
self.decision = map.get('decision')
self.passed = map.get('passed')
self.score = map.get('score')
self.strategies = []
if map.get('strategies') is not None:
for k in map.get('strategies'):
self.strategies.append(k)
else:
self.strategies = None
return self
class QueryBaasPlusYdregprotEcregisterRequest(TeaModel):
def __init__(self, auth_token=None, product_instance_id=None, region_name=None, mobile=None):
self.auth_token = auth_token
self.product_instance_id = product_instance_id
self.region_name = region_name
self.mobile = mobile
def validate(self):
pass
def to_map(self):
result = {}
result['auth_token'] = self.auth_token
result['product_instance_id'] = self.product_instance_id
result['region_name'] = self.region_name
result['mobile'] = self.mobile
return result
def from_map(self, map={}):
self.auth_token = map.get('auth_token')
self.product_instance_id = map.get('product_instance_id')
self.region_name = map.get('region_name')
self.mobile = map.get('mobile')
return self
class QueryBaasPlusYdregprotEcregisterResponse(TeaModel):
def __init__(self, req_msg_id=None, result_code=None, result_msg=None, decision=None, passed=None, score=None,
strategies=None):
self.req_msg_id = req_msg_id
self.result_code = result_code
self.result_msg = result_msg
self.decision = decision
self.passed = passed
self.score = score
self.strategies = strategies
def validate(self):
pass
def to_map(self):
result = {}
result['req_msg_id'] = self.req_msg_id
result['result_code'] = self.result_code
result['result_msg'] = self.result_msg
result['decision'] = self.decision
result['passed'] = self.passed
result['score'] = self.score
result['strategies'] = []
if self.strategies is not None:
for k in self.strategies:
result['strategies'].append(k)
else:
result['strategies'] = None
return result
def from_map(self, map={}):
self.req_msg_id = map.get('req_msg_id')
self.result_code = map.get('result_code')
self.result_msg = map.get('result_msg')
self.decision = map.get('decision')
self.passed = map.get('passed')
self.score = map.get('score')
self.strategies = []
if map.get('strategies') is not None:
for k in map.get('strategies'):
self.strategies.append(k)
else:
self.strategies = None
return self
class QueryBaasPlusEpayauthBranchbankRequest(TeaModel):
def __init__(self, auth_token=None, product_instance_id=None, region_name=None, bank_name=None,
district_code=None, root_bank_code=None):
self.auth_token = auth_token
self.product_instance_id = product_instance_id
self.region_name = region_name
self.bank_name = bank_name
self.district_code = district_code
self.root_bank_code = root_bank_code
def validate(self):
pass
def to_map(self):
result = {}
result['auth_token'] = self.auth_token
result['product_instance_id'] = self.product_instance_id
result['region_name'] = self.region_name
result['bank_name'] = self.bank_name
result['district_code'] = self.district_code
result['root_bank_code'] = self.root_bank_code
return result
def from_map(self, map={}):
self.auth_token = map.get('auth_token')
self.product_instance_id = map.get('product_instance_id')
self.region_name = map.get('region_name')
self.bank_name = map.get('bank_name')
self.district_code = map.get('district_code')
self.root_bank_code = map.get('root_bank_code')
return self
class QueryBaasPlusEpayauthBranchbankResponse(TeaModel):
def __init__(self, req_msg_id=None, result_code=None, result_msg=None, bank_details=None):
self.req_msg_id = req_msg_id
self.result_code = result_code
self.result_msg = result_msg
self.bank_details = bank_details
def validate(self):
if self.bank_details:
for k in self.bank_details:
if k:
k.validate()
def to_map(self):
result = {}
result['req_msg_id'] = self.req_msg_id
result['result_code'] = self.result_code
result['result_msg'] = self.result_msg
result['bank_details'] = []
if self.bank_details is not None:
for k in self.bank_details:
result['bank_details'].append(k.to_map() if k else None)
else:
result['bank_details'] = None
return result
def from_map(self, map={}):
self.req_msg_id = map.get('req_msg_id')
self.result_code = map.get('result_code')
self.result_msg = map.get('result_msg')
self.bank_details = []
if map.get('bank_details') is not None:
for k in map.get('bank_details'):
temp_model = Institution()
temp_model = temp_model.from_map(k)
self.bank_details.append(temp_model)
else:
self.bank_details = None
return self
class QueryBaasPlusEpayauthDistrictRequest(TeaModel):
def __init__(self, auth_token=None, product_instance_id=None, region_name=None, parent_code=None):
self.auth_token = auth_token
self.product_instance_id = product_instance_id
self.region_name = region_name
self.parent_code = parent_code
            if NPA == 3:
                abc[0] = abc[1] = np.sqrt((volume / x) / abc[NPA - 1])
elif NPA == 2:
abc[0] = abc[1]
                abc[2] = (volume / x) / (thickness1 ** 2)
elif NPA == 1:
abc[1] = abc[0]
abc[2] = (volume / x) / (thickness1 ** 2)
para = np.array([abc[0], abc[1], abc[2], alpha, beta, gamma])
a, b, c = abc[0], abc[1], abc[2]
maxvec = (a * b * c) / (minvec ** 2)
# Define limits on cell dimensions
if "min_l" not in kwargs:
min_l = minvec
else:
min_l = kwargs["min_l"]
if "mid_l" not in kwargs:
mid_l = min_l
else:
mid_l = kwargs["mid_l"]
if "max_l" not in kwargs:
max_l = mid_l
else:
max_l = kwargs["max_l"]
l_min = min(a, b, c)
l_max = max(a, b, c)
for x in (a, b, c):
if x <= l_max and x >= l_min:
l_mid = x
if not (l_min >= min_l and l_mid >= mid_l and l_max >= max_l):
continue
if minvec < maxvec:
smallvec = min(
a * np.cos(max(beta, gamma)),
b * np.cos(max(alpha, gamma)),
c * np.cos(max(alpha, beta)),
)
if (
a > minvec
and b > minvec
and c > minvec
and a < maxvec
and b < maxvec
and c < maxvec
and smallvec < minvec
and alpha > minangle
and beta > minangle
and gamma > minangle
and alpha < maxangle
and beta < maxangle
and gamma < maxangle
and a / b < max_ratio
and a / c < max_ratio
and b / c < max_ratio
and b / a < max_ratio
and c / a < max_ratio
and c / b < max_ratio
):
return para
# If maxattempts tries have been made without success
msg = "Cannot get lattice after {:d} cycles for volume {:.2f}".format(maxattempts, volume)
raise VolumeError(msg)
def generate_lattice_1D(
ltype,
volume,
area=None,
minvec=1.2,
minangle=np.pi / 6,
max_ratio=10.0,
maxattempts=100,
**kwargs
):
"""
Generates a lattice (3x3 matrix) according to the spacegroup symmetry and
number of atoms. If the spacegroup has centering, we will transform to
conventional cell setting. If the generated lattice does not meet the
minimum angle and vector requirements, we try to generate a new one, up to
maxattempts times.
Note: The monoclinic Rod groups have different unique axes. Groups 3-7
have unique axis a, while 8-12 have unique axis c. We use periodic
axis c for all Rod groups.
Args:
        ltype: the lattice type, e.g. 'triclinic', 'monoclinic', or 'hexagonal'
volume: volume of the lattice
area: cross-sectional area of the unit cell in Angstroms squared. If
set to None, a value is chosen automatically
minvec: minimum allowed lattice vector length (among a, b, and c)
minangle: minimum allowed lattice angle (among alpha, beta, and gamma)
max_ratio: largest allowed ratio of two lattice vector lengths
maxattempts: the maximum number of attempts for generating a lattice
kwargs: a dictionary of optional values. These include:
'unique_axis': the axis ('a', 'b', or 'c') which is not symmetrically
equivalent to the other two
'min_l': the smallest allowed cell vector. The smallest vector must be larger
than this.
'mid_l': the second smallest allowed cell vector. The second smallest vector
must be larger than this.
'max_l': the third smallest allowed cell vector. The largest cell vector must
be larger than this.
    Returns:
        a 6-element array of cell parameters [a, b, c, alpha, beta, gamma].
        Raises a VolumeError if no valid lattice can be generated within
        maxattempts attempts.
    """
    unique_axis = kwargs.get("unique_axis", "a")
# Store the periodic axis
PA = 3
# Set the unique axis for monoclinic cells
# if num in range(3, 8): unique_axis = "a"
# elif num in range(8, 13): unique_axis = "c"
maxangle = np.pi - minangle
for n in range(maxattempts):
abc = np.ones([3])
if area is None:
v = random_vector()
thickness1 = np.cbrt(volume) * (v[0] / (v[0] * v[1] * v[2]))
else:
thickness1 = volume / area
abc[PA - 1] = thickness1
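        # The periodic axis (index PA - 1) stores the cell length along the rod direction;
        # the two in-plane axes are solved from the target volume below.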
alpha, beta, gamma = np.pi / 2, np.pi / 2, np.pi / 2
# Triclinic
# if num <= 2:
if ltype == "triclinic":
mat = random_shear_matrix(width=0.2)
a, b, c, alpha, beta, gamma = matrix2para(mat)
x = np.sqrt(
1
- np.cos(alpha) ** 2
- np.cos(beta) ** 2
- np.cos(gamma) ** 2
+ 2 * (np.cos(alpha) * np.cos(beta) * np.cos(gamma))
)
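            # Standard triclinic volume factor: V = a*b*c*x, where x depends only on the cell angles.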
abc[PA - 1] = abc[PA - 1] / x # scale thickness by outer product of vectors
ab = volume / (abc[PA - 1] * x)
ratio = a / b
if PA == 3:
abc[0] = np.sqrt(ab * ratio)
abc[1] = np.sqrt(ab / ratio)
elif PA == 2:
abc[0] = np.sqrt(ab * ratio)
abc[2] = np.sqrt(ab / ratio)
elif PA == 1:
abc[1] = np.sqrt(ab * ratio)
abc[2] = np.sqrt(ab / ratio)
# Monoclinic
# elif num <= 12:
elif ltype == "monoclinic":
a, b, c = random_vector()
if unique_axis == "a":
                alpha = gaussian(minangle, maxangle)
x = np.sin(alpha)
elif unique_axis == "b":
beta = gaussian(minangle, maxangle)
x = np.sin(beta)
elif unique_axis == "c":
gamma = gaussian(minangle, maxangle)
x = np.sin(gamma)
ab = volume / (abc[PA - 1] * x)
ratio = a / b
if PA == 3:
abc[0] = np.sqrt(ab * ratio)
abc[1] = np.sqrt(ab / ratio)
elif PA == 2:
abc[0] = np.sqrt(ab * ratio)
abc[2] = np.sqrt(ab / ratio)
elif PA == 1:
abc[1] = np.sqrt(ab * ratio)
abc[2] = np.sqrt(ab / ratio)
# Orthorhombic
        # elif num <= 22:
elif ltype == "orthorhombic":
vec = random_vector()
if PA == 3:
ratio = abs(vec[0] / vec[1]) # ratio a/b
abc[1] = np.sqrt(volume / (thickness1 * ratio))
abc[0] = abc[1] * ratio
elif PA == 2:
ratio = abs(vec[0] / vec[2]) # ratio a/b
abc[2] = np.sqrt(volume / (thickness1 * ratio))
abc[0] = abc[2] * ratio
elif PA == 1:
ratio = abs(vec[1] / vec[2]) # ratio a/b
abc[2] = np.sqrt(volume / (thickness1 * ratio))
abc[1] = abc[2] * ratio
# Tetragonal
# elif num <= 41:
elif ltype == "tetragonal":
if PA == 3:
abc[0] = abc[1] = np.sqrt(volume / thickness1)
elif PA == 2:
abc[0] = abc[1]
abc[2] = volume / (abc[PA - 1] ** 2)
elif PA == 1:
abc[1] = abc[0]
abc[2] = volume / (abc[PA - 1] ** 2)
# Trigonal/Rhombohedral/Hexagonal
# elif num <= 75:
elif ltype in ["hexagonal", "trigonal"]:
gamma = np.pi / 3 * 2
x = np.sqrt(3.0) / 2.0
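            # sin(gamma) = sin(120 deg) = sqrt(3)/2, the area factor of the hexagonal a-b plane.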
if PA == 3:
abc[0] = abc[1] = np.sqrt((volume / x) / abc[PA - 1])
elif PA == 2:
abc[0] = abc[1]
                abc[2] = (volume / x) / (thickness1 ** 2)
elif PA == 1:
abc[1] = abc[0]
abc[2] = (volume / x) / (thickness1 ** 2)
para = np.array([abc[0], abc[1], abc[2], alpha, beta, gamma])
a, b, c = abc[0], abc[1], abc[2]
maxvec = (a * b * c) / (minvec ** 2)
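        # Upper bound on any single vector: if the other two sit at minvec, the third is (a*b*c) / minvec**2.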
# Define limits on cell dimensions
if "min_l" not in kwargs:
min_l = minvec
else:
min_l = kwargs["min_l"]
if "mid_l" not in kwargs:
mid_l = min_l
else:
mid_l = kwargs["mid_l"]
if "max_l" not in kwargs:
max_l = mid_l
else:
max_l = kwargs["max_l"]
l_min = min(a, b, c)
l_max = max(a, b, c)
for x in (a, b, c):
if x <= l_max and x >= l_min:
l_mid = x
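        # Reject cells whose sorted vector lengths fall below their respective lower bounds.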
if not (l_min >= min_l and l_mid >= mid_l and l_max >= max_l):
continue
if minvec < maxvec:
smallvec = min(
a * np.cos(max(beta, gamma)),
b * np.cos(max(alpha, gamma)),
c * np.cos(max(alpha, beta)),
)
if (
a > minvec
and b > minvec
and c > minvec
and a < maxvec
and b < maxvec
and c < maxvec
and smallvec < minvec
and alpha > minangle
and beta > minangle
and gamma > minangle
and alpha < maxangle
and beta < maxangle
and gamma < maxangle
and a / b < max_ratio
and a / c < max_ratio
and b / c < max_ratio
and b / a < max_ratio
and c / a < max_ratio
                and c / b < max_ratio
            ):
                return para
    # If maxattempts tries have been made without success
    msg = "Cannot get lattice after {:d} cycles for volume {:.2f}".format(maxattempts, volume)
    raise VolumeError(msg)
        self.assertEqual(c1.fetchone()[0], 4096)
self.assertEqual(c2.fetchone()[0], 4096)
c1.execute("pragma branches")
c2.execute("pragma branches")
self.assertListEqual(c1.fetchall(), [("master",),("test",),("sub-test1",),("sub-test2",)])
self.assertListEqual(c2.fetchall(), [("master",),("test",),("sub-test1",),("sub-test2",)])
        # try to rename a nonexistent branch
with self.assertRaises(sqlite3.OperationalError):
c2.execute("pragma rename_branch test33 must-fail")
# create a new branch on connection 1
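        # Judging by the assertions below, "pragma new_branch=<name> at <branch>.<N>" forks the
        # branch at commit N and switches this connection to the new branch.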
c1.execute("pragma new_branch=test2 at master.3")
c1.execute("pragma branch")
self.assertEqual(c1.fetchone()[0], "test2")
c1.execute("pragma branches")
self.assertListEqual(c1.fetchall(), [("master",),("test",),("sub-test1",),("sub-test2",),("test2",)])
c1.execute("pragma branch=test")
c1.execute("pragma branch")
self.assertEqual(c1.fetchone()[0], "test")
# try to rename it on connection 2
c2.execute("pragma rename_branch test2 new-branch")
c2.execute("pragma branch")
self.assertEqual(c2.fetchone()[0], "master")
c2.execute("pragma branches")
self.assertListEqual(c2.fetchall(), [("master",),("test",),("sub-test1",),("sub-test2",),("new-branch",)])
# check the current branch name on connection 1
# c1.execute("pragma branch")
# self.assertEqual(c1.fetchone()[0], "new-branch")
c1.execute("pragma branches")
self.assertListEqual(c1.fetchall(), [("master",),("test",),("sub-test1",),("sub-test2",),("new-branch",)])
c1.execute("pragma branch=new-branch")
c1.execute("pragma branch")
self.assertEqual(c1.fetchone()[0], "new-branch")
# insert a new value on connection 1
c1.execute("insert into t1 values ('from the new renamed branch')")
conn1.commit()
c1.execute("select * from t1")
self.assertListEqual(c1.fetchall(), [("first",),("second",),("from the new renamed branch",)])
# check the value on connection 2
c2.execute("pragma branch=new-branch")
c2.execute("pragma branch")
self.assertEqual(c2.fetchone()[0], "new-branch")
c2.execute("select * from t1")
self.assertListEqual(c2.fetchall(), [("first",),("second",),("from the new renamed branch",)])
# close and reopen the connections
conn1.close()
conn2.close()
conn1 = sqlite3.connect('file:test.db?branches=on')
conn2 = sqlite3.connect('file:test.db?branches=on&single_connection=true')
c1 = conn1.cursor()
c2 = conn2.cursor()
# the new name should be there
c1.execute("pragma branches")
self.assertListEqual(c1.fetchall(), [("master",),("test",),("sub-test1",),("sub-test2",),("new-branch",)])
c2.execute("pragma branches")
self.assertListEqual(c2.fetchall(), [("master",),("test",),("sub-test1",),("sub-test2",),("new-branch",)])
        # rename a branch in one conn and try to move to the previous/old name on another conn (should work if single_connection=true)
c1.execute("pragma new_branch=test2 at master.3")
conn1.commit()
c1.execute("pragma rename_branch new-branch renamed-branch")
c1.execute("pragma branch")
self.assertEqual(c1.fetchone()[0], "test2")
c1.execute("pragma branches")
self.assertListEqual(c1.fetchall(), [("master",),("test",),("sub-test1",),("sub-test2",),("renamed-branch",),("test2",)])
# try to move to the previous/old name on connection 2 (should work if single_connection=true)
c2.execute("pragma branch=new-branch")
c2.execute("pragma branch")
self.assertEqual(c2.fetchone()[0], "new-branch")
c2.execute("pragma branches")
self.assertListEqual(c2.fetchall(), [("master",),("test",),("sub-test1",),("sub-test2",),("new-branch",)])
c2.execute("select * from t1")
self.assertListEqual(c2.fetchall(), [("first",),("second",),("from the new renamed branch",)])
c1.execute("pragma branch=renamed-branch")
c1.execute("pragma branch")
self.assertEqual(c1.fetchone()[0], "renamed-branch")
conn1.close()
conn2.close()
def test08_truncate_branch(self):
# test: truncate in one conn a branch that is in use in another conn, then try to access it in this second conn
# test: truncate in one conn a branch that is NOT in use in another conn, then try to access it in this second conn
import shutil
shutil.copy("test.db","test3.db")
conn1 = sqlite3.connect('file:test.db?branches=on')
conn2 = sqlite3.connect('file:test.db?branches=on')
c1 = conn1.cursor()
c2 = conn2.cursor()
c1.execute("pragma branches")
c2.execute("pragma branches")
self.assertListEqual(c1.fetchall(), [("master",),("test",),("sub-test1",),("sub-test2",),("renamed-branch",),("test2",)])
self.assertListEqual(c2.fetchall(), [("master",),("test",),("sub-test1",),("sub-test2",),("renamed-branch",),("test2",)])
c1.execute("pragma branch")
c2.execute("pragma branch")
self.assertEqual(c1.fetchone()[0], "master")
self.assertEqual(c2.fetchone()[0], "master")
c1.execute("pragma branch_info(master)")
obj = json.loads(c1.fetchone()[0])
self.assertGreater(obj["total_commits"], 4)
        # try to truncate to a disallowed point
with self.assertRaises(sqlite3.OperationalError):
c1.execute("pragma branch_truncate(master.2)")
c1.execute("select * from t1")
c2.execute("select * from t1")
self.assertListEqual(c1.fetchall(), [("first",),("second",),("third",),("fourth",),("fifth",),("sixth",)])
self.assertListEqual(c2.fetchall(), [("first",),("second",),("third",),("fourth",),("fifth",),("sixth",)])
if not omit_logs:
c1.execute("pragma branch_log master")
c2.execute("pragma branch_log master")
self.assertListEqual(c1.fetchall(), [
("master",1,"create table t1(name)",),
("master",2,"insert into t1 values ('first')",),
("master",3,"insert into t1 values ('second')",),
("master",4,"insert into t1 values ('third')",),
("master",5,"insert into t1 values ('fourth')",),
("master",5,"insert into t1 values ('fifth')",),
("master",5,"insert into t1 values ('sixth')",)
])
self.assertListEqual(c2.fetchall(), [
("master",1,"create table t1(name)",),
("master",2,"insert into t1 values ('first')",),
("master",3,"insert into t1 values ('second')",),
("master",4,"insert into t1 values ('third')",),
("master",5,"insert into t1 values ('fourth')",),
("master",5,"insert into t1 values ('fifth')",),
("master",5,"insert into t1 values ('sixth')",)
])
c1.execute("pragma branch_info(master)")
obj = json.loads(c1.fetchone()[0])
self.assertGreater(obj["total_commits"], 4)
# try to truncate to an allowed point
c1.execute("pragma branch_truncate(master.4)")
c1.execute("select * from t1")
c2.execute("select * from t1")
self.assertListEqual(c1.fetchall(), [("first",),("second",),("third",)])
self.assertListEqual(c2.fetchall(), [("first",),("second",),("third",)])
if not omit_logs:
c1.execute("pragma branch_log master")
c2.execute("pragma branch_log master")
self.assertListEqual(c1.fetchall(), [
("master",1,"create table t1(name)",),
("master",2,"insert into t1 values ('first')",),
("master",3,"insert into t1 values ('second')",),
("master",4,"insert into t1 values ('third')",),
])
self.assertListEqual(c2.fetchall(), [
("master",1,"create table t1(name)",),
("master",2,"insert into t1 values ('first')",),
("master",3,"insert into t1 values ('second')",),
("master",4,"insert into t1 values ('third')",),
])
# try to move to a deleted point
with self.assertRaises(sqlite3.OperationalError):
c1.execute("pragma branch=master.5")
c1.execute("pragma branch")
self.assertEqual(c1.fetchone()[0], "master")
c1.execute("pragma branch=master.3")
c1.execute("pragma branch")
self.assertEqual(c1.fetchone()[0], "master.3")
c1.execute("select * from t1")
self.assertListEqual(c1.fetchall(), [("first",),("second",)])
c1.execute("pragma branch=master.2")
c1.execute("pragma branch")
self.assertEqual(c1.fetchone()[0], "master.2")
c1.execute("select * from t1")
self.assertListEqual(c1.fetchall(), [("first",)])
c1.execute("pragma branch=test")
c1.execute("pragma branch")
self.assertEqual(c1.fetchone()[0], "test")
c1.execute("select * from t1")
self.assertListEqual(c1.fetchall(), [("first",),("from test branch",),("val1",),(2,),(3.3,)])
# close and reopen the connections
conn1.close()
conn2.close()
conn1 = sqlite3.connect('file:test.db?branches=on')
conn2 = sqlite3.connect('file:test.db?branches=on&single_connection=true')
c1 = conn1.cursor()
c2 = conn2.cursor()
c1.execute("select * from t1")
c2.execute("select * from t1")
self.assertListEqual(c1.fetchall(), [("first",),("second",),("third",)])
self.assertListEqual(c2.fetchall(), [("first",),("second",),("third",)])
# the new name should be there
c1.execute("pragma branches")
c2.execute("pragma branches")
self.assertListEqual(c1.fetchall(), [("master",),("test",),("sub-test1",),("sub-test2",),("renamed-branch",),("test2",)])
self.assertListEqual(c2.fetchall(), [("master",),("test",),("sub-test1",),("sub-test2",),("renamed-branch",),("test2",)])
conn1.close()
conn2.close()
def test09_delete_branch(self):
# test: delete a branch and then try to access its children branches (should work)
# test: delete in one conn then try to access it in another conn (should fail)
# test: delete in one conn a branch that is in use in another conn (should delete. the current branch on the other conn should be invalid)
# test with invalid current branch (or no current branch): query (select), modification (insert),
conn1 = sqlite3.connect('file:test3.db?branches=on')
conn2 = sqlite3.connect('file:test3.db?branches=on')
c1 = conn1.cursor()
c2 = conn2.cursor()
c1.execute("pragma branches")
c2.execute("pragma branches")
self.assertListEqual(c1.fetchall(), [("master",),("test",),("sub-test1",),("sub-test2",),("renamed-branch",),("test2",)])
self.assertListEqual(c2.fetchall(), [("master",),("test",),("sub-test1",),("sub-test2",),("renamed-branch",),("test2",)])
c1.execute("pragma branch")
c2.execute("pragma branch")
self.assertEqual(c1.fetchone()[0], "master")
self.assertEqual(c2.fetchone()[0], "master")
# try to delete the branch that is the current one (should fail)
with self.assertRaises(sqlite3.OperationalError):
c1.execute("pragma del_branch(master)")
c1.execute("pragma branch")
c2.execute("pragma branch")
self.assertEqual(c1.fetchone()[0], "master")
self.assertEqual(c2.fetchone()[0], "master")
c1.execute("pragma branches")
c2.execute("pragma branches")
self.assertListEqual(c1.fetchall(), [("master",),("test",),("sub-test1",),("sub-test2",),("renamed-branch",),("test2",)])
self.assertListEqual(c2.fetchall(), [("master",),("test",),("sub-test1",),("sub-test2",),("renamed-branch",),("test2",)])
c1.execute("pragma branch=sub-test1")
c2.execute("pragma branch=sub-test1")
c1.execute("pragma branch")
c2.execute("pragma branch")
self.assertEqual(c1.fetchone()[0], "sub-test1")
self.assertEqual(c2.fetchone()[0], "sub-test1")
c1.execute("pragma del_branch(master)")
c1.execute("pragma branches")
c2.execute("pragma branches")
self.assertListEqual(c1.fetchall(), [("test",),("sub-test1",),("sub-test2",),("renamed-branch",),("test2",)])
self.assertListEqual(c2.fetchall(), [("test",),("sub-test1",),("sub-test2",),("renamed-branch",),("test2",)])
c1.execute("pragma branch")
c2.execute("pragma branch")
self.assertEqual(c1.fetchone()[0], "sub-test1")
self.assertEqual(c2.fetchone()[0], "sub-test1")
# delete a branch that is currently in use in another db connection
c2.execute("pragma branch=test2")
c2.execute("pragma branch")
self.assertEqual(c2.fetchone()[0], "test2")
c1.execute("pragma del_branch(test2)")
c1.execute("pragma branches")
self.assertListEqual(c1.fetchall(), [("test",),("sub-test1",),("sub-test2",),("renamed-branch",)])
with self.assertRaises(sqlite3.OperationalError):
c2.execute("pragma branches")
c1.execute("pragma branch")
self.assertEqual(c1.fetchone()[0], "sub-test1")
with self.assertRaises(sqlite3.OperationalError):
c2.execute("pragma branch")
c1.execute("pragma new_branch=new-one at test")
c1.execute("pragma branches")
self.assertListEqual(c1.fetchall(), [("test",),("sub-test1",),("sub-test2",),("renamed-branch",),("new-one",)])
c1.execute("pragma branch")
self.assertEqual(c1.fetchone()[0], "new-one")
c1.execute("select * from t1")
self.assertListEqual(c1.fetchall(), [("first",),("from test branch",),("val1",),(2,),(3.3,)])
'''
# try to move to the deleted branch
c1.execute("pragma branch=master")
c2.execute("pragma branch=master")
c1.execute("pragma branch")
c2.execute("pragma branch")
self.assertEqual(c1.fetchone()[0], "test")
self.assertEqual(c2.fetchone()[0], "")
c1.execute("pragma branch=sub-test1")
c1.execute("pragma branch")
self.assertEqual(c1.fetchone()[0], "sub-test1")
'''
c1.execute("pragma branch=test")
c1.execute("pragma branch")
self.assertEqual(c1.fetchone()[0], "test")
c1.execute("select * from t1")
self.assertListEqual(c1.fetchall(), [("first",),("from test branch",),("val1",),(2,),(3.3,)])
#with self.assertRaises(sqlite3.OperationalError):
# c2.execute("select * from t1")
# close and reopen the connections
conn1.close()
conn2.close()
conn1 = sqlite3.connect('file:test3.db?branches=on')
conn2 = sqlite3.connect('file:test3.db?branches=on&single_connection=true')
c1 = conn1.cursor()
c2 = conn2.cursor()
c1.execute("pragma branches")
c2.execute("pragma branches")
self.assertListEqual(c1.fetchall(), [("test",),("sub-test1",),("sub-test2",),("renamed-branch",),("new-one",)])
self.assertListEqual(c2.fetchall(), [("test",),("sub-test1",),("sub-test2",),("renamed-branch",),("new-one",)])
c1.execute("pragma branch")
c2.execute("pragma branch")
self.assertEqual(c1.fetchone()[0], "test")
self.assertEqual(c2.fetchone()[0], "test")
c1.execute("select * from t1")
c2.execute("select * from t1")
self.assertListEqual(c1.fetchall(), [("first",),("from test branch",),("val1",),(2,),(3.3,)])
self.assertListEqual(c2.fetchall(), [("first",),("from test branch",),("val1",),(2,),(3.3,)])
conn1.close()
conn2.close()
def test10_rollback(self):
conn = sqlite3.connect('file:test.db?branches=on')
conn.isolation_level = None # enables autocommit mode
c = conn.cursor()
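        # The rollback below must discard both the inserted row and the DDL that created t2.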
c.execute("pragma branch")
self.assertEqual(c.fetchone()[0], "master")
c.execute("begin")
c.execute("insert into t1 values ('another')")
c.execute("create table t2(name)")
c.execute("insert into t2 values ('first')")
c.execute("insert into t2 values ('second')")
c.execute("select * from t1")
self.assertListEqual(c.fetchall(), [("first",),("second",),("third",),("another",)])
c.execute("select * from t2")
self.assertListEqual(c.fetchall(), [("first",),("second",)])
conn.rollback()
with self.assertRaises(sqlite3.OperationalError):
c.execute("select * from t2")
c.execute("select name from sqlite_master")
self.assertListEqual(c.fetchall(), [("t1",)])
c.execute("select * from t1")
self.assertListEqual(c.fetchall(), [("first",),("second",),("third",)])
conn.close()
def test11_attached_dbs(self):
delete_file("test4.db")
delete_file("attached.db")
connat = sqlite3.connect('attached.db')
ca = connat.cursor()
ca.execute("pragma page_size")
self.assertEqual(ca.fetchone()[0], 4096)
ca.execute("pragma journal_mode")
self.assertEqual(ca.fetchone()[0], "delete")
ca.execute("create table t2(name)")
ca.execute("insert into t2 values ('att1')")
ca.execute("insert into t2 values ('att2')")
connat.commit()
ca.execute("select * from t2")
self.assertListEqual(ca.fetchall(), [("att1",),("att2",)])
delete_file("test1.db")
ca.execute("attach database 'test1.db' as temp1")
ca.execute("detach database temp1")
# test db with branches with attached db
conn1 = sqlite3.connect('file:test4.db?branches=on')
conn2 = sqlite3.connect('file:test4.db?branches=on')
if platform.system() == "Darwin":
conn1.isolation_level = None # enables autocommit mode
conn2.isolation_level = None # enables autocommit mode
c1 = conn1.cursor()
c2 = conn2.cursor()
c1.execute("pragma page_size")
c2.execute("pragma page_size")
self.assertEqual(c1.fetchone()[0], 4096)
self.assertEqual(c2.fetchone()[0], 4096)
c1.execute("pragma journal_mode")
c2.execute("pragma journal_mode")
self.assertEqual(c1.fetchone()[0], "branches")
self.assertEqual(c2.fetchone()[0], "branches")
c1.execute("pragma branch")
c2.execute("pragma branch")
self.assertEqual(c1.fetchone()[0], "master")
self.assertEqual(c2.fetchone()[0], "master")
# make modifications on connection 1
c1.execute("create table t1(name)")
conn1.commit()
c1.execute("insert into t1 values ('first')")
conn1.commit()
c1.execute("attach database 'attached.db' as sec")
# the new modifications should appear on connection 2
c2.execute("select * from t1")
self.assertListEqual(c2.fetchall(), [("first",)])
# the attached db and its tables should not appear on conn2
with self.assertRaises(sqlite3.OperationalError):
c2.execute("select * from t2")
with self.assertRaises(sqlite3.OperationalError):
c2.execute("select * from sec.t2")
c1.execute("select * from t2")
self.assertListEqual(c1.fetchall(), [("att1",),("att2",)])
c1.execute("select * from sec.t2")
self.assertListEqual(c1.fetchall(), [("att1",),("att2",)])
c1.execute("pragma sec.journal_mode")
self.assertEqual(c1.fetchone()[0], "delete")
c1.execute("pragma journal_mode")
self.assertEqual(c1.fetchone()[0], "branches")
# create a new branch on connection 1
c1.execute("pragma new_branch=dev at master.2")
c1.execute("pragma branch")
self.assertEqual(c1.fetchone()[0], "dev")
c1.execute("pragma branches")
self.assertListEqual(c1.fetchall(), [("master",),("dev",)])
c1.execute("insert into t1 values ('second')")
conn1.commit()
ca.execute("insert into t2 values ('att3')")
connat.commit()
c1.execute("select * from t2")
self.assertListEqual(c1.fetchall(), [("att1",),("att2",),("att3",)])
c1.execute("select * from sec.t2")
self.assertListEqual(c1.fetchall(), [("att1",),("att2",),("att3",)])
ca.execute("select * from t2")
self.assertListEqual(ca.fetchall(), [("att1",),("att2",),("att3",)])
c1.execute("insert into t2 values ('att4')")
conn1.commit()
ca.execute("select | |
            x_coordinates = [float(atom.Coords.x) for atom in forced_atomGroup[0]]
y_coordinates = [float(atom.Coords.y) for atom in forced_atomGroup[0]]
x_bins = np.arange(x_min, x_max, (x_max - x_min)/num_x_bins)
y_bins = np.arange(y_min, y_max, (y_max - y_min)/num_y_bins)
atom_x_bin = np.digitize(x_coordinates, x_bins)
atom_y_bin = np.digitize(y_coordinates, y_bins)
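            # np.digitize returns 1-based bin indices, hence the "- 1" offset when filling temp_binned_data.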
temp_binned_data = np.zeros((num_x_bins, num_y_bins))
forces_averaged = []
for n, atoms in enumerate(zip(*forced_atomGroup)):
average_at_coordinate = Point(0,0,0)
for atom in atoms:
average_at_coordinate += atom.Force
temp_binned_data[atom_x_bin[n] - 1][atom_y_bin[n] - 1] = average_at_coordinate.mod()/(num_frames_orignal -1)
binned_data[allKeys[i]].append(temp_binned_data)
else:
for key in binned_data_split[allKeys[i]]:
forced_atomGroup = upper_membrane if key == "Upper" else lower_membrane
x_min = math.floor(int(min([min(forced_atomGroup[windowc], key = lambda atom: atom.Coords.x) for windowc in range(window_size)], key = lambda atom: atom.Coords.x).Coords.x))
x_max = math.ceil(int(max([max(forced_atomGroup[windowc], key = lambda atom: atom.Coords.x) for windowc in range(window_size)], key = lambda atom: atom.Coords.x).Coords.x))
y_min = math.floor(int(min([min(forced_atomGroup[windowc], key = lambda atom: atom.Coords.y) for windowc in range(window_size)], key = lambda atom: atom.Coords.y).Coords.y))
y_max = math.ceil(int(max([max(forced_atomGroup[windowc], key = lambda atom: atom.Coords.y) for windowc in range(window_size)], key = lambda atom: atom.Coords.y).Coords.y))
x_coordinates = [float(atom.Coords.x) for atom in forced_atomGroup[0]]
y_coordinates = [float(atom.Coords.y) for atom in forced_atomGroup[0]]
x_bins = np.arange(x_min, x_max, (x_max - x_min)/num_x_bins)
y_bins = np.arange(y_min, y_max, (y_max - y_min)/num_y_bins)
atom_x_bin = np.digitize(x_coordinates, x_bins)
atom_y_bin = np.digitize(y_coordinates, y_bins)
temp_binned_data = np.zeros((num_x_bins, num_y_bins))
forces_averaged = []
for n, atoms in enumerate(zip(*forced_atomGroup)):
average_at_coordinate = Point(0,0,0)
for atom in atoms:
average_at_coordinate += atom.Force
temp_binned_data[atom_x_bin[n] - 1][atom_y_bin[n] - 1] = average_at_coordinate.mod()/(num_frames_orignal -1)
binned_data_split[allKeys[i]][key].append(temp_binned_data)
selected_atoms = ["P"]
curvature_plots = ["Z_Surface", "Mean_Curvature", "Gaussian_Curvature"]
if not split:
selected_residues = {}
axis_removed = {}
for i in range(len(selected_atoms)):
selected_residues[selected_atoms[i]] = {}
curvature_selected = MembraneCurvature(universe, select = f"type {selected_atoms[i]}", n_x_bins = num_x_bins, n_y_bins = num_y_bins).run() #, select = 'resid 0-1023', n_x_bins=12, n_y_bins=12
selected_residues[selected_atoms[i]]["Z_Surface"] = curvature_selected.results.z_surface
selected_residues[selected_atoms[i]]["Mean_Curvature"] = curvature_selected.results.mean
selected_residues[selected_atoms[i]]["Gaussian_Curvature"] = curvature_selected.results.gaussian
for i in range(len(selected_atoms)):
axis_removed[selected_atoms[i]] = {"Z_Surface" : list(range(num_frames)), "Mean_Curvature": list(range(num_frames)), "Gaussian_Curvature": list(range(num_frames))}
for window in range(0, num_frames, window_size):
z_sur = np.nanmean(np.array(selected_residues[selected_atoms[i]]["Z_Surface"][window : window+window_size]), axis = 0)
meanc = np.nanmean(np.array(selected_residues[selected_atoms[i]]["Mean_Curvature"][window : window+window_size]), axis = 0)
gausc = np.nanmean(np.array(selected_residues[selected_atoms[i]]["Gaussian_Curvature"][window : window+window_size]), axis = 0)
axis_removed[selected_atoms[i]]["Z_Surface"][window] = [np.unravel_index(np.nanargmax(z_sur), z_sur.shape)]
axis_removed[selected_atoms[i]]["Mean_Curvature"][window] = [np.unravel_index(np.nanargmax(meanc), meanc.shape)]
axis_removed[selected_atoms[i]]["Gaussian_Curvature"][window] = [np.unravel_index(np.nanargmax(gausc), gausc.shape)]
axis_removed[selected_atoms[i]]["Z_Surface"][window].append(np.unravel_index(np.nanargmin(z_sur), z_sur.shape))
axis_removed[selected_atoms[i]]["Mean_Curvature"][window].append(np.unravel_index(np.nanargmin(meanc), meanc.shape))
axis_removed[selected_atoms[i]]["Gaussian_Curvature"][window].append(np.unravel_index(np.nanargmin(gausc), gausc.shape))
else:
selected_residues_split = {"Upper": {}, "Lower": {}}
axis_removed_split = {"Upper": {}, "Lower": {}}
for split_key in selected_residues_split:
lower_limit = min(lower_range)[0] if split_key == "Lower" else min(upper_range)[0]
upper_limit = max(lower_range)[1] if split_key == "Lower" else max(upper_range)[1]
universe_selected = universe.select_atoms(f"prop z < {universe.select_atoms('type P').center_of_mass()[2]} and type P") if split_key == "Lower" else universe.select_atoms(f"prop z > {universe.select_atoms('type P').center_of_mass()[2]} and type P")
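            # Leaflets are separated by comparing each P atom's z coordinate against the z of the
            # center of mass of all P atoms.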
for i in range(len(selected_atoms)):
selected_residues_split[split_key][selected_atoms[i]] = {}
curvature_selected = MembraneCurvature(universe_selected, n_x_bins = num_x_bins, n_y_bins = num_y_bins).run()#, select = f"resid {lower_limit}:{upper_limit} and name {selected_atoms[i]}"
selected_residues_split[split_key][selected_atoms[i]]["Z_Surface"] = curvature_selected.results.z_surface
selected_residues_split[split_key][selected_atoms[i]]["Mean_Curvature"] = curvature_selected.results.mean
selected_residues_split[split_key][selected_atoms[i]]["Gaussian_Curvature"] = curvature_selected.results.gaussian
for i in range(len(selected_atoms)):
axis_removed_split[split_key][selected_atoms[i]] = {"Z_Surface" : list(range(num_frames)), "Mean_Curvature": list(range(num_frames)), "Gaussian_Curvature": list(range(num_frames))}
for window in range(0, num_frames, window_size):
z_sur = np.nanmean(np.array(selected_residues_split[split_key][selected_atoms[i]]["Z_Surface"][window : window+window_size]), axis = 0)
meanc = np.nanmean(np.array(selected_residues_split[split_key][selected_atoms[i]]["Mean_Curvature"][window : window+window_size]), axis = 0)
gausc = np.nanmean(np.array(selected_residues_split[split_key][selected_atoms[i]]["Gaussian_Curvature"][window : window+window_size]), axis = 0)
axis_removed_split[split_key][selected_atoms[i]]["Z_Surface"][window] = [np.unravel_index(np.nanargmax(z_sur), z_sur.shape)]
axis_removed_split[split_key][selected_atoms[i]]["Mean_Curvature"][window] = [np.unravel_index(np.nanargmax(meanc), meanc.shape)]
axis_removed_split[split_key][selected_atoms[i]]["Gaussian_Curvature"][window] = [np.unravel_index(np.nanargmax(gausc), gausc.shape)]
axis_removed_split[split_key][selected_atoms[i]]["Z_Surface"][window].append(np.unravel_index(np.nanargmin(z_sur), z_sur.shape))
axis_removed_split[split_key][selected_atoms[i]]["Mean_Curvature"][window].append(np.unravel_index(np.nanargmin(meanc), meanc.shape))
axis_removed_split[split_key][selected_atoms[i]]["Gaussian_Curvature"][window].append(np.unravel_index(np.nanargmin(gausc), gausc.shape))
rows = []
if not split:
for i in range(len(selected_atoms)):
window_angles = []
for windown, window in enumerate(range(0, num_frames, window_size)):
fig, ax = plt.subplots(ncols=3, nrows=3, constrained_layout=True)
            for n, j in enumerate(curvature_plots):
num = 0
im = ax[num][n].contourf(selected_residues[selected_atoms[i]][j][window], cmap='PuBuGn', origin='lower')
ax[num][n].contour(selected_residues[selected_atoms[i]][j][window], cmap = 'PuBuGn', origin='lower', levels=10)
ax[num][n].set_aspect('equal')
cbar = plt.colorbar(im, ticks=[np.nanmin(selected_residues[selected_atoms[i]][j][window]), np.nanmax(selected_residues[selected_atoms[i]][j][window])], orientation='horizontal', ax = ax[num][n], shrink=0.7)
cbar.ax.tick_params(labelsize=3, width=0.5)
ax[num][n].xaxis.set_tick_params(labelsize=4)
ax[num][n].yaxis.set_tick_params(labelsize=4)
ax[num][n].set_xlabel("X - Coordinate", {'fontsize':4})
ax[num][n].set_ylabel("Y - Coordinate", {'fontsize':4})
ax[num][n].set_title(f"Average {j}", fontdict={'fontsize':5}, pad=2)
cbar.set_label(f"{j} (nm$^{-1}$)", fontsize=4, labelpad=2)
            for n, j in enumerate(curvature_plots):
ax[1][n].plot(range(num_x_bins), selected_residues[selected_atoms[i]][j][window][:,axis_removed[selected_atoms[i]][j][window][0][1]], linewidth= 1, color='mediumseagreen', label="Max")
ax[1][n].plot(range(num_x_bins), selected_residues[selected_atoms[i]][j][window][:,axis_removed[selected_atoms[i]][j][window][1][1]], linewidth= 1, color='purple', label="Min")
ax[1][n].xaxis.set_tick_params(labelsize=4)
ax[1][n].yaxis.set_tick_params(labelsize=4)
ax[1][n].set_title(f"Y Bin ({axis_removed[selected_atoms[i]][j][window][0][1]},{axis_removed[selected_atoms[i]][j][window][1][1]}) Average {j} (nm$^{-1}$)", fontdict={'fontsize':5}, pad=2)
if n == 0:
ax[1][n].legend(loc="upper right", markerscale=0.3, fontsize='xx-small')
for lipid in range(numKeys):
im = ax[2][lipid].contourf(binned_data[allKeys[lipid]][windown], cmap='PuBuGn', origin='lower')
ax[2][lipid].contour(binned_data[allKeys[lipid]][windown], cmap = 'PuBuGn', origin='lower', levels=10)
ax[2][lipid].set_aspect('equal')
cbar = plt.colorbar(im, ticks=[binned_data[allKeys[lipid]][windown].min(), binned_data[allKeys[lipid]][windown].max()], orientation='horizontal', ax = ax[2][lipid], shrink=0.7)
cbar.ax.tick_params(labelsize=3, width=0.5)
ax[2][lipid].set_title(f"Forces on {allKeys[lipid]}", fontdict={'fontsize':5}, pad=2)
ax[2][lipid].set_xlabel("X - Coordinate", {'fontsize':4})
ax[2][lipid].set_ylabel("Y - Coordinate", {'fontsize':4})
ax[2][lipid].xaxis.set_tick_params(labelsize=4)
ax[2][lipid].yaxis.set_tick_params(labelsize=4)
cbar.set_label(f"Force Z (N)", fontsize=4, labelpad=2)
ax[2][2].remove()
if gangle != None:
window_angles.append((mean(gangle[list(gangle.keys())[0]][window:window+window_size]), mean(gangle[list(gangle.keys())[1]][window:window+window_size])))
plt.figtext(0.85, 0.2, f"Average Angle - \n {list(gangle.keys())[0]} : {window_angles[-1][0]} \n{list(gangle.keys())[1]} : {window_angles[-1][1]}", ha="center", fontdict={'fontsize':7})
fig.suptitle(f'Curvature Plots for Atom {selected_atoms[i]} - Window {window}-{window+window_size}', fontsize=12)
plt.savefig(f'{plot_name}_{num_x_bins}_{num_y_bins}_{selected_atoms[i]}_window_{window}_{window+window_size}.svg', dpi = 1000)
plt.close()
if len(window_angles) > 0:
angles_lipid1 = [x[0] for x in window_angles]
angles_lipid2 = [x[1] for x in window_angles]
plt.plot(range(len(angles_lipid1)), angles_lipid1, linewidth=1, color='crimson')
plt.plot(range(len(angles_lipid2)), angles_lipid2, linewidth=1, color='deepskyblue')
plt.xticks(range(len(window_angles)))
plt.xlabel("Window")
plt.ylabel("Average Angle")
plt.savefig(f'{plot_name}_angles_windowed_{window_size}.svg', dpi = 1000)
plt.close()
else:
for i in range(len(selected_atoms)):
for position in binned_data_split[allKeys[i]]:
window_angles = []
for windown, window in enumerate(range(0, num_frames, window_size)):
fig, ax = plt.subplots(ncols=3, nrows=3, constrained_layout=True)
                for n, j in enumerate(curvature_plots):
num = 0
im = ax[num][n].contourf(selected_residues_split[position][selected_atoms[i]][j][window], cmap='PuBuGn', origin='lower')
ax[num][n].contour(selected_residues_split[position][selected_atoms[i]][j][window], cmap = 'PuBuGn', origin='lower', levels=10)
ax[num][n].set_aspect('equal')
cbar = plt.colorbar(im, ticks=[np.nanmin(selected_residues_split[position][selected_atoms[i]][j][window]), np.nanmax(selected_residues_split[position][selected_atoms[i]][j][window])], orientation='horizontal', ax = ax[num][n], shrink=0.7)
cbar.ax.tick_params(labelsize=3, width=0.5)
ax[num][n].xaxis.set_tick_params(labelsize=4)
ax[num][n].yaxis.set_tick_params(labelsize=4)
ax[num][n].set_xlabel("X - Coordinate", {'fontsize':4})
ax[num][n].set_ylabel("Y - Coordinate", {'fontsize':4})
ax[num][n].set_title(f"Average {j}", fontdict={'fontsize':5}, pad=2)
cbar.set_label(f"{j} (nm$^{-1}$)", fontsize=4, labelpad=2)
                for n, j in enumerate(curvature_plots):
ax[1][n].plot(range(num_x_bins), selected_residues_split[position][selected_atoms[i]][j][window][:,axis_removed_split[position][selected_atoms[i]][j][window][0][1]], linewidth= 1, color='mediumseagreen', label="Max")
ax[1][n].plot(range(num_x_bins), selected_residues_split[position][selected_atoms[i]][j][window][:,axis_removed_split[position][selected_atoms[i]][j][window][1][1]], linewidth= 1, color='purple', label="Min")
ax[1][n].xaxis.set_tick_params(labelsize=4)
ax[1][n].yaxis.set_tick_params(labelsize=4)
ax[1][n].set_title(f"Y Bin ({axis_removed_split[position][selected_atoms[i]][j][window][0][1]},{axis_removed_split[position][selected_atoms[i]][j][window][1][1]}) Average {j} (nm$^{-1}$)", fontdict={'fontsize':5}, pad=2)
if n == 0:
ax[1][n].legend(loc="upper right", markerscale=0.3, fontsize='xx-small')
for lipid in range(numKeys):
im = ax[2][lipid].contourf(binned_data_split[allKeys[lipid]][position][windown], cmap='PuBuGn', origin='lower')
ax[2][lipid].contour(binned_data_split[allKeys[lipid]][position][windown], cmap = 'PuBuGn', origin='lower', levels=10)
ax[2][lipid].set_aspect('equal')
cbar = plt.colorbar(im, ticks=[binned_data_split[allKeys[lipid]][position][windown].min(), binned_data_split[allKeys[lipid]][position][windown].max()], orientation='horizontal', ax = ax[2][lipid], shrink=0.7)
cbar.ax.tick_params(labelsize=3, width=0.5)
ax[2][lipid].xaxis.set_tick_params(labelsize=4)
ax[2][lipid].yaxis.set_tick_params(labelsize=4)
ax[2][lipid].set_title(f"Forces on {allKeys[lipid]}", fontdict={'fontsize':5}, pad=2)
ax[2][lipid].set_xlabel("X - Coordinate", {'fontsize':4})
ax[2][lipid].set_ylabel("Y - Coordinate", {'fontsize':4})
cbar.set_label(f"Force Z (N)", fontsize=4, labelpad=2)
ax[2][2].remove()
if gangle != None:
window_angles.append((mean(gangle[position][list(gangle[position].keys())[0]][window:window+window_size]), mean(gangle[position][list(gangle[position].keys())[1]][window:window+window_size])))
plt.figtext(0.85, 0.2, f"Average Angle - \n {list(gangle[position].keys())[0]} : {window_angles[-1][0]} \n{list(gangle[position].keys())[1]} : {window_angles[-1][1]}", ha="center", fontdict={'fontsize':7})
fig.suptitle(f'Curvature Plots for Atom {selected_atoms[i]} {position} Membrane - Window {window}-{window+window_size}', fontsize=12)
plt.savefig(f'{plot_name}_{num_x_bins}_{num_y_bins}_{selected_atoms[i]}_{position}_window_{window}_{window+window_size}.svg', dpi = 1000)
plt.close()
if len(window_angles) > 0:
angles_lipid1 = [x[0] for x in window_angles]
angles_lipid2 = [x[1] for x in window_angles]
plt.plot(range(len(angles_lipid1)), angles_lipid1, linewidth=1, color='crimson')
plt.plot(range(len(angles_lipid2)), angles_lipid2, linewidth=1, color='deepskyblue')
plt.xticks(range(len(window_angles)))
plt.xlabel("Window")
plt.ylabel("Average Angle")
plt.legend(loc='upper left')
plt.savefig(f'{plot_name}_angles_windowed_{window_size}_{position}.svg', dpi = 1000)
plt.close()
def plot_msd(universe, select='all', msd_type='xyz', fft=True, timestep=1, start_index=None, end_index=None, plot_name="MSD"):
try:
MSD = msd.EinsteinMSD(universe,
select=select,
msd_type=msd_type,
fft=fft)
MSD.run()
        msd_result = MSD.results.timeseries
        lagtimes = np.arange(MSD.n_frames) * timestep  # make the lag-time axis
# plot the actual MSD
fig = plt.figure()
ax = plt.axes()
ax.plot(lagtimes, msd_result, color='blue', linestyle="-", label=r'MSD')
# calculate diffusion coefficient
        start_index = 0 if start_index is None else start_index
        end_index = -1 if end_index is None else end_index
        linear_model = linregress(lagtimes[start_index:end_index],
                                  msd_result[start_index:end_index])
        slope = linear_model.slope
        error = linear_model.rvalue  # correlation coefficient of the fit, not a standard error
        D = slope * 1 / (2 * MSD.dim_fac)
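        # Einstein relation: MSD(t) ~ 2 * dim * D * t, so D is the fitted slope divided by 2 * dim_fac.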
plt.xlabel("Frame", fontsize=10, labelpad=5)
plt.ylabel("MSD", fontsize=10, labelpad=5)
plt.title(f'Frame vs MSD (Diffusion coefficient : {D})')
plt.savefig(f"{plot_name}.svg", dpi = 1000)
plt.close()
logger.info(f"Diffution coefficient and MSD plotted")
except:
logger.error(traceback.format_exc())
sys.exit(0)
return
def gangle(trr_filename, tpr_filename, ndx_filename, pdb_filename, mda_universe, group1=None, group2=None, selection=None, grouping=None, c_atom_name="C4B", angles = False, g1="vector", g2="z", seltype="res_com", selrpos="res_com", mode="average", split = False):
random_string = ''.join(secrets.choice(string.ascii_uppercase + string.digits) for _ in range(10))
if angles == False:
if group1 == None or group2 == None:
raise ValueError('Enter valid lipid groups')
if split == False:
avg_angles = {}
membrane = {}
for i in mda_universe.atoms:
if i.resname == group1 or i.resname == group2:
if i.type == "P" or i.name == c_atom_name:
if i.resname not in membrane:
membrane[i.resname] = [i.id]
else:
membrane[i.resname].append(i.id)
ndx_filename = f'custom_membrane_{random_string}.ndx'
with open(ndx_filename, 'w') as f:
for i in membrane:
f.write(f"[ {i} ]\n")
group_str = " ".join([str(i) for i in membrane[i]])
f.write("\n".join(textwrap.wrap(group_str, 15)))
f.write("\n")
for group in [group1, group2]:
filename=f"angle_{group}_{random_string}.xvg"
subprocess.run(["gmx", "gangle", "-f", trr_filename, "-s", tpr_filename, "-n", ndx_filename, "-g1", g1, "-g2", g2, "-group1", group, "-seltype", seltype, "-selrpos", selrpos, "-oav", filename], stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
if mode=="average":
angle = []
with open(filename, "r") as f:
for line in f:
if (not line[0]=="#") and (not line[0]=="@"):
angle.append(float(line.split()[1]))
subprocess.run(["rm", filename])
avg_angles[group] = mean(angle)
elif mode=="framewise" or mode=="window":
angle = []
with open(filename, "r") as f:
for line in f:
if (not line[0]=="#") and (not line[0]=="@"):
angle.append(float(line.split()[1]))
subprocess.run(["rm", filename])
avg_angles[group] = angle
subprocess.run(["rm", ndx_filename])
return avg_angles
else:
membrane = {'Lower':{}, 'Upper':{}}
lower_membrane = [atom.resid for atom in mda_universe.select_atoms(f"prop z < {mda_universe.select_atoms('type P').center_of_mass()[2]} and type P")]
            upper_membrane = [atom.resid for atom in mda_universe.select_atoms(f"prop z > {mda_universe.select_atoms('type P').center_of_mass()[2]} and type P")]
# Source: tonsky/intellij-community, python/helpers/generator3.py
# encoding: utf-8
import atexit
import zipfile
from pycharm_generator_utils.clr_tools import *
from pycharm_generator_utils.util_methods import *
# TODO: Move all CLR-specific functions to clr_tools
debug_mode = True
quiet = False
# TODO move to property of Generator3 as soon as tests finished
@cached
def version():
return os.environ.get(ENV_VERSION, VERSION)
# TODO move to property of Generator3 as soon as tests finished
@cached
def required_gen_version_file_path():
return os.environ.get(ENV_REQUIRED_GEN_VERSION_FILE, os.path.join(_helpers_dir, 'required_gen_version'))
@cached
def is_test_mode():
return ENV_TEST_MODE_FLAG in os.environ
# Future generator mode where all the checks will be performed on Python side.
# Now it works in transitional mode where validity of existing SDK skeletons is checked on
# Java side (see PySkeletonRefresher), and generator itself inspects only the cache.
@cached
def is_standalone_mode():
return ENV_STANDALONE_MODE_FLAG in os.environ
@cached
def is_pregeneration_mode():
return ENV_PREGENERATION_MODE_FLAG in os.environ
_helpers_dir = os.path.dirname(os.path.abspath(__file__))
def redo_module(module_name, module_file_name, doing_builtins, cache_dir, sdk_dir=None):
# gobject does 'del _gobject' in its __init__.py, so the chained attribute lookup code
# fails to find 'gobject._gobject'. thus we need to pull the module directly out of
# sys.modules
mod = sys.modules.get(module_name)
mod_path = module_name.split('.')
if not mod and sys.platform == 'cli':
# "import System.Collections" in IronPython 2.7 doesn't actually put System.Collections in sys.modules
# instead, sys.modules['System'] get set to a Microsoft.Scripting.Actions.NamespaceTracker and Collections can be
# accessed as its attribute
mod = sys.modules[mod_path[0]]
for component in mod_path[1:]:
try:
mod = getattr(mod, component)
except AttributeError:
mod = None
report("Failed to find CLR module " + module_name)
break
if mod:
action("restoring")
from pycharm_generator_utils.module_redeclarator import ModuleRedeclarator
r = ModuleRedeclarator(mod, module_name, module_file_name, cache_dir=cache_dir, doing_builtins=doing_builtins)
create_failed_version_stamp(cache_dir, module_name)
r.redo(module_name, ".".join(mod_path[:-1]) in MODULES_INSPECT_DIR)
action("flushing")
r.flush()
delete_failed_version_stamp(cache_dir, module_name)
if sdk_dir:
# Incrementally copy whatever we managed to successfully generate so far
copy_skeletons(cache_dir, sdk_dir, get_module_origin(module_file_name, module_name))
else:
report("Failed to find imported module in sys.modules " + module_name)
# find_binaries functionality
def cut_binary_lib_suffix(path, f):
"""
@param path where f lives
@param f file name of a possible binary lib file (no path)
@return f without a binary suffix (that is, an importable name) if path+f is indeed a binary lib, or None.
Note: if for .pyc or .pyo file a .py is found, None is returned.
"""
if not f.endswith((".pyc", ".typelib", ".pyo", ".so", ".pyd")):
return None
ret = None
match = BIN_MODULE_FNAME_PAT.match(f)
if match:
ret = match.group(1)
modlen = len('module')
retlen = len(ret)
if ret.endswith('module') and retlen > modlen and f.endswith('.so'): # what for?
ret = ret[:(retlen - modlen)]
if f.endswith('.pyc') or f.endswith('.pyo'):
fullname = os.path.join(path, f[:-1]) # check for __pycache__ is made outside
if os.path.exists(fullname):
ret = None
pat_match = TYPELIB_MODULE_FNAME_PAT.match(f)
if pat_match:
ret = "gi.repository." + pat_match.group(1)
return ret
def is_posix_skipped_module(path, f):
if os.name == 'posix':
name = os.path.join(path, f)
for mod in POSIX_SKIP_MODULES:
if name.endswith(mod):
return True
return False
def is_mac_skipped_module(path, f):
fullname = os.path.join(path, f)
m = MAC_STDLIB_PATTERN.match(fullname)
if not m: return 0
relpath = m.group(2)
for module in MAC_SKIP_MODULES:
if relpath.startswith(module): return 1
return 0
def is_tensorflow_contrib_ops_module(qname):
# These modules cannot be imported directly. Instead tensorflow uses special
# tensorflow.contrib.util.loader.load_op_library() to load them and create
# Python modules at runtime. Their names in sys.modules are then md5 sums
# of the list of exported Python definitions.
return TENSORFLOW_CONTRIB_OPS_MODULE_PATTERN.match(qname)
def is_skipped_module(path, f, qname):
return (is_mac_skipped_module(path, f) or
is_posix_skipped_module(path, f[:f.rindex('.')]) or
'pynestkernel' in f or
is_tensorflow_contrib_ops_module(qname))
def is_module(d, root):
return (os.path.exists(os.path.join(root, d, "__init__.py")) or
os.path.exists(os.path.join(root, d, "__init__.pyc")) or
os.path.exists(os.path.join(root, d, "__init__.pyo")) or
is_valid_implicit_namespace_package_name(d))
def walk_python_path(path):
for root, dirs, files in os.walk(path):
if root.endswith('__pycache__'):
continue
dirs_copy = list(dirs)
for d in dirs_copy:
if d.endswith('__pycache__') or not is_module(d, root):
dirs.remove(d)
# some files show up but are actually non-existent symlinks
yield root, [f for f in files if os.path.exists(os.path.join(root, f))]
def list_binaries(paths):
"""
Finds binaries in the given list of paths.
Understands nested paths, as sys.paths have it (both "a/b" and "a/b/c").
Tries to be case-insensitive, but case-preserving.
@param paths: list of paths.
    @return: a list of (module_name, file_path, file_size, modification_timestamp) tuples
"""
SEP = os.path.sep
res = {} # {name.upper(): (name, full_path)} # b/c windows is case-oblivious
if not paths:
return {}
if IS_JAVA: # jython can't have binary modules
return {}
paths = sorted_no_case(paths)
for path in paths:
if path == os.path.dirname(sys.argv[0]): continue
for root, files in walk_python_path(path):
cutpoint = path.rfind(SEP)
if cutpoint > 0:
preprefix = path[(cutpoint + len(SEP)):] + '.'
else:
preprefix = ''
prefix = root[(len(path) + len(SEP)):].replace(SEP, '.')
if prefix:
prefix += '.'
binaries = ((f, cut_binary_lib_suffix(root, f)) for f in files)
binaries = [(f, name) for (f, name) in binaries if name]
if binaries:
note("root: %s path: %s prefix: %s preprefix: %s", root, path, prefix, preprefix)
for f, name in binaries:
the_name = prefix + name
if is_skipped_module(root, f, the_name):
note('skipping module %s' % the_name)
continue
note("cutout: %s", name)
if preprefix:
note("prefixes: %s %s", prefix, preprefix)
pre_name = (preprefix + prefix + name).upper()
if pre_name in res:
res.pop(pre_name) # there might be a dupe, if paths got both a/b and a/b/c
note("done with %s", name)
file_path = os.path.join(root, f)
res[the_name.upper()] = (the_name,
file_path,
os.path.getsize(file_path),
file_modification_timestamp(file_path))
return list(res.values())
def file_modification_timestamp(path):
return int(os.stat(path).st_mtime)
def is_source_file(path):
# Skip directories, character and block special devices, named pipes
# Do not skip regular files and symbolic links to regular files
if not os.path.isfile(path):
return False
    # Always include files with these extensions, regardless of their encoding.
if path.endswith(('-nspkg.pth', '.html', '.pxd', '.py', '.pyi', '.pyx')):
return True
has_bad_extension = path.endswith((
# plotlywidget/static/index.js.map is 8.7 MiB.
# Many map files from notebook are near 2 MiB.
'.js.map',
# uvloop/loop.c contains 6.4 MiB of code.
# Some header files from tensorflow has size more than 1 MiB.
'.h', '.c',
# Test data of pycrypto, many files are near 1 MiB.
'.rsp',
# No need to read these files even if they are small.
'.dll', '.pyc', '.pyd', '.pyo', '.so',
))
if has_bad_extension:
return False
return is_text_file(path)
def list_sources(paths):
# noinspection PyBroadException
try:
for path in paths:
if path == os.path.dirname(sys.argv[0]): continue
path = os.path.normpath(path)
if path.endswith('.egg') and os.path.isfile(path):
say("%s\t%s\t%d", path, path, os.path.getsize(path))
for root, files in walk_python_path(path):
for name in files:
file_path = os.path.join(root, name)
if is_source_file(file_path):
say("%s\t%s\t%d", os.path.normpath(file_path), path, os.path.getsize(file_path))
say('END')
sys.stdout.flush()
except:
import traceback
traceback.print_exc()
sys.exit(1)
# noinspection PyBroadException
def zip_sources(zip_path):
if not os.path.exists(zip_path):
os.makedirs(zip_path)
zip_filename = os.path.normpath(os.path.sep.join([zip_path, "skeletons.zip"]))
try:
zip = zipfile.ZipFile(zip_filename, 'w', zipfile.ZIP_DEFLATED)
except:
zip = zipfile.ZipFile(zip_filename, 'w')
try:
try:
while True:
line = sys.stdin.readline()
if not line:
# TextIOWrapper.readline returns an empty string if EOF is hit immediately.
break
line = line.strip()
if line == '-':
break
if line:
# This line will break the split:
# /.../dist-packages/setuptools/script template (dev).py setuptools/script template (dev).py
split_items = line.split()
if len(split_items) > 2:
match_two_files = re.match(r'^(.+\.py)\s+(.+\.py)$', line)
if not match_two_files:
report("Error(zip_sources): invalid line '%s'" % line)
continue
split_items = match_two_files.group(1, 2)
(path, arcpath) = split_items
zip.write(path, arcpath)
say('OK: ' + zip_filename)
sys.stdout.flush()
except:
import traceback
traceback.print_exc()
say('Error creating archive.')
sys.exit(1)
finally:
zip.close()
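# Sketch of the stdin protocol zip_sources() consumes, inferred from the parsing
# code above (an assumption, not an external spec): one "source_path arc_path"
# pair per line, terminated by a lone '-' or by EOF, e.g.:
#
#   /usr/lib/python3/dist-packages/foo/bar.py foo/bar.py
#   -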
def add_to_zip(zip, paths):
# noinspection PyBroadException
try:
for path in paths:
print("Walking root %s" % path)
if path == os.path.dirname(sys.argv[0]): continue
path = os.path.normpath(path)
if path.endswith('.egg') and os.path.isfile(path):
pass # TODO: handle eggs
for root, files in walk_python_path(path):
for name in files:
if name.endswith('.py') or name.endswith('-nspkg.pth'):
file_path = os.path.join(root, name)
arcpath = os.path.relpath(file_path, path)
zip.write(file_path, os.path.join(str(hash(path)), arcpath))
except:
import traceback
traceback.print_exc()
sys.exit(1)
def zip_stdlib(zip_path):
if not os.path.exists(zip_path):
os.makedirs(zip_path)
import platform
zip_filename = os.path.normpath(os.path.sep.join([zip_path, "%s-%s-stdlib-%s.zip" % (
'Anaconda' if sys.version.find('Anaconda') != -1 else 'Python',
'.'.join(map(str, sys.version_info)),
platform.platform())]))
print("Adding file to %s" % zip_filename)
try:
zip = zipfile.ZipFile(zip_filename, 'w', zipfile.ZIP_DEFLATED)
except:
zip = zipfile.ZipFile(zip_filename, 'w')
try:
add_to_zip(zip, sys.path)
finally:
zip.close()
def build_cache_dir_path(subdir, mod_qname, mod_path):
return os.path.join(subdir, module_hash(mod_qname, mod_path))
def module_hash(mod_qname, mod_path):
# Hash the content of a physical module
if mod_path:
hash_ = physical_module_hash(mod_path)
else:
hash_ = builtin_module_hash(mod_qname)
# Use shorter hashes in test data, as overly long file names might affect developers on Windows
if is_test_mode():
return hash_[:10]
return hash_
def builtin_module_hash(mod_qname):
# Hash the content of the interpreter executable, i.e. the hash will be the same for all
# built-in modules. It is also the same for a virtualenv interpreter and its base.
with fopen(sys.executable, 'rb') as f:
return sha256_digest(f)
def physical_module_hash(mod_path):
with fopen(mod_path, 'rb') as f:
return sha256_digest(f)
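# sha256_digest() is defined elsewhere in this script. A minimal sketch of its
# assumed behaviour (hash a binary file object in chunks) could look like:
#
#   import hashlib
#
#   def sha256_digest(binary_file):
#       digest = hashlib.sha256()
#       for chunk in iter(lambda: binary_file.read(1 << 16), b''):
#           digest.update(chunk)
#       return digest.hexdigest()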
def version_to_tuple(version):
return tuple(map(int, version.split('.')))
class OriginType(object):
FILE = 'FILE'
BUILTIN = '(built-in)'
PREGENERATED = '(pre-generated)'
class SkeletonStatus(object):
| |
<reponame>Bob-Chou/analytics-zoo
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import zoo.pipeline.api.autograd as autograd
from zoo.feature.image import ImageSet
from zoo.feature.text import TextSet
from zoo.pipeline.api.keras.base import ZooKerasLayer
from zoo.pipeline.api.keras.utils import *
from bigdl.nn.layer import Layer
if sys.version >= '3':
long = int
unicode = str
class KerasNet(ZooKerasLayer):
def save(self, path, over_write=False):
raise Exception("This is a deprecated method. Please use saveModel instead.")
def saveModel(self, modelPath, weightPath=None, over_write=False):
"""
Save this module to path with protobuf format.
:param modelPath: The path to save the module; local file system,
HDFS and Amazon S3 are supported.
An HDFS path should be like "hdfs://[host]:[port]/xxx".
An Amazon S3 path should be like "s3a://bucket/xxx".
:param weightPath: The path for the parameters.
:param over_write: Whether to overwrite the existing model at modelPath or not.
"""
super(KerasNet, self).saveModel(modelPath=modelPath,
weightPath=weightPath,
over_write=over_write)
def compile(self, optimizer, loss, metrics=None):
"""
Configure the learning process. It MUST be called before fit or evaluate.
# Arguments
optimizer: Optimization method to be used. One can alternatively pass in the corresponding
string representation, such as 'sgd'.
loss: Criterion to be used. One can alternatively pass in the corresponding string
representation, such as 'mse'.
metrics: List of validation methods to be used. Default is None if no validation is needed.
For convenience, string representations are supported: 'accuracy' (or 'acc'),
'top5accuracy' (or 'top5acc'), 'mae', 'auc', 'treennaccuracy' and 'loss'.
For example, you can either use [Accuracy()] or ['accuracy'].
"""
if isinstance(optimizer, six.string_types):
optimizer = to_bigdl_optim_method(optimizer)
criterion = loss
if isinstance(loss, six.string_types):
criterion = to_bigdl_criterion(loss)
if callable(loss):
from zoo.pipeline.api.autograd import CustomLoss
criterion = CustomLoss(loss, self.get_output_shape()[1:])
if metrics and all(isinstance(metric, six.string_types) for metric in metrics):
metrics = to_bigdl_metrics(metrics, loss)
callBigDlFunc(self.bigdl_type, "zooCompile",
self.value,
optimizer,
criterion,
metrics)
def set_tensorboard(self, log_dir, app_name):
"""
Set summary information during the training process for visualization purposes.
Saved summary can be viewed via TensorBoard.
In order to take effect, it needs to be called before fit.
Training summary will be saved to 'log_dir/app_name/train'
and validation summary (if any) will be saved to 'log_dir/app_name/validation'.
# Arguments
log_dir: The base directory path to store training and validation logs.
app_name: The name of the application.
"""
callBigDlFunc(self.bigdl_type, "zooSetTensorBoard",
self.value,
log_dir,
app_name)
def get_train_summary(self, tag=None):
"""
Get the scalar from the model train summary.
Returns a 2-D array-like object which can be converted
by np.array().
# Arguments
tag: The string variable representing the scalar wanted
"""
# Validate the requested tag before calling into the backend
if tag != "Loss" and tag != "LearningRate" and tag != "Throughput":
raise TypeError('Only "Loss", "LearningRate", "Throughput" '
+ 'are supported in train summary')
return callBigDlFunc(self.bigdl_type, "zooGetScalarFromSummary",
self.value, tag, "Train")
def get_validation_summary(self, tag=None):
"""
Get the scalar from the model validation summary.
Returns a 2-D array-like object which can be converted
by np.array().
# Arguments
tag: The string variable representing the scalar wanted
"""
validation_set = set(('AUC', 'Accuracy', 'BinaryAccuracy', 'CategoricalAccuracy',
'HitRatio', 'Loss', 'MAE', 'NDCG', 'SparseCategoricalAccuracy',
'TFValidationMethod', 'Top1Accuracy',
'Top5Accuracy', 'TreeNNAccuracy'))
if tag not in validation_set:
raise TypeError('Only subclasses of ValidationMethod are supported,'
+ ' which are ' + str(validation_set))
return callBigDlFunc(self.bigdl_type, "zooGetScalarFromSummary",
self.value, tag, "Validation")
def set_checkpoint(self, path, over_write=True):
"""
Configure checkpoint settings to write snapshots every epoch during the training process.
In order to take effect, it needs to be called before fit.
# Arguments
path: The path to save snapshots. Make sure this path exists beforehand.
over_write: Whether to overwrite existing snapshots in the given path. Default is True.
"""
callBigDlFunc(self.bigdl_type, "zooSetCheckpoint",
self.value,
path,
over_write)
def clear_gradient_clipping(self):
"""
Clear gradient clipping parameters. In this case, gradient clipping will not be applied.
In order to take effect, it needs to be called before fit.
"""
callBigDlFunc(self.bigdl_type, "zooClearGradientClipping",
self.value)
def set_constant_gradient_clipping(self, min, max):
"""
Set constant gradient clipping during the training process.
In order to take effect, it needs to be called before fit.
# Arguments
min: The minimum value to clip by. Float.
max: The maximum value to clip by. Float.
"""
callBigDlFunc(self.bigdl_type, "zooSetConstantGradientClipping",
self.value,
float(min),
float(max))
def set_gradient_clipping_by_l2_norm(self, clip_norm):
"""
Clip gradient to a maximum L2-Norm during the training process.
In order to take effect, it needs to be called before fit.
# Arguments
clip_norm: Gradient L2-Norm threshold. Float.
"""
callBigDlFunc(self.bigdl_type, "zooSetGradientClippingByL2Norm",
self.value,
float(clip_norm))
def set_evaluate_status(self):
"""
Set the model to be in evaluate status, i.e. remove the effect of Dropout, etc.
"""
callBigDlFunc(self.bigdl_type, "zooSetEvaluateStatus",
self.value)
return self
def fit(self, x, y=None, batch_size=32, nb_epoch=10,
validation_split=0, validation_data=None, distributed=True):
"""
Train a model for a fixed number of epochs on a DataSet.
# Arguments
x: Input data. A Numpy array or RDD of Sample, ImageSet or TextSet.
y: Labels. A Numpy array. Default is None if x is already Sample RDD or ImageSet or TextSet.
batch_size: Number of samples per gradient update. Default is 32.
nb_epoch: Number of epochs to train.
validation_data: Tuple (x_val, y_val) where x_val and y_val are both Numpy arrays.
Can also be RDD of Sample or ImageSet or TextSet.
Default is None if no validation is involved.
distributed: Boolean. Whether to train the model in distributed mode or local mode.
Default is True. In local mode, x and y must both be Numpy arrays.
"""
if distributed:
if isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
if validation_data:
validation_data = to_sample_rdd(*validation_data)
elif validation_split != 0:
if validation_split > 1 or validation_split < 0:
raise TypeError("validation split must in range [0, 1]")
split_index = int(len(x) * (1 - validation_split))
validation_data = (x[split_index:], y[split_index:])
x, y = x[:split_index], y[:split_index]
validation_data = to_sample_rdd(*validation_data)
training_data = to_sample_rdd(x, y)
elif (isinstance(x, RDD) or isinstance(x, ImageSet) or isinstance(x, TextSet))\
and not y:
training_data = x
else:
raise TypeError("Unsupported training data type: %s" % type(x))
callBigDlFunc(self.bigdl_type, "zooFit",
self.value,
training_data,
batch_size,
nb_epoch,
validation_data)
else:
if validation_data:
val_x = [JTensor.from_ndarray(x) for x in to_list(validation_data[0])]
val_y = JTensor.from_ndarray(validation_data[1])
else:
val_x, val_y = None, None
callBigDlFunc(self.bigdl_type, "zooFit",
self.value,
[JTensor.from_ndarray(x) for x in to_list(x)],
JTensor.from_ndarray(y),
batch_size,
nb_epoch,
val_x,
val_y)
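# Hypothetical usage sketch (model/data names are assumptions, not from this
# module): train a compiled KerasNet locally on NumPy arrays, then distributed
# with an explicit validation split.
#
#   model.compile(optimizer='sgd', loss='mse', metrics=['accuracy'])
#   model.fit(x_train, y_train, batch_size=64, nb_epoch=5, distributed=False)
#   model.fit(x_train, y_train, validation_split=0.1, distributed=True)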
def evaluate(self, x, y=None, batch_size=32):
"""
Evaluate a model on a given dataset in distributed mode.
# Arguments
x: Evaluation data. A Numpy array or RDD of Sample or ImageSet or TextSet.
y: Labels. A Numpy array.
Default is None if x is already Sample RDD or ImageSet or TextSet.
batch_size: Number of samples per batch. Default is 32.
"""
if isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
data = to_sample_rdd(x, y)
elif (isinstance(x, RDD) or isinstance(x, ImageSet) or isinstance(x, TextSet)) and not y:
data = x
else:
raise TypeError("Unsupported evaluation data type: %s" % type(x))
return callBigDlFunc(self.bigdl_type, "zooEvaluate",
self.value,
data,
batch_size)
def forward(self, input):
"""
NB: It's for debug only, please use optimizer.optimize() in production.
Takes an input object, and computes the corresponding output of the module
:param input: ndarray or list of ndarray or JTensor or list of JTensor.
:return: ndarray or list of ndarray
"""
jinput, input_is_table = self.check_input(input)
output = callBigDlFunc(self.bigdl_type,
"zooForward",
self.value,
jinput,
input_is_table)
return self.convert_output(output)
@staticmethod
def convert_output(output):
if type(output) is JTensor:
return output.to_ndarray()
elif len(output) == 1:
return KerasNet.convert_output(output[0])
else:
return [KerasNet.convert_output(x) for x in output]
def predict(self, x, batch_per_thread=4, distributed=True):
"""
Use a model to do prediction.
# Arguments
x: Prediction data. A Numpy array or RDD of Sample or ImageSet.
batch_per_thread: The default value is 4.
When distributed is True, the total batch size is batch_per_thread * rdd.getNumPartitions.
When distributed is False, the total batch size is batch_per_thread * numOfCores.
distributed: Boolean. Whether to do prediction in distributed mode or local mode.
Default is True. In local mode, x must be a Numpy array.
"""
if isinstance(x, ImageSet) or isinstance(x, TextSet):
results = callBigDlFunc(self.bigdl_type, "zooPredict",
self.value,
x,
batch_per_thread)
| |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'FormGuncellee.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class MyyUi_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1173, 1173)
MainWindow.setStyleSheet("/* ---------------------------------------------------------------------------\n"
"\n"
" Created by the qtsass compiler v0.1.1\n"
"\n"
" The definitions are in the \"qdarkstyle.qss._styles.scss\" module\n"
"\n"
" WARNING! All changes made in this file will be lost!\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"/* QDarkStyleSheet -----------------------------------------------------------\n"
"\n"
"This is the main style sheet, the palette has nine colors.\n"
"\n"
"It is based on three selecting colors, three greyish (background) colors\n"
"plus three whitish (foreground) colors. Each set of widgets of the same\n"
"type have a header like this:\n"
"\n"
" ------------------\n"
" GroupName --------\n"
" ------------------\n"
"\n"
"And each widget is separated with a header like this:\n"
"\n"
" QWidgetName ------\n"
"\n"
"This makes more easy to find and change some css field. The basic\n"
"configuration is described bellow.\n"
"\n"
" BACKGROUND -----------\n"
"\n"
" Light (unpressed)\n"
" Normal (border, disabled, pressed, checked, toolbars, menus)\n"
" Dark (background)\n"
"\n"
" FOREGROUND -----------\n"
"\n"
" Light (texts/labels)\n"
" Normal (not used yet)\n"
" Dark (disabled texts)\n"
"\n"
" SELECTION ------------\n"
"\n"
" Light (selection/hover/active)\n"
" Normal (selected)\n"
" Dark (selected disabled)\n"
"\n"
"If a stranger configuration is required because of a bugfix or anything\n"
"else, keep the comment on the line above so nobody changes it, including the\n"
"issue number.\n"
"\n"
"*/\n"
"/*\n"
"\n"
"See Qt documentation:\n"
"\n"
" - https://doc.qt.io/qt-5/stylesheet.html\n"
" - https://doc.qt.io/qt-5/stylesheet-reference.html\n"
" - https://doc.qt.io/qt-5/stylesheet-examples.html\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"/* QWidget ----------------------------------------------------------------\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QWidget {\n"
" background-color: #19232D;\n"
" border: 0px solid #32414B;\n"
" padding: 0px;\n"
" color: #F0F0F0;\n"
" selection-background-color: #1464A0;\n"
" selection-color: #F0F0F0;\n"
"}\n"
"\n"
"QWidget:disabled {\n"
" background-color: #19232D;\n"
" color: #787878;\n"
" selection-background-color: #14506E;\n"
" selection-color: #787878;\n"
"}\n"
"\n"
"QWidget::item:selected {\n"
" background-color: #1464A0;\n"
"}\n"
"\n"
"QWidget::item:hover {\n"
" background-color: #148CD2;\n"
" color: #32414B;\n"
"}\n"
"\n"
"/* QMainWindow ------------------------------------------------------------\n"
"\n"
"This adjusts the splitter in the dock widget, not qsplitter\n"
"https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-qmainwindow\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QMainWindow::separator {\n"
" background-color: #32414B;\n"
" border: 0px solid #19232D;\n"
" spacing: 0px;\n"
" padding: 2px;\n"
"}\n"
"\n"
"QMainWindow::separator:hover {\n"
" background-color: #505F69;\n"
" border: 0px solid #148CD2;\n"
"}\n"
"\n"
"QMainWindow::separator:horizontal {\n"
" width: 5px;\n"
" margin-top: 2px;\n"
" margin-bottom: 2px;\n"
" image: url(\":/qss_icons/rc/toolbar_separator_vertical.png\");\n"
"}\n"
"\n"
"QMainWindow::separator:vertical {\n"
" height: 5px;\n"
" margin-left: 2px;\n"
" margin-right: 2px;\n"
" image: url(\":/qss_icons/rc/toolbar_separator_horizontal.png\");\n"
"}\n"
"\n"
"/* QToolTip ---------------------------------------------------------------\n"
"\n"
"https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-qtooltip\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QToolTip {\n"
" background-color: #148CD2;\n"
" border: 1px solid #19232D;\n"
" color: #19232D;\n"
" /* Remove padding, for fix combo box tooltip */\n"
" padding: 0px;\n"
" /* Remove opacity, fix #174 - may need to use RGBA */\n"
"}\n"
"\n"
"/* QStatusBar -------------------------------------------------------------\n"
"\n"
"https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-qstatusbar\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QStatusBar {\n"
" border: 1px solid #32414B;\n"
" /* Fixes Spyder #9120, #9121 */\n"
" background: #32414B;\n"
" /* Fixes #205, white vertical borders separating items */\n"
"}\n"
"\n"
"QStatusBar::item {\n"
" border: none;\n"
"}\n"
"\n"
"QStatusBar QToolTip {\n"
" background-color: #148CD2;\n"
" border: 1px solid #19232D;\n"
" color: #19232D;\n"
" /* Remove padding, for fix combo box tooltip */\n"
" padding: 0px;\n"
" /* Reducing transparency to read better */\n"
" opacity: 230;\n"
"}\n"
"\n"
"QStatusBar QLabel {\n"
" /* Fixes Spyder #9120, #9121 */\n"
" background: transparent;\n"
"}\n"
"\n"
"/* QCheckBox --------------------------------------------------------------\n"
"\n"
"https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-qcheckbox\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QCheckBox {\n"
" background-color: #19232D;\n"
" color: #F0F0F0;\n"
" spacing: 4px;\n"
" outline: none;\n"
" padding-top: 4px;\n"
" padding-bottom: 4px;\n"
"}\n"
"\n"
"QCheckBox:focus {\n"
" border: none;\n"
"}\n"
"\n"
"QCheckBox QWidget:disabled {\n"
" background-color: #19232D;\n"
" color: #787878;\n"
"}\n"
"\n"
"QCheckBox::indicator {\n"
" margin-left: 4px;\n"
" height: 16px;\n"
" width: 16px;\n"
"}\n"
"\n"
"QCheckBox::indicator:unchecked {\n"
" image: url(\":/qss_icons/rc/checkbox_unchecked.png\");\n"
"}\n"
"\n"
"QCheckBox::indicator:unchecked:hover, QCheckBox::indicator:unchecked:focus, QCheckBox::indicator:unchecked:pressed {\n"
" border: none;\n"
" image: url(\":/qss_icons/rc/checkbox_unchecked_focus.png\");\n"
"}\n"
"\n"
"QCheckBox::indicator:unchecked:disabled {\n"
" image: url(\":/qss_icons/rc/checkbox_unchecked_disabled.png\");\n"
"}\n"
"\n"
"QCheckBox::indicator:checked {\n"
" image: url(\":/qss_icons/rc/checkbox_checked.png\");\n"
"}\n"
"\n"
"QCheckBox::indicator:checked:hover, QCheckBox::indicator:checked:focus, QCheckBox::indicator:checked:pressed {\n"
" border: none;\n"
" image: url(\":/qss_icons/rc/checkbox_checked_focus.png\");\n"
"}\n"
"\n"
"QCheckBox::indicator:checked:disabled {\n"
" image: url(\":/qss_icons/rc/checkbox_checked_disabled.png\");\n"
"}\n"
"\n"
"QCheckBox::indicator:indeterminate {\n"
" image: url(\":/qss_icons/rc/checkbox_indeterminate.png\");\n"
"}\n"
"\n"
"QCheckBox::indicator:indeterminate:disabled {\n"
" image: url(\":/qss_icons/rc/checkbox_indeterminate_disabled.png\");\n"
"}\n"
"\n"
"QCheckBox::indicator:indeterminate:focus, QCheckBox::indicator:indeterminate:hover, QCheckBox::indicator:indeterminate:pressed {\n"
" image: url(\":/qss_icons/rc/checkbox_indeterminate_focus.png\");\n"
"}\n"
"\n"
"/* QGroupBox --------------------------------------------------------------\n"
"\n"
"https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-qgroupbox\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QGroupBox {\n"
" font-weight: bold;\n"
" border: 1px solid #32414B;\n"
" border-radius: 4px;\n"
" padding: 4px;\n"
" margin-top: 16px;\n"
"}\n"
"\n"
"QGroupBox::title {\n"
" subcontrol-origin: margin;\n"
" subcontrol-position: top left;\n"
" left: 3px;\n"
" padding-left: 3px;\n"
" padding-right: 5px;\n"
" padding-top: 8px;\n"
" padding-bottom: 16px;\n"
"}\n"
"\n"
"QGroupBox::indicator {\n"
" margin-left: 2px;\n"
" height: 16px;\n"
" width: 16px;\n"
"}\n"
"\n"
"QGroupBox::indicator:unchecked {\n"
" border: none;\n"
" image: url(\":/qss_icons/rc/checkbox_unchecked.png\");\n"
"}\n"
"\n"
"QGroupBox::indicator:unchecked:hover, QGroupBox::indicator:unchecked:focus, QGroupBox::indicator:unchecked:pressed {\n"
" border: none;\n"
" image: url(\":/qss_icons/rc/checkbox_unchecked_focus.png\");\n"
"}\n"
"\n"
"QGroupBox::indicator:unchecked:disabled {\n"
" image: url(\":/qss_icons/rc/checkbox_unchecked_disabled.png\");\n"
"}\n"
"\n"
"QGroupBox::indicator:checked {\n"
" border: none;\n"
" image: url(\":/qss_icons/rc/checkbox_checked.png\");\n"
"}\n"
"\n"
"QGroupBox::indicator:checked:hover, QGroupBox::indicator:checked:focus, QGroupBox::indicator:checked:pressed {\n"
" border: none;\n"
" image: url(\":/qss_icons/rc/checkbox_checked_focus.png\");\n"
"}\n"
"\n"
"QGroupBox::indicator:checked:disabled {\n"
" image: url(\":/qss_icons/rc/checkbox_checked_disabled.png\");\n"
"}\n"
"\n"
"/* QRadioButton -----------------------------------------------------------\n"
"\n"
"https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-qradiobutton\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QRadioButton {\n"
" background-color: #19232D;\n"
" color: #F0F0F0;\n"
" spacing: 4px;\n"
" padding: 0px;\n"
" border: none;\n"
" outline: none;\n"
"}\n"
"\n"
"QRadioButton:focus {\n"
" border: none;\n"
"}\n"
"\n"
"QRadioButton:disabled {\n"
" background-color: #19232D;\n"
" color: #787878;\n"
" border: none;\n"
" outline: none;\n"
"}\n"
"\n"
"QRadioButton QWidget {\n"
" background-color: #19232D;\n"
" color: #F0F0F0;\n"
" spacing: 0px;\n"
" padding: 0px;\n"
" outline: none;\n"
" border: none;\n"
"}\n"
"\n"
"QRadioButton::indicator {\n"
" border: none;\n"
" outline: none;\n"
" margin-left: 4px;\n"
" height: 16px;\n"
" width: 16px;\n"
"}\n"
"\n"
"QRadioButton::indicator:unchecked {\n"
" image: url(\":/qss_icons/rc/radio_unchecked.png\");\n"
"}\n"
"\n"
"QRadioButton::indicator:unchecked:hover, QRadioButton::indicator:unchecked:focus, QRadioButton::indicator:unchecked:pressed {\n"
" border: none;\n"
" outline: none;\n"
" image: url(\":/qss_icons/rc/radio_unchecked_focus.png\");\n"
"}\n"
"\n"
"QRadioButton::indicator:unchecked:disabled {\n"
" image: url(\":/qss_icons/rc/radio_unchecked_disabled.png\");\n"
"}\n"
"\n"
"QRadioButton::indicator:checked {\n"
" border: none;\n"
" outline: none;\n"
" image: url(\":/qss_icons/rc/radio_checked.png\");\n"
"}\n"
"\n"
"QRadioButton::indicator:checked:hover, QRadioButton::indicator:checked:focus, QRadioButton::indicator:checked:pressed {\n"
" border: none;\n"
" outline: none;\n"
" image: url(\":/qss_icons/rc/radio_checked_focus.png\");\n"
"}\n"
"\n"
"QRadioButton::indicator:checked:disabled {\n"
" outline: none;\n"
" image: url(\":/qss_icons/rc/radio_checked_disabled.png\");\n"
"}\n"
"\n"
"/* QMenuBar ---------------------------------------------------------------\n"
"\n"
"https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-qmenubar\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QMenuBar {\n"
" background-color: #32414B;\n"
" padding: 2px;\n"
" border: 1px solid #19232D;\n"
" color: #F0F0F0;\n"
"}\n"
"\n"
"QMenuBar:focus {\n"
" border: 1px solid #148CD2;\n"
"}\n"
"\n"
"QMenuBar::item {\n"
" background: transparent;\n"
" padding: 4px;\n"
"}\n"
"\n"
"QMenuBar::item:selected {\n"
" padding: 4px;\n"
" background: transparent;\n"
" border: 0px solid #32414B;\n"
"}\n"
"\n"
"QMenuBar::item:pressed {\n"
" padding: 4px;\n"
" border: 0px solid #32414B;\n"
" background-color: #148CD2;\n"
" color: #F0F0F0;\n"
" margin-bottom: 0px;\n"
" padding-bottom: 0px;\n"
"}\n"
"\n"
"/* QMenu ------------------------------------------------------------------\n"
"\n"
"https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-qmenu\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QMenu {\n"
" border: 0px solid #32414B;\n"
" color: #F0F0F0;\n"
" margin: 0px;\n"
"}\n"
"\n"
"QMenu::separator {\n"
" height: 1px;\n"
" background-color: #505F69;\n"
" color: #F0F0F0;\n"
"}\n"
"\n"
"QMenu::icon {\n"
" margin: 0px;\n"
" padding-left: 8px;\n"
"}\n"
"\n"
"QMenu::item {\n"
" background-color: #32414B;\n"
" padding: 4px 24px 4px 24px;\n"
" /* Reserve space for selection border */\n"
" border: 1px transparent #32414B;\n"
"}\n"
"\n"
"QMenu::item:selected {\n"
" color: #F0F0F0;\n"
"}\n"
"\n"
"QMenu::indicator {\n"
" width: 12px;\n"
" height: 12px;\n"
" padding-left: 6px;\n"
" /* non-exclusive indicator = check box style indicator (see QActionGroup::setExclusive) */\n"
" /* exclusive indicator = radio button style indicator (see QActionGroup::setExclusive) */\n"
"}\n"
"\n"
"QMenu::indicator:non-exclusive:unchecked {\n"
" image: url(\":/qss_icons/rc/checkbox_unchecked.png\");\n"
"}\n"
"\n"
"QMenu::indicator:non-exclusive:unchecked:selected {\n"
" image: url(\":/qss_icons/rc/checkbox_unchecked_disabled.png\");\n"
"}\n"
"\n"
"QMenu::indicator:non-exclusive:checked {\n"
" image: url(\":/qss_icons/rc/checkbox_checked.png\");\n"
"}\n"
"\n"
"QMenu::indicator:non-exclusive:checked:selected {\n"
" image: url(\":/qss_icons/rc/checkbox_checked_disabled.png\");\n"
"}\n"
"\n"
"QMenu::indicator:exclusive:unchecked {\n"
" image: url(\":/qss_icons/rc/radio_unchecked.png\");\n"
"}\n"
"\n"
"QMenu::indicator:exclusive:unchecked:selected {\n"
" image: url(\":/qss_icons/rc/radio_unchecked_disabled.png\");\n"
"}\n"
"\n"
"QMenu::indicator:exclusive:checked {\n"
" image: url(\":/qss_icons/rc/radio_checked.png\");\n"
"}\n"
"\n"
"QMenu::indicator:exclusive:checked:selected {\n"
" image: url(\":/qss_icons/rc/radio_checked_disabled.png\");\n"
"}\n"
"\n"
"QMenu::right-arrow {\n"
" margin: 5px;\n"
" image: url(\":/qss_icons/rc/arrow_right.png\");\n"
" height: 12px;\n"
" width: 12px;\n"
"}\n"
"\n"
"/* QAbstractItemView ------------------------------------------------------\n"
"\n"
"https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-qcombobox\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QAbstractItemView {\n"
" alternate-background-color: #19232D;\n"
" color: #F0F0F0;\n"
" border: 1px solid #32414B;\n"
" border-radius: 4px;\n"
"}\n"
"\n"
"QAbstractItemView QLineEdit {\n"
" padding: 2px;\n"
"}\n"
"\n"
"/* QAbstractScrollArea ----------------------------------------------------\n"
"\n"
"https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-qabstractscrollarea\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QAbstractScrollArea {\n"
" background-color: #19232D;\n"
" border: 1px solid #32414B;\n"
" border-radius: 4px;\n"
" padding: 2px;\n"
" /* fix #159 */\n"
" min-height: 1.25em;\n"
" /* fix #159 */\n"
" color: #F0F0F0;\n"
"}\n"
"\n"
"QAbstractScrollArea:disabled {\n"
" color: #787878;\n"
"}\n"
"\n"
"/* QScrollArea ------------------------------------------------------------\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QScrollArea QWidget QWidget:disabled {\n"
" background-color: #19232D;\n"
"}\n"
"\n"
"/* QScrollBar -------------------------------------------------------------\n"
"\n"
"https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-qscrollbar\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QScrollBar:horizontal {\n"
" height: 16px;\n"
" margin: 2px 16px 2px 16px;\n"
" border: 1px solid #32414B;\n"
" border-radius: 4px;\n"
" background-color: #19232D;\n"
"}\n"
"\n"
"QScrollBar:vertical {\n"
" background-color: #19232D;\n"
" width: 16px;\n"
" margin: 16px 2px 16px 2px;\n"
" border: 1px solid #32414B;\n"
" border-radius: 4px;\n"
"}\n"
"\n"
"QScrollBar::handle:horizontal {\n"
" background-color: #787878;\n"
" border: 1px solid #32414B;\n"
" border-radius: 4px;\n"
" min-width: 8px;\n"
"}\n"
"\n"
"QScrollBar::handle:horizontal:hover {\n"
" background-color: #148CD2;\n"
" border: 1px solid #148CD2;\n"
" border-radius: 4px;\n"
" min-width: 8px;\n"
"}\n"
"\n"
"QScrollBar::handle:horizontal:focus {\n"
" border: 1px solid #1464A0;\n"
"}\n"
"\n"
"QScrollBar::handle:vertical {\n"
" background-color: #787878;\n"
" border: 1px solid #32414B;\n"
" min-height: 8px;\n"
" border-radius: 4px;\n"
"}\n"
"\n"
"QScrollBar::handle:vertical:hover {\n"
" background-color: #148CD2;\n"
" border: 1px solid #148CD2;\n"
" border-radius: 4px;\n"
" min-height: 8px;\n"
"}\n"
"\n"
"QScrollBar::handle:vertical:focus {\n"
" border: 1px solid #1464A0;\n"
"}\n"
"\n"
"QScrollBar::add-line:horizontal {\n"
" margin: 0px 0px 0px 0px;\n"
" border-image: url(\":/qss_icons/rc/arrow_right_disabled.png\");\n"
" height: 12px;\n"
" width: 12px;\n"
" subcontrol-position: right;\n"
" subcontrol-origin: margin;\n"
"}\n"
"\n"
"QScrollBar::add-line:horizontal:hover, QScrollBar::add-line:horizontal:on {\n"
" border-image: url(\":/qss_icons/rc/arrow_right.png\");\n"
" height: 12px;\n"
" width: 12px;\n"
" subcontrol-position: right;\n"
" subcontrol-origin: margin;\n"
"}\n"
"\n"
"QScrollBar::add-line:vertical {\n"
" margin: 3px 0px 3px 0px;\n"
" border-image: url(\":/qss_icons/rc/arrow_down_disabled.png\");\n"
" height: 12px;\n"
" width: 12px;\n"
" subcontrol-position: bottom;\n"
" subcontrol-origin: margin;\n"
"}\n"
"\n"
"QScrollBar::add-line:vertical:hover, QScrollBar::add-line:vertical:on {\n"
" border-image: url(\":/qss_icons/rc/arrow_down.png\");\n"
" height: 12px;\n"
" width: 12px;\n"
" subcontrol-position: bottom;\n"
" subcontrol-origin: margin;\n"
"}\n"
"\n"
"QScrollBar::sub-line:horizontal {\n"
" margin: 0px 3px 0px 3px;\n"
" border-image: url(\":/qss_icons/rc/arrow_left_disabled.png\");\n"
" height: 12px;\n"
" width: 12px;\n"
" subcontrol-position: left;\n"
" subcontrol-origin: margin;\n"
"}\n"
"\n"
"QScrollBar::sub-line:horizontal:hover, QScrollBar::sub-line:horizontal:on {\n"
" border-image: url(\":/qss_icons/rc/arrow_left.png\");\n"
" height: 12px;\n"
" width: 12px;\n"
" subcontrol-position: left;\n"
" subcontrol-origin: margin;\n"
"}\n"
"\n"
"QScrollBar::sub-line:vertical {\n"
" margin: 3px 0px 3px 0px;\n"
" border-image: url(\":/qss_icons/rc/arrow_up_disabled.png\");\n"
" height: 12px;\n"
" width: 12px;\n"
" subcontrol-position: top;\n"
" subcontrol-origin: margin;\n"
"}\n"
"\n"
"QScrollBar::sub-line:vertical:hover, QScrollBar::sub-line:vertical:on {\n"
" border-image: url(\":/qss_icons/rc/arrow_up.png\");\n"
" height: 12px;\n"
" width: 12px;\n"
" subcontrol-position: top;\n"
" subcontrol-origin: margin;\n"
"}\n"
"\n"
"QScrollBar::up-arrow:horizontal, QScrollBar::down-arrow:horizontal {\n"
" background: none;\n"
"}\n"
"\n"
"QScrollBar::up-arrow:vertical, QScrollBar::down-arrow:vertical {\n"
" background: none;\n"
"}\n"
"\n"
"QScrollBar::add-page:horizontal, QScrollBar::sub-page:horizontal {\n"
" background: none;\n"
"}\n"
"\n"
"QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {\n"
" background: none;\n"
"}\n"
"\n"
"/* QTextEdit --------------------------------------------------------------\n"
"\n"
"https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-specific-widgets\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QTextEdit {\n"
" background-color: #19232D;\n"
" color: #F0F0F0;\n"
" border-radius: 4px;\n"
" border: 1px solid #32414B;\n"
"}\n"
"\n"
"QTextEdit:hover {\n"
" border: 1px solid #148CD2;\n"
" color: #F0F0F0;\n"
"}\n"
"\n"
"QTextEdit:focus {\n"
" border: 1px solid #1464A0;\n"
"}\n"
"\n"
"QTextEdit:selected {\n"
" background: #1464A0;\n"
" color: #32414B;\n"
"}\n"
"\n"
"/* QPlainTextEdit ---------------------------------------------------------\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QPlainTextEdit {\n"
" background-color: #19232D;\n"
" color: #F0F0F0;\n"
" border-radius: 4px;\n"
" border: 1px solid #32414B;\n"
"}\n"
"\n"
"QPlainTextEdit:hover {\n"
" border: 1px solid #148CD2;\n"
" color: #F0F0F0;\n"
"}\n"
"\n"
"QPlainTextEdit:focus {\n"
" border: 1px solid #1464A0;\n"
"}\n"
"\n"
"QPlainTextEdit:selected {\n"
" background: #1464A0;\n"
" color: #32414B;\n"
"}\n"
"\n"
"/* QSizeGrip --------------------------------------------------------------\n"
"\n"
"https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-qsizegrip\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QSizeGrip {\n"
" background: transparent;\n"
" width: 12px;\n"
" height: 12px;\n"
" image: url(\":/qss_icons/rc/window_grip.png\");\n"
"}\n"
"\n"
"/* QStackedWidget ---------------------------------------------------------\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QStackedWidget {\n"
" padding: 2px;\n"
" border: 1px solid #32414B;\n"
" border: 1px solid #19232D;\n"
"}\n"
"\n"
"/* QToolBar ---------------------------------------------------------------\n"
"\n"
"https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-qtoolbar\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QToolBar {\n"
" background-color: #32414B;\n"
" border-bottom: 1px solid #19232D;\n"
" padding: 2px;\n"
" font-weight: bold;\n"
" spacing: 2px;\n"
"}\n"
"\n"
"QToolBar QToolButton {\n"
" background-color: #32414B;\n"
" border: 1px solid #32414B;\n"
"}\n"
"\n"
"QToolBar QToolButton:hover {\n"
" border: 1px solid #148CD2;\n"
"}\n"
"\n"
"QToolBar QToolButton:checked {\n"
" border: 1px solid #19232D;\n"
" background-color: #19232D;\n"
"}\n"
"\n"
"QToolBar QToolButton:checked:hover {\n"
" border: 1px solid #148CD2;\n"
"}\n"
"\n"
"QToolBar::handle:horizontal {\n"
" width: 16px;\n"
" image: url(\":/qss_icons/rc/toolbar_move_horizontal.png\");\n"
"}\n"
"\n"
"QToolBar::handle:vertical {\n"
" height: 16px;\n"
" image: url(\":/qss_icons/rc/toolbar_move_vertical.png\");\n"
"}\n"
"\n"
"QToolBar::separator:horizontal {\n"
" width: 16px;\n"
" image: url(\":/qss_icons/rc/toolbar_separator_horizontal.png\");\n"
"}\n"
"\n"
"QToolBar::separator:vertical {\n"
" height: 16px;\n"
" image: url(\":/qss_icons/rc/toolbar_separator_vertical.png\");\n"
"}\n"
"\n"
"QToolButton#qt_toolbar_ext_button {\n"
" background: #32414B;\n"
" border: 0px;\n"
" color: #F0F0F0;\n"
" image: url(\":/qss_icons/rc/arrow_right.png\");\n"
"}\n"
"\n"
"/* QAbstractSpinBox -------------------------------------------------------\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QAbstractSpinBox {\n"
" background-color: #19232D;\n"
" border: 1px solid #32414B;\n"
" color: #F0F0F0;\n"
" /* This fixes 103, 111 */\n"
" padding-top: 2px;\n"
" /* This fixes 103, 111 */\n"
" padding-bottom: 2px;\n"
" padding-left: 4px;\n"
" padding-right: 4px;\n"
" border-radius: 4px;\n"
" /* min-width: 5px; removed to fix 109 */\n"
"}\n"
"\n"
"QAbstractSpinBox:up-button {\n"
" background-color: transparent #19232D;\n"
" subcontrol-origin: border;\n"
" subcontrol-position: top right;\n"
" border-left: 1px solid #32414B;\n"
" border-bottom: 1px | |
<gh_stars>1-10
import logging
import numpy as np
import xarray as xr
from scipy.ndimage import uniform_filter
from wind_repower_usa.calculations import calc_simulated_energy
from wind_repower_usa.constants import KM_TO_METER
from wind_repower_usa.geographic_coordinates import geolocation_distances
from wind_repower_usa.load_data import load_turbines
from wind_repower_usa.util import turbine_locations, edges_to_center, choose_samples
def calc_wind_rose(turbines, wind_speed, wind_velocity, power_curve=None, bins=70,
directivity_width=15, num_samples=1000):
"""Calculate prevailing wind direction for each turbine location in ``turbines``. A wind rose is
calculated by the amount of energy produced by wind blowing in a certain wind direction using a
specific power curve. Note that definition of wind rose differs slightly from usual
conventions, because this calculates the amplitude using produced energy and not wind speed.
Parameters
----------
turbines : xr.DataSet
as returned by load_turbines()
wind_speed : xr.DataArray
as returned by load_wind_speed()
wind_velocity : xr.Dataset
as downloaded from ERA5
power_curve : callable
a function mapping wind speed to power
bins : int
bins for histogram of distribution of energy (~wind speed) over direction
directivity_width : float (in degree)
see directivity below
num_samples : int
Returns
-------
wind_rose : xr.DataArray
contains the ratio of wind energy produced in each direction, for each location and the
given power_curve, i.e. the integral over dim=direction should be 1.
dims = turbines, direction
prevail_wind_direction : np.array
direction in rad for each turbine location (between -np.pi and np.pi, i.e. 0 is east,
np.pi/2 is north)
directivity : np.array
For each turbine location, directivity is defined as the share of energy produced within a
cone of angle ``directivity_width`` around some direction. For example, with a width of
15°, a result of directivity=0.4 means there is a direction in which 40% of the energy is
produced within a 15° angle (that is, +/- 7.5°). That direction is given by
prevail_wind_direction.
"""
# TODO speed up potential of this function:
# - choose some wind speed samples only
# - calculate only once for entire park
wind_speed, wind_velocity = choose_samples(wind_speed, wind_velocity,
num_samples=num_samples, dim='time')
logging.info("Interpolating wind velocity at turbine locations...")
# interpolation is already done, but only stored as wind speed, u/v components not separately
wind_velocity_at_turbines = wind_velocity.interp(
longitude=xr.DataArray(turbines.xlong.values, dims='turbines'),
latitude=xr.DataArray(turbines.ylat.values, dims='turbines'),
method='linear')
# TODO this might be more accurate using Vincenty’s formula, right? Or is wind direction
# different from calculating Azimuth? Anyway it should be good enough for our purposes.
# see also Azimuth calculator: https://www.cqsrg.org/tools/GCDistance/
# FIXME compare differences between 100m and 10m
logging.info("Calculate wind directions...")
directions = np.arctan2(wind_velocity_at_turbines.v100,
wind_velocity_at_turbines.u100).compute()
energy = calc_simulated_energy(wind_speed,
turbines,
power_curve=power_curve,
sum_along='',
only_built_turbines=False)
boxcar_width_angle = np.radians(directivity_width)
wind_roses_list = []
directivity = []
prevail_wind_direction = []
logging.info("Calculate distribution of directions per turbine location...")
# would be great to use np.histogramdd() instead of this loop but somehow doesn't seem to work
for turbine_idx in range(directions.values.shape[1]):
hist = np.histogram(directions.values.T[turbine_idx, :],
weights=energy.values.T[turbine_idx, :],
range=(-np.pi, np.pi), bins=bins, density=True)
values, bin_edges = hist
wind_roses_list.append(values)
# TODO calculating this inside loop is bad, should yield same results every iteration
bin_centers = edges_to_center(bin_edges)
boxcar_width = int(np.round(boxcar_width_angle / (2 * np.pi) * bins))
# smooth the direction histogram with a circular boxcar filter (mode='wrap')
convoluted = uniform_filter(values, boxcar_width, mode='wrap')
# In case of multiple maxima it might make sense to take the central one or so,
# but this can only occur if wind speed is equally strong in an interval larger than
# boxcar_width.
prevail_wind_direction.append(bin_centers[np.argmax(convoluted)])
# TODO this value might not really make sense that way, actually one needs a whole
# profile for different values of boxcar_width
directivity.append(np.max(convoluted) * boxcar_width_angle)
wind_rose = xr.DataArray(wind_roses_list,
dims=('turbines', 'direction'),
coords={'direction': bin_centers,
'turbines': turbines.turbines})
prevail_wind_direction_xr = xr.DataArray(prevail_wind_direction, dims='turbines',
coords={'turbines': turbines.turbines})
directivity = xr.DataArray(directivity, dims='turbines',
coords={'turbines': turbines.turbines})
wind_rose.attrs['bins'] = bins
wind_rose.attrs['directivity_width'] = directivity_width
wind_rose.attrs['num_samples'] = num_samples
# Store metadata as attrs; plain item assignment would create coordinates instead.
prevail_wind_direction_xr.attrs['bins'] = bins
prevail_wind_direction_xr.attrs['directivity_width'] = directivity_width
prevail_wind_direction_xr.attrs['num_samples'] = num_samples
directivity.attrs['bins'] = bins
directivity.attrs['directivity_width'] = directivity_width
directivity.attrs['num_samples'] = num_samples
return wind_rose, prevail_wind_direction_xr, directivity
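# Minimal self-contained sketch of the core histogram step above (illustrative
# only; shapes and sample data are assumptions): an energy-weighted direction
# histogram for a single turbine.
#
#   import numpy as np
#   directions = np.random.uniform(-np.pi, np.pi, size=1000)  # direction per sample
#   energy = np.random.rand(1000)                             # energy per sample
#   values, bin_edges = np.histogram(directions, weights=energy,
#                                    range=(-np.pi, np.pi), bins=70, density=True)
#   # with density=True the integral over the bins is ~1, matching the
#   # normalization of wind_rose documented above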
def calc_directions(turbines, prevail_wind_direction=None):
"""Calculate pairwise directions from each turbine location to each other turbine location.
Parameters
----------
turbines : xr.DataSet
as returned by load_turbines()
prevail_wind_direction : xr.DataArray (dim = turbines)
will be used to orientate distances relative to prevailing wind direction,
pass an xr.DataArray with zeros to get distances per absolute directions (not relative to
prevailing wind direction)
Returns
-------
xr.DataArray (dims: turbines, targets)
direction of the vector from each turbine to each target (targets being a copy of turbines),
relative to the prevailing wind direction; the diagonal is NaN to avoid mistakes from
accidentally using a turbine's zero-length vector to itself
"""
# targets are a copy of turbines: for each turbine locations angle of the vector to each
# target location will be calculated, sorted into bins of regular angles and then the closest
# turbine per bin is chosen to assign a distance to turbine per direction.
targets = turbines.rename({'turbines': 'targets'})
# pairwise directions from each turbine to each other one - meshgrid magic using xarray, yeah!
directions = np.arctan2(targets.ylat - turbines.ylat, targets.xlong - turbines.xlong)
if prevail_wind_direction is not None:
directions = directions - prevail_wind_direction
# all angles in mathematical orientation between -pi and pi
directions = (directions + np.pi) % (2 * np.pi) - np.pi
# there is no real meaning to calculate the rotation of a vector of length 0...
directions.values[np.diag_indices_from(directions.values)] = np.nan
return directions
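# Worked example of the wrap-around normalization used above, mapping arbitrary
# angles into [-pi, pi) via (d + pi) % (2*pi) - pi:
#
#   import numpy as np
#   d = np.array([3.5, -3.5, 0.0])                 # radians
#   wrapped = (d + np.pi) % (2 * np.pi) - np.pi    # -> approx [-2.78, 2.78, 0.0]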
def calc_dist_in_direction_cluster(turbines, prevail_wind_direction, bin_size_deg=15):
"""Same as calc_dist_in_direction(), but intended for one cluster only. Calculates a squared
distance matrix (and a squared direction matrix) and therefore RAM usage is O(len(turbines)^2).
Parameters
----------
turbines : xr.DataSet
as returned by load_turbines()
prevail_wind_direction : xr.DataArray (dim = turbines)
will be used to orientate distances relative to prevailing wind direction,
pass an xr.DataArray with zeros to get distances per absolute directions (not relative to
prevailing wind direction)
bin_size_deg : float
size of direction bins in degrees
Returns
-------
xr.DataArray
dims: turbines, direction
direction is relative to prevail_wind_direction, i.e. 0° = in prevailing wind direction,
and otherwise counter-clockwise relative to 0°
"""
directions = calc_directions(turbines, prevail_wind_direction)
# directions is actually not used here, because bins and range are provided (except for dtype)
bin_edges = np.histogram_bin_edges(directions,
bins=360//bin_size_deg,
range=(-np.pi, np.pi))
num_bins = len(bin_edges) - 1 # Attention, fencepost problem!
# np.digitize does not return the n-th bin, but the n+1-th bin!
# This is not a symmetric matrix, directions get flipped by 180° if dims is provided in wrong
# order, but it is not at all clear how xarray defines the order (probably the order of
# usage of dims 'targets' and 'turbines' in the arctan2() call above).
bin_idcs = np.digitize(directions, bin_edges) - 1
bin_idcs = xr.DataArray(bin_idcs, dims=('targets', 'turbines'), # targets = closest turbines
coords={'turbines': turbines.turbines})
locations = turbine_locations(turbines)
distances = geolocation_distances(locations)
# set distance to itself to INF to avoid zero distance minimums later
distances[np.diag_indices_from(distances)] = np.inf
distances = xr.DataArray(distances, dims=('turbines', 'targets'),
coords={'turbines': turbines.turbines})
bin_centers = edges_to_center(bin_edges)
direction_bins = xr.DataArray(np.arange(num_bins), dims='direction',
coords={'direction': bin_centers})
return xr.where(bin_idcs == direction_bins, distances, np.inf).min(dim='targets')
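# Small demo of the np.digitize fencepost handling noted above (illustrative
# only): with edges [-pi, 0, pi] there are two bins, and np.digitize returns
# 1-based indices, hence the "- 1".
#
#   import numpy as np
#   edges = np.histogram_bin_edges([], bins=2, range=(-np.pi, np.pi))
#   idx = np.digitize([-1.0, 1.0], edges) - 1      # -> array([0, 1])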
def calc_dist_in_direction(cluster_per_location, prevail_wind_direction, turbines=None,
bin_size_deg=15):
"""Directions between 0° and 360° will be grouped into bins of size ``bin_size_deg``,
then for each turbine location the distance to the nearest turbine is calculated for each
direction bin. Assumes that distance between clusters is infinite and therefore computation
can be done for each cluster independently.
Parameters
----------
cluster_per_location : array_like of int
cluster index for each turbine
prevail_wind_direction : xr.DataArray (dim = turbines)
will be used to orientate distances relative to prevailing wind direction,
pass an xr.DataArray with zeros to get distances per absolute directions (not relative to
prevailing wind direction)
turbines : xr.DataSet
as returned by load_turbines()
bin_size_deg : float
size of direction bins in degrees
Returns
-------
xr.DataArray
dims: turbines, direction
direction is relative to prevail_wind_direction, i.e. 0rad = in prevailing wind direction,
and otherwise counter-clockwise relative to 0rad
"""
if turbines is None:
turbines = load_turbines()
n_bins = 360//bin_size_deg
distances = np.ones((turbines.sizes['turbines'], n_bins)) * np.nan
distances = xr.DataArray(distances, dims=('turbines', 'direction'))
d = None
iterator = zip(turbines.groupby(cluster_per_location),
prevail_wind_direction.groupby(cluster_per_location))
# TODO this loop could be parallelized, but a lock is needed for writing to distances, right?
# how about using dask.bag.foldby? would it help to use dask.delayed to speed up the inner
# loop and then combine results sequential?
for ((idx_turbine, turbines_cluster), (idx_prevail, prevail_cluster)) in iterator:
if | |
# Import Google TransitFeed
import transitfeed
from transitfeed import ServicePeriod
# Version
mjts_version = "0.0.2"
# New Schedule
schedule = transitfeed.Schedule()
# Create Agency
schedule.AddAgency("Moose Jaw Transit Service", "http://www.moosejaw.ca/?service=city-of-moose-jaw-transit-division",
"America/Regina")
# Calendars
service_periods = []
## Weekday
service_periods.append(ServicePeriod(id="weekday"))
service_periods[0].SetWeekdayService()
service_periods[0].SetStartDate("20151005")
service_periods[0].SetEndDate("20161005")
### Holidays/No Service Days
service_periods[0].SetDateHasService('20151224', False)
service_periods[0].SetDateHasService('20151225', False)
service_periods[0].SetDateHasService('20151226', False)
## Saturday
service_periods.append(ServicePeriod(id="saturday"))
service_periods[1].SetDayOfWeekHasService(5)
service_periods[1].SetStartDate("20151005")
service_periods[1].SetEndDate("20161005")
### Holidays/No Service Days
service_periods[1].SetDateHasService('20151224', False)
service_periods[1].SetDateHasService('20151225', False)
service_periods[1].SetDateHasService('20151226', False)
# Add all service period objects to the schedule
schedule.SetDefaultServicePeriod(service_periods[0], validate=True)
schedule.AddServicePeriodObject(service_periods[1], validate=True)
# Fares
# Routes
## 1- Athabasca East
ABE = schedule.AddRoute(short_name="1", long_name="Athabasca East",
route_type="Bus")
## 2- Sunningdale
SUN = schedule.AddRoute(short_name="2", long_name="Sunningdale",
route_type="Bus")
## 3- Athabasca West
ABW = schedule.AddRoute(short_name="3", long_name="Athabasca West",
route_type="Bus")
## 4- Westmount
WES = schedule.AddRoute(short_name="4", long_name="Westmount",
route_type="Bus")
# Stops
## Athabasca East Stops
abe_1001 = schedule.AddStop(lng=-105.535112, lat=50.391708, name = "Main St. N @ High St. W")
abe_1002 = schedule.AddStop(lng=-105.537392, lat=50.391613, name = "High St. W @ 1st Ave. NW")
abe_1003 = schedule.AddStop(lng=-105.537371, lat=50.396825, name = "1st Ave. NW @ Caribou St. W")
abe_1004 = schedule.AddStop(lng=-105.534774, lat=50.396845, name = "Caribou St. E @ Main St. N")
abe_1005 = schedule.AddStop(lng=-105.531893, lat=50.396877, name = "Caribou St. E @ 1st Ave. NE")
abe_1006 = schedule.AddStop(lng=-105.528183, lat=50.396880, name = "Caribou St. E @ Ross Cres.@3rd Ave. NE")
abe_1007 = schedule.AddStop(lng=-105.524750, lat=50.396858, name = "Caribou St. E @ 4th Ave. NE")
abe_1008 = schedule.AddStop(lng=-105.524557, lat=50.398884, name = "4th Ave. NE @ Oxford St. E")
abe_1009 = schedule.AddStop(lng=-105.524633, lat=50.400889, name = "4th Ave. NE @ Hall St. E")
abe_1010 = schedule.AddStop(lng=-105.524658, lat=50.402656, name = "4th Ave. NE @ Saskatchewan St. E")
abe_1011 = schedule.AddStop(lng=-105.527507, lat=50.402627, name = "Saskatchewan St. E @ 3rd Ave. NE")
abe_1012 = schedule.AddStop(lng=-105.531771, lat=50.402648, name = "Saskatchewan St. E @ 1st Ave. NE")
abe_1013 = schedule.AddStop(lng=-105.534698, lat=50.406387, name = "Town & Country Dr. @ Main St. N")
abe_1014 = schedule.AddStop(lng=-105.533552, lat=50.412926, name = "Main St. East Service Road @ Thatcher Dr. E")
abe_1015 = schedule.AddStop(lng=-105.527029, lat=50.419106, name = "Dr. <NAME> Regional Hospital")
abe_1016 = schedule.AddStop(lng=-105.521458, lat=50.411943, name = "Highland Rd. @ Thatcher Dr. E")
abe_1017 = schedule.AddStop(lng=-105.515950, lat=50.411921, name = "Thatcher Dr. E @ Chester Rd")
abe_1018 = schedule.AddStop(lng=-105.511780, lat=50.411853, name = "Thatcher Dr. E @ 9th Ave. NE")
abe_1019 = schedule.AddStop(lng=-105.511762, lat=50.407610, name = "9th Ave. NE @ Prairie Oasis Trailer Court")
abe_1020 = schedule.AddStop(lng=-105.511820, lat=50.404814, name = "9th Ave. NE @ Lakeview Trailer Court")
abe_1021 = schedule.AddStop(lng=-105.511825, lat=50.396861, name = "9th Ave. NE @ Caribou St. E")
abe_1022 = schedule.AddStop(lng=-105.508979, lat=50.396846, name = "Caribou St. E @ 10th Ave. NE")
abe_1023 = schedule.AddStop(lng=-105.505852, lat=50.395337, name = "11th Ave. NE @ Athabasca St. E")
abe_1024 = schedule.AddStop(lng=-105.508771, lat=50.395345, name = "Athabasca St. E @ 10th Ave. NE")
abe_1025 = schedule.AddStop(lng=-105.511806, lat=50.395353, name = "Athabasca St. E @ 9th Ave. NE")
abe_1026 = schedule.AddStop(lng=-105.515020, lat=50.395332, name = "Athabasca St. E @ 8th Ave. N")
abe_1027 = schedule.AddStop(lng=-105.517468, lat=50.395330, name = "Athabasca St. E @ 7th Ave. NE")
abe_1028 = schedule.AddStop(lng=-105.517468, lat=50.393499, name = "7th Ave. NE @ Ominica St. E")
abe_1029 = schedule.AddStop(lng=-105.517431, lat=50.392565, name = "7th Ave. NE @ Fairford St. E")
abe_1030 = schedule.AddStop(lng=-105.519853, lat=50.392563, name = "Fairford St. E @ 6th Ave. NE")
abe_1031 = schedule.AddStop(lng=-105.524809, lat=50.392491, name = "Fairford St. E @ 4th Ave. NE")
abe_1032 = schedule.AddStop(lng=-105.527268, lat=50.392518, name = "Fairford St. E @ 3rd Ave. NE")
abe_1033 = schedule.AddStop(lng=-105.529674, lat=50.391562, name = "High St. E @ 2nd Ave. NE")
abe_1034 = schedule.AddStop(lng=-105.532080, lat=50.391564, name = "High St. E @ 1st Ave. N")
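# Hypothetical next steps, sketched after the transitfeed tutorial (trip times
# below are made up, not from this script): attach trips and stop times to a
# route, then validate and write the feed.
#
#   trip = ABE.AddTrip(schedule, headsign="Athabasca East")
#   trip.AddStopTime(abe_1001, stop_time="06:00:00")
#   trip.AddStopTime(abe_1002, stop_time="06:02:00")
#   schedule.Validate()
#   schedule.WriteGoogleTransitFeed("moose_jaw_transit.zip")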
## Sunningdale Stops
sun_2001 = schedule.AddStop(lng=-105.534554, lat=50.391816, name = "Main St. N @ High St. W")
sun_2002 = schedule.AddStop(lng=-105.534547, lat=50.395128, name = "Main St. N @ Athabasca St. E")
sun_2003 = schedule.AddStop(lng=-105.534606, lat=50.397097, name = "Main St. N @ Caribou St. E")
sun_2004 = schedule.AddStop(lng=-105.534628, lat=50.400801, name = "Main St. N @ Hall St. E")
sun_2005 = schedule.AddStop(lng=-105.532141, lat=50.404088, name = "Town & Country Dr. @ Civic Centre Dr.")
sun_2006 = schedule.AddStop(lng=-105.534698, lat=50.406387, name = "Town & Country Dr. @ Main St. N")
sun_2007 = schedule.AddStop(lng=-105.536385, lat=50.412762, name = "Thatcher Dr. W @ Main St. West Service Road")
sun_2008 = schedule.AddStop(lng=-105.538747, lat=50.413823, name = "Woodlilly Dr. @ Arrowhead Rd.")
sun_2009 = schedule.AddStop(lng=-105.538403, lat=50.415286, name = "Woodlilly Dr. @ Aster Cres.")
sun_2010 = schedule.AddStop(lng=-105.538396, lat=50.416217, name = "Woodlilly Dr. @ Buttercup Cres.")
sun_2011 = schedule.AddStop(lng=-105.538564, lat=50.418213, name = "Woodlilly Dr. @ Crocus Rd.")
sun_2012 = schedule.AddStop(lng=-105.539069, lat=50.419862, name = "Woodlilly Dr. @ Dahlia Cres. S")
sun_2013 = schedule.AddStop(lng=-105.541255, lat=50.422278, name = "Woodlilly Dr. @ Dahlia Cres. N")
sun_2014 = schedule.AddStop(lng=-105.541936, lat=50.422491, name = "Woodlilly Dr. @ Flax Rd.")
sun_2015 = schedule.AddStop(lng=-105.550880, lat=50.423054, name = "Woodlilly Dr. @ Calypso Dr.")
sun_2016 = schedule.AddStop(lng=-105.554598, lat=50.420152, name = "Woodlilly Dr. @ Iris Dr.")
sun_2017 = schedule.AddStop(lng=-105.554614, lat=50.417789, name = "Woodlilly Dr. @ Lewry Cres.")
sun_2018 = schedule.AddStop(lng=-105.554574, lat=50.413746, name = "Woodlilly Dr. @ Thorn Cres.")
sun_2019 = schedule.AddStop(lng=-105.565848, lat=50.413261, name = "Thatcher Dr. W @ 11th Ave. NW")
sun_2020 = schedule.AddStop(lng=-105.565889, lat=50.411244, name = "11th Ave. NW")
sun_2021 = schedule.AddStop(lng=-105.565857, lat=50.409446, name = "11th Ave. NW @ Normandy Dr.")
sun_2022 = schedule.AddStop(lng=-105.562648, lat=50.409395, name = "Normandy Dr. @ General Cres.")
sun_2023 = schedule.AddStop(lng=-105.561139, lat=50.409373, name = "Normandy Dr. @ Marshall Cres. E")
sun_2024 = schedule.AddStop(lng=-105.557632, lat=50.404846, name = "9th Ave. NW @ MacDonald St. W")
sun_2025 = schedule.AddStop(lng=-105.552479, lat=50.404839, name = "MacDonald St. W @ 7th Ave. NW")
sun_2026 = schedule.AddStop(lng=-105.549693, lat=50.404847, name = "MacDonald St. W @ 6th Ave. NW")
sun_2027 = schedule.AddStop(lng=-105.545519, lat=50.404871, name = "MacDonald St. W @ 4th Ave. NW")
sun_2028 = schedule.AddStop(lng=-105.542706, lat=50.404874, name = "MacDonald St. W @ 3rd Ave. NW")
sun_2029 = schedule.AddStop(lng=-105.538524, lat=50.404879, name = "MacDonald St. W @ Redland Ave. NW")
sun_2030 = schedule.AddStop(lng=-105.534626, lat=50.403219, name = "Main St. N @ Saskatchewan St. W")
sun_2031 = schedule.AddStop(lng=-105.534832, lat=50.400466, name = "Main St. N @ Hall St. W")
sun_2032 = schedule.AddStop(lng=-105.534842, lat=50.398715, name = "Main St. N @ Oxford St. W")
sun_2033 = schedule.AddStop(lng=-105.534848, lat=50.397655, name = "Main St. N @ Ross St. W")
sun_2034 = schedule.AddStop(lng=-105.534821, lat=50.395524, name = "Main St. N @ Athabasca St. W")
sun_2035 = schedule.AddStop(lng=-105.534837, lat=50.394191, name = "Main St. N @ Stadacona St. W")
sun_2036 = schedule.AddStop(lng=-105.534783, lat=50.391786, name = "Main St. N @ High St. W")
## Athabasca West Stops
abw_3001 = schedule.AddStop(lng=-105.534557, lat=50.391898, name = "Main St. N @ High St. W")
abw_3002 = schedule.AddStop(lng=-105.534583, lat=50.393453, name = "Main St. N @ Ominica St. E")
abw_3003 = schedule.AddStop(lng=-105.534585, lat=50.394379, name = "Main St. N @ Stadacona St. E")
abw_3004 = schedule.AddStop(lng=-105.534759, lat=50.395276, name = "Main St. N @ Athabasca St. E")
abw_3005 = schedule.AddStop(lng=-105.534775, lat=50.397846, name = "Main St. N @ Ross. St. E")
abw_3006 = schedule.AddStop(lng=-105.535097, lat=50.398949, name = "Oxford St. W @ Main St. N")
abw_3007 = schedule.AddStop(lng=-105.538723, lat=50.398908, name = "Oxford St. W @ Redland Ave. N")
abw_3008 = schedule.AddStop(lng=-105.544121, lat=50.398931, name = "Oxford St. W @ Henleaze Ave. N")
abw_3009 = schedule.AddStop(lng=-105.549653, lat=50.398924, name = "Oxford St. W @ 6th Ave. NW")
abw_3010 = schedule.AddStop(lng=-105.553907, lat=50.398914, name = "Oxford St. W @ Monk Ave. N")
abw_3011 = schedule.AddStop(lng=-105.557577, lat=50.398907, name = "Oxford St. W @ 9th Ave. NW")
abw_3012 = schedule.AddStop(lng=-105.561411, lat=50.398498, name = "Albert St. N @ 10th Ave. NW")
abw_3013 = schedule.AddStop(lng=-105.565239, lat=50.398485, name = "Albert St. N @ 11th Ave. NW")
abw_3014 = schedule.AddStop(lng=-105.565233, lat=50.400107, name = "11th Ave. NW @ Carleton St. W")
abw_3015 = schedule.AddStop(lng=-105.565290, lat=50.403271, name = "11th Ave. NW @ Grace St. W")
abw_3016 = schedule.AddStop(lng=-105.568479, lat=50.403016, name = "Grace St. W @ Gordon Rd. N")
abw_3017 = schedule.AddStop(lng=-105.573261, lat=50.401748, name = "Grace St. W @ 13th Ave. NW")
abw_3018 = schedule.AddStop(lng=-105.577126, lat=50.399929, name = "Grace St. W @ Prince Charles Pl.")
abw_3019 = schedule.AddStop(lng=-105.578960, lat=50.399818, name = "Grace St. W @ Holdsworth Cres.")
abw_3020 = schedule.AddStop(lng=-105.582893, lat=50.399815, name = "Grace St. W @ Corman Cres.")
abw_3021 = schedule.AddStop(lng=-105.584158, lat=50.402874, name = "Thatcher Dr. W @ Rutherford St. W")
abw_3022 = schedule.AddStop(lng=-105.582415, lat=50.406247, name = "Thatcher Dr. W @ 13th Ave. NW")
abw_3023 = schedule.AddStop(lng=-105.581528, lat=50.405857, name = "13th Ave. NW @ Pascoe Dr. W")
abw_3024 = schedule.AddStop(lng=-105.579708, lat=50.404858, name = "13th Ave. NW @ Mayberry Cres.")
abw_3025 = schedule.AddStop(lng=-105.577642, lat=50.403978, name = "13th Ave. NW @ Regal Cres.")
abw_3026 = schedule.AddStop(lng=-105.574382, lat=50.402371, name = "13th Ave. NW @ Gordon Rd. N")
abw_3027 = schedule.AddStop(lng=-105.571680, lat=50.400022, name = "13th Ave. NW @ Carleton St. W")
abw_3028 = schedule.AddStop(lng=-105.571753, lat=50.397651, name = "13th Ave. NW @ Montgomery St. W")
abw_3029 = schedule.AddStop(lng=-105.574652, lat=50.396813, name = "Caribou St. W @ 14th Ave. NW")
abw_3030 = schedule.AddStop(lng=-105.574611, lat=50.395229, name = "Athabasca St. W @ 14th Ave. NW")
abw_3031 = schedule.AddStop(lng=-105.571741, lat=50.395244, name = "Athabasca St. W @ 13th Ave. NW")
abw_3032 = schedule.AddStop(lng=-105.568876, lat=50.395249, name = "Athabasca St. W @ | |
setting the value, but the module that
is checking the value has its own __context__.
Returns:
bool: ``True`` if successful, otherwise ``False``
"""
if "lgpo.adv_audit_data" not in __context__ or refresh is True:
system_root = os.environ.get("SystemRoot", "C:\\Windows")
f_audit = os.path.join(system_root, "security", "audit", "audit.csv")
# Make sure the csv file exists before trying to open it
_advaudit_check_csv()
audit_settings = {}
with salt.utils.files.fopen(f_audit, mode="r") as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
audit_settings.update({row["Subcategory"]: row["Setting Value"]})
__context__["lgpo.adv_audit_data"] = audit_settings
return __context__["lgpo.adv_audit_data"].get(option, None)
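# Illustrative sketch (row values are hypothetical, not from a real
# audit.csv): a row such as
#   {"Subcategory": "Audit Logon", "Setting Value": "3"}
# is cached as {"Audit Logon": "3"}, so a call like
#   _get_advaudit_value("Audit Logon")
# would return "3".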
def _set_advaudit_file_data(option, value):
"""
Helper function that sets the Advanced Audit settings in the two .csv files
on Windows. Those files are located at:
C:\\Windows\\Security\\Audit\\audit.csv
C:\\Windows\\System32\\GroupPolicy\\Machine\\Microsoft\\Windows NT\\Audit\\audit.csv
Args:
option (str): The name of the option to set
value (str): The value to set. ['None', '0', '1', '2', '3']
Returns:
bool: ``True`` if successful, otherwise ``False``
"""
# Set up some paths here
system_root = os.environ.get("SystemRoot", "C:\\Windows")
f_audit = os.path.join(system_root, "security", "audit", "audit.csv")
f_audit_gpo = os.path.join(
system_root,
"System32",
"GroupPolicy",
"Machine",
"Microsoft",
"Windows NT",
"Audit",
"audit.csv",
)
f_temp = tempfile.NamedTemporaryFile(
mode="w", delete=False, suffix=".csv", prefix="audit"
)
# Lookup dict for "Inclusion Setting" field
auditpol_values = {
"None": "No Auditing",
"0": "No Auditing",
"1": "Success",
"2": "Failure",
"3": "Success and Failure",
}
# Make sure the csv file exists before trying to open it
_advaudit_check_csv()
try:
# Open the existing audit.csv and load the csv `reader`
with salt.utils.files.fopen(f_audit, mode="r") as csv_file:
reader = csv.DictReader(csv_file)
# Open the temporary .csv and load the csv `writer`
with salt.utils.files.fopen(f_temp.name, mode="w") as tmp_file:
writer = csv.DictWriter(tmp_file, fieldnames=reader.fieldnames)
# Write the header values (labels)
writer.writeheader()
value_written = False
# Loop through the current audit.csv and write the changes to
# the temp csv file for existing settings
for row in reader:
# If the row matches the value we're setting, update it with
# the new value
if row["Subcategory"] == option:
if value != "None":
# The value is not None, make the change
row["Inclusion Setting"] = auditpol_values[value]
row["Setting Value"] = value
log.trace("LGPO: Setting %s to %s", option, value)
writer.writerow(row)
else:
# value is None, remove it by not writing it to the
# temp file
log.trace("LGPO: Removing %s", option)
value_written = True
# If it's not the value we're setting, just write it
else:
writer.writerow(row)
# If a value was not written, it is a new setting not found in
# the existing audit.csv file. Add the new setting with values
# from the defaults
if not value_written:
if value != "None":
# value is not None, write the new value
log.trace("LGPO: Setting %s to %s", option, value)
defaults = _get_advaudit_defaults(option)
writer.writerow(
{
"Machine Name": defaults["Machine Name"],
"Policy Target": defaults["Policy Target"],
"Subcategory": defaults["Subcategory"],
"Subcategory GUID": defaults["Subcategory GUID"],
"Inclusion Setting": auditpol_values[value],
"Exclusion Setting": defaults["Exclusion Setting"],
"Setting Value": value,
}
)
value_written = True
if value_written:
# Copy the temporary csv file over the existing audit.csv in both
# locations if a value was written
__salt__["file.copy"](f_temp.name, f_audit, remove_existing=True)
__salt__["file.makedirs"](f_audit_gpo)
__salt__["file.copy"](f_temp.name, f_audit_gpo, remove_existing=True)
finally:
f_temp.close()
__salt__["file.remove"](f_temp.name)
return value_written
def _set_advaudit_pol_data(option, value):
"""
Helper function that updates the current applied settings to match what has
just been set in the audit.csv files. We're doing it this way instead of
running `gpupdate`
Args:
option (str): The name of the option to set
value (str): The value to set. ['None', '0', '1', '2', '3']
Returns:
bool: ``True`` if successful, otherwise ``False``
"""
auditpol_values = {
"None": "No Auditing",
"0": "No Auditing",
"1": "Success",
"2": "Failure",
"3": "Success and Failure",
}
defaults = _get_advaudit_defaults(option)
return __utils__["auditpol.set_setting"](
name=defaults["Auditpol Name"], value=auditpol_values[value]
)
def _set_advaudit_value(option, value):
"""
Helper function to update the Advanced Audit policy on the machine. This
function modifies the two ``audit.csv`` files in the following locations:
C:\\Windows\\Security\\Audit\\audit.csv
C:\\Windows\\System32\\GroupPolicy\\Machine\\Microsoft\\Windows NT\\Audit\\audit.csv
Then it applies those settings using ``auditpol``
After that, it updates ``__context__`` with the new setting
Args:
option (str): The name of the option to set
value (str): The value to set. ['None', '0', '1', '2', '3']
Returns:
bool: ``True`` if successful, otherwise ``False``
"""
# Set the values in both audit.csv files
if not _set_advaudit_file_data(option=option, value=value):
raise CommandExecutionError("Failed to set audit.csv option: {}".format(option))
# Apply the settings locally
if not _set_advaudit_pol_data(option=option, value=value):
# Only log this error, it will be in effect the next time the machine
# updates its policy
log.error(
"Failed to apply audit setting: %s\n"
"Policy will take effect on next GPO update",
option,
)
# Make sure lgpo.adv_audit_data is loaded
if "lgpo.adv_audit_data" not in __context__:
_get_advaudit_value(option)
# Update __context__
if value is None:
log.debug("LGPO: Removing Advanced Audit data: %s", option)
__context__["lgpo.adv_audit_data"].pop(option)
else:
log.debug("LGPO: Updating Advanced Audit data: %s: %s", option, value)
__context__["lgpo.adv_audit_data"][option] = value
return True
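# Hedged usage sketch (the subcategory name is illustrative): "3" maps to
# "Success and Failure" in the auditpol_values lookup above.
#   _set_advaudit_value(option="Audit Logon", value="3")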
def _get_netsh_value(profile, option):
if "lgpo.netsh_data" not in __context__:
__context__["lgpo.netsh_data"] = {}
if profile not in __context__["lgpo.netsh_data"]:
log.debug("LGPO: Loading netsh data for %s profile", profile)
settings = salt.utils.win_lgpo_netsh.get_all_settings(
profile=profile, store="lgpo"
)
__context__["lgpo.netsh_data"].update({profile: settings})
log.trace(
"LGPO: netsh returning value: %s",
__context__["lgpo.netsh_data"][profile][option],
)
return __context__["lgpo.netsh_data"][profile][option]
def _set_netsh_value(profile, section, option, value):
if section not in ("firewallpolicy", "settings", "logging", "state"):
raise ValueError("LGPO: Invalid section: {}".format(section))
log.trace(
"LGPO: Setting the following\nProfile: %s\nSection: %s\nOption: %s\nValue: %s",
profile,
section,
option,
value,
)
if section == "firewallpolicy":
salt.utils.win_lgpo_netsh.set_firewall_settings(
profile=profile,
inbound=value if option == "Inbound" else None,
outbound=value if option == "Outbound" else None,
store="lgpo",
)
if section == "settings":
salt.utils.win_lgpo_netsh.set_settings(
profile=profile, setting=option, value=value, store="lgpo"
)
if section == "state":
salt.utils.win_lgpo_netsh.set_state(profile=profile, state=value, store="lgpo")
if section == "logging":
if option in ("FileName", "MaxFileSize"):
if value == "Not configured":
value = "notconfigured"
# Trim the "Log" prefix from the option name for the two logging options
if option.startswith("Log"):
option = option[3:]
salt.utils.win_lgpo_netsh.set_logging_settings(
profile=profile, setting=option, value=value, store="lgpo"
)
log.trace("LGPO: Clearing netsh data for %s profile", profile)
__context__["lgpo.netsh_data"].pop(profile)
return True
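# Hedged usage sketch (profile and option names assumed, following the
# validation and trimming logic above):
#   _set_netsh_value(profile="domain", section="state", option="State", value="on")
#   _set_netsh_value(profile="public", section="logging",
#                    option="FileName", value="Not configured")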
def _load_secedit_data():
"""
Helper function that loads secedit data. It runs `secedit /export /cfg
<file_name>` which creates a file that contains the secedit data.
Returns:
str: The contents of the file generated by the secedit command
"""
f_exp = os.path.join(__opts__["cachedir"], "secedit-{}.txt".format(UUID))
try:
__salt__["cmd.run"](["secedit", "/export", "/cfg", f_exp])
with salt.utils.files.fopen(f_exp, encoding="utf-16") as fp:
secedit_data = fp.readlines()
return secedit_data
finally:
if __salt__["file.file_exists"](f_exp):
__salt__["file.remove"](f_exp)
def _get_secedit_data(refresh=False):
"""
Helper function that returns the secedit data in __context__ if it exists
and puts the secedit data in __context__ if it does not.
Args:
refresh (bool):
Refresh secedit data stored in __context__. This is needed for
testing where the state is setting the value, but the module that
is checking the value has its own __context__.
Returns:
str: secedit data from __context__
"""
if "lgpo.secedit_data" not in __context__ or refresh is True:
log.debug("LGPO: Loading secedit data")
__context__["lgpo.secedit_data"] = _load_secedit_data()
return __context__["lgpo.secedit_data"]
def _get_secedit_value(option):
"""
Helper function that looks for the passed option in the secedit data
"""
secedit_data = _get_secedit_data()
for _line in secedit_data:
if _line.startswith(option):
return _line.split("=")[1].strip()
return "Not Defined"
def _write_secedit_data(inf_data):
"""
Helper function to write secedit data to the database
"""
# Set file names
# The database must persist in order for the settings to remain in effect
f_sdb = os.path.join(os.getenv("WINDIR"), "security", "database", "salt.sdb")
f_inf = os.path.join(__opts__["cachedir"], "secedit-{}.inf".format(UUID))
try:
# Write the changes to the inf file
with salt.utils.files.fopen(f_inf, "w", encoding="utf-16") as fp:
fp.write(inf_data)
# Import the template data into a database
cmd = ["secedit", "/import", "/db", f_sdb, "/cfg", f_inf]
retcode = __salt__["cmd.retcode"](cmd)
if retcode != 0:
log.debug("Secedit failed to import template data")
return False
# Apply the security database
cmd = ["secedit", "/configure", "/db", f_sdb]
retcode = __salt__["cmd.retcode"](cmd)
if retcode != 0:
log.debug("Secedit failed to apply security database")
return False
# Pop secedit data so it will always be current
__context__.pop("lgpo.secedit_data", None)
return True
finally:
# Cleanup our scratch files, but not the database file
if __salt__["file.file_exists"](f_inf):
__salt__["file.remove"](f_inf)
def _transform_value(value, policy, transform_type):
"""
helper function to transform the policy value into something that more
closely matches how the policy is displayed in the gpedit GUI
"""
t_kwargs = {}
if "Transform" in policy:
if transform_type in policy["Transform"]:
_policydata = _policy_info()
if transform_type + "Args" in policy["Transform"]:
t_kwargs = policy["Transform"][transform_type + "Args"]
return getattr(_policydata, policy["Transform"][transform_type])(
| |
import json
from sklearn.linear_model import LinearRegression,Lasso,Ridge
from sklearn.datasets import load_boston
import os
import sys
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = curPath
for i in range(2):
rootPath = os.path.split(rootPath)[0]
sys.path.append(rootPath)
import numpy as np
import torch
import time
import math
from pandas import Series,DataFrame
import argparse
from src.utils import mkdir
from src.Quality_deiven_PI_Ensemble.model import *
from src.Quality_deiven_PI_Ensemble.utils import *
from Experiments.BostonHousing.utils import *
import shutil
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
if __name__ == '__main__':
# Load data
X, Y = load_data()
inputs = 13
outputs = 1
# type_in = '~' + 'boston' # data type to use - drunk_bow_tie x_cubed_gap ~boston ~concrete
# loss_type = 'gauss_like' # loss type to train on - qd_soft gauss_like(=mve) mse (mse=simple point prediction)
# n_samples = 10000 # if generating data, how many points to generate
# h_size = [50] # number of hidden units in network: [50]=layer_1 of 50, [8,4]=layer_1 of 8, layer_2 of 4
# alpha = 0.05 # data points captured = (1 - alpha)
# n_epoch = 100 # number epochs to train for
# optim = 'adam' # optimiser - SGD adam
# l_rate = 0.03 # learning rate of optimiser
# decay_rate = 0.9 # learning rate decay
# soften = 160. # hyper param for QD_soft
# lambda_in = 15. # hyper param for QD_soft
# sigma_in = 0.2 # initialise std dev of NN weights
# is_run_test = True # if averaging over lots of runs - turns off some prints and graphs
# n_ensemble = 5 # number of individual NNs in ensemble
# n_bootstraps = 1 # how many bootstrap resamples to perform
# n_runs = 20 if is_run_test else 1
# is_batch = True # train in batches?
# n_batch = 100 # batch size
# lube_perc = 90. # if model uncertainty method = perc - 50 to 100
# perc_or_norm = 'norm' # model uncertainty method - perc norm (paper uses norm)
# is_early_stop = False # stop training early (didn't use in paper)
# is_bootstrap = False if n_bootstraps == 1 else True
# train_prop = 0.9 # % of data to use as training, 0.8 for hyperparam selection
#
# out_biases = [3., -3.] # chosen biases for output layer (for gauss_like it is overwritten to 0,1)
# activation = 'relu' # NN activation fns - tanh relu
# Hyper-parameters
subsamples = [0.8]
lrs = [0.03]
alphas = [0.05, 0.10] # data points captured = (1 - alpha)
loss_types = ['qd_soft', 'qd_hard'] # loss type to train on - qd_soft mve mse (mse=simple point prediction)
censor_Rs = [False]
type_ins = ["pred_intervals"]
softens = [160.] # hyper param for QD_soft
lambda_ins = [5., 10.]
bias_rands = [False]
out_biases = [[3.,-3.]] # chosen biases for output layer (for mve it is overwritten to 0,1)
weight_decays = [1e-6]
n_nets = [10]
momentums = [0.99]
lube_perc = 90 # if model uncertainty method = perc - 50 to 100
perc_or_norm = 'norm' # model uncertainty method - perc norm (paper uses norm)
batch_size = 100
nb_epochs = 100
log_interval = 1
n_splits = 15
# plotting options
save_graphs = True
show_graphs = True
is_y_rescale = False
is_y_sort = False
var_plot = 0 # lets us plot against different variables, use 0 for univariate
is_err_bars = True
is_norm_plot = True
# Paths
base_dir = './results/qd_ensemble_results_new'
# Grid search
results = {}
for n_net in n_nets :
for subsample in subsamples:
for alpha in alphas:
for loss_type in loss_types:
for censor_R in censor_Rs:
for type_in in type_ins:
for soften in softens:
for lambda_in in lambda_ins:
for bias_rand in bias_rands:
for out_biase in out_biases:
for lr in lrs:
for momentum in momentums:
for weight_decay in weight_decays:
# pre calcs
if alpha == 0.05:
n_std_devs = 1.96
elif alpha == 0.10:
n_std_devs = 1.645
elif alpha == 0.01:
n_std_devs = 2.575
else:
raise Exception('ERROR unusual alpha')
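# Note: these n_std_devs values are the two-sided standard-normal quantiles
# z_{1-alpha/2} (1.96, 1.645, 2.575 for alpha = 0.05, 0.10, 0.01). A hedged
# alternative to hard-coding them, assuming scipy is available:
#   from scipy.stats import norm
#   n_std_devs = norm.ppf(1 - alpha / 2)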
Hps = 'N_net_' + str(n_net) + '_Subsample_' + str(subsample) + '_Alpha_' + str(alpha) +\
'_Lr_' + str(lr) + '_Momentum_' + str(momentum) + '_Weight_decay_' + str(weight_decay) \
+ '_loss_type_' + str(loss_type) + '_Type_ins_' + type_in + '_Soften_' + str(soften) \
+ '_lambda_in_' + str(lambda_in)
print('Grid search step: ' + Hps)
results_dir = base_dir + '/' + Hps
results_file = results_dir + '_results.txt'
mkdir(results_dir)
results_splits = []
for split in range(int(n_splits)):
results_dir_split = results_dir + '/split_' + str(split)
mkdir(results_dir_split)
# get the split data / dataset
X_train, y_train, X_val, y_val, X_test, y_test, y_stds = get_data_splited(split, X, Y)
trainset, valset, testset = get_dataset(X_train,y_train, X_val,y_val, X_test,y_test)
results_val = base_dir + '/results_val_split_' + str(split) + '.txt'
results_test = base_dir + '/results_test_split_' + str(split) + '.txt'
###
y_pred_all = np.zeros((n_net, X_test.shape[0], outputs * 2))
###
for iii in range(n_net):
print('Net ' + str(iii))
keep_idx = []
for idx in range(len(trainset)):
if np.random.binomial(1, subsample, size=1) == 1:
keep_idx.append(idx)
keep_idx = np.array(keep_idx)
from torch.utils.data.sampler import SubsetRandomSampler
sampler = SubsetRandomSampler(keep_idx)
use_cuda = torch.cuda.is_available()
trainloader, valloader, testloader = get_dataloader_sample(trainset, valset, testset, use_cuda, batch_size, sampler)
results_val_split = results_dir + '/results_val_split_' + str(split) + '.txt'
results_test_split = results_dir + '/results_test_split_' + str(split) + '.txt'
# net dims
cprint('c', '\nNetwork:')
net = QD_net_BH( lr=lr, input_dim=inputs, cuda=use_cuda, output_dim=outputs, batch_size=128,
type_in=type_in, alpha=alpha,loss_type=loss_type, censor_R=censor_R,
soften=soften, lambda_in=lambda_in, bias_rand=bias_rand, out_biases=out_biases,
weight_decay=weight_decay, n_hid=50, momentum=momentum)
## ---------------------------------------------------------------------------------------------------------------------
# train
epoch = 0
cprint('c', '\nTrain:')
print(' init cost variables:')
pred_cost_train = []
PICP_train = []
MPIW_train = []
cost_dev = []
PICP_dev = []
MPIW_dev = []
best_val_loss = np.inf
nb_its_dev = 1
tic0 = time.time()
early_stop = 0
for i in range(epoch, nb_epochs):
net.set_mode_train(True)
tic = time.time()
nb_samples = 0
pred_cost_train_i = 0
picp_train_i = 0
mpiw_train_i = 0
for x, y in trainloader:
cost_pred, picp, mpiw = net.fit(x, y)
pred_cost_train_i += cost_pred
picp_train_i += picp
mpiw_train_i += mpiw
nb_samples += len(x)
pred_cost_train_i /= len(trainloader)
picp_train_i /= len(trainloader)
mpiw_train_i /= len(trainloader)
pred_cost_train.append(pred_cost_train_i)
PICP_train.append(picp_train_i)
MPIW_train.append(mpiw_train_i)
toc = time.time()
net.epoch = i
# ---- print
print("it %d/%d, loss_train = %f, PICP_train = %f, MPIW_train = %f " % (
i, nb_epochs, pred_cost_train_i, picp_train_i, mpiw_train_i), end="")
cprint('r', ' time: %f seconds\n' % (toc - tic))
# ---- dev
if i % nb_its_dev == 0:
net.set_mode_train(False)
cost_dev_i = 0
picp_dev_i = 0
mpiw_dev_i = 0
for j, (x, y) in enumerate(valloader):
cost, picp, mpiw, _ = net.eval(x, y)
cost_dev_i += cost
picp_dev_i += picp
mpiw_dev_i += mpiw
nb_samples += len(x)
cost_dev_i /= len(valloader)
picp_dev_i /= len(valloader)
mpiw_dev_i /= len(valloader)
cost_dev.append(cost_dev_i)
PICP_dev.append(picp_dev_i)
MPIW_dev.append(mpiw_dev_i)
cprint('g', ' loss_val = %f, PICP_val = %f, MPIW_val = %f\n' % (cost_dev_i, picp_dev_i, mpiw_dev_i))
if cost_dev_i < best_val_loss:
best_val_loss = cost_dev_i
early_stop = 0
cprint('b', 'best_val_loss')
net.save(results_dir_split + '/theta_best_val_' + str(iii) + '.dat')
else:
early_stop += 1
if early_stop > 20 and i > nb_epochs / 2:
break
toc0 = time.time()
runtime_per_it = (toc0 - tic0) / float(nb_epochs)
cprint('r', ' average time: %f seconds\n' % runtime_per_it)
## ---------------------------------------------------------------------------------------------------------------------
# results
net.load(results_dir_split + '/theta_best_val_' + str(iii) + '.dat')
cprint('c', '\nRESULTS:')
nb_parameters = net.get_nb_parameters()
net.set_mode_train(False)
cost_test = 0
picp_test = 0
mpiw_test = 0
start = 0
for j, (x, y) in enumerate(testloader):
end = start + len(x)
cost, picp, mpiw, out = net.eval(x, y)
y_pred_all[iii, start:end, :] = out.cpu().numpy()
start = end
cost_test += cost
picp_test += picp
mpiw_test += mpiw
cost_test /= len(testloader)
picp_test /= len(testloader)
mpiw_test /= len(testloader)
best_cost_dev = np.min(np.array(cost_dev))
best_cost_train = np.min(np.array(pred_cost_train))
picp_dev_min = np.array(PICP_dev)[::nb_its_dev].min()
mpiw_dev_min = np.array(MPIW_dev)[::nb_its_dev].min()
print(' cost_test: %f ' % (cost_test))
print(' picp_test: %f' % (picp_test))
print(' mpiw_test: %f' % (mpiw_test))
print(' cost_dev: %f (cost_train %f)' % (best_cost_dev, best_cost_train))
print(' picp_dev: %f' % (picp_dev_min))
print(' mpiw_dev: %f' % (mpiw_dev_min))
print(' nb_parameters: %d (%s)' % (nb_parameters, humansize(nb_parameters)))
print(' time_per_it: %fs\n' % (runtime_per_it))
# Storing validation results
store_results(results_val_split, ['Net_%d: PICP %f MPIW %f\n' % (iii, picp_dev_min, mpiw_dev_min)])
# Storing testing results
store_results(results_test_split, ['Net_%d: PICP %f MPIW %f \n' % (iii, picp_test, mpiw_test)])
if loss_type == 'qd_soft':
y_pred_gauss_mid, y_pred_gauss_dev, y_pred_U, \
y_pred_L = pi_to_gauss(y_pred_all, lube_perc,perc_or_norm, n_std_devs)
elif loss_type == 'gauss_like': # work out bounds given mu sigma
y_pred_gauss_mid_all = y_pred_all[:, :, 0]
# occasionally may get -ves for std dev | |
# Repository: tgweber/breadp
################################################################################
# Copyright: <NAME> 2019
#
# Apache 2.0 License
#
# This file contains code related to evaluation objects
#
################################################################################
from collections import Counter
from datetime import datetime
import hashlib
import inspect
from pprint import pformat
from breadp import ChecksNotRunException
from breadp.checks.result import BooleanResult, ListResult, MetricResult
class Evaluation(object):
""" Base class and interface for Evaluation of checks of
RDPs
Attributes
----------
id: int
Identifier for the evaluation
version: str
Version of the evaluation
description: str
A short text describing the criterion evaluated (in English)
checks: list
A list of checks
Methods
-------
evaluate(self, pid) -> None
Runs the evaluation
"""
def __init__(self, checks):
self.checks = checks
self.rounded = 10
self.version = "Blank evaluations have no version"
self._id = None
@property
def description(self):
return ' '.join(inspect.getdoc(self).split("\n\n")[0].split())
@property
def name(self):
return type(self).__name__
@property
def id(self):
if self._id is None:
cids = [c.id for c in self.checks]
self._id = hashlib.md5((self.name + "".join(str(c) for c in sorted(cids))).encode()).hexdigest()[2:10]
return self._id
def evaluate(self, pid):
""" Wrapper code around each evaluation
Validates that all checks exist and have results for the given PID, returns
0 if any check failed, and otherwise returns the normalized, rounded score.
Parameters
----------
pid: pid
PID of the Research Data Product to be evaluated
"""
if len(self.checks) == 0:
raise ValueError("No checks in {}".format(type(self).__name__))
for c in self.checks:
if c.get_last_result(pid) is None:
raise ChecksNotRunException(
"{} has no result for {}".format(
type(c).__name__,
pid
)
)
if not c.log.get_by_pid(pid)[-1].result.success:
return 0
return round(self._evaluate(pid)/len(self.checks), self.rounded)
def _evaluate(self, pid):
raise NotImplementedError("must be implemented by subclasses of Evaluation")
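# Scoring sketch: _evaluate() returns a raw sum in [0, len(self.checks)];
# evaluate() returns 0 if any check's last result was unsuccessful, and
# otherwise divides by the number of checks and rounds, so e.g. two checks
# contributing 1 and 0.5 yield round(1.5 / 2, 10) == 0.75.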
class IsBetweenEvaluation(Evaluation):
""" Each check's result's item between the (included) bounds adds
(1/#items)*1/#checks to the score.
Note: Adds 0 if the check's result is neither of type MetricResult nor of
type ListResult, or if the list is empty.
Attributes
----------
low: float
Lower bound of the comparison (the lower bound is included in the comparison)
high: float
Higher bound of the comparison (the higher bound is included in the comparison)
"""
def __init__(self, checks, low, high):
Evaluation.__init__(self, checks)
self.low = low
self.high = high
self.version = "0.0.1"
@property
def description(self):
description = ' '.join(inspect.getdoc(self).split("\n\n")[0].split())
description += " The lower bound is {} the upper bound is {}.".format(self.low,
self.high)
return description
def _evaluate(self, pid):
evaluation = 0
for c in self.checks:
result = c.get_last_result(pid)
if isinstance(result, MetricResult):
if self.low <= result.outcome <= self.high:
evaluation += 1
if isinstance(result, ListResult):
if len(result.outcome) == 0:
continue
for i in result.outcome:
try:
if self.low <= i <= self.high:
evaluation += 1/len(result.outcome)
except TypeError:
pass
return evaluation
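# Worked example (hypothetical results): with low=0 and high=1, a ListResult
# outcome of [0.5, 2.0] contributes 1/2 (one of two items in bounds), while
# a MetricResult outcome of 0.3 contributes a full 1, before evaluate()
# normalizes by the number of checks.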
class IsIdenticalToEvaluation(Evaluation):
""" Each check's result identical to the comparatum adds 1/#checks to the score.
Note: if the comparatum and the result are lists, their order is NOT evaluated.
comparatum: div
object to compare to
"""
def __init__(self, checks, comparatum):
Evaluation.__init__(self, checks)
self.comparatum = comparatum
self.version = "0.0.1"
@property
def description(self):
description = ' '.join(inspect.getdoc(self).split("\n\n")[0].split())
description += " The comparatum is {}.".format(pformat(self.comparatum))
return description
def _evaluate(self, pid):
evaluation = 0
for c in self.checks:
result = c.get_last_result(pid)
if isinstance(result, ListResult) and isinstance(self.comparatum, list):
if Counter(result.outcome) == Counter(self.comparatum):
evaluation += 1
elif self.comparatum == result.outcome:
evaluation += 1
return evaluation
class ContainsAllEvaluation(Evaluation):
""" Each check's result containing all items adds 1/#checks to the score.
Note: Adds 0 when the check's result is not of type ListResult.
items: div
object to look for in ListResult
"""
def __init__(self, checks, items):
Evaluation.__init__(self, checks)
self.items = items
self.version = "0.0.1"
@property
def description(self):
description = ' '.join(inspect.getdoc(self).split("\n\n")[0].split())
description += " The items are {}.".format(pformat(self.items))
return description
def _evaluate(self, pid):
evaluation = 0
for c in self.checks:
result = c.get_last_result(pid)
if isinstance(result, ListResult) and set(self.items) <= set(result.outcome):
evaluation += 1
return evaluation
class ContainsAtLeastOneEvaluation(Evaluation):
""" Each check's result containing at least one of the items adds 1/#checks to the score.
Note: Adds 0 when the check's result is not of type ListResult.
items: div
object to look for in ListResult
"""
def __init__(self, checks, items):
Evaluation.__init__(self, checks)
self.items = items
self.version = "0.0.1"
@property
def description(self):
description = ' '.join(inspect.getdoc(self).split("\n\n")[0].split())
description += " The items are {}.".format(pformat(self.items))
return description
def _evaluate(self, pid):
evaluation = 0
for c in self.checks:
result = c.get_last_result(pid)
if isinstance(result, ListResult):
for i in self.items:
if i in result.outcome:
evaluation += 1
# only count once per check!
break
return evaluation
class DoesNotContainEvaluation(Evaluation):
""" Each check's result NOT containing one of the items adds 1/#checks to the score.
Note: Adds 0 when the check's result is not of type ListResult.
items: div
object to look for in ListResult
"""
def __init__(self, checks, items):
Evaluation.__init__(self, checks)
self.items = items
self.version = "0.0.1"
@property
def description(self):
description = ' '.join(inspect.getdoc(self).split("\n\n")[0].split())
description += " The items are {}.".format(pformat(self.items))
return description
def _evaluate(self, pid):
evaluation = 0
for c in self.checks:
result = c.get_last_result(pid)
if isinstance(result, ListResult) and len(result.outcome) > 0:
add = True
for i in self.items:
if i in result.outcome:
add = False
if add:
evaluation += 1
return evaluation
class TrueEvaluation(Evaluation):
""" Each check's result with only True values adds 1/#checks to the score.
Note: Adds 0 when the check's result is not of type ListResult (or empty)
or not of BooleanResult.
"""
def __init__(self, checks):
Evaluation.__init__(self, checks)
self.version = "0.0.1"
def _evaluate(self, pid):
evaluation = 0
for c in self.checks:
result = c.get_last_result(pid)
if isinstance(result, BooleanResult):
if result.outcome:
evaluation += 1
continue
if isinstance(result, ListResult) and len(result.outcome) > 0:
for r in result.outcome:
if not r:
break
evaluation += 1
return evaluation
class FalseEvaluation(Evaluation):
""" Each check's result with only False values adds 1/#checks to the score.
Note: Adds 0 when the check's result is not of type ListResult (or empty)
or not of BooleanResult.
"""
def __init__(self, checks):
Evaluation.__init__(self, checks)
self.version = "0.0.1"
def _evaluate(self, pid):
evaluation = 0
for c in self.checks:
result = c.get_last_result(pid)
if isinstance(result, BooleanResult):
if not result.outcome:
evaluation += 1
continue
if isinstance(result, ListResult) and len(result.outcome) > 0:
for r in result.outcome:
if r:
break
evaluation += 1
return evaluation
class TheMoreTrueTheBetterEvaluation(Evaluation):
""" Each check's result adds (#True/len(result))*1/#checks to the score.
Note: Adds 0 when the check's result is not of type ListResult (or empty)
"""
def __init__(self, checks):
Evaluation.__init__(self, checks)
self.version = "0.0.1"
def _evaluate(self, pid):
evaluation = 0
for c in self.checks:
result = c.get_last_result(pid)
if isinstance(result, ListResult) and len(result.outcome) > 0:
for item in result.outcome:
if isinstance(item, bool) and item:
evaluation += 1/len(result.outcome)
return evaluation
class TheMoreFalseTheBetterEvaluation(Evaluation):
""" Each check's result adds (#False/len(result))*1/#checks to the score.
Note: Adds 0 when the check's result is not of type ListResult (or empty)
"""
def __init__(self, checks):
Evaluation.__init__(self, checks)
self.version = "0.0.1"
def _evaluate(self, pid):
evaluation = 0
for c in self.checks:
result = c.get_last_result(pid)
if isinstance(result, ListResult) and len(result.outcome) > 0:
for item in result.outcome:
if isinstance(item, bool) and not item:
evaluation += 1/len(result.outcome)
return evaluation
class ContainsItemExactlyNTimesEvaluation(Evaluation):
""" Each check's result in which the item occurrs exactly n times
adds 1/#checks to the score.
Note: Adds 0 when the check's result is not of type ListResult (or empty).
"""
def __init__(self, checks, item, n):
Evaluation.__init__(self, checks)
self.item = item
self.n = n
self.version = "0.0.1"
def _evaluate(self, pid):
evaluation = 0
for c in self.checks:
result = c.get_last_result(pid)
if isinstance(result, ListResult) and len(result.outcome) > 0:
if self.n == result.outcome.count(self.item):
evaluation += 1
return evaluation
class InListEvaluation(Evaluation):
""" Each check's result's item in comparata adds (1/len(result))*1/#checks to the score.
Note: Adds 0 when the check's result is not of type ListResult (or empty).
"""
def __init__(self, checks, comparata):
Evaluation.__init__(self, checks)
if not isinstance(comparata, list):
raise ValueError("comparata is not a list but a {}".format(type(comparata).__name__))
self.comparata = comparata
self.version = "0.0.1"
def _evaluate(self, pid):
evaluation = 0
for c in self.checks:
result = c.get_last_result(pid)
if isinstance(result, ListResult) and len(result.outcome) > 0:
for item in result.outcome:
if item in self.comparata:
evaluation += 1/len(result.outcome)
return evaluation
class FunctionEvaluation(Evaluation):
""" The given function determines the score.
Note: This Evaluation allows to evaluate the relation of results of different checks.
There is no need to normalize the output of the function against the number
of checks involved (the | |
########################
# Created 3-8-17 by JJW
# Some general use functions for the Fragile Families Challenge
#
#
########################
import pickle
import numpy as np
import csv
import os.path
# A dict to reference outcomes by their index in the data read in
outcome_indices = {'ID': 0, 'gpa': 1, 'grit': 2, 'materialhardship': 3,
'eviction': 4, 'layoff': 5, 'jobtraining': 6}
def read_in_data(path,
care_about_mothid1=False, remove_bad_columns=True):
"""Reads in the data and removes some columns of unusable data
or non-data
Arguments:
path {string} -- path to the data file
Keyword Arguments:
care_about_mothid1 {bool} -- whether to care about the mothid1 column (default: {False})
remove_bad_columns {bool} -- whether to remove the bad columns (default: {True})
Returns:
tuple -- first element is the header of the data file, and the second element is the data, read in
"""
the_data = []
# Read in the data
with open(path, 'r') as f:
csvreader = csv.reader(f,delimiter=',')
for row in csvreader:
the_data.append(row)
# Remove some of the columns that are id values
if 'background.csv' in path:
the_data = [line[2:] for line in the_data]
# Remove the header line, save it as its own thing
header = the_data.pop(0)
# Set bounds to remove other unnecessary columns
if 'train.csv' in path:
lower_bound = lambda x: 1; upper_bound = lambda y: len(y)
elif 'background.csv' in path:
lower_bound = lambda x: 0; upper_bound = lambda y: len(y) - 1
else:
raise RuntimeError("Do not understand which file type is being passed, \
and thus do not understand which bounds to use in float conversion.")
# Now, convert numerical values to actual numbers, instead of strings
for i in range(len(the_data)):
for j in range(lower_bound(the_data[i]), upper_bound(the_data[i])):
try:
temp = float(the_data[i][j])
the_data[i][j] = temp # Try to convert to float
except ValueError: # Can't convert to float
the_data[i][j] = 'NA'
# Remove some pre-determined bad columns
if 'background.csv' in path and remove_bad_columns:
columns_to_remove = np.loadtxt("columns_to_remove.txt", dtype=int)
print "Deleting " + str(len(columns_to_remove)) + " columns from " +\
"the survey data, because all the data in those columns either " +\
"are NA, are the same value, or the columns correspond to " +\
"mother or father ID numbers."
for line in the_data:
for j in range(len(columns_to_remove)):
del line[columns_to_remove[j]]
for i in range(len(columns_to_remove)):
del header[columns_to_remove[i]]
return (header, the_data)
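# Hedged usage sketch (file names as used throughout this module):
#   outcomes_header, outcomes = read_in_data("train.csv")
#   survey_header, survey = read_in_data("background.csv")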
def remove_lines_with_all_NA(outcomes_data):
"""Removes lines from the training outcomes that have
all NA values. Since we don't know what outcomes the data
have, no use training on these guys.
Arguments:
outcomes_data {list of lists} -- contains the training outcomes to be processed
Returns:
list of lists -- the original argument but with all lines containing nothing but NA's removed
Raises:
RuntimeError -- for some reason one of the internal lists didn't match the length of the input list.
"""
all_NA = [] # A list that will be filled with Boolean values,
# specifying whether all the outcomes are NA or not.
for i in range(len(outcomes_data)): # Loop through the data
# If all six outcomes are 'NA', append True to all_NA
try:
true1 = 'NA' in outcomes_data[i][outcome_indices['gpa']]
true2 = 'NA' in outcomes_data[i][outcome_indices['grit']]
true3 = 'NA' in outcomes_data[i][outcome_indices['materialhardship']]
true4 = 'NA' in outcomes_data[i][outcome_indices['eviction']]
true5 = 'NA' in outcomes_data[i][outcome_indices['layoff']]
true6 = 'NA' in outcomes_data[i][outcome_indices['jobtraining']]
if true1 and true2 and true3 and true4 and true5 and true6:
all_NA.append(True)
else: # Else append False
all_NA.append(False)
except TypeError:
all_NA.append(False)
# Checking that all_NA is the appropriate length
if len(outcomes_data) != len(all_NA):
raise RuntimeError("For some reason, all_NA is not the proper length \
(the same length as the input)")
# Form the new list based on the elements of the old list that aren't all NA
outcomes_data_removed = [list(outcomes_data[i]) for i in range(len(outcomes_data)) if not all_NA[i]]
# Print out, letting you know how many rows are kept.
print str(len(outcomes_data_removed)) + " rows kept from the training outcomes \
out of " + str(len(outcomes_data))
# Return
return outcomes_data_removed
def match_up_data_with_training_set_of_outcomes(survey_data,
training_outcomes_data,
clean_up_training=False):
"""Match up the data rows with the corresponding outcomes
Arguments:
survey_data {array-like} -- the survey data
training_outcomes_data {list-like} -- the training outcomes
Keyword Arguments:
clean_up_training {bool} -- clean up training data if there aren't any corresponding survey data (default: {False})
Returns:
tuple -- the survey data matched up, and the training data matched up
"""
training_data_ids = []
# Get the training data outcome ids.
for i in range(len(training_outcomes_data)):
training_data_ids.append(training_outcomes_data[i][
outcome_indices['ID']])
# Match the survey data with the available training data outcomes
survey_data_to_return_temp = [list(survey_data[i]) for i in range(len(survey_data)) if
survey_data[i][-1] in training_data_ids]
# Thanks to http://jakzaprogramowac.pl/pytanie/20037,python-how-to-order-a-list-based-on-another-list
# for the approach used to order the data by id numbers
data_order = dict(zip(training_data_ids, range(len(training_data_ids))))
survey_data_to_return = sorted(survey_data_to_return_temp, key=lambda x: data_order.get(x[-1], len(data_order)))
missing_matches = []
survey_data_to_return_ids = [item[-1] for item in survey_data_to_return]
# See if any training outcomes don't have corresponding survey data
for i in range(len(training_data_ids)):
if training_data_ids[i] not in survey_data_to_return_ids:
missing_matches.append(training_data_ids[i])
if missing_matches:
print "************************"
print "There were some id's in the training set of outcomes not in " +\
"the survey question data. Specifically, " + \
str(len(missing_matches)) + " id's."
# Clean up if allowed and necessary
if not clean_up_training or not missing_matches:
if missing_matches:
print "Doing nothing about the missing data..."
else:
print "Training data cleanup is set to False"
training_data_to_return = [list(line) for line in training_outcomes_data]
else:
"Matching the training outcomes to the survey data"
training_data_to_return = [list(line) for line in training_outcomes_data]
missing_matches.sort(reverse="true")
for i in missing_matches:
training_data_to_return.pop(i)
return (survey_data_to_return, training_data_to_return)
def data_open_and_process(data_filename="background.csv",
training_outcomes_filename="train.csv",
remove_bad_columns=True):
"""Open and process the data
Keyword Arguments:
data_filename {str} -- the file name for the survey data (default: {"background.csv"})
training_outcomes_filename {str} -- the file name for the outcomes (default: {"train.csv"})
remove_bad_columns {bool} -- remove the bad columns(default: {True})
Returns:
dict -- this has all the information collected from opening and processing the data
"""
print "Reading in training outcomes"
# Read in the outcomes
training_outcomes_header, training_outcomes = read_in_data(training_outcomes_filename)
print "Done reading in the training outcomes, now reading in survey data."
# Read in the survey data
survey_data_header, survey_data = read_in_data(data_filename, remove_bad_columns=remove_bad_columns)
print "Done reading in survey data, now cleaning up training " +\
"outcomes with all NA's."
# Remove lines with all NA
outcomes_NAall_removed = remove_lines_with_all_NA(training_outcomes)
print "Now matching the survey data with the training outcomes, " +\
"to get a training data set."
# Match the survey data to the training data set
survey_data_matched, training_outcomes_matched = \
match_up_data_with_training_set_of_outcomes(survey_data,
outcomes_NAall_removed,
clean_up_training=True)
print "Now removing the id numbers from the data, so the data can be " +\
"used as is."
# Remove id numbers from the data
_ = survey_data_header.pop(-1)
_ = training_outcomes_header.pop(0)
survey_data_ids = [line.pop(-1) for line in survey_data]
survey_data_matched_to_outcomes_ids = [line.pop(-1) for line in survey_data_matched]
training_outcomes_ids = [line.pop(0) for line in training_outcomes]
training_outcomes_NAall_removed_ids = [line.pop(0) for line in outcomes_NAall_removed]
training_outcomes_matched_to_outcomes_ids = [line.pop(0) for line in training_outcomes_matched]
print "Done with input and processing."
return {'survey_data_header': survey_data_header,
'survey_data': survey_data,
'survey_data_ids': survey_data_ids,
'survey_data_matched_to_outcomes': survey_data_matched,
'survey_data_matched_to_outcomes_ids': survey_data_matched_to_outcomes_ids,
'training_outcomes_header': training_outcomes_header,
#'training_outcomes': training_outcomes,
#'training_outcomes_ids': training_outcomes_ids,
#'training_outcomes_NAall_removed': outcomes_NAall_removed,
#'training_outcomes_NAall_removed_ids': training_outcomes_NAall_removed_ids,
'training_outcomes_matched_to_outcomes': training_outcomes_matched,
'training_outcomes_matched_to_outcomes_ids': training_outcomes_matched_to_outcomes_ids}
pickle_file_name = "ffc_data.p"
def save_data_as_pickle(data, path=pickle_file_name):
pickle.dump(data, open(path, 'wb'))
def open_pickle_of_input_data(path=pickle_file_name):
return pickle.load(open(path,'rb'))
def check_if_data_exists_if_not_open_and_read(path=pickle_file_name, remove_bad_columns=True):
if os.path.isfile(path):
print "Pickle file already exists, just reading it in."
print ""
print ""
return open_pickle_of_input_data(path)
else:
print "Pickle file does not exist, now reading in and processing data"
print ""
print ""
data_loaded = data_open_and_process(remove_bad_columns=remove_bad_columns)
save_data_as_pickle(data_loaded)
return data_loaded
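# Hedged usage sketch: build the processed-data dict once, then reuse the
# pickle on later runs.
#   data = check_if_data_exists_if_not_open_and_read()
#   survey = data['survey_data_matched_to_outcomes']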
def precision_recall_etc(classification, actual_classification):
"""Given a pair of classifications and actual classifications,
calculates various assessment parameters of the classification
Parameters calculated: precision, recall, specificity, NPV, f1,
tp (true positive), tn (true negative), fp (false positive),
fn (false negative), accuracy
Arguments:
classification {[type]} -- the classifications you want to evaluate
actual_classification {[list-like]} -- the reference, actual
classifications to evaluate against
Returns:
dict -- a dictionary which can access all the values in the
description above, with keys matching the values in the
description above.
Raises:
RuntimeError -- len() of the two function arguments not the same
"""
if len(classification) != len(actual_classification): # if lengths don't match
raise RuntimeError("Lengths of arguments to accuracy_percentage \
| |
bool
_os: str
_platform: str
_python: str
_require_service: str
_runqueue_item_id: str
_save_requirements: bool
_service_transport: str
_start_datetime: datetime
_start_time: float
_tmp_code_dir: str
_tracelog: str
_unsaved_keys: Sequence[str]
_windows: bool
allow_val_change: bool
anonymous: str
api_key: str
base_url: str # The base url for the wandb api
code_dir: str
config_paths: Sequence[str]
console: str
deployment: str
disable_code: bool
disable_git: bool
disabled: bool # Alias for mode=dryrun, not supported yet
docker: str
email: str
entity: str
files_dir: str
force: bool
git_remote: str
heartbeat_seconds: int
host: str
ignore_globs: Tuple[str]
is_local: bool
label_disable: bool
launch: bool
launch_config_path: str
log_dir: str
log_internal: str
log_symlink_internal: str
log_symlink_user: str
log_user: str
login_timeout: float
magic: Union[str, bool, dict]
mode: str
notebook_name: str
problem: str
program: str
program_relpath: str
project: str
project_url: str
quiet: bool
reinit: bool
relogin: bool
resume: Union[str, int, bool]
resume_fname: str
resumed: bool # indication from the server about the state of the run (different from resume - user provided flag)
root_dir: str
run_group: str
run_id: str
run_job_type: str
run_mode: str
run_name: str
run_notes: str
run_tags: Tuple[str]
run_url: str
sagemaker_disable: bool
save_code: bool
settings_system: str
settings_workspace: str
show_colors: bool
show_emoji: bool
show_errors: bool
show_info: bool
show_warnings: bool
silent: bool
start_method: str
strict: bool
summary_errors: int
summary_warnings: int
sweep_id: str
sweep_param_path: str
sweep_url: str
symlink: bool
sync_dir: str
sync_file: str
sync_symlink_latest: str
system_sample: int
system_sample_seconds: int
timespec: str
tmp_dir: str
username: str
wandb_dir: str
table_raise_on_max_row_limit_exceeded: bool
def _default_props(self) -> Dict[str, Dict[str, Any]]:
"""
Helper method that is used in `__init__` together with the class attributes
to initialize instance attributes (individual settings) as Property objects.
Note that key names must be the same as the class attribute names.
"""
return dict(
_disable_meta={"preprocessor": _str_as_bool},
_disable_stats={"preprocessor": _str_as_bool},
_disable_viewer={"preprocessor": _str_as_bool},
_colab={
"hook": lambda _: "google.colab" in sys.modules,
"auto_hook": True,
},
_console={"hook": lambda _: self._convert_console(), "auto_hook": True},
_internal_check_process={"value": 8},
_internal_queue_timeout={"value": 2},
_jupyter={
"hook": lambda _: str(_get_python_type()) != "python",
"auto_hook": True,
},
_kaggle={"hook": lambda _: util._is_likely_kaggle(), "auto_hook": True},
_noop={"hook": lambda _: self.mode == "disabled", "auto_hook": True},
_offline={
"hook": (
lambda _: True
if self.disabled or (self.mode in ("dryrun", "offline"))
else False
),
"auto_hook": True,
},
_platform={"value": util.get_platform_name()},
_save_requirements={"value": True, "preprocessor": _str_as_bool},
_tmp_code_dir={
"value": "code",
"hook": lambda x: self._path_convert(self.tmp_dir, x),
},
_windows={
"hook": lambda _: platform.system() == "Windows",
"auto_hook": True,
},
anonymous={"validator": self._validate_anonymous},
api_key={"validator": self._validate_api_key},
base_url={
"value": "https://api.wandb.ai",
"preprocessor": lambda x: str(x).strip().rstrip("/"),
"validator": self._validate_base_url,
},
console={"value": "auto", "validator": self._validate_console},
deployment={
"hook": lambda _: "local" if self.is_local else "cloud",
"auto_hook": True,
},
disable_code={"preprocessor": _str_as_bool},
disable_git={"preprocessor": _str_as_bool},
disabled={"value": False, "preprocessor": _str_as_bool},
files_dir={
"value": "files",
"hook": lambda x: self._path_convert(
self.wandb_dir, f"{self.run_mode}-{self.timespec}-{self.run_id}", x
),
},
force={"preprocessor": _str_as_bool},
git_remote={"value": "origin"},
heartbeat_seconds={"value": 30},
ignore_globs={
"value": tuple(),
"preprocessor": lambda x: tuple(x) if not isinstance(x, tuple) else x,
},
is_local={
"hook": (
lambda _: self.base_url != "https://api.wandb.ai"
if self.base_url is not None
else False
),
"auto_hook": True,
},
label_disable={"preprocessor": _str_as_bool},
launch={"preprocessor": _str_as_bool},
log_dir={
"value": "logs",
"hook": lambda x: self._path_convert(
self.wandb_dir, f"{self.run_mode}-{self.timespec}-{self.run_id}", x
),
},
log_internal={
"value": "debug-internal.log",
"hook": lambda x: self._path_convert(self.log_dir, x),
},
log_symlink_internal={
"value": "debug-internal.log",
"hook": lambda x: self._path_convert(self.wandb_dir, x),
},
log_symlink_user={
"value": "debug.log",
"hook": lambda x: self._path_convert(self.wandb_dir, x),
},
log_user={
"value": "debug.log",
"hook": lambda x: self._path_convert(self.log_dir, x),
},
login_timeout={"preprocessor": lambda x: float(x)},
mode={"value": "online", "validator": self._validate_mode},
problem={"value": "fatal", "validator": self._validate_problem},
project={"validator": self._validate_project},
project_url={"hook": lambda _: self._project_url(), "auto_hook": True},
quiet={"preprocessor": _str_as_bool},
reinit={"preprocessor": _str_as_bool},
relogin={"preprocessor": _str_as_bool},
resume_fname={
"value": "wandb-resume.json",
"hook": lambda x: self._path_convert(self.wandb_dir, x),
},
resumed={"value": "False", "preprocessor": _str_as_bool},
run_mode={
"hook": lambda _: "offline-run" if self._offline else "run",
"auto_hook": True,
},
run_tags={
"preprocessor": lambda x: tuple(x) if not isinstance(x, tuple) else x,
},
run_url={"hook": lambda _: self._run_url(), "auto_hook": True},
sagemaker_disable={"preprocessor": _str_as_bool},
save_code={"preprocessor": _str_as_bool},
settings_system={
"value": os.path.join("~", ".config", "wandb", "settings"),
"hook": lambda x: self._path_convert(x),
},
settings_workspace={
"value": "settings",
"hook": lambda x: self._path_convert(self.wandb_dir, x),
},
show_colors={"preprocessor": _str_as_bool},
show_emoji={"preprocessor": _str_as_bool},
show_errors={"value": "True", "preprocessor": _str_as_bool},
show_info={"value": "True", "preprocessor": _str_as_bool},
show_warnings={"value": "True", "preprocessor": _str_as_bool},
silent={"value": "False", "preprocessor": _str_as_bool},
start_method={"validator": self._validate_start_method},
strict={"preprocessor": _str_as_bool},
summary_warnings={
"value": 5,
"preprocessor": lambda x: int(x),
"is_policy": True,
},
sweep_url={"hook": lambda _: self._sweep_url(), "auto_hook": True},
symlink={"preprocessor": _str_as_bool},
sync_dir={
"hook": [
lambda _: self._path_convert(
self.wandb_dir, f"{self.run_mode}-{self.timespec}-{self.run_id}"
)
],
"auto_hook": True,
},
sync_file={
"hook": lambda _: self._path_convert(
self.sync_dir, f"run-{self.run_id}.wandb"
),
"auto_hook": True,
},
sync_symlink_latest={
"value": "latest-run",
"hook": lambda x: self._path_convert(self.wandb_dir, x),
},
system_sample={"value": 15},
system_sample_seconds={"value": 2},
table_raise_on_max_row_limit_exceeded={
"value": False,
"preprocessor": _str_as_bool,
},
timespec={
"hook": (
lambda _: (
datetime.strftime(self._start_datetime, "%Y%m%d_%H%M%S")
if self._start_datetime
else None
)
),
"auto_hook": True,
},
tmp_dir={
"value": "tmp",
"hook": lambda x: (
self._path_convert(
self.wandb_dir,
f"{self.run_mode}-{self.timespec}-{self.run_id}",
x,
)
or tempfile.gettempdir()
),
},
wandb_dir={
"hook": lambda _: _get_wandb_dir(self.root_dir or ""),
"auto_hook": True,
},
)
# helper methods for validating values
@staticmethod
def _validator_factory(hint: Any) -> Callable[[Any], bool]:
"""
Factory for type validators, given a type hint:
Convert the type hint of a setting into a function
that checks if the argument is of the correct type
"""
origin, args = get_origin(hint), get_args(hint)
def helper(x: Any) -> bool:
if origin is None:
return isinstance(x, hint)
elif origin is Union:
return isinstance(x, args) if args is not None else True
else:
return (
isinstance(x, origin) and all(isinstance(y, args) for y in x)
if args is not None
else isinstance(x, origin)
)
return helper
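# Illustrative sketch of the generated validators (assuming the enclosing
# class is wandb's Settings; calls are hypothetical):
#   Settings._validator_factory(Union[str, int])("abc")   # -> True
#   Settings._validator_factory(Union[str, int])(3.14)    # -> False
#   Settings._validator_factory(Tuple[str])(("a", "b"))   # -> True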
@staticmethod
def _validate_mode(value: str) -> bool:
choices: Set[str] = {"dryrun", "run", "offline", "online", "disabled"}
if value not in choices:
raise UsageError(f"Settings field `mode`: '{value}' not in {choices}")
return True
@staticmethod
def _validate_project(value: Optional[str]) -> bool:
invalid_chars_list = list("/\\#?%:")
if value is not None:
if len(value) > 128:
raise UsageError(
f'Invalid project name "{value}": exceeded 128 characters'
)
invalid_chars = {char for char in invalid_chars_list if char in value}
if invalid_chars:
raise UsageError(
f'Invalid project name "{value}": '
f"cannot contain characters \"{','.join(invalid_chars_list)}\", "
f"found \"{','.join(invalid_chars)}\""
)
return True
@staticmethod
def _validate_start_method(value: str) -> bool:
available_methods = ["thread"]
if hasattr(multiprocessing, "get_all_start_methods"):
available_methods += multiprocessing.get_all_start_methods()
if value not in available_methods:
raise UsageError(
f"Settings field `start_method`: '{value}' not in {available_methods}"
)
return True
@staticmethod
def _validate_console(value: str) -> bool:
# choices = {"auto", "redirect", "off", "file", "iowrap", "notebook"}
choices: Set[str] = {"auto", "redirect", "off", "wrap"}
if value not in choices:
raise UsageError(f"Settings field `console`: '{value}' not in {choices}")
return True
@staticmethod
def _validate_problem(value: str) -> bool:
choices: Set[str] = {"fatal", "warn", "silent"}
if value not in choices:
raise UsageError(f"Settings field `problem`: '{value}' not in {choices}")
return True
@staticmethod
def _validate_anonymous(value: str) -> bool:
choices: Set[str] = {"allow", "must", "never", "false", "true"}
if value not in choices:
raise UsageError(f"Settings field `anonymous`: '{value}' not in {choices}")
return True
@staticmethod
def _validate_api_key(value: str) -> bool:
if len(value) > len(value.strip()):
raise UsageError("API key cannot start or end with whitespace")
# if value.startswith("local") and not self.is_local:
# raise UsageError(
# "Attempting to use a local API key to connect to https://api.wandb.ai"
# )
# todo: move here the logic from sdk/lib/apikey.py
return True
@staticmethod
def _validate_base_url(value: Optional[str]) -> bool:
"""
Validate the base url of the wandb server.
:param value: URL to validate
Based on the Django URLValidator, but with a few additional checks.
Copyright (c) Django Software Foundation and individual contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of Django nor the names of its contributors may be used
to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, | |
# Repository: zhu-xlab/So2Sat-LCZ-Classification-Demo
"""
Created on Fri June 29 15:09:53 2018
@author: <NAME>
"""
# Last modified: 10.04.2020 00:22:09 <NAME>
# commented messages
import os
import glob
import numpy as np
from osgeo import gdal
import sys
gdal.UseExceptions()
def saveProbabilityPrediction(probPred,tiffPath):
# this function save the predicted probabilities
try:
fid = gdal.Open(tiffPath)
except RuntimeError as e:
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
print("ERROR: the given data geotiff can not be open by GDAL")
print("DIRECTORY: "+tiffPath)
print("GDAL EXCEPCTION: "+e)
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
sys.exit(1)
row = fid.RasterYSize
col = fid.RasterXSize
bnd = fid.RasterCount
proj = fid.GetProjection()
geoInfo = fid.GetGeoTransform()
del(fid)
if probPred.shape[0] != row * col:
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
print("ERROR: number of patches does not suit the output size")
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
sys.exit(1)
#print(np.median(probPred[:]))
probPred = np.array(probPred*1e4)
#print(np.median(probPred[:]))
probPred = probPred.astype(np.int16)
#print(np.median(probPred[:]))
#print(probPred.shape)
prob = np.transpose(np.reshape(probPred,(row,col,17)),(2,0,1))
#print(probPred.shape)
LCZDriver = gdal.GetDriverByName('GTiff')
LCZFile = LCZDriver.Create(tiffPath, col, row, bnd, gdal.GDT_UInt16)
LCZFile.SetProjection(proj)
LCZFile.SetGeoTransform(geoInfo)
# save file with int zeros
idBnd = np.arange(0,bnd,dtype=int)
for idxBnd in idBnd:
outBand = LCZFile.GetRasterBand(int(idxBnd+1))
outBand.WriteArray(prob[idxBnd,:,:].astype(np.int16))
outBand.FlushCache()
del(outBand)
return tiffPath
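# Hedged usage sketch for saveProbabilityPrediction (names and paths are
# illustrative, not from this repo): probPred must be a (rows*cols, 17)
# float array of per-class scores; the function scales by 1e4, casts to
# int16, and overwrites the bands of the template geotiff at tiffPath.
# probPred = model.predict(dataPatches)          # hypothetical classifier
# saveProbabilityPrediction(probPred, '/tmp/LCZProb.tif')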
def saveLabelPrediction(labPred,tiffPath):
# this function saves the predicted label
try:
fid = gdal.Open(tiffPath)
except RuntimeError as e:
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
print("ERROR: the given data geotiff cannot be opened by GDAL")
print("DIRECTORY: "+tiffPath)
print("GDAL EXCEPTION: "+str(e))
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
sys.exit(1)
row = fid.RasterYSize
col = fid.RasterXSize
bnd = fid.RasterCount
proj = fid.GetProjection()
geoInfo = fid.GetGeoTransform()
del(fid)
if labPred.shape[0] != row * col:
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
print("ERROR: number of patches does not suit the output size")
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
sys.exit(1)
#print(labPred.shape)
#print(np.median(labPred[:]))
lab = np.reshape(labPred,(row,col))+1
# lab = prob.argmax(axis=2).astype(np.uint8)+1
#print(lab.shape)
#print(np.median(lab[:]))
LCZDriver = gdal.GetDriverByName('GTiff')
#print('1')
LCZFile = LCZDriver.Create(tiffPath, col, row, bnd, gdal.GDT_UInt16)
#print('2')
LCZFile.SetProjection(proj)
#print('3')
LCZFile.SetGeoTransform(geoInfo)
# save file with predicted label
#print('4')
outBand = LCZFile.GetRasterBand(1)
#print('5')
outBand.WriteArray(lab)
#print('6')
outBand.FlushCache()
#print('7')
del(outBand)
#print('8')
return tiffPath
def getPatch(data,imageCoord,patchsize):
# this function gets data patches for the given image coordinates and patch size
halfPatchSize = int(np.floor(patchsize/2))
outData = np.lib.pad(data,((0,0),(halfPatchSize,halfPatchSize),(halfPatchSize,halfPatchSize)),'symmetric')
outData = np.transpose(outData,(1,2,0))
imageCoord = imageCoord + halfPatchSize
print('INFO: Array size: ' + str(imageCoord.shape[0]) + ',' + str(patchsize) + ',' + str(patchsize) + ',' + str(data.shape[0]))
dataPatch = np.zeros((imageCoord.shape[0],patchsize,patchsize,data.shape[0]),dtype=float)
for i in range(0,imageCoord.shape[0]):
#print(i)
dataPatch[i,:,:,:] = outData[imageCoord[i,1]-halfPatchSize:imageCoord[i,1]+halfPatchSize,imageCoord[i,0]-halfPatchSize:imageCoord[i,0]+halfPatchSize,:]
return dataPatch
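# Example of what getPatch returns, assuming a (bands, H, W) input array: the
# array is mirror-padded by half a patch on each spatial edge, so patches
# centered on border pixels are still full-sized.
# data = np.random.rand(10, 100, 100)             # 10 bands, 100x100 pixels
# coords = np.array([[0, 0], [50, 50]])           # (x, y) image coordinates
# patches = getPatch(data, coords, 32)            # -> shape (2, 32, 32, 10)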
def getImageCoordByXYCoord(coord,path2Data):
# this function converts real-world coordinates to image coordinates
# Input:
# - coord -- coordinate [x,y]
# - path2Data -- path to unfiltered tiff data
#
# Output:
# - imageCoord -- image coordinates of the input real-world coordinates
#
try:
fid = gdal.Open(path2Data)
except RuntimeError as e:
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
print("ERROR: the given ground truth geotiff can not be open by GDAL")
print("DIRECTORY: "+gtPath)
print("GDAL EXCEPCTION: "+e)
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
sys.exit(1)
geoInfoData = fid.GetGeoTransform()
imageCoord = np.zeros(coord.shape)
imageCoord[:,0] = np.round((coord[:,0] - geoInfoData[0])/geoInfoData[1])
imageCoord[:,1] = np.round((geoInfoData[3] - coord[:,1])/np.abs(geoInfoData[5]))
return imageCoord.astype(int)
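# Worked example of the geotransform arithmetic above (values illustrative):
# with geoInfoData = (500000, 10, 0, 4500000, 0, -10), i.e. upper-left x,
# pixel width, row rotation, upper-left y, column rotation, pixel height,
# the world point (500100, 4499900) maps to column (500100-500000)/10 = 10
# and row (4500000-4499900)/10 = 10.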
def getCoordLCZGrid(lczPath):
# this function gets the coordinate of every cell for the LCZ classification map
# Input:
# - lczPath -- path to an initialized LCZ classification map grid
#
# Output:
# - coordCell -- the coordinate of each cell of the map grid: an N-by-2 array, where N is the number of cells; the 1st column is the x-coordinate, the 2nd the y-coordinate. The coordinates are organized line by line.
#
try:
fid = gdal.Open(lczPath)
except RuntimeError as e:
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
print("ERROR: the given ground truth geotiff can not be open by GDAL")
print("DIRECTORY: "+gtPath)
print("GDAL EXCEPCTION: "+e)
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
sys.exit(1)
# read the grid and find coordinate of each cell
row_cell = np.arange(0,fid.RasterYSize)
col_cell = np.arange(0,fid.RasterXSize)
geoInfoGrid = fid.GetGeoTransform()
xWorld = geoInfoGrid[0] + col_cell * geoInfoGrid[1]
yWorld = geoInfoGrid[3] + row_cell * geoInfoGrid[5]
[xWorld,yWorld] = np.meshgrid(xWorld,yWorld)
coordCell = np.transpose(np.stack((np.ravel(xWorld),np.ravel(yWorld)),axis=0))
return coordCell
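# The meshgrid/ravel combination above flattens the grid in row-major order,
# so coordCell[k] belongs to cell (row = k // nCols, col = k % nCols); e.g.
# for a 2x3 grid the order is (r0,c0), (r0,c1), (r0,c2), (r1,c0), ...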
def initialLCZGrids_demo(tiffData):
# this function initializes an LCZ label grid and an LCZ probability grid.
# Input:
# - tiffData -- path to sentinel-1 tiff data
# -- EXAMPLE: tiffData = '/data/hu/global_processing/01692_22142_Katowice/geocoded_subset_unfilt_dat/201706/S1B_IW_SLC__1SDV_20170601T045205_20170601T045232_005852_00A42A_504B_Orb_Cal_Deb_TC_SUB.tif'
#
# Output:
# - outputpath -- paths to the initialized LCZ label and probability geotiffs
# -- EXAMPLE:
#
# number of bands, number of LCZ classes
nbBnd = 17
# read geoinformation from the sentinel-1 tiff data
dataFile = gdal.Open(tiffData)
dataCoordSys = np.array(dataFile.GetGeoTransform())
dataCol = dataFile.RasterXSize
dataRow = dataFile.RasterYSize
# set geoinformation for the output LCZ label grid
# set resolution and coordinate for upper-left point
LCZCoordSys = dataCoordSys.copy()
LCZCoordSys[1] = 100
LCZCoordSys[5] = -100
LCZCol = np.arange(dataCoordSys[0],dataCoordSys[0]+dataCol*dataCoordSys[1],LCZCoordSys[1]).shape[0]
LCZRow = np.arange(dataCoordSys[3],dataCoordSys[3]+dataRow*dataCoordSys[5],LCZCoordSys[5]).shape[0]
# set the directory of initial grid
savePath = '/'.join(tiffData.split('/')[:-1])
savePath = savePath.replace(tiffData.split('/')[-3],'LCZ_ResNet')
if not os.path.exists(savePath):
os.makedirs(savePath)
# initial the grid and set resolution, projection, and coordinate
LCZDriver = gdal.GetDriverByName('GTiff')
LCZTiffPath = savePath+'/LCZLabel.tif'
LCZFile = LCZDriver.Create(savePath+'/LCZLabel.tif', LCZCol, LCZRow)
LCZFile.SetProjection(dataFile.GetProjection())
LCZFile.SetGeoTransform(LCZCoordSys)
# save file with int zeros
LCZLabel = np.zeros((LCZRow,LCZCol),dtype = int)
outBand = LCZFile.GetRasterBand(1)
outBand.WriteArray(LCZLabel)
outBand.FlushCache()
del(LCZDriver)
del(LCZFile)
del(outBand)
del(LCZLabel)
# initial the grid and set resolution, projection, and coordinate
LCZDriver = gdal.GetDriverByName('GTiff')
LCZFile = LCZDriver.Create(savePath+'/LCZProb.tif', LCZCol, LCZRow, nbBnd)
LCZFile.SetProjection(dataFile.GetProjection())
LCZFile.SetGeoTransform(LCZCoordSys)
# save file with int zeros
LCZLabel = np.zeros((LCZRow,LCZCol))
idBnd = np.arange(0,nbBnd,dtype=int)
for idxBnd in idBnd:
outBand = LCZFile.GetRasterBand(int(idxBnd+1))
outBand.WriteArray(LCZLabel)
outBand.FlushCache()
del(outBand)
outputpath = [savePath+'/LCZLabel.tif',savePath+'/LCZProb.tif']
return outputpath
def initialLCZGrids(tiffData):
# this function initializes an LCZ label grid and an LCZ probability grid.
# Input:
# - tiffData -- path to sentinel-1 tiff data
# -- EXAMPLE: tiffData = '/data/hu/global_processing/01692_22142_Katowice/geocoded_subset_unfilt_dat/201706/S1B_IW_SLC__1SDV_20170601T045205_20170601T045232_005852_00A42A_504B_Orb_Cal_Deb_TC_SUB.tif'
#
# Output:
# - outputpath -- paths to the initialized LCZ label and probability geotiffs
# -- EXAMPLE:
#
# number of bands, number of LCZ classes
nbBnd = 17
# read geoinformation from the sentinel-1 tiff data
dataFile = gdal.Open(tiffData)
dataCoordSys = np.array(dataFile.GetGeoTransform())
dataCol = dataFile.RasterXSize
dataRow = dataFile.RasterYSize
# set geoinformation for the output LCZ label grid
# set resolution and coordinate for upper-left point
LCZCoordSys = dataCoordSys.copy()
LCZCoordSys[1] = 100
LCZCoordSys[5] = -100
LCZCol = np.arange(dataCoordSys[0],dataCoordSys[0]+dataCol*dataCoordSys[1],LCZCoordSys[1]).shape[0]
LCZRow = np.arange(dataCoordSys[3],dataCoordSys[3]+dataRow*dataCoordSys[5],LCZCoordSys[5]).shape[0]
# set the directory of initial grid
savePath = '/'.join(tiffData.split('/')[:-1])
savePath = savePath.replace(tiffData.split('/')[-3],'LCZClaMap')
if not os.path.exists(savePath):
os.makedirs(savePath)
# initial the grid and set resolution, projection, and coordinate
LCZDriver = gdal.GetDriverByName('GTiff')
LCZTiffPath = savePath+'/LCZLabel.tif'
LCZFile = LCZDriver.Create(savePath+'/LCZLabel.tif', LCZCol, LCZRow)
LCZFile.SetProjection(dataFile.GetProjection())
LCZFile.SetGeoTransform(LCZCoordSys)
# save file with int zeros
LCZLabel = np.zeros((LCZRow,LCZCol),dtype = int)
outBand = LCZFile.GetRasterBand(1)
outBand.WriteArray(LCZLabel)
outBand.FlushCache()
del(LCZDriver)
del(LCZFile)
del(outBand)
del(LCZLabel)
# initial the grid and set resolution, projection, and coordinate
LCZDriver = gdal.GetDriverByName('GTiff')
LCZFile = LCZDriver.Create(savePath+'/LCZProb.tif', LCZCol, LCZRow, nbBnd)
LCZFile.SetProjection(dataFile.GetProjection())
LCZFile.SetGeoTransform(LCZCoordSys)
# save file with int zeros
LCZLabel = np.zeros((LCZRow,LCZCol))
idBnd = np.arange(0,nbBnd,dtype=int)
for idxBnd in idBnd:
outBand = LCZFile.GetRasterBand(int(idxBnd+1))
outBand.WriteArray(LCZLabel)
outBand.FlushCache()
del(outBand)
outputpath = [savePath+'/LCZLabel.tif',savePath+'/LCZProb.tif']
return outputpath
def initialLCZLabelGrid(tiffData,outGridDir):
# this function initializes a 100 meter resolution LCZ label grid based on the input geotiff.
# Input:
# - tiffData -- path to sentinel-1 or sentinel-2 tiff data
# - outGridDir -- path to the initialized LCZ label geotiff
#
# Output:
# - return 0 -- a LCZ tiff initialized
# read geoinformation from the sentinel-1 tiff data
dataFile = gdal.Open(tiffData)
dataCoordSys = np.array(dataFile.GetGeoTransform())
dataCol = dataFile.RasterXSize
dataRow = dataFile.RasterYSize
# set geoinformation for the output LCZ label grid
# set resolution and coordinate for upper-left point
LCZCoordSys = dataCoordSys.copy()
LCZCoordSys[1] = 100
LCZCoordSys[5] = -100
LCZCol = np.arange(dataCoordSys[0],dataCoordSys[0]+dataCol*dataCoordSys[1],LCZCoordSys[1]).shape[0]
LCZRow = np.arange(dataCoordSys[3],dataCoordSys[3]+dataRow*dataCoordSys[5],LCZCoordSys[5]).shape[0]
# set the directory of initial grid
savePath = '/'.join(outGridDir.split('/')[:-1])
if not os.path.exists(savePath):
os.makedirs(savePath)
# initial the grid and set resolution, projection, and coordinate
LCZDriver = gdal.GetDriverByName('GTiff')
LCZFile = LCZDriver.Create(outGridDir, LCZCol, LCZRow)
LCZFile.SetProjection(dataFile.GetProjection())
LCZFile.SetGeoTransform(LCZCoordSys)
# save file with int zeros
LCZLabel = np.zeros((LCZRow,LCZCol),dtype = np.int8)
outBand = LCZFile.GetRasterBand(1)
outBand.WriteArray(LCZLabel)
outBand.FlushCache()
return 0
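# Note on the grid sizing above: np.arange from the upper-left coordinate to
# upper-left + extent with a 100 m step counts how many 100 m cells cover the
# input raster, i.e. LCZCol == ceil(dataCol * pixel_width / 100). A hedged
# sanity check (paths are placeholders):
# initialLCZLabelGrid('/tmp/S1_scene.tif', '/tmp/out/LCZLabel.tif')
# grid = gdal.Open('/tmp/out/LCZLabel.tif')
# assert grid.GetGeoTransform()[1] == 100         # 100 m cell size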
def initialLCZProbGrid(tiffData,outGridDir):
# this function initializes a 100 meter resolution LCZ probability grid based on the input geotiff.
# Input:
# - tiffData -- path to sentinel-1 or sentinel-2 tiff data
# - outGridDir -- path to initialized softmax geotiff file
#
# Output:
# - return 0 -- an LCZ tiff initialized with 17 bands, each of which represents the probability that a pixel falls under a class
# -- EXAMPLE:
#
# number of bands, number of LCZ classes
nbBnd = 17
# read geoinformation from the sentinel-1 tiff data
dataFile = gdal.Open(tiffData)
dataCoordSys = np.array(dataFile.GetGeoTransform())
dataCol = dataFile.RasterXSize
dataRow = dataFile.RasterYSize
# set geoinformation for the output LCZ label grid
# set resolution and coordinate for upper-left point
LCZCoordSys = dataCoordSys.copy()
LCZCoordSys[1] = 100
LCZCoordSys[5] = -100
LCZCol = np.arange(dataCoordSys[0],dataCoordSys[0]+dataCol*dataCoordSys[1],LCZCoordSys[1]).shape[0]
LCZRow = np.arange(dataCoordSys[3],dataCoordSys[3]+dataRow*dataCoordSys[5],LCZCoordSys[5]).shape[0]
# set the directory of initial grid
savePath = '/'.join(outGridDir.split('/')[:-1])
if not os.path.exists(savePath):
os.makedirs(savePath)
# initial the grid and set resolution, projection, and coordinate
LCZDriver = gdal.GetDriverByName('GTiff')
LCZFile = LCZDriver.Create(outGridDir, LCZCol, LCZRow, nbBnd)
LCZFile.SetProjection(dataFile.GetProjection())
LCZFile.SetGeoTransform(LCZCoordSys)
# save file with int zeros
LCZProb = np.zeros((LCZRow,LCZCol),dtype = np.int16)
idBnd = np.arange(0,nbBnd,dtype=int)
for idxBnd in idBnd:
outBand = LCZFile.GetRasterBand(int(idxBnd+1))
outBand.WriteArray(LCZProb)
outBand.FlushCache()
del(outBand)
return 0
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0)],
edges=[(to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 1, 0, 0, 0), 0.5)],
timestamp=fake_timestamp)
# Preparation: Build Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 0, 0, 0)],
edges=[(to_label(cgraph, 1, 1, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 0), 0.5)],
timestamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0]]), time_stamp=fake_timestamp)
# Mincut
new_root_ids = cgraph.remove_edges(
"<NAME>", to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 1, 0, 0, 0),
[0, 0, 0], [2*cgraph.chunk_size[0], 2*cgraph.chunk_size[1], cgraph.chunk_size[2]],
mincut=True)
# Check New State
assert len(new_root_ids) == 2
assert cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0)) != cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 0))
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 0, 0, 0, 0))
assert len(partners) == 0
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 1, 0, 0, 0))
assert len(partners) == 0
leaves = np.unique(cgraph.get_subgraph_nodes(cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0))))
assert len(leaves) == 1 and to_label(cgraph, 1, 0, 0, 0, 0) in leaves
leaves = np.unique(cgraph.get_subgraph_nodes(cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 0))))
assert len(leaves) == 1 and to_label(cgraph, 1, 1, 0, 0, 0) in leaves
@pytest.mark.timeout(30)
def test_cut_no_link(self, gen_graph):
"""
No connection between 1 and 2
┌─────┬─────┐
│ A¹ │ B¹ │
│ 1 │ 2 │
│ │ │
└─────┴─────┘
"""
cgraph = gen_graph(n_layers=3)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0)],
edges=[],
timestamp=fake_timestamp)
# Preparation: Build Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 0, 0, 0)],
edges=[],
timestamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0]]), time_stamp=fake_timestamp)
res_old = cgraph.table.read_rows()
res_old.consume_all()
# Mincut
with pytest.raises(cg_exceptions.PreconditionError):
cgraph.remove_edges(
"<NAME>", to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 1, 0, 0, 0),
[0, 0, 0], [2*cgraph.chunk_size[0], 2*cgraph.chunk_size[1], cgraph.chunk_size[2]],
mincut=True)
res_new = cgraph.table.read_rows()
res_new.consume_all()
assert res_new.rows == res_old.rows
@pytest.mark.timeout(30)
def test_cut_old_link(self, gen_graph):
"""
Link between 1 and 2 got removed previously (aff = 0.0)
┌─────┬─────┐
│ A¹ │ B¹ │
│ 1┅┅╎┅┅2 │
│ │ │
└─────┴─────┘
"""
cgraph = gen_graph(n_layers=3)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0)],
edges=[(to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 1, 0, 0, 0), 0.5)],
timestamp=fake_timestamp)
# Preparation: Build Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 0, 0, 0)],
edges=[(to_label(cgraph, 1, 1, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 0), 0.5)],
timestamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0]]), time_stamp=fake_timestamp)
cgraph.remove_edges("<NAME>", to_label(cgraph, 1, 1, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 0), mincut=False)
res_old = cgraph.table.read_rows()
res_old.consume_all()
# Mincut
with pytest.raises(cg_exceptions.PreconditionError):
cgraph.remove_edges(
"<NAME>", to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 1, 0, 0, 0),
[0, 0, 0], [2*cgraph.chunk_size[0], 2*cgraph.chunk_size[1], cgraph.chunk_size[2]],
mincut=True)
res_new = cgraph.table.read_rows()
res_new.consume_all()
assert res_new.rows == res_old.rows
@pytest.mark.timeout(30)
def test_cut_indivisible_link(self, gen_graph):
"""
Sink: 1, Source: 2
Link between 1 and 2 is set to `inf` and must not be cut.
┌─────┬─────┐
│ A¹ │ B¹ │
│ 1══╪══2 │
│ │ │
└─────┴─────┘
"""
cgraph = gen_graph(n_layers=3)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0)],
edges=[(to_label(cgraph, 1, 0, 0, 0, 0),
to_label(cgraph, 1, 1, 0, 0, 0), inf)],
timestamp=fake_timestamp)
# Preparation: Build Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 0, 0, 0)],
edges=[(to_label(cgraph, 1, 1, 0, 0, 0),
to_label(cgraph, 1, 0, 0, 0, 0), inf)],
timestamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0]]),
time_stamp=fake_timestamp)
original_parents_1 = cgraph.get_all_parents(
to_label(cgraph, 1, 0, 0, 0, 0))
original_parents_2 = cgraph.get_all_parents(
to_label(cgraph, 1, 1, 0, 0, 0))
# Mincut
assert cgraph.remove_edges(
"<NAME>", to_label(cgraph, 1, 0, 0, 0, 0),
to_label(cgraph, 1, 1, 0, 0, 0),
[0, 0, 0], [2 * cgraph.chunk_size[0], 2 * cgraph.chunk_size[1],
cgraph.chunk_size[2]],
mincut=True) is None
new_parents_1 = cgraph.get_all_parents(to_label(cgraph, 1, 0, 0, 0, 0))
new_parents_2 = cgraph.get_all_parents(to_label(cgraph, 1, 1, 0, 0, 0))
assert np.all(np.array(original_parents_1) == np.array(new_parents_1))
assert np.all(np.array(original_parents_2) == np.array(new_parents_2))
class TestGraphMultiCut:
@pytest.mark.timeout(30)
def test_cut_multi_tree(self, gen_graph):
pass
class TestGraphHistory:
""" These test inadvertantly also test merge and split operations """
@pytest.mark.timeout(30)
def test_cut_merge_history(self, gen_graph):
"""
Regular link between 1 and 2
┌─────┬─────┐
│ A¹ │ B¹ │
│ 1━━┿━━2 │
│ │ │
└─────┴─────┘
(1) Split 1 and 2
(2) Merge 1 and 2
"""
cgraph = gen_graph(n_layers=3)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0)],
edges=[(to_label(cgraph, 1, 0, 0, 0, 0),
to_label(cgraph, 1, 1, 0, 0, 0), 0.5)],
timestamp=fake_timestamp)
# Preparation: Build Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 0, 0, 0)],
edges=[(to_label(cgraph, 1, 1, 0, 0, 0),
to_label(cgraph, 1, 0, 0, 0, 0), 0.5)],
timestamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0]]),
time_stamp=fake_timestamp)
first_root = cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0))
assert first_root == cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 0))
timestamp_before_split = datetime.utcnow()
split_roots = cgraph.remove_edges("<NAME>",
to_label(cgraph, 1, 0, 0, 0, 0),
to_label(cgraph, 1, 1, 0, 0, 0),
mincut=False)
assert len(split_roots) == 2
timestamp_after_split = datetime.utcnow()
merge_roots = cgraph.add_edges("<NAME>",
[to_label(cgraph, 1, 0, 0, 0, 0),
to_label(cgraph, 1, 1, 0, 0, 0)],
affinities=.4)
assert len(merge_roots) == 1
merge_root = merge_roots[0]
timestamp_after_merge = datetime.utcnow()
assert len(cgraph.get_root_id_history(first_root,
time_stamp_past=datetime.min,
time_stamp_future=datetime.max)) == 4
assert len(cgraph.get_root_id_history(split_roots[0],
time_stamp_past=datetime.min,
time_stamp_future=datetime.max)) == 3
assert len(cgraph.get_root_id_history(split_roots[1],
time_stamp_past=datetime.min,
time_stamp_future=datetime.max)) == 3
assert len(cgraph.get_root_id_history(merge_root,
time_stamp_past=datetime.min,
time_stamp_future=datetime.max)) == 4
new_roots, old_roots = cgraph.get_delta_roots(timestamp_before_split,
timestamp_after_split)
assert(len(old_roots)==1)
assert(old_roots[0]==first_root)
assert(len(new_roots)==2)
assert(np.all(np.isin(new_roots, split_roots)))
new_roots2, old_roots2 = cgraph.get_delta_roots(timestamp_after_split,
timestamp_after_merge)
assert(len(new_roots2)==1)
assert(new_roots2[0]==merge_root)
assert(len(old_roots2)==2)
assert(np.all(np.isin(old_roots2, split_roots)))
new_roots3, old_roots3 = cgraph.get_delta_roots(timestamp_before_split,
timestamp_after_merge)
assert(len(new_roots3)==1)
assert(new_roots3[0]==merge_root)
assert(len(old_roots3)==1)
assert(old_roots3[0]==first_root)
class TestGraphLocks:
@pytest.mark.timeout(30)
def test_lock_unlock(self, gen_graph):
"""
No connection between 1, 2 and 3
┌─────┬─────┐
│ A¹ │ B¹ │
│ 1 │ 3 │
│ 2 │ │
└─────┴─────┘
(1) Try lock (opid = 1)
(2) Try lock (opid = 2)
(3) Try unlock (opid = 1)
(4) Try lock (opid = 2)
"""
cgraph = gen_graph(n_layers=3)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 1),
to_label(cgraph, 1, 0, 0, 0, 2)],
edges=[],
timestamp=fake_timestamp)
# Preparation: Build Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 0, 0, 1)],
edges=[],
timestamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0]]),
time_stamp=fake_timestamp)
operation_id_1 = cgraph.get_unique_operation_id()
root_id = cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 1))
assert cgraph.lock_root_loop(root_ids=[root_id],
operation_id=operation_id_1)[0]
operation_id_2 = cgraph.get_unique_operation_id()
assert not cgraph.lock_root_loop(root_ids=[root_id],
operation_id=operation_id_2)[0]
assert cgraph.unlock_root(root_id=root_id,
operation_id=operation_id_1)
assert cgraph.lock_root_loop(root_ids=[root_id],
operation_id=operation_id_2)[0]
@pytest.mark.timeout(30)
def test_lock_expiration(self, gen_graph, lock_expired_timedelta_override):
"""
No connection between 1, 2 and 3
┌─────┬─────┐
│ A¹ │ B¹ │
│ 1 │ 3 │
│ 2 │ │
└─────┴─────┘
(1) Try lock (opid = 1)
(2) Try lock (opid = 2)
(3) Try lock (opid = 2) with retries
"""
cgraph = gen_graph(n_layers=3)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 1),
to_label(cgraph, 1, 0, 0, 0, 2)],
edges=[],
timestamp=fake_timestamp)
# Preparation: Build Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 0, 0, 1)],
edges=[],
timestamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0]]),
time_stamp=fake_timestamp)
operation_id_1 = cgraph.get_unique_operation_id()
root_id = cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 1))
assert cgraph.lock_root_loop(root_ids=[root_id],
operation_id=operation_id_1)[0]
operation_id_2 = cgraph.get_unique_operation_id()
assert not cgraph.lock_root_loop(root_ids=[root_id],
operation_id=operation_id_2)[0]
assert cgraph.lock_root_loop(root_ids=[root_id],
operation_id=operation_id_2,
max_tries=10, waittime_s=.5)[0]
@pytest.mark.timeout(30)
def test_lock_renew(self, gen_graph):
"""
No connection between 1, 2 and 3
┌─────┬─────┐
│ A¹ │ B¹ │
│ 1 │ 3 │
│ 2 │ │
└─────┴─────┘
(1) Try lock (opid = 1)
(2) Try lock (opid = 2)
(3) Try lock (opid = 2) with retries
"""
cgraph = gen_graph(n_layers=3)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 1),
to_label(cgraph, 1, 0, 0, 0, 2)],
edges=[],
timestamp=fake_timestamp)
# Preparation: Build Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 0, 0, 1)],
edges=[],
timestamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0]]),
time_stamp=fake_timestamp)
operation_id_1 = cgraph.get_unique_operation_id()
root_id = cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 1))
assert cgraph.lock_root_loop(root_ids=[root_id],
operation_id=operation_id_1)[0]
assert cgraph.check_and_renew_root_locks(root_ids=[root_id],
operation_id=operation_id_1)
@pytest.mark.timeout(30)
def test_lock_merge_lock_old_id(self, gen_graph):
"""
No connection between 1, 2 and 3
┌─────┬─────┐
│ A¹ │ B¹ │
│ 1 │ 3 │
│ 2 │ │
└─────┴─────┘
(1) Merge (includes lock opid 1)
(2) Try lock opid 2 --> should be successful and return new root id
"""
vpc_name
) # Add : to separate vpc name from ou/account
else:
attachment_name += vpc_name
if attachment_name != "": # If the name is not null tag it:
truncated_attachment_name = attachment_name[:255]
self.event["AttachmentTagsRequired"]["Name"] = truncated_attachment_name
self.logger.debug(
f"The appended TGW attachment is {truncated_attachment_name}"
)
def _update_event_with_vpc_tags(self, tags):
try:
self.logger.info(
EXECUTING
+ self.__class__.__name__
+ "/"
+ inspect.stack()[0][3]
)
self.logger.info(
"Update event with VPC tags if the event source is 'Subnet'"
)
if isinstance(tags, list):
for tag in tags:
self._match_keys_with_tag(tag.get("Key"), tag.get("Value"))
elif isinstance(tags, dict):
for key, value in tags.items():
self._match_keys_with_tag(key, value)
if "AttachmentTagsRequired" not in self.event:
self.event.update({"AttachmentTagsRequired": {}})
self._update_event_with_tgw_attachment_name()
except Exception as e:
message = self._message(inspect.stack()[0][3], e)
self.logger.exception(message)
raise
def _describe_subnet(self):
try:
self.logger.info(
EXECUTING
+ self.__class__.__name__
+ "/"
+ inspect.stack()[0][3]
)
ec2 = self._session(self.spoke_account_id)
# describe the subnet
response = ec2.describe_subnets(self.event.get("SubnetId"))
self._print("Describe Subnet", response)
# the response should be a list with a single item
self._check_list_length(response, 1)
# update event with subnet details
index = 0
subnet = response[index]
# vpc id associated with this subnet
self.event.update({"VpcId": subnet.get("VpcId")})
# availability zone
self.event.update(
{"AvailabilityZone": subnet.get("AvailabilityZone")}
)
tag_key_list = []
for tag in subnet.get("Tags"):
tag_key_list.append(tag.get("Key").lower().strip())
self._print("list of tag keys", tag_key_list)
# check if tags exist for the subnet
if environ.get("ATTACHMENT_TAG").lower().strip() in tag_key_list:
self.logger.info(
"Found attachment tag for the subnet: {}".format(
self.event.get("SubnetId")
)
)
# help us decide if we can remove this subnet from the attachment
self.event.update({"SubnetTagFound": "yes"})
else:
self.event.update({"SubnetTagFound": "no"})
return self.event
except Exception as e:
message = self._message(inspect.stack()[0][3], e)
self.logger.exception(message)
raise
def _describe_route_tables_for_subnet(self):
try:
self.logger.info(
EXECUTING
+ self.__class__.__name__
+ "/"
+ inspect.stack()[0][3]
)
ec2 = self._session(self.spoke_account_id)
# describe the explicit route table association with the subnet
response = ec2.describe_route_tables_for_subnet(
self.event.get("SubnetId")
)
self._print("Describe Route Table for Subnets", response)
# handle the scenario where there is no explicit association between the subnet and a route table
if len(response) != 0:
# update event with subnet details
index = 0
route_table = response[index]
# route table associated with this subnet
self.event.update(
{"RouteTableId": route_table.get("RouteTableId")}
)
routes = route_table.get("Routes")
return routes
else:
self.logger.info(
"There is no explicit route table association with the tagged subnet: {}".format(
self.event.get("SubnetId")
)
)
self.event.update({"RouteTableId": "No-Explicit-RT"})
return None
except Exception as e:
message = self._message(inspect.stack()[0][3], e)
self.logger.exception(message)
raise
def _find_existing_default_route(self, existing_routes, destination_route):
try:
self.logger.info(
EXECUTING
+ self.__class__.__name__
+ "/"
+ inspect.stack()[0][3]
)
gateway_id = None
# set default flags
self.event.update({"DefaultRouteToTgwExists": "no"})
self.event.update({"DestinationRouteExists": "no"})
for route in existing_routes:
if route.get("DestinationCidrBlock") == destination_route:
# if destination route already exists in the route table - set flag
self.event.update({"DestinationRouteExists": "yes"})
self.logger.info(
"Found route: {} in the route table.".format(
destination_route
)
)
# Check if default route has Transit gateway as the target
if route.get("TransitGatewayId") is not None:
comment = "Found Transit Gateway as a target to the default route: {}".format(
destination_route
)
self.event.update({"DefaultRouteToTgwExists": "yes"})
self.logger.info(comment)
gateway_id = route.get("TransitGatewayId")
self._print("Transit Gateway Id", gateway_id)
# Check if default route has Internet gateway as the target
elif route.get("GatewayId") is not None:
comment = "Found existing gateway as a target to the default route: {}".format(
destination_route
)
self.logger.info(comment)
gateway_id = route.get("GatewayId")
self._print("Gateway Id", gateway_id)
# Check if default route has NAT gateway as the target
elif route.get("NatGatewayId") is not None:
comment = "Found NAT Gateway as a target to the default route: {}".format(
destination_route
)
self.logger.info(comment)
gateway_id = route.get("NatGatewayId")
self._print("NAT Gateway Id", gateway_id)
elif route.get("VpcPeeringConnectionId") is not None:
comment = "Found VPC Peering Connection as a target to the default route: {}".format(
destination_route
)
self.logger.info(comment)
gateway_id = route.get("VpcPeeringConnectionId")
self._print("Peering Connection Id", gateway_id)
else:
self.logger.info(
"Found an existing target for the default route."
)
gateway_id = "custom-target"
self._print("Route", route)
# update event with gateway id
self.event.update({"GatewayId": gateway_id})
except Exception as e:
message = self._message(inspect.stack()[0][3], e)
self.logger.exception(message)
raise
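# Illustration of the flag logic above (the route dict mimics the EC2
# DescribeRouteTables schema; values are made up):
# routes = [{"DestinationCidrBlock": "0.0.0.0/0", "TransitGatewayId": "tgw-1"}]
# self._find_existing_default_route(routes, "0.0.0.0/0")
# # -> event["DestinationRouteExists"] == "yes",
# #    event["DefaultRouteToTgwExists"] == "yes", event["GatewayId"] == "tgw-1"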
def _create_route(self, ec2, destination):
"""
This function creates routes in the route table associated with the
tagged subnet.
:param ec2: ec2 session
:param destination: destination that would have the TGW as the
target. Destination can be a CIDR block or prefix list.
:return: None
"""
try:
self.logger.info(
EXECUTING
+ self.__class__.__name__
+ "/"
+ inspect.stack()[0][3]
)
if (
self.event.get("DefaultRouteToTgwExists") == "no"
and self.event.get("DestinationRouteExists") == "no"
):
self.logger.info(
f"Adding destination : {destination} to TGW gateway: "
f"{environ.get('TGW_ID')} into the route table:"
f" {self.event.get('RouteTableId')}"
)
if destination.startswith("pl-"):
ec2.create_route_prefix_list(
destination,
self.event.get("RouteTableId"),
environ.get("TGW_ID"),
)
else:
ec2.create_route_cidr_block(
destination,
self.event.get("RouteTableId"),
environ.get("TGW_ID"),
)
self._create_tag(
self.event.get("RouteTableId"),
"RouteTable",
"Route(s) added to the route table.",
)
except Exception as e:
message = self._message(inspect.stack()[0][3], e)
self.logger.exception(message)
self._create_tag(
self.event.get("RouteTableId"), "RouteTable-Error", e
)
raise
def _delete_route(self, ec2, destination):
"""
This function deletes routes in the route table associated
with the tagged subnet.
:param ec2: ec2 session
:param destination: destination that would have the TGW as the
target. Destination can be a CIDR block or prefix list.
:return: None
"""
try:
self.logger.info(
EXECUTING
+ self.__class__.__name__
+ "/"
+ inspect.stack()[0][3]
)
if (
self.event.get("DefaultRouteToTgwExists") == "yes"
and self.event.get("DestinationRouteExists") == "yes"
):
self.logger.info(
f"Removing destination : {destination} "
f"to TGW gateway: {environ.get('TGW_ID')} "
f"from the route table:"
f" {self.event.get('RouteTableId')}"
)
if destination.startswith("pl-"):
ec2.delete_route_prefix_list(
destination, self.event.get("RouteTableId")
)
else:
ec2.delete_route_cidr_block(
destination, self.event.get("RouteTableId")
)
self._create_tag(
self.event.get("RouteTableId"),
"RouteTable",
"Route(s) removed from the route table.",
)
except Exception as e:
message = self._message(inspect.stack()[0][3], e)
self.logger.exception(message)
self._create_tag(
self.event.get("RouteTableId"), "RouteTable-Error", e
)
raise
def _update_route_table(self, ec2, route):
try:
self.logger.info(
EXECUTING
+ self.__class__.__name__
+ "/"
+ inspect.stack()[0][3]
)
# if adding subnet to tgw attachment - create route
# else if removing subnet from tgw attachment - delete route
if (
self.event.get("Action") == "AddSubnet"
or self.event.get("Action") == "CreateTgwVpcAttachment"
):
# create route in spoke account route table
self._create_route(ec2, route)
elif (
self.event.get("Action") == "RemoveSubnet"
or self.event.get("Action") == "DeleteTgwVpcAttachment"
):
# delete route from spoke account route table
self._delete_route(ec2, route)
except Exception as e:
message = self._message(inspect.stack()[0][3], e)
self.logger.exception(message)
raise
def default_route_crud_operations(self):
try:
self.logger.info(
EXECUTING
+ self.__class__.__name__
+ "/"
+ inspect.stack()[0][3]
)
# SubnetId is absent when a VPC (not a subnet) was tagged, so proceed only when a subnet was tagged
if self.event.get("SubnetId") is not None:
ec2 = self._session(self.spoke_account_id)
existing_routes = self._describe_route_tables_for_subnet()
# handles the case if the subnet has no association with
# explicit route table
if existing_routes is None:
return self.event
# allowed values in hub CFN template
# "All-Traffic (0/0)"
# "RFC-1918 (10/8, 172.16/12, 192.168/16)"
# "Custom Destinations"
# "Configure-Manually
quad_zero_route = environ.get("ALL_TRAFFIC") # 0.0.0.0/0
rfc_1918_routes = convert_string_to_list(
environ.get("RFC_1918_ROUTES")
)
if "All-Traffic" in environ.get("DEFAULT_ROUTE"):
self._find_existing_default_route(
existing_routes, quad_zero_route
)
self._update_route_table(ec2, quad_zero_route)
elif "RFC-1918" in environ.get("DEFAULT_ROUTE"):
for route in rfc_1918_routes:
self._find_existing_default_route(
existing_routes, route
)
self._update_route_table(ec2, route)
elif "Custom-Destinations" in environ.get("DEFAULT_ROUTE"):
self.update_route_table_with_cidr_blocks(
ec2, existing_routes
)
self.update_route_table_with_prefix_lists(
ec2, existing_routes
)
elif "Configure-Manually" in environ.get("DEFAULT_ROUTE"):
self.logger.info(
"Admin opted to configure route table manually"
)
return self.event
except Exception as e:
message = self._message(inspect.stack()[0][3], e)
self.logger.exception(message)
raise
def update_route_table_with_cidr_blocks(self, ec2, existing_routes):
cidr_blocks = convert_string_to_list(environ.get("CIDR_BLOCKS"))
if len(cidr_blocks) > 0:
for route in cidr_blocks:
self.logger.info(f"Adding route: {route}")
self._find_existing_default_route(existing_routes, route)
self._update_route_table(ec2, route)
def update_route_table_with_prefix_lists(self, ec2, existing_routes):
prefix_lists = convert_string_to_list(environ.get("PREFIX_LISTS"))
if len(prefix_lists) > 0:
for prefix_list_id in prefix_lists:
self.logger.info(f"Adding prefix list id: {prefix_list_id}")
self._find_existing_default_route(
existing_routes, prefix_list_id
)
self._update_route_table(ec2, prefix_list_id)
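# convert_string_to_list is defined elsewhere in the solution; the two
# helpers above assume it turns a comma-separated env string into a trimmed
# list (sketch of the assumed behavior, not the actual implementation):
# convert_string_to_list("10.0.0.0/8, 172.16.0.0/12")
# -> ["10.0.0.0/8", "172.16.0.0/12"]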
def _message(self, method, e):
return {
"FILE": __file__.split("/")[-1],
"CLASS": self.__class__.__name__,
"METHOD": method,
"EXCEPTION": str(e),
}
def _update_ddb_failed(self, e):
self.event.update({"Comment": str(e)})
self.event.update({"Status": "failed"})
ddb = DynamoDb(self.event)
ddb.put_item()
class DynamoDb:
"""
This class contains functions to persist VPC/TGW attachment events in DynamoDB
"""
def __init__(self, event):
self.event = event
self.logger = logging.getLogger(__name__)
self.logger.info(self.__class__.__name__ + CLASS_EVENT)
self.logger.info(event)
def _get_time_to_live(self, time):
utc_time = datetime.strptime(time, "%Y-%m-%dT%H:%M:%SZ")
epoch_time = (utc_time - datetime(1970, 1, 1)).total_seconds()
orig = datetime.fromtimestamp(int(epoch_time))
ttl = orig + timedelta(days=int(environ.get("TTL")))
return str(int((ttl - datetime(1970, 1, 1)).total_seconds()))
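# Worked example of the TTL arithmetic above: for time "2023-01-01T00:00:00Z"
# and TTL=90 (days), epoch_time is 1672531200.0 and the returned string is
# str(1672531200 + 90*86400) == "1680307200".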
# return None (string type) if the value is NoneType
def is_none(self, value):
if value is None:
return "None"
else:
return value
def put_item(self):
try:
self.logger.info(
EXECUTING
+ self.__class__.__name__
+ "/"
+ inspect.stack()[0][3]
)
ddb = DDB(environ.get("TABLE_NAME"))
# The SubnetId is the hash key for the table, and is used by the UI to get the latest event.
# If there is an association/propagation tag change on an existing VPC already added to the TGW,
# then the SubnetId will be empty (None), and thus the UI will show an entry for the latest
# event with
# cumm/conv/main_real.py
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pccm
import torch
import torch.nn.functional as F
from cumm import dtypes
from cumm import tensorview as tv
from cumm.constants import PACKAGE_ROOT
from cumm.conv.bases import NCHW, NHWC, ConvIterAlgo, ConvOpType
from cumm.conv.main import ConvMainUnitTest, gen_gemm_kernels
from cumm.conv.params import ConvProblem
from cumm.gemm import kernel
from cumm.gemm.constants import NVRTCConstants, NVRTCMode
from cumm.nvrtc import CummNVRTCModule, get_cudadevrt_path
os.environ["CUMM_DEBUG"] = "1"
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
def _asdv_test_simt_python():
np.random.seed(12315)
main_cu = ConvMainUnitTest()
lib = pccm.builder.build_pybind([main_cu],
Path(__file__).parent / "imgemm_test",
includes=[
PACKAGE_ROOT / "include",
],
namespace_root=PACKAGE_ROOT / "cumm",
build_dir=Path(__file__).parent / "build" /
"build_unittest_conv",
pybind_file_suffix=".cc",
verbose=False,
disable_anno=True)
lib_object = lib.cumm.conv.main.ConvMainUnitTest()
for params in main_cu.all_params[:6]:
if params.mask_sparse:
continue
# NCHW -> KCRS @ NCRSPQ = NKPQ
print(params.get_algo_name())
ndim = params.ndim
ker = gen_gemm_kernels(params)
# print("START", params.get_algo_name())
if ndim == 3:
HW = [56] * ndim
else:
HW = [244] * ndim
RS = [1] * ndim
N = 1
C = 128
K = 128
padding = [RS[0] // 2] * ndim
stride = [1] * ndim
dilation = [1] * ndim
out_dims = ConvProblem.calc_output_dims_python(HW, RS, padding, stride,
dilation)
PQ = out_dims
op_type = params.op_type
iter_algo = ConvIterAlgo.Analytic
if params.dtype_a == dtypes.int8:
inp = np.random.randint(-2, 2, size=[N, *HW, C]).astype(np.int8)
weight = np.random.randint(-2, 2, size=[K, *RS, C]).astype(np.int8)
output = np.random.randint(-2, 2, size=[N, *PQ, K]).astype(np.int8)
doutput = np.random.randint(-2, 2, size=[N, *PQ,
K]).astype(np.int8)
else:
inp = np.random.uniform(-1, 1, size=[N, *HW, C]).astype(
dtypes.get_npdtype(params.dtype_input))
weight = np.random.uniform(-1, 1, size=[K, *RS, C]).astype(
dtypes.get_npdtype(params.dtype_weight))
output = np.random.uniform(-1, 1, size=[N, *PQ, K]).astype(
dtypes.get_npdtype(params.dtype_output))
doutput = np.random.uniform(-1, 1, size=[N, *PQ, K]).astype(
dtypes.get_npdtype(params.dtype_output))
nhwc_to_nchw_inds = [0, ndim + 1, *range(1, ndim + 1)]
nchw_to_nhwc_inds = [0, *range(2, ndim + 2), 1]
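# For ndim == 2 the index lists above are [0, 3, 1, 2] and [0, 2, 3, 1]; for
# ndim == 3 they are [0, 4, 1, 2, 3] and [0, 2, 3, 4, 1]. They move the
# channel axis between the channels-last layout the cumm kernels consume and
# the channels-first layout torch expects.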
inp_th = torch.from_numpy(inp).permute(*nhwc_to_nchw_inds).to(
torch.float32).contiguous()
weight_th = torch.from_numpy(weight).permute(*nhwc_to_nchw_inds).to(
torch.float32).contiguous()
output_th = torch.from_numpy(output).permute(*nhwc_to_nchw_inds).to(
torch.float32).contiguous()
doutput_th = torch.from_numpy(doutput).permute(*nhwc_to_nchw_inds).to(
torch.float32).contiguous()
if params.dtype_a.itemsize() != 1:
inp_th = inp_th.cuda()
weight_th = weight_th.cuda()
output_th = output_th.cuda()
doutput_th = doutput_th.cuda()
inp_th.requires_grad = True
weight_th.requires_grad = True
th_t = time.time()
if ndim == 1:
out_ref = F.conv1d(inp_th,
weight_th,
padding=padding,
stride=stride,
dilation=dilation)
elif ndim == 2:
out_ref = F.conv2d(inp_th,
weight_th,
padding=padding,
stride=stride,
dilation=dilation)
elif ndim == 3:
out_ref = F.conv3d(inp_th,
weight_th,
padding=padding,
stride=stride,
dilation=dilation)
else:
raise NotImplementedError
torch.cuda.synchronize()
print("TORCH time", time.time() - th_t)
th_t = time.time()
if params.dtype_a.itemsize() != 1:
out_ref.backward(doutput_th)
torch.cuda.synchronize()
print("TORCH BW time", time.time() - th_t)
out_ref_nhwc = out_ref.detach().permute(
*nchw_to_nhwc_inds).contiguous().cpu().numpy()
if params.dtype_a.itemsize() != 1:
din_ref_nhwc = inp_th.grad.detach().permute(
*nchw_to_nhwc_inds).contiguous().cpu().numpy()
dw_ref_nhwc = weight_th.grad.detach().permute(
*nchw_to_nhwc_inds).contiguous().cpu().numpy()
else:
din_ref_nhwc = np.zeros_like(inp)
dw_ref_nhwc = np.zeros_like(weight)
# print("WTF PREPARED")
if params.op_type == ConvOpType.kBackwardInput:
inp_tv = tv.zeros(inp.shape, params.dtype_input.tv_dtype, 0)
else:
inp_tv = tv.from_numpy(inp).cuda()
if params.op_type == ConvOpType.kBackwardWeight:
weight_tv = tv.zeros(weight.shape, params.dtype_weight.tv_dtype, 0)
else:
weight_tv = tv.from_numpy(weight).cuda()
if params.op_type == ConvOpType.kForward:
output_tv = tv.zeros(output.shape, params.dtype_output.tv_dtype, 0)
else:
output_tv = tv.from_numpy(doutput).cuda()
torch.cuda.synchronize()
t = time.time()
# print("CUDA PREPARED")
spk = 1
if op_type == ConvOpType.kBackwardWeight:
# TODO support splitk parallel
spk = 16
for i in range(10):
lib_object.implicit_gemm(
inp_tv,
weight_tv,
output_tv,
padding,
stride,
dilation,
ndim=ndim,
iter_algo_=params.iter_algo.value,
op_type_=params.op_type.value,
i_ltype_=params.layout_desp_input.layout_type.value,
w_ltype_=params.layout_desp_weight.layout_type.value,
o_ltype_=params.layout_desp_output.layout_type.value,
ts=params.ts,
wts=params.wts,
num_stage=params.num_stage,
dacc=params.dtype_acc.tv_dtype,
dcomp=params.dtype_comp.tv_dtype,
algo=params.algo.value,
tensorop=[0, 0, 0],
split_k_slices=spk) # type: tv.Tensor
print(time.time() - t)
if i != 9:
t = time.time()
op_duration = time.time() - t
if params.op_type == ConvOpType.kForward:
output_cpu = output_tv.cpu().numpy()
if params.dtype_a.itemsize() == 1:
output_cpu = output_cpu.astype(np.float32)
duration = time.time() - t
print(output_cpu.reshape(-1)[:10], out_ref_nhwc.reshape(-1)[:10])
print(params.get_algo_name(),
np.linalg.norm(out_ref_nhwc - output_cpu), "Time=",
op_duration)
elif params.op_type == ConvOpType.kBackwardInput:
print(ker.input_spec.tmap_b.iterations)
din_cpu = inp_tv.cpu().numpy()
duration = time.time() - t
print(din_cpu.reshape(-1)[:10], din_ref_nhwc.reshape(-1)[:10])
print(params.get_algo_name(),
np.linalg.norm(din_cpu - din_ref_nhwc), "Time=", op_duration)
else:
dw_cpu = weight_tv.cpu().numpy()
duration = time.time() - t
print(dw_cpu.reshape(-1)[:10], dw_ref_nhwc.reshape(-1)[:10])
print(params.get_algo_name(), np.linalg.norm(dw_cpu - dw_ref_nhwc),
"Time=", op_duration)
def _asdv_test_simt_python_v2():
np.random.seed(12315)
main_cu = ConvMainUnitTest()
lib_object = None
use_nvrtc = True
if not use_nvrtc:
lib = pccm.builder.build_pybind([main_cu],
Path(__file__).parent / "imgemm_test",
includes=[
PACKAGE_ROOT / "include",
],
namespace_root=PACKAGE_ROOT / "cumm",
build_dir=Path(__file__).parent / "build" /
"build_unittest_conv",
pybind_file_suffix=".cc",
verbose=False,
disable_anno=True)
lib_object = lib.cumm.conv.main.ConvMainUnitTest()
algo_cls = tv.gemm.ConvAlgoDesp
params_cls = tv.gemm.ConvParams
a = tv.zeros([3], tv.int32, 0)
nvrtc_mode = NVRTCMode.ConstantMemory
for params in main_cu.all_params:
if params.mask_sparse:
continue
# NCHW -> KCRS @ NCRSPQ = NKPQ
ndim = params.ndim
ker = gen_gemm_kernels(params)
print(ker.get_algo_name())
ker_nvrtc = gen_gemm_kernels(params, nvrtc_mode=nvrtc_mode)
ker_nvrtc.namespace = "wtf"
t = time.time()
custom_names = []
if nvrtc_mode == NVRTCMode.ConstantMemory:
custom_names = ["&wtf::params_raw"]
mod = CummNVRTCModule([ker_nvrtc],
cudadevrt_path=str(get_cudadevrt_path()),
verbose=False,
custom_names=custom_names)
# breakpoint()
# print(mod.get_ptx())
print("RTC COMPILE TIME", time.time() - t)
mod.load()
print(mod.kernels)
print("RTC COMPILE LOAD TIME", time.time() - t)
# breakpoint()
# print("START", params.get_algo_name())
if ndim == 3:
HW = [56] * ndim
else:
HW = [244] * ndim
RS = [1] * ndim
N = 1
C = 128
K = 128
padding = [RS[0] // 2] * ndim
stride = [1] * ndim
dilation = [1] * ndim
out_dims = ConvProblem.calc_output_dims_python(HW, RS, padding, stride,
dilation)
PQ = out_dims
op_type = params.op_type
iter_algo = ConvIterAlgo.Analytic
if params.dtype_a == dtypes.int8:
inp = np.random.randint(-2, 2, size=[N, *HW, C]).astype(np.int8)
weight = np.random.randint(-2, 2, size=[K, *RS, C]).astype(np.int8)
output = np.random.randint(-2, 2, size=[N, *PQ, K]).astype(np.int8)
doutput = np.random.randint(-2, 2, size=[N, *PQ,
K]).astype(np.int8)
else:
inp = np.random.uniform(-1, 1, size=[N, *HW, C]).astype(
dtypes.get_npdtype(params.dtype_input))
weight = np.random.uniform(-1, 1, size=[K, *RS, C]).astype(
dtypes.get_npdtype(params.dtype_weight))
output = np.random.uniform(-1, 1, size=[N, *PQ, K]).astype(
dtypes.get_npdtype(params.dtype_output))
doutput = np.random.uniform(-1, 1, size=[N, *PQ, K]).astype(
dtypes.get_npdtype(params.dtype_output))
nhwc_to_nchw_inds = [0, ndim + 1, *range(1, ndim + 1)]
nchw_to_nhwc_inds = [0, *range(2, ndim + 2), 1]
inp_th = torch.from_numpy(inp).permute(*nhwc_to_nchw_inds).to(
torch.float32).contiguous()
weight_th = torch.from_numpy(weight).permute(*nhwc_to_nchw_inds).to(
torch.float32).contiguous()
output_th = torch.from_numpy(output).permute(*nhwc_to_nchw_inds).to(
torch.float32).contiguous()
doutput_th = torch.from_numpy(doutput).permute(*nhwc_to_nchw_inds).to(
torch.float32).contiguous()
if params.dtype_a.itemsize() != 1:
inp_th = inp_th.cuda()
weight_th = weight_th.cuda()
output_th = output_th.cuda()
doutput_th = doutput_th.cuda()
inp_th.requires_grad = True
weight_th.requires_grad = True
with tv.measure_and_print("torch_fw", stream=torch.cuda.current_stream().cuda_stream):
if ndim == 1:
out_ref = F.conv1d(inp_th,
weight_th,
padding=padding,
stride=stride,
dilation=dilation)
elif ndim == 2:
out_ref = F.conv2d(inp_th,
weight_th,
padding=padding,
stride=stride,
dilation=dilation)
elif ndim == 3:
out_ref = F.conv3d(inp_th,
weight_th,
padding=padding,
stride=stride,
dilation=dilation)
else:
raise NotImplementedError
# print("TORCH time", time.time() - th_t)
with tv.measure_and_print("torch_bw", stream=torch.cuda.current_stream().cuda_stream):
if params.dtype_a.itemsize() != 1:
out_ref.backward(doutput_th)
# print("TORCH BW time", time.time() - th_t)
out_ref_nhwc = out_ref.detach().permute(
*nchw_to_nhwc_inds).contiguous().cpu().numpy()
if params.dtype_a.itemsize() != 1:
din_ref_nhwc = inp_th.grad.detach().permute(
*nchw_to_nhwc_inds).contiguous().cpu().numpy()
dw_ref_nhwc = weight_th.grad.detach().permute(
*nchw_to_nhwc_inds).contiguous().cpu().numpy()
else:
din_ref_nhwc = np.zeros_like(inp)
dw_ref_nhwc = np.zeros_like(weight)
# print("WTF PREPARED")
if params.op_type == ConvOpType.kBackwardInput:
inp_tv = tv.zeros(inp.shape, params.dtype_input.tv_dtype, 0)
else:
inp_tv = tv.from_numpy(inp).cuda()
if params.op_type == ConvOpType.kBackwardWeight:
weight_tv = tv.zeros(weight.shape, params.dtype_weight.tv_dtype, 0)
else:
weight_tv = tv.from_numpy(weight).cuda()
if params.op_type == ConvOpType.kForward:
output_tv = tv.zeros(output.shape, params.dtype_output.tv_dtype, 0)
else:
output_tv = tv.from_numpy(doutput).cuda()
torch.cuda.synchronize()
spk = 1
if op_type == ConvOpType.kBackwardWeight:
# TODO support splitk parallel
spk = 32
t = time.time()
algo = algo_cls(ker.problem.ndim, tv.gemm.ConvOpType(ker.problem.op_type.value))
algo.tile_shape = params.ts
algo.warp_tile_shape = params.wts
algo.num_stage = params.num_stage
algo.dacc = params.dtype_acc.tv_dtype
algo.dcomp = params.dtype_comp.tv_dtype
algo.algo = params.algo.value
algo.trans_a = params.trans_a
algo.trans_b = params.trans_b
algo.trans_c = params.trans_c
algo.element_per_access_a = ker.input_spec.input_sub_tile_shape_a[1]
algo.element_per_access_b = ker.input_spec.input_sub_tile_shape_b[1]
algo.element_per_access_c = ker.output_spec.out_iter.element_per_acc
algo.split_k_serial = params.splitk_serial
algo.dtype_a = params.dtype_a.tv_dtype
algo.dtype_b = params.dtype_b.tv_dtype
algo.dtype_c = params.dtype_c.tv_dtype
if params.tensorop is not None:
algo.tensorop = params.tensorop.shape
assert str(algo) == ker.get_algo_name()
params_cpp = params_cls(ker.problem.ndim, tv.gemm.ConvOpType(ker.problem.op_type.value))
params_cpp.conv_algo_desp = algo
params_cpp.split_k_slices = spk
params_cpp.input = inp_tv
params_cpp.weight = weight_tv
params_cpp.output = output_tv
params_cpp.padding = padding
params_cpp.stride = stride
params_cpp.dilation = dilation
params_cpp.beta = 0.0
use_dyn_parallel = False
nvrtc_params = tv.gemm.NVRTCParams()
# breakpoint()
nvrtc_params.cumodule = mod.get_cpp_object()
nvrtc_params.mode = nvrtc_mode.value
nvrtc_params.num_threads = ker.num_threads
nvrtc_params.smem_size = ker.smem_size
if nvrtc_mode == NVRTCMode.DynamicParallism:
nvrtc_params.kernel_name = mod.get_lowered_name(
"wtf::nvrtc_kernel")
elif nvrtc_mode == NVRTCMode.KernelAndCPU:
nvrtc_params.kernel_name = mod.get_lowered_name("wtf::conv_kernel")
nvrtc_params.init_kernel_name = mod.get_lowered_name(
"wtf::nvrtc_kernel_cpu_out")
nvrtc_params.param_size = mod.const_values[
f"wtf::{NVRTCConstants.SIZEOF_KEY}"]
nvrtc_params.param_storage = tv.empty([nvrtc_params.param_size],
tv.uint8, 0)
nvrtc_params.param_storage_cpu = tv.empty(
[nvrtc_params.param_size], tv.uint8, -1, pinned=True)
elif nvrtc_mode == NVRTCMode.Direct:
nvrtc_params.kernel_name = mod.get_lowered_name("wtf::conv_kernel")
elif nvrtc_mode == NVRTCMode.ConstantMemory:
nvrtc_params.kernel_name = mod.get_lowered_name("wtf::conv_kernel")
# print(nvrtc_params.kernel_name)
print(mod.get_kernel_attrs(nvrtc_params.kernel_name))
nvrtc_params.init_kernel_name = mod.get_lowered_name(
"wtf::nvrtc_kernel_cpu_out")
nvrtc_params.param_size = mod.const_values[
f"wtf::{NVRTCConstants.SIZEOF_KEY}"]
nvrtc_params.constant_name = mod.get_lowered_name(
"&wtf::params_raw")
nvrtc_params.param_storage = tv.empty([nvrtc_params.param_size],
tv.uint8, 0)
else:
raise NotImplementedError
kt = time.time()  # assumed: the source is truncated here; restored as a timing checkpoint like "t = time.time()" above
import errno
import logging
import os
import subprocess
import sys
from unittest.mock import patch
import pytest
import runez
from runez.conftest import exception_raiser
from runez.program import RunAudit, RunResult
CHATTER = runez.DEV.tests_path("chatter")
def simulate_os_error(code):
e = OSError(code)
e.errno = code
def do_raise(*_):
raise e
return do_raise
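# simulate_os_error returns a callable that ignores its arguments and always
# raises an OSError carrying the given errno; e.g. patching a reader with
# simulate_os_error(errno.EIO) makes every call raise OSError(errno 5 on Linux).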
def test_background_run(logged):
with runez.CurrentFolder(os.path.dirname(CHATTER)):
r = runez.run(CHATTER, "hello", background=True, dryrun=True, logger=True)
assert r.succeeded
assert "chatter hello &" in logged.pop()
r = runez.run(CHATTER, "hello", background=True, dryrun=False)
assert r.succeeded
assert r.pid
assert r.output is None
assert r.error is None
assert "chatter hello &" in logged.pop()
class CrashingWrite:
"""Simulate a file/stream that keeps on crashing"""
crash_counter = 0
def write(self, message):
self.crash_counter += 1
raise Exception("oops, failed to write %s" % message)
@pytest.mark.skipif(runez.SYS_INFO.platform_id.is_windows, reason="Not supported on windows")
def test_capture(monkeypatch):
with runez.CurrentFolder(os.path.dirname(CHATTER)):
# Check which finds programs in current folder
assert runez.which("chatter") == CHATTER
assert runez.shell("chatter hello") == "hello"
with runez.CaptureOutput(dryrun=True) as logged:
# Dryrun mode doesn't fail (since it doesn't actually run the program)
r = runez.run(CHATTER, "silent-fail", fatal=True)
assert r.succeeded
assert "[dryrun] " in r.output
assert r.error == ""
assert "Would run:" in logged.pop()
r = runez.run(CHATTER, "silent-fail", stdout=None, stderr=None, fatal=True)
assert r.succeeded
assert r.output is None
assert r.error is None
assert "Would run:" in logged.pop()
with runez.CaptureOutput(seed_logging=True) as logged:
# Test success
assert runez.run(CHATTER, "hello", fatal=False) == RunResult("hello", "", 0)
assert runez.run(CHATTER, "hello", fatal=True) == RunResult("hello", "", 0)
assert "chatter hello" in logged.pop()
assert runez.run(CHATTER, stdout=None) == RunResult(None, "", 0)
assert "Running:" in logged.pop()
crasher = CrashingWrite()
assert runez.run(CHATTER, "hello", fatal=True, passthrough=crasher) == RunResult("hello", "", 0)
assert crasher.crash_counter
assert "hello" in logged.pop()
# Test no-wait
r = runez.run(CHATTER, "hello", fatal=None, stdout=None, stderr=None)
assert r.exit_code is None # We don't know exit code because we didn't wait
assert r.pid
r = runez.run(CHATTER, stdout=None, stderr=None)
assert r
assert str(r) == "RunResult(exit_code=0)"
assert r.succeeded
assert r.output is None
assert r.error is None
assert r.full_output is None
r = runez.run(CHATTER, "hello", path_env={"PATH": ":.", "CPPFLAGS": " -I/usr/local/opt/openssl/include"})
assert str(r) == "RunResult(exit_code=0)"
assert r.succeeded
assert r.output == "hello"
assert r.error == ""
assert r.full_output == "hello"
# Test stderr
r = runez.run(CHATTER, "complain")
assert r.succeeded
assert r.output == ""
assert r.error == "complaining"
assert r.full_output == "complaining"
logged.pop()
# Test failure
with pytest.raises(Exception):
runez.run(CHATTER, "fail")
assert "Run failed:" in logged.pop()
r = runez.run(CHATTER, "silent-fail", fatal=False)
assert str(r) == "RunResult(exit_code=1)"
assert r.failed
assert r.error == ""
assert r.output == ""
assert r.full_output == r.error
if hasattr(subprocess.Popen, "__enter__"):
# Simulate an EIO
with patch("runez.program._read_data", side_effect=simulate_os_error(errno.EIO)):
r = runez.run(CHATTER, "fail", fatal=False, passthrough=True)
assert r.failed
assert r.exc_info is None
assert r.output == ""
assert r.error == ""
# Simulate an OSError
with patch("runez.program._read_data", side_effect=simulate_os_error(errno.EINTR)):
r = runez.run(CHATTER, "fail", fatal=False, passthrough=True)
assert r.failed
assert r.output is None
assert "failed: OSError(" in r.error
# Verify no extra "exited with code ..." message is added when pass-through had some output
logged.clear()
with pytest.raises(SystemExit):
runez.run(CHATTER, "fail", fatal=SystemExit, passthrough=True)
assert "exited with code" not in logged.pop()
with pytest.raises(runez.system.AbortException):
runez.run(CHATTER, "fail", fatal=True, passthrough=True)
assert "exited with code" not in logged.pop()
# Verify that silent pass-through gets at least mention of exit code
with pytest.raises(SystemExit):
runez.run(CHATTER, "silent-fail", fatal=SystemExit, passthrough=True)
assert "exited with code" in logged.pop()
with pytest.raises(runez.system.AbortException):
runez.run(CHATTER, "silent-fail", fatal=True, passthrough=True)
assert "exited with code" in logged.pop()
r = runez.run(CHATTER, "fail", fatal=False, passthrough=True)
assert r.failed
assert r.error == "failed"
assert r.output == "hello there"
assert r.full_output == "failed\nhello there"
r = runez.run("foo/bar", fatal=False)
assert r.exit_code == 1
assert "foo/bar is not an executable" in r.error
r = runez.run("foo-bar-no-such-program", fatal=False)
assert r.exit_code == 1
assert "is not installed (PATH=" in r.error
with monkeypatch.context() as m:
m.setattr(subprocess, "Popen", exception_raiser(OSError("testing")))
r = runez.run("python", "--version", fatal=False)
assert not r
assert r.failed
assert "python failed: OSError(" in r.error
assert r.output is None
with pytest.raises(OSError):
runez.run("python", "--version")
# Test convenience arg None filtering
logged.clear()
assert runez.run(CHATTER, "hello", "-a", 0, "-b", None, 1, 2, None, "foo bar") == RunResult("hello -a 0 1 2 foo bar", "", 0)
assert 'chatter hello -a 0 1 2 "foo bar"' in logged.pop()
@patch("runez.program.os.fork", return_value=None)
@patch("runez.program.os.setsid")
@patch("runez.program.os.open")
@patch("runez.program.os.dup2")
@patch("runez.program.os.close")
def test_daemonize(*_):
# This simply exercises code daemonize() that would otherwise run in a forked process
assert runez.program.daemonize() is None
@pytest.mark.skipif(runez.SYS_INFO.platform_id.is_windows, reason="Not supported on windows")
def test_executable(temp_folder):
with runez.CaptureOutput(dryrun=True) as logged:
assert runez.make_executable("some-file") == 1
assert "Would make some-file executable" in logged.pop()
assert runez.make_executable("some-file", logger=False) == 1
assert not logged
with runez.CaptureOutput() as logged:
assert runez.touch("some-file") == 1
assert "Touched some-file" in logged.pop()
assert runez.delete("some-file") == 1
assert "Deleted some-file" in logged.pop()
assert runez.touch("some-file", logger=logging.debug) == 1
assert "Touched some-file" in logged.pop()
assert runez.make_executable("some-file", logger=logging.debug) == 1
assert "Made 'some-file' executable" in logged.pop()
assert runez.is_executable("some-file")
assert runez.make_executable("some-file") == 0
assert not logged
assert runez.touch("some-file", logger=False) == 1
assert runez.delete("some-file", logger=False) == 1
assert not runez.is_executable("some-file")
assert not logged
assert runez.make_executable("/dev/null/some-file", fatal=False) == -1
assert "does not exist, can't make it executable" in logged.pop()
assert runez.make_executable("/dev/null/some-file", fatal=False, logger=None) == -1 # Don't log anything
assert not logged
assert runez.make_executable("/dev/null/some-file", fatal=False, logger=False) == -1 # Log errors only
assert "does not exist, can't make it executable" in logged.pop()
def test_pids():
assert not runez.check_pid(None)
assert not runez.check_pid(0)
assert not runez.check_pid("foo") # noqa, garbage given, don't crash
assert runez.check_pid(os.getpid())
assert not runez.check_pid(1) # No privilege to do this (tests shouldn't run as root)
def check_process_tree(pinfo, max_depth=10):
"""Verify that process info .parent does not recurse infinitely"""
if pinfo:
assert max_depth > 0
check_process_tree(pinfo.parent, max_depth=max_depth - 1)
def test_ps():
assert runez.PsInfo.from_pid(None) is None
assert runez.PsInfo.from_pid(0) is None
p = runez.PsInfo()
check_process_tree(p)
assert p == runez.PsInfo(0)
assert p == runez.PsInfo("0")
assert p == runez.PsInfo(os.getpid())
assert p == runez.PsInfo("%s" % os.getpid())
info = p.info
assert info["PID"] in str(p)
assert p.cmd
assert p.cmd_basename
assert p.ppid == os.getppid()
assert p.userid != p.uid
parent = p.parent
assert parent.pid == p.ppid
# Verify that both variants (user name or uid number) for UID work
uid = p.uid
userid = p.userid
p = runez.PsInfo()
if runez.to_int(info["UID"]) is None:
p.info["UID"] = uid
else:
p.info["UID"] = userid
assert p.uid == uid
assert p.userid == userid
# Edge case: verify __eq__ based on pid
p.pid = 0
assert p != runez.PsInfo(0)
def simulated_ps_output(pid, ppid, cmd):
template = "UID PID PPID CMD\n 0 {pid:>5} {ppid:>5} {cmd}"
return RunResult(output=template.format(pid=pid, ppid=ppid, cmd=cmd), code=0)
def simulated_tmux(program, *args, **_):
if program == "tmux":
return RunResult(output="3", code=0)
if program == "id":
if args[0] == "-un":
return RunResult(output="root", code=0)
return RunResult(output="0", code=0)
assert program == "ps"
pid = args[1]
if pid == 1:
return simulated_ps_output(pid, 0, "/sbin/init")
if pid == 2:
return simulated_ps_output(pid, 1, "tmux new-session ...")
if pid == 3:
return simulated_ps_output(pid, 1, "tmux attach-session ...")
if pid == -1:
return RunResult(code=1)
return simulated_ps_output(pid, 2, "/dev/null/some-test foo bar")
def test_ps_follow():
with patch("runez.program.run", side_effect=simulated_tmux):
assert runez.PsInfo.from_pid(-1) is None
bad_pid = runez.PsInfo(-1)
assert str(bad_pid) == "-1 None None"
assert bad_pid.cmd is None
assert bad_pid.cmd_basename is None
assert bad_pid.info is None
assert bad_pid.followed_parent is None
assert bad_pid.parent is None
assert bad_pid.pid == -1
assert bad_pid.ppid is None
assert bad_pid.uid is None
assert bad_pid.userid is None
assert bad_pid.parent_list(follow=True) == []
assert bad_pid.parent_list(follow=False) == []
p = runez.PsInfo()
assert p.cmd == "/dev/null/some-test foo bar"
assert p.cmd_basename == "/dev/null/some-test"  # Falls back to the first whitespace-separated token when no known executable matches
assert p.uid == 0
assert p.userid == "root"
parents = p.parent_list(follow=False)
followed_parents = p.parent_list(follow=True)
# Verify that parent_list(follow=True) follows parent processes properly
assert parents != followed_parents
assert parents == [p.parent, p.parent.parent]
assert followed_parents == [p.followed_parent, p.followed_parent.parent]
with patch("runez.program.is_executable", side_effect=lambda x: x == "/dev/null/some-test foo"):
# Edge case: verify that the lack of quoting in `ps` output is properly detected
p = runez.PsInfo()
assert p.cmd == "/dev/null/some-test foo bar"
assert p.cmd_basename == "some-test foo"
def check_ri(platform, instructions=None):
with pytest.raises(Exception) as exc:
runez.program.require_installed("foo", instructions=instructions, platform=platform)
return str(exc)
def test_require_installed(monkeypatch):
monkeypatch.setattr(runez.program, "which", lambda x: "/bin/foo")
assert runez.program.require_installed("foo") is None # Does not raise
monkeypatch.setattr(runez.program, "which", lambda x: None)
r = check_ri("macos")
assert "foo is not installed, run: `brew install foo`" in r
r = check_ri("linux")
assert "foo is not installed, run: `apt install foo`" in r
r = check_ri("macos", instructions="custom instructions")
assert "foo is not installed, custom instructions" in r
r =
dweeks - days * ddays - hours * dhours - minutes * dmins - seconds) * 1000),
3))
if milliseconds.is_integer():
milliseconds = int(milliseconds)
result = []
if years != 0:
s = "" if years == 1 else "s"
result.append(f"{years} year{s}")
if month != 0:
s = "" if month == 1 else "s"
result.append(f"{month} month{s}")
if weeks != 0:
s = "" if weeks == 1 else "s"
result.append(f"{weeks} week{s}")
if days != 0:
s = "" if days == 1 else "s"
result.append(f"{days} day{s}")
if hours != 0:
s = "" if hours == 1 else "s"
result.append(f"{hours} hour{s}")
if minutes != 0:
s = "" if minutes == 1 else "s"
result.append(f"{minutes} minute{s}")
if seconds != 0:
s = "" if seconds == 1 else "s"
result.append(f"{seconds} second{s}")
if milliseconds != 0:
s = "" if milliseconds == 1 else "s"
result.append(f"{milliseconds} millisecond{s}")
return ", ".join(result)
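# Hypothetical usage sketch, assuming the component values are derived as
# above: timeformat(90061) -> "1 day, 1 hour, 1 minute, 1 second".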
# Command to set a role to expire
@slash.slash(name="role-expire",
guild_ids=bot.guild_ids,
description="Sets when a role will expire",
options=[
create_option(
name="role",
description="the role you want to expire",
option_type=option_type["role"],
required=True
), create_option(
name="time",
description="how long you want people to have the role",
option_type=option_type["string"],
required=True
)
])
@check(has_perms)
async def expire(ctx: Context, role: discord.Role, *, time: str):
print(role.permissions)
expire_duration = Duration(time)
expire_duration = expire_duration.to_seconds()
if int(expire_duration) == 0:
await ctx.send(f"Check your syntax, /role-help")
return
roles_json_data = json.load(roles_json)
roles_json.seek(0)
found = False
t = str(role.id)
for expiring_role in roles_json_data["roles"]:
if expiring_role[0] == t:
for memberlist in RJD[t]:
memberlist[1] -= (expiring_role[1] - expire_duration)
for memberlist2 in roles_json_data[t]:
memberlist2[1] -= (expiring_role[1] - expire_duration)
expiring_role[1] = expire_duration
RJD["roles"][roles_json_data["roles"].index(expiring_role)][1] = expire_duration
print(roles_json_data)
found = True
break
if not found:
RJD["roles"].append([t, expire_duration])
RJD[t] = []
roles_json_data["roles"].append([t, expire_duration])
roles_json_data[t] = []
jsondump(roles_json_data)
await ctx.send(
f"✓ set {role.name} to expire {str(Duration(time)).replace('<Duration ', 'after ').replace('>', '')}")
@bot.command(description="Sets a role to expire", category="Moderation")
@check(has_perms)
async def expire(ctx: Context, role: discord.Role, *, time: str):
print(role.permissions)
expire_duration = Duration(time)
expire_duration = expire_duration.to_seconds()
if int(expire_duration) == 0:
await ctx.send(f"Check your syntax, $role-help")
return
roles_json_data = json.load(roles_json)
roles_json.seek(0)
found = False
t = str(role.id)
for expiring_role in roles_json_data["roles"]:
if expiring_role[0] == t:
for memberlist in RJD[t]:
memberlist[1] -= (expiring_role[1] - expire_duration)
for memberlist2 in roles_json_data[t]:
memberlist2[1] -= (expiring_role[1] - expire_duration)
expiring_role[1] = expire_duration
RJD["roles"][roles_json_data["roles"].index(expiring_role)][1] = expire_duration
print(roles_json_data)
found = True
break
if not found:
RJD["roles"].append([t, expire_duration])
RJD[t] = []
roles_json_data["roles"].append([t, expire_duration])
roles_json_data[t] = []
jsondump(roles_json_data)
await ctx.message.add_reaction("✓")
@slash.slash(name="role-unexpire",
guild_ids=bot.guild_ids,
description="Sets a role to not expire",
options=[
create_option(
name="role",
description="the role you don't want to expire",
option_type=option_type["role"],
required=True
)
])
@check(has_perms)
async def unexpire(ctx, role: discord.Role):
roles_json.seek(0)
roles_json_data = json.load(roles_json)
roles_json.seek(0)
for expiring_role in roles_json_data["roles"]:
if expiring_role[0] == str(role.id):
roles_json_data["roles"].remove(expiring_role)
RJD["roles"].remove(expiring_role)
del roles_json_data[str(role.id)]
del RJD[str(role.id)]
jsondump(roles_json_data)
await ctx.send(f"✓ set {role.name} to not expire")
@bot.command(description="Removes a role from expiration", category="Moderation")
@check(has_perms)
async def unexpire(ctx, role: discord.Role):
roles_json.seek(0)
roles_json_data = json.load(roles_json)
roles_json.seek(0)
for expiring_role in roles_json_data["roles"]:
if expiring_role[0] == str(role.id):
roles_json_data["roles"].remove(expiring_role)
RJD["roles"].remove(expiring_role)
del roles_json_data[str(role.id)]
del RJD[str(role.id)]
jsondump(roles_json_data)
await ctx.message.add_reaction("✓")
@slash.slash(name="role-help",
guild_ids=bot.guild_ids,
description="Displays the role help embed"
)
async def _help(ctx: discord.ext.commands.Context):
help_embed = discord.Embed(
title=f"Role Help",
description="slash commands",
colour=discord.Colour.blue()
)
help_embed.add_field(
name="role-expire",
value=f"_Sets a role to expire after a certain amount of time_\n\n"
f"`/role-expire <role> <time>`\n(eg, /role-expire @examplerole 1m 12s)",
inline=False
)
help_embed.add_field(
name="role-unexpire",
value=f"_Removes a role from the list of expiring roles_\n\n"
f"`/role-unexpire <role>`\n(eg, /role-unexpire @examplerole2)",
inline=False
)
help_embed.add_field(
name="AddPerm",
value=f"_Gives a role permissions to use this bot."
f" You need to have `Manage Roles` Permissions to use this command._\n\n`/addperm <role>`",
inline=False
)
help_embed.add_field(
name="DelPerm",
value=f"_Removes a role's permission to use this bot."
f" You need to have `Manage Roles` Permissions to use this command._\n\n`/delperm <role>`",
inline=False
)
help_embed.add_field(
name="ViewRoles",
value=f"_Displays the current settings._\n\n`/viewroles`",
inline=False
)
help_embed.add_field(
name="ViewPerms",
value=f"_Displays which Roles have permissions to configure the Bot._\n\n`/viewperms`",
inline=False
)
help_embed.add_field(
name="Ping",
value=f"_Displays the bots latency._\n\n`/ping`",
inline=False
)
await ctx.send(embed=help_embed)
@bot.command(name="role-help", description="Shows the role expiry settings help", category="Moderation")
async def _help(ctx: discord.ext.commands.Context):
help_embed = discord.Embed(
title=f"{bot_name} >> Help",
description="Commands",
colour=discord.Colour.blue()
)
help_embed.add_field(
name="Expire",
value=f"_Sets a role to expire after a certain amount of time_\n\n"
f"`$expire <role> <time>`\n(eg, $expire @examplerole 1m 12s)",
inline=False
)
help_embed.add_field(
name="Unexpire",
value=f"_Removes a role from the list of expiring roles_\n\n"
f"`$unexpire <role>`\n(eg, $unexpire @examplerole2)",
inline=False
)
help_embed.add_field(
name="AddPerm",
value=f"_Gives a role permissions to use this bot."
f" You need to have `Manage Roles` Permissions to use this command._\n\n`$addperm <role>`",
inline=False
)
help_embed.add_field(
name="DelPerm",
value=f"_Removes a role's permission to use this bot."
f" You need to have `Manage Roles` Permissions to use this command._\n\n`$delperm <role>`",
inline=False
)
help_embed.add_field(
name="ViewRoles",
value=f"_Displays the current settings_\n\n`$viewroles`",
inline=False
)
help_embed.add_field(
name="ViewPerms",
value=f"_Displays which Roles have permissions to configure the Bot_\n\n`$viewperms`",
inline=False
)
help_embed.add_field(
name="Ping",
value=f"_Displays the bots latency.\n\n`$ping`",
inline=False
)
await ctx.send(embed=help_embed)
@slash.slash(name="addperm",
guild_ids=bot.guild_ids,
description=f"Adds a role to manage {bot_name}",
options=[
create_option(
name="role",
description="the role",
option_type=option_type["role"],
required=True
)
])
@has_permissions(manage_roles=True)
async def addperm(ctx: Context, role: discord.Role):
r = role.id
if r not in RJD["perms"]:
RJD["perms"].append(r)
y = json.load(roles_json)
roles_json.seek(0)
y["perms"].append(r)
jsondump(y)
await ctx.send(f"✓ added {role.name} to the management team")
else:
await ctx.send("That role already has permissions!")
@bot.command(description="Adds a role to be allowed to manage the bot", category="Moderation")
@has_permissions(manage_roles=True)
async def addperm(ctx: Context, role: discord.Role):
r = role.id
if r not in RJD["perms"]:
RJD["perms"].append(r)
y = json.load(roles_json)
roles_json.seek(0)
y["perms"].append(r)
jsondump(y)
await ctx.send(f"✓ added {role.name} to the management team")
else:
await ctx.send("That role already has permissions!")
@slash.slash(name="delperm",
guild_ids=bot.guild_ids,
description=f"Removes a role from managing {bot_name}",
options=[
create_option(
name="role",
description="the role",
option_type=option_type["role"],
required=True
)
])
@has_permissions(manage_roles=True)
async def delperm(ctx: Context, role: discord.Role):
r = role.id
if r in RJD["perms"]:
RJD["perms"].remove(r)
y = json.load(roles_json)
roles_json.seek(0)
y["perms"].remove(r)
jsondump(y)
await ctx.send(f"✓ removed {role.name} from the management role")
else:
await ctx.send("I don't think that role had permissions :confused:")
@bot.command(description="Removes a role from managing the bot", category="Moderation")
@has_permissions(manage_roles=True)
async def delperm(ctx: Context, role: discord.Role):
r = role.id
if r in RJD["perms"]:
RJD["perms"].remove(r)
y = json.load(roles_json)
roles_json.seek(0)
y["perms"].remove(r)
jsondump(y)
await ctx.send(f"✓ removed {role.name} from the management role")
else:
await ctx.send("I don't think that role had permissions :confused:")
@slash.slash(name="viewroles",
guild_ids=bot.guild_ids,
description="View the roles that are set to expire"
)
async def viewroles(ctx: Context):
roles = []
for role in RJD["roles"]:
roles.append(f"<@&{role[0]}>")
expires = []
for role in RJD["roles"]:
expires.append(timeformat(role[1]))
roles_embed = discord.Embed(
title=f"{bot_name} >> Roles",
description=f"Displays all Roles you added using /expire",
colour=discord.Colour.blue()
)
roles_embed.add_field(
name="Role",
value="\n".join(roles),
inline=True
)
roles_embed.add_field(
name="Expires After",
value="\n".join(expires),
inline=True
)
await ctx.send(embed=roles_embed)
@bot.command(description="Lists the roles that are set to expire", category="Moderation")
async def viewroles(ctx: Context):
roles = []
for role in RJD["roles"]:
roles.append(f"<@&{role[0]}>")
expires = []
for role in RJD["roles"]:
expires.append(timeformat(role[1]))
roles_embed = discord.Embed(
title=f"{bot_name} >> Roles",
description=f"Displays all Roles you added using $expire",
colour=discord.Colour.blue()
)
roles_embed.add_field(
name="Role",
value="\n".join(roles),
inline=True
)
roles_embed.add_field(
name="Expires After",
value="\n".join(expires),
inline=True
)
await ctx.send(embed=roles_embed)
@slash.slash(name="viewperms",
guild_ids=bot.guild_ids,
description=f"Views the roles allowed to manage {bot_name}")
async def viewperms(ctx: Context):
perms = []
for role in RJD["perms"]:
perms.append(f"<@&{role}>")
perms_embed = discord.Embed(
title=f"{bot_name} >> Permissions",
description=f"Displays all Roles (you added using /addperm) that have permissions.",
colour=discord.Colour.blue()
)
perms_embed.add_field(
name="Role(s) with Permissions",
value="\n".join(perms),
inline=False
)
await ctx.send(embed=perms_embed)
@bot.command(description="Views the roles that are allowed to manage the bot", category="Moderation")
async def viewperms(ctx: Context):
perms = []
for role in RJD["perms"]:
perms.append(f"<@&{role}>")
perms_embed = discord.Embed(
title=f"{bot_name} >> Permissions",
description=f"Displays all Roles (you added using $addperm) that have permissions.",
colour=discord.Colour.blue()
)
perms_embed.add_field(
name="Role(s) with Permissions",
value="\n".join(perms),
inline=False
)
await ctx.send(embed=perms_embed)
@slash.slash(name="ping",
guild_ids=bot.guild_ids,
description=f"Checks {bot_name}'s ping")
async def ping(ctx):
await ctx.send(f'My ping is {round((bot.latency * 1000), 3)} ms!')
@bot.command(description=f"Checks {bot_name}'s ping", category="")
async def ping(ctx):
await ctx.send(f'My ping is {round((bot.latency * 1000), 3)} ms!')
@slash.context_menu(target=ContextMenuType.USER,
name="Who is this?",
guild_ids=bot.guild_ids)
async def context_menu(ctx: MenuContext):
user = ctx.target_author
if user.activities: # check if the user has an activity
if str(user.activities[0].type) == "ActivityType.playing":
activity = "Playing:"
elif str(user.activities[0].type) == "ActivityType.streaming":
activity = "Streaming:"
elif str(user.activities[0].type) == "ActivityType.listening":
activity = "Listening to:"
elif str(user.activities[0].type) == "ActivityType.watching":
activity = "Watching"
elif str(user.activities[0].type) == "ActivityType.custom":
activity = ""
elif str(user.activities[0].type) == "ActivityType.competing":
activity = "Competing in:"
else:
activity = "Funkiness"
has_activity = True
else: # if they don't we can't reference it
has_activity = False
top_role = user.roles[-1] # first element in roles is `@everyone` and last is top role
embed = discord.Embed(color=top_role.color, description=user.mention)
embed.set_author(name=str(user), icon_url=user.avatar_url)
import glob
import math
import warnings
import boto3
import numpy as np
from statsmodels.tools.sm_exceptions import ConvergenceWarning
from torch import optim
from codes.rl.upbit_rl_replay_buffer import PrioritizedReplayBuffer, ReplayBuffer
warnings.filterwarnings("ignore")
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=ConvergenceWarning)
import random
import sys, os
idx = os.getcwd().index("trade")
PROJECT_HOME = os.getcwd()[:idx] + "trade"
sys.path.append(PROJECT_HOME)
from codes.rl.upbit_rl_utils import BuyerAction, SellerAction
import torch.nn as nn
import torch
from codes.rl.upbit_rl_constants import GAMMA, LEARNING_RATE, TRAIN_BATCH_SIZE_PERCENT, TRAIN_REPEATS, \
BUYER_MODEL_SAVE_PATH, SELLER_MODEL_SAVE_PATH, BUYER_MODEL_FILE_NAME, S3_BUCKET_NAME, SELLER_MODEL_FILE_NAME, \
TRAIN_BATCH_MIN_SIZE, REPLAY_MEMORY_SIZE, SIZE_OF_FEATURE, SIZE_OF_FEATURE_WITHOUT_VOLUME, \
TRAIN_REPEATS_STEPS, TRAIN_BATCH_MIN_SIZE_STEPS, SIZE_OF_OHLCV_FEATURE, SIZE_OF_OHLCV_FEATURE_WITHOUT_VOLUME
is_cuda = torch.cuda.is_available()
if is_cuda:
device = torch.device("cuda")
print("GPU is available")
else:
device = torch.device("cpu")
print("GPU not available, CPU used")
s3 = boto3.client('s3')
class DeepBuyerPolicy:
def __init__(self, args=None, play=False):
self.args = args
self.play = play
if self.args.ohlc:
if self.args.volume:
self.input_size = SIZE_OF_OHLCV_FEATURE
else:
self.input_size = SIZE_OF_OHLCV_FEATURE_WITHOUT_VOLUME
else:
if self.args.volume:
self.input_size = SIZE_OF_FEATURE
else:
self.input_size = SIZE_OF_FEATURE_WITHOUT_VOLUME
if self.args.lstm:
if self.args.ohlc:
self.q = QNet_LSTM(input_size=self.input_size, hidden_size=[64, 24])
self.q_target = QNet_LSTM(input_size=self.input_size, hidden_size=[64, 24])
else:
self.q = QNet_LSTM(input_size=self.input_size, hidden_size=[256, 128])
self.q_target = QNet_LSTM(input_size=self.input_size, hidden_size=[256, 128])
else:
if self.args.ohlc:
self.q = QNet_CNN(
input_size=self.input_size, input_height=int(self.args.window_size), conv_kernel_size_list=[2, 2, 2]
)
self.q_target = QNet_CNN(
input_size=self.input_size, input_height=int(self.args.window_size), conv_kernel_size_list=[2, 2, 2]
)
else:
self.q = QNet_CNN(
input_size=self.input_size, input_height=int(self.args.window_size), conv_kernel_size_list=[3, 3, 3]
)
self.q_target = QNet_CNN(
input_size=self.input_size, input_height=int(self.args.window_size), conv_kernel_size_list=[3, 3, 3]
)
if self.play:
self.load_model()
if hasattr(self.args, "per") and self.args.per:
self.buyer_memory = PrioritizedReplayBuffer(capacity=REPLAY_MEMORY_SIZE)
else:
self.buyer_memory = ReplayBuffer(capacity=REPLAY_MEMORY_SIZE)
self.pending_buyer_transition = None
self.optimizer = optim.Adam(self.q.parameters(), lr=LEARNING_RATE)
def sample_action(self, observation, info_dic, epsilon):
action, from_model = self.q.sample_action(observation, epsilon)
if action: # 1
return BuyerAction.MARKET_BUY, from_model
else:
return BuyerAction.BUY_HOLD, from_model
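# Syncing the online network's weights into the target network is the
# standard DQN mechanism for keeping the bootstrap target stable between syncs.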
def qnet_copy_to_target_qnet(self):
self.q_target.load_state_dict(self.q.state_dict())
def save_model(self, episode, max_total_balance_per_episode,
market_profitable_buys_from_model_rate,
market_profitable_sells_from_model_rate):
self.remove_model()
buyer_model_file_path = BUYER_MODEL_SAVE_PATH.format(
"LSTM" if self.args.lstm else "CNN",
self.args.coin,
int(self.args.window_size),
self.input_size,
"{0}_{1:3.2f}_{2:3.2f}_{3:3.2f}".format(
episode, max_total_balance_per_episode, market_profitable_buys_from_model_rate, market_profitable_sells_from_model_rate
)
)
torch.save(self.q.state_dict(), buyer_model_file_path)
# buyer_model_file_name = BUYER_MODEL_FILE_NAME.format(
# "LSTM" if self.args.lstm else "CNN",
# self.args.coin,
# int(self.args.window_size),
# self.input_size,
# episode
# )
# if self.args.federated:
# s3.upload_file(
# buyer_model_file_path,
# S3_BUCKET_NAME,
# "REINFORCEMENT_LEARNING/{0}".format(buyer_model_file_name)
# )
def remove_model(self):
buyer_model_file_path = BUYER_MODEL_SAVE_PATH.format(
"LSTM" if self.args.lstm else "CNN",
self.args.coin,
int(self.args.window_size),
self.input_size,
"*"
)
for name in glob.glob(buyer_model_file_path):
os.remove(name)
def load_model(self):
last_buyer_model_file_path = BUYER_MODEL_SAVE_PATH.format(
"LSTM" if self.args.lstm else "CNN",
self.args.coin,
int(self.args.window_size),
self.input_size,
"*"
)
# last_buyer_model_file_name = BUYER_MODEL_FILE_NAME.format(
# "LSTM" if self.args.lstm else "CNN",
# self.args.coin,
# int(self.args.window_size),
# self.input_size,
# "*"
# )
for name in glob.glob(last_buyer_model_file_path):
self.q.load_state_dict(torch.load(name))
print("LOADED BY EXISTING BUYER POLICY MODEL FROM LOCAL FILE: {0}!!!\n".format(
name
))
break
# if self.args.federated:
# s3.download_file(
# S3_BUCKET_NAME,
# "REINFORCEMENT_LEARNING/{0}".format(last_buyer_model_file_name),
# last_buyer_model_file_path
# )
# self.q.load_state_dict(torch.load(last_buyer_model_file_path))
# print("LOADED BY EXISTING BUYER POLICY MODEL FROM AWS S3!!!\n")
# else:
# if os.path.exists(last_buyer_model_file_path):
# self.q.load_state_dict(torch.load(last_buyer_model_file_path))
# print("LOADED BY EXISTING BUYER POLICY MODEL FROM LOCAL STORAGE!!!\n")
# else:
# print("THERE IS NO SAVED MODEL: {0}".format(last_buyer_model_file_path))
# exit(-1)
self.qnet_copy_to_target_qnet()
def train(self, beta):
loss_lst = []
if self.args.train_episode_ends:
train_repeats = TRAIN_REPEATS
train_batch_min_size = TRAIN_BATCH_MIN_SIZE
else:
train_repeats = TRAIN_REPEATS_STEPS
train_batch_min_size = TRAIN_BATCH_MIN_SIZE_STEPS
for i in range(train_repeats):
train_batch_size = min(
train_batch_min_size,
int(self.buyer_memory.size() * TRAIN_BATCH_SIZE_PERCENT / 100)
)
indices = weights = None
if hasattr(self.args, "per") and self.args.per:
s, a, r, s_prime, done_mask, indices, weights = self.buyer_memory.sample_priority_memory(
train_batch_size, beta=beta
)
else:
s, a, r, s_prime, done_mask = self.buyer_memory.sample_memory(train_batch_size)
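# Standard DQN regression target: r + GAMMA * max_a' Q_target(s', a'),
# with done_mask zeroing the bootstrapped term at episode boundaries.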
q_out = self.q(s)
q_a = q_out.gather(1, a)
max_q_prime = self.q_target(s_prime).max(1)[0].unsqueeze(1).detach()
target = r + GAMMA * max_q_prime * done_mask
if hasattr(self.args, "per") and self.args.per:
q_a = torch.squeeze(q_a, dim=1)
target = torch.squeeze(target, dim=1)
loss = (q_a - target).pow(2) * weights
#loss = (target - q_a) * weights
prios = torch.abs(q_a - target) + 1e-5
loss = loss.mean()
loss_lst.append(loss.item())
self.buyer_memory.update_priorities(indices, prios.data.cpu().numpy())
else:
q_a = torch.squeeze(q_a, dim=1)
target = torch.squeeze(target, dim=1)
loss = (q_a - target).pow(2).mean()
loss_lst.append(loss.item())
self.optimizer.zero_grad()
loss.backward()
# for param in self.q.parameters():
# param.grad.data.clamp_(-1, 1)
self.optimizer.step()
avg_loss = np.average(loss_lst)
#print("*** Buyer Policy Trained (Loss: {0}) ***\n".format(avg_loss))
return avg_loss
def update_episode_reward(self, episode, episode_reward):
for transition in self.buyer_memory.buffer:
if transition[0] == episode:
transition[3] += episode_reward
class DeepSellerPolicy:
def __init__(self, args=None, play=False):
self.args = args
self.play = play
if self.args.ohlc:
if self.args.volume:
self.input_size = SIZE_OF_OHLCV_FEATURE
else:
self.input_size = SIZE_OF_OHLCV_FEATURE_WITHOUT_VOLUME
else:
if self.args.volume:
self.input_size = SIZE_OF_FEATURE
else:
self.input_size = SIZE_OF_FEATURE_WITHOUT_VOLUME
if self.args.lstm:
if self.args.ohlc:
self.q = QNet_LSTM(input_size=self.input_size, hidden_size=[64, 24])
self.q_target = QNet_LSTM(input_size=self.input_size, hidden_size=[64, 24])
else:
self.q = QNet_LSTM(input_size=self.input_size, hidden_size=[256, 128])
self.q_target = QNet_LSTM(input_size=self.input_size, hidden_size=[256, 128])
else:
if self.args.ohlc:
self.q = QNet_CNN(
input_size=self.input_size, input_height=int(self.args.window_size), conv_kernel_size_list=[2, 2, 2]
)
self.q_target = QNet_CNN(
input_size=self.input_size, input_height=int(self.args.window_size), conv_kernel_size_list=[2, 2, 2]
)
else:
self.q = QNet_CNN(
input_size=self.input_size, input_height=int(self.args.window_size), conv_kernel_size_list=[3, 3, 3]
)
self.q_target = QNet_CNN(
input_size=self.input_size, input_height=int(self.args.window_size), conv_kernel_size_list=[3, 3, 3]
)
if self.play:
self.load_model()
if hasattr(self.args, "per") and self.args.per:
self.seller_memory = PrioritizedReplayBuffer(capacity=REPLAY_MEMORY_SIZE)
else:
self.seller_memory = ReplayBuffer(capacity=REPLAY_MEMORY_SIZE)
self.optimizer = optim.Adam(self.q.parameters(), lr=LEARNING_RATE)
def sample_action(self, observation, info_dic, epsilon):
action, from_model = self.q.sample_action(observation, epsilon)
if action: # 1
return SellerAction.MARKET_SELL, from_model
else:
return SellerAction.SELL_HOLD, from_model
def qnet_copy_to_target_qnet(self):
self.q_target.load_state_dict(self.q.state_dict())
def save_model(self, episode, max_total_balance_per_episode,
market_profitable_buys_from_model_rate,
market_profitable_sells_from_model_rate):
self.remove_model()
seller_model_file_path = SELLER_MODEL_SAVE_PATH.format(
"LSTM" if self.args.lstm else "CNN",
self.args.coin,
int(self.args.window_size),
self.input_size,
"{0}_{1:3.2f}_{2:3.2f}_{3:3.2f}".format(
episode, max_total_balance_per_episode, market_profitable_buys_from_model_rate, market_profitable_sells_from_model_rate
)
)
torch.save(self.q.state_dict(), seller_model_file_path)
# seller_model_file_name = SELLER_MODEL_FILE_NAME.format(
# "LSTM" if self.args.lstm else "CNN",
# self.args.coin,
# int(self.args.window_size),
# SIZE_OF_FEATURE if self.args.volume else SIZE_OF_FEATURE_WITHOUT_VOLUME,
# episode
# )
# if self.args.federated:
# s3.upload_file(
# seller_model_file_path,
# S3_BUCKET_NAME,
# "REINFORCEMENT_LEARNING/{0}".format(seller_model_file_name)
# )
def remove_model(self):
seller_model_file_path = SELLER_MODEL_SAVE_PATH.format(
"LSTM" if self.args.lstm else "CNN",
self.args.coin,
int(self.args.window_size),
self.input_size,
"*"
)
for name in glob.glob(seller_model_file_path):
os.remove(name)
def load_model(self):
last_seller_model_file_path = SELLER_MODEL_SAVE_PATH.format(
"LSTM" if self.args.lstm else "CNN",
self.args.coin,
int(self.args.window_size),
self.input_size,
"*"
)
# last_seller_model_file_name = SELLER_MODEL_FILE_NAME.format(
# "LSTM" if self.args.lstm else "CNN",
# self.args.coin,
# int(self.args.window_size),
# self.input_size,
# "*"
# )
for name in glob.glob(last_seller_model_file_path):
self.q.load_state_dict(torch.load(name))
print("LOADED BY EXISTING SELLER POLICY MODEL FROM LOCAL FILE: {0}!!!\n".format(
name
))
break
# if self.args.federated:
# s3.download_file(
# S3_BUCKET_NAME,
# "REINFORCEMENT_LEARNING/{0}".format(last_seller_model_file_name),
# last_seller_model_file_path
# )
# self.q.load_state_dict(torch.load(last_seller_model_file_path))
# print("LOADED BY EXISTING SELLER POLICY MODEL FROM AWS S3!!!\n")
# else:
# if os.path.exists(last_seller_model_file_path):
# self.q.load_state_dict(torch.load(last_seller_model_file_path))
# print("LOADED BY EXISTING SELLER POLICY MODEL FROM LOCAL STORAGE!!!\n")
# else:
# print("THERE IS NO SAVED MODEL: {0}".format(last_seller_model_file_path))
# exit(-1)
self.qnet_copy_to_target_qnet()
def train(self, beta):
loss_lst = []
if self.args.train_episode_ends:
train_repeats = TRAIN_REPEATS
train_batch_min_size = TRAIN_BATCH_MIN_SIZE
else:
train_repeats = TRAIN_REPEATS_STEPS
train_batch_min_size = TRAIN_BATCH_MIN_SIZE_STEPS
for i in range(train_repeats):
train_batch_size = min(
train_batch_min_size,
int(self.seller_memory.size() * TRAIN_BATCH_SIZE_PERCENT / 100)
)
indices = weights = None
if hasattr(self.args, "per") and self.args.per:
s, a, r, s_prime, done_mask, indices, weights = self.seller_memory.sample_priority_memory(
train_batch_size, beta=beta
)
else:
s, a, r, s_prime, done_mask = self.seller_memory.sample_memory(train_batch_size)
q_out = self.q(s)
q_a = q_out.gather(1, a)
max_q_prime = self.q_target(s_prime).max(1)[0].unsqueeze(1).detach()
target = r + GAMMA * max_q_prime * done_mask
if hasattr(self.args, "per") and self.args.per:
q_a = torch.squeeze(q_a, dim=1)
target = torch.squeeze(target, dim=1)
loss = (target - q_a).pow(2) * weights
#loss = (target - q_a) * weights
prios = torch.abs(q_a - target) + 1e-5
loss = loss.mean()
loss_lst.append(loss.item())
self.seller_memory.update_priorities(indices, prios.data.cpu().numpy())
else:
q_a = torch.squeeze(q_a, dim=1)
target = torch.squeeze(target, dim=1)
loss = (q_a - target).pow(2).mean()
loss_lst.append(loss.item())
self.optimizer.zero_grad()
loss.backward()
# for param in self.q.parameters():
# param.grad.data.clamp_(-1, 1)
self.optimizer.step()
avg_loss = np.average(loss_lst)
#print("*** Seller Policy Trained (Loss: {0}) ***\n".format(avg_loss))
return avg_loss
def update_episode_reward(self, episode, episode_reward):
for transition in self.seller_memory.buffer:
if transition[0] == episode:
transition[3] += episode_reward
class QNet_CNN(nn.Module):
@staticmethod
def get_conv2d_size(w, h, kernel_size, padding_size, stride):
return math.floor((w - kernel_size + 2 * padding_size) / stride) + 1, math.floor(
(h - kernel_size + 2 * padding_size) / stride) + 1
@staticmethod
def get_pool2d_size(w, h, kernel_size, stride):
return math.floor((w - kernel_size) / stride) + 1, math.floor((h - kernel_size) / stride) + 1
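# Worked example: a 36-wide by 21-high input through a 3x3 conv (padding 0,
# stride 1) yields floor((36 - 3 + 0) / 1) + 1 = 34 by floor((21 - 3 + 0) / 1) + 1 = 19.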
def __init__(self, input_height, input_size, output_size=2, conv_kernel_size_list=[2, 2, 2]): #input_size=36, input_height=21
super(QNet_CNN, self).__init__()
self.output_size = output_size
self.layer = nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=2, kernel_size=conv_kernel_size_list[0]), # [B, 1, H, W] -> [B, 2, H-k0+1, W-k0+1]
nn.BatchNorm2d(num_features=2),
nn.LeakyReLU(),
nn.Conv2d(in_channels=2, out_channels=4, kernel_size=conv_kernel_size_list[1]), # [B, 2, ...] -> [B, 4, ...], each spatial dim shrinks by k1-1
nn.BatchNorm2d(num_features=4),
nn.LeakyReLU(),
nn.MaxPool2d(kernel_size=2, stride=1), # 2x2 max pool with stride 1 shrinks each spatial dim by 1
nn.Conv2d(in_channels=4, out_channels=6, kernel_size=conv_kernel_size_list[2]), # [B, 4, ...] -> [B, 6, ...], each spatial dim shrinks by k2-1
nn.BatchNorm2d(num_features=6),
nn.LeakyReLU(),
nn.MaxPool2d(kernel_size=2, stride=1) # final 2x2 max pool, stride 1
)
w, h = self.get_conv2d_size(w=input_size, h=input_height, kernel_size=conv_kernel_size_list[0], padding_size=0, stride=1)
w, h = self.get_conv2d_size(w=w, h=h, kernel_size=conv_kernel_size_list[1], padding_size=0, stride=1)
w, h = self.get_pool2d_size(w=w, h=h, kernel_size=2, stride=1)
w, h = self.get_conv2d_size(w=w, h=h, kernel_size=conv_kernel_size_list[2], padding_size=0, stride=1)
w, h = self.get_pool2d_size(w=w, h=h, kernel_size=2, stride=1)
self.fc_layer = nn.Sequential(
nn.Linear(w * h * 6, 128),
nn.LeakyReLU(),
nn.Linear(128, 32),
nn.LeakyReLU(),
nn.Linear(32, self.output_size)
)
def forward(self, x):
x = x.unsqueeze(dim=1)
out = self.layer(x)
out = out.view(x.size(0), -1)
out = self.fc_layer(out)
return out.squeeze(dim=1)
def sample_action(self, x,
Python 2 native strings
are stored as bytes. In Python 3 native strings are stored as
unicode.
"""
if not self.enabled:
return ''
if self._state != self.STATE_RUNNING:
return ''
if self.ignore_transaction:
return ''
# Only generate a footer if the header had already been
# generated and we haven't already generated the footer.
if not self.rum_header_generated:
return ''
if self.rum_footer_generated:
return ''
# Make sure we freeze the path.
self._freeze_path()
# When obfuscating values for the footer, we only use the
# first 13 characters of the account license key.
obfuscation_key = self._settings.license_key[:13]
attributes = {}
user_attributes = {}
for attr in self.user_attributes:
if attr.destinations & DST_BROWSER_MONITORING:
user_attributes[attr.name] = attr.value
if user_attributes:
attributes['u'] = user_attributes
request_parameters = self.request_parameters
request_parameter_attributes = self.filter_request_parameters(
request_parameters)
agent_attributes = {}
for attr in request_parameter_attributes:
if attr.destinations & DST_BROWSER_MONITORING:
agent_attributes[attr.name] = attr.value
if agent_attributes:
attributes['a'] = agent_attributes
# Create the data structure that pulls all our data in.
footer_data = self.browser_monitoring_intrinsics(obfuscation_key)
if attributes:
attributes = obfuscate(json_encode(attributes), obfuscation_key)
footer_data['atts'] = attributes
footer = _js_agent_footer_fragment % json_encode(footer_data)
# To avoid any issues with browser encodings, we will make sure that
# the javascript we inject for the browser agent is ASCII encodable.
# Since we obfuscate all agent and user attributes, and the transaction
# name with base 64 encoding, this will preserve those strings, if
# they have values outside of the ASCII character set.
# In the case of Python 2, we actually then use the encoded value
# as we need a native string, which for Python 2 is a byte string.
# If encoding as ASCII fails we will return an empty string.
try:
if six.PY2:
footer = footer.encode('ascii')
else:
footer.encode('ascii')
except UnicodeError:
if not WebTransaction.unicode_error_reported:
_logger.error('ASCII encoding of js-agent-footer failed: %r',
footer)
WebTransaction.unicode_error_reported = True
footer = ''
# We remember if we have returned a non empty string value and
# if called a second time we will not return it again.
if footer:
self.rum_footer_generated = True
return footer
def browser_monitoring_intrinsics(self, obfuscation_key):
txn_name = obfuscate(self.path, obfuscation_key)
queue_start = self.queue_start or self.start_time
start_time = self.start_time
end_time = time.time()
queue_duration = int((start_time - queue_start) * 1000)
request_duration = int((end_time - start_time) * 1000)
intrinsics = {
"beacon": self._settings.beacon,
"errorBeacon": self._settings.error_beacon,
"licenseKey": self._settings.browser_key,
"applicationID": self._settings.application_id,
"transactionName": txn_name,
"queueTime": queue_duration,
"applicationTime": request_duration,
"agent": self._settings.js_agent_file,
}
if self._settings.browser_monitoring.ssl_for_http is not None:
ssl_for_http = self._settings.browser_monitoring.ssl_for_http
intrinsics['sslForHttp'] = ssl_for_http
return intrinsics
class WSGIHeaderProxy(object):
def __init__(self, environ):
self.environ = environ
self.length = None
@staticmethod
def _to_wsgi(key):
key = key.upper()
if key == 'CONTENT-LENGTH':
return 'CONTENT_LENGTH'
elif key == 'CONTENT-TYPE':
return 'CONTENT_TYPE'
return 'HTTP_' + key.replace('-', '_')
@staticmethod
def _from_wsgi(key):
key = key.lower()
return key[5:].replace('_', '-')
def __getitem__(self, key):
wsgi_key = self._to_wsgi(key)
return self.environ[wsgi_key]
def __iter__(self):
for key in self.environ:
if key == 'CONTENT_LENGTH':
yield 'content-length', self.environ['CONTENT_LENGTH']
elif key == 'CONTENT_TYPE':
yield 'content-type', self.environ['CONTENT_TYPE']
elif key == 'HTTP_CONTENT_LENGTH' or key == 'HTTP_CONTENT_TYPE':
# These keys are illegal and should be ignored
continue
elif key.startswith('HTTP_'):
yield self._from_wsgi(key), self.environ[key]
def __len__(self):
if self.length is None:
self.length = sum(1 for _ in iter(self))
return self.length
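# Minimal usage sketch (hypothetical environ values): the proxy exposes WSGI
# variables as HTTP-style headers without copying the dictionary.
# proxy = WSGIHeaderProxy({'HTTP_USER_AGENT': 'curl', 'CONTENT_TYPE': 'text/plain'})
# proxy['User-Agent'] -> 'curl'
# dict(proxy) -> {'user-agent': 'curl', 'content-type': 'text/plain'}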
class WSGIWebTransaction(WebTransaction):
MOD_WSGI_HEADERS = ('mod_wsgi.request_start', 'mod_wsgi.queue_start')
def __init__(self, application, environ):
# The web transaction can be enabled/disabled by
# the value of the variable "newrelic.enabled"
# in the WSGI environ dictionary. We need to check
# this before initialising the transaction, as it needs
# to be passed to the base class constructor. The
# default is None, which would then result in the
# base class making the decision based on whether
# application or agent as a whole are enabled.
enabled = _lookup_environ_setting(environ,
'newrelic.enabled', None)
# Initialise the common transaction base class.
super(WSGIWebTransaction, self).__init__(
application, name=None, port=environ.get('SERVER_PORT'),
request_method=environ.get('REQUEST_METHOD'),
query_string=environ.get('QUERY_STRING'),
headers=iter(WSGIHeaderProxy(environ)),
enabled=enabled)
# Disable transactions for websocket connections.
# Also disable autorum if this is a websocket. This is a good idea for
# two reasons. First, RUM is unnecessary for websocket transactions
# anyway. Secondly, due to a bug in the gevent-websocket (0.9.5)
# package, if our _WSGIApplicationMiddleware is applied a websocket
# connection cannot be made.
if _is_websocket(environ):
self.autorum_disabled = True
self.enabled = False
# Bail out if the transaction is running in a
# disabled state.
if not self.enabled:
return
# Will need to check the settings a number of times.
settings = self._settings
# Check for override settings from WSGI environ.
self.background_task = _lookup_environ_setting(environ,
'newrelic.set_background_task', False)
self.ignore_transaction = _lookup_environ_setting(environ,
'newrelic.ignore_transaction', False)
self.suppress_apdex = _lookup_environ_setting(environ,
'newrelic.suppress_apdex_metric', False)
self.suppress_transaction_trace = _lookup_environ_setting(environ,
'newrelic.suppress_transaction_trace', False)
self.capture_params = _lookup_environ_setting(environ,
'newrelic.capture_request_params',
settings.capture_params)
self.autorum_disabled = _lookup_environ_setting(environ,
'newrelic.disable_browser_autorum',
not settings.browser_monitoring.auto_instrument)
# Make sure that if high security mode is enabled that
# capture of request params is still being disabled.
# No warning is issued for this in the logs because it
# is a per request configuration and would create a lot
# of noise.
if settings.high_security:
self.capture_params = False
# LEGACY: capture_params = False
#
# Don't add request parameters at all, which means they will not
# go through the AttributeFilter.
if self.capture_params is False:
self._request_params.clear()
# Extract from the WSGI environ dictionary
# details of the URL path. This will be set as
# default path for the web transaction. This can
# be overridden by framework to be more specific
# to avoid metrics explosion problem resulting
# from too many distinct URLs for same resource
# due to use of REST style URL concepts or
# otherwise.
request_uri = environ.get('REQUEST_URI', None)
if request_uri is None:
# The gunicorn WSGI server uses RAW_URI instead
# of the more typical REQUEST_URI used by Apache
# and other web servers.
request_uri = environ.get('RAW_URI', None)
script_name = environ.get('SCRIPT_NAME', None)
path_info = environ.get('PATH_INFO', None)
self._request_uri = request_uri
if self._request_uri is not None:
# Need to make sure we drop off any query string
# arguments on the path if we have to fallback
# to using the original REQUEST_URI. Can't use
# attribute access on the result as that is only
# supported on Python 2.5+.
self._request_uri = urlparse.urlparse(self._request_uri)[2]
if script_name is not None or path_info is not None:
if path_info is None:
path = script_name
elif script_name is None:
path = path_info
else:
path = script_name + path_info
self.set_transaction_name(path, 'Uri', priority=1)
if self._request_uri is None:
self._request_uri = path
else:
if self._request_uri is not None:
self.set_transaction_name(self._request_uri, 'Uri', priority=1)
# mod_wsgi sets its own distinct variables for queue time
# automatically. Initially it set mod_wsgi.queue_start,
# which equated to when Apache first accepted the
# request. This got changed to mod_wsgi.request_start
# however, and mod_wsgi.queue_start was instead used
# just for when requests are to be queued up for the
# daemon process and corresponded to the point at which
# they are being proxied, after Apache does any
# authentication etc. We check for both so older
# versions of mod_wsgi will still work, although we
# don't try and use the fact that it is possible to
# distinguish the two points and just pick up the
# earlier of the two.
for queue_time_header in self.MOD_WSGI_HEADERS:
if self.queue_start > 0.0:
break
value = environ.get(queue_time_header)
if not value:
continue
try:
if value.startswith('t='):
try:
self.queue_start = _parse_time_stamp(float(value[2:]))
except Exception:
pass
else:
try:
self.queue_start = _parse_time_stamp(float(value))
except Exception:
pass
except Exception:
pass
def __exit__(self, exc, value, tb):
self.record_custom_metric('Python/WSGI/Input/Bytes',
self._bytes_read)
self.record_custom_metric('Python/WSGI/Input/Time',
self.read_duration)
self.record_custom_metric('Python/WSGI/Input/Calls/read',
self._calls_read)
self.record_custom_metric('Python/WSGI/Input/Calls/readline',
self._calls_readline)
self.record_custom_metric('Python/WSGI/Input/Calls/readlines',
self._calls_readlines)
self.record_custom_metric('Python/WSGI/Output/Bytes',
self._bytes_sent)
self.record_custom_metric('Python/WSGI/Output/Time',
self.sent_duration)
self.record_custom_metric('Python/WSGI/Output/Calls/yield',
self._calls_yield)
self.record_custom_metric('Python/WSGI/Output/Calls/write',
self._calls_write)
return super(WSGIWebTransaction, self).__exit__(exc, value, tb)
def _update_agent_attributes(self):
# Add WSGI agent attributes
if self.read_duration != 0:
self._add_agent_attribute('wsgi.input.seconds',
self.read_duration)
if self._bytes_read != 0:
self._add_agent_attribute('wsgi.input.bytes',
self._bytes_read)
if self._calls_read != 0:
self._add_agent_attribute('wsgi.input.calls.read',
self._calls_read)
if self._calls_readline != 0:
self._add_agent_attribute('wsgi.input.calls.readline',
self._calls_readline)
if self._calls_readlines != 0:
self._add_agent_attribute('wsgi.input.calls.readlines',
self._calls_readlines)
if self.sent_duration != 0:
self._add_agent_attribute('wsgi.output.seconds',
self.sent_duration)
if self._bytes_sent != 0:
self._add_agent_attribute('wsgi.output.bytes',
self._bytes_sent)
if self._calls_write != 0:
self._add_agent_attribute('wsgi.output.calls.write',
self._calls_write)
if self._calls_yield != 0:
self._add_agent_attribute('wsgi.output.calls.yield',
self._calls_yield)
return super(WSGIWebTransaction, self)._update_agent_attributes()
def process_response(self, status, response_headers, *args):
"""Processes response status and headers, extracting any
details required and returning a
"""Functions for generating random quantum objects and states.
"""
import os
import math
import random
from importlib.util import find_spec
from functools import wraps, lru_cache
from numbers import Integral
import numpy as np
import scipy.sparse as sp
from ..core import (qarray, dag, dot, rdmul, complex_array, get_thread_pool,
_NUM_THREAD_WORKERS, qu, ptr, kron, nmlz, prod,
vectorize, pvectorize)
# -------------------------------- RANDOMGEN -------------------------------- #
if (
find_spec('randomgen') and
os.environ.get('QUIMB_USE_RANDOMGEN', '').lower() not in {'false', 'off'}
):
_RANDOM_GENS = []
@lru_cache(2)
def _get_randomgens(num_threads):
"""Cached generation of random number generators, enables
``random_seed_fn`` functionality and greater efficiency.
"""
global _RANDOM_GENS
num_gens = len(_RANDOM_GENS)
if num_gens < num_threads:
from randomgen import Xoroshiro128
# add more generators if not enough
for _ in range(num_threads - num_gens):
_RANDOM_GENS.append(Xoroshiro128())
return _RANDOM_GENS[:num_threads]
def seed_rand(seed):
# all RNGs inherit state from the first RNG of _get_randomgens
_get_randomgens(1)[0].seed(seed)
def randn(shape=(), dtype=float, scale=1.0, loc=0.0,
num_threads=None, seed=None, dist='normal'):
"""Fast multithreaded generation of random normally distributed data
using ``randomgen``.
Parameters
----------
shape : tuple[int]
The shape of the output random array.
dtype : {'complex128', 'float64', 'complex64', 'float32'}, optional
The data-type of the output array.
scale : float, optional
The width of the distribution (standard deviation if
``dist='normal'``).
loc : float, optional
The location of the distribution (lower limit if
``dist='uniform'``).
num_threads : int, optional
How many threads to use. If ``None``, decide automatically.
dist : {'normal', 'uniform'}
Type of random number to generate.
"""
if seed is not None:
seed_rand(seed)
if isinstance(shape, Integral):
d = shape
shape = (shape,)
else:
d = prod(shape)
if num_threads is None:
# only multi-thread for big ``d``
if d <= 32768:
num_threads = 1
else:
num_threads = _NUM_THREAD_WORKERS
rgs = _get_randomgens(num_threads)
gen_method = {
'normal': 'standard_normal',
'uniform': 'random_sample'
}[dist]
# sequential generation
if num_threads <= 1:
def create(d, dtype):
out = np.empty(d, dtype)
getattr(rgs[0].generator, gen_method)(out=out, dtype=dtype)
return out
# threaded generation
else:
pool = get_thread_pool()
# copy state to all RGs and jump to ensure no overlap
for rg in rgs[1:]:
rg.state = rgs[0].state
rgs[0].jump()
gens = [thread_rg.generator for thread_rg in rgs]
S = math.ceil(d / num_threads)
def _fill(gen, out, dtype, first, last):
getattr(gen, gen_method)(out=out[first:last], dtype=dtype)
def create(d, dtype):
out = np.empty(d, dtype)
# submit thread work
fs = [
pool.submit(_fill, gen, out, dtype, i * S, (i + 1) * S)
for i, gen in enumerate(gens)
]
# wait for completion
[f.result() for f in fs]
return out
if np.issubdtype(dtype, np.floating):
out = create(d, dtype)
elif np.issubdtype(dtype, np.complexfloating):
# need to sum two real arrays if generating complex numbers
if np.issubdtype(dtype, np.complex64):
sub_dtype = np.float32
else:
sub_dtype = np.float64
out = complex_array(create(d, sub_dtype), create(d, sub_dtype))
else:
raise ValueError("dtype {} not understood.".format(dtype))
if out.dtype != dtype:
out = out.astype(dtype)
if scale != 1.0:
out *= scale
if loc != 0.0:
out += loc
return out.reshape(shape)
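# Usage sketch (assuming randomgen is installed): draw a reproducible 2x3
# array of complex64 normal deviates.
# z = randn((2, 3), dtype=np.complex64, seed=42)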
def rand(*args, **kwargs):
return randn(*args, dist='uniform', **kwargs)
def randint(*args, **kwargs):
return _get_randomgens(1)[0].generator.randint(*args, **kwargs)
def choice(*args, **kwargs):
return _get_randomgens(1)[0].generator.choice(*args, **kwargs)
# ---------------------------------- NUMPY ---------------------------------- #
else: # pragma: no cover
def seed_rand(seed):
np.random.seed(seed)
def randn(shape=(), dtype=float, scale=1.0, loc=0.0,
seed=None, dist='normal'):
"""Generate normally distributed random array of certain shape and type.
Like :func:`numpy.random.randn` but can specify ``dtype``.
Parameters
----------
shape : tuple[int]
The shape of the array.
dtype : {float, complex, ...}, optional
The numpy data type.
scale : float, optional
The width of the distribution (standard deviation if
``dist='normal'``).
loc : float, optional
The location of the distribution (lower limit if
``dist='uniform'``).
dist : {'normal', 'uniform'}
Type of random number to generate.
Returns
-------
A : array
"""
if seed is not None:
seed_rand(seed)
def create():
if dist == 'normal':
return np.random.normal(loc=loc, scale=scale, size=shape)
elif dist == 'uniform':
return np.random.uniform(low=loc, high=loc + scale, size=shape)
else:
raise ValueError("Distribution '{}' not valid.".format(dist))
# real datatypes
if np.issubdtype(dtype, np.floating):
x = create()
# complex datatypes
elif np.issubdtype(dtype, np.complexfloating):
x = complex_array(create(), create())
else:
raise TypeError("dtype {} not understood - should be float or "
"complex.".format(dtype))
if x.dtype != dtype:
x = x.astype(dtype)
return x
choice = np.random.choice
randint = np.random.randint
rand = np.random.rand
def random_seed_fn(fn):
"""Modify ``fn`` to take a ``seed`` argument.
"""
@wraps(fn)
def wrapped_fn(*args, seed=None, **kwargs):
if seed is not None:
seed_rand(seed)
return fn(*args, **kwargs)
return wrapped_fn
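# e.g. once decorated, rand_herm(4, seed=7) seeds the generator before
# drawing, so repeated calls with the same seed are reproducible.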
@random_seed_fn
def rand_rademacher(shape, scale=1, dtype=float):
"""
"""
if np.issubdtype(dtype, np.floating):
entries = np.array([1.0, -1.0]) * scale
need2convert = dtype not in (float, np.float_)
elif np.issubdtype(dtype, np.complexfloating):
entries = np.array([1.0, -1.0, 1.0j, -1.0j]) * scale
need2convert = dtype not in (complex, np.complex_)
else:
raise TypeError("dtype {} not understood - should be float or complex."
"".format(dtype))
x = choice(entries, shape)
if need2convert:
x = x.astype(dtype)
return x
def _phase_to_complex_base(x):
return 1j * math.sin(x) + math.cos(x)
_phase_sigs = ['complex64(float32)', 'complex128(float64)']
_phase_to_complex_seq = vectorize(_phase_sigs)(_phase_to_complex_base)
_phase_to_complex_par = pvectorize(_phase_sigs)(_phase_to_complex_base)
def phase_to_complex(x):
if x.size >= 512:
return _phase_to_complex_par(x)
# XXX: this is not as fast as numexpr - investigate?
return _phase_to_complex_seq(x)
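# By Euler's formula e^{i*phi} = cos(phi) + i*sin(phi), so uniform phases in
# [0, 2*pi) map to points uniformly distributed on the complex unit circle.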
@random_seed_fn
def rand_phase(shape, scale=1, dtype=complex):
"""Generate random complex numbers distributed on the unit sphere.
"""
if not np.issubdtype(dtype, np.complexfloating):
raise ValueError("dtype must be complex, got '{}'.".format(dtype))
if np.issubdtype(dtype, np.complex64):
sub_dtype = np.float32
else:
sub_dtype = np.float64
phi = randn(shape, dtype=sub_dtype, scale=2 * math.pi, dist='uniform')
z = phase_to_complex(phi)
if scale != 1:
z *= scale
return z
def rand_matrix(d, scaled=True, sparse=False, stype='csr',
density=None, dtype=complex, seed=None):
"""Generate a random matrix of order `d` with normally distributed
entries. If `scaled` is `True`, then in the limit of large `d` the
eigenvalues will be distributed on the unit complex disk.
Parameters
----------
d : int
Matrix dimension.
scaled : bool, optional
Whether to scale the matrices values such that its spectrum
approximately lies on the unit disk (for dense matrices).
sparse : bool, optional
Whether to produce a sparse matrix.
stype : {'csr', 'csc', 'coo', ...}, optional
The type of sparse matrix if ``sparse=True``.
density : float, optional
Target density of non-zero elements for the sparse matrix. By default
aims for about 10 entries per row.
dtype : {complex, float}, optional
The data type of the matrix elements.
Returns
-------
mat : qarray or sparse matrix
Random matrix.
"""
if np.issubdtype(dtype, np.floating):
iscomplex = False
elif np.issubdtype(dtype, np.complexfloating):
iscomplex = True
else:
raise TypeError("dtype {} not understood - should be "
"float or complex.".format(dtype))
# handle seed manually since standard python random.seed might be called
if seed is not None:
seed_rand(seed)
if sparse:
# Aim for 10 non-zero values per row, but between 1 and d/2
density = min(10, d / 2) / d if density is None else density
density = min(max(d**-2, density, ), 1.0)
nnz = round(density * d * d)
if density > 0.1:
# take special care to avoid duplicates
if seed is not None:
random.seed(seed)
ijs = random.sample(range(0, d**2), k=nnz)
else:
ijs = randint(0, d * d, size=nnz)
# want to sample nnz unique (d, d) pairs without building list
i, j = np.divmod(ijs, d)
data = randn(nnz, dtype=dtype)
mat = sp.coo_matrix((data, (i, j)), shape=(d, d)).asformat(stype)
else:
density = 1.0
mat = qarray(randn((d, d), dtype=dtype))
if scaled:
mat /= ((2 if iscomplex else 1) * d * density)**0.5
return mat
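# Usage sketch:
# m = rand_matrix(100)  # dense 100x100 complex qarray, spectrum ~ unit disk
# s = rand_matrix(1000, sparse=True)  # CSR matrix with roughly 10 non-zeros per row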
@random_seed_fn
def rand_herm(d, sparse=False, density=None, dtype=complex):
"""Generate a random hermitian operator of order `d` with normally
distributed entries. In the limit of large `d` the spectrum will be a
semi-circular distribution between [-1, 1].
See Also
--------
rand_matrix, rand_pos, rand_rho, rand_uni
"""
if sparse:
density = 10 / d if density is None else density
density = min(max(density, d**-2), 1 - d**-2)
density /= 2 # to account for the hermitian construction
herm = rand_matrix(d, scaled=True, sparse=sparse,
density=density, dtype=dtype)
if sparse:
herm.data /= (2**1.5)
else:
herm /= (2**1.5)
herm += dag(herm)
return herm
@random_seed_fn
def rand_pos(d, sparse=False, density=None, dtype=complex):
"""Generate a random positive operator of size `d`, with normally
distributed entries. In the limit of large `d` the spectrum will lie
between [0, 1].
See Also
--------
rand_matrix, rand_herm, rand_rho, rand_uni
"""
if sparse:
density = 10 / d if density is None else density
density = min(max(density, d**-2), 1 - d**-2)
density = 0.5 * (density / d)**0.5 # to account for the positive-operator construction
pos = rand_matrix(d, scaled=True, sparse=sparse,
import time
import curses
import sys
import os
import multiprocessing as mp
import pandas as pd
import numpy as np
import emcee
import h5py
from radvel import utils
import radvel
class StateVars(object):
def __init__(self):
self.oac = 0
self.autosamples = []
self.automean = []
self.automin = []
self.automax = []
self.proceed_started = 0
def reset(self):
self.__init__()
statevars = StateVars()
def isnotebook():
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
elif shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
else:
return False # Other type (?)
except NameError:
return False # Probably standard Python interpreter
def _closescr():
if not isnotebook():
try:
curses.endwin()
except Exception:
pass
def _progress_bar(step, totsteps, width=50):
fltot = float(totsteps)
numsym = int(np.round(width * (step / fltot)))
bar = "=" * numsym + " " * (width - numsym)
return "[" + bar + "]"
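# e.g. _progress_bar(25, 100, width=8) -> "[==      ]"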
def _status_message_NB(statevars):
msg1 = (
"{:d}/{:d} ({:3.1f}%) steps complete; "
"Running {:.2f} steps/s; Mean acceptance rate = {:3.1f}%; "
"Min Auto Factor = {:3.0f}; Max Auto Relative-Change = {:5.3}; "
"Min Tz = {:.1f}; Max G-R = {:5.3f}\r"
).format(statevars.ncomplete, statevars.totsteps, statevars.pcomplete, statevars.rate, statevars.ar,
statevars.minafactor, statevars.maxarchange, statevars.mintz, statevars.maxgr)
sys.stdout.write(msg1)
sys.stdout.flush()
def _status_message_CLI(statevars):
statevars.screen = curses.initscr()
statevars.screen.clear()
barline = _progress_bar(statevars.ncomplete, statevars.totsteps)
msg1 = (
barline + " {:d}/{:d} ({:3.1f}%) steps complete; "
).format(statevars.ncomplete, statevars.totsteps, statevars.pcomplete)
msg2 = (
"Running {:.2f} steps/s; Mean acceptance rate = {:3.1f}%; "
"Min Auto Factor = {:3.0f}; \nMax Auto Relative-Change = {:5.3}; "
"Min Tz = {:.1f}; Max G-R = {:5.3f}\n"
).format(statevars.rate, statevars.ar, statevars.minafactor, statevars.maxarchange,
statevars.mintz, statevars.maxgr)
statevars.screen.addstr(0, 0, msg1+ '\n' + msg2)
statevars.screen.refresh()
def convergence_check(minAfactor, maxArchange, maxGR, minTz, minsteps, minpercent):
"""Check for convergence
Check for convergence for a list of emcee samplers
Args:
minAfactor (float): Minimum autocorrelation time factor for chains to be deemed well-mixed and halt the MCMC run
maxArchange (float): Maximum relative change in the autocorrelative time to be deemed well-mixed and
halt the MCMC run
maxGR (float): Maximum G-R statistic for chains to be deemed well-mixed and halt the MCMC run
minTz (int): Minimum Tz to consider well-mixed
minsteps (int): Minimum number of steps per walker before convergence tests are performed. Convergence checks
will start after the minsteps threshold or the minpercent threshold has been hit.
minpercent (float): Minimum percentage of total steps before convergence tests are performed. Convergence checks
will start after the minsteps threshold or the minpercent threshold has been hit.
"""
statevars.ar = 0
statevars.ncomplete = statevars.nburn
statevars.lnprob = []
statevars.autocorrelation = []
statevars.chains = []
for i,sampler in enumerate(statevars.samplers):
statevars.ncomplete += sampler.get_log_prob(flat=True).shape[0]
statevars.ar += sampler.acceptance_fraction.mean() * 100
statevars.chains.append(sampler.get_chain()[:,:,:].T)
statevars.lnprob.append(sampler.get_log_prob(flat=True))
statevars.ar /= statevars.ensembles
statevars.pcomplete = statevars.ncomplete/float(statevars.totsteps) * 100
statevars.rate = (statevars.checkinterval*statevars.nwalkers*statevars.ensembles) / statevars.interval
if statevars.ensembles < 3:
        # if there are fewer than 3 ensembles then G-R between ensembles does
        # not work, so just calculate it on the last sampler
statevars.tchains = sampler.chain.transpose()
# Must have completed at least 5% or minsteps steps per walker before
# attempting to calculate GR
if statevars.pcomplete < minpercent and sampler.get_log_prob(flat=True).shape[0] <= minsteps*statevars.nwalkers:
(statevars.ismixed, statevars.minafactor, statevars.maxarchange, statevars.maxgr,
statevars.mintz) = 0, -1.0, np.inf, np.inf, -1.0
else:
(statevars.ismixed, afactor, archange, oac, gr, tz) \
= convergence_calculate(statevars.chains,
oldautocorrelation=statevars.oac, minAfactor=minAfactor, maxArchange=maxArchange,
maxGR=maxGR, minTz=minTz)
statevars.mintz = min(tz)
statevars.maxgr = max(gr)
statevars.minafactor = np.amin(afactor)
statevars.maxarchange = np.amax(archange)
statevars.oac = oac
if statevars.burn_complete:
statevars.autosamples.append(len(statevars.chains)*statevars.chains[0].shape[2])
statevars.automean.append(np.mean(statevars.oac))
statevars.automin.append(np.amin(statevars.oac))
statevars.automax.append(np.amax(statevars.oac))
if statevars.ismixed:
statevars.mixcount += 1
else:
statevars.mixcount = 0
if isnotebook():
_status_message_NB(statevars)
else:
_status_message_CLI(statevars)
def _domcmc(input_tuple):
"""Function to be run in parallel on different CPUs
Input is a tuple: first element is an emcee sampler object, second is an array of
initial positions, third is number of steps to run before doing a convergence check
"""
sampler = input_tuple[0]
ipos = input_tuple[1]
check_interval = input_tuple[2]
sampler.run_mcmc(ipos, check_interval)
return sampler
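# Parallel-use sketch (the names below are hypothetical, not module state):
# each ensemble advances check_interval steps in its own process before the
# main process runs a convergence check, e.g.
#
#   pool = mp.Pool(processes=len(samplers))
#   args = [(s, p0, 50) for s, p0 in zip(samplers, initial_positions)]
#   samplers = pool.map(_domcmc, args)
#   pool.close()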
def mcmc(post, nwalkers=50, nrun=10000, ensembles=8, checkinterval=50, minAfactor=40, maxArchange=.03, burnAfactor=25,
burnGR=1.03, maxGR=1.01, minTz=1000, minsteps=1000, minpercent=5, thin=1, serial=False, save=False,
savename=None, proceed=False, proceedname=None):
"""Run MCMC
    Run MCMC chains using the emcee EnsembleSampler
Args:
post (radvel.posterior): radvel posterior object
nwalkers (int): (optional) number of MCMC walkers
nrun (int): (optional) number of steps to take
ensembles (int): (optional) number of ensembles to run. Will be run
in parallel on separate CPUs
checkinterval (int): (optional) check MCMC convergence statistics every
`checkinterval` steps
        minAfactor (float): Minimum autocorrelation time factor to deem chains as well-mixed and halt the MCMC run
        maxArchange (float): Maximum relative change in autocorrelation time to deem chains as well-mixed
        burnAfactor (float): Minimum autocorrelation time factor to stop burn-in period. Burn-in ends once burnGR
            or burnAfactor is reached.
        burnGR (float): (optional) Maximum G-R statistic to stop burn-in period. Burn-in ends once burnGR or
            burnAfactor is reached.
maxGR (float): (optional) Maximum G-R statistic for chains to be deemed well-mixed and halt the MCMC run
minTz (int): (optional) Minimum Tz to consider well-mixed
minsteps (int): Minimum number of steps per walker before convergence tests are performed. Convergence checks
will start after the minsteps threshold or the minpercent threshold has been hit.
minpercent (float): Minimum percentage of total steps before convergence tests are performed. Convergence checks
will start after the minsteps threshold or the minpercent threshold has been hit.
thin (int): (optional) save one sample every N steps (default=1, save every sample)
serial (bool): set to true if MCMC should be run in serial
save (bool): set to true to save MCMC chains that can be continued in a future run
savename (string): location of h5py file where MCMC chains will be saved for future use
proceed (bool): set to true to continue a previously saved run
        proceedname (string): location of h5py file with chains from a previous MCMC run
Returns:
DataFrame: DataFrame containing the MCMC samples
"""
try:
if save and savename is None:
raise ValueError('save set to true but no savename provided')
if save:
h5f = h5py.File(savename, 'a')
if proceed:
if proceedname is None:
raise ValueError('proceed set to true but no proceedname provided')
else:
            h5p = h5py.File(proceedname, 'r')
msg = 'Loading chains and run information from previous MCMC'
print(msg)
statevars.prechains = []
statevars.prelog_probs = []
statevars.preaccepted = []
statevars.preburned = h5p['burned'][0]
statevars.minafactor = h5p['crit'][0]
statevars.maxarchange = h5p['crit'][1]
statevars.mintz = h5p['crit'][2]
statevars.maxgr = h5p['crit'][3]
statevars.autosamples = list(h5p['autosample'])
statevars.automin = list(h5p['automin'])
statevars.automean = list(h5p['automean'])
statevars.automax = list(h5p['automax'])
for i in range(0,int((len(h5p.keys()) - 6)/3)):
str_chain = str(i) + '_chain'
str_log_prob = str(i) + '_log_prob'
str_accepted = str(i) + '_accepted'
statevars.prechains.append(h5p[str_chain])
statevars.prelog_probs.append(h5p[str_log_prob])
statevars.preaccepted.append(h5p[str_accepted])
# check if one or more likelihoods are GPs
if isinstance(post.likelihood, radvel.likelihood.CompositeLikelihood):
check_gp = [like for like in post.likelihood.like_list if isinstance(like, radvel.likelihood.GPLikelihood)]
else:
check_gp = isinstance(post.likelihood, radvel.likelihood.GPLikelihood)
np_info = np.__config__.blas_opt_info
        if 'extra_link_args' in np_info.keys() \
                and check_gp \
                and ('-Wl,Accelerate' in np_info['extra_link_args']) \
                and not serial:
print("WARNING: Parallel processing with Gaussian Processes will not work with your current"
+ " numpy installation. See radvel.readthedocs.io/en/latest/OSX-multiprocessing.html"
+ " for more details. Running in serial with " + str(ensembles) + " ensembles.")
serial = True
statevars.ensembles = ensembles
statevars.nwalkers = nwalkers
statevars.checkinterval = checkinterval - 1
nrun = int(nrun)
# Get an initial array value
pi = post.get_vary_params()
statevars.ndim = pi.size
if nwalkers < 2 * statevars.ndim:
print("WARNING: Number of walkers is less than 2 times number of free parameters. " +
"Adjusting number of walkers to {}".format(2 * statevars.ndim))
statevars.nwalkers = 2 * statevars.ndim
if proceed:
if len(h5p.keys()) != (3 * statevars.ensembles + 6) or h5p['0_chain'].shape[2] != statevars.ndim \
or h5p['0_chain'].shape[1] != statevars.nwalkers:
raise ValueError('nensembles, nwalkers, and the number of ' +
'parameters must be equal to those from previous run.')
# set up perturbation size
pscales = []
for par in post.list_vary_params():
val = post.params[par].value
if post.params[par].mcmcscale is None:
if par.startswith('per'):
pscale = np.abs(val * 1e-5*np.log10(val))
elif par.startswith('logper'):
pscale = np.abs(1e-5 * val)
elif par.startswith('tc'):
pscale = 0.1
else:
pscale = np.abs(0.10 * val)
                post.params[par].mcmcscale = pscale
else:
pscale = post.params[par].mcmcscale
pscales.append(pscale)
pscales = np.array(pscales)
statevars.samplers = []
statevars.samples = []
statevars.initial_positions = []
for e in range(ensembles):
pi = post.get_vary_params()
p0 = np.vstack([pi]*statevars.nwalkers)
p0 += [np.random.rand(statevars.ndim)*pscales for i in range(statevars.nwalkers)]
if not proceed:
statevars.initial_positions.append(p0)
| |
color='r'))
if Rx != []:
irx = np.unique(sig[nz[1:-1] - 1], return_index=True)[1]
irx2 = np.kron(irx, [1, 1])
rx = ps[irx2]
rx[range(0, len(rx), 2)] = Rx
lines.extend(ax.plot(rx[:, 0], rx[:, 1], color='b'))
return (fig, ax, lines)
# lines=[]
# for s in sig:
# l=[self.Gs.pos[s[ii]] for ii in xrange(len(s))]
# if Tx!=None and Rx!=None:
# l.insert(0,Tx)
# l.insert(-1,Rx)
# ls=sh.LineString(l)
# x,y=ls.xy
# lines.extend(ax.plot(x,y,'k',lw=0.1,alpha=0.2))
# return (fig,ax,lines)
# def distwall(self, p, nroom):
# """ calculate distance to wall
#
# Parameters
# ----------
#
# p : ndarray
# point coordinate
#
# nroom : int
# room number of p
#
# Returns
# -------
#
# dist
# list of distances to walls of room nroom
#
# Notes
# -----
#
# Return dist a list of all the distances to the walls of a room
#
#
# """
# pp = sh.Point(p[0], p[1])
#
# dist = []
# p0_xy = []
# p1_xy = []
#
# vnode = self.Gr.node[nroom]['cycle'].cycle
#
# # for j in range(len(Gr[nroom]['vnodes'])):
# for j in range(len(vnodes)):
# nn = self.b_Gr[5]['vnodes'][j]
# nta = G1.tahe[0, nn - 1]
# nhe = G1.tahe[1, nn - 1]
# p0 = np.array([G1.pt[0, nta], G1.pt[1, nta]])
# p1 = np.array([G1.pt[0, nhe], G1.pt[1, nhe]])
# p0_xy.insert(j, p0)
# p1_xy.insert(j, p1)
#
# pstartwll = np.array(p0_xy)
# pfinwll = np.array(p1_xy)
#
# for i in range(len(self.b_Gr[nroom]['vnodes'])):
# line_wall = sh.LineString([(pstartwll[i, 0],
# pstartwll[i, 1]), (pfinwll[i, 0], pfinwll[i, 1])])
# dist.insert(i, line_wall.distance(pp))
# return(dist)
def randTxRx(self):
"""returns random coordinates for Tx and Rx.
Returns
-------
p_Tx : numpy.ndarray
A point of the placement of the Tx
p_Rx : numpy.ndarray
A point of the placement of the Rx
Examples
--------
>>> from pylayers.gis.layout import *
>>> L = Layout('defstr.lay')
>>> p_Tx,p_Rx = L.randTxRx()
Notes
-----
ex fn Tx_Rx_pos
"""
# self.boundary()
Tx_x = rd.uniform(self.ax[0], self.ax[1])
Tx_y = rd.uniform(self.ax[2], self.ax[3])
Rx_x = rd.uniform(self.ax[0], self.ax[1])
Rx_y = rd.uniform(self.ax[2], self.ax[3])
p_Tx = np.array([Tx_x, Tx_y])
p_Rx = np.array([Rx_x, Rx_y])
return(p_Tx, p_Rx)
def boundary(self, percx=0.15, percy=0.15, xlim=(), force=False, minD=10):
""" add a blank boundary around layout
Parameters
----------
percx : float
percentage of Dx for x offset calculation (default 0.15)
percy : float
percentage of Dy for y offset calculation (default 0.15)
    minD : minimum distance for boundary
force : boolean
force modification of boundaries
self.lboundary is the list of the nodes of the added boundary
self.axn is the zone without the boundary extension
self.ax is updated
Examples
--------
>>> from pylayers.gis.layout import *
>>> L = Layout('defstr.lay')
>>> L.boundary()
"""
if not self.hasboundary or force:
if xlim!=():
xmin = xlim[0]
xmax = xlim[1]
ymin = xlim[2]
ymax = xlim[3]
elif len(self.Gs.pos.values()) != 0:
xmax = max(p[0] for p in self.Gs.pos.values())
xmin = min(p[0] for p in self.Gs.pos.values())
ymax = max(p[1] for p in self.Gs.pos.values())
ymin = min(p[1] for p in self.Gs.pos.values())
else:
xmin = -20.
xmax = 20.
ymin = -10.
ymax = 10.
Dx = np.maximum(xmax - xmin,minD)
Dy = np.maximum(ymax - ymin,minD)
dx = Dx * percx
dy = Dy * percy
n1 = self.add_fnod((xmin - dx, ymin - dy))
n2 = self.add_fnod((xmax + dx, ymin - dy))
n3 = self.add_fnod((xmax + dx, ymax + dy))
n4 = self.add_fnod((xmin - dx, ymax + dy))
self.lboundary = [n1, n2, n3, n4]
self.segboundary = []
ns1 = self.add_segment(n1, n2, name='_AIR')
ns2 = self.add_segment(n2, n3, name='_AIR')
ns3 = self.add_segment(n3, n4, name='_AIR')
ns4 = self.add_segment(n4, n1, name='_AIR')
self.segboundary.append(ns1)
self.segboundary.append(ns2)
self.segboundary.append(ns3)
self.segboundary.append(ns4)
self.axn = (xmin, xmax, ymin, ymax)
self.ax = (xmin - dx, xmax + dx, ymin - dy, ymax + dy)
self.display['box'] = self.ax
self.hasboundary = True
self.g2npy()
elif xlim!=():
# change points coordinates
self.Gs.pos[self.lboundary[0]]=(xlim[0],xlim[2])
self.Gs.pos[self.lboundary[1]]=(xlim[1],xlim[2])
self.Gs.pos[self.lboundary[2]]=(xlim[1],xlim[3])
self.Gs.pos[self.lboundary[3]]=(xlim[0],xlim[3])
self.ax = xlim
self.display['box'] = xlim
self.g2npy()
def off_overlay(self, dx=0, dy=0):
""" offset overlay image
Parameters
----------
dx : float
dy : float
"""
axis = (self.ax[0] + dx, self.ax[1] + dx,
self.ax[2] + dy, self.ax[3] + dy)
self.display['overlay_axis'] = axis
def scl_overlay(self, ax=1.0, ay=1.0):
""" scale overlay image
Parameters
----------
ax : float
ay : float
"""
axis = (self.ax[0] * ax, self.ax[1] * ax,
self.ax[2] * ay, self.ax[3] * ay)
self.display['overlay_axis'] = axis
def get_paths(self, nd_in, nd_fin):
""" returns the possible paths of graph Gs between two nodes.
Parameters
----------
nd_in: int
initial graph node (segment or point)
nd_fin: int
final graph node (segment or point)
Returns
-------
paths : list
paths between nd_in and nd_fin
"""
paths = gph.find_all_paths(self.Gs, nd_in, nd_fin)
return paths
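    # Usage sketch (node numbers are hypothetical): in Gs, points carry
    # negative indices and segments positive ones, so e.g.
    #
    #   L = Layout('defstr.lay')
    #   paths = L.get_paths(-1, 5)   # all Gs paths from point -1 to segment 5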
def outputGi_func_test(args):
for k in range(10000):
y = k*k+k*k
return y
def outputGi_func(args):
# def outputGi_func(e, Gi_no, Gi_A, Gspos, sgsg, s2pc, s2pu):
# for k in range(10000):
# y = k*k
# # time.sleep(0.01)
# return y
def Gspos(n):
if n>0:
#return np.mean(s2pc[n].reshape(2,2),axis=0)
return np.mean(s2pc[n].toarray().reshape(2,2),axis=0)
else:
return p2pc[-n]
    # unpack the argument tuple; these names are used throughout the function
    e = args[0]
    Gi_no = args[1]
    Gi_A = args[2]
    p2pc = args[3]
    sgsg = args[4]
    s2pc = args[5]
    s2pu = args[6]
print(e)
i0 = e[0]
i1 = e[1]
nstr0 = i0[0]
nstr1 = i1[0]
# list of authorized outputs. Initialized void
output = []
# nstr1 : segment number of central interaction
if nstr1 > 0:
# central interaction is a segment
# pseg1 = self.s2pc[nstr1,:].toarray().reshape(2, 2).T
pseg1 = s2pc[nstr1,:].toarray().reshape(2, 2).T
# pseg1 = self.s2pc[nstr1,:].data.reshape(2, 2).T
# pseg1o = self.seg2pts(nstr1).reshape(2, 2).T
# create a Cone object
cn = cone.Cone()
# if starting from segment
if nstr0 > 0:
# pseg0 = self.s2pc[nstr0,:].toarray().reshape(2, 2).T
pseg0 = s2pc[nstr0,:].toarray().reshape(2, 2).T
# pseg0 = self.s2pc[nstr0,:].data.reshape(2, 2).T
# pseg0o = self.seg2pts(nstr0).reshape(2, 2).T
# if nstr0 and nstr1 are connected segments
if sgsg[nstr0,nstr1] == 0:
# from 2 not connected segment
cn.from2segs(pseg0, pseg1)
else:
# from 2 connected segments
cn.from2csegs(pseg0, pseg1)
# if starting from a point
else:
pt = Gspos(nstr0)
cn.fromptseg(pt, pseg1)
# list all potential successors of interaction i1
ui2 = Gi_no.index(i1)
ui = np.where(Gi_A[ui2,:]!=0)[0]
i2 = [Gi_no[u] for u in ui]
# i2 = nx.neighbors(self.Gi, i1)
# how to find neighbors without network
# ngi=L.Gi.nodes()
# A=nx.adjacency_matrix(L.Gi)
# inter = ngi[10]
# u = ngi.index(inter)
# ui = A[u,:].indices
# neigh_inter = np.array([ngi[u] for u in ui])
ipoints = [x for x in i2 if len(x)==1 ]
#ipoints = filter(lambda x: len(x) == 1, i2)
pipoints = np.array([Gspos(ip[0]) for ip in ipoints]).T
# filter tuple (R | T)
#istup = filter(lambda x : type(eval(x))==tuple,i2)
# map first argument segment number
#isegments = np.unique(map(lambda x : eval(x)[0],istup))
# isegments = np.unique(
# filter(lambda y: y > 0, map(lambda x: x[0], i2)))
isegments = np.unique([x[0] for x in i2 if x[0]>0])
    # if nstr0 and nstr1 are adjacent segments remove nstr0 from
    # potential next interactions
    # Fix 01/2017
    # This is not always True if the angle between
    # the two adjacent segments is < pi/2
# nb_nstr0 = self.Gs.neighbors(nstr0)
# nb_nstr1 = self.Gs.neighbors(nstr1)
# nb_nstr0 = np.array([self.s2pu[nstr0,0],self.s2pu[nstr0,1]])
# nb_nstr1 = np.array([self.s2pu[nstr1,0],self.s2pu[nstr1,1]])
nb_nstr0 = s2pu[nstr0,:].toarray()[0]
nb_nstr1 = s2pu[nstr1,:].toarray()[0]
print('nb_nstr0',nb_nstr0)
#nb_nstr0 = s2pu[nstr0,:]
#nb_nstr1 = s2pu[nstr1,:]
# common_point = np.intersect1d(nb_nstr0,nb_nstr1)
common_point = np.array([x for x in nb_nstr0 if x in nb_nstr1])
# if len(common_point) == 1:
if common_point.any():
num0 = [x for x in nb_nstr0 if x != common_point]
num1 = [x for x in nb_nstr1 if x != common_point]
p0 = Gspos(num0[0])
p1 = Gspos(num1[0])
pc = Gspos(common_point[0])
v0 = p0-pc
v1 = p1-pc
v0n = v0/np.sqrt(np.sum(v0*v0))
v1n = v1/np.sqrt(np.sum(v1*v1))
if np.dot(v0n,v1n)<=0:
isegments = np.array([ x for x in isegments if x != nstr0 ])
# filter(lambda x: x != nstr0, isegments))
# there are one or more segments
# if len(isegments) > 0:
if isegments.any():
li1 = len(i1)
        points = s2pc[isegments, :].toarray().T
# points = self.s2pc[isegments,:].data.reshape(4,len(isegments))
# pointso = self.seg2pts(isegments)
pta = points[0:2, :]
phe = points[2:, :]
        # add diffraction points
# WARNING Diffraction points are added only if a segment is seen
# it should be the case in 99% of cases
if len(ipoints) > 0:
isegments = np.hstack(
(isegments, np.array(ipoints)[:, 0]))
pta = np.hstack((pta, pipoints))
phe = np.hstack((phe, pipoints))
# cn.show()
# if i0 == (38,79) and i1 == (135,79,23):
        # print(i0, i1)
# import ipdb
# ipdb.set_trace()
# i1 : interaction T
if li1 == 3:
typ, prob = cn.belong_seg(pta, phe)
# if bs.any():
# plu.displot(pta[:,bs],phe[:,bs],color='g')
# if ~bs.any():
# plu.displot(pta[:,~bs],phe[:,~bs],color='k')
# | |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function
import copy
from collections import OrderedDict
import numpy as np
import astropy
from astropy.coordinates import SkyCoord
import fermipy.skymap
from fermipy.data_struct import MutableNamedTuple
def make_default_dict(d):
o = {}
for k, v in d.items():
o[k] = copy.deepcopy(v[0])
return o
def make_default_tuple(d):
vals = [(k, copy.deepcopy(v[0])) for k, v in d.items()]
return MutableNamedTuple(vals)
def make_attrs_class(typename, d):
import attr
vals = {}
for k, v in d.items():
if v[2] == float:
vals[k] = attr.ib(
default=v[0], validator=attr.validators.instance_of(v[2]))
else:
vals[k] = attr.ib(default=v[0])
C = attr.make_class(typename, vals)
return C()
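# Usage sketch for the three helpers above. The option dictionaries in this
# module map each option name to a (default, docstring, type) tuple, so a toy
# dict (hypothetical, for illustration only) behaves the same way:
#
#   opts = {'binsz': (0.1, 'Spatial bin size in degrees.', float),
#           'proj': ('AIT', 'Spatial projection for WCS mode.', str)}
#   make_default_dict(opts)               # -> {'binsz': 0.1, 'proj': 'AIT'}
#   t = make_default_tuple(opts)          # mutable named tuple of defaults
#   C = make_attrs_class('Binning', opts) # attrs instance; float-typed
#                                         # options get an instance_of validator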
DIFF_FLUX_UNIT = r':math:`\mathrm{cm}^{-2}~\mathrm{s}^{-1}~\mathrm{MeV}^{-1}`'
FLUX_UNIT = r':math:`\mathrm{cm}^{-2}~\mathrm{s}^{-1}`'
ENERGY_FLUX_UNIT = r':math:`\mathrm{MeV}~\mathrm{cm}^{-2}~\mathrm{s}^{-1}`'
# Options that are common to several sections
common = {
'multithread': (False, 'Split the calculation across number of processes set by nthread option.', bool),
'nthread': (None, 'Number of processes to create when multithread is True. If None then one process '
'will be created for each available core.', int),
'model': (None, 'Dictionary defining the spatial/spectral properties of the test source. '
'If model is None the test source will be a PointSource with an Index 2 power-law spectrum.', dict),
'free_background': (False, 'Leave background parameters free when performing the fit. If True then any '
'parameters that are currently free in the model will be fit simultaneously '
'with the source of interest.', bool),
'fix_shape': (False, 'Fix spectral shape parameters of the source of interest. If True then only '
'the normalization parameter will be fit.', bool),
'free_radius': (None, 'Free normalizations of background sources within this angular distance in degrees '
'from the source of interest. If None then no sources will be freed.', float),
'make_plots': (False, 'Generate diagnostic plots.', bool),
    'use_weights': (False, 'Use weighted version of maps when making plots.', bool),
'write_fits': (True, 'Write the output to a FITS file.', bool),
'write_npy': (True, 'Write the output dictionary to a numpy file.', bool),
'loge_bounds': (None, 'Restrict the analysis to an energy range (emin,emax) in '
'log10(E/MeV) that is a subset of the analysis energy range. '
'By default the full analysis energy range will be used. If '
'either emin/emax are None then only an upper/lower bound on '
                    'the energy range will be applied.', list),
}
# Options for defining input data files
data = {
'evfile': (None, 'Path to FT1 file or list of FT1 files.', str),
'scfile': (None, 'Path to FT2 (spacecraft) file.', str),
'ltcube': (None, 'Path to livetime cube. If none a livetime cube will be generated with ``gtmktime``.', str),
'cacheft1': (True, 'Cache FT1 files when performing binned analysis. If false then only the counts cube is retained.', bool),
}
# Options for data selection.
selection = {
'emin': (None, 'Minimum Energy (MeV)', float),
'emax': (None, 'Maximum Energy (MeV)', float),
'logemin': (None, 'Minimum Energy (log10(MeV))', float),
'logemax': (None, 'Maximum Energy (log10(MeV))', float),
'tmin': (None, 'Minimum time (MET).', int),
'tmax': (None, 'Maximum time (MET).', int),
'zmax': (None, 'Maximum zenith angle.', float),
'evclass': (None, 'Event class selection.', int),
'evtype': (None, 'Event type selection.', int),
'convtype': (None, 'Conversion type selection.', int),
'phasemin': (None, 'Minimum pulsar phase', float),
'phasemax': (None, 'Maximum pulsar phase', float),
'target': (None, 'Choose an object on which to center the ROI. '
               'This option takes precedence over ra/dec or glon/glat.', str),
'ra': (None, '', float),
'dec': (None, '', float),
'glat': (None, '', float),
'glon': (None, '', float),
'radius': (None, 'Radius of data selection. If none this will be automatically set from the ROI size.', float),
'filter': (None, 'Filter string for ``gtmktime`` selection.', str),
'roicut': ('no', '', str)
}
# Options for ROI model.
model = {
'src_radius':
(None,
'Radius of circular region in degrees centered on the ROI that selects '
'sources for inclusion in the model. If this parameter is none then no '
'selection is applied. This selection is ORed with the ``src_roiwidth`` selection.',
float),
'src_roiwidth':
(None,
'Width of square region in degrees centered on the ROI that selects '
'sources for inclusion in the model. If this parameter is none then no '
'selection is applied. This selection will be ORed with the ``src_radius`` selection.', float),
'src_radius_roi':
(None,
'Half-width of ``src_roiwidth`` selection. This parameter can be used in '
'lieu of ``src_roiwidth``.',
float),
'isodiff': (None, 'Set the path to one or more isotropic templates. A separate component will be '
'generated for each item in this list.', list),
'galdiff': (None, 'Set the path to one or more galactic IEM mapcubes. A separate component will be '
'generated for each item in this list.', list),
'limbdiff': (None, '', list),
'diffuse': (None, '', list),
'diffuse_xml': (None, '', list),
'sources': (None, '', list),
'extdir': (None, 'Set a directory that will be searched for extended source FITS templates. Template files in this directory '
               'will take precedence over catalog source templates with the same name.', str),
'diffuse_dir': (None, '', list),
'catalogs': (None, '', list),
'merge_sources':
(True, 'Merge properties of sources that appear in multiple '
'source catalogs. If merge_sources=false then subsequent sources with '
'the same name will be ignored.', bool),
'assoc_xmatch_columns':
(['3FGL_Name'], 'Choose a set of association columns on which to '
'cross-match catalogs.', list),
'extract_diffuse': (
False, 'Extract a copy of all mapcube components centered on the ROI.',
bool)
}
# Options for configuring likelihood analysis
gtlike = {
'irfs': (None, 'Set the IRF string.', str),
'edisp': (True, 'Enable the correction for energy dispersion.', bool),
'edisp_disable': (None,
'Provide a list of sources for which the edisp '
'correction should be disabled.',
list),
'minbinsz': (0.05, 'Set the minimum bin size used for resampling diffuse maps.', float),
'rfactor': (2, '', int),
'convolve': (True, '', bool),
'resample': (True, '', bool),
'srcmap': (None, 'Set the source maps file. When defined this file will be used instead of the '
'local source maps file.', str),
'bexpmap': (None, '', str),
'bexpmap_roi': (None, '', str),
'srcmap_base': (None, 'Set the baseline source maps file. This will be used to generate a scaled source map.', str),
    'bexpmap_base': (None, 'Set the baseline all-sky exposure map file. This will be used to generate a scaled source map.', str),
    'bexpmap_roi_base': (None, 'Set the baseline ROI exposure map file. This will be used to generate a scaled source map.', str),
'use_external_srcmap': (False, 'Use an external precomputed source map file.', bool),
'use_scaled_srcmap': (False, 'Generate source map by scaling an external srcmap file.', bool),
'wmap': (None, 'Likelihood weights map.', str),
'llscan_npts': (20, 'Number of evaluation points to use when performing a likelihood scan.', int),
'src_expscale': (None, 'Dictionary of exposure corrections for individual sources keyed to source name. The exposure '
'for a given source will be scaled by this value. A value of 1.0 corresponds to the nominal exposure.', dict),
'expscale': (None, 'Exposure correction that is applied to all sources in the analysis component. '
'This correction is superseded by `src_expscale` if it is defined for a source.', float),
}
# Options for generating livetime cubes
ltcube = {
'binsz': (1.0, 'Set the angular bin size for generating livetime cubes.', float),
'phibins': (0, 'Set the number of phi bins for generating livetime cubes.', int),
'dcostheta': (0.025, 'Set the inclination angle binning represented as the cosine of the off-axis angle.', float),
'use_local_ltcube': (False, 'Generate a livetime cube in the vicinity of the ROI using interpolation. '
'This option disables LT cube generation with gtltcube.', bool),
}
# Options for binning.
binning = {
'projtype': ('WCS', 'Projection mode (WCS or HPX).', str),
'proj': ('AIT', 'Spatial projection for WCS mode.', str),
'coordsys': ('CEL', 'Coordinate system of the spatial projection (CEL or GAL).', str),
'npix':
(None,
'Number of pixels. If none then this will be set from ``roiwidth`` '
'and ``binsz``.', int),
'roiwidth': (10.0,
'Width of the ROI in degrees. The number of pixels in each spatial dimension will be set from ``roiwidth`` / ``binsz`` (rounded up).',
float),
'binsz': (0.1, 'Spatial bin size in degrees.', float),
'binsperdec': (8, 'Number of energy bins per decade.', float),
'enumbins': (
None,
'Number of energy bins. If none this will be inferred from energy '
'range and ``binsperdec`` parameter.', int),
'hpx_ordering_scheme': ('RING', 'HEALPix | |
#!/usr/bin/env python2.7
""" Guidelines for Object Oriented Analysis and Design:
1. Write down about the problem
2. Extract Key cnocepts from #1 and reseach them
3. Create a class hierarchy and object map for the concepts - in object has-a is-a fastion
4. Code the classes and a test to run them
5. Repeat and refine iti
It's all like form a very abstract idea and then solidify it further.
Now draw some diagrams depicting the relationship between various things and write description of these things.
- once it's perfect (covers all things needed) separate the nouns and verbs from it (classes/objects and methods).
- ensure that you fully understand it and can visualize it, if not do some research on them and undestand
- get some rough common relations between nouns and how others can be related to each other || a basic hierarchy for classes
- check which of these names are similar things?
* like same thing - for class and instance of it "
* What is basically just another word for another thing?"
- create a basic structure and some code - test that it works
- keep on adding some code and testing it's working, repeat and refine
The mehod used earlier is called top-down method where at top it's just abstract and towards bottom gets more solofied.
There is another way which one can use as one become good at programming and there is some part of this bug puzzle known to you
and can think the problem in terms of code. Some steps for this way (Bottom Up):
1. Take a small piece of the problem; hack on some code and get it to run barely.
2. Refine the code into something more formal with classes and automated tests.
3. Extract the key concepts you're using and try to find research for them.
4. Write up a description of what's really going on.
5. Go back and refine the code, possibly throwing it out and starting over.
6. Repeat, moving on to some other piece of the problem.
Remember that your solution will probably be meandering and weird, so that's why Zed's version of this process involves going
back and finding research then cleaning things up based on what you've learned.
"""
from sys import exit
from random import randint
class Scene(object):
def enter(self):
print "This scene is not yet cofigured. Subclass it and implement enter()"
exit(1)
class Engine(object):
def __init__(self, scene_map):
self.scene_map = scene_map
def play(self):
current_scene = self.scene_map.opening_scene()
while True:
print "\n------------"
next_scene_name = current_scene.enter()
current_scene = self.scene_map.next_scene(next_scene_name)
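# A minimal sketch (hypothetical; the script's actual scene map is not shown
# here) of the object Engine.play() expects: anything exposing
# opening_scene() and next_scene(name).
#
#   class Map(object):
#       scenes = {'central_corridor': CentralCorridor(), 'death': Death()}
#       def __init__(self, start_scene):
#           self.start_scene = start_scene
#       def opening_scene(self):
#           return self.next_scene(self.start_scene)
#       def next_scene(self, scene_name):
#           return Map.scenes.get(scene_name)
#
#   Engine(Map('central_corridor')).play()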
class Death(Scene):
quips = [ "You dead. You kinda suck a this.",
"Your mom would be proud...if she were smarter",
"Such a luser.",
"I've a small puppy that's better at this."
]
def enter(self):
print Death.quips[randint(0, len(self.quips)-1)]
exit(1)
class CentralCorridor(Scene):
def enter(self):
print "The Gothons of Planet Percal #25 have invaded your ship and destroyed"
print "your entire crew. You are the last surviving member and your last"
print "mission is to get the neutron destruct bomb from the Weapons Armory,"
print "put it in the bridge, and blow the ship up after getting into an "
print "escape pod.\n"
print "You're running down the central corridor to the Weapons Armory when"
print "a Gothon jumps out, red scaly skin, dark grimy teeth, and evil clown costume"
print "flowing around his hate filled body. He's blocking the door to the"
print "Armory and about to pull a weapon to blast you."
action = raw_input("(shoot!/dodge!/tell a joke)> ")
if action == "shoot!":
print "Quick on the draw you yank out your blaster and fire it at the Gothon."
print "His clown costume is flowing and moving around his body, which throws"
print "off your aim. Your laser hits his costume but misses him entirely. This"
print "completely ruins his brand new costume his mother bought him, which"
print "makes him fly into a rage and blast you repeatedly in the face until"
print "you are dead. Then he eats you."
return 'death'
elif action == "dodge!":
print "Like a world class boxer you dodge, weave, slip and slide right"
print "as the Gothon's blaster cranks a laser past your head."
print "In the middle of your artful dodge your foot slips and you"
print "bang your head on the metal wall and pass out."
print "You wake up shortly after only to die as the Gothon stomps on"
print "your head and eats you."
return 'death'
elif action == "tell a joke":
print "Lucky for you they made you learn Gothon insults in the academy."
print "You tell the one Gothon joke you know:"
print "Lbhe zbgure vf fb sng, jura fur fvgf nebhaq gur ubhfr, fur fvgf nebhaq gur ubhfr."
print "The Gothon stops, tries not to laugh, then busts out laughing and can't move."
print "While he's laughing you run up and shoot him square in the head"
print "putting him down, then jump through the Weapon Armory door."
return 'laser_weapon_armory'
else:
print "DOES NOT COMPUTE!"
return 'central_corridor'
class LaserWeaponArmory(Scene):
    def enter(self):
print "You do a dive roll into the Weapon Armory, crouch and scan the room"
print "for more Gothons that might be hiding. It's dead quiet, too quiet."
print "You stand up and run to the far side of the room and find the"
print "neutron bomb in its container. There's a keypad lock on the box"
print "and you need the code to get the bomb out. If you get the code"
print "wrong 10 times then the lock closes forever and you can't"
print "get the bomb. The code is 3 digits."
code = "%d%d%d" % (randint(1,9),randint(1,9),randint(1,9))
        try: # cheat code - any non-integer input raises ValueError, which sets guess to the winning code
guess = int(raw_input("[keypad]> "))
guesses = 0
while guess != code and guesses < 10:
print "BZZZZEDDD!"
guesses += 1
guess = int(raw_input("[keypad]> ")) # The bug in the game - there is no escape unless you compare integer with integer
except ValueError:
guess = code
if guess == code:
print "The container clicks open and the seal breaks, letting gas out."
print "You grab the neutron bomb and run as fast as you can to the"
print "bridge where you must place it in the right spot."
return 'the_bridge'
else:
print "The lock buzzes one last time and then you hear a sickening"
print "melting sound as the mechanism is fused together."
print "You decide to sit there, and finally the Gothons blow up the"
print "ship from their ship and you die."
return 'death'
exit(0)
class TheBridge(Scene):
def enter(self):
print "You burst onto the Bridge with the neutron destruct bomb"
print "under your arm and surprise 5 Gothons who are trying to"
print "take control of the ship. Each of them has an even uglier"
print "clown costume than the last. They haven't pulled their"
print "weapons out yet, as they see the active bomb under your"
print "arm and don't want to set it off."
action = raw_input("(throw the bomb/slowly place the bomb/something else)> ")
if action == "throw the bomb":
print "In a panic you throw the bomb at the group of Gothons"
print "and make a leap for the door. Right as you drop it a"
print "Gothon shoots you right in the back killing you."
print "As you die you see another Gothon frantically try to disarm"
print "the bomb. You die knowing they will probably blow up when"
print "it goes off."
return 'death'
elif action == "slowly place the bomb":
print "You point your blaster at the bomb under your arm"
print "and the Gothons put their hands up and start to sweat."
print "You inch backward to the door, open it, and then carefully"
print "place the bomb on the floor, pointing your blaster at it."
| |
"""
@package mi.instrument.seabird.sbe16plus_v2.ctdbp_no.driver
@file mi/instrument/seabird/sbe16plus_v2/ctdbp_no/driver.py
@author <NAME>
@brief Driver class for sbe16plus V2 CTD instrument.
"""
__author__ = '<NAME>'
__license__ = 'Apache 2.0'
import re
import time
import string
from mi.core.log import get_logger
log = get_logger()
from mi.core.common import BaseEnum
from mi.core.instrument.data_particle import DataParticleKey
from mi.core.instrument.data_particle import CommonDataParticleType
from mi.core.exceptions import InstrumentProtocolException
from mi.core.exceptions import SampleException
from xml.dom.minidom import parseString
from mi.core.instrument.protocol_param_dict import ParameterDictVisibility
from mi.core.instrument.protocol_param_dict import ParameterDictType
from mi.instrument.seabird.sbe16plus_v2.ctdpf_jb.driver import ParameterUnit
from mi.instrument.seabird.sbe16plus_v2.ctdpf_jb.driver import Parameter
from mi.instrument.seabird.sbe16plus_v2.ctdpf_jb.driver import Command
from mi.instrument.seabird.sbe16plus_v2.ctdpf_jb.driver import SendOptodeCommand
from mi.instrument.seabird.sbe16plus_v2.ctdpf_jb.driver import SBE19Protocol
from mi.instrument.seabird.sbe16plus_v2.ctdpf_jb.driver import SBE19DataParticle
from mi.instrument.seabird.sbe16plus_v2.ctdpf_jb.driver import SBE19StatusParticle
from mi.instrument.seabird.sbe16plus_v2.ctdpf_jb.driver import SBE19ConfigurationParticle
from mi.instrument.seabird.sbe16plus_v2.ctdpf_jb.driver import OptodeSettingsParticle
from mi.instrument.seabird.sbe16plus_v2.driver import Prompt
from mi.instrument.seabird.driver import SeaBirdParticle
from mi.instrument.seabird.driver import SeaBirdInstrumentDriver
from mi.instrument.seabird.driver import NEWLINE
from mi.instrument.seabird.driver import TIMEOUT
WAKEUP_TIMEOUT = 60
###############################################################################
# Particles
###############################################################################
class DataParticleType(BaseEnum):
RAW = CommonDataParticleType.RAW
CTD_PARSED = 'ctdbp_no_sample'
DEVICE_STATUS = 'ctdbp_no_status'
DEVICE_CALIBRATION = 'ctdbp_no_calibration_coefficients'
DEVICE_HARDWARE = 'ctdbp_no_hardware'
DEVICE_CONFIGURATION = 'ctdbp_no_configuration'
OPTODE_SETTINGS = 'ctdbp_no_optode_settings'
class SBE16NODataParticle(SBE19DataParticle):
"""
This data particle is identical to the corresponding one for CTDPF-Optode, except for the stream
name, which we specify here
"""
_data_particle_type = DataParticleType.CTD_PARSED
class SBE16NOConfigurationParticle(SBE19ConfigurationParticle):
"""
This data particle is identical to the corresponding one for CTDPF-Optode, except for the stream
name, which we specify here
"""
_data_particle_type = DataParticleType.DEVICE_CONFIGURATION
class SBE16NOStatusParticle(SBE19StatusParticle):
"""
This data particle is identical to the corresponding one for CTDPF-Optode, except for the stream
name, which we specify here
"""
_data_particle_type = DataParticleType.DEVICE_STATUS
class SBE16NOOptodeSettingsParticle(OptodeSettingsParticle):
"""
This data particle is identical to the corresponding one for CTDPF-Optode, except for the stream
name, which we specify here
"""
_data_particle_type = DataParticleType.OPTODE_SETTINGS
class SBE16NOHardwareParticleKey(BaseEnum):
SERIAL_NUMBER = "serial_number"
FIRMWARE_VERSION = "firmware_version"
FIRMWARE_DATE = "firmware_date"
COMMAND_SET_VERSION = "command_set_version"
PCB_SERIAL_NUMBER = "pcb_serial_number"
ASSEMBLY_NUMBER = "assembly_number"
MANUFACTURE_DATE = "manufacture_date"
TEMPERATURE_SENSOR_SERIAL_NUMBER = 'temp_sensor_serial_number'
CONDUCTIVITY_SENSOR_SERIAL_NUMBER = 'cond_sensor_serial_number'
PRESSURE_SENSOR_TYPE = 'pressure_sensor_type'
PRESSURE_SENSOR_SERIAL_NUMBER = 'quartz_pressure_sensor_serial_number'
VOLT0_TYPE = 'volt0_type'
VOLT0_SERIAL_NUMBER = 'volt0_serial_number'
VOLT1_TYPE = 'volt1_type'
VOLT1_SERIAL_NUMBER = 'volt1_serial_number'
class SBE16NOHardwareParticle(SeaBirdParticle):
_data_particle_type = DataParticleType.DEVICE_HARDWARE
@staticmethod
def regex():
"""
Regular expression to match a getHD response pattern
@return: regex string
"""
pattern = r'(<HardwareData.*?</HardwareData>)' + NEWLINE
return pattern
@staticmethod
def regex_compiled():
"""
get the compiled regex pattern
@return: compiled re
"""
return re.compile(SBE16NOHardwareParticle.regex(), re.DOTALL)
@staticmethod
def resp_regex():
"""
Regular expression to match a getHD response pattern
@return: regex string
"""
pattern = r'(<HardwareData.*?</HardwareData>)'
return pattern
@staticmethod
def resp_regex_compiled():
"""
get the compiled regex pattern
@return: compiled re
"""
return re.compile(SBE16NOHardwareParticle.resp_regex(), re.DOTALL)
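    # Usage sketch (the XML fragment below is made up, not captured from an
    # instrument): resp_regex matches a bare <HardwareData> block, while
    # regex() additionally requires the trailing NEWLINE.
    #
    #   raw = '<HardwareData DeviceType="SBE16plus" SerialNumber="01606330">' \
    #         '...</HardwareData>'
    #   assert SBE16NOHardwareParticle.resp_regex_compiled().match(raw)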
# noinspection PyPep8Naming
def _build_parsed_values(self):
"""
@throws SampleException If there is a problem with sample creation
"""
SENSOR = "Sensor"
TYPE = "type"
ID = "id"
PCB_SERIAL_NUMBER = "PCBSerialNum"
ASSEMBLY_NUMBER = "AssemblyNum"
SERIAL_NUMBER = "SerialNumber"
FIRMWARE_VERSION = "FirmwareVersion"
FIRMWARE_DATE = "FirmwareDate"
COMMAND_SET_VERSION = "CommandSetVersion"
PCB_ASSEMBLY = "PCBAssembly"
MANUFACTURE_DATE = "MfgDate"
INTERNAL_SENSORS = "InternalSensors"
TEMPERATURE_SENSOR_ID = "Main Temperature"
CONDUCTIVITY_SENSOR_ID = "Main Conductivity"
PRESSURE_SENSOR_ID = "Main Pressure"
EXTERNAL_SENSORS = "ExternalSensors"
VOLT0 = "volt 0"
VOLT1 = "volt 1"
# check to make sure there is a correct match before continuing
match = SBE16NOHardwareParticle.regex_compiled().match(self.raw_data)
if not match:
raise SampleException("No regex match of parsed hardware data: [%s]" %
self.raw_data)
dom = parseString(self.raw_data)
root = dom.documentElement
log.debug("root.tagName = %s", root.tagName)
serial_number = int(root.getAttribute(SERIAL_NUMBER))
firmware_version = self._extract_xml_element_value(root, FIRMWARE_VERSION)
firmware_date = self._extract_xml_element_value(root, FIRMWARE_DATE)
command_set_version = self._extract_xml_element_value(root, COMMAND_SET_VERSION)
manufacture_date = self._extract_xml_element_value(root, MANUFACTURE_DATE)
pcb_assembly_elements = self._extract_xml_elements(root, PCB_ASSEMBLY)
pcb_serial_number = []
pcb_assembly = []
for assembly in pcb_assembly_elements:
pcb_serial_number.append(assembly.getAttribute(PCB_SERIAL_NUMBER))
pcb_assembly.append(assembly.getAttribute(ASSEMBLY_NUMBER))
temperature_sensor_serial_number = 0
conductivity_sensor_serial_number = 0
pressure_sensor_serial_number = 0
pressure_sensor_type = ""
volt0_serial_number = 0
volt0_type = ""
volt1_serial_number = 0
volt1_type = ""
internal_sensors_element = self._extract_xml_elements(root, INTERNAL_SENSORS)[0]
sensors = self._extract_xml_elements(internal_sensors_element, SENSOR)
for sensor in sensors:
sensor_id = sensor.getAttribute(ID)
if sensor_id == TEMPERATURE_SENSOR_ID:
temperature_sensor_serial_number = int(self._extract_xml_element_value(sensor, SERIAL_NUMBER))
elif sensor_id == CONDUCTIVITY_SENSOR_ID:
conductivity_sensor_serial_number = int(self._extract_xml_element_value(sensor, SERIAL_NUMBER))
elif sensor_id == PRESSURE_SENSOR_ID:
pressure_sensor_serial_number = self._extract_xml_element_value(sensor, SERIAL_NUMBER)
pressure_sensor_type = self._extract_xml_element_value(sensor, TYPE)
external_sensors_element = self._extract_xml_elements(root, EXTERNAL_SENSORS)[0]
sensors = self._extract_xml_elements(external_sensors_element, SENSOR)
for sensor in sensors:
sensor_id = sensor.getAttribute(ID)
if sensor_id == VOLT0:
volt0_serial_number = self._extract_xml_element_value(sensor, SERIAL_NUMBER)
volt0_type = self._extract_xml_element_value(sensor, TYPE)
elif sensor_id == VOLT1:
volt1_serial_number = self._extract_xml_element_value(sensor, SERIAL_NUMBER)
volt1_type = self._extract_xml_element_value(sensor, TYPE)
result = [{DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.SERIAL_NUMBER,
DataParticleKey.VALUE: serial_number},
{DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.FIRMWARE_VERSION,
DataParticleKey.VALUE: firmware_version},
{DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.FIRMWARE_DATE,
DataParticleKey.VALUE: firmware_date},
{DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.COMMAND_SET_VERSION,
DataParticleKey.VALUE: command_set_version},
{DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.MANUFACTURE_DATE,
DataParticleKey.VALUE: manufacture_date},
{DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.PCB_SERIAL_NUMBER,
DataParticleKey.VALUE: pcb_serial_number},
{DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.ASSEMBLY_NUMBER,
DataParticleKey.VALUE: pcb_assembly},
{DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.TEMPERATURE_SENSOR_SERIAL_NUMBER,
DataParticleKey.VALUE: temperature_sensor_serial_number},
{DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.CONDUCTIVITY_SENSOR_SERIAL_NUMBER,
DataParticleKey.VALUE: conductivity_sensor_serial_number},
{DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.PRESSURE_SENSOR_SERIAL_NUMBER,
DataParticleKey.VALUE: pressure_sensor_serial_number},
{DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.PRESSURE_SENSOR_TYPE,
DataParticleKey.VALUE: pressure_sensor_type},
{DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.VOLT0_SERIAL_NUMBER,
DataParticleKey.VALUE: volt0_serial_number},
{DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.VOLT0_TYPE,
DataParticleKey.VALUE: volt0_type},
{DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.VOLT1_SERIAL_NUMBER,
DataParticleKey.VALUE: volt1_serial_number},
{DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.VOLT1_TYPE,
DataParticleKey.VALUE: volt1_type},
]
return result
class SBE16NOCalibrationParticleKey(BaseEnum):
SERIAL_NUMBER = "serial_number"
TEMP_SENSOR_SERIAL_NUMBER = " temp_sensor_serial_number "
TEMP_CAL_DATE = "calibration_date_temperature"
TA0 = "temp_coeff_ta0"
TA1 = "temp_coeff_ta1"
TA2 = "temp_coeff_ta2"
TA3 = "temp_coeff_ta3"
TOFFSET = "temp_coeff_offset"
    COND_SENSOR_SERIAL_NUMBER = "cond_sensor_serial_number"
COND_CAL_DATE = "calibration_date_conductivity"
CONDG = "cond_coeff_cg"
CONDH = "cond_coeff_ch"
CONDI = "cond_coeff_ci"
CONDJ = "cond_coeff_cj"
CPCOR = "cond_coeff_cpcor"
CTCOR = "cond_coeff_ctcor"
CSLOPE = "cond_coeff_cslope"
PRES_SERIAL_NUMBER = "press_serial_number"
PRES_CAL_DATE = "calibration_date_pressure"
PC1 = "press_coeff_pc1"
PC2 = "press_coeff_pc2"
PC3 = "press_coeff_pc3"
PD1 = "press_coeff_pd1"
PD2 = "press_coeff_pd2"
PT1 = "press_coeff_pt1"
PT2 = "press_coeff_pt2"
PT3 = "press_coeff_pt3"
PT4 = "press_coeff_pt4"
PSLOPE = "press_coeff_pslope"
POFFSET = "press_coeff_poffset"
PRES_RANGE = "pressure_sensor_range"
EXT_VOLT0_OFFSET = "ext_volt0_offset"
EXT_VOLT0_SLOPE = "ext_volt0_slope"
EXT_VOLT1_OFFSET = "ext_volt1_offset"
EXT_VOLT1_SLOPE = "ext_volt1_slope"
EXT_VOLT2_OFFSET = "ext_volt2_offset"
EXT_VOLT2_SLOPE = "ext_volt2_slope"
EXT_VOLT3_OFFSET = "ext_volt3_offset"
EXT_VOLT3_SLOPE = "ext_volt3_slope"
EXT_VOLT4_OFFSET = "ext_volt4_offset"
EXT_VOLT4_SLOPE = "ext_volt4_slope"
EXT_VOLT5_OFFSET = "ext_volt5_offset"
EXT_VOLT5_SLOPE = "ext_volt5_slope"
EXT_FREQ = "ext_freq_sf"
class SBE16NOCalibrationParticle(SeaBirdParticle):
"""
Routines for parsing raw data into a data particle structure. Override
the building of values, and the rest should come along for free.
"""
_data_particle_type = DataParticleType.DEVICE_CALIBRATION
@staticmethod
def regex():
pattern = r'(<CalibrationCoefficients.*?</CalibrationCoefficients>)' + NEWLINE
return pattern
@staticmethod
def regex_compiled():
return re.compile(SBE16NOCalibrationParticle.regex(), re.DOTALL)
@staticmethod
def resp_regex():
pattern = r'(<CalibrationCoefficients.*?</CalibrationCoefficients>)'
return pattern
@staticmethod
def resp_regex_compiled():
return re.compile(SBE16NOCalibrationParticle.resp_regex(), re.DOTALL)
def _map_param_to_xml_tag(self, parameter_name):
map_param_to_tag = {SBE16NOCalibrationParticleKey.TEMP_SENSOR_SERIAL_NUMBER: "SerialNum",
SBE16NOCalibrationParticleKey.TEMP_CAL_DATE: "CalDate",
SBE16NOCalibrationParticleKey.TA0: "TA0",
SBE16NOCalibrationParticleKey.TA1: "TA1",
SBE16NOCalibrationParticleKey.TA2: "TA2",
SBE16NOCalibrationParticleKey.TA3: "TA3",
SBE16NOCalibrationParticleKey.TOFFSET: "TOFFSET",
SBE16NOCalibrationParticleKey.COND_SENSOR_SERIAL_NUMBER: "SerialNum",
SBE16NOCalibrationParticleKey.COND_CAL_DATE: "CalDate",
SBE16NOCalibrationParticleKey.CONDG: "G",
SBE16NOCalibrationParticleKey.CONDH: "H",
SBE16NOCalibrationParticleKey.CONDI: "I",
SBE16NOCalibrationParticleKey.CONDJ: "J",
SBE16NOCalibrationParticleKey.CPCOR: "CPCOR",
SBE16NOCalibrationParticleKey.CTCOR: "CTCOR",
SBE16NOCalibrationParticleKey.CSLOPE: "CSLOPE",
SBE16NOCalibrationParticleKey.PRES_SERIAL_NUMBER: "SerialNum",
SBE16NOCalibrationParticleKey.PRES_CAL_DATE: "CalDate",
SBE16NOCalibrationParticleKey.PC1: "PC1",
SBE16NOCalibrationParticleKey.PC2: "PC2",
SBE16NOCalibrationParticleKey.PC3: "PC3",
SBE16NOCalibrationParticleKey.PD1: "PD1",
SBE16NOCalibrationParticleKey.PD2: "PD2",
SBE16NOCalibrationParticleKey.PT1: "PT1",
SBE16NOCalibrationParticleKey.PT2: "PT2",
SBE16NOCalibrationParticleKey.PT3: "PT3",
SBE16NOCalibrationParticleKey.PT4: "PT4",
SBE16NOCalibrationParticleKey.PSLOPE: "PSLOPE",
SBE16NOCalibrationParticleKey.POFFSET: "POFFSET",
SBE16NOCalibrationParticleKey.PRES_RANGE: "PRANGE",
SBE16NOCalibrationParticleKey.EXT_VOLT0_OFFSET: "OFFSET",
SBE16NOCalibrationParticleKey.EXT_VOLT0_SLOPE: "SLOPE",
SBE16NOCalibrationParticleKey.EXT_VOLT1_OFFSET: "OFFSET",
SBE16NOCalibrationParticleKey.EXT_VOLT1_SLOPE: "SLOPE",
SBE16NOCalibrationParticleKey.EXT_VOLT2_OFFSET: "OFFSET",
SBE16NOCalibrationParticleKey.EXT_VOLT2_SLOPE: "SLOPE",
SBE16NOCalibrationParticleKey.EXT_VOLT3_OFFSET: "OFFSET",
SBE16NOCalibrationParticleKey.EXT_VOLT3_SLOPE: "SLOPE",
SBE16NOCalibrationParticleKey.EXT_VOLT4_OFFSET: "OFFSET",
SBE16NOCalibrationParticleKey.EXT_VOLT4_SLOPE: "SLOPE",
SBE16NOCalibrationParticleKey.EXT_VOLT5_OFFSET: "OFFSET",
SBE16NOCalibrationParticleKey.EXT_VOLT5_SLOPE: "SLOPE",
SBE16NOCalibrationParticleKey.EXT_FREQ: "EXTFREQSF",
}
return map_param_to_tag[parameter_name]
@staticmethod
def _float_to_int(fl_str):
return int(float(fl_str))
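    # e.g. _float_to_int("160.5") -> 160; used below to coerce the PRANGE
    # calibration value, which getCC apparently reports as a float string.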
# noinspection PyPep8Naming
def _build_parsed_values(self):
"""
Parse the output of the getCC command
@throws SampleException If there is a problem with sample creation
"""
SERIAL_NUMBER = "SerialNumber"
CALIBRATION = "Calibration"
ID = "id"
TEMPERATURE_SENSOR_ID = "Main Temperature"
CONDUCTIVITY_SENSOR_ID = "Main Conductivity"
PRESSURE_SENSOR_ID = "Main Pressure"
VOLT0 = "Volt 0"
VOLT1 = "Volt 1"
VOLT2 = "Volt 2"
VOLT3 = "Volt 3"
VOLT4 = "Volt 4"
VOLT5 = "Volt 5"
EXTERNAL_FREQUENCY_CHANNEL = "external frequency channel"
# check to make sure there is a correct match before continuing
match = SBE16NOCalibrationParticle.regex_compiled().match(self.raw_data)
if not match:
raise SampleException("No regex match of parsed calibration data: [%s]" %
self.raw_data)
dom = parseString(self.raw_data)
root = dom.documentElement
log.debug("root.tagName = %s", root.tagName)
serial_number = int(root.getAttribute(SERIAL_NUMBER))
result = [{DataParticleKey.VALUE_ID: SBE16NOCalibrationParticleKey.SERIAL_NUMBER,
DataParticleKey.VALUE: serial_number},
]
calibration_elements = self._extract_xml_elements(root, CALIBRATION)
for calibration in calibration_elements:
id_attr = calibration.getAttribute(ID)
if id_attr == TEMPERATURE_SENSOR_ID:
result.append(
self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.TEMP_SENSOR_SERIAL_NUMBER, int))
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.TEMP_CAL_DATE, str))
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.TA0))
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.TA1))
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.TA2))
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.TA3))
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.TOFFSET))
elif id_attr == CONDUCTIVITY_SENSOR_ID:
result.append(
self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.COND_SENSOR_SERIAL_NUMBER, int))
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.COND_CAL_DATE, str))
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.CONDG))
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.CONDH))
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.CONDI))
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.CONDJ))
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.CPCOR))
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.CTCOR))
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.CSLOPE))
elif id_attr == PRESSURE_SENSOR_ID:
result.append(
self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.PRES_SERIAL_NUMBER, int))
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.PRES_CAL_DATE, str))
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.PC1))
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.PC2))
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.PC3))
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.PD1))
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.PD2))
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.PT1))
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.PT2))
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.PT3))
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.PT4))
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.PSLOPE))
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.POFFSET))
result.append(
self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.PRES_RANGE, self._float_to_int))
elif id_attr == VOLT0:
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.EXT_VOLT0_OFFSET))
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.EXT_VOLT0_SLOPE))
elif id_attr == VOLT1:
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.EXT_VOLT1_OFFSET))
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.EXT_VOLT1_SLOPE))
elif id_attr == VOLT2:
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.EXT_VOLT2_OFFSET))
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.EXT_VOLT2_SLOPE))
elif id_attr == VOLT3:
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.EXT_VOLT3_OFFSET))
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.EXT_VOLT3_SLOPE))
elif id_attr == VOLT4:
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.EXT_VOLT4_OFFSET))
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.EXT_VOLT4_SLOPE))
elif id_attr == VOLT5:
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.EXT_VOLT5_OFFSET))
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.EXT_VOLT5_SLOPE))
elif id_attr == EXTERNAL_FREQUENCY_CHANNEL:
result.append(self._get_xml_parameter(calibration, SBE16NOCalibrationParticleKey.EXT_FREQ))
return result
###############################################################################
# Seabird Electronics 16plus V2 NO Driver.
###############################################################################
class InstrumentDriver(SeaBirdInstrumentDriver):
"""
InstrumentDriver subclass
Subclasses SingleConnectionInstrumentDriver with connection state
machine.
"""
def __init__(self, evt_callback):
"""
InstrumentDriver constructor.
@param evt_callback Driver process event callback.
"""
#Construct superclass.
SeaBirdInstrumentDriver.__init__(self, evt_callback)
########################################################################
# Superclass overrides for resource query.
########################################################################
# noinspection PyMethodMayBeStatic
def | |
#!/usr/bin/env python3
import tensorflow as tf
import numpy as np
import os
import math
import foolbox
import scipy
import matplotlib.pyplot as plt
from PIL import Image
#Utilizes the FoolBox Python library (https://github.com/bethgelab/foolbox) to implement a variety
#of adversarial attacks against deep-learning models implemented in TensorFlow's Core API
class parent_attack:
def __init__(self, attack_dic,
criterion=foolbox.criteria.Misclassification()):
self.model_prediction_function = attack_dic['model_prediction_function']
self.model_weights = attack_dic['model_weights']
self.var_list = attack_dic['var_list']
self.weights_dic = attack_dic['weights_dic']
self.biases_dic = attack_dic['biases_dic']
self.input_data = attack_dic['input_data']
self.input_labels = attack_dic['input_labels']
self.input_placeholder = attack_dic['input_placeholder']
self.dropout_rate_placeholder = attack_dic['dropout_rate_placeholder']
self.output_directory = attack_dic['output_directory']
self.num_attack_examples = attack_dic['num_attack_examples']
self.dynamic_dic = attack_dic['dynamic_dic'] #Determines if e.g. a network section is ablated, or noise is added to the logits
self.batch_size = attack_dic['batch_size']
self.save_images = attack_dic['save_images']
self.estimate_gradients = attack_dic['estimate_gradients']
self.adver_model = attack_dic['adver_model']
self.adver_checkpoint = attack_dic['adver_checkpoint']
self.criterion = criterion #note by default this is simply foolbox's Misclassification criterion
#Define the class attribute, attack_method, to be the Blended Uniform Noise attack by default
attack_method = foolbox.attacks.BlendedUniformNoiseAttack
foolbox_distance_metric = foolbox.distances.MeanSquaredDistance
attack_type_dir = 'Parent_*_not_advised_*_'
def evaluate_resistance(self):
        if self.adver_model is None:
logits, _ = self.model_prediction_function(self.input_placeholder, self.dropout_rate_placeholder, self.weights_dic, self.biases_dic, self.dynamic_dic)
saver = tf.train.Saver(self.var_list) #Define saver object for use later when loading the model weights
else:
saver = tf.train.Saver()
self.mk_dir()
with tf.Session() as session:
#Define the foolbox model
if self.adver_model == None:
print("\nEvaluating a non-adversarially trained model")
saver.restore(session, self.model_weights) #Note when restoring weights it's important not to run init on the same
#variables, as this will overwrite the learned weights with randomly initialized ones
fmodel = foolbox.models.TensorFlowModel(self.input_placeholder, logits, (0,1))
else:
print("\nEvaluating an adversarially trained model")
saver.restore(session, self.adver_checkpoint)
fmodel = foolbox.models.TensorFlowModel(self.input_placeholder, self.adver_model.pre_softmax, (0,1))
#Wrap the model to enable estimated gradients if desired
if self.estimate_gradients == True:
print("\nUsing a model with *estimated* gradients.")
estimator = foolbox.gradient_estimators.CoordinateWiseGradientEstimator(epsilon=0.01)
fmodel = foolbox.models.ModelWithEstimatedGradients(fmodel, gradient_estimator=estimator)
#The default CoordinateWiseGradientEstimator is the same as that used in the Schott et al., 2019 ABS paper from ICLR
print("\nPerforming " + self.attack_type_dir + " attack")
print("Evaluating " + str(self.num_attack_examples) + " adversarial example(s)")
#Arrays for storing results of the evaluation
adversary_found = np.zeros([self.num_attack_examples]) #array of booleans that indicates if an adversary was found for a particular image
adversary_distance = np.zeros([self.num_attack_examples])
adversaries_array = np.zeros(np.concatenate(([self.num_attack_examples], self.input_data.shape[1:])))
adversary_labels = []
self.attack_specification(fmodel)
for batch_iter in range(math.ceil(self.num_attack_examples/self.batch_size)):
execution_batch_data = self.input_data[batch_iter*self.batch_size:min((batch_iter+1)*self.batch_size, self.num_attack_examples)]
execution_batch_labels = np.argmax(self.input_labels[batch_iter*self.batch_size:min((batch_iter+1)*self.batch_size, self.num_attack_examples)], axis=1)
#Carry out the attack
adversarial_images, batch_adversary_labels = self.create_adversarial(execution_batch_data, execution_batch_labels)
adversary_labels.extend(batch_adversary_labels)
#Process results of the batched attack
for example_iter in range(execution_batch_data.shape[0]):
if np.any(adversarial_images[example_iter] == None) or np.all(np.isnan(adversarial_images[example_iter])):
print("\nNo adversarial image found - attack returned None or array of NaNs\n")
#As in Schott et al., 2019, the distance of an unsuccessful attack is recorded as infinity
adversary_distance[batch_iter*self.batch_size + example_iter] = np.inf
else:
adversary_found, adversary_distance, adversaries_array = self.store_data(adversary_found, adversary_distance, adversaries_array,
execution_batch_data[example_iter], execution_batch_labels[example_iter], adversarial_images[example_iter], batch_iter*self.batch_size + example_iter, fmodel)
adversary_labels = np.asarray(adversary_labels)
return adversary_found, adversary_distance, adversaries_array, adversary_labels
def attack_specification(self, fmodel):
self.attack_fmodel = self.attack_method(model=fmodel, criterion=self.criterion, distance=self.foolbox_distance_metric)
#Make the attack directory for storing results
def mk_dir(self):
if os.path.exists('adversarial_images/' + self.output_directory + '/' + self.attack_type_dir + '/') == 0:
try:
os.mkdir('adversarial_images/' + self.output_directory + '/')
except OSError:
pass
try:
os.mkdir('adversarial_images/' + self.output_directory + '/' + self.attack_type_dir + '/')
except OSError:
pass
def create_adversarial(self, execution_data, execution_label):
adversarials = self.attack_fmodel(execution_data, execution_label, unpack=False)
adversary_labels = np.asarray([a.adversarial_class for a in adversarials])
adversarial_images = np.asarray([a.perturbed for a in adversarials])
return adversarial_images, adversary_labels
def store_data(self, adversary_found, adversary_distance, adversaries_array, execution_data, execution_label, adversarial_image, results_iter, fmodel):
adversaries_array[results_iter] = adversarial_image
#Note only 10 adversarial images are saved for a given attack to reduce memory issues
if self.save_images == True and (results_iter<10):
if adversarial_image.shape[2] == 3:
image_to_png = adversarial_image
elif adversarial_image.shape[2] == 1:
image_to_png = np.squeeze(adversarial_image, axis=2) #Remove last dimension if saving to greyscale
plt.imsave('adversarial_images/' + self.output_directory + '/' +
self.attack_type_dir + '/AttackNum' + str(results_iter) + '_Predicted' + str(np.argmax(fmodel.forward(adversarial_image[None, :, :, :]))) +
'_GroundTruth' + str(execution_label) + '.png', image_to_png)
print("The classification label following attack is " + str(np.argmax(fmodel.forward(adversarial_image[None]))) + " from an original classification of " + str(execution_label))
distance, distance_name = self.distance_metric(execution_data.flatten(), adversarial_image.flatten())
print("The " + distance_name + " distance of the adversary is " + str(distance))
adversary_found[results_iter] = 1
adversary_distance[results_iter] = distance
return adversary_found, adversary_distance, adversaries_array
def distance_metric(self, vector1, vector2):
distance = scipy.spatial.distance.euclidean(vector1, vector2)
distance_name = 'Euclidean (L-2)'
return distance, distance_name
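#A hedged usage sketch: the attack_dic keys mirror the constructor above, but
#every value here is a hypothetical placeholder (model builder, checkpoint
#path, data arrays), not something defined in this file. In practice one of
#the subclasses below would be used rather than the parent class itself,
#which falls back to the BlendedUniformNoiseAttack.
#
# attack_dic = {
#     'model_prediction_function': my_model_fn, 'model_weights': 'ckpt/model.ckpt',
#     'var_list': my_var_list, 'weights_dic': weights, 'biases_dic': biases,
#     'input_data': test_images, 'input_labels': test_labels,
#     'input_placeholder': x_ph, 'dropout_rate_placeholder': dropout_ph,
#     'output_directory': 'demo', 'num_attack_examples': 10,
#     'dynamic_dic': {}, 'batch_size': 5, 'save_images': True,
#     'estimate_gradients': False, 'adver_model': None, 'adver_checkpoint': None,
# }
# attack = parent_attack(attack_dic)
# found, distances, images, labels = attack.evaluate_resistance()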
class check_stochasticity(parent_attack):
#Performs checks to ensure there are no unintended stochastic elements (e.g. due to numerical issues) in a model's predictions in foolbox
def perform_check(self):
logits, _ = self.model_prediction_function(self.input_placeholder, self.dropout_rate_placeholder, self.weights_dic, self.biases_dic, self.dynamic_dic)
saver = tf.train.Saver(self.var_list)
with tf.Session() as session:
saver.restore(session, self.model_weights)
fmodel = foolbox.models.TensorFlowModel(self.input_placeholder, logits, (0,1))
print('Checking the models performance on multiple runs of the same images')
for example_iter in range(self.num_attack_examples):
execution_data = self.input_data[example_iter, :, :, :]
logits_list = []
labels_list = []
#Check the same image with multiple runs
for ii in range(10):
#Return the logits and label of the model
predicted_logits = fmodel.forward(execution_data[None,:,:,:])
logits_list.extend(predicted_logits)
#Check every element is equivalent to the most recent prediction
assert np.all(logits_list == np.asarray(predicted_logits)), "***Some of the logits are changing stochastically***"
print("No stochastic elements identified")
class transfer_attack_L2(parent_attack):
#Override the parent constructor to add three additional attributes: starting_adversaries, epsilon_step_size, and max_iterations
def __init__(self, attack_dic,
starting_adversaries,
epsilon_step_size=0.01,
max_iterations=1000):
parent_attack.__init__(self, attack_dic)
self.starting_adversaries = starting_adversaries
self.epsilon_step_size = epsilon_step_size
self.max_iterations = max_iterations
attack_type_dir = 'Transfer_L2'
#Override the evaluate_resistance method with one that finds minimally perturbed transfer-attack images
def evaluate_resistance(self):
if self.adver_model == None:
logits, _ = self.model_prediction_function(self.input_placeholder, self.dropout_rate_placeholder, self.weights_dic, self.biases_dic, self.dynamic_dic)
saver = tf.train.Saver(self.var_list) #Define saver object for use later when loading the model weights
else:
saver = tf.train.Saver()
self.mk_dir()
with tf.Session() as session:
#Define the foolbox model
if self.adver_model == None:
print("\nEvaluating a non-adversarially trained model")
saver.restore(session, self.model_weights) #Note when restoring weights it's important not to run init on the same
#variables, as this will overwrite the learned weights with randomly initialized ones
fmodel = foolbox.models.TensorFlowModel(self.input_placeholder, logits, (0,1))
else:
print("\nEvaluating an adversarially trained model")
saver.restore(session, self.adver_checkpoint)
fmodel = foolbox.models.TensorFlowModel(self.input_placeholder, self.adver_model.pre_softmax, (0,1))
print("\nPerforming a Transfer attack")
print("Evaluating " + str(self.num_attack_examples) + " adversarial example(s)")
#Arrays for storing results of the evaluation
adversary_distance = np.zeros([4, self.num_attack_examples])
for example_iter in range(self.num_attack_examples):
print("Transfer attack number " + str(example_iter))
#Iterate through the four different starting points for generating adversaries (two different gradient
# based attacks for each of the two main architecture types --> binding or not binding); the minimally-perturbed attack will be returned
for base_method_iter in range(4):
adversary_distance = self.iterative_perturbation(fmodel, adversary_distance, example_iter, base_method_iter, unperturbed_image=self.input_data[example_iter],
ground_truth_label=self.input_labels[example_iter], starting_adversary=self.starting_adversaries[base_method_iter, example_iter])
print("Method " + str(base_method_iter) + " distance is " + str(adversary_distance[base_method_iter, example_iter]))
#Of all images generated from the base attack types, select the minimally perturbed image for each example
adversary_distance = adversary_distance.min(axis=0)
return adversary_distance
def iterative_perturbation(self, fmodel, adversary_distance, example_iter, base_method_iter, unperturbed_image, ground_truth_label, starting_adversary):
epsilon = 0.0
current_iteration = 1
#First check if the base attack method failed on the surrogate model
#If so, check whether the target model correctly classifies the unperturbed image: if it does, the attack failed; otherwise it counts as a successful attack with distance 0
if np.any(starting_adversary == None) or np.all(np.isnan(starting_adversary)):
if (np.argmax(fmodel.forward(unperturbed_image[None])) == np.argmax(ground_truth_label)):
print("Base attack failed, and target model correctly classified image.")
adversary_distance[base_method_iter, example_iter] = np.inf
else:
print("Base attack failed, but target model misclassified image.")
adversary_distance[base_method_iter, example_iter] = 0
else:
#Begin with an *unperturbed* image, as this may already be enough to fool the target model
transfer_perturbed = unperturbed_image
print("Original classification is " + str(np.argmax(fmodel.forward(transfer_perturbed[None]))))
print("Ground truth label is " + str(np.argmax(ground_truth_label)))
# Binary search for the transfer attack as used in Schott et al.; based on code provided by <NAME> (Bethge Lab)
direction = starting_adversary - unperturbed_image
bad = 0
good = None
epsilon_binary = 1
k = 10
#Rapidly identify starting point for binary search
for _ in range(k):
transfer_perturbed = unperturbed_image + epsilon_binary * direction
transfer_perturbed = np.clip(transfer_perturbed, 0, 1)
print("Epsilon is " + str(epsilon_binary))
if (np.argmax(fmodel.forward(transfer_perturbed[None])) != np.argmax(ground_truth_label)):
good = epsilon_binary
break
else:
bad = epsilon_binary
epsilon_binary *= 2
print("After exponential binary search, the classification is " + str(np.argmax(fmodel.forward(transfer_perturbed[None]))))
if np.argmax(fmodel.forward(transfer_perturbed[None])) == np.argmax(ground_truth_label):
print("Exponential search failed")
adversary_distance[base_method_iter, example_iter] = np.inf
print("The distance is " + | |
# liberapay/utils/i18n.py
# encoding: utf8
from __future__ import print_function, unicode_literals
from collections import namedtuple, OrderedDict
from datetime import date, datetime, timedelta
from decimal import Decimal, InvalidOperation
from hashlib import md5
from io import BytesIO
import re
from unicodedata import combining, normalize
from six import text_type
from aspen.simplates.pagination import parse_specline, split_and_escape
import babel.core
from babel.dates import format_date, format_datetime, format_timedelta
from babel.messages.extract import extract_python
from babel.messages.pofile import Catalog
from babel.numbers import (
format_currency, format_decimal, format_number, format_percent,
NumberFormatError, parse_decimal
)
import jinja2.ext
from mangopay.utils import Money
from markupsafe import Markup
from pando.utils import utcnow
from liberapay.constants import CURRENCIES, D_CENT, D_MAX
from liberapay.exceptions import InvalidNumber
from liberapay.utils.currencies import MoneyBasket
from liberapay.website import website
def LegacyMoney(o):
return o if isinstance(o, (Money, MoneyBasket)) else Money(o, 'EUR')
Wrap = namedtuple('Wrap', 'value wrapper')
BOLD = Markup('<b>%s</b>')
def Bold(value):
return Wrap(value, BOLD)
class Currency(str):
pass
class Age(timedelta):
def __new__(cls, *a, **kw):
if len(a) == 1 and not kw and isinstance(a[0], timedelta):
return timedelta.__new__(cls, a[0].days, a[0].seconds, a[0].microseconds)
return timedelta.__new__(cls, *a, **kw)
class Locale(babel.core.Locale):
def __init__(self, *a, **kw):
super(Locale, self).__init__(*a, **kw)
self.decimal_symbol = self.number_symbols.get('decimal', '.')
delta_p = self.currency_formats['standard'].pattern
assert ';' not in delta_p
self.currency_delta_pattern = '+{0};-{0}'.format(delta_p)
def format_money(self, m, format=None, trailing_zeroes=True):
s = format_currency(m.amount, m.currency, format, locale=self)
if not trailing_zeroes:
s = s.replace(self.decimal_symbol + '00', '')
return s
def format_date(self, date, format='medium'):
if format.endswith('_yearless'):
format = self.date_formats[format]
return format_date(date, format, locale=self)
def format_datetime(self, *a):
return format_datetime(*a, locale=self)
def format_decimal(self, *a):
return format_decimal(*a, locale=self)
def format_list(self, l):
n = len(l)
if n > 2:
last = n - 2
r = l[0]
for i, item in enumerate(l[1:]):
r = self.list_patterns[
'start' if i == 0 else 'end' if i == last else 'middle'
].format(r, item)
return r
elif n == 2:
return self.list_patterns['2'].format(*l)
else:
return l[0] if n == 1 else None
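# Example (assuming standard CLDR 'en' list patterns): format_list(['a', 'b', 'c'])
# yields "a, b, and c", format_list(['a', 'b']) yields "a and b", and
# format_list(['a']) returns 'a'.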
def format_money_basket(self, basket, sep=','):
if basket is None:
return '0'
items = (
format_currency(money.amount, money.currency, locale=self)
for money in basket if money
)
if sep == ',':
r = self.format_list(list(items))
else:
r = sep.join(items)
return r or '0'
def format_money_delta(self, money, *a):
return format_currency(
money.amount, money.currency, *a,
format=self.currency_delta_pattern, locale=self
)
def format_number(self, *a):
return format_number(*a, locale=self)
def format_percent(self, *a):
return format_percent(*a, locale=self)
def parse_decimal_or_400(self, s, maximum=D_MAX):
try:
r = parse_decimal(s, locale=self)
except (InvalidOperation, NumberFormatError, ValueError):
raise InvalidNumber(s)
if r.quantize(D_CENT) != r:
raise InvalidNumber(s)
if maximum is not None and r > maximum:
raise InvalidNumber(s)
return r
@staticmethod
def title(s):
return s[0].upper() + s[1:] if s and s[0].islower() else s
def to_age_str(self, o, **kw):
if not isinstance(o, datetime):
kw.setdefault('granularity', 'day')
return format_timedelta(to_age(o), locale=self, **kw)
ALIASES = {k: v.lower() for k, v in babel.core.LOCALE_ALIASES.items()}
ALIASES_R = {v: k for k, v in ALIASES.items()}
def strip_accents(s):
return ''.join(c for c in normalize('NFKD', s) if not combining(c))
def make_sorted_dict(keys, d):
items = ((k, d[k]) for k in keys)
return OrderedDict(sorted(items, key=lambda t: strip_accents(t[1])))
COUNTRY_CODES = """
AD AE AF AG AI AL AM AO AQ AR AS AT AU AW AX AZ BA BB BD BE BF BG BH BI BJ
BL BM BN BO BQ BR BS BT BV BW BY BZ CA CC CD CF CG CH CI CK CL CM CN CO CR
CU CV CW CX CY CZ DE DJ DK DM DO DZ EC EE EG EH ER ES ET FI FJ FK FM FO FR
GA GB GD GE GF GG GH GI GL GM GN GP GQ GR GS GT GU GW GY HK HM HN HR HT HU
ID IE IL IM IN IO IQ IR IS IT JE JM JO JP KE KG KH KI KM KN KP KR KW KY KZ
LA LB LC LI LK LR LS LT LU LV LY MA MC MD ME MF MG MH MK ML MM MN MO MP MQ
MR MS MT MU MV MW MX MY MZ NA NC NE NF NG NI NL NO NP NR NU NZ OM PA PE PF
PG PH PK PL PM PN PR PS PT PW PY QA RE RO RS RU RW SA SB SC SD SE SG SH SI
SJ SK SL SM SN SO SR SS ST SV SX SY SZ TC TD TF TG TH TJ TK TL TM TN TO TR
TT TV TW TZ UA UG UM US UY UZ VA VC VE VG VI VN VU WF WS YE YT ZA ZM ZW
""".split()
COUNTRIES = make_sorted_dict(COUNTRY_CODES, Locale('en').territories)
CURRENCIES_MAP = {
k: v[-1][0]
for k, v in babel.core.get_global('territory_currencies').items()
if v[-1][0] in CURRENCIES
}
LANGUAGE_CODES_2 = """
aa af ak am ar as az be bg bm bn bo br bs ca cs cy da de dz ee el en eo es
et eu fa ff fi fo fr ga gd gl gu gv ha he hi hr hu hy ia id ig ii is it ja
ka ki kk kl km kn ko ks kw ky lg ln lo lt lu lv mg mk ml mn mr ms mt my nb
nd ne nl nn nr om or os pa pl ps pt rm rn ro ru rw se sg si sk sl sn so sq
sr ss st sv sw ta te tg th ti tn to tr ts uk ur uz ve vi vo xh yo zh zu
""".split()
LANGUAGES_2 = make_sorted_dict(LANGUAGE_CODES_2, Locale('en').languages)
LOCALES = {}
LOCALE_EN = LOCALES['en'] = Locale('en')
LOCALE_EN.catalog = Catalog('en')
LOCALE_EN.catalog.plural_func = lambda n: n != 1
LOCALE_EN.countries = COUNTRIES
LOCALE_EN.languages_2 = LANGUAGES_2
SEARCH_CONFS = dict((
('da', 'danish'),
('de', 'german'),
('en', 'english'),
('es', 'spanish'),
('fi', 'finnish'),
('fr', 'french'),
('hu', 'hungarian'),
('it', 'italian'),
('nb', 'norwegian'),
('nl', 'dutch'),
('nn', 'norwegian'),
('pt', 'portuguese'),
('ro', 'romanian'),
('ru', 'russian'),
('sv', 'swedish'),
('tr', 'turkish'),
))
_ = lambda a: a
HTTP_ERRORS = {
403: _("Forbidden"),
404: _("Not Found"),
429: _("Too Many Requests"),
500: _("Internal Server Error"),
502: _("Upstream Error"),
503: _("Service Unavailable"),
504: _("Gateway Timeout"),
}
del _
ternary_re = re.compile(r'^(.+?) *\? *(.+?) *: *(.+?)$')
and_re = re.compile(r' *&& *')
or_re = re.compile(r' *\|\| *')
def strip_parentheses(s):
s = s.strip()
if s[:1] == '(' and s[-1:] == ')':
s = s[1:-1].strip()
return s
def ternary_sub(m):
g1, g2, g3 = m.groups()
return '%s if %s else %s' % (g2, g1, ternary_re.sub(ternary_sub, strip_parentheses(g3)))
def get_function_from_rule(rule):
rule = ternary_re.sub(ternary_sub, strip_parentheses(rule))
rule = and_re.sub(' and ', rule)
rule = or_re.sub(' or ', rule)
return eval('lambda n: ' + rule, {'__builtins__': {}})
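# Example: a CLDR-style plural rule written in the C-like ternary syntax this
# parser expects (the rule itself is illustrative, not one shipped here):
#
#   f = get_function_from_rule('(n % 10 == 1) && (n % 100 != 11) ? 0 : 1')
#   assert f(1) == 0 and f(11) == 1 and f(21) == 0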
def _decode(o):
return o.decode('ascii') if isinstance(o, bytes) else o
def i_format(loc, s, *a, **kw):
if a:
a = list(a)
for c, f in [(a, enumerate), (kw, dict.items)]:
for k, o in f(c):
o, wrapper = (o.value, o.wrapper) if isinstance(o, Wrap) else (o, None)
if isinstance(o, text_type):
pass
elif isinstance(o, Decimal):
c[k] = format_decimal(o, locale=loc)
elif isinstance(o, int):
c[k] = format_number(o, locale=loc)
elif isinstance(o, Money):
c[k] = loc.format_money(o)
elif isinstance(o, MoneyBasket):
c[k] = loc.format_money_basket(o)
elif isinstance(o, Age):
c[k] = format_timedelta(o, locale=loc, **o.format_args)
elif isinstance(o, timedelta):
c[k] = format_timedelta(o, locale=loc)
elif isinstance(o, datetime):
c[k] = format_datetime(o, locale=loc)
elif isinstance(o, date):
c[k] = format_date(o, locale=loc)
elif isinstance(o, Locale):
c[k] = loc.languages.get(o.language) or o.language.upper()
elif isinstance(o, Currency):
c[k] = loc.currencies.get(o, o)
if wrapper:
c[k] = wrapper % (c[k],)
return s.format(*a, **kw)
def get_text(state, loc, s, *a, **kw):
escape = state['escape']
msg = loc.catalog.get(s)
s2 = None
if msg:
s2 = msg.string
if isinstance(s2, tuple):
s2 = s2[0]
if not s2:
s2 = s
if loc != LOCALE_EN:
loc = LOCALE_EN
state['partial_translation'] = True
if a or kw:
try:
return i_format(loc, escape(_decode(s2)), *a, **kw)
except Exception as e:
website.tell_sentry(e, state)
return i_format(LOCALE_EN, escape(_decode(s)), *a, **kw)
return escape(s2)
def n_get_text(state, loc, s, p, n, *a, **kw):
escape = state['escape']
n, wrapper = (n.value, n.wrapper) if isinstance(n, Wrap) else (n, None)
n = n or 0
msg = loc.catalog.get((s, p) if s else p)
s2 = None
if msg:
try:
s2 = msg.string[loc.catalog.plural_func(n)]
except Exception as e:
website.tell_sentry(e, state)
if not s2:
s2 = s if n == 1 else p
if loc != LOCALE_EN:
loc = LOCALE_EN
state['partial_translation'] = True
kw['n'] = format_number(n, locale=loc) or n
if wrapper:
kw['n'] = wrapper % kw['n']
try:
return i_format(loc, escape(_decode(s2)), *a, **kw)
except Exception as e:
website.tell_sentry(e, state)
return i_format(LOCALE_EN, escape(_decode(s if n == 1 else p)), *a, **kw)
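# Example (hedged): with a French catalog mapping "Welcome" to "Bienvenue",
# get_text(state, LOCALES['fr'], "Welcome") returns the escaped French string.
# If the message is missing, both functions fall back to the English source
# string and set state['partial_translation']; if formatting fails, the error
# is reported to Sentry and the English string is formatted instead.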
def getdoc(state, name):
versions = state['website'].docs[name]
for lang in state['request'].accept_langs:
doc = versions.get(lang)
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Types for building models of metric description xml files.
UMA uses several XML files to allow clients to describe the metrics that they
collect, e.g.
https://chromium.googlesource.com/chromium/src/+/master/tools/metrics/rappor/rappor.xml
These types can be used to build models that describe the canonical formatted
structure of these files, and the models can be used to extract the contents of
those files, or convert content back into a canonicalized version of the file.
"""
import abc
import re
import xml.etree.ElementTree as ET
from xml.dom import minidom
import pretty_print_xml
# Non-basic type keys for storing comments and text attributes, so they don't
# conflict with regular keys, and can be skipped in JSON serialization.
COMMENT_KEY = ('comment')
TEXT_KEY = ('text')
def GetCommentsForNode(node):
"""Extracts comments in the current node.
Args:
node: The DOM node to extract comments from.
Returns:
A list of comment DOM nodes.
"""
comments = []
node = node.previousSibling
while node:
if node.nodeType == minidom.Node.COMMENT_NODE:
comments.append(node.data)
elif node.nodeType != minidom.Node.TEXT_NODE:
break
node = node.previousSibling
return comments[::-1]
def PutCommentsInNode(doc, node, comments):
"""Appends comments to the DOM node.
Args:
doc: The document to create a comment in.
node: The DOM node to write comments to.
comments: A list of comments.
"""
for comment in comments:
node.appendChild(doc.createComment(comment))
def GetChildrenByTag(node, tag):
"""Get all children of a particular tag type.
Args:
node: The DOM node to collect children from.
tag: The tag of the nodes to collect.
Returns:
A list of DOM nodes.
"""
return [child for child in node.childNodes if child.nodeName == tag]
class NodeType(object):
"""Base type for a type of XML node.
Args:
indent: True iff this node should have its children indented when pretty
printing.
extra_newlines: None or a triple of integers describing the number of
newlines that should be printed (after_open, before_close, after_close)
single_line: True iff this node may be squashed into a single line.
alphabetization: A list of [(tag, keyfn)] pairs, which specify the tags of
the children that should be sorted, and the functions to get sort keys
from xml nodes.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, tag,
indent=True,
extra_newlines=None,
single_line=False,
alphabetization=None):
self.tag = tag
self.indent = indent
self.extra_newlines = extra_newlines
self.single_line = single_line
self.alphabetization = alphabetization
@abc.abstractmethod
def Unmarshall(self, node):
"""Extracts the content of the node to an object.
Args:
node: The XML node to extract data from.
Returns:
An object extracted from the node.
"""
@abc.abstractmethod
def Marshall(self, doc, obj):
"""Converts an object into an XML node of this type.
Args:
doc: A document to create an XML node in.
obj: The object to be encoded into the XML.
Returns:
An XML node encoding the object.
"""
def GetComments(self, obj):
"""Gets comments for the object being encoded.
Args:
obj: The object to be encoded into the XML.
Returns:
A list of comment nodes for the object.
"""
del obj # Used in ObjectNodeType implementation
# The base NodeType does not store comments
return []
def MarshallIntoNode(self, doc, node, obj):
"""Marshalls the object and appends it to a node, with comments.
Args:
doc: A document to create an XML node in.
node: An XML node to marshall the object into.
obj: The object to be encoded into the XML.
"""
PutCommentsInNode(doc, node, self.GetComments(obj))
node.appendChild(self.Marshall(doc, obj))
def GetAttributes(self):
"""Gets a sorted list of attributes that this node can have.
Returns:
A list of names of XML attributes, sorted by the order they should appear.
"""
return []
def GetRequiredAttributes(self):
"""Gets a list of required attributes that this node has.
Returns:
A list of names of required attributes of the node.
"""
return []
def GetNodeTypes(self):
"""Gets a map of tags to node types for all dependent types.
Returns:
A map of tags to node-types for this node and all of the nodes that it
can contain.
"""
return {self.tag: self}
class TextNodeType(NodeType):
"""A type for simple nodes that just have a tag and some text content.
Unmarshalls nodes to strings.
Args:
tag: The name of XML tag for this type of node.
"""
def __str__(self):
return 'TextNodeType("%s")' % self.tag
def Unmarshall(self, node):
"""Extracts the content of the node to an object.
Args:
node: The XML node to extract data from.
Returns:
The object representation of the node.
"""
obj = {}
obj[COMMENT_KEY] = GetCommentsForNode(node)
if not node.firstChild:
return obj
text = node.firstChild.nodeValue
obj[TEXT_KEY] = '\n\n'.join(pretty_print_xml.SplitParagraphs(text))
return obj
def Marshall(self, doc, obj):
"""Converts an object into an XML node of this type.
Args:
doc: A document to create an XML node in.
obj: An object to be encoded into the XML.
Returns:
An XML node encoding the object.
"""
node = doc.createElement(self.tag)
text = obj.get(TEXT_KEY)
if text:
node.appendChild(doc.createTextNode(text))
return node
def GetComments(self, obj):
"""Gets comments for the object being encoded.
Args:
obj: The object to be encoded into the XML.
Returns:
A list of comment nodes for the object.
"""
return obj[COMMENT_KEY]
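# Example (hedged): unmarshalling a simple text node with this type.
#
#   summary_type = TextNodeType('summary')
#   node = minidom.parseString('<summary>Counts foo events.</summary>').firstChild
#   obj = summary_type.Unmarshall(node)
#   # obj[TEXT_KEY] == 'Counts foo events.' and obj[COMMENT_KEY] == []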
class ChildType(object):
"""Metadata about a node type's children.
Args:
attr: The field name of the parent's model object storing the child's model.
node_type: The NodeType of the child.
multiple: True if the child can be repeated.
"""
def __init__(self, attr, node_type, multiple):
self.attr = attr
self.node_type = node_type
self.multiple = multiple
class ObjectNodeType(NodeType):
"""A complex node type that has attributes or other nodes as children.
Unmarshalls nodes to objects.
Args:
tag: The name of XML tag for this type of node.
attributes: A list of (name, type, regex) tuples, e.g. [('foo', unicode,
r'^\w+$')]. The order of the attributes determines the ordering of
attributes when serializing objects to XML. The "regex" can be None
to do no validation, otherwise the attribute must match that pattern.
text_attribute: An attribute stored in the text content of the node.
children: A list of ChildTypes describing the objects children.
Raises:
ValueError: Attributes contains duplicate definitions.
"""
def __init__(self,
tag,
attributes=None,
required_attributes=None,
children=None,
text_attribute=None,
**kwargs):
NodeType.__init__(self, tag, **kwargs)
self.attributes = attributes or []
self.required_attributes = required_attributes or []
self.children = children or []
self.text_attribute = text_attribute
if len(self.attributes) != len(set(a for a, _, _ in self.attributes)):
raise ValueError('Duplicate attribute definition.')
def __str__(self):
return 'ObjectNodeType("%s")' % self.tag
def Unmarshall(self, node):
"""Extracts the content of the node to an object.
Args:
node: The XML node to extract data from.
Returns:
An object extracted from the node.
Raises:
ValueError: The node is missing required children.
"""
obj = {}
obj[COMMENT_KEY] = GetCommentsForNode(node)
for attr, attr_type, attr_re in self.attributes:
if node.hasAttribute(attr):
obj[attr] = attr_type(node.getAttribute(attr))
if attr_re is not None:
attr_val = obj.get(attr, '')
if not re.match(attr_re, attr_val):
raise ValueError('%s "%s" does not match regex "%s"' %
(attr, attr_val, attr_re))
# We need to iterate through all the children and get their nodeValue,
# to account for the cases where other children node precedes the text
# attribute.
obj[self.text_attribute] = ''
child = node.firstChild
while child:
obj[self.text_attribute] += (child.nodeValue.strip()
if child.nodeValue else '')
child = child.nextSibling
# This prevents setting a None key with empty string value
if obj[self.text_attribute] == '':
del obj[self.text_attribute]
for child in self.children:
nodes = GetChildrenByTag(node, child.node_type.tag)
if child.multiple:
obj[child.attr] = [
child.node_type.Unmarshall(n) for n in nodes]
elif nodes:
obj[child.attr] = child.node_type.Unmarshall(nodes[0])
return obj
def Marshall(self, doc, obj):
"""Converts an object into an XML node of this type.
Args:
doc: A document to create an XML node in.
obj: The object to be encoded into the XML.
Returns:
An XML node encoding the object.
"""
node = doc.createElement(self.tag)
for attr, _, _ in self.attributes:
if attr in obj:
node.setAttribute(attr, str(obj[attr]))
if self.text_attribute and self.text_attribute in obj:
node.appendChild(doc.createTextNode(obj[self.text_attribute]))
for child in self.children:
if child.multiple:
for child_obj in obj[child.attr]:
child.node_type.MarshallIntoNode(doc, node, child_obj)
elif child.attr in obj:
child.node_type.MarshallIntoNode(doc, node, obj[child.attr])
return node
def GetComments(self, obj):
"""Gets comments for the object being encoded.
Args:
obj: The object to be encoded into the XML.
Returns:
A list of comment nodes for the object.
"""
return obj[COMMENT_KEY]
def GetAttributes(self):
"""Gets a sorted list of attributes | |
"2016"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2017"}},
{"aggregated_amount": 8018.0, "time_period": {"fiscal_year": "2018"}},
{"aggregated_amount": 8019.0, "time_period": {"fiscal_year": "2019"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2020"}},
]
assert resp.status_code == status.HTTP_200_OK
assert resp.json().get("results") == expected_result, "Place of Performance filter does not match expected result"
def _test_correct_response_for_recipient_location(client):
resp = client.post(
"/api/v2/search/spending_over_time",
content_type="application/json",
data=json.dumps(
{
"group": "fiscal_year",
"filters": {
"recipient_locations": [
{"country": "USA", "state": "le_state_code_4", "city": "le_city_name_4"},
{"country": "USA", "state": "le_state_code_7", "county": "007"},
{"country": "USA", "state": "le_state_code_17", "district": "17"},
{"country": "USA", "zip": "le_zip5_20"},
],
"time_period": [{"start_date": "2007-10-01", "end_date": "2020-09-30"}],
},
}
),
)
expected_result = [
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2008"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2009"}},
{"aggregated_amount": 8020.0, "time_period": {"fiscal_year": "2010"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2011"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2012"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2013"}},
{"aggregated_amount": 8004.0, "time_period": {"fiscal_year": "2014"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2015"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2016"}},
{"aggregated_amount": 16024.0, "time_period": {"fiscal_year": "2017"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2018"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2019"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2020"}},
]
assert resp.status_code == status.HTTP_200_OK
assert resp.json().get("results") == expected_result, "Recipient Location filter does not match expected result"
def _test_correct_response_for_recipient_search_text(client):
resp = client.post(
"/api/v2/search/spending_over_time",
content_type="application/json",
data=json.dumps(
{
"group": "fiscal_year",
"filters": {
"recipient_search_text": ["recipient_name_10", "recipient_name_14", "000000020"],
"time_period": [{"start_date": "2007-10-01", "end_date": "2020-09-30"}],
},
}
),
)
expected_result = [
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2008"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2009"}},
{"aggregated_amount": 16030.0, "time_period": {"fiscal_year": "2010"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2011"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2012"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2013"}},
{"aggregated_amount": 8014.0, "time_period": {"fiscal_year": "2014"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2015"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2016"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2017"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2018"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2019"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2020"}},
]
assert resp.status_code == status.HTTP_200_OK
assert resp.json().get("results") == expected_result, "Recipient Search Text filter does not match expected result"
def _test_correct_response_for_recipient_type_names(client):
resp = client.post(
"/api/v2/search/spending_over_time",
content_type="application/json",
data=json.dumps(
{
"group": "fiscal_year",
"filters": {
"recipient_type_names": ["business_category_1_3", "business_category_2_8"],
"time_period": [{"start_date": "2007-10-01", "end_date": "2020-09-30"}],
},
}
),
)
expected_result = [
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2008"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2009"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2010"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2011"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2012"}},
{"aggregated_amount": 8003.0, "time_period": {"fiscal_year": "2013"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2014"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2015"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2016"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2017"}},
{"aggregated_amount": 8008.0, "time_period": {"fiscal_year": "2018"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2019"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2020"}},
]
assert resp.status_code == status.HTTP_200_OK
assert resp.json().get("results") == expected_result, "Recipient Type Names filter does not match expected result"
def _test_correct_response_for_award_amounts(client):
resp = client.post(
"/api/v2/search/spending_over_time",
content_type="application/json",
data=json.dumps(
{
"group": "fiscal_year",
"filters": {
"award_amounts": [
{"upper_bound": 9001},
{"lower_bound": 9013, "upper_bound": 9017},
{"lower_bound": 9027},
],
"time_period": [{"start_date": "2007-10-01", "end_date": "2020-09-30"}],
},
}
),
)
expected_result = [
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2008"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2009"}},
{"aggregated_amount": 8000.0, "time_period": {"fiscal_year": "2010"}},
{"aggregated_amount": 8001.0, "time_period": {"fiscal_year": "2011"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2012"}},
{"aggregated_amount": 8013.0, "time_period": {"fiscal_year": "2013"}},
{"aggregated_amount": 8014.0, "time_period": {"fiscal_year": "2014"}},
{"aggregated_amount": 8015.0, "time_period": {"fiscal_year": "2015"}},
{"aggregated_amount": 8016.0, "time_period": {"fiscal_year": "2016"}},
{"aggregated_amount": 16044.0, "time_period": {"fiscal_year": "2017"}},
{"aggregated_amount": 8028.0, "time_period": {"fiscal_year": "2018"}},
{"aggregated_amount": 8029.0, "time_period": {"fiscal_year": "2019"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2020"}},
]
assert resp.status_code == status.HTTP_200_OK
assert resp.json().get("results") == expected_result, "Award Amounts filter does not match expected result"
def _test_correct_response_for_cfda_program(client):
resp = client.post(
"/api/v2/search/spending_over_time",
content_type="application/json",
data=json.dumps(
{
"group": "fiscal_year",
"filters": {
"program_numbers": ["cfda_number_11", "cfda_number_21", "cfda_number_25"],
"time_period": [{"start_date": "2007-10-01", "end_date": "2020-09-30"}],
},
}
),
)
expected_result = [
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2008"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2009"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2010"}},
{"aggregated_amount": 16032.0, "time_period": {"fiscal_year": "2011"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2012"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2013"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2014"}},
{"aggregated_amount": 8025.0, "time_period": {"fiscal_year": "2015"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2016"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2017"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2018"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2019"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2020"}},
]
assert resp.status_code == status.HTTP_200_OK
assert resp.json().get("results") == expected_result, "CFDA Program filter does not match expected result"
def _test_correct_response_for_naics_codes(client):
resp = client.post(
"/api/v2/search/spending_over_time",
content_type="application/json",
data=json.dumps(
{
"group": "fiscal_year",
"filters": {
"naics_codes": {"require": ["88", "1616", "2626"]},
"time_period": [{"start_date": "2007-10-01", "end_date": "2020-09-30"}],
},
}
),
)
expected_result = [
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2008"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2009"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2010"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2011"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2012"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2013"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2014"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2015"}},
{"aggregated_amount": 16042.0, "time_period": {"fiscal_year": "2016"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2017"}},
{"aggregated_amount": 8008.0, "time_period": {"fiscal_year": "2018"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2019"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2020"}},
]
assert resp.status_code == status.HTTP_200_OK
assert resp.json().get("results") == expected_result, "NAICS Code filter does not match expected result"
def _test_correct_response_for_psc_codes(client):
resp = client.post(
"/api/v2/search/spending_over_time",
content_type="application/json",
data=json.dumps(
{
"group": "fiscal_year",
"filters": {
"psc_codes": ["0002", "0012", "0024"],
"time_period": [{"start_date": "2007-10-01", "end_date": "2020-09-30"}],
},
}
),
)
expected_result = [
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2008"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2009"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2010"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2011"}},
{"aggregated_amount": 16014.0, "time_period": {"fiscal_year": "2012"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2013"}},
{"aggregated_amount": 8024.0, "time_period": {"fiscal_year": "2014"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2015"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2016"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2017"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2018"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2019"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2020"}},
]
assert resp.status_code == status.HTTP_200_OK
assert resp.json().get("results") == expected_result, "PSC Code filter does not match expected result"
def _test_correct_response_for_contract_pricing_type_codes(client):
resp = client.post(
"/api/v2/search/spending_over_time",
content_type="application/json",
data=json.dumps(
{
"group": "fiscal_year",
"filters": {
"contract_pricing_type_codes": [
"type_of_contract_pricing_0",
"type_of_contract_pricing_10",
"type_of_contract_pricing_22",
],
"time_period": [{"start_date": "2007-10-01", "end_date": "2020-09-30"}],
},
}
),
)
expected_result = [
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2008"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2009"}},
{"aggregated_amount": 16010.0, "time_period": {"fiscal_year": "2010"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2011"}},
{"aggregated_amount": 8022.0, "time_period": {"fiscal_year": "2012"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2013"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2014"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2015"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2016"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2017"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2018"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2019"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2020"}},
]
assert resp.status_code == status.HTTP_200_OK
assert (
resp.json().get("results") == expected_result
), "Contract Pricing Type Codes filter does not match expected result"
def _test_correct_response_for_set_aside_type_codes(client):
resp = client.post(
"/api/v2/search/spending_over_time",
content_type="application/json",
data=json.dumps(
{
"group": "fiscal_year",
"filters": {
"set_aside_type_codes": ["type_set_aside_16", "type_set_aside_26", "type_set_aside_28"],
"time_period": [{"start_date": "2007-10-01", "end_date": "2020-09-30"}],
},
}
),
)
expected_result = [
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2008"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2009"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2010"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2011"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2012"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2013"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2014"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2015"}},
{"aggregated_amount": 16042.0, "time_period": {"fiscal_year": "2016"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2017"}},
{"aggregated_amount": 8028.0, "time_period": {"fiscal_year": "2018"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2019"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2020"}},
]
assert resp.status_code == status.HTTP_200_OK
assert resp.json().get("results") == expected_result, "Set Aside Type Codes filter does not match expected result"
def _test_correct_response_for_set_extent_competed_type_codes(client):
resp = client.post(
"/api/v2/search/spending_over_time",
content_type="application/json",
data=json.dumps(
{
"group": "fiscal_year",
"filters": {
"extent_competed_type_codes": ["extent_competed_4", "extent_competed_24", "extent_competed_26"],
"time_period": [{"start_date": "2007-10-01", "end_date": "2020-09-30"}],
},
}
),
)
expected_result = [
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2008"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2009"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2010"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2011"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2012"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2013"}},
{"aggregated_amount": 16028.0, "time_period": {"fiscal_year": "2014"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2015"}},
{"aggregated_amount": 8026.0, "time_period": {"fiscal_year": "2016"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2017"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2018"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2019"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2020"}},
]
assert resp.status_code == status.HTTP_200_OK
assert (
resp.json().get("results") == expected_result
), "Extent Competed Type Codes filter does not match expected result"
def _test_correct_response_for_recipient_id(client):
resp = client.post(
"/api/v2/search/spending_over_time",
content_type="application/json",
data=json.dumps(
{
"group": "fiscal_year",
"filters": {
"recipient_id": "c687823d-10af-701b-1bad-650c6e680190-R",
"time_period": [{"start_date": "2007-10-01", "end_date": "2020-09-30"}],
},
}
),
)
expected_result = [
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2008"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2009"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2010"}},
{"aggregated_amount": 8021.0, "time_period": {"fiscal_year": "2011"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2012"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2013"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2014"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2015"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2016"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2017"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2018"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2019"}},
{"aggregated_amount": 0, "time_period": {"fiscal_year": "2020"}},
]
assert resp.status_code == status.HTTP_200_OK
assert resp.json().get("results") == | |
input-gzip.nbt > output.nbt` or
* `python3 -c "import sys, gzip; sys.stdout.buffer.write(
gzip.decompress(sys.stdin.buffer.read()) )" < input-gzip.nbt > output.nbt`
* `application/zlib`, you can use
* `openssl zlib -d -in input-zlib.nbt -out output.nbt` (does not work on most systems)
* `python3 -c "import sys, zlib; sys.stdout.buffer.write(
zlib.decompress(sys.stdin.buffer.read()) )" < input-zlib.nbt > output.nbt`
* something else (especially `image/x-pcx` and `application/octet-stream`),
it is most likely already uncompressed.
The file `output.nbt` generated by one of the above commands can already be
processed with this Kaitai Struct specification.
This spec **only** implements the Java edition format. There is also
a [Bedrock edition](https://wiki.vg/NBT#Bedrock_edition) NBT format,
which uses little-endian encoding and has a few other differences, but it isn't
as popular as the Java edition format.
**Implementation note:** strings in `TAG_String` are incorrectly decoded with
standard UTF-8, while they are encoded in [**Modified UTF-8**](
https://docs.oracle.com/javase/8/docs/api/java/io/DataInput.html#modified-utf-8
) (MUTF-8). That's because MUTF-8 is not supported natively by most target
languages, and thus one must use external libraries to achieve a fully-compliant
decoder. But decoding in standard UTF-8 is still better than nothing, and
it usually works fine.
All Unicode code points with incompatible representations in MUTF-8 and UTF-8 are
U+0000 (_NUL_), U+D800-U+DFFF (_High_ and _Low Surrogates_) and U+10000-U+10FFFF
(all _Supplementary_ Planes; includes e.g. emoticons, pictograms).
A _MUTF-8_-encoded string containing these code points cannot be successfully
decoded as UTF-8. The behavior in this case depends on the target language -
usually an exception is thrown, or the bytes that are not valid UTF-8
are replaced or ignored.
**Sample files:**
* <https://wiki.vg/NBT#Download>
* <https://github.com/twoolie/NBT/blob/f9e892e/tests/world_test/data/scoreboard.dat>
* <https://github.com/chmod222/cNBT/tree/3f74b69/testdata>
* <https://github.com/PistonDevelopers/hematite_nbt/tree/0b85f89/tests>
.. seealso::
Source - https://wiki.vg/NBT
.. seealso::
Source - https://web.archive.org/web/20110723210920/https://www.minecraft.net/docs/NBT.txt
.. seealso::
Source - https://minecraft.gamepedia.com/NBT_format
"""
class Tag(Enum):
end = 0
byte = 1
short = 2
int = 3
long = 4
float = 5
double = 6
byte_array = 7
string = 8
list = 9
compound = 10
int_array = 11
long_array = 12
SEQ_FIELDS = ["root_check", "root"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
if ((self.root_type == MinecraftNbt.Tag.end) and (False)) :
self._debug['root_check']['start'] = self._io.pos()
self.root_check = self._io.read_bytes(0)
self._debug['root_check']['end'] = self._io.pos()
self._debug['root']['start'] = self._io.pos()
self.root = MinecraftNbt.NamedTag(self._io, self, self._root)
self.root._read()
self._debug['root']['end'] = self._io.pos()
class TagLongArray(KaitaiStruct):
SEQ_FIELDS = ["num_tags", "tags"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['num_tags']['start'] = self._io.pos()
self.num_tags = self._io.read_s4be()
self._debug['num_tags']['end'] = self._io.pos()
self._debug['tags']['start'] = self._io.pos()
self.tags = [None] * (self.num_tags)
for i in range(self.num_tags):
if not 'arr' in self._debug['tags']:
self._debug['tags']['arr'] = []
self._debug['tags']['arr'].append({'start': self._io.pos()})
self.tags[i] = self._io.read_s8be()
self._debug['tags']['arr'][i]['end'] = self._io.pos()
self._debug['tags']['end'] = self._io.pos()
@property
def tags_type(self):
if hasattr(self, '_m_tags_type'):
return self._m_tags_type if hasattr(self, '_m_tags_type') else None
self._m_tags_type = MinecraftNbt.Tag.long
return self._m_tags_type if hasattr(self, '_m_tags_type') else None
class TagByteArray(KaitaiStruct):
SEQ_FIELDS = ["len_data", "data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['len_data']['start'] = self._io.pos()
self.len_data = self._io.read_s4be()
self._debug['len_data']['end'] = self._io.pos()
self._debug['data']['start'] = self._io.pos()
self.data = self._io.read_bytes(self.len_data)
self._debug['data']['end'] = self._io.pos()
class TagIntArray(KaitaiStruct):
SEQ_FIELDS = ["num_tags", "tags"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['num_tags']['start'] = self._io.pos()
self.num_tags = self._io.read_s4be()
self._debug['num_tags']['end'] = self._io.pos()
self._debug['tags']['start'] = self._io.pos()
self.tags = [None] * (self.num_tags)
for i in range(self.num_tags):
if not 'arr' in self._debug['tags']:
self._debug['tags']['arr'] = []
self._debug['tags']['arr'].append({'start': self._io.pos()})
self.tags[i] = self._io.read_s4be()
self._debug['tags']['arr'][i]['end'] = self._io.pos()
self._debug['tags']['end'] = self._io.pos()
@property
def tags_type(self):
if hasattr(self, '_m_tags_type'):
return self._m_tags_type if hasattr(self, '_m_tags_type') else None
self._m_tags_type = MinecraftNbt.Tag.int
return self._m_tags_type if hasattr(self, '_m_tags_type') else None
class TagList(KaitaiStruct):
SEQ_FIELDS = ["tags_type", "num_tags", "tags"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tags_type']['start'] = self._io.pos()
self.tags_type = KaitaiStream.resolve_enum(MinecraftNbt.Tag, self._io.read_u1())
self._debug['tags_type']['end'] = self._io.pos()
self._debug['num_tags']['start'] = self._io.pos()
self.num_tags = self._io.read_s4be()
self._debug['num_tags']['end'] = self._io.pos()
self._debug['tags']['start'] = self._io.pos()
self.tags = [None] * (self.num_tags)
for i in range(self.num_tags):
if not 'arr' in self._debug['tags']:
self._debug['tags']['arr'] = []
self._debug['tags']['arr'].append({'start': self._io.pos()})
_on = self.tags_type
if _on == MinecraftNbt.Tag.long_array:
if not 'arr' in self._debug['tags']:
self._debug['tags']['arr'] = []
self._debug['tags']['arr'].append({'start': self._io.pos()})
_t_tags = MinecraftNbt.TagLongArray(self._io, self, self._root)
_t_tags._read()
self.tags[i] = _t_tags
self._debug['tags']['arr'][i]['end'] = self._io.pos()
elif _on == MinecraftNbt.Tag.compound:
if not 'arr' in self._debug['tags']:
self._debug['tags']['arr'] = []
self._debug['tags']['arr'].append({'start': self._io.pos()})
_t_tags = MinecraftNbt.TagCompound(self._io, self, self._root)
_t_tags._read()
self.tags[i] = _t_tags
self._debug['tags']['arr'][i]['end'] = self._io.pos()
elif _on == MinecraftNbt.Tag.double:
if not 'arr' in self._debug['tags']:
self._debug['tags']['arr'] = []
self._debug['tags']['arr'].append({'start': self._io.pos()})
self.tags[i] = self._io.read_f8be()
self._debug['tags']['arr'][i]['end'] = self._io.pos()
elif _on == MinecraftNbt.Tag.list:
if not 'arr' in self._debug['tags']:
self._debug['tags']['arr'] = []
self._debug['tags']['arr'].append({'start': self._io.pos()})
_t_tags = MinecraftNbt.TagList(self._io, self, self._root)
_t_tags._read()
self.tags[i] = _t_tags
self._debug['tags']['arr'][i]['end'] = self._io.pos()
elif _on == MinecraftNbt.Tag.float:
if not 'arr' in self._debug['tags']:
self._debug['tags']['arr'] = []
self._debug['tags']['arr'].append({'start': self._io.pos()})
self.tags[i] = self._io.read_f4be()
self._debug['tags']['arr'][i]['end'] = self._io.pos()
elif _on == MinecraftNbt.Tag.short:
if not 'arr' in self._debug['tags']:
self._debug['tags']['arr'] = []
self._debug['tags']['arr'].append({'start': self._io.pos()})
self.tags[i] = self._io.read_s2be()
self._debug['tags']['arr'][i]['end'] = self._io.pos()
elif _on == MinecraftNbt.Tag.int:
if not 'arr' in self._debug['tags']:
self._debug['tags']['arr'] = []
self._debug['tags']['arr'].append({'start': self._io.pos()})
self.tags[i] = self._io.read_s4be()
self._debug['tags']['arr'][i]['end'] = self._io.pos()
elif _on == MinecraftNbt.Tag.byte_array:
if not 'arr' in self._debug['tags']:
self._debug['tags']['arr'] = []
self._debug['tags']['arr'].append({'start': self._io.pos()})
_t_tags = MinecraftNbt.TagByteArray(self._io, self, self._root)
_t_tags._read()
self.tags[i] = _t_tags
self._debug['tags']['arr'][i]['end'] = self._io.pos()
elif _on == MinecraftNbt.Tag.byte:
if not 'arr' in self._debug['tags']:
self._debug['tags']['arr'] = []
self._debug['tags']['arr'].append({'start': self._io.pos()})
self.tags[i] = self._io.read_s1()
self._debug['tags']['arr'][i]['end'] = self._io.pos()
elif _on == MinecraftNbt.Tag.int_array:
if not 'arr' in self._debug['tags']:
self._debug['tags']['arr'] = []
self._debug['tags']['arr'].append({'start': self._io.pos()})
_t_tags = MinecraftNbt.TagIntArray(self._io, self, self._root)
_t_tags._read()
self.tags[i] = _t_tags
self._debug['tags']['arr'][i]['end'] = self._io.pos()
elif _on == MinecraftNbt.Tag.string:
if not 'arr' in self._debug['tags']:
self._debug['tags']['arr'] = []
self._debug['tags']['arr'].append({'start': self._io.pos()})
_t_tags = MinecraftNbt.TagString(self._io, self, self._root)
_t_tags._read()
self.tags[i] = _t_tags
self._debug['tags']['arr'][i]['end'] = self._io.pos()
elif _on == MinecraftNbt.Tag.long:
if not 'arr' in self._debug['tags']:
self._debug['tags']['arr'] = []
self._debug['tags']['arr'].append({'start': self._io.pos()})
self.tags[i] = self._io.read_s8be()
self._debug['tags']['arr'][i]['end'] = self._io.pos()
self._debug['tags']['arr'][i]['end'] = self._io.pos()
self._debug['tags']['end'] = self._io.pos()
class TagString(KaitaiStruct):
SEQ_FIELDS = ["len_data", "data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['len_data']['start'] = self._io.pos()
self.len_data = self._io.read_u2be()
self._debug['len_data']['end'] = self._io.pos()
self._debug['data']['start'] = self._io.pos()
self.data = (self._io.read_bytes(self.len_data)).decode(u"utf-8")
self._debug['data']['end'] = self._io.pos()
class TagCompound(KaitaiStruct):
SEQ_FIELDS = ["tags"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tags']['start'] = self._io.pos()
self.tags = []
i = 0
while True:
if not 'arr' in self._debug['tags']:
self._debug['tags']['arr'] = []
self._debug['tags']['arr'].append({'start': self._io.pos()})
_t_tags = MinecraftNbt.NamedTag(self._io, self, self._root)
_t_tags._read()
_ = _t_tags
self.tags.append(_)
self._debug['tags']['arr'][len(self.tags) - 1]['end'] = self._io.pos()
if _.is_tag_end:
break
i += 1
self._debug['tags']['end'] = self._io.pos()
@property
def dump_num_tags(self):
if hasattr(self, '_m_dump_num_tags'):
return self._m_dump_num_tags if hasattr(self, '_m_dump_num_tags') else None
self._m_dump_num_tags = (len(self.tags) - (1 if ((len(self.tags) >= 1) and (self.tags[-1].is_tag_end)) else 0))
return self._m_dump_num_tags if hasattr(self, '_m_dump_num_tags') else None
class NamedTag(KaitaiStruct):
SEQ_FIELDS = ["type", "name", "payload"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['type']['start'] = self._io.pos()
self.type = KaitaiStream.resolve_enum(MinecraftNbt.Tag, self._io.read_u1())
self._debug['type']['end'] = self._io.pos()
if not (self.is_tag_end):
self._debug['name']['start'] = self._io.pos()
self.name = MinecraftNbt.TagString(self._io, self, self._root)
self.name._read()
self._debug['name']['end'] = self._io.pos()
if not (self.is_tag_end):
self._debug['payload']['start'] = self._io.pos()
_on = self.type
if _on == MinecraftNbt.Tag.long_array:
self.payload = MinecraftNbt.TagLongArray(self._io, self, self._root)
self.payload._read()
elif _on == MinecraftNbt.Tag.compound:
self.payload = MinecraftNbt.TagCompound(self._io, self, self._root)
self.payload._read()
elif _on == MinecraftNbt.Tag.double:
self.payload = self._io.read_f8be()
elif _on == MinecraftNbt.Tag.list:
self.payload = MinecraftNbt.TagList(self._io, self, self._root)
self.payload._read()
elif _on == MinecraftNbt.Tag.float:
self.payload = self._io.read_f4be()
elif _on == MinecraftNbt.Tag.short:
self.payload = self._io.read_s2be()
elif _on == MinecraftNbt.Tag.int:
self.payload = self._io.read_s4be()
elif _on == MinecraftNbt.Tag.byte_array:
self.payload = MinecraftNbt.TagByteArray(self._io, self, self._root)
self.payload._read()
elif _on == MinecraftNbt.Tag.byte:
self.payload = self._io.read_s1()
elif _on == MinecraftNbt.Tag.int_array:
self.payload = MinecraftNbt.TagIntArray(self._io, self, self._root)
self.payload._read()
elif _on == MinecraftNbt.Tag.string:
self.payload = MinecraftNbt.TagString(self._io, self, self._root)
self.payload._read()
                elif _on == MinecraftNbt.Tag.long:
                    self.payload = self._io.read_s8be()
                self._debug['payload']['end'] = self._io.pos()
if self._Version == None:
EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No PLATFORM_VERSION", File=self.MetaFile)
return self._Version
## Retrieve platform description file version
def _GetDscSpec(self):
if self._DscSpecification == None:
if self._Header == None:
self._GetHeaderInfo()
if self._DscSpecification == None:
EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No DSC_SPECIFICATION", File=self.MetaFile)
return self._DscSpecification
## Retrieve OUTPUT_DIRECTORY
def _GetOutpuDir(self):
if self._OutputDirectory == None:
if self._Header == None:
self._GetHeaderInfo()
if self._OutputDirectory == None:
self._OutputDirectory = os.path.join("Build", self._PlatformName)
return self._OutputDirectory
## Retrieve SUPPORTED_ARCHITECTURES
def _GetSupArch(self):
if self._SupArchList == None:
if self._Header == None:
self._GetHeaderInfo()
if self._SupArchList == None:
EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No SUPPORTED_ARCHITECTURES", File=self.MetaFile)
return self._SupArchList
## Retrieve BUILD_TARGETS
def _GetBuildTarget(self):
if self._BuildTargets == None:
if self._Header == None:
self._GetHeaderInfo()
if self._BuildTargets == None:
EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No BUILD_TARGETS", File=self.MetaFile)
return self._BuildTargets
def _GetPcdInfoFlag(self):
if self._PcdInfoFlag == None or self._PcdInfoFlag.upper() == 'FALSE':
return False
elif self._PcdInfoFlag.upper() == 'TRUE':
return True
else:
return False
def _GetVarCheckFlag(self):
if self._VarCheckFlag == None or self._VarCheckFlag.upper() == 'FALSE':
return False
elif self._VarCheckFlag.upper() == 'TRUE':
return True
else:
return False
def _GetAviableSkuIds(self):
if self._AvilableSkuIds:
return self._AvilableSkuIds
return self.SkuIdentifier
def _GetSkuIdentifier(self):
if self._SkuName:
return self._SkuName
if self._SkuIdentifier == None:
if self._Header == None:
self._GetHeaderInfo()
return self._SkuIdentifier
## Retrieve SKUID_IDENTIFIER
def _GetSkuName(self):
if self._SkuName == None:
if self._Header == None:
self._GetHeaderInfo()
if (self._SkuName == None or self._SkuName not in self.SkuIds):
self._SkuName = 'DEFAULT'
return self._SkuName
## Override SKUID_IDENTIFIER
def _SetSkuName(self, Value):
self._SkuName = Value
self._Pcds = None
def _GetFdfFile(self):
if self._FlashDefinition == None:
if self._Header == None:
self._GetHeaderInfo()
if self._FlashDefinition == None:
self._FlashDefinition = ''
return self._FlashDefinition
def _GetPrebuild(self):
if self._Prebuild == None:
if self._Header == None:
self._GetHeaderInfo()
if self._Prebuild == None:
self._Prebuild = ''
return self._Prebuild
def _GetPostbuild(self):
if self._Postbuild == None:
if self._Header == None:
self._GetHeaderInfo()
if self._Postbuild == None:
self._Postbuild = ''
return self._Postbuild
    ## Retrieve BUILD_NUMBER
def _GetBuildNumber(self):
if self._BuildNumber == None:
if self._Header == None:
self._GetHeaderInfo()
if self._BuildNumber == None:
self._BuildNumber = ''
return self._BuildNumber
## Retrieve MAKEFILE_NAME
def _GetMakefileName(self):
if self._MakefileName == None:
if self._Header == None:
self._GetHeaderInfo()
if self._MakefileName == None:
self._MakefileName = ''
return self._MakefileName
## Retrieve BsBaseAddress
def _GetBsBaseAddress(self):
if self._BsBaseAddress == None:
if self._Header == None:
self._GetHeaderInfo()
if self._BsBaseAddress == None:
self._BsBaseAddress = ''
return self._BsBaseAddress
## Retrieve RtBaseAddress
def _GetRtBaseAddress(self):
if self._RtBaseAddress == None:
if self._Header == None:
self._GetHeaderInfo()
if self._RtBaseAddress == None:
self._RtBaseAddress = ''
return self._RtBaseAddress
## Retrieve the top address for the load fix address
def _GetLoadFixAddress(self):
if self._LoadFixAddress == None:
if self._Header == None:
self._GetHeaderInfo()
if self._LoadFixAddress == None:
self._LoadFixAddress = self._Macros.get(TAB_FIX_LOAD_TOP_MEMORY_ADDRESS, '0')
try:
self._LoadFixAddress = int (self._LoadFixAddress, 0)
except:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS %s is not valid dec or hex string" % (self._LoadFixAddress))
#
# If command line defined, should override the value in DSC file.
#
if 'FIX_LOAD_TOP_MEMORY_ADDRESS' in GlobalData.gCommandLineDefines.keys():
try:
self._LoadFixAddress = int(GlobalData.gCommandLineDefines['FIX_LOAD_TOP_MEMORY_ADDRESS'], 0)
except:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS %s is not valid dec or hex string" % (GlobalData.gCommandLineDefines['FIX_LOAD_TOP_MEMORY_ADDRESS']))
if self._LoadFixAddress < 0:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS is set to the invalid negative value 0x%x" % (self._LoadFixAddress))
if self._LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self._LoadFixAddress % 0x1000 != 0:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS is set to the invalid unaligned 4K value 0x%x" % (self._LoadFixAddress))
return self._LoadFixAddress
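    # Illustrative values for the checks above (DSC spelling approximate):
    # a platform DSC might carry 'DEFINE FIX_LOAD_TOP_MEMORY_ADDRESS = 0x1000000'
    # (4 KB aligned, accepted), while a command-line override such as
    # 'build -D FIX_LOAD_TOP_MEMORY_ADDRESS=0xFFFFFFFFFFFFFFFF' wins over the
    # DSC value; the all-ones value bypasses the 4 KB alignment check.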
## Retrieve RFCLanguage filter
def _GetRFCLanguages(self):
if self._RFCLanguages == None:
if self._Header == None:
self._GetHeaderInfo()
if self._RFCLanguages == None:
self._RFCLanguages = []
return self._RFCLanguages
## Retrieve ISOLanguage filter
def _GetISOLanguages(self):
if self._ISOLanguages == None:
if self._Header == None:
self._GetHeaderInfo()
if self._ISOLanguages == None:
self._ISOLanguages = []
return self._ISOLanguages
## Retrieve the GUID string for VPD tool
def _GetVpdToolGuid(self):
if self._VpdToolGuid == None:
if self._Header == None:
self._GetHeaderInfo()
if self._VpdToolGuid == None:
self._VpdToolGuid = ''
return self._VpdToolGuid
## Retrieve [SkuIds] section information
def _GetSkuIds(self):
if self._SkuIds == None:
self._SkuIds = sdict()
RecordList = self._RawData[MODEL_EFI_SKU_ID, self._Arch]
for Record in RecordList:
if Record[0] in [None, '']:
EdkLogger.error('build', FORMAT_INVALID, 'No Sku ID number',
File=self.MetaFile, Line=Record[-1])
if Record[1] in [None, '']:
EdkLogger.error('build', FORMAT_INVALID, 'No Sku ID name',
File=self.MetaFile, Line=Record[-1])
self._SkuIds[Record[1]] = Record[0]
if 'DEFAULT' not in self._SkuIds:
self._SkuIds['DEFAULT'] = '0'
if 'COMMON' not in self._SkuIds:
self._SkuIds['COMMON'] = '0'
return self._SkuIds
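    # Illustrative [SkuIds] section feeding the parser above (each record is
    # '<number>|<name>'; exact DSC spelling may vary):
    #   [SkuIds]
    #     0|DEFAULT
    #     1|SkuA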
## Retrieve [Components] section information
def _GetModules(self):
if self._Modules != None:
return self._Modules
self._Modules = sdict()
RecordList = self._RawData[MODEL_META_DATA_COMPONENT, self._Arch]
Macros = self._Macros
Macros["EDK_SOURCE"] = GlobalData.gEcpSource
for Record in RecordList:
DuplicatedFile = False
ModuleFile = PathClass(NormPath(Record[0], Macros), GlobalData.gWorkspace, Arch=self._Arch)
ModuleId = Record[5]
LineNo = Record[6]
# check the file validation
ErrorCode, ErrorInfo = ModuleFile.Validate('.inf')
if ErrorCode != 0:
EdkLogger.error('build', ErrorCode, File=self.MetaFile, Line=LineNo,
ExtraData=ErrorInfo)
# Check duplication
# If arch is COMMON, no duplicate module is checked since all modules in all component sections are selected
if self._Arch != 'COMMON' and ModuleFile in self._Modules:
DuplicatedFile = True
Module = ModuleBuildClassObject()
Module.MetaFile = ModuleFile
# get module private library instance
RecordList = self._RawData[MODEL_EFI_LIBRARY_CLASS, self._Arch, None, ModuleId]
for Record in RecordList:
LibraryClass = Record[0]
LibraryPath = PathClass(NormPath(Record[1], Macros), GlobalData.gWorkspace, Arch=self._Arch)
LineNo = Record[-1]
# check the file validation
ErrorCode, ErrorInfo = LibraryPath.Validate('.inf')
if ErrorCode != 0:
EdkLogger.error('build', ErrorCode, File=self.MetaFile, Line=LineNo,
ExtraData=ErrorInfo)
if LibraryClass == '' or LibraryClass == 'NULL':
self._NullLibraryNumber += 1
LibraryClass = 'NULL%d' % self._NullLibraryNumber
EdkLogger.verbose("Found forced library for %s\n\t%s [%s]" % (ModuleFile, LibraryPath, LibraryClass))
Module.LibraryClasses[LibraryClass] = LibraryPath
if LibraryPath not in self.LibraryInstances:
self.LibraryInstances.append(LibraryPath)
# get module private PCD setting
for Type in [MODEL_PCD_FIXED_AT_BUILD, MODEL_PCD_PATCHABLE_IN_MODULE, \
MODEL_PCD_FEATURE_FLAG, MODEL_PCD_DYNAMIC, MODEL_PCD_DYNAMIC_EX]:
RecordList = self._RawData[Type, self._Arch, None, ModuleId]
for TokenSpaceGuid, PcdCName, Setting, Dummy1, Dummy2, Dummy3, Dummy4 in RecordList:
TokenList = GetSplitValueList(Setting)
DefaultValue = TokenList[0]
if len(TokenList) > 1:
MaxDatumSize = TokenList[1]
else:
MaxDatumSize = ''
TypeString = self._PCD_TYPE_STRING_[Type]
Pcd = PcdClassObject(
PcdCName,
TokenSpaceGuid,
TypeString,
'',
DefaultValue,
'',
MaxDatumSize,
{},
False,
None
)
Module.Pcds[PcdCName, TokenSpaceGuid] = Pcd
# get module private build options
RecordList = self._RawData[MODEL_META_DATA_BUILD_OPTION, self._Arch, None, ModuleId]
for ToolChainFamily, ToolChain, Option, Dummy1, Dummy2, Dummy3, Dummy4 in RecordList:
if (ToolChainFamily, ToolChain) not in Module.BuildOptions:
Module.BuildOptions[ToolChainFamily, ToolChain] = Option
else:
OptionString = Module.BuildOptions[ToolChainFamily, ToolChain]
Module.BuildOptions[ToolChainFamily, ToolChain] = OptionString + " " + Option
RecordList = self._RawData[MODEL_META_DATA_HEADER, self._Arch, None, ModuleId]
if DuplicatedFile and not RecordList:
EdkLogger.error('build', FILE_DUPLICATED, File=self.MetaFile, ExtraData=str(ModuleFile), Line=LineNo)
if RecordList:
if len(RecordList) != 1:
EdkLogger.error('build', OPTION_UNKNOWN, 'Only FILE_GUID can be listed in <Defines> section.',
File=self.MetaFile, ExtraData=str(ModuleFile), Line=LineNo)
ModuleFile = ProcessDuplicatedInf(ModuleFile, RecordList[0][2], GlobalData.gWorkspace)
ModuleFile.Arch = self._Arch
self._Modules[ModuleFile] = Module
return self._Modules
## Retrieve all possible library instances used in this platform
def _GetLibraryInstances(self):
if self._LibraryInstances == None:
self._GetLibraryClasses()
return self._LibraryInstances
## Retrieve [LibraryClasses] information
def _GetLibraryClasses(self):
if self._LibraryClasses == None:
self._LibraryInstances = []
#
# tdict is a special dict kind of type, used for selecting correct
# library instance for given library class and module type
#
LibraryClassDict = tdict(True, 3)
# track all library class names
LibraryClassSet = set()
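            # Each record originates from a DSC line of the form
            # '<LibraryClass>|<instance INF path>', optionally scoped by arch
            # and module type in the section header, e.g. (illustrative only):
            #   [LibraryClasses.common.DXE_DRIVER]
            #     BaseLib|MdePkg/Library/BaseLib/BaseLib.inf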
RecordList = self._RawData[MODEL_EFI_LIBRARY_CLASS, self._Arch, None, -1]
Macros = self._Macros
for Record in RecordList:
LibraryClass, LibraryInstance, Dummy, Arch, ModuleType, Dummy, LineNo = Record
if LibraryClass == '' or LibraryClass == 'NULL':
self._NullLibraryNumber += 1
LibraryClass = 'NULL%d' % self._NullLibraryNumber
EdkLogger.verbose("Found forced library for arch=%s\n\t%s [%s]" % (Arch, LibraryInstance, LibraryClass))
LibraryClassSet.add(LibraryClass)
LibraryInstance = PathClass(NormPath(LibraryInstance, Macros), GlobalData.gWorkspace, Arch=self._Arch)
# check the file validation
ErrorCode, ErrorInfo = LibraryInstance.Validate('.inf')
if ErrorCode != 0:
EdkLogger.error('build', ErrorCode, File=self.MetaFile, Line=LineNo,
ExtraData=ErrorInfo)
if ModuleType != 'COMMON' and ModuleType not in SUP_MODULE_LIST:
EdkLogger.error('build', OPTION_UNKNOWN, "Unknown module type [%s]" % ModuleType,
File=self.MetaFile, ExtraData=LibraryInstance, Line=LineNo)
LibraryClassDict[Arch, ModuleType, LibraryClass] = LibraryInstance
if LibraryInstance not in self._LibraryInstances:
self._LibraryInstances.append(LibraryInstance)
# resolve the specific library instance for each class and each module type
self._LibraryClasses = tdict(True)
for LibraryClass in LibraryClassSet:
# try all possible module types
for ModuleType in SUP_MODULE_LIST:
LibraryInstance = LibraryClassDict[self._Arch, ModuleType, LibraryClass]
if LibraryInstance == None:
continue
self._LibraryClasses[LibraryClass, ModuleType] = LibraryInstance
            # for EDK-style library instances, which are listed in a different section
Macros["EDK_SOURCE"] = GlobalData.gEcpSource
RecordList = self._RawData[MODEL_EFI_LIBRARY_INSTANCE, self._Arch]
for Record in RecordList:
File = PathClass(NormPath(Record[0], Macros), GlobalData.gWorkspace, Arch=self._Arch)
LineNo = Record[-1]
# check the file validation
ErrorCode, ErrorInfo = File.Validate('.inf')
if ErrorCode != 0:
EdkLogger.error('build', ErrorCode, File=self.MetaFile, Line=LineNo,
ExtraData=ErrorInfo)
if File not in self._LibraryInstances:
self._LibraryInstances.append(File)
#
            # we need the module name
# main.py (from repo vijayraghu/testoptustwil)
# -*- coding: utf-8 -*-
import os
import sys
import urllib
import requests
import json
from google.protobuf.json_format import MessageToJson
import re
import datetime
from flask import Flask, request, Response, make_response, jsonify, url_for
from contextlib import closing
# Twilio Helper Library
from twilio.twiml.voice_response import VoiceResponse, Gather, Say, Dial
# Google Text To Speech SDK
from google.oauth2 import service_account
from google.cloud import texttospeech_v1beta1 as texttospeech
# Dialogflow V2 SDK
import dialogflow
#####
##### Declare Global variables
#####
# Setting Google ID - Read env data
project_id = os.environ["DIALOGFLOW_PROJECT_ID"]
#Setting Google authorization credentials - Read env data
credentials_dgf = os.environ.get('GOOGLE_APPLICATION_CREDENTIALS')
call_id = "12345"
lang_code = 'en'
emp_id = "1043456"
app = Flask(__name__)
# Receive call from Twilio with parameters
@app.route('/welcome', methods=['GET','POST'])
def welcome():
#Setting Google Dialogflow Credentials and invoking SDK
service_account_info = json.loads(credentials_dgf)
credentials = service_account.Credentials.from_service_account_info(service_account_info)
session_client = dialogflow.SessionsClient(credentials=credentials)
session = session_client.session_path(project_id, call_id)
event_input = dialogflow.types.EventInput(name='Welcome', language_code=lang_code)
query_input = dialogflow.types.QueryInput(event=event_input)
response = session_client.detect_intent(session=session, query_input=query_input)
print response
output_text = response.query_result.fulfillment_text
output_text = output_text.decode('utf-8')
print output_text
return output_text
#####
##### Process Twilio ASR: "Speech to Text" to Dialogflow Intent analysis
#####
@app.route('/process_speech', methods=['GET', 'POST'])
def process_speech():
input_text = request.values.get('input_text', '')
print input_text
# Step 1: Call Dialogflow for intent analysis
intent_name, output_text, optus_product = dialogflow_text_to_intent(project_id, call_id, input_text, lang_code)
print intent_name, output_text, optus_product
return output_text
#####
##### Google Dialogflow V2 API - Intent identification from text
#####
@app.route('/dialogflow_text_to_intent', methods=['GET', 'POST'])
def dialogflow_text_to_intent(project_id, call_id, input_text, lang_code):
print project_id, call_id, input_text, lang_code
#Setting Google Dialogflow Credentials and invoking SDK
service_account_info = json.loads(credentials_dgf)
credentials = service_account.Credentials.from_service_account_info(service_account_info)
session_client = dialogflow.SessionsClient(credentials=credentials)
session = session_client.session_path(project_id, call_id)
if input_text:
text_input = dialogflow.types.TextInput(text=input_text, language_code=lang_code)
query_input = dialogflow.types.QueryInput(text=text_input)
response = session_client.detect_intent(session=session, query_input=query_input)
print response
paramvalues = MessageToJson(response.query_result.parameters)
param_values = json.loads(paramvalues)
print param_values
print response.query_result.fulfillment_text.encode('utf-8')
# Return properties from Dialogflow
try:
intent_name = response.query_result.intent.display_name
except:
intent_name = ""
try:
output_text = response.query_result.fulfillment_text.encode('utf-8')
print 'output: ' + output_text
except:
output_text = ""
try:
optus_product = param_values["optus_product"]
except:
optus_product = ""
try:
emp_id = param_values["employee_id"]
except:
emp_id = ""
return intent_name, output_text, optus_product
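# Illustrative call of the helper above (session values are placeholders):
#   intent, text, product = dialogflow_text_to_intent(
#       project_id, call_id, 'I have a billing question', lang_code)
# 'intent' carries query_result.intent.display_name, 'text' the fulfillment
# text, and 'product' the 'optus_product' parameter when Dialogflow finds one.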
#####
##### Dialogflow fulfillment webhook
#####
@app.route('/webhook', methods=['POST'])
# Receive the JSON request from Dialogflow
def webhook():
req = request.get_json(silent=True, force=True)
print 'Request:'
print json.dumps(req, indent=4)
res = processRequest(req)
res = json.dumps(res, indent=4)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
# Get details from JSON
def processRequest(req):
result = req.get('result')
metadata = result.get('metadata')
intentname = metadata.get('intentName')
parameters = result.get('parameters')
actionname = parameters.get('action')
emp_id = parameters.get('employee_id')
print emp_id
product_name = parameters.get('optus_product')
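    # The keys read above assume a request body shaped roughly like this
    # (trimmed; field values are placeholders):
    #   {"result": {"metadata":   {"intentName": "billing_services_cartwright"},
    #               "parameters": {"action": "...", "employee_id": "1043456",
    #                              "optus_product": "..."},
    #               "contexts":   [...]}}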
# Handle Default Fallback Intent
if intentname == 'Default Fallback Intent':
print 'Intent :' + intentname
context = result.get('contexts')
if "parameters" in context[0]:
con_emp_id = context[0]['parameters']['employee_id.original']
print con_emp_id
if str(con_emp_id) != '':
print 'I am here'
                speech = 'I’m not sure I quite understand. Apologies. I’m new here at Optus and still in training, learning about all our product lines. Maybe you could tell me the general reason for your call today, like Billing or Sales, or perhaps it’s technical. If you are not sure, please say exit.'
            else:
                speech = 'I’m not sure I quite understand. Apologies. If you could just tell me your employee number, speaking every digit individually, I can help you. If you don’t have an employee number, that’s fine. Just say you don’t have it, or say exit.'
        else:
            speech = 'I’m not sure I quite understand. Apologies. If you could just tell me your employee number, speaking every digit individually, I can help you. If you don’t have an employee number, that’s fine. Just say you don’t have it, or say exit.'
# Process employee number
if intentname == 'get_employee_number_cartwright':
print 'Intent :' + intentname
#Validate employee number
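        # Validation rule used throughout this handler (as implemented below):
        # an internal employee number is accepted only when its first two
        # digits are '10'; everything else is offered a transfer to the
        # General Customer Service Team.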
if (str(int(emp_id))[:2]) != '10':
            speech = 'Hmmm! That does not seem to be a valid employee number. Care for me is for internal employees only. Would you like me to transfer you to one of my colleagues in the General Customer Service Team that can help you with your inquiry today?'
else:
employee_name = get_employee_name(emp_id)
speech = 'Thanks ' + employee_name + ' for providing your employee number. Now how can we help you today?'
# Process employee number again
if intentname == 'get_employee_number_cartwright-again':
if (str(int(emp_id))[:2]) != '10':
            speech = 'Sorry, that still doesn’t check out. Perhaps you should chat with your manager. Would you like me to transfer you to one of my colleagues in the General Customer Service Team that can help you with your inquiry today?'
else:
employee_name = get_employee_name(emp_id)
speech = 'Thanks ' + employee_name + ' for providing your employee number. Now how can we help you today?'
# Transfer to General customer care when user says ok for transfer post unsuccessful employee id check
if intentname == 'get_employee_number_cartwright-transfer':
speech = 'My colleague in the General Customer Service Team will help you with your inquiry today.'
# Transfer for Billing_services
if intentname == 'billing_services_cartwright':
if (str(int(emp_id))[:2]) != '10':
            speech = 'Hmmm! That does not seem to be a valid employee number. Care for me is for internal employees only. Would you like me to transfer you to one of my colleagues in the General Customer Service Team that can help you with your inquiry today?'
else:
employee_name = get_employee_name(emp_id)
speech = 'Ok ' + employee_name + '. Let me transfer you to one of my colleagues that can help you with your Billing inquiry'
#Process employee number again
if intentname == 'billing_services_cartwright-getempnumber':
if (str(int(emp_id))[:2]) != '10':
            speech = 'Sorry, that still doesn’t check out. Perhaps you should chat with your manager. Would you like me to transfer you to one of my colleagues in the General Customer Service Team that can help you with your inquiry today?'
else:
employee_name = get_employee_name(emp_id)
speech = 'Thanks ' + employee_name + ' for providing your employee number. Let me transfer you to one of my colleagues that can help you with your Billing inquiry'
# Transfer to General customer care when user says ok for transfer post unsuccessful employee id check
if intentname == 'billing_services_cartwright-transfer':
speech = 'My colleague in the General Customer Service Team will help you with your inquiry today.'
# Transfer for Sales_services
if intentname == 'sales_services_cartwright':
if (str(int(emp_id))[:2]) != '10':
            speech = 'Hmmm! That does not seem to be a valid employee number. Care for me is for internal employees only. Would you like me to transfer you to one of my colleagues in the General Customer Service Team that can help you with your inquiry today?'
else:
employee_name = get_employee_name(emp_id)
            speech = 'Ok ' + employee_name + '. Let me transfer you to one of my colleagues that can help you with your Sales inquiry'
#Process employee number again
if intentname == 'sales_services_cartwright-getempnumber':
if (str(int(emp_id))[:2]) != '10':
            speech = 'Sorry, that still doesn’t check out. Perhaps you should chat with your manager. Would you like me to transfer you to one of my colleagues in the General Customer Service Team that can help you with your inquiry today?'
else:
employee_name = get_employee_name(emp_id)
speech = 'Thanks ' + employee_name + ' for providing your employee number. Let me transfer you to one of my colleagues that can help you with your Sales inquiry'
# Transfer to General customer care when user says ok for transfer post unsuccessful employee id check
if intentname == 'sales_services_cartwright-transfer':
speech = 'My colleague in the General Customer Service Team will help you with your inquiry today.'
# Transfer for Tech_services
if intentname == 'tech_services_cartwright':
if (str(int(emp_id))[:2]) != '10':
            speech = 'Hmmm! That does not seem to be a valid employee number. Care for me is for internal employees only. Would you like me to transfer you to one of my colleagues in the General Customer Service Team that can help you with your inquiry today?'
else:
employee_name = get_employee_name(emp_id)
            speech = 'Ok ' + employee_name + '. Let me transfer you to one of my colleagues that can help you with your technical inquiry'
#Process employee number again
if intentname == 'tech_services_cartwright-getempnumber':
if (str(int(emp_id))[:2]) != '10':
            speech = 'Sorry, that still doesn’t check out. Perhaps you should chat with your manager. Would you like me to transfer you to one of my colleagues in the General Customer Service Team that can help you with your inquiry today?'
else:
employee_name = get_employee_name(emp_id)
speech = 'Thanks ' + employee_name + ' for providing your employee number. Let me transfer you to one of my colleagues that can help you with your technical inquiry'
# Transfer to General customer care when user says ok for transfer post unsuccessful employee id check
if intentname == 'tech_services_cartwright-transfer':
speech = 'My colleague in the General Customer Service Team will help you with your inquiry today.'
# Transfer to General services if employee number is not provided
if intentname == 'no_employee_number_cartwright':
speech = 'Let me transfer you to one of my colleagues in the General Customer Service Team that can help you with your inquiry today.'
# Catch all error/exception scenarios and transfer to General services
#else:
    #print 'I
,f"orgchartportal_manage_permissions_view: context variable division_list either has more data than allowed ({from_api_division_list - required_division_list}) or has less data than allowed ({required_division_list - from_api_division_list})")
self.assertEqual(from_api_wu_desc_list, required_wu_desc_list
,f"orgchartportal_manage_permissions_view: context variable wu_desc_list either has more data than allowed ({from_api_wu_desc_list - required_wu_desc_list}) or has less data than allowed ({required_wu_desc_list - from_api_wu_desc_list})")
def test_views_response_status_200(self):
"""Test normal user"""
remove_admin_status()
self.assert_response_status_200()
"""Test admin user"""
grant_admin_status()
self.assert_response_status_200()
def test_views_response_user_admin_restriction(self):
#TODO IMPLEMENT THIS WHEN NEW USER AND APP PERMISSION MANAGEMENT IS IN PLACE
# """Test inactive user (Normal), should have NO access to regular or admin views"""
# remove_admin_status()
# remove_active_user_status()
# self.assert_inactive_user_no_access_on_normal_and_admin_view()
# """Test inactive user (Admin), should have NO access to regular or admin views"""
# grant_admin_status()
# remove_active_user_status()
# self.assert_inactive_user_no_access_on_normal_and_admin_view()
"""Test active user (Normal), should only have access to regular views"""
grant_active_user_status()
remove_admin_status()
self.assert_user_access_on_normal_and_admin_view()
"""Test active user (Admin), should have access to regular and admin views"""
grant_active_user_status()
grant_admin_status()
self.assert_admin_access_on_normal_and_admin_view()
def test_views_response_data(self):
"""
Test views to have the required GET request context data
Some views have additional context data, need to test for those here
"""
# Test normal user
remove_admin_status()
self.assert_additional_context_data(additional_requirements=self.additional_context_requirements)
# Test admin user
grant_admin_status()
self.assert_additional_context_data(additional_requirements=self.additional_context_requirements)
class TestAPIUpdateEmployeeData(HttpPostTestCase):
@classmethod
def setUpClass(self):
self.api_name = 'orgchartportal_update_employee_data'
self.post_response_json_key_specifications = []
tear_down()
set_up_permissions()
self.test_pms = TEST_PMS
self.__null_out_test_pms_obj(self)
## Sequence 0, should work anytime
self.valid_payload0 = [
{
'to_pms' : self.test_pms
,'column_name' : 'Supervisor'
,'new_value' : TEST_SUPERVISOR_PMS
}
,{
'to_pms' : self.test_pms
,'column_name' : 'Office Title'
,'new_value' : 'Hello World!'
}
,{
'to_pms' : self.test_pms
,'column_name' : 'Site'
,'new_value' : 'BK.H'
}
,{
'to_pms' : self.test_pms
,'column_name' : 'Floor'
,'new_value' : 'BK.H.1'
}
,{
'to_pms' : self.test_pms
,'column_name' : 'Site Type'
,'new_value' : '13'
}
]
## Sequence 1: Test auto null out of site floor and site type when site is changed
self.valid_payload1 = [
{
'to_pms' : self.test_pms
,'column_name' : 'Site'
,'new_value' : 'MN.H'
}
]
        ## Sequence 2: Test null out of site type when site floor is changed. Must use a floor id that has multiple possible site types, so it doesn't trigger the API's auto-populate of site type id when there's only one possible site type id
self.valid_payload2 = [
{
'to_pms' : self.test_pms
,'column_name' : 'Site'
,'new_value' : 'BK.D'
},{
'to_pms' : self.test_pms
,'column_name' : 'Floor'
,'new_value' : 'BK.D.2'
}
]
## Sequence 3: Test auto set site type when site floor has only one site type, like 'MN.H.9'
self.valid_payload3 = [
{
'to_pms' : self.test_pms
,'column_name' : 'Site'
,'new_value' : 'MN.H'
            },{ ## Floor change to MN.H.9 should also set the actual site type since there's only one valid site type for that site floor. Make sure to check it in the '## Check if data was saved correctly and if tblChanges was updated correctly' section
'to_pms' : self.test_pms
,'column_name' : 'Floor'
,'new_value' : 'MN.H.9'
}
]
## Sequence 4: Test site type direct update, but first will need to reset site floor to another site floor with multiple site types
self.valid_payload4 = [
{
'to_pms' : self.test_pms
,'column_name' : 'Site'
,'new_value' : 'BK.B'
},{
'to_pms' : self.test_pms
,'column_name' : 'Floor'
,'new_value' : 'BK.B.1' ## Should accept 7 or 3 for site type
},{
'to_pms' : self.test_pms
,'column_name' : 'Site Type'
,'new_value' : '3'
}
,{
'to_pms' : self.test_pms
,'column_name' : 'Site Type'
,'new_value' : '7'
}
]
@classmethod
def tearDownClass(self):
self.__null_out_test_pms_obj(self)
tear_down()
def test_with_valid_data(self):
## Sequence 0
self.__null_out_test_pms_obj()
for payload in self.valid_payload0:
self.assert_post_with_valid_payload_is_success(payload=payload)
## Check if data was saved correctly and if tblChanges was updated correctly
saved_object = TblEmployees.objects.using('OrgChartRead').get(
pms=self.test_pms
)
if payload['column_name'] == 'Supervisor':
self.assert_post_key_update_equivalence(key_name=payload['column_name'], key_value=payload['new_value'], db_value=saved_object.supervisor_pms.pms)
self.__assert_delta_tracked_in_tblChanges(proposed_by_pms=self.test_pms, proposed_to_pms=payload['to_pms'], proposed_column_name='SupervisorPMS', proposed_new_value=payload['new_value'])
elif payload['column_name'] == 'Office Title':
self.assert_post_key_update_equivalence(key_name=payload['column_name'], key_value=payload['new_value'], db_value=saved_object.office_title)
self.__assert_delta_tracked_in_tblChanges(proposed_by_pms=self.test_pms, proposed_to_pms=payload['to_pms'], proposed_column_name='OfficeTitle', proposed_new_value=payload['new_value'])
elif payload['column_name'] == 'Site':
self.assert_post_key_update_equivalence(key_name=payload['column_name'], key_value=payload['new_value'], db_value=saved_object.actual_site_id.site_id)
self.__assert_delta_tracked_in_tblChanges(proposed_by_pms=self.test_pms, proposed_to_pms=payload['to_pms'], proposed_column_name='ActualSiteId', proposed_new_value=payload['new_value'])
elif payload['column_name'] == 'Floor':
self.assert_post_key_update_equivalence(key_name=payload['column_name'], key_value=payload['new_value'], db_value=saved_object.actual_floor_id.floor_id)
self.__assert_delta_tracked_in_tblChanges(proposed_by_pms=self.test_pms, proposed_to_pms=payload['to_pms'], proposed_column_name='ActualFloorId', proposed_new_value=payload['new_value'])
elif payload['column_name'] == 'Site Type':
self.assert_post_key_update_equivalence(key_name=payload['column_name'], key_value=payload['new_value'], db_value=saved_object.actual_site_type_id.site_type_id)
self.__assert_delta_tracked_in_tblChanges(proposed_by_pms=self.test_pms, proposed_to_pms=payload['to_pms'], proposed_column_name='ActualSiteTypeId', proposed_new_value=payload['new_value'])
else:
raise ValueError(f"uncaught payload param value in test case (Remove it, or add a test case for it): '{payload['column_name']}'")
## Sequence 1 - Test auto null out of site floor and site type when site is changed
self.__null_out_test_pms_obj()
### Random value set to floor and site type to test the null out
test_emp = TblEmployees.objects.using('OrgChartWrite').get(
pms=self.test_pms
)
test_emp.actual_floor_id = TblDOTSiteFloors.objects.using('OrgChartWrite').get(floor_id__exact='BK.E.16')
test_emp.actual_site_type_id = TblDOTSiteTypes.objects.using('OrgChartWrite').get(site_type_id__exact='3')
test_emp.save(using='OrgChartWrite')
for payload in self.valid_payload1:
self.assert_post_with_valid_payload_is_success(payload=payload)
## Check if data was saved correctly and if tblChanges was updated correctly
saved_object = TblEmployees.objects.using('OrgChartRead').get(
pms=self.test_pms
)
if payload['column_name'] == 'Site':
self.assert_post_key_update_equivalence(key_name=payload['column_name'], key_value=payload['new_value'], db_value=saved_object.actual_site_id.site_id)
self.assert_post_key_update_equivalence(key_name='Change Site -> Auto Null out of Site Floor', key_value=None, db_value=saved_object.actual_floor_id)
self.assert_post_key_update_equivalence(key_name='Change Site -> Auto Null out of Site Type', key_value=None, db_value=saved_object.actual_site_type_id)
self.__assert_delta_tracked_in_tblChanges(proposed_by_pms=self.test_pms, proposed_to_pms=payload['to_pms'], proposed_column_name='ActualSiteId', proposed_new_value=payload['new_value'])
self.__assert_delta_tracked_in_tblChanges(proposed_by_pms=self.test_pms, proposed_to_pms=payload['to_pms'], proposed_column_name='ActualFloorId', proposed_new_value=None)
self.__assert_delta_tracked_in_tblChanges(proposed_by_pms=self.test_pms, proposed_to_pms=payload['to_pms'], proposed_column_name='ActualSiteTypeId', proposed_new_value=None)
else:
raise ValueError(f"uncaught payload param value in test case (Remove it, or add a test case for it): '{payload['column_name']}'")
## Sequence 2 - Test null out of site type when site floor is changed
self.__null_out_test_pms_obj()
### Random value set to site type to test the null out
test_emp = TblEmployees.objects.using('OrgChartWrite').get(
pms=self.test_pms
)
test_emp.actual_site_type_id = TblDOTSiteTypes.objects.using('OrgChartWrite').get(site_type_id__exact='3')
test_emp.save(using='OrgChartWrite')
for payload in self.valid_payload2:
self.assert_post_with_valid_payload_is_success(payload=payload)
## Check if data was saved correctly and if tblChanges was updated correctly
saved_object = TblEmployees.objects.using('OrgChartRead').get(
pms=self.test_pms
)
if payload['column_name'] == 'Site':
self.assert_post_key_update_equivalence(key_name=payload['column_name'], key_value=payload['new_value'], db_value=saved_object.actual_site_id.site_id)
self.__assert_delta_tracked_in_tblChanges(proposed_by_pms=self.test_pms, proposed_to_pms=payload['to_pms'], proposed_column_name='ActualSiteId', proposed_new_value=payload['new_value'])
elif payload['column_name'] == 'Floor':
self.assert_post_key_update_equivalence(key_name=payload['column_name'], key_value=payload['new_value'], db_value=saved_object.actual_floor_id.floor_id)
self.assert_post_key_update_equivalence(key_name='Change Floor -> Auto Null out of Site Type', key_value=None, db_value=saved_object.actual_site_type_id)
self.__assert_delta_tracked_in_tblChanges(proposed_by_pms=self.test_pms, proposed_to_pms=payload['to_pms'], proposed_column_name='ActualFloorId', proposed_new_value=payload['new_value'])
self.__assert_delta_tracked_in_tblChanges(proposed_by_pms=self.test_pms, proposed_to_pms=payload['to_pms'], proposed_column_name='ActualSiteTypeId', proposed_new_value=None)
else:
raise ValueError(f"uncaught payload param value in test case (Remove it, or add a test case for it): '{payload['column_name']}'")
## Sequence 3 - Test auto set site type when site floor has only one site type, like 'MN.H.9'
self.__null_out_test_pms_obj()
for payload in self.valid_payload3:
self.assert_post_with_valid_payload_is_success(payload=payload)
## Check if data was saved correctly and if tblChanges was updated correctly
saved_object = TblEmployees.objects.using('OrgChartRead').get(
pms=self.test_pms
)
if payload['column_name'] == 'Site':
## A change of Site should also null out site floor and site type. Check if data saved, and tracked in tblChanges
self.assert_post_key_update_equivalence(key_name=payload['column_name'] , key_value=payload['new_value'], db_value=saved_object.actual_site_id.site_id)
self.assert_post_key_update_equivalence(key_name='Change Site -> Auto Null out of Floor' , key_value=None , db_value=saved_object.actual_floor_id)
self.assert_post_key_update_equivalence(key_name='Change Site -> Auto Null out of Site Type', key_value=None , db_value=saved_object.actual_site_type_id)
self.__assert_delta_tracked_in_tblChanges(proposed_by_pms=self.test_pms, proposed_to_pms=payload['to_pms'], proposed_column_name='ActualSiteId' , proposed_new_value=payload['new_value'])
self.__assert_delta_tracked_in_tblChanges(proposed_by_pms=self.test_pms, proposed_to_pms=payload['to_pms'], proposed_column_name='ActualFloorId' , proposed_new_value=None)
self.__assert_delta_tracked_in_tblChanges(proposed_by_pms=self.test_pms, proposed_to_pms=payload['to_pms'], proposed_column_name='ActualSiteTypeId' , proposed_new_value=None)
elif payload['column_name'] == 'Floor':
## 'MN.H.9' should also have set site type id to 7
self.assert_post_key_update_equivalence(key_name=payload['column_name'], key_value=payload['new_value'] , db_value=saved_object.actual_floor_id.floor_id)
self.assert_post_key_update_equivalence(key_name=payload['column_name'], key_value='7' , db_value=saved_object.actual_site_type_id.site_type_id)
self.__assert_delta_tracked_in_tblChanges(proposed_by_pms=self.test_pms, proposed_to_pms=payload['to_pms'], proposed_column_name='ActualFloorId' , proposed_new_value=payload['new_value'])
self.__assert_delta_tracked_in_tblChanges(proposed_by_pms=self.test_pms, proposed_to_pms=payload['to_pms'], proposed_column_name='ActualSiteTypeId' , proposed_new_value='7')
else:
raise ValueError(f"uncaught payload param value in test case (Remove it, or add a test case for it): '{payload['column_name']}'")
def test_data_validation(self):
f"""Testing {self.api_name} data validation"""
payloads = self.valid_payload0
parameters = [
# Parameter name # Accepted type
"to_pms" # str -> int formatted str of len 7
,"column_name" # str -> must be one of the follow ['Supervisor', 'Office Title', 'Site', 'Floor', 'Site Type']
,"new_value" # str -> depends on the @column_name that was given
]
for payload in payloads:
for param_name in parameters:
if param_name == 'to_pms':
valid = [self.test_pms]
invalid = ['a', 1, 2.3, None, True, 'a123456', '12345678']
elif param_name == 'column_name':
valid = ['Supervisor', 'Office Title', 'Site', 'Floor', 'Site Type']
invalid = ['a', 1, 2.3, None, True]
elif param_name == 'new_value':
if payload['column_name'] == 'Supervisor':
valid = [TEST_SUPERVISOR_PMS]
invalid = ['a', 1, 2.3, None, True, 'a123456', '12345678']
elif payload['column_name'] == 'Office Title':
valid = ['Test Office Title Input']
invalid = [1, 2.3, None, True]
elif payload['column_name'] == 'Site':
valid = ['BK.H']
invalid = ['a', 1, 2.3, None, True]
elif payload['column_name'] == 'Floor':
valid = ['BK.H.1']
invalid = ['a', 1, 2.3, None, True]
elif payload['column_name'] == 'Site Type':
valid = ['13']
invalid = ['a', 1, 2.3, None, True]
else:
raise ValueError(f"test_data_validation(): parameter test not implemented: '{param_name}'. Please remove or implement it")
def special_param_good_cond(res_content):
if (
(res_content['post_success'] == True)
or (
res_content['post_success'] == False
and any([
                        'No change in data, no update needed.' in res_content['post_msg'] ## this error message in save() only gets called
ax.axvline(i, color='white')
return ax
def matrix_waterfall_matched(
self, af, patient_col, group_col, group_order, count=10
):
"""
Compute a matrix of variant classifications with a shape of
(gene-group pairs, patients).
Parameters
----------
af : AnnFrame
AnnFrame containing sample annotation data.
patient_col : str
AnnFrame column containing patient information.
group_col : str
AnnFrame column containing sample group information.
group_order : list
List of sample group names.
count : int, default: 10
Number of top mutated genes to include.
        Returns
        -------
        pandas.DataFrame
            Matrix of variant classifications with one row per
            (gene, group) pair and one column per patient.
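        Examples
        --------
        A hypothetical call, assuming ``af`` carries 'Patient_ID' and
        'Timepoint' columns (both names illustrative only):

        >>> df = mf.matrix_waterfall_matched(af, 'Patient_ID', 'Timepoint',
        ...     ['Before', 'After'], count=10)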
"""
df = self.matrix_waterfall(count=count)
genes = df.index
missing_samples = {}
for sample in af.samples:
if sample not in df.columns:
missing_samples[sample] = ['None'] * len(genes)
df = pd.concat(
[df, pd.DataFrame(missing_samples, index=genes)], axis=1)
df = df[af.samples].T
df = df.merge(af.df[[patient_col, group_col]],
left_index=True, right_index=True)
df = df.reset_index(drop=True)
temps = []
for group in group_order:
temp = df[df[group_col] == group].set_index(patient_col)[genes]
tuples = [(x, group) for x in genes]
mi = pd.MultiIndex.from_tuples(tuples, names=['Gene', 'Group'])
temp.columns = mi
temps.append(temp)
df = pd.concat(temps, axis=1)
tuples = []
for gene in genes:
for group in group_order:
tuples.append((gene, group))
df = df[tuples]
df = df.T
c = df.applymap(lambda x: 0 if x == 'None' else 1).sort_values(
df.index.to_list(), axis=1, ascending=False).columns
df = df[c]
return df
def to_vcf(
self, fasta=None, ignore_indels=False, cols=None, names=None
):
"""
Write the MafFrame to a sorted VcfFrame.
Converting from MAF to VCF is pretty straightforward for SNVs, but it
can be challenging for INDELs and complex events involving multiple
nucleotides (e.g. 'AAGG' → 'CCCG'). This is because, for the latter
case we need to identify the "anchor" nucleotide for each event,
which is crucial for constructing a properly formatted VCF. For
example, a deletion event 'AGT' → '-' in MAF would have to be
converted to 'CAGT' → 'C' in the VCF where 'C' is our anchor
nucleotide. The position should be shifted by one as well.
In order to tackle this issue, the method makes use of a reference
assembly (i.e. FASTA file). If SNVs are your only concern, then you
do not need a FASTA file and can just set ``ignore_indels`` as True.
If you are going to provide a FASTA file, please make sure to select
the appropriate one (e.g. one that matches the genome assembly). For
example, if your MAF is in hg19/GRCh37, use the 'hs37d5.fa' file
which can be freely downloaded from the 1000 Genomes Project.
Parameters
----------
fasta : str, optional
FASTA file. Required if ``ignore_indels`` is False.
ignore_indels : bool, default: False
If True, do not include INDELs in the VcfFrame. Useful when
a FASTA file is not available.
cols : str or list, optional
Column(s) in the MafFrame which contain additional genotype
data of interest. If provided, these data will be added to
individual sample genotypes (e.g. '0/1:0.23').
names : str or list, optional
Name(s) to be displayed in the FORMAT field (e.g. AD, AF, DP).
If not provided, the original column name(s) will be displayed.
Returns
-------
VcfFrame
The VcfFrame object.
Examples
--------
>>> from fuc import pymaf
>>> mf = pymaf.MafFrame.from_file('in.maf')
>>> vf = mf.to_vcf(fasta='hs37d5.fa')
>>> vf = mf.to_vcf(ignore_indels=True)
>>> vf = mf.to_vcf(fasta='hs37d5.fa', cols='i_TumorVAF_WU', names='AF')
"""
if not ignore_indels and fasta is None:
raise ValueError("A FASTA file is required when 'ignore_indels' "
"argument is False.")
if cols is None:
cols = []
if names is None:
names = []
if isinstance(cols, str):
cols = [cols]
if isinstance(names, str):
names = [names]
if cols and not names:
names = cols
if len(cols) != len(names):
raise ValueError("Arguments 'cols' and 'names' "
"have different lengths.")
# Create the minimal VCF.
index_cols = ['Chromosome', 'Start_Position',
'Reference_Allele', 'Tumor_Seq_Allele2']
df = self.df.pivot(index=index_cols,
columns='Tumor_Sample_Barcode',
values='Tumor_Seq_Allele2')
f = lambda x: '0/0' if pd.isnull(x) else '0/1'
df = df.applymap(f)
df.columns.name = None
df = df.reset_index()
df = df.rename(columns={'Chromosome': 'CHROM',
'Start_Position': 'POS',
'Reference_Allele': 'REF',
'Tumor_Seq_Allele2': 'ALT'})
df['ID'] = '.'
df['QUAL'] = '.'
df['FILTER'] = '.'
df['INFO'] = '.'
df['FORMAT'] = 'GT'
df = df[pyvcf.HEADERS + self.samples]
# Add requested genotype information.
f = lambda x: '.' if pd.isnull(x) else str(x)
for i, col in enumerate(cols):
            _ = self.df.pivot(index=index_cols,
                              columns='Tumor_Sample_Barcode',
                              values=col)
_ = _.reset_index()
_ = _.drop(index_cols, axis=1)
_ = _[self.samples]
_ = _.applymap(f)
df.iloc[:, 9:] = df.iloc[:, 9:] + ':' + _
df.FORMAT = df.FORMAT + ':' + names[i]
# Handle INDELs.
l = ['A', 'C', 'G', 'T']
if ignore_indels:
i = (df.REF.isin(l)) & (df.ALT.isin(l))
df = df[i]
else:
def one_row(r):
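                # One MAF record -> VCF-style REF/ALT. SNVs pass through
                # unchanged; INDELs are left-anchored: fetch the reference
                # base just before the event, shift POS back by one, and
                # prepend the anchor (e.g. the deletion 'AGT' -> '-' becomes
                # 'CAGT' -> 'C', as explained in the docstring above).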
if r.REF in l and r.ALT in l:
return r
region = f'{r.CHROM}:{r.POS-1}-{r.POS-1}'
anchor = common.extract_sequence(fasta, region)
if not anchor:
return r
r.POS = r.POS - 1
if r.ALT == '-':
r.REF = anchor + r.REF
r.ALT = anchor
elif r.REF == '-':
r.REF = anchor
r.ALT = anchor + r.ALT
else:
r.REF = anchor + r.REF
r.ALT = anchor + r.ALT
return r
df = df.apply(one_row, axis=1)
# Create the metadata.
meta = [
'##fileformat=VCFv4.3',
'##source=fuc.api.pymaf.MafFrame.to_vcf',
]
# Create the VcfFrame.
vf = pyvcf.VcfFrame(meta, df)
vf = vf.sort()
return vf
def to_file(self, fn):
"""Write MafFrame to a MAF file.
Parameters
----------
fn : str
MAF file path.
"""
with open(fn, 'w') as f:
f.write(self.to_string())
def to_string(self):
"""Render MafFrame to a console-friendly tabular output.
Returns
-------
str
String representation of MafFrame.
"""
return self.df.to_csv(index=False, sep='\t')
def filter_annot(self, af, expr):
"""
Filter the MafFrame using sample annotation data.
Samples are selected by querying the columns of an AnnFrame with a
boolean expression. Samples not present in the MafFrame will be
excluded automatically.
Parameters
----------
af : AnnFrame
AnnFrame containing sample annotation data.
expr : str
Query expression to evaluate.
Returns
-------
MafFrame
Filtered MafFrame.
Examples
--------
>>> from fuc import common, pymaf
>>> common.load_dataset('tcga-laml')
>>> mf = pymaf.MafFrame.from_file('~/fuc-data/tcga-laml/tcga_laml.maf.gz')
>>> af = common.AnnFrame.from_file('~/fuc-data/tcga-laml/tcga_laml_annot.tsv', sample_col=0)
>>> filtered_mf = mf.filter_annot(af, "FAB_classification == 'M4'")
"""
samples = af.df.query(expr).index
i = self.df.Tumor_Sample_Barcode.isin(samples)
df = self.df[i]
mf = self.__class__(df)
return mf
def filter_indel(self, opposite=False, as_index=False):
"""
Remove rows with an indel.
Parameters
----------
opposite : bool, default: False
If True, return rows that don't meet the said criteria.
as_index : bool, default: False
If True, return boolean index array instead of MafFrame.
Returns
-------
MafFrame or pandas.Series
Filtered MafFrame or boolean index array.
Examples
--------
>>> from fuc import common, pymaf
>>> common.load_dataset('tcga-laml')
>>> maf_file = '~/fuc-data/tcga-laml/tcga_laml.maf.gz'
>>> mf = pymaf.MafFrame.from_file(maf_file)
>>> mf.filter_indel().df.Variant_Type.unique()
array(['SNP'], dtype=object)
>>> mf.filter_indel(opposite=True).df.Variant_Type.unique()
array(['DEL', 'INS'], dtype=object)
"""
def one_row(r):
if (len(r.Reference_Allele) == 1 and
len(r.Tumor_Seq_Allele1) == 1 and
len(r.Tumor_Seq_Allele2) == 1 and
'-' not in r.Reference_Allele and
'-' not in r.Tumor_Seq_Allele1 and
'-' not in r.Tumor_Seq_Allele2):
return False
else:
return True
i = ~self.df.apply(one_row, axis=1)
if opposite:
i = ~i
if as_index:
return i
return self.__class__(self.df[i])
def variants(self):
"""
List unique variants in MafFrame.
Returns
-------
list
List of unique variants.
Examples
--------
>>> from fuc import common, pymaf
>>> common.load_dataset('tcga-laml')
>>> maf_file = '~/fuc-data/tcga-laml/tcga_laml.maf.gz'
>>> mf = pymaf.MafFrame.from_file(maf_file)
>>> mf.variants()[:5]
['1:1571791:1571791:G:A', '1:1747228:1747228:T:G', '1:2418350:2418350:C:T', '1:3328523:3328523:G:A', '1:3638739:3638739:C:T']
"""
if self.df.empty:
return []
cols = ['Chromosome', 'Start_Position', 'End_Position', 'Reference_Allele', 'Tumor_Seq_Allele2']
df = self.df.drop_duplicates(cols)
df = df[cols]
df = df.sort_values(cols)
df = df.applymap(str)
s = df.apply(lambda r: r.str.cat(sep=':'), axis=1)
return s.to_list()
def subset(self, samples, exclude=False):
"""
Subset MafFrame for specified samples.
Parameters
----------
samples : str, list, or pandas.Series
            Sample name or list of names (the order does not matter).
exclude : bool, default: False
If True, exclude specified samples.
Returns
-------
MafFrame
Subsetted MafFrame.
Examples
--------
>>> from fuc import common, pymaf
>>> common.load_dataset('tcga-laml')
>>> maf_file = '~/fuc-data/tcga-laml/tcga_laml.maf.gz'
>>> mf = pymaf.MafFrame.from_file(maf_file)
>>> mf.shape
(2207, 193)
>>> mf.subset(['TCGA-AB-2988', 'TCGA-AB-2869']).shape
(27, 2)
>>> mf.subset(['TCGA-AB-2988', 'TCGA-AB-2869'], exclude=True).shape
(2180, 191)
"""
if isinstance(samples, str):
samples = [samples]
elif isinstance(samples, pd.Series):
samples = samples.to_list()
elif isinstance(samples, list):
pass
else:
raise TypeError(f'Incorrect input type: {type(samples)}')
if exclude:
            samples = [x for x in self.samples if x not in samples]
        i = self.df.Tumor_Sample_Barcode.isin(samples)
        return self.__class__(self.df[i])
missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_catalog_details,
response_type="Catalog")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_catalog_details,
response_type="Catalog")
def update_connection(self, catalog_id, data_asset_key, connection_key, update_connection_details, **kwargs):
"""
Updates a specific connection of a data asset.
:param str catalog_id: (required)
Unique catalog identifier.
:param str data_asset_key: (required)
Unique data asset key.
:param str connection_key: (required)
Unique connection key.
:param UpdateConnectionDetails update_connection_details: (required)
The information to be updated in the connection.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_catalog.models.Connection`
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/catalogs/{catalogId}/dataAssets/{dataAssetKey}/connections/{connectionKey}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_connection got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"catalogId": catalog_id,
"dataAssetKey": data_asset_key,
"connectionKey": connection_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_connection_details,
response_type="Connection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_connection_details,
response_type="Connection")
def update_data_asset(self, catalog_id, data_asset_key, update_data_asset_details, **kwargs):
"""
Updates a specific data asset identified by the given key.
:param str catalog_id: (required)
Unique catalog identifier.
:param str data_asset_key: (required)
Unique data asset key.
:param UpdateDataAssetDetails update_data_asset_details: (required)
The information to be updated in the data asset.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_catalog.models.DataAsset`
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/catalogs/{catalogId}/dataAssets/{dataAssetKey}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_data_asset got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"catalogId": catalog_id,
"dataAssetKey": data_asset_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_data_asset_details,
response_type="DataAsset")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_data_asset_details,
response_type="DataAsset")
def update_entity(self, catalog_id, data_asset_key, entity_key, update_entity_details, **kwargs):
"""
Updates a specific data entity.
:param str catalog_id: (required)
Unique catalog identifier.
:param str data_asset_key: (required)
Unique data asset key.
:param str entity_key: (required)
Unique entity key.
:param UpdateEntityDetails update_entity_details: (required)
The information to be updated in the data entity.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_catalog.models.Entity`
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/catalogs/{catalogId}/dataAssets/{dataAssetKey}/entities/{entityKey}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_entity got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"catalogId": catalog_id,
"dataAssetKey": data_asset_key,
"entityKey": entity_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_entity_details,
response_type="Entity")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_entity_details,
response_type="Entity")
def update_folder(self, catalog_id, data_asset_key, folder_key, update_folder_details, **kwargs):
"""
Updates a specific folder of a data asset.
:param str catalog_id: (required)
Unique catalog identifier.
:param str data_asset_key: (required)
Unique data asset key.
:param str folder_key: (required)
Unique folder key.
:param UpdateFolderDetails update_folder_details: (required)
The information to be updated in the folder.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_catalog.models.Folder`
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/catalogs/{catalogId}/dataAssets/{dataAssetKey}/folders/{folderKey}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
# src/test_derlite.py
import derlite
from derlite import Tag, Oid, DecodeError
import codecs, datetime, unittest
try:
codecs.lookup('Teletex')
teletex_available = True
except LookupError:
teletex_available = False
class Test (unittest.TestCase):
def around(self, enc, der):
# Round-trip helper: check that the encoder produced the expected
# DER bytes (given as a hex string), then return a Decoder over them.
der = bytes.fromhex(der)
got = enc.getvalue()
self.assertEqual(got, der)
return derlite.Decoder(got)
def test_simple_values(self):
# Test round-tripping some simple values, and some of the
# decoder status methods.
enc = derlite.Encoder()
enc.write(1)
enc.write(True)
enc.write(b'\x00\x42\xFE')
dec = self.around(enc, '020101 0101FF 04030042FE')
self.assertFalse(dec.eof())
self.assertEqual(dec.read_integer(), 1)
self.assertEqual(dec.read_boolean(), True)
self.assertEqual(dec.peek(), Tag.OctetString)
self.assertEqual(dec.read_octet_string(), b'\x00\x42\xFE')
self.assertEqual(dec.peek(), None)
self.assertTrue(dec.eof())
def test_simple_compound(self):
# A quick test of enter/leave
enc = derlite.Encoder()
enc.write(-128)
enc.write( [ None ] )
enc.write( [] )
enc.write(128)
dec = self.around(enc, '020180 30020500 3000 02020080')
self.assertEqual(dec.read_integer(), -128)
self.assertFalse(dec.eof())
self.assertEqual(dec.peek(), Tag.Sequence)
dec.enter(Tag.Sequence)
self.assertEqual(dec.peek(), Tag.Null)
self.assertEqual(dec.read_octet_string(Tag.Null), b'')
self.assertTrue(dec.eof())
self.assertIsNone(dec.peek())
dec.leave()
self.assertFalse(dec.eof())
dec.enter(Tag.Sequence)
self.assertTrue(dec.eof())
dec.leave()
self.assertFalse(dec.eof())
self.assertEqual(dec.read_integer(), 128)
self.assertTrue(dec.eof())
def test_implicit_tagging(self):
enc = derlite.Encoder()
enc.enter_implicit_tag(1)
enc.write(42)
enc.write(1)
enc.enter_implicit_tag(Tag(0, True, Tag.Application))
enc.enter(Tag.Sequence)
enc.write(b'foo')
enc.write(b'bar')
enc.leave()
dec = self.around(enc,
'81012A 020101 600A 0403666F6F 0403626172')
self.assertEqual(dec.peek(), Tag(1, False, Tag.Context))
self.assertRaises(DecodeError, dec.read_integer)
dec.enter_implicit_tag(Tag(1, False, Tag.Context), Tag.Integer)
self.assertEqual(dec.read_integer(), 42)
self.assertIsNone(dec.enter_implicit_tag(1, Tag.Integer, optional=True))
self.assertEqual(dec.read_integer(), 1)
self.assertIsNone(dec.enter(Tag.Sequence, optional=True))
dec.enter_implicit_tag(Tag(0, True, Tag.Application),
Tag.Sequence)
dec.enter(Tag.Sequence)
self.assertEqual(dec.read_type(bytes), b'foo')
self.assertEqual(dec.read_octet_string(), b'bar')
self.assertRaises(DecodeError,
lambda: dec.enter_implicit_tag(Tag(2, False, Tag.Context),
Tag.Sequence))
dec.leave()
self.assertTrue(dec.eof())
def test_decode_failures(self):
# An object goes past the end
dec = derlite.Decoder(bytes.fromhex('020201'))
self.assertRaises(DecodeError, dec.read_integer)
# A sequence goes past the end
dec = derlite.Decoder(bytes.fromhex('020101 3004 020101'))
self.assertEqual(dec.read_integer(), 1)
self.assertRaises(DecodeError, dec.enter)
# An object goes past the end of its container
dec = derlite.Decoder(bytes.fromhex('020101 3003 02020100'))
self.assertEqual(dec.read_integer(), 1)
self.assertEqual(dec.enter(), Tag.Sequence)
self.assertRaises(DecodeError, dec.read_integer)
dec = derlite.Decoder(bytes.fromhex('020101 3001 02020100'))
self.assertEqual(dec.read_integer(), 1)
self.assertEqual(dec.enter(), Tag.Sequence)
self.assertRaises(DecodeError, dec.read_integer)
dec = derlite.Decoder(bytes.fromhex('0282000101 3003 0282000101'))
self.assertEqual(dec.read_integer(), 1)
self.assertEqual(dec.enter(), Tag.Sequence)
self.assertRaises(DecodeError, dec.read_integer)
def test_integers(self):
# Test correct encoding of integers of various widths
enc = derlite.Encoder()
enc.write(-129)
enc.write(128)
enc.write(-128)
enc.write(127)
enc.write(-1)
enc.write(0)
enc.write(1)
enc.write(-127)
enc.write(-256)
enc.write(255)
dec = self.around(enc,
'0202FF7F 02020080 020180 02017F 0201FF 020100 020101 020181 0202FF00 020200FF')
self.assertEqual(dec.read_integer(), -129)
self.assertEqual(dec.read_integer(), 128)
self.assertEqual(dec.read_integer(), -128)
self.assertEqual(dec.read_integer(), 127)
self.assertEqual(dec.read_integer(), -1)
self.assertEqual(dec.read_integer(), 0)
self.assertEqual(dec.read_integer(), 1)
self.assertEqual(dec.read_integer(), -127)
self.assertEqual(dec.read_integer(), -256)
self.assertEqual(dec.read_integer(), 255)
def test_tagobject(self):
self.assertEqual(repr(Tag.Sequence),
'Tag.Sequence')
self.assertEqual(repr(Tag(0, constructed=True, cls=Tag.Application)),
'Tag(0, constructed=True, cls=Tag.Application)')
def test_tagforms(self):
# Test encoding of tags
enc = derlite.Encoder()
# Tag numbers >= 31 have a different encoding
enc.enter(31)
enc.write_tagged_bytes(Tag(16, constructed=False, cls=Tag.Application),
b' ')
# Object lengths >= 128 bytes have a different encoding
ablob = b'ABCDE' * 100
enc.write_tagged_bytes(Tag(1000, constructed=False, cls=Tag.Context),
ablob)
enc.leave()
dec = self.around(enc,
'BF1F820202 5006202020202020' +
'9F87688201F4 ' +
( '4142434445' * 100 ))
self.assertIsNone(dec.enter(32, optional=True))
self.assertEqual(dec.enter(31),
Tag(31, constructed=True, cls=Tag.Context))
self.assertRaises(derlite.DecodeError, dec.read_octet_string)
self.assertEqual(dec.read_octet_string(Tag(16, cls=Tag.Application)),
b' ')
self.assertEqual(dec.read_octet_string(Tag(1000, cls=Tag.Context)),
ablob)
self.assertTrue(dec.eof())
dec.leave()
self.assertTrue(dec.eof())
self.assertRaises(derlite.Error, dec.leave)
def test_set1(self):
# Simple test of set encoding: the DER encoder is responsible
# for ensuring the element ordering required by DER
enc = derlite.Encoder()
enc.write( set([ -1, 0, 1 ]) )
dec = self.around(enc, '3109 020100 020101 0201FF')
dec.enter(Tag.Set)
self.assertEqual(dec.read_integer(), 0)
self.assertEqual(dec.read_integer(), 1)
self.assertEqual(dec.read_integer(), -1)
dec.leave()
self.assertTrue(dec.eof())
def test_set2(self):
# More set tests
enc = derlite.Encoder()
enc.write_set( bytes([ 1, b ]) for b in (4, 2, 8) )
dec = self.around(enc, '310C 04020102 04020104 04020108')
enc = derlite.Encoder()
enc.write_set( [ None, False, [], Oid((1, 10)), True ] )
dec = self.around(enc, '310D 010100 0101FF 0500 060132 3000')
def test_set3(self):
enc = derlite.Encoder()
enc.write_set( [ 1, 0 ],
pythontype=derlite.ExplicitlyTagged(0, int))
self.assertEqual(enc.getvalue(),
bytes.fromhex('310A A003020100 A003020101'))
def test_strings_1(self):
# Test decoding some strings.
# IA5String and UTF8String
dec = derlite.Decoder(b'\x16\x06flambe\x0c\x07flamb\xC3\xA9')
self.assertEqual(dec.read_string(), 'flambe')
self.assertEqual(dec.read_string(), 'flamb\u00E9')
self.assertTrue(dec.eof())
# PrintableString and (simple) GeneralString.
dec = derlite.Decoder(b'\x13\x05hello\x1B\x06world!')
self.assertEqual(dec.read_string(), 'hello')
self.assertEqual(dec.read_string(), 'world!')
self.assertTrue(dec.eof())
def test_strings_teletex_ascii(self):
dec = derlite.Decoder(b'\x14\x1FSome parts of T.61 match ASCII.')
self.assertEqual(dec.read_string(), 'Some parts of T.61 match ASCII.')
self.assertTrue(dec.eof())
@unittest.skipUnless(teletex_available,
"Teletex/T.61 codec is not available.")
def test_strings_teletex(self):
dec = derlite.Decoder(b'\x14\x03See\x14\x07\xECe Olde' +
b'\x14\x28\xABM\xC8uller, Fran\xCBcois, \xEArsted, l\'H\xC3opital\xBB' +
b'\x14\x03(\xA4)')
self.assertEqual(dec.read_string(), 'See')
self.assertEqual(dec.read_string(), '\u00DEe Olde')
self.assertEqual(dec.read_string(), '\u00ABM\u00FCller, Fran\u00E7ois, \u0152rsted, l\'H\u00F4pital\u00BB')
self.assertEqual(dec.read_string(), '($)')
self.assertTrue(dec.eof())
def test_bad_usage(self):
# Trying to getvalue() when we don't have a complete object
enc = derlite.Encoder()
enc.enter(Tag(1, True, Tag.Application))
enc.write(True)
self.assertRaises(derlite.Error,
enc.getvalue)
# Trying to leave() more than we enter()
enc = derlite.Encoder()
enc.enter(2)
enc.write(1)
enc.leave()
enc.write(2)
self.assertRaises(derlite.Error,
enc.leave)
# Trying to write an unsupported type
enc = derlite.Encoder()
enc.write(b'foo')
self.assertRaises(TypeError,
enc.write, u'bar')
class TestDatetimes (unittest.TestCase):
def roundtrip(self, dt, der):
enc = derlite.Encoder()
enc.write(dt)
got = enc.getvalue()
self.assertEqual(got, der)
dec = derlite.Decoder(der)
got = dec.read_generalizedtime()
self.assertEqual(dt, got)
def test_naive(self):
self.roundtrip(datetime.datetime.utcfromtimestamp(0),
b'\x18\x0e19700101000000' )
self.roundtrip(datetime.datetime.utcfromtimestamp(86460.75),
b'\x18\x1119700102000100.75' )
def test_utc(self):
utc = datetime.timezone.utc
self.roundtrip(datetime.datetime.fromtimestamp(0, utc),
b'\x18\x0f19700101000000Z' )
self.roundtrip(datetime.datetime.fromtimestamp(1.25, utc),
b'\x18\x1219700101000001.25Z' )
dec = derlite.Decoder( b'\x18\x0d198002010000Z' +
b'\x18\x0b1980020100Z' )
self.assertEqual( datetime.datetime(1980, 2, 1, 0, 0, 0,
tzinfo=utc),
dec.read_generalizedtime())
self.assertEqual( datetime.datetime(1980, 2, 1, 0, 0, 0,
tzinfo=utc),
dec.read_generalizedtime())
def test_tzs(self):
london = datetime.timezone(datetime.timedelta(0, 0))
newfoundland = datetime.timezone(datetime.timedelta(hours = -3, minutes = -30))
newcaledonia = datetime.timezone(datetime.timedelta(hours = 11))
self.roundtrip(datetime.datetime(1980, 2, 29, 6, 45, 12,
tzinfo=newfoundland),
b'\x18\x1319800229064512-0330')
self.roundtrip(datetime.datetime(1988, 3, 1, 0, 5, 15,
microsecond=368100,
tzinfo=london),
b'\x18\x1419880301000515.3681Z')
self.roundtrip(datetime.datetime(1992, 12, 31, 23, 30,
tzinfo=newcaledonia),
b'\x18\x1319921231233000+1100')
class TestOids (unittest.TestCase):
def roundtrip(self, arcs, der):
o = Oid(arcs)
self.assertEqual(o.as_der(), der)
o = Oid(der)
self.assertEqual(o.arcs(), arcs)
def test_encoding(self):
self.roundtrip( (1,2,3), b'\x06\x02\x2A\x03')
self.roundtrip( (1,2,840,10040,4,1), b'\x06\x07\x2A\x86\x48\xCE\x38\x04\x01')
self.roundtrip( (2,5,4,3), b'\x06\x03\x55\x04\x03')
def test_parses(self):
self.assertEqual(Oid( '2.5.4.3' ).as_der(),
b'\x06\x03\x55\x04\x03')
self.assertEqual(Oid( '{ 1.2.840.10040.4.1 }' ).as_der(),
b'\x06\x07\x2A\x86\x48\xCE\x38\x04\x01')
self.assertEqual(Oid( [ 3, 9 ] ).arcs(),
(3, 9))
self.assertRaises(Exception,
Oid, 42)
self.assertRaises(derlite.DecodeError,
Oid, b'\x03\x03\x55\x04\x03')
self.assertRaises(derlite.DecodeError,
lambda x: Oid(x).arcs(),
b'\x06\x02\x2A\x03\x01')
def test_misc(self):
# Creating sub-OIDs, comparison, hashing, etc.
self.assertEqual(str(Oid( (2,5,4,3) )),
'2.5.4.3')
pkcs = Oid( '1.2.840.113549.1' )
pkcs1 = pkcs + (1,)
self.assertEqual(repr(pkcs1),
'Oid((1, 2, 840, 113549, 1, 1))')
pkcs_ = Oid(b'\x06\x07*\x86H\x86\xf7\x0d\x01')
self.assertEqual(pkcs, pkcs_)
self.assertLess(pkcs_, pkcs1)
self.assertGreater(pkcs1, pkcs)
s = set()
self.assertNotIn(pkcs, s)
self.assertNotIn(pkcs_, s)
self.assertNotIn(pkcs1, s)
s.add(pkcs)
self.assertIn(pkcs, s)
self.assertIn(pkcs_, s)
self.assertNotIn(pkcs1, s)
s.add(pkcs_)
self.assertEqual(len(s), 1)
s.add(pkcs1)
self.assertEqual(len(s), 2)
self.assertIn(pkcs, s)
self.assertIn(pkcs1, s)
def test_bad_values(self):
oid = Oid('1.2.3')
self.assertRaises(TypeError,
lambda: oid + (5, 6, "bananas", 7))
# There's no encoding for a 0- or 1-element OID.
self.assertRaises(ValueError,
lambda a: Oid(a).as_der(),
(1,))
self.assertRaises(ValueError,
lambda a: Oid(a).as_der(),
())
# The first two elements have constrained ranges because
# of the way they're encoded.
self.assertRaises(ValueError,
lambda a: Oid(a).as_der(),
(1000,2,3,4))
self.assertRaises(ValueError,
lambda a: Oid(a).as_der(),
(1,50,3))
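# Why the first two arcs are range-constrained (explanatory note, not
# from the original tests): DER packs them into a single subidentifier
# as 40*arc1 + arc2, so arc1 must be 0..2 and, for arc1 < 2, arc2 < 40.
# E.g. Oid((2,5,4,3)) starts with 40*2 + 5 = 85 = 0x55, matching the
# b'\x06\x03\x55\x04\x03' encoding checked in test_encoding above.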
class BitSetTest (unittest.TestCase):
def roundtrip(self, fs, flags, der):
der = bytes.fromhex(der)
self.assertEqual(fs.make_der(flags, tl=True), der)
dec = derlite.Decoder(der)
self.assertEqual(fs.decode_der(dec), flags)
def test(self):
fs = derlite.OptionFlagSet('foo',
( ('bob', 1),
('carol', 2),
('ted', 3),
('alice', 5) ))
self.roundtrip(fs, set(), '03 01 00')
self.roundtrip(fs, set(['bob']), '03 02 06 40')
self.roundtrip(fs, set(['carol']), '03 02 05 20')
self.roundtrip(fs, set(['bob', 'carol', 'ted', 'alice']), '03 02 02 74')
self.roundtrip(fs, set(['alice']), '03 02 02 04')
self.roundtrip(fs, set(['alice', 7]), '03 02 00 05')
self.roundtrip(fs, set(['alice', 8]), '03 03 07 0480')
@staticmethod
def expected_padding(bitwidth):
return 7 - ((bitwidth+7) % 8)
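# Worked examples of the padding arithmetic (comments only):
#   bitwidth=1 -> occupies 1 byte,    7 - (8 % 8)  = 7 unused bits
#   bitwidth=8 -> exactly 1 byte,     7 - (15 % 8) = 0 unused bits
#   bitwidth=9 -> spills into byte 2, 7 - (16 % 8) = 7 unused bits
# A DER BIT STRING prefixes its contents with this unused-bit count.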
def test_widths(self):
for dw in (1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 16, 17, 30, 31, 32, 33):
fs = derlite.OptionFlagSet('dw',
( ('a', 1),
('b', dw-1),
('c', dw) ),
min_width = dw)
b0 = fs.make_der([], tl=False)
self.assertEqual(b0[0], self.expected_padding(dw))
b1 = fs.make_der([ 'a' ], tl=False)
self.assertEqual(b1[0], self.expected_padding(max(dw,2)))
self.assertEqual(b1[1], 0x40)
b2 = fs.make_der([ 'b' ], tl=False)
self.assertEqual(b2[0], self.expected_padding(dw))
self.assertEqual(b2[-1], 1 << (7 - ((dw-1)%8)))
b3 = fs.make_der([ 'c' ], tl=False)
self.assertEqual(b3[0], self.expected_padding(dw+1))
self.assertEqual(b3[-1], 1 << (7 - (dw%8)))
class TypeCombTest (unittest.TestCase):
def roundtrip(self, d, dt, der):
der = bytes.fromhex(der)
enc = derlite.Encoder()
enc.write_value_of_type(d, dt)
got = enc.getvalue()
self.assertEqual(got, der)
dec = derlite.Decoder(der)
got = dec.read_type(dt)
self.assertEqual(d, got)
def test_seqs(self):
thing = derlite.Structure(
( int,
datetime.datetime,
derlite.SequenceOf(Oid),
bytes
)
)
self.roundtrip(
( 1000, datetime.datetime(1969, 7, 20, 10, 56),
[ Oid( (1, k, 10) ) for k in (0, 3, 9) ],
b'some more stuff' ),
thing,
'3033'
'020203E8'
'180E' + bytes.hex(b'19690720105600') +
'300C'
'0602280A 06022B0A 0602310A'
'040F' + bytes.hex(b'some more stuff'))
self.roundtrip(
( -1, datetime.datetime(1234, 4, 5, 6, 7, 8),
[],
b'' ),
thing,
'3017'
'0201FF'
'180E' + bytes.hex(b'12340405060708') +
'3000'
'0400')
thing = ( int, bool, bytes )
self.roundtrip(
( -129, False, b'' ),
thing,
'0202FF7F 010100 0400')
thing = derlite.ExplicitlyTagged(
Tag(16, True, Tag.Application),
derlite.Structure(
(
derlite.SequenceOf(int),
Oid
)
)
)
self.roundtrip( ([1,2,3], Oid((1,2,3))),
thing,
'7011 '
'300F '
'3009 020101 020102 020103'
'06022A03')
def test_choice(self):
thing = derlite.SequenceOf( derlite.Choice(
(
int,
Oid,
bool,
)
))
self.roundtrip(
[],
thing,
'3000')
self.roundtrip(
[ 42 ],
thing,
'3003 02012A')
self.roundtrip(
[ Oid('1.8.10000'),
= info_row.split()
info_id = int(info_list[0])
info_var = info_list[12]
if info_id >= 0:
list_vars.append(info_var)
var_box = list(set(list_vars))
ids_info = [str(id_start), str(id_end), str(id_period)]
ids_box = '/'.join(ids_info)
return var_box, ids_box
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to create a tmp name
def create_filename_tmp(prefix='gfs_tmp_', suffix='.grib2', folder=None):
if folder is None:
folder = '/tmp'
with tempfile.NamedTemporaryFile(dir=folder, prefix=prefix, suffix=suffix, delete=False) as tmp:
temp_file_name = tmp.name
return temp_file_name
# -------------------------------------------------------------------------------------
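# Example (illustrative only): create a unique temporary grib2 filename
# for a download step; delete=False in the implementation above keeps
# the file on disk so its name can be handed to other processes.
# tmp_path = create_filename_tmp(prefix='gfs_tmp_', folder='/tmp')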
# -------------------------------------------------------------------------------------
# Method to retrieve and store data (multiprocess)
def retrieve_data_source_mp(src_data, dst_data, flag_updating=False, process_n=20, process_max=None, limit=9999):
logging.info(' ----> Downloading data in multiprocessing mode ... ')
if process_max is None:
process_max = cpu_count() - 1
if process_n > process_max:
logging.warning(' ----> Maximum number of recommended processes must be less than ' + str(process_max))
logging.warning(' ----> Set number of processes from ' + str(process_n) + ' to ' + str(process_max))
process_n = process_max
data_list = []
data_check = []
for (src_data_key, src_data_list), (dst_data_key, dst_data_list) in zip(src_data.items(), dst_data.items()):
for src_step_url, dst_step_path in zip(src_data_list, dst_data_list):
dst_step_root, dst_step_file = split(dst_step_path)
make_folder(dst_step_root)
# Update when forced by flag_updating or when the file is missing;
# an existing file with flag_updating=False is left untouched
# (behavior-preserving rewrite of the original three-branch check).
if flag_updating or (not exists(dst_step_path)):
flag_updating = True
if flag_updating:
data_list.append([src_step_url, dst_step_path])
data_check.append([src_step_url, dst_step_path])
if len(data_list)>limit:
for i in range(0,len(data_list),limit):
max_available = min(i + limit, len(data_list))
chunk = data_list[i:i + limit]
with Pool(processes=process_n, maxtasksperchild=1) as process_pool:
_ = process_pool.map(request_data_source, chunk, chunksize=1)
process_pool.close()
process_pool.join()
logging.info(' ----> Wait 60 seconds for next requests ...')
time.sleep(60)
if max_available<len(data_list):
logging.info(' ----> ' + str(int(100*max_available/len(data_list))) + ' % complete...')
logging.info(' ----> Continue with next chunk of requests...')
else:
with Pool(processes=process_n, maxtasksperchild=1) as process_pool:
_ = process_pool.map(request_data_source, data_list, chunksize=1)
process_pool.close()
process_pool.join()
find_data_corrupted(data_check)
logging.info(' ----> Downloading data in multiprocessing mode ... DONE')
# -------------------------------------------------------------------------------------
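# Illustrative call sketch (hypothetical structures, not from the source):
# both arguments are dicts keyed by data type, each mapping to parallel
# lists of source URLs and destination paths, matching the zip() above.
#
# src_data = {'surface': ['https://example.org/gfs_f000.grib2',
#                         'https://example.org/gfs_f003.grib2']}
# dst_data = {'surface': ['/data/gfs/gfs_f000.grib2',
#                         '/data/gfs/gfs_f003.grib2']}
# retrieve_data_source_mp(src_data, dst_data, flag_updating=True, process_n=4)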
# ------------------------------------------------------------------------------------
# Method to find corrupted or undersized downloads and retry them
def find_data_corrupted(data_list, data_perc_min=5, data_size_min=100000):
logging.info(' -----> Checking for corrupted or unavailable data ... ')
data_size = []
idx_nodata = []
for dst_id, dst_step_path in enumerate(data_list):
if os.path.exists(dst_step_path[1]):
dst_step_size = os.path.getsize(dst_step_path[1])
else:
dst_step_size = 0
idx_nodata.append(dst_id)
data_size.append(dst_step_size)
data_size = np.asarray(data_size)
data_p_min = np.percentile(data_size, data_perc_min)
idx_false = np.where(data_size < min([data_size_min, data_p_min]))[0]
idx_nodata = np.asarray(idx_nodata, int)
idx_retry = np.unique(np.concatenate((idx_false, idx_nodata), axis=0))
for idx_step in idx_retry:
data_false = data_list[idx_step]
if os.path.exists(data_false[1]):
os.remove(data_false[1])
logging.info(' ------> Downloading data ' + split(data_false[1])[1] + ' ... ')
request_data_source(data_false)
logging.info(' ------> Downloading data ' + split(data_false[1])[1] + ' ... DONE')
logging.info(' -----> Checking for corrupted or unavailable data ... DONE')
# ------------------------------------------------------------------------------------
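# Worked example of the retry criterion above (comments only): with
# data_perc_min=5 and data_size_min=100000, sizes [0, 80000, 2000000,
# 2100000] give a 5th percentile of 12000, so the threshold is
# min(100000, 12000) = 12000; only the 0-byte (missing) file is retried,
# while the 80 kB file passes.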
# -------------------------------------------------------------------------------------
# Method to request data using a source url and a destination filename
def request_data_source(data_list):
logging.info(' :: Http request for downloading: ' + data_list[0] + ' ... ')
logging.info(' :: Outcome data will be dumped in: ' + split(data_list[1])[1] + ' ... ')
try:
urllib.request.urlretrieve(data_list[0], filename=data_list[1])
logging.info(' :: Outcome data will be dumped in: ' + split(data_list[1])[1] + ' ... DONE')
logging.info(' :: Http request for downloading: ' + data_list[0] + ' ... DONE')
return True
except IOError:
logging.warning(' :: Outcome data will be dumped in: ' + split(data_list[1])[1] + ' ... FAILED')
logging.error(' :: Http request for downloading: ' + data_list[0] + ' ... FAILED. IO error.')
raise IOError(' :: Http request for downloading: ' + data_list[0] + ' ... FAILED. Data Not available on the server.')
except ConnectionResetError:
logging.warning(' :: Outcome data will be dumped in: ' + split(data_list[1])[1] + ' ... FAILED')
logging.error(' :: Http request for downloading: ' + data_list[0] + ' ... FAILED. Connection Reset error')
raise ConnectionResetError(' :: Http request for downloading: ' + data_list[0] + ' ... FAILED. Connection Reset error')
except ConnectionAbortedError:
logging.warning(' :: Outcome data will be dumped in: ' + split(data_list[1])[1] + ' ... FAILED')
logging.error(' :: Http request for downloading: ' + data_list[0] + ' ... FAILED. Connection Aborted error.')
raise ConnectionAbortedError(' :: Http request for downloading: ' + data_list[0] + ' ... FAILED. Connection Aborted error.')
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to retrieve and store data (sequential)
def retrieve_data_source_seq(src_data, dst_data, flag_updating=False, limit=9999):
logging.info(' ----> Downloading data in sequential mode ... ')
data_list = []
data_check = []
hit_count = 0
for (src_data_key, src_data_list), (dst_data_key, dst_data_list) in zip(src_data.items(), dst_data.items()):
logging.info(' -----> DataType: ' + src_data_key + ' ... ')
for src_step_url, dst_step_path in zip(src_data_list, dst_data_list):
dst_step_root, dst_step_file = split(dst_step_path)
make_folder(dst_step_root)
logging.info(' ------> Save data in file: ' + str(dst_step_file) + ' ... ')
# Update when forced by flag_updating or when the file is missing;
# an existing file with flag_updating=False is left untouched
# (behavior-preserving rewrite of the original three-branch check).
if flag_updating or (not exists(dst_step_path)):
flag_updating = True
if flag_updating:
request_data_source([src_step_url, dst_step_path])
hit_count += 1
data_list.append([src_step_url, dst_step_path])
logging.info(' -------> Save data in file: ' + str(dst_step_file) + ' ... DONE')
if hit_count == limit:
logging.info(' ----> Wait 60 seconds for next requests ...')
time.sleep(60)
hit_count = 0
logging.info(' ----> Continue with next chunk of requests...')
else:
logging.info(' ------> Save data in file: ' + str(dst_step_file) +
' ... SKIPPED. File saved previously')
data_check.append([src_step_url, dst_step_path])
logging.info(' -----> DataType: ' + src_data_key + ' ... DONE')
find_data_corrupted(data_check)
logging.info(' ----> Downloading data in sequential mode ... DONE')
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to create data outcome list
def set_data_outcome(time_run, ens_member_num, data_def, geo_def, ancillary_def, tags_template,
type_data=None, flag_updating=True):
if type_data is None:
type_data = ["surface"]
folder_list = data_def['folder']
filename_list = data_def['filename']
lon_right = geo_def['lon_right']
lon_left = geo_def['lon_left']
lat_top = geo_def['lat_top']
lat_bottom = geo_def['lat_bottom']
domain = ancillary_def['domain']
ens_member = str(ens_member_num).zfill(2)
hour_run = time_run.hour
datetime_run = time_run.to_pydatetime()
file_ws = {}
for folder_raw, filename_raw, type_step in zip(folder_list, filename_list, type_data):
file_list = []
tags_values_step = {"domain": domain,
"outcome_sub_path_time": datetime_run, "outcome_datetime": datetime_run,
"run_hour": hour_run, "run_step": 0,
"run_datetime": datetime_run,
"run_lon_right": str(lon_right),
"run_lon_left": str(lon_left),
"run_lat_bottom": str(lat_bottom),
"run_lat_top": str(lat_top),
"ens_member" : ens_member}
folder_step = fill_tags2string(folder_raw, tags_template, tags_values_step)
filename_step = fill_tags2string(filename_raw, tags_template, tags_values_step)
path_step = join(folder_step, filename_step)
if flag_updating:
if os.path.exists(path_step):
os.remove(path_step)
if not os.path.exists(folder_step):
make_folder(folder_step)
file_list.append(path_step)
file_ws[type_step] = file_list
return file_ws
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to create data ancillary list
def set_data_ancillary(time_run, time_range, ens_member_num, data_def, geo_def, ancillary_def, tags_template,
type_data=None, anl_include=False):
if type_data is None:
type_data = ["surface"]
folder_list = data_def['folder']
filename_list = data_def['filename']
lon_right = geo_def['lon_right']
lon_left = geo_def['lon_left']
lat_top = geo_def['lat_top']
lat_bottom = geo_def['lat_bottom']
domain = ancillary_def['domain']
ens_member = str(ens_member_num).zfill(2)
hour_run = time_run.hour
datetime_run = time_run.to_pydatetime()
file_ws = {}
for folder_raw, filename_raw, type_step in zip(folder_list, filename_list, type_data):
file_list = []
for time_id, time_step in enumerate(time_range):
if not anl_include:
time_id = time_id + 1
datetime_step = time_step.to_pydatetime()
tags_values_step = {"domain": domain,
"ancillary_sub_path_time": datetime_run, "ancillary_datetime": datetime_step,
"run_hour": hour_run, "run_step": time_id,
"run_datetime": datetime_run,
"run_lon_right": str(lon_right),
"run_lon_left": str(lon_left),
"run_lat_bottom": str(lat_bottom),
"run_lat_top": str(lat_top),
"ens_member": ens_member}
folder_step = fill_tags2string(folder_raw, tags_template, tags_values_step)
filename_step = fill_tags2string(filename_raw, tags_template, tags_values_step)
path_step = join(folder_step, filename_step)
file_list.append(path_step)
file_ws[type_step] = file_list
return file_ws
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to create data source list
def set_data_source(time_run, time_range, ens_member_num, data_def, geo_def, ancillary_def, tags_template,
type_data=None, anl_include=False):
if type_data is None:
type_data = ["surface"]
url_root_list = data_def['url_root']
url_file_list = data_def['url_file']
url_lev_list = data_def['url_lev']
url_vars_list = data_def['url_vars']
url_bbox_list = data_def['url_bbox']
url_loc_list = data_def['url_loc']
lon_right = geo_def['lon_right']
lon_left = geo_def['lon_left']
lat_top = geo_def['lat_top']
lat_bottom = geo_def['lat_bottom']
domain = ancillary_def['domain']
ens_member = str(ens_member_num).zfill(2)
frc_steps = (time_range - time_run).total_seconds()/3600
hour_run = time_run.hour
datetime_run = time_run.to_pydatetime()
url_ws = {}
for url_root_raw, url_file_raw, url_lev_raw, url_vars_raw, url_bbox_raw, url_loc_raw, type_step in zip(
url_root_list, url_file_list, url_lev_list, url_vars_list, url_bbox_list, url_loc_list, type_data):
if url_bbox_raw is None:
url_bbox_raw = ''
url_list = []
for time_id, time_step in zip(frc_steps, time_range):
datetime_step = time_step.to_pydatetime()
tags_values_step = {"domain": domain,
"outcome_sub_path_time": datetime_run, "outcome_datetime": datetime_step,
"run_hour": hour_run, "run_step": int(time_id),
"run_datetime": datetime_run,
"run_lon_right": str(lon_right),
"run_lon_left": str(lon_left),
"run_lat_bottom": str(lat_bottom),
"run_lat_top": str(lat_top),
"ens_member": ens_member}
url_root_step = fill_tags2string(url_root_raw, tags_template, tags_values_step)
url_file_step = fill_tags2string(url_file_raw, tags_template, tags_values_step)
url_lev_step = fill_tags2string(url_lev_raw, tags_template, tags_values_step)
url_vars_step = fill_tags2string(url_vars_raw, tags_template, tags_values_step)
url_bbox_step = fill_tags2string(url_bbox_raw, tags_template, tags_values_step)
url_loc_step = fill_tags2string(url_loc_raw, tags_template, tags_values_step)
url_step = url_root_step + url_file_step + url_lev_step + url_vars_step + url_bbox_step + url_loc_step
url_list.append(url_step)
url_ws[type_step] = url_list
return url_ws
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to fill tags in an unfilled string (path or filename)
def fill_tags2string(string_raw, tags_format=None, tags_filling=None):
apply_tags = False
if string_raw is not None:
for tag in list(tags_format.keys()):
if tag in string_raw:
apply_tags = True
break
if apply_tags:
tags_format_tmp = deepcopy(tags_format)
for tag_key, tag_value in tags_format.items():
tag_key_tmp = '{' + tag_key + '}'
if tag_value is not None:
if tag_key_tmp in string_raw:
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import math
import torch
import torch.utils.checkpoint
from collections.abc import Sequence
from torch import nn
from torch.nn import functional as F
from transformers.utils import logging
from .. import core as qc
from ..core import utils as qu
from ..core import forward as qf
from ..core import output as qo
from ..core import attention as qa
from ..core.embed import Embeds
from ..core.ffnet import Classifier, FFNet, Masker
from ..prep.config.bert import PreTrained
log = logging.get_logger(__name__)
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...pytorch_utils import softmax_backward_data
LIST = [
"microsoft/deberta-base",
"microsoft/deberta-large",
"microsoft/deberta-xlarge",
"microsoft/deberta-base-mnli",
"microsoft/deberta-large-mnli",
"microsoft/deberta-xlarge-mnli",
]
class ContextPool(qc.Module):
def __init__(self, config):
super().__init__()
self.dense = qc.Linear(config.pooler_hidden_size, config.pooler_hidden_size)
self.drop = StableDropout(config.pooler_dropout)
self.config = config
def forward(self, hiddens):
context_token = hiddens[:, 0]
context_token = self.drop(context_token)
pooled_output = self.dense(context_token)
pooled_output = qu.activation(self.config.pooler_hidden_act)(pooled_output)
return pooled_output
@property
def output_dim(self):
return self.config.d_model
class XSoftmax(torch.autograd.Function):
@staticmethod
def forward(self, input, mask, dim):
self.dim = dim
rmask = ~(mask.bool())
output = input.masked_fill(rmask, float("-inf"))
output = torch.softmax(output, self.dim)
output.masked_fill_(rmask, 0)
self.save_for_backward(output)
return output
@staticmethod
def backward(self, grad_output):
(output,) = self.saved_tensors
inputGrad = softmax_backward_data(self, grad_output, output, self.dim, output)
return inputGrad, None, None
@staticmethod
def symbolic(g, self, mask, dim):
import torch.onnx.symbolic_helper as sym_help
from torch.onnx.symbolic_opset9 import masked_fill, softmax
mask_cast_value = g.op("Cast", mask, to_i=sym_help.cast_pytorch_to_onnx["Long"])
r_mask = g.op(
"Cast",
g.op(
"Sub", g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64)), mask_cast_value
),
to_i=sym_help.cast_pytorch_to_onnx["Byte"],
)
output = masked_fill(g, self, r_mask, g.op("Constant", value_t=torch.tensor(float("-inf"))))
output = softmax(g, output, dim)
return masked_fill(
g, output, r_mask, g.op("Constant", value_t=torch.tensor(0, dtype=torch.uint8))
)
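# Illustrative check of XSoftmax semantics (assumption, not in the
# original file): positions where mask == 0 are excluded from the
# softmax and end up with exactly zero probability.
# x = torch.zeros(1, 1, 1, 3)
# m = torch.tensor([[[[1, 1, 0]]]])
# XSoftmax.apply(x, m, -1)  # -> tensor([[[[0.5, 0.5, 0.0]]]])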
class DropoutContext(object):
def __init__(self):
self.drop = 0
self.mask = None
self.scale = 1
self.reuse_mask = True
def get_mask(input, local_context):
if not isinstance(local_context, DropoutContext):
drop = local_context
mask = None
else:
drop = local_context.drop
drop *= local_context.scale
mask = local_context.mask if local_context.reuse_mask else None
if drop > 0 and mask is None:
mask = (1 - torch.empty_like(input).bernoulli_(1 - drop)).bool()
if isinstance(local_context, DropoutContext):
if local_context.mask is None:
local_context.mask = mask
return mask, drop
class XDropout(torch.autograd.Function):
@staticmethod
def forward(ctx, input, local_ctx):
mask, drop = get_mask(input, local_ctx)
ctx.scale = 1.0 / (1 - drop)
if drop > 0:
ctx.save_for_backward(mask)
return input.masked_fill(mask, 0) * ctx.scale
else:
return input
@staticmethod
def backward(ctx, grad_output):
if ctx.scale > 1:
(mask,) = ctx.saved_tensors
return grad_output.masked_fill(mask, 0) * ctx.scale, None
else:
return grad_output, None
class StableDropout(qc.Module):
def __init__(self, drop_prob):
super().__init__()
self.drop_prob = drop_prob
self.count = 0
self.context_stack = None
def forward(self, x):
if self.training and self.drop_prob > 0:
return XDropout.apply(x, self.get_context())
return x
def clear_context(self):
self.count = 0
self.context_stack = None
def init_context(self, reuse_mask=True, scale=1):
if self.context_stack is None:
self.context_stack = []
self.count = 0
for c in self.context_stack:
c.reuse_mask = reuse_mask
c.scale = scale
def get_context(self):
if self.context_stack is not None:
if self.count >= len(self.context_stack):
self.context_stack.append(DropoutContext())
ctx = self.context_stack[self.count]
ctx.drop = self.drop_prob
self.count += 1
return ctx
else:
return self.drop_prob
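# Usage sketch (assumption, mirroring nn.Dropout semantics): StableDropout
# is an identity in eval mode and a rescaled Bernoulli mask in training,
# with optional mask reuse across calls via init_context().
# drop = StableDropout(0.1)
# drop.eval(); y = drop(x)   # y is x unchanged
# drop.train(); y = drop(x)  # ~10% of entries zeroed, rest scaled by 1/0.9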
class DebertaLayerNorm(qc.Module):
def __init__(self, size, eps=1e-12):
super().__init__()
self.weight = nn.Parameter(torch.ones(size))
self.bias = nn.Parameter(torch.zeros(size))
self.variance_epsilon = eps
def forward(self, hiddens):
input_type = hiddens.dtype
hiddens = hiddens.float()
mean = hiddens.mean(-1, keepdim=True)
variance = (hiddens - mean).pow(2).mean(-1, keepdim=True)
hiddens = (hiddens - mean) / torch.sqrt(variance + self.variance_epsilon)
hiddens = hiddens.to(input_type)
y = self.weight * hiddens + self.bias
return y
def build_relative_position(query_size, key_size, device):
q_ids = torch.arange(query_size, dtype=torch.long, device=device)
k_ids = torch.arange(key_size, dtype=torch.long, device=device)
rel_pos_ids = q_ids[:, None] - k_ids.view(1, -1).repeat(query_size, 1)
rel_pos_ids = rel_pos_ids[:query_size, :]
rel_pos_ids = rel_pos_ids.unsqueeze(0)
return rel_pos_ids
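# Worked example (comments only): build_relative_position(3, 3, device)
# returns query-minus-key distances with a leading batch dimension:
# tensor([[[ 0, -1, -2],
#          [ 1,  0, -1],
#          [ 2,  1,  0]]])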
@torch.jit.script
def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
return c2p_pos.expand(
[query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)]
)
@torch.jit.script
def p2c_dynamic_expand(c2p_pos, query_layer, key_layer):
return c2p_pos.expand(
[query_layer.size(0), query_layer.size(1), key_layer.size(-2), key_layer.size(-2)]
)
@torch.jit.script
def pos_dynamic_expand(pos_index, p2c_att, key_layer):
return pos_index.expand(p2c_att.size()[:2] + (pos_index.size(-2), key_layer.size(-2)))
class DebertaEmbeddings(qc.Module):
def __init__(self, config):
super().__init__()
PAD = getattr(config, "PAD", 0)
self.d_embed = getattr(config, "d_embed", config.d_model)
self.word_embeddings = qc.Embed(config.s_vocab, self.d_embed, padding_idx=PAD)
self.position_biased_input = getattr(config, "position_biased_input", True)
if not self.position_biased_input:
self.position_embeddings = None
else:
self.position_embeddings = qc.Embed(config.n_pos, self.d_embed)
if config.n_typ > 0:
self.token_type_embeddings = qc.Embed(config.n_typ, self.d_embed)
if self.d_embed != config.d_model:
self.embed_proj = qc.Linear(self.d_embed, config.d_model, bias=False)
self.norm = DebertaLayerNorm(config.d_model, config.eps)
self.drop = StableDropout(config.drop)
self.config = config
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer("position_ids", torch.arange(config.n_pos).expand((1, -1)))
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, mask=None, inputs_embeds=None
):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if token_type_ids is None:
token_type_ids = torch.zeros(
input_shape, dtype=torch.long, device=self.position_ids.device
)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
if self.position_embeddings is not None:
position_embeddings = self.position_embeddings(position_ids.long())
else:
position_embeddings = torch.zeros_like(inputs_embeds)
embeddings = inputs_embeds
if self.position_biased_input:
embeddings += position_embeddings
if self.config.n_typ > 0:
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings += token_type_embeddings
if self.d_embed != self.config.d_model:
embeddings = self.embed_proj(embeddings)
embeddings = self.norm(embeddings)
if mask is not None:
if mask.dim() != embeddings.dim():
if mask.dim() == 4:
mask = mask.squeeze(1).squeeze(1)
mask = mask.unsqueeze(2)
mask = mask.to(embeddings.dtype)
embeddings = embeddings * mask
embeddings = self.drop(embeddings)
return embeddings
class Model(PreTrained):
def __init__(self, config):
super().__init__(config)
self.embeddings = DebertaEmbeddings(config)
self.encoder = Encoder(config)
self.z_steps = 0
self.config = config
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = (
output_attentions if output_attentions is not None else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
embedding_output = self.embeddings(
input_ids=input_ids,
token_type_ids=token_type_ids,
position_ids=position_ids,
mask=attention_mask,
inputs_embeds=inputs_embeds,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask,
output_hidden_states=True,
output_attentions=output_attentions,
return_dict=return_dict,
)
encoded_layers = encoder_outputs[1]
if self.z_steps > 1:
hiddens = encoded_layers[-2]
layers = [self.encoder.layer[-1] for _ in range(self.z_steps)]
query_states = encoded_layers[-1]
rel_embeddings = self.encoder.get_rel_embedding()
attention_mask = self.encoder.get_attention_mask(attention_mask)
rel_pos = self.encoder.get_rel_pos(embedding_output)
for layer in layers[1:]:
query_states = layer(
hiddens,
attention_mask,
output_attentions=False,
query_states=query_states,
relative_pos=rel_pos,
rel_embeddings=rel_embeddings,
)
encoded_layers.append(query_states)
sequence_output = encoded_layers[-1]
if not return_dict:
return (sequence_output,) + encoder_outputs[(1 if output_hidden_states else 2) :]
return qo.Base(
y=sequence_output,
hiddens=encoder_outputs.hiddens if output_hidden_states else None,
attns=encoder_outputs.attns,
)
class ForMasked(PreTrained):
def __init__(self, **kw):
super().__init__(**kw)
cfg = self.get_cfg(kw)
self.model = Model(**kw)
self.proj = Masker(cfg.d_embed, **kw)
forward = qf.forward_masked
class ForSeqClassifier(PreTrained):
def __init__(self, **kw):
super().__init__(**kw)
self.get_cfg(kw)
self.model = Model(**kw)
self.proj = Classifier(**kw)
forward = qf.forward_seq
class ForTokClassifier(PreTrained):
def __init__(self, **kw):
super().__init__(**kw)
self.get_cfg(kw)
self.model = Model(**kw)
self.proj = Classifier(**kw)
forward = qf.forward_tok
class ForSeqClassifier(PreTrained):  # NOTE: second definition with this name; it shadows the ForSeqClassifier declared above
def __init__(self, config):
super().__init__(config)
n_labels = getattr(config, "n_labels", 2)
self.n_labels = n_labels
self.deberta = Model(config)
self.pooler = ContextPool(config)
output_dim = self.pooler.output_dim
self.classifier = qc.Linear(output_dim, n_labels)
drop_out = getattr(config, "cls_dropout", None)
drop_out = self.config.drop if drop_out is None else drop_out
self.drop = StableDropout(drop_out)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.deberta(
input_ids,
token_type_ids=token_type_ids,
attention_mask=attention_mask,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
encoder_layer = outputs[0]
pooled_output = self.pooler(encoder_layer)
pooled_output = self.drop(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.n_labels == 1:
# regression task
loss_fn = nn.MSELoss()
logits = logits.view(-1).to(labels.dtype)
loss = loss_fn(logits, labels.view(-1))
elif labels.dim() == 1 or labels.size(-1) == 1:
label_index = (labels >= 0).nonzero()
labels = labels.long()
if label_index.size(0) > 0:
labeled_logits = torch.gather(
logits, 0, label_index.expand(label_index.size(0), logits.size(1))
)
labels = torch.gather(labels, 0, label_index.view(-1))
loss_fct = CrossEntropyLoss()
loss = loss_fct(
labeled_logits.view(-1, self.n_labels).float(), labels.view(-1)
)
else:
loss = torch.tensor(0).to(logits)
else:
log_softmax = nn.LogSoftmax(-1)
loss = -((log_softmax(logits) * labels).sum(-1)).mean()
elif self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.n_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
# -*- coding: utf-8 -*-
# pylint: disable=too-many-lines
"""
Tools for parsing QE PW input files.
"""
import re
from typing import Tuple
import numpy as np
from .. import CONSTANTS
from ..exceptions import ParsingError, InputValidationError
from .._qe_version import parse_version
RE_FLAGS = re.M | re.X | re.I
__all__ = tuple() # type: Tuple[str, ...]
class _BaseInputFile:
"""
Class used for parsing Quantum Espresso pw.x input files and exposing the parsed info.
Members:
* ``namelists``:
A nested dictionary of the namelists and their key-value
pairs. The namelists will always be upper-case keys, while the parameter
keys will always be lower-case.
For example::
{"CONTROL": {"calculation": "bands",
"prefix": "al",
"pseudo_dir": "./pseudo",
"outdir": "./out"},
"ELECTRONS": {"diagonalization": "cg"},
"SYSTEM": {"nbnd": 8,
"ecutwfc": 15.0,
"celldm(1)": 7.5,
"ibrav": 2,
"nat": 1,
"ntyp": 1}
}
* ``atomic_positions``:
A dictionary with
* units: the units of the positions (always lower-case) or None
* names: list of the atom names (e.g. ``'Si'``, ``'Si0'``,
``'Si_0'``)
* positions: list of the [x, y, z] positions
* fixed_coords: list of [x, y, z] (bools) of the force modifications
(**Note:** True <--> Fixed, as defined in the
``BasePwCpInputGenerator._if_pos`` method)
For example::
{'units': 'bohr',
'names': ['C', 'O'],
'positions': [[0.0, 0.0, 0.0],
[0.0, 0.0, 2.5]]
'fixed_coords': [[False, False, False],
[True, True, True]]}
* ``cell_parameters``:
A dictionary (if CELL_PARAMETERS is present; else: None) with
* units: the units of the lattice vectors (always lower-case) or
None
* cell: 3x3 list with lattice vectors as rows
For example::
{'units': 'angstrom',
'cell': [[16.9, 0.0, 0.0],
[-2.6, 8.0, 0.0],
[-2.6, -3.5, 7.2]]}
* ``k_points``:
A dictionary containing
* type: the type of kpoints (always lower-case)
* points:
- if type != 'automatic': an Nx3 list of the kpoints
(will not be present if type = 'gamma')
- if type == 'automatic': a 1x3 list of the number of
equally-spaced points in each direction of the Brillouin zone,
as in Monkhorst-Pack grids
* weights: a 1xN list of the kpoint weights (will not be present if
type = 'gamma' or type = 'automatic')
* offset: a 1x3 list of the grid offsets in each direction of the
Brillouin zone (only present if type = 'automatic')
(**Note:** The offset value for each direction will be *one of*
``0.0`` [no offset] *or* ``0.5`` [offset by half a grid step].
This differs from the Quantum Espresso convention, where an offset
value of ``1`` corresponds to a half-grid-step offset, but adheres
to the current AiiDA convention.)
Examples::
{'type': 'crystal',
'points': [[0.125, 0.125, 0.0],
[0.125, 0.375, 0.0],
[0.375, 0.375, 0.0]],
'weights': [1.0, 2.0, 1.0]}
{'type': 'automatic',
'points': [8, 8, 8],
'offset': [0.0, 0.5, 0.0]}
{'type': 'gamma'}
* ``atomic_species``:
A dictionary with
* names: list of the atom names (e.g. 'Si', 'Si0', 'Si_0') (case
as-is)
* masses: list of the masses of the atoms in 'names'
* pseudo_file_names: list of the pseudopotential file names for the
atoms in 'names' (case as-is)
Example::
{'names': ['Li', 'O', 'Al', 'Si'],
'masses': [6.941, 15.9994, 26.98154, 28.0855],
'pseudo_file_names': ['Li.pbe-sl-rrkjus_psl.1.0.0.UPF',
'O.pbe-nl-rrkjus_psl.1.0.0.UPF',
'Al.pbe-nl-rrkjus_psl.1.0.0.UPF',
'Si.pbe-nl-rrkjus_psl.1.0.0.UPF']}
"""
def __init__(self,
content,
*,
qe_version=None,
validate_species_names=True):
"""
Parse the input's namelists and cards to create attributes of the instance.
:param content: A single string containing the content file's text.
:type content: str
:param qe_version: A string defining which version of QuantumESPRESSO
the input file is used for. This is used in cases where different
QE versions handle the input differently.
If no version is specified, it will default to the latest
implemented version.
The string must comply with the PEP440 versioning scheme.
Valid version strings are e.g. '6.5', '6.4.1', '6.4rc2'.
:type qe_version: Optional[str]
:param validate_species_names: A boolean flag (default: True) to enable
the consistency check between atom names and species names inferred
from the pseudopotential file name.
:type validate_species_names: bool
:raises TypeError: if ``content`` is not a string.
:raises qe_tools.utils.exceptions.ParsingError: if there are issues
parsing the content.
"""
if not isinstance(content, str):
raise TypeError("Unknown type for input 'content': {}".format(
type(content)))
self._input_txt = content
self._qe_version = parse_version(qe_version)
# Check that content is not empty.
if len(self._input_txt.strip()) == 0:
raise ParsingError('The content provided was empty!')
# Convert all types of newlines to '\n'
self._input_txt = '\n'.join(self._input_txt.splitlines())
# Add a newline, as a partial fix to #15
self._input_txt += "\n"
# Parse the namelists.
self.namelists = _parse_namelists(self._input_txt)
# Parse the ATOMIC_POSITIONS card.
self.atomic_positions = parse_atomic_positions(self._input_txt)
# Parse the CELL_PARAMETERS card.
self.cell_parameters = _parse_cell_parameters(self._input_txt)
# Parse the ATOMIC_SPECIES card.
self.atomic_species = _parse_atomic_species(
self._input_txt, validate_species_names=validate_species_names)
self.structure = _parse_structure(
txt=self._input_txt,
namelists=self.namelists,
atomic_positions=self.atomic_positions,
atomic_species=self.atomic_species,
cell_parameters=self.cell_parameters,
qe_version=self._qe_version)
def _str2val(valstr):
"""
Return a python value by converting valstr according to f90 syntax.
:param valstr: String representation of the variable to be converted.
(e.g. '.true.')
:type valstr: str
:return: A python variable corresponding to valstr.
:rtype: bool or float or int or str
:raises: ValueError: if a suitable conversion of ``valstr`` cannot be found.
"""
# Define regular expression for matching floats.
float_re = re.compile(
r"""
[-+]? # optional sign
(?: # either
\d*[\.]\d+ # 10.53 or .53
| # or
\d+[\.]?\d* ) # 10.53 or 10. or 10
(?:[dEeE][-+]?[0-9]+)? # optional exponent
""", re.X)
# Strip any white space characters before analyzing.
valstr = valstr.strip()
# Define a tuple of regular expressions to match and their corresponding
# conversion functions.
re_fn_tuple = ((re.compile(r"[.](true|t)[.]", re.I),
lambda s: True), (re.compile(r"[.](false|f)[.]",
re.I), lambda s: False),
(float_re,
lambda s: float(s.replace('d', 'e').replace('D', 'E'))),
(re.compile(r"[-+]?\d+$"),
int), (re.compile(r"""['"].+['"]"""),
lambda s: str(s.strip("\'\""))))
# Convert valstr to a value.
val = None
for regex, conversion_fn in re_fn_tuple:
# If valstr matches the regular expression, convert it with
# conversion_fn.
if regex.match(valstr):
try:
val = conversion_fn(valstr)
except ValueError as error:
raise ValueError('Error converting {} to a value'.format(
repr(valstr))) from error
if val is None:
raise ValueError('Unable to convert {} to a python variable.\n'
'NOTE: Support for algebraic expressions is not yet '
'implemented.'.format(repr(valstr)))
return val
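# Examples of the conversion rules above (comments only; note that the
# int pattern is tried after the float pattern and wins for plain digits):
#   _str2val('.true.')   -> True
#   _str2val('1.0d-4')   -> 0.0001   (f90 'd' exponent mapped to 'e')
#   _str2val('42')       -> 42       (int, not 42.0)
#   _str2val("'bands'")  -> 'bands'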
def _parse_namelists(txt):
"""
Parse txt to extract a dictionary of the namelist info.
:param txt: A single string containing the QE input text to be parsed.
:type txt: str
:returns:
A nested dictionary of the namelists and their key-value pairs. The
namelists will always be upper-case keys, while the parameter keys will
always be lower-case.
For example::
{"CONTROL": {"calculation": "bands",
"prefix": "al",
"pseudo_dir": "./pseudo",
"outdir": "./out"},
"ELECTRONS": {"diagonalization": "cg"},
"SYSTEM": {"nbnd": 8,
"ecutwfc": 15.0,
"celldm(1)": 7.5,
"ibrav": 2,
"nat": 1,
"ntyp": 1}
}
:raises qe_tools.utils.exceptions.ParsingError: if there are issues
parsing the input.
"""
# TODO: Incorporate support for algebraic expressions?
# Define the re to match a namelist and extract the info from it.
namelist_re = re.compile(
r"""
^ [ \t]* &(\S+) [ \t]* $\n # match line w/ nmlst tag; save nmlst name
(
[\S\s]*? # match any line non-greedily
) # save the group of text between nmlst
^ [ \t]* / [ \t]* $\n # match line w/ "/" as only non-whitespace char
""", re.M | re.X)
# Define the re to match and extract all of the key = val pairs inside
# a block of namelist text.
key_value_re = re.compile(
r"""
[ \t]* (\S+?) [ \t]* # match and store key
= # equals sign separates key and value
[ \t]* (\S+?) [ \t]* # match and store value
[\n,] # return or comma separates "key = value" pairs
""", re.M | re.X)
# Scan through the namelists...
params_dict = {}
for nmlst, blockstr in namelist_re.findall(txt):
# ...extract the key value pairs, storing them each in nmlst_dict,...
nmlst_dict = {}
# I split the lines, putting back a \n at the end (I want
# to have it otherwise lines not ending with a comma are ignored)
blocklines = blockstr.splitlines()
# Remove comments on each line, and then put back the \n
# Note that strip_comment does not want \n in the string!
blocklines = [
"{}\n".format(_strip_comment(line)) for line in blocklines
]
for blockline in blocklines:
for key, valstr in key_value_re.findall(blockline):
if key.lower() in nmlst_dict:
raise ValueError(
"Key {} found more than once | |
def __init__(
self,
*,
id: Optional[str] = None,
additional_properties: Optional[Dict[str, object]] = None,
auth_methods: Optional[List[Union[str, "MicrosoftGraphRegistrationAuthMethod"]]] = None,
is_capable: Optional[bool] = None,
is_enabled: Optional[bool] = None,
is_mfa_registered: Optional[bool] = None,
is_registered: Optional[bool] = None,
user_display_name: Optional[str] = None,
user_principal_name: Optional[str] = None,
**kwargs
):
super(MicrosoftGraphCredentialUserRegistrationDetails, self).__init__(id=id, **kwargs)
self.additional_properties = additional_properties
self.auth_methods = auth_methods
self.is_capable = is_capable
self.is_enabled = is_enabled
self.is_mfa_registered = is_mfa_registered
self.is_registered = is_registered
self.user_display_name = user_display_name
self.user_principal_name = user_principal_name
class MicrosoftGraphDeviceHealth(msrest.serialization.Model):
"""deviceHealth.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param last_connection_time:
:type last_connection_time: ~datetime.datetime
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'last_connection_time': {'key': 'lastConnectionTime', 'type': 'iso-8601'},
}
def __init__(
self,
*,
additional_properties: Optional[Dict[str, object]] = None,
last_connection_time: Optional[datetime.datetime] = None,
**kwargs
):
super(MicrosoftGraphDeviceHealth, self).__init__(**kwargs)
self.additional_properties = additional_properties
self.last_connection_time = last_connection_time
class MicrosoftGraphDirectoryObject(MicrosoftGraphEntity):
"""Represents an Azure Active Directory object. The directoryObject type is the base type for many other directory entity types.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param deleted_date_time:
:type deleted_date_time: ~datetime.datetime
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'deleted_date_time': {'key': 'deletedDateTime', 'type': 'iso-8601'},
}
def __init__(
self,
*,
id: Optional[str] = None,
additional_properties: Optional[Dict[str, object]] = None,
deleted_date_time: Optional[datetime.datetime] = None,
**kwargs
):
super(MicrosoftGraphDirectoryObject, self).__init__(id=id, **kwargs)
self.additional_properties = additional_properties
self.deleted_date_time = deleted_date_time
class MicrosoftGraphGroupPrintUsageSummary(msrest.serialization.Model):
"""groupPrintUsageSummary.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param completed_job_count:
:type completed_job_count: int
:param group: identity.
:type group: ~devices_cloud_print.models.MicrosoftGraphIdentity
:param group_display_name:
:type group_display_name: str
:param group_mail:
:type group_mail: str
:param incomplete_job_count:
:type incomplete_job_count: int
"""
_validation = {
'completed_job_count': {'maximum': 2147483647, 'minimum': -2147483648},
'incomplete_job_count': {'maximum': 2147483647, 'minimum': -2147483648},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'completed_job_count': {'key': 'completedJobCount', 'type': 'int'},
'group': {'key': 'group', 'type': 'MicrosoftGraphIdentity'},
'group_display_name': {'key': 'groupDisplayName', 'type': 'str'},
'group_mail': {'key': 'groupMail', 'type': 'str'},
'incomplete_job_count': {'key': 'incompleteJobCount', 'type': 'int'},
}
def __init__(
self,
*,
additional_properties: Optional[Dict[str, object]] = None,
completed_job_count: Optional[int] = None,
group: Optional["MicrosoftGraphIdentity"] = None,
group_display_name: Optional[str] = None,
group_mail: Optional[str] = None,
incomplete_job_count: Optional[int] = None,
**kwargs
):
super(MicrosoftGraphGroupPrintUsageSummary, self).__init__(**kwargs)
self.additional_properties = additional_properties
self.completed_job_count = completed_job_count
self.group = group
self.group_display_name = group_display_name
self.group_mail = group_mail
self.incomplete_job_count = incomplete_job_count
class MicrosoftGraphIdentity(msrest.serialization.Model):
"""identity.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param display_name: The identity's display name. Note that this may not always be available or
up to date. For example, if a user changes their display name, the API may show the new value
in a future response, but the items associated with the user won't show up as having changed
when using delta.
:type display_name: str
:param id: Unique identifier for the identity.
:type id: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'display_name': {'key': 'displayName', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
*,
additional_properties: Optional[Dict[str, object]] = None,
display_name: Optional[str] = None,
id: Optional[str] = None,
**kwargs
):
super(MicrosoftGraphIdentity, self).__init__(**kwargs)
self.additional_properties = additional_properties
self.display_name = display_name
self.id = id
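# A minimal round-trip sketch (not part of the generated code): msrest models
# inherit serialize()/deserialize() from msrest.serialization.Model, and the
# wire-format keys come from _attribute_map. The values here are illustrative.
#   >>> identity = MicrosoftGraphIdentity(display_name='Print Admin', id='1234')
#   >>> identity.serialize()
#   {'displayName': 'Print Admin', 'id': '1234'}
#   >>> MicrosoftGraphIdentity.deserialize({'displayName': 'Print Admin'}).display_name
#   'Print Admin'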
class MicrosoftGraphIntegerRange(msrest.serialization.Model):
"""integerRange.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param end:
:type end: long
:param maximum:
:type maximum: long
:param minimum:
:type minimum: long
:param start:
:type start: long
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'end': {'key': 'end', 'type': 'long'},
'maximum': {'key': 'maximum', 'type': 'long'},
'minimum': {'key': 'minimum', 'type': 'long'},
'start': {'key': 'start', 'type': 'long'},
}
def __init__(
self,
*,
additional_properties: Optional[Dict[str, object]] = None,
end: Optional[int] = None,
maximum: Optional[int] = None,
minimum: Optional[int] = None,
start: Optional[int] = None,
**kwargs
):
super(MicrosoftGraphIntegerRange, self).__init__(**kwargs)
self.additional_properties = additional_properties
self.end = end
self.maximum = maximum
self.minimum = minimum
self.start = start
class MicrosoftGraphOverallPrintUsageSummary(msrest.serialization.Model):
"""overallPrintUsageSummary.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param active_printers_count:
:type active_printers_count: int
:param active_users_count:
:type active_users_count: int
:param days_in_period:
:type days_in_period: int
:param top_printers:
:type top_printers: list[~devices_cloud_print.models.MicrosoftGraphPrinterUsageSummary]
:param top_users:
:type top_users: list[~devices_cloud_print.models.MicrosoftGraphUserPrintUsageSummary]
:param total_incomplete_jobs:
:type total_incomplete_jobs: int
:param total_jobs_processed:
:type total_jobs_processed: int
"""
_validation = {
'active_printers_count': {'maximum': 2147483647, 'minimum': -2147483648},
'active_users_count': {'maximum': 2147483647, 'minimum': -2147483648},
'days_in_period': {'maximum': 2147483647, 'minimum': -2147483648},
'total_incomplete_jobs': {'maximum': 2147483647, 'minimum': -2147483648},
'total_jobs_processed': {'maximum': 2147483647, 'minimum': -2147483648},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'active_printers_count': {'key': 'activePrintersCount', 'type': 'int'},
'active_users_count': {'key': 'activeUsersCount', 'type': 'int'},
'days_in_period': {'key': 'daysInPeriod', 'type': 'int'},
'top_printers': {'key': 'topPrinters', 'type': '[MicrosoftGraphPrinterUsageSummary]'},
'top_users': {'key': 'topUsers', 'type': '[MicrosoftGraphUserPrintUsageSummary]'},
'total_incomplete_jobs': {'key': 'totalIncompleteJobs', 'type': 'int'},
'total_jobs_processed': {'key': 'totalJobsProcessed', 'type': 'int'},
}
def __init__(
self,
*,
additional_properties: Optional[Dict[str, object]] = None,
active_printers_count: Optional[int] = None,
active_users_count: Optional[int] = None,
days_in_period: Optional[int] = None,
top_printers: Optional[List["MicrosoftGraphPrinterUsageSummary"]] = None,
top_users: Optional[List["MicrosoftGraphUserPrintUsageSummary"]] = None,
total_incomplete_jobs: Optional[int] = None,
total_jobs_processed: Optional[int] = None,
**kwargs
):
super(MicrosoftGraphOverallPrintUsageSummary, self).__init__(**kwargs)
self.additional_properties = additional_properties
self.active_printers_count = active_printers_count
self.active_users_count = active_users_count
self.days_in_period = days_in_period
self.top_printers = top_printers
self.top_users = top_users
self.total_incomplete_jobs = total_incomplete_jobs
self.total_jobs_processed = total_jobs_processed
class MicrosoftGraphPrint(msrest.serialization.Model):
"""print.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param settings: printSettings.
:type settings: ~devices_cloud_print.models.MicrosoftGraphPrintSettings
:param connectors:
:type connectors: list[~devices_cloud_print.models.MicrosoftGraphPrintConnector]
:param operations:
:type operations: list[~devices_cloud_print.models.MicrosoftGraphPrintOperation]
:param printers:
:type printers: list[~devices_cloud_print.models.MicrosoftGraphPrinter]
:param printer_shares:
:type printer_shares: list[~devices_cloud_print.models.MicrosoftGraphPrinterShare]
:param reports:
:type reports: list[~devices_cloud_print.models.MicrosoftGraphReportRoot]
:param services:
:type services: list[~devices_cloud_print.models.MicrosoftGraphPrintService]
:param shares:
:type shares: list[~devices_cloud_print.models.MicrosoftGraphPrinterShare]
:param task_definitions:
:type task_definitions: list[~devices_cloud_print.models.MicrosoftGraphPrintTaskDefinition]
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'settings': {'key': 'settings', 'type': 'MicrosoftGraphPrintSettings'},
'connectors': {'key': 'connectors', 'type': '[MicrosoftGraphPrintConnector]'},
'operations': {'key': 'operations', 'type': '[MicrosoftGraphPrintOperation]'},
'printers': {'key': 'printers', 'type': '[MicrosoftGraphPrinter]'},
'printer_shares': {'key': 'printerShares', 'type': '[MicrosoftGraphPrinterShare]'},
'reports': {'key': 'reports', 'type': '[MicrosoftGraphReportRoot]'},
'services': {'key': 'services', 'type': '[MicrosoftGraphPrintService]'},
'shares': {'key': 'shares', 'type': '[MicrosoftGraphPrinterShare]'},
'task_definitions': {'key': 'taskDefinitions', 'type': '[MicrosoftGraphPrintTaskDefinition]'},
}
def __init__(
self,
*,
additional_properties: Optional[Dict[str, object]] = None,
settings: Optional["MicrosoftGraphPrintSettings"] = None,
connectors: Optional[List["MicrosoftGraphPrintConnector"]] = None,
operations: Optional[List["MicrosoftGraphPrintOperation"]] = None,
printers: Optional[List["MicrosoftGraphPrinter"]] = None,
printer_shares: Optional[List["MicrosoftGraphPrinterShare"]] = None,
reports: Optional[List["MicrosoftGraphReportRoot"]] = None,
services: Optional[List["MicrosoftGraphPrintService"]] = None,
shares: Optional[List["MicrosoftGraphPrinterShare"]] = None,
task_definitions: Optional[List["MicrosoftGraphPrintTaskDefinition"]] = None,
**kwargs
):
super(MicrosoftGraphPrint, self).__init__(**kwargs)
self.additional_properties = additional_properties
self.settings = settings
self.connectors = connectors
self.operations = operations
self.printers = printers
self.printer_shares = printer_shares
self.reports = reports
self.services = services
self.shares = shares
self.task_definitions = task_definitions
class MicrosoftGraphPrintCertificateSigningRequest(msrest.serialization.Model):
"""printCertificateSigningRequest.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param content:
:type content: str
:param transport_key:
:type transport_key: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'content': {'key': 'content', 'type': 'str'},
'transport_key': {'key': 'transportKey', 'type': 'str'},
}
def __init__(
self,
*,
additional_properties: Optional[Dict[str, object]] = None,
content: Optional[str] = None,
transport_key: Optional[str] = None,
**kwargs
):
super(MicrosoftGraphPrintCertificateSigningRequest, self).__init__(**kwargs)
self.additional_properties = additional_properties
self.content = content
self.transport_key = transport_key
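# Note on the '' key used for additional_properties in these _attribute_map
# dicts (an observation about msrest behavior, not generated code): the empty
# key makes the field a catch-all, so payload keys that match no declared
# attribute are collected into the additional_properties dict on
# deserialization instead of being silently dropped.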
class MicrosoftGraphPrintConnector(MicrosoftGraphEntity):
"""printConnector.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param app_version:
:type app_version: str
:param device_health: deviceHealth.
:type device_health: ~devices_cloud_print.models.MicrosoftGraphDeviceHealth
:param display_name:
:type display_name: str
:param fully_qualified_domain_name:
:type fully_qualified_domain_name: str
:param location: printerLocation.
:type location: ~devices_cloud_print.models.MicrosoftGraphPrinterLocation
:param name:
:type name: str
:param operating_system:
:type operating_system: str
:param registered_date_time:
:type registered_date_time: ~datetime.datetime
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'app_version': {'key': 'appVersion', 'type': 'str'},
'device_health': {'key': 'deviceHealth', 'type': 'MicrosoftGraphDeviceHealth'},
'display_name': {'key': 'displayName', 'type': 'str'},
'fully_qualified_domain_name': {'key': 'fullyQualifiedDomainName', 'type': 'str'},
'location': {'key': 'location', 'type': 'MicrosoftGraphPrinterLocation'},
'name': {'key': 'name', 'type': 'str'},
'operating_system': {'key': 'operatingSystem', 'type': 'str'},
'registered_date_time': {'key': 'registeredDateTime', 'type': 'iso-8601'},
}
def __init__(
self,
*,
id: Optional[str] = None,
additional_properties: Optional[Dict[str, object]] = None,
app_version: Optional[str] = None,
device_health: Optional["MicrosoftGraphDeviceHealth"] = None,
display_name: Optional[str] = None,
fully_qualified_domain_name: Optional[str] = None,
location: Optional["MicrosoftGraphPrinterLocation"] = None,
name: Optional[str] = None,
operating_system: Optional[str] = None,
registered_date_time: Optional[datetime.datetime] = None,
**kwargs
):
super(MicrosoftGraphPrintConnector, self).__init__(id=id, **kwargs)
self.additional_properties = additional_properties
self.app_version = app_version
self.device_health = device_health
self.display_name = display_name
self.fully_qualified_domain_name = fully_qualified_domain_name
self.location = location
self.name = name
self.operating_system = operating_system
self.registered_date_time = registered_date_time
<filename>openmdao/utils/file_wrap.py
"""
A collection of utilities for file wrapping.
Note: This is a work in progress.
"""
import re
from pyparsing import CaselessLiteral, Combine, OneOrMore, Optional, \
TokenConverter, Word, nums, oneOf, printables, ParserElement, alphanums
import numpy as np
def _getformat(val):
"""
Get the output format for a floating point number.
The general format is used with 16 places of accuracy, except for when
the floating point value is an integer, in which case a decimal point
followed by a single zero is used.
Parameters
----------
val : float or int
the number to be formatted.
Returns
-------
string
the format string.
"""
if int(val) == val:
return "%.1f"
else:
return "%.16g"
class _SubHelper(object):
"""
Replaces file text at the correct word location in a line.
This class contains the Helper Function that is passed to re.sub.
Attributes
----------
_newtext : str
text to insert.
_replace_location : int
location in the file where replacement is to occur.
_current_location : int
current location in the file.
_counter : int
counter of array values replaced so far.
_start_location : int
initial location where replacement is to occur.
_end_location : int
final location where replacement is to occur.
"""
def __init__(self):
"""
Initialize attributes.
"""
self._newtext = ""
self._replace_location = 0
self._current_location = 0
self._counter = 0
self._start_location = 0
self._end_location = 0
def set(self, newtext, location):
"""
Set a new word location and value for replacement.
Parameters
----------
newtext : str
text to insert.
location : int
location in the file where replacement is to occur.
"""
self._newtext = newtext
self._replace_location = location
self._current_location = 0
def set_array(self, newtext, start_location, end_location):
"""
Set a new starting location, ending location, and value for replacement.
Parameters
----------
newtext : str
text to insert.
start_location : int
location in the file where replacement begins.
end_location : int
location in the file where replacement ends.
"""
self._newtext = newtext
self._start_location = start_location
self._end_location = end_location
self._current_location = 0
def replace(self, text):
"""
Replace text in file.
This function should be passed to re.sub.
Parameters
----------
text : re.Match
match object supplied by re.sub.
Returns
-------
string
newtext if current location is replace location else the input text.
"""
self._current_location += 1
if self._current_location == self._replace_location:
if isinstance(self._newtext, float):
return _getformat(self._newtext) % self._newtext
else:
return str(self._newtext)
else:
return text.group()
def replace_array(self, text):
"""
Replace array of text values in file.
This function should be passed to re.sub.
Parameters
----------
text : re.Match
match object supplied by re.sub.
Returns
-------
string
newtext if current location is replace location else the input text.
"""
self._current_location += 1
end = len(self._newtext)
if self._current_location >= self._start_location and \
self._current_location <= self._end_location and \
self._counter < end:
if isinstance(self._newtext[self._counter], float):
val = self._newtext[self._counter]
newval = _getformat(val) % val
else:
newval = str(self._newtext[self._counter])
self._counter += 1
return newval
else:
return text.group()
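# A minimal sketch of how _SubHelper pairs with re.sub (hypothetical values):
#   helper = _SubHelper()
#   helper.set(42, 3)  # replace the 3rd whitespace-delimited field with 42
#   re.sub("[^ \n]+", helper.replace, "alpha beta gamma delta")
#   -> "alpha beta 42 delta"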
class _ToInteger(TokenConverter):
"""
Converter for PyParsing that is used to turn a token into an int.
"""
def postParse(self, instring, loc, tokenlist):
"""
Convert token into an integer.
Parameters
----------
instring : str
the input string
loc : int
the location of the matching string
tokenlist : list
list of matched tokens
Returns
-------
int
integer value for token.
"""
return int(tokenlist[0])
class _ToFloat(TokenConverter):
"""
Converter for PyParsing that is used to turn a token into a float.
"""
def postParse(self, instring, loc, tokenlist):
"""
Convert token into a float.
Parameters
----------
instring : str
the input string
loc : int
the location of the matching string
tokenlist : list
list of matched tokens
Returns
-------
float
float value for token.
"""
return float(tokenlist[0].replace('D', 'E'))
class _ToNan(TokenConverter):
"""
Converter for PyParsing that is used to turn a token into Python nan.
"""
def postParse(self, instring, loc, tokenlist):
"""
Convert token into Python nan.
Parameters
----------
instring : str
the input string
loc : int
the location of the matching string
tokenlist : list
list of matched tokens
Returns
-------
float
the float value for NaN.
"""
return float('nan')
class _ToInf(TokenConverter):
"""
Converter for PyParsing that is used to turn a token into Python inf.
"""
def postParse(self, instring, loc, tokenlist):
"""
Convert token into Python inf.
Parameters
----------
instring : str
the input string
loc : int
the location of the matching string
tokenlist : list
list of matched tokens
Returns
-------
float
the float value for infinity.
"""
return float('inf')
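# Doctest-style sketch of how these converters wrap a pyparsing expression;
# the grammar below is an assumption (the real grammars appear further on):
#   >>> num = Combine(Word(nums) + '.' + Optional(Word(nums)) +
#   ...               Optional(oneOf('E e D d') + Optional(oneOf('+ -')) +
#   ...                        Word(nums)))
#   >>> _ToFloat(num).parseString('1.5D-4')[0]
#   0.00015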
class InputFileGenerator(object):
"""
Utility to generate an input file from a template.
Substitution of values is supported. Data is located with a simple API.
Attributes
----------
_template_filename : str or None
the name of the template file.
_output_filename : str or None
the name of the output file.
_delimiter : str
delimiter characters.
_reg : re.Pattern
compiled regular expression used to find fields.
_data : list of string
the contents of the file, by line
_current_row : int
the current row of the file
_anchored : bool
indicator that position is relative to a landmark location.
"""
def __init__(self):
"""
Initialize attributes.
"""
self._template_filename = None
self._output_filename = None
self._delimiter = " "
self._reg = re.compile('[^ \n]+')
self._data = []
self._current_row = 0
self._anchored = False
def set_template_file(self, filename):
"""
Set the name of the template file to be used.
The template file is also read into memory when this method is called.
Parameters
----------
filename : str
Name of the template file to be used.
"""
self._template_filename = filename
templatefile = open(filename, 'r')
self._data = templatefile.readlines()
templatefile.close()
def set_generated_file(self, filename):
"""
Set the name of the file that will be generated.
Parameters
----------
filename : str
Name of the input file to be generated.
"""
self._output_filename = filename
def set_delimiters(self, delimiter):
"""
Set the delimiters that are used to identify field boundaries.
Parameters
----------
delimiter : str
A string containing characters to be used as delimiters.
"""
self._delimiter = delimiter
self._reg = re.compile('[^' + delimiter + '\n]+')
def mark_anchor(self, anchor, occurrence=1):
"""
Mark the location of a landmark.
This lets you describe data by relative position. Note that a forward
search begins at the old anchor location. If you want to restart the
search for the anchor at the file beginning, then call ``reset_anchor()``
before ``mark_anchor``.
Parameters
----------
anchor : str
The text you want to search for.
occurrence : int, optional
Find nth instance of text; default is 1 (first). Use -1 to
find last occurrence. Reverse searches always start at the end
of the file no matter the state of any previous anchor.
"""
if not isinstance(occurrence, int):
raise ValueError("The value for occurrence must be an integer")
instance = 0
if occurrence > 0:
count = 0
max_lines = len(self._data)
for index in range(self._current_row, max_lines):
line = self._data[index]
# If we are marking a new anchor from an existing anchor, and
# the anchor is mid-line, then we still search the line, but
# only after the anchor.
if count == 0 and self._anchored:
line = line.split(anchor)[-1]
if line.find(anchor) > -1:
instance += 1
if instance == occurrence:
self._current_row += count
self._anchored = True
return
count += 1
elif occurrence < 0:
max_lines = len(self._data) - 1
count = max_lines
for index in range(max_lines, -1, -1):
line = self._data[index]
# If we are marking a new anchor from an existing anchor, and
# the anchor is mid-line, then we still search the line, but
# only before the anchor.
if count == max_lines and self._anchored:
line = line.split(anchor)[0]
if line.find(anchor) > -1:
instance += -1
if instance == occurrence:
self._current_row = count
self._anchored = True
return
count -= 1
else:
raise ValueError("0 is not valid for an anchor occurrence.")
raise RuntimeError("Could not find pattern %s in template file %s" %
(anchor, self._template_filename))
def reset_anchor(self):
"""
Reset anchor to the beginning of the file.
"""
self._current_row = 0
self._anchored = False
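# Doctest-style sketch of anchoring (hypothetical three-line template):
#   >>> gen = InputFileGenerator()
#   >>> gen._data = ["header\n", "x = 1\n", "x = 2\n"]
#   >>> gen.mark_anchor("x =", occurrence=2)  # lands on the "x = 2" line
#   >>> gen.reset_anchor()                    # back to the top of the file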
def transfer_var(self, value, row, field):
"""
Change a single variable in the template relative to the current anchor.
Parameters
----------
value : float, int, bool, str
New value to set at the location.
row : int
Number of lines offset from the anchor line.
"""Test Contract() with no mining delay class"""
# Uses ganache with automining set to 'on'
import pytest
from simpleth import Blockchain, Contract, SimplEthError, Results
import testconstants as constants
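# The fixtures referenced below (construct_test_contract,
# connect_to_test_contract, deploy_test_contract,
# construct_never_deployed_test_contract) live in a conftest.py that is not
# shown here. A rough sketch of what they are assumed to provide:
#   construct_test_contract                -> Contract(constants.CONTRACT_NAME)
#   connect_to_test_contract               -> same, after .connect()
#   deploy_test_contract                   -> same, after a fresh .deploy(...)
#   construct_never_deployed_test_contract -> a Contract never deployed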
class TestContractConstructorGood:
"""Test case for Contract() with good args"""
def test_constructor_with_good_contract_name(self):
"""Instantiate Contract() object with valid constructor arg"""
assert Contract(constants.CONTRACT_NAME)._name is constants.CONTRACT_NAME
class TestContractConstructorBad:
"""Test cases for Contract() with bad args"""
def test_constructor_with_bad_contract_name_raises_C_100_010(self):
"""SimplEthError is raised when constructor has bad contract name"""
bad_name = 'bad_contract_name'
with pytest.raises(SimplEthError) as excp:
Contract(bad_name)
assert excp.value.code == 'C-100-010'
# noinspection PyArgumentList
def test_constructor_with_missing_contract_name_raises_type_error(self):
"""TypeError is raised when constructor has no contract name"""
with pytest.raises(TypeError):
Contract()
@pytest.mark.usefixtures('connect_to_test_contract')
class TestContractDeployGood:
"""Test cases for Contract().deploy() with good test cases"""
def test_deploy_with_good_args(self, connect_to_test_contract):
"""deploy() returns results with correct transaction name"""
c = Contract(constants.CONTRACT_NAME)
receipt = c.deploy(constants.CONSTRUCTOR_SENDER, constants.CONSTRUCTOR_ARG)
results = Results(c, receipt)
assert results.trx_name == 'deploy'
def test_deploy_with_good_args_plus_gas_limit(
self,
connect_to_test_contract
):
"""deploy() with typical set of args plus specifying a gas limit
large enough for the trx returns result for deploy trx"""
c = Contract(constants.CONTRACT_NAME)
receipt = c.deploy(
constants.CONSTRUCTOR_SENDER,
constants.CONSTRUCTOR_ARG,
gas_limit=constants.CONSTRUCTOR_GAS_LIMIT
)
results = Results(c, receipt)
assert results.trx_name == 'deploy'
def test_deploy_with_good_args_plus_gas_limit_and_fees(
self,
connect_to_test_contract
):
"""deploy() with typical set of args plus specifying a gas limit
large enough for the trx plus reasonable values for fees returns
result for deploy trx"""
c = Contract(constants.CONTRACT_NAME)
receipt = c.deploy(
constants.CONSTRUCTOR_SENDER,
constants.CONSTRUCTOR_ARG,
gas_limit=constants.CONSTRUCTOR_GAS_LIMIT,
max_priority_fee_gwei=constants.MAX_PRIORITY_FEE_GWEI,
max_fee_gwei=constants.MAX_FEE_GWEI
)
results = Results(c, receipt)
assert results.trx_name == 'deploy'
@pytest.mark.usefixtures(
'construct_test_contract',
'construct_never_deployed_test_contract'
)
class TestContractDeployBad:
"""Test cases for Contract().deploy() with bad values"""
# Not testing bad values for ``max_priority_fee_gwei`` and
# ``max_fee_gwei``. Neither of these is yet supported
# by Ganache. Add tests later once Ganache has support
# for them.
def test_deploy_with_bad_sender_raises_C_030_020(
self,
construct_test_contract
):
""""Attempt to deploy with bad sender raises C-030-020"""
c = construct_test_contract
bad_sender = '123'
with pytest.raises(SimplEthError) as excp:
c.deploy(bad_sender, constants.CONSTRUCTOR_ARG)
assert excp.value.code == 'C-030-020'
def test_deploy_with_wrong_type_constructor_arg_raises_C_030_030(
self,
construct_test_contract
):
""""Attempt to deploy with bad constructor arg type raises
C-030-030"""
c = construct_test_contract
bad_constructor_arg = '123'
with pytest.raises(SimplEthError) as excp:
c.deploy(constants.CONSTRUCTOR_SENDER, bad_constructor_arg)
assert excp.value.code == 'C-030-030'
def test_deploy_with_too_many_constructor_args_raises_C_030_030(
self,
construct_test_contract
):
""""Attempt to deploy with too many constructor args raises
C-030-030"""
c = construct_test_contract
extra_constructor_arg = 20
with pytest.raises(SimplEthError) as excp:
c.deploy(
constants.CONSTRUCTOR_SENDER,
constants.CONSTRUCTOR_ARG,
extra_constructor_arg
)
assert excp.value.code == 'C-030-030'
def test_deploy_with_missing_constructor_arg_raises_C_030_030(
self,
construct_test_contract
):
""""Attempt to deploy with missing constructor arg raises
C-030-030"""
c = construct_test_contract
with pytest.raises(SimplEthError) as excp:
c.deploy(constants.CONSTRUCTOR_SENDER)
assert excp.value.code == 'C-030-030'
def test_deploy_with_insufficient_gas_raises_C_030_040(
self,
construct_test_contract
):
""""Attempt to deploy with gas limit arg too small to
run the trx raises C-030-030"""
c = construct_test_contract
insufficient_gas_limit = constants.GAS_LIMIT_MIN
with pytest.raises(SimplEthError) as excp:
c.deploy(
constants.CONSTRUCTOR_SENDER,
constants.CONSTRUCTOR_ARG,
gas_limit=insufficient_gas_limit
)
assert excp.value.code == 'C-030-040'
def test_deploy_with_excessive_gas_raises_C_030_040(
self,
construct_test_contract
):
""""Attempt to deploy with missing constructor arg raises
C-030-030"""
c = construct_test_contract
excessive_gas_limit = constants.GAS_LIMIT_MAX + 1
with pytest.raises(SimplEthError) as excp:
c.deploy(
constants.CONSTRUCTOR_SENDER,
constants.CONSTRUCTOR_ARG,
gas_limit=excessive_gas_limit
)
assert excp.value.code == 'C-030-040'
@pytest.mark.usefixtures('construct_test_contract')
class TestContractConnectBad:
"""Test cases for Contract().connect() with bad values"""
# The good test case has already been run in fixtures
# and elsewhere. No test class TestContractConnectGood.
def test_connect_without_args(self, construct_test_contract):
"""Test normal, expected use. Should pass."""
c = construct_test_contract
contract_address = c.connect()
assert Blockchain().is_valid_address(contract_address)
def test_connect_with_unexpected_arg_raises_type_error(
self,
construct_test_contract
):
"""Test bad use of putting in an arg. Should raise TypeError."""
c = construct_test_contract
unexpected_arg = 'bad_arg'
with pytest.raises(TypeError):
c.connect(unexpected_arg)
@pytest.mark.usefixtures('connect_to_test_contract')
class TestContractPropertiesGood:
"""Test cases for Contract() properties"""
# There are no bad test cases. There are no args for
# any of these properties.
def test_address(self, connect_to_test_contract):
"""Test contract address is a valid address"""
c = connect_to_test_contract
assert Blockchain().is_valid_address(c.address)
def test_artifact_dir(self, connect_to_test_contract):
"""Test contract address is a valid address"""
c = connect_to_test_contract
assert isinstance(c.artifact_dir, str) and \
len(c.artifact_dir) > 0
def test_blockchain(self, connect_to_test_contract):
"""Test blockchain is a Blockchain() object"""
c = connect_to_test_contract
assert isinstance(c.blockchain, Blockchain)
def test_bytecode(self, connect_to_test_contract):
"""Test bytecode is a string"""
c = connect_to_test_contract
assert isinstance(c.bytecode, str)
def test_deployed_code(self, connect_to_test_contract):
"""Test deployed_code is a string"""
c = connect_to_test_contract
assert isinstance(c.deployed_code, str)
def test_events(self, connect_to_test_contract):
"""Test events is a list"""
c = connect_to_test_contract
assert isinstance(c.event_names, list)
def test_functions(self, connect_to_test_contract):
"""Test functions is a list"""
c = connect_to_test_contract
assert isinstance(c.functions, list)
def test_name(self, connect_to_test_contract):
"""Test name is the test contract name"""
c = connect_to_test_contract
assert c.name is constants.CONTRACT_NAME
def test_size(self, connect_to_test_contract):
"""Test size is an integer"""
c = connect_to_test_contract
assert isinstance(c.size, int)
def test_web3_contract(self, connect_to_test_contract):
"""Test web3_contract is an object"""
c = connect_to_test_contract
assert isinstance(c.web3_contract, object)
def test_web3e(self, connect_to_test_contract):
"""Test web3e is an object"""
c = connect_to_test_contract
assert isinstance(c.web3e, object)
@pytest.mark.usefixtures('deploy_test_contract')
class TestCallFcnGood:
"""Test cases for Contract().call_fcn() with good values"""
# Safest to deploy a new contract every time to ensure we get the
# expected initialization values.
def test_call_fcn_getNum0(self, deploy_test_contract):
"""Test call_fcn with function that has no args and returns
a single value"""
c = deploy_test_contract
assert c.call_fcn('getNum0') == constants.INIT_NUM0
def test_call_fcn_getNums(self, deploy_test_contract):
"""Test call_fcn with function that returns a row from an
array"""
c = deploy_test_contract
expected_nums = [
constants.INIT_NUM0,
constants.INIT_NUM1,
constants.INIT_NUM2
]
assert c.call_fcn('getNums') == expected_nums
def test_call_fcn_getNum(self, deploy_test_contract):
"""Test call_fcn with function that has an arg"""
c = deploy_test_contract
assert c.call_fcn('getNum', 2) == constants.INIT_NUM2
def test_call_fcn_getTypes(self, deploy_test_contract):
"""Test call_fcn to get multiple values returned"""
# Since storeTypes() has not been called, the values
# returned are all initialized to default values.
# Just check the number of values returned.
c = deploy_test_contract
assert len(c.call_fcn('getTypes')) == 7
@pytest.mark.usefixtures(
'construct_never_deployed_test_contract',
'connect_to_test_contract'
)
class TestCallFcnBad:
"""Test cases for Contract().call_fcn() with bad values"""
# OK to just do connect() instead of deploy(). These tests
# all give bad args, so it doesn't matter whether we have a
# fresh contract.
# I don't know how to create the error that causes the
# exception with code of C-010-020.
def test_call_fcn_with_no_fcn_name_raises_type_error(
self,
connect_to_test_contract
):
""""Attempt to call_fcn with missing fcn_name fails"""
c = connect_to_test_contract
with pytest.raises(TypeError):
c.call_fcn()
def test_call_fcn_with_bad_fcn_name_raises_C_010_010(
self,
connect_to_test_contract
):
""""Attempt to call_fcn with bad fcn_name raises C-010-010"""
c = connect_to_test_contract
bad_fcn_name = 'bad'
with pytest.raises(SimplEthError) as excp:
c.call_fcn(bad_fcn_name)
assert excp.value.code == 'C-010-010'
def test_call_fcn_with_unconnected_contract_raises_C_010_010(
self,
construct_never_deployed_test_contract
):
"""Test call_fcn() fails if connect() is needed."""
c = construct_never_deployed_test_contract
with pytest.raises(SimplEthError) as excp:
c.call_fcn('getNum0')
assert excp.value.code == 'C-010-010'
def test_call_fcn_with_bad_arg_type_raises_C_010_020(
self,
connect_to_test_contract
):
""""Attempt to call_fcn with bad arg type fails"""
c = connect_to_test_contract
bad_arg_type = 'bad'
with pytest.raises(SimplEthError) as excp:
c.call_fcn('getNum', bad_arg_type)
assert excp.value.code == 'C-010-020'
def test_call_fcn_with_wrong_num_args_raises_C_010_020(
self,
connect_to_test_contract
):
""""Attempt to call_fcn with bad number of args fails"""
c = connect_to_test_contract
with pytest.raises(SimplEthError) as excp:
c.call_fcn('getNum', 1, 2)
assert excp.value.code == 'C-010-020'
def test_call_fcn_with_out_of_bounds_arg_raises_C_010_040(
self,
connect_to_test_contract
):
""""Attempt to call_fcn with an out of bounds arg fails"""
c = connect_to_test_contract
with pytest.raises(SimplEthError) as excp:
c.call_fcn('getNum', 3)
assert excp.value.code == 'C-010-040'
@pytest.mark.usefixtures('connect_to_test_contract')
class TestContractGetGasEstimateGood:
"""Test cases for Contract().get_gas_estimate() with good values"""
def test_get_gas_estimate_with_good_args(
self,
connect_to_test_contract
):
"""Test normal, expected use. Should pass."""
c = connect_to_test_contract
gas_estimate = c.get_gas_estimate(
constants.TRX_SENDER,
constants.TRX_NAME,
constants.TRX_ARG0,
constants.TRX_ARG1,
constants.TRX_ARG2
)
assert gas_estimate > constants.GAS_LIMIT_MIN
@pytest.mark.usefixtures(
'connect_to_test_contract',
'construct_never_deployed_test_contract'
)
class TestContractGetGasEstimateBad:
"""Test cases for Contract().get_gas_estimate() with bad values"""
def test_get_gas_estimate_with_no_args_raises_type_error(
self,
connect_to_test_contract
):
""""Attempt to get_gas_estimate() with no args fails"""
c = connect_to_test_contract
with pytest.raises(TypeError):
c.get_gas_estimate()
def test_get_gas_estimate_with_bad_trx_name_raises_C_040_010(
self,
connect_to_test_contract
):
"""Test get_gas_estimate() with a bad trx name"""
c = connect_to_test_contract
bad_trx_name = 'bad_trx'
with pytest.raises(SimplEthError) as excp:
c.get_gas_estimate(
constants.TRX_SENDER,
bad_trx_name,
constants.TRX_ARG0,
constants.TRX_ARG1,
constants.TRX_ARG2
)
assert excp.value.code == 'C-040-010'
def test_get_gas_estimate_with_too_few_trx_args_raises_C_040_020(
self,
connect_to_test_contract
):
"""Test get_gas_estimate() with too few trx args"""
c = connect_to_test_contract
with pytest.raises(SimplEthError) as excp:
c.get_gas_estimate(
constants.TRX_SENDER,
constants.TRX_NAME,
constants.TRX_ARG0,
constants.TRX_ARG1
)
assert excp.value.code == 'C-040-020'
def test_get_gas_estimate_with_too_many_trx_args_raises_C_040_020(
self,
connect_to_test_contract
):
"""Test get_gas_estimate() with too many trx args"""
c = connect_to_test_contract
with pytest.raises(SimplEthError) as excp:
c.get_gas_estimate(
constants.TRX_SENDER,
constants.TRX_NAME,
constants.TRX_ARG0,
constants.TRX_ARG1,
constants.TRX_ARG2,
constants.TRX_ARG2
)
assert excp.value.code == 'C-040-020'
def test_get_gas_estimate_with_TBD_raises_C_040_030(
self,
connect_to_test_contract
):
"""Test get_gas_estimate() for a destroyed contract
Don't know how to do this yet. Just do assert True
for now. """
assert True
def test_get_gas_estimate_with_oob_arg_raises_C_040_040(
self,
connect_to_test_contract
):
"""Test get_gas_estimate() with out-of-bounds arg"""
c = connect_to_test_contract
with pytest.raises(SimplEthError) as excp:
c.get_gas_estimate(
constants.OOB_TRX_SENDER,
constants.OOB_TRX_NAME,
constants.OOB_TRX_ARG0,
constants.OOB_TRX_ARG1
)
assert excp.value.code == 'C-040-040'
def test_get_gas_estimate_with_db0_arg_raises_C_040_040(
self,
connect_to_test_contract
):
"""Test get_gas_estimate() | |
#!/usr/bin/env python3
''' Program to make tests for metrics testing '''
import argparse, glob, os, pickle, re, requests, subprocess
def create_n_file(id, compare_name, desc, file_lines):
if id in all_n_ids:
exit("Found {} a second time. Exiting.".format(id))
compare_lines = p_files[compare_name]
# Check if nothing changed
if file_lines == compare_lines:
exit("Found unchanged test creation for {}".format(id))
# Check if the test does not start as expected
if not file_lines[0] == "-":
exit("First line of {} was not '-'".format(id))
# Write the file
f = open("n-{}".format(id), mode="wt")
f.write("# [{}] {}\n".format(id, desc))
for this_line in file_lines:
f.write(this_line + "\n")
f.close()
all_n_ids.append(id)
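# Sketch of the call pattern used throughout the main block below ("xyz" is a
# hypothetical id):
#   create_n_file("xyz", "p-dot-ns", "description...", file_lines)
# writes a file n-xyz whose first line is "# [xyz] description...", refusing
# duplicate ids and files identical to the positive file they start from.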
if __name__ == "__main__":
this_parser = argparse.ArgumentParser()
this_parser.add_argument("--addr", dest="addr", default="a.root-servers.net",
help="Address (IP or domain name) of root server to get tests from")
this_parser.add_argument("--bin_prefix", dest="bin_prefix", default=os.path.expanduser("~/Target"),
help="Address (IP or domain name) of root server to get tests from")
opts = this_parser.parse_args()
# Do sanity tests on --bin_prefix and --addr
dig_loc = "{}/bin/dig".format(opts.bin_prefix)
compilezone_loc = "{}/sbin/named-compilezone".format(opts.bin_prefix)
if not os.path.exists(dig_loc):
exit("Did not find {}. Exiting.".format(dig_loc))
if not os.path.exists(compilezone_loc):
exit("Did not find {}. Exiting.".format(compilezone_loc))
try:
addr_test = "{} @{} . soa >/dev/null".format(dig_loc, opts.addr)
subprocess.run(addr_test, shell=True, check=True)
except:
exit("Running '{}' failed, probably due to a bad address. Exiting".format(addr_test))
# Make a file of the root names and types to be passed to the correction checking function
# Keep track of all the records in this temporary root zone, both to find the SOA but also to save for later matching comparisons
# Get the current root zone
internic_url = "https://www.internic.net/domain/root.zone"
try:
root_zone_request = requests.get(internic_url)
except Exception as e:
exit("Could not do the requests.get on {}: '{}'".format(internic_url, e))
# Save it as a temp file to use named-compilezone
temp_latest_zone_name = "root_zone.txt"
temp_latest_zone_f = open(temp_latest_zone_name, mode="wt")
temp_latest_zone_f.write(root_zone_request.text)
temp_latest_zone_f.close()
# Give the named-compilezone command, then post-process
try:
named_compilezone_p = subprocess.run("{} -q -i none -r ignore -o - . '{}'".format(compilezone_loc, temp_latest_zone_name),
shell=True, text=True, check=True, capture_output=True)
except Exception as e:
exit("named-compilezone failed with '{}'".format(e))
new_root_text_in = named_compilezone_p.stdout
# Turn tabs into spaces
new_root_text_in = re.sub("\t", " ", new_root_text_in)
# Turn runs of spaces into a single space
new_root_text_in = re.sub(" +", " ", new_root_text_in)
# Get the output after removing comments
new_root_text = ""
# Remove the comments
for this_line in new_root_text_in.splitlines():
if not this_line.startswith(";"):
new_root_text += this_line + "\n"
# Now save the name and types data
root_name_and_types = {}
for this_line in new_root_text.splitlines():
(this_name, _, _, this_type, this_rdata) = this_line.split(" ", maxsplit=4)
this_key = "{}/{}".format(this_name, this_type)
if not this_key in root_name_and_types:
root_name_and_types[this_key] = set()
root_name_and_types[this_key].add(this_rdata)
f_out = open("root_name_and_types.pickle", mode="wb")
pickle.dump(root_name_and_types, f_out)
f_out.close()
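# A checker elsewhere (not shown here) is assumed to reload this mapping as:
#   with open("root_name_and_types.pickle", mode="rb") as f_in:
#       root_name_and_types = pickle.load(f_in)
#   root_name_and_types["us./NS"]  # -> set of NS RDATA strings for .us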
# Template for . SOA
# dig +yaml . SOA @ADDR -4 +notcp +nodnssec +noauthority +noadditional +bufsize=1220 +nsid +norecurse +time=4 +tries=1
# Template for all other digs
# dig +yaml {} {} @ADDR -4 +notcp +dnssec +bufsize=1220 +nsid +norecurse +time=4 +tries=1 +noignore
p_template = "@{} -4 +notcp +dnssec +bufsize=1220 +nsid +norecurse +time=4 +tries=1 +noignore".format(opts.addr)
# Create the positive files
cmd_list = """
{} +yaml . SOA {} > p-dot-soa
{} +yaml . DNSKEY {} > p-dot-dnskey
{} +yaml . NS {} > p-dot-ns
{} +yaml www.rssac047-test.zyxwvutsrqp A {} > p-neg
{} +yaml us DS {} > p-tld-ds
{} +yaml us NS {} > p-tld-ns
{} +yaml cm NS {} > p-tld-ns-no-ds
{} +yaml by NS {} > p-by-ns
""".strip().splitlines()
for this_cmd in cmd_list:
subprocess.run(this_cmd.format(dig_loc, p_template), shell=True)
# Fix p-by-ns for the BIND YAML bug
all_by_lines = []
for this_line in open("p-by-ns", mode="rt"):
if this_line.endswith("::\n"):
this_line = this_line.replace("::\n", "::0\n")
all_by_lines.append(this_line)
f = open("p-by-ns", mode="wt")
f.write("".join(all_by_lines))
f.close()
# Delete all the negative files before re-creating them
for this_to_delete in glob.glob("n-*"):
try:
os.unlink(this_to_delete)
except:
exit("Stopping early because can't delete {}".format(this_to_delete))
# Read all the positive files into memory
p_file_names = '''
p-dot-soa
p-dot-dnskey
p-dot-ns
p-neg
p-tld-ds
p-tld-ns
p-tld-ns-no-ds
p-by-ns
'''.strip().splitlines()
p_files = {}
for this_file in p_file_names:
p_files[this_file] = open(this_file, mode="rt").read().splitlines()
# Keep track of the IDs to make sure we don't accidentally copy one
all_n_ids = []
##########
# Whenever possible, create test cases that do not also cause validation failures
##########
# All of the RRsets in the Answer, Authority, and Additional sections match RRsets found in the zone. [vnk]
# Add and change records in Answer (although this will always fail due to DNSSEC validation)
# Add and change unsigned records in Authority
# Add and change unsigned records in Additional
# Note that deleting records is not covered here because that can't be tested
# Add a new record to Answer
id = "ffr"
compare_name = "p-dot-ns"
desc = "Start with p-dot-ns, add z.root-servers.net to Answer; will have DNSSEC validation failure"
file_lines = []
for this_line in p_files[compare_name]:
if this_line == " - . 518400 IN NS a.root-servers.net.":
file_lines.append(" - . 518400 IN NS z.root-servers.net.")
file_lines.append(this_line)
create_n_file(id, compare_name, desc, file_lines)
# Change a record in Answer
id = "vpn"
compare_name = "p-dot-ns"
desc = "Start with p-dot-ns, change a.root-server.net to z.root-servers.net in Answer; will have DNSSEC validation failure"
file_lines = []
for this_line in p_files[compare_name]:
if this_line == " - . 518400 IN NS a.root-servers.net.":
file_lines.append(" - . 518400 IN NS z.root-servers.net.")
else:
file_lines.append(this_line)
create_n_file(id, compare_name, desc, file_lines)
# Add a new record to Authority
id = "zoc"
compare_name = "p-tld-ns"
desc = "Start with p-tld-ns, add z.cctld.us to Authority; use NS because it is unsigned"
file_lines = []
for this_line in p_files[compare_name]:
if this_line == " - us. 172800 IN NS c.cctld.us.":
file_lines.append(" - us. 172800 IN NS z.cctld.us.")
file_lines.append(this_line)
create_n_file(id, compare_name, desc, file_lines)
# Change a record in Authority
id = "gye"
compare_name = "p-tld-ns"
desc = "Start with p-tld-ns, change c.cctld.us to z.cctld.us in Authority; use NS because it is unsigned"
file_lines = []
for this_line in p_files[compare_name]:
if this_line == " - us. 172800 IN NS c.cctld.us.":
file_lines.append(" - us. 172800 IN NS z.cctld.us.")
else:
file_lines.append(this_line)
create_n_file(id, compare_name, desc, file_lines)
# Add a new record to Additional
id = "rse"
compare_name = "p-tld-ns"
desc = "Start with p-tld-ns, add an A for c.cctld.us in Additional"
file_lines = []
for this_line in p_files[compare_name]:
if this_line == " - c.cctld.us. 172800 IN A 172.16.58.3":
file_lines.append(" - c.cctld.us. 172800 IN A 172.16.17.32")
file_lines.append(this_line)
create_n_file(id, compare_name, desc, file_lines)
# Change a record in Additional
id = "ykm"
compare_name = "p-tld-ns"
desc = "Start with p-tld-ns, change A for c.cctld.us in Addtional"
file_lines = []
for this_line in p_files[compare_name]:
if this_line == " - c.cctld.us. 172800 IN A 172.16.58.3":
file_lines.append(" - c.cctld.us. 172800 IN A 172.16.17.32")
else:
file_lines.append(this_line)
create_n_file(id, compare_name, desc, file_lines)
##########
# All RRsets that are signed have their signatures validated. [yds]
# Change the RRSIGs in different ways
# p-tld-ds has signed DS records for .us in the answer; the RRSIG looks like:
# - us. 86400 IN RRSIG DS 8 1 86400 20200513170000 20200430160000 48903 . iwAdFM7FNufqTpU/pe1nySyTeND3C2KvzXgMYR3+yLMXhu1bqbQ+Dy7G . . .
# Change the RDATA
id = "uuc"
compare_name = "p-dot-dnskey"
desc = "Start with p-dot-dnskey, change the RRSIG RData in the Answer; causes validation failure"
file_lines = []
for this_line in p_files[compare_name]:
if "AwEAAaz/tAm8yTn4Mfeh" in this_line:
file_lines.append(this_line.replace("AwEAAaz/tAm8yTn4Mfeh", "AwEAAaz/tAm8yTn4MfeH"))
else:
file_lines.append(this_line)
create_n_file(id, compare_name, desc, file_lines)
# Change the signature value itself
id = "gut"
compare_name = "p-dot-dnskey"
desc = "Start with p-dot-dnskey, change the RRSIG signature; causes validation failure"
file_lines = []
for this_line in p_files[compare_name]:
if this_line.startswith(" - . 172800 IN RRSIG DNSKEY"):
file_lines.append(this_line.replace("Q", "q"))
else:
file_lines.append(this_line)
create_n_file(id, compare_name, desc, file_lines)
##########
# For positive responses with QNAME = <TLD> and QTYPE = NS, a correct result requires all of the following: [hmk]
# Use p-tld-ns
# The header AA bit is not set. [ujy]
id = "xpa"
compare_name = "p-tld-ns"
desc = "Start with p-tld-ns, set the AA bit"
file_lines = []
for this_line in p_files[compare_name]:
if this_line == " flags: qr":
file_lines.append(" flags: qr aa")
else:
file_lines.append(this_line)
create_n_file(id, compare_name, desc, file_lines)
# The Answer section is empty. [aeg]
id = "aul"
compare_name = "p-tld-ns"
desc = "Start with p-tld-ns, create a bogus Answer section with the NS records"
file_lines = []
for this_line in p_files[compare_name]:
if this_line == " AUTHORITY_SECTION:":
file_lines.append(" ANSWER_SECTION:")
file_lines.append(" - us. 172800 IN NS c.cctld.us.")
file_lines.append(" - us. 172800 IN NS k.cctld.us.")
file_lines.append(" - us. 172800 IN NS a.cctld.us.")
file_lines.append(" - us. 172800 IN NS b.cctld.us.")
file_lines.append(" - us. 172800 IN NS f.cctld.us.")
file_lines.append(" - us. 172800 IN NS e.cctld.us.")
file_lines.append(" AUTHORITY_SECTION:")
else:
file_lines.append(this_line)
create_n_file(id, compare_name, desc, file_lines)
# The Authority section contains the entire NS RRset for the query name. [pdd]
id = "mbh"
compare_name = "p-tld-ns"
desc = "Start with p-tld-ns, remove NS k.cctld.us. from the Authority section"
file_lines = []
for this_line in p_files[compare_name]:
if this_line == " - us. 172800 IN NS k.cctld.us.":
continue
file_lines.append(this_line)
create_n_file(id, compare_name, desc, file_lines)
# If the DS RRset for the query name exists in the zone: [hue]
# The Authority section contains the signed DS RRset for the query name. [kbd]
id = "csl"
compare_name = "p-tld-ns"
desc = "Start with p-tld-ns, remove one of the DS records from the Authority section; will cause validation failure"
file_lines = []
removed_one = False
for this_line in p_files[compare_name]:
if ("- us. 86400 IN DS" in this_line) and not removed_one:
removed_one = True
continue
file_lines.append(this_line)
create_n_file(id, compare_name, desc, file_lines)
# If the DS RRset for the query name does not exist in the zone: [fot]
# The Authority section contains no DS RRset. [bgr]
# The Authority section contains a signed NSEC RRset covering the query name. [mkl]
id = "jke"
compare_name = "p-tld-ns-no-ds"
desc = "Start with p-tld-ns-no-ds, add a DS records to the Authority | |
'x']))
self.assertEqual('poly', cladistic(tree2, ['g', 'h']))
msg = 'Node y is not in self'
with self.assertRaisesRegex(MissingNodeError, msg):
cladistic(tree2, ['y', 'b'])
assign_taxa(tree2)
self.assertEqual('uni', cladistic(tree2, ['a']))
self.assertEqual('mono', cladistic(tree2, ['a', 'b']))
self.assertEqual('poly', cladistic(tree2, ['g', 'h']))
def test_check_monophyly(self):
newick = '(((a,b)n4,(c,d)n5,(e,f)n6)n2,(g,(h,i)n7)n3)n1;'
tree = TreeNode.read([newick])
assign_taxa(tree)
res = check_monophyly(tree, 'a')
self.assertListEqual([res[0], res[1].name], ['strict', 'a'])
res = check_monophyly(tree, 'ab')
self.assertListEqual([res[0], res[1].name], ['strict', 'n4'])
res = check_monophyly(tree, 'abc')
self.assertListEqual([res[0], res[1].name], ['rejected', 'n2'])
res = check_monophyly(tree, 'abcd')
self.assertListEqual([res[0], res[1].name], ['relaxed', 'n2'])
res = check_monophyly(tree, 'abcde')
self.assertListEqual([res[0], res[1].name], ['rejected', 'n2'])
res = check_monophyly(tree, 'abcdef')
self.assertListEqual([res[0], res[1].name], ['strict', 'n2'])
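# Inference from the assertions above: check_monophyly returns a 2-tuple of
# (status, lca_node), where status is 'strict', 'relaxed', or 'rejected' and
# lca_node is the lowest common ancestor TreeNode of the queried taxa.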
def test_compare_length(self):
tree = TreeNode.read(['((a:1.000000001,(b:1.000000002,c:1):1):3,f)g;'])
self.assertTrue(_compare_length(tree.find('f'), tree.find('g')))
self.assertTrue(_compare_length(tree.find('a'), tree.find('b')))
self.assertTrue(_compare_length(tree.find('c'), tree.find('c').parent))
self.assertFalse(_compare_length(tree.find('c'),
tree.find('a').parent))
self.assertFalse(_compare_length(tree.find('a').parent,
tree.find('f')))
self.assertFalse(_compare_length(tree.find('f'),
tree.find('a').parent))
def test_compare_branch_lengths(self):
tree1 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
self.assertTrue(compare_branch_lengths(tree1, tree1))
tree1 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
tree2 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
self.assertTrue(compare_branch_lengths(tree1, tree2))
tree1 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
tree3 = TreeNode.read(['(f:1,((b:1,c:1)d:1,a:1)e:1)g:1;'])
self.assertTrue(compare_branch_lengths(tree1, tree3))
tree1 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
tree3 = TreeNode.read(['(f:1,((b:1,c:1)d:1,a:1)e:1)g:1;'])
self.assertTrue(compare_branch_lengths(tree3, tree1))
tree1 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
tree4 = TreeNode.read(['((a:2,(b:1,c:1)d:1)e:1,f:1)g:1;'])
self.assertFalse(compare_branch_lengths(tree1, tree4))
tree1 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
tree4 = TreeNode.read(['((a:2,(b:1,c:1)d:1)e:1,f:1)g:1;'])
self.assertFalse(compare_branch_lengths(tree4, tree1))
tree1 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
tree5 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e,f:1)g:1;'])
self.assertFalse(compare_branch_lengths(tree1, tree5))
tree1 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
tree5 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e,f:1)g:1;'])
self.assertFalse(compare_branch_lengths(tree5, tree1))
tree1 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
tree7 = TreeNode.read(['((a:1,(b:1,c:1):1)e:1,f:1)g:1;'])
self.assertTrue(compare_branch_lengths(tree1, tree7))
tree1 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
tree7 = TreeNode.read(['((a:1,(b:1,c:1):1)e:1,f:1)g:1;'])
self.assertTrue(compare_branch_lengths(tree7, tree1))
tree1 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
tree8 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1):1;'])
self.assertTrue(compare_branch_lengths(tree1, tree8))
tree1 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
tree8 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1):1;'])
self.assertTrue(compare_branch_lengths(tree8, tree1))
tree1 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
tree6 = TreeNode.read(['(f:1, ((a:1, b:1)c:1 ,d:1)e:1)g:1;'])
self.assertFalse(compare_branch_lengths(tree1, tree6))
tree1 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
tree6 = TreeNode.read(['(f:1, ((a:1, b:1)c:1 ,d:1)e:1)g:1;'])
self.assertFalse(compare_branch_lengths(tree6, tree1))
tree9 = TreeNode.read(['(((a:1,b:1)c:1,(d:1,e:1)f:1)g:1,h:1)i:1;'])
tree10 = TreeNode.read(['(((a:1,b:1)c:1,(d:1,e:1)g:1)f:1,h:1)i:1;'])
self.assertTrue(compare_branch_lengths(tree9, tree10))
tree9 = TreeNode.read(['(((a:1,b:1)c:1,(d:1,e:1)f:1)g:1,h:1)i:1;'])
tree10 = TreeNode.read(['(((a:1,b:1)c:1,(d:1,e:1)g:1)f:1,h:1)i:1;'])
self.assertTrue(compare_branch_lengths(tree10, tree9))
tree9 = TreeNode.read(['(((a:1,b:1)c:1,(d:1,e:1)f:1)g:1,h:1)i:1;'])
tree12 = TreeNode.read(['(((a:1,b:1):1,(h:1,e:1):1):1,d:1):1;'])
self.assertFalse(compare_branch_lengths(tree9, tree12))
tree9 = TreeNode.read(['(((a:1,b:1)c:1,(d:1,e:1)f:1)g:1,h:1)i:1;'])
tree12 = TreeNode.read(['(((a:1,b:1):1,(h:1,e:1):1):1,d:1):1;'])
self.assertFalse(compare_branch_lengths(tree12, tree9))
tree1 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
tree11 = TreeNode.read(['((a:1,(x:1,c:1)d:1)e:1,f:1)g:1;'])
self.assertFalse(compare_branch_lengths(tree1, tree11))
tree1 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
tree11 = TreeNode.read(['((a:1,(x:1,c:1)d:1)e:1,f:1)g:1;'])
self.assertFalse(compare_branch_lengths(tree11, tree1))
def test_assign_supports(self):
tree = TreeNode.read(["((a,b)95,(c,d):1.1,(e,f)'80:Dmel':1.0);"])
assign_supports(tree)
# standalone support value
self.assertEqual(tree.lca(['a', 'b']).support, 95)
# no support value
self.assertIsNone(tree.lca(['c', 'd']).support)
# support value before node name
self.assertEqual(tree.lca(['e', 'f']).support, 80)
# stripped support value from node name
self.assertEqual(tree.lca(['e', 'f']).name, 'Dmel')
def test_support_to_label(self):
# unnamed nodes
tree = TreeNode.read(['((a,b)100,((c,d)95,(e,f)99)80);'])
assign_supports(tree)
self.assertEqual(str(tree), '((a,b),((c,d),(e,f)));\n')
support_to_label(tree)
self.assertEqual(str(tree), '((a,b)100,((c,d)95,(e,f)99)80);\n')
# named nodes
tree = TreeNode.read(["((a,b)'100:n2',(c,d)'95:n3')n1;"])
assign_supports(tree)
self.assertEqual(str(tree), '((a,b)n2,(c,d)n3)n1;\n')
support_to_label(tree)
self.assertEqual(str(tree), "((a,b)'100:n2',(c,d)'95:n3')n1;\n")
# unusual cases
tree = TreeNode.read(['(((a,b)n2,(c,d)n3)n6,(e,f)n4,(g,h)n5)n1;'])
tree.find('n2').support = 100
tree.find('n3').support = 0
tree.find('n4').support = ''
tree.find('n5').support = None
# n6 has no `support` attribute
tree.find('a').support = 95 # tips shouldn't have support
support_to_label(tree)
exp = "(((a,b)'100:n2',(c,d)'0:n3')n6,(e,f)n4,(g,h)n5)n1;\n"
self.assertEqual(str(tree), exp)
def test_walk_copy(self):
tree1 = TreeNode.read(['(((a:1.0,b:0.8)c:2.4,(d:0.8,e:0.6)f:1.2)g:0.4,'
'(h:0.5,i:0.7)j:1.8)k;'])
# test pos = root
msg = 'Cannot walk from root of a rooted tree.'
with self.assertRaisesRegex(ValueError, msg):
walk_copy(tree1.find('k'), tree1.find('j'))
msg = 'Source and node are not neighbors.'
# test pos = derived
with self.assertRaisesRegex(ValueError, msg):
walk_copy(tree1.find('a'), tree1.find('b'))
with self.assertRaisesRegex(ValueError, msg):
walk_copy(tree1.find('c'), tree1.find('f'))
with self.assertRaisesRegex(ValueError, msg):
walk_copy(tree1.find('f'), tree1.find('j'))
with self.assertRaisesRegex(ValueError, msg):
walk_copy(tree1.find('f'), tree1.find('k'))
# test pos = basal
with self.assertRaisesRegex(ValueError, msg):
walk_copy(tree1.find('g'), tree1.find('a'))
with self.assertRaisesRegex(ValueError, msg):
walk_copy(tree1.find('g'), tree1.find('k'))
# pos = derived, move = up
exp = TreeNode.read(['(b:0.8,((d:0.8,e:0.6)f:1.2,(h:0.5,i:0.7)j:2.2)'
'g:2.4)c:1.0;'])
obs = walk_copy(tree1.find('c'), tree1.find('a'))
self.assertTrue(_exact_compare(exp, obs))
# pos = derived, move = down
exp = TreeNode.read(['(d:0.8,e:0.6)f:1.2;'])
obs = walk_copy(tree1.find('f'), tree1.find('g'))
self.assertTrue(_exact_compare(exp, obs))
# pos = basal, move = top
exp = TreeNode.read(['((d:0.8,e:0.6)f:1.2,(h:0.5,i:0.7)j:2.2)g:2.4;'])
obs = walk_copy(tree1.find('g'), tree1.find('c'))
self.assertTrue(_exact_compare(exp, obs))
# pos = basal, move = bottom
exp = TreeNode.read(['(h:0.5,i:0.7)j:2.2;'])
obs = walk_copy(tree1.find('j'), tree1.find('g'))
self.assertTrue(_exact_compare(exp, obs))
tree2 = TreeNode.read(['(((a:1.0,b:0.8)c:2.4,d:0.8)e:0.6,f:1.2,'
'g:0.4)h:0.5;'])
# pos = basal, move = down
exp = TreeNode.read(['((a:1.0,b:0.8)c:2.4,d:0.8)e:0.6;'])
obs = walk_copy(tree2.find('e'), tree2.find('h'))
self.assertTrue(_exact_compare(exp, obs))
# pos = basal, move = up
exp = TreeNode.read(['(d:0.8,(f:1.2,g:0.4)h:0.6)e:2.4;'])
obs = walk_copy(tree2.find('e'), tree2.find('c'))
self.assertTrue(_exact_compare(exp, obs))
def test_root_above(self):
# test rooted tree
tree1 = TreeNode.read(['(((a:1.0,b:0.8)c:2.4,(d:0.8,e:0.6)f:1.2)g:0.4,'
'(h:0.5,i:0.7)j:1.8)k;'])
tree1_cg = root_above(tree1.find('c'))
exp = TreeNode.read(['((a:1.0,b:0.8)c:1.2,((d:0.8,e:0.6)f:1.2,(h:0.5,'
'i:0.7)j:2.2)g:1.2);'])
self.assertTrue(_exact_compare(exp, tree1_cg))
tree1_ij = root_above(tree1.find('i'))
exp = TreeNode.read(['(i:0.35,(h:0.5,((a:1.0,b:0.8)c:2.4,(d:0.8,'
'e:0.6)f:1.2)g:2.2)j:0.35);'])
self.assertTrue(_exact_compare(exp, tree1_ij))
# test unrooted tree
tree2 = TreeNode.read(['(((a:0.6,b:0.5)g:0.3,c:0.8)h:0.4,(d:0.4,'
'e:0.5)i:0.5,f:0.9)j;'])
tree2_ag = root_above(tree2.find('a'))
exp = TreeNode.read(['(a:0.3,(b:0.5,(c:0.8,((d:0.4,e:0.5)i:0.5,'
'f:0.9)j:0.4)h:0.3)g:0.3);'])
self.assertTrue(_exact_compare(exp, tree2_ag))
tree2_gh = root_above(tree2.find('g'))
exp = TreeNode.read(['((a:0.6,b:0.5)g:0.15,(c:0.8,((d:0.4,e:0.5)i:0.5,'
'f:0.9)j:0.4)h:0.15);'])
self.assertTrue(_exact_compare(exp, tree2_gh))
# test unrooted tree with 1 basal node
tree3 = TreeNode.read(['(((a:0.4,b:0.3)e:0.1,(c:0.4,'
'd:0.1)f:0.2)g:0.6)h:0.2;'])
tree3_ae = root_above(tree3.find('a'))
exp = TreeNode.read(['(a:0.2,(b:0.3,((c:0.4,d:0.1)f:0.2,'
'h:0.6)g:0.1)e:0.2);'])
self.assertTrue(_exact_compare(exp, tree3_ae))
def test_unroot_at(self):
# sample example from doctest of scikit-bio's `root_at`
tree = TreeNode.read(['(((a,b)c,(d,e)f)g,h)i;'])
obs = unroot_at(tree.find('c'))
exp = TreeNode.read(['(((d,e)f,h)g,a,b)c;'])
self.assertTrue(_exact_compare(obs, exp))
# test branch support handling
tree.find('c').support = 95
tree.find('f').support = 99
obs = unroot_at(tree.find('c'))
exp = TreeNode.read(["(((d,e)'99:f',h)'95:g',a,b)c;"])
assign_supports(exp)
self.assertTrue(_exact_compare(obs, exp))
# test branch length handling
tree = TreeNode.read([
'(((a:1.1,b:2.2)c:1.3,(d:1.4,e:0.8)f:0.6)g:0.4,h:3.1)i;'])
obs = unroot_at(tree.find('c'))
exp = TreeNode.read([
'(((d:1.4,e:0.8)f:0.6,h:3.5)g:1.3,a:1.1,b:2.2)c;'])
self.assertTrue(_exact_compare(obs, exp))
def test_exact_compare(self):
# test name
tree0 = TreeNode.read(['((e,d)f,(c,(a,b)));'])
tree1 = TreeNode.read(['(((a,b),c),(d,e)f);'])
self.assertTrue(_exact_compare(tree1, tree1))
self.assertFalse(_exact_compare(tree0, tree1))
# test length
tree2 = TreeNode.read(['(((a:1,b):2,c:1),(d:1,e:2)f:1);'])
self.assertTrue(_exact_compare(tree2, tree2))
self.assertFalse(_exact_compare(tree1, tree2))
tree3 = TreeNode.read(['(((a:1,b:0.0):2,c:1):0.0,(d:1,e:2)f:1);'])
self.assertTrue(_exact_compare(tree3, tree3))
self.assertFalse(_exact_compare(tree2, tree3))
# test support
tree4 = TreeNode.read(['(((a:1,b:1)95:2,c:1)98:3,(d:1,e:2)0.0:1);'])
tree5 = TreeNode.read(['(((a:1,b:1)95:2,c:1)98:3,(d:1,e:2):1);'])
assign_supports(tree4)
self.assertTrue(_exact_compare(tree4, tree4))
self.assertFalse(_exact_compare(tree4, tree5))
assign_supports(tree5)
self.assertFalse(_exact_compare(tree4, tree5))
def test_calc_split_metrics(self):
"""Example from Fig. 9a of Puigbo, et al., 2009, J Biol.
/-A
/n9------|
/n8------| \\-B
| |
/n4------| \\-C
| |
| | /-D
| \n7------|
| \\-E
|
| /-F
-n1------| /n6------|
| | \\-G
|-n3------|
| | /-H
| \n5------|
| \\-I
|
| /-J
\n2------|
\\-K
"""
tree = TreeNode.read([
'((((A,B)n9,C)n8,(D,E)n7)n4,((F,G)n6,(H,I)n5)n3,(J,K)n2)n1;'
])
calc_split_metrics(tree)
obs = {x.name: [getattr(x, y) for y in
('n', 'splits', 'prelevel', 'postlevels')]
for x in tree.traverse()}
exp = {
'n1': [11, 9, 1, [5, 5, 4, 4, 4, 4, 4, 4, 4, 3, 3]],
'n4': [5, 4, 2, [4, 4, 3, 3, 3]],
'n3': [4, 3, 2, [3, 3, 3, 3]],
'n2': [2, 1, 2, [2, 2]],
'n8': [3, 2, 3, [3, 3, 2]],
'n7': [2, 1, 3, [2, 2]],
'n6': [2, 1, 3, [2, 2]],
'n5': [2, 1, 3, [2, 2]],
'J': [1, 0, 3, [1]],
'K': [1, 0, 3, [1]],
'n9': [2, 1, 4, [2, 2]],
'C': [1, 0, 4, [1]],
'D': [1, 0, 4, [1]],
'E': [1, 0, 4, [1]],
'F': [1, 0, 4, [1]],
'G': [1, 0, 4, [1]],
'H': [1, 0, 4, [1]],
'I': [1, 0, 4, [1]],
'A': [1, 0, 5, [1]],
'B': [1, 0, 5, [1]]
}
self.assertDictEqual(obs, exp)
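# Sanity check of the metrics for one node, inferred from the expected
# dict above: for 'n4', n=5 (tips A-E below it), splits=4 (internal
# nodes n4, n8, n9, n7 in its subtree), prelevel=2 (nodes on the path
# n1-n4, inclusive), and postlevels=[4, 4, 3, 3, 3] (nodes on the path
# from n4 down to each tip, inclusive: n4-n8-n9-A is 4, n4-n8-C is 3,
# n4-n7-D is 3, and so on).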
def test_calc_length_metrics(self):
"""Example from Fig. 1a of Parks et al. (2018):
/--1--A
/n3--1--|
| \\--1--B
/n2----2----|
| | /--1--C
-n1-| \n4----2----|
| \\----2----D
|
\\------3------E
"""
tree = TreeNode.read(['(((A:1,B:1)n3:1,(C:1,D:2)n4:2)n2:2,E:3)n1;'])
calc_length_metrics(tree)
obs = {x.name: {'height': x.height, 'depths': x.depths,
'red': round(x.red, 7)} for x in tree.traverse()}
exp = {'n1': {'height': 0.0, 'depths': [4.0, 4.0, 5.0, 6.0, 3.0],
'red': 0.0},
'n2': {'height': 2.0, 'depths': [2.0, 2.0, 3.0, 4.0],
'red': 0.4210526},
'n3': {'height': 3.0, 'depths': [1.0, 1.0], 'red': 0.7105263},
'n4': {'height': 4.0, 'depths': [1.0, 2.0], 'red': 0.7518797},
'A': {'height': 4.0, 'depths': [0.0], 'red': 1.0},
'B': {'height': 4.0, 'depths': [0.0], 'red': 1.0},
'C': {'height': 5.0, 'depths': [0.0], 'red': 1.0},
'D': {'height': 6.0, 'depths': [0.0], 'red': 1.0},
'E': {'height': 3.0, 'depths': [0.0], 'red': 1.0}}
self.assertDictEqual(obs, exp)
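# Worked arithmetic for the RED values above, using the formula of
# Parks et al. (2018): red = p + (d / u) * (1 - p), where p is the
# parent's RED, d the branch length to the parent, and u the mean
# distance from the parent to all tips below the node:
# n2: 0 + (2 / mean(4, 4, 5, 6)) * (1 - 0) = 2 / 4.75 = 0.4210526
# n3: 0.4210526 + (1 / mean(2, 2)) * (1 - 0.4210526) = 0.7105263
# n4: 0.4210526 + (2 / mean(3, 4)) * (1 - 0.4210526) = 0.7518797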
def test_format_newick(self):
newick = '((A_1:1.05,B_2:1.68):2.24,(C:0.28,D:1.14):1.73e-10);'
tree = TreeNode.read([newick])
# default behavior (same as TreeNode.write)
self.assertEqual(format_newick(tree), newick)
# keep space
exp = "(('A 1':1.05,'B 2':1.68):2.24,(C:0.28,D:1.14):1.73e-10);"
self.assertEqual(format_newick(tree, keep_space=True), exp)
# specify digits for float point
exp = '((A_1:1.1,B_2:1.7):2.2,(C:0.28,D:1.1):1.73e-10);'
self.assertEqual(format_newick(tree, max_f=2), exp)
# specify digits for scientific notation
exp = '((A_1:1.05,B_2:1.68):2.24,(C:0.28,D:1.14):1.7e-10);'
self.assertEqual(format_newick(tree, max_e=2), exp)
# all options enabled
exp = "(('A 1':1.1,'B 2':1.7):2.2,(C:0.28,D:1.1):1.7e-10);"
self.assertEqual(format_newick(tree, True, 2, 2), exp)
def test_root_by_outgroup(self):
tree = TreeNode.read(['((((a,b),(c,d)),(e,f)),g);'])
# outgroup is monophyletic
obs = root_by_outgroup(tree, outgroup=['a', 'b'])
exp = TreeNode.read(['((a,b),((c,d),((e,f),g)));'])
self.assertTrue(_exact_compare(obs, exp))
# outgroup is monophyletic after rotating
obs = root_by_outgroup(tree, outgroup=['e', 'f', 'g'])
exp = TreeNode.read(['(((e,f),g),((c,d),(b,a)));'])
self.assertTrue(_exact_compare(obs, exp))
# outgroup is not monophyletic
msg = 'Outgroup is not monophyletic in tree.'
with self.assertRaisesRegex(ValueError, msg):
root_by_outgroup(tree, outgroup=['a', 'c'])
# outgroup is single taxon
obs = root_by_outgroup(tree, outgroup=['a'])
exp = TreeNode.read(['(a,(b,((c,d),((e,f),g))));'])
self.assertTrue(_exact_compare(obs, exp))
# outgroup has extra taxa
obs = root_by_outgroup(tree, outgroup=['a', 'b', 'x'])
exp = TreeNode.read(['((a,b),((c,d),((e,f),g)));'])
self.assertTrue(_exact_compare(obs, exp))
# outgroup has extra taxa but strict mode
msg = 'Outgroup is not a subset of tree taxa.'
with self.assertRaisesRegex(ValueError, msg):
root_by_outgroup(tree, outgroup=['a', 'b', 'x'], strict=True)
# outgroup is not in tree
msg = 'None of outgroup taxa are present in tree.'
with self.assertRaisesRegex(ValueError, msg):
root_by_outgroup(tree, outgroup=['x', 'y'])
# outgroup is the whole tree
msg = 'Outgroup constitutes the entire tree.'
with self.assertRaisesRegex(ValueError, msg):
root_by_outgroup(tree, outgroup='abcdefg')
# generate unrooted tree
obs = root_by_outgroup(tree, outgroup=['a', 'b'], unroot=True)
# -*- coding: utf-8 -*-
# Copyright © 2016, German Neuroinformatics Node (G-Node)
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted under the terms of the BSD License. See
# LICENSE file in the root of the Project.
import warnings
from numbers import Number
from enum import Enum
from .data_view import DataView
from .data_set import DataSet
from .entity import Entity
from .source_link_container import SourceLinkContainer
from .datatype import DataType
from .dimensions import (Dimension, SampledDimension, RangeDimension,
SetDimension, DataFrameDimension,
DimensionType, DimensionContainer)
from . import util
from .compression import Compression
from .exceptions import InvalidUnit, IncompatibleDimensions
from .section import Section
class DataSliceMode(Enum):
Index = 1
Data = 2
class DataArray(Entity, DataSet):
def __init__(self, nixparent, h5group):
super(DataArray, self).__init__(nixparent, h5group)
self._sources = None
self._dimensions = None
@classmethod
def _create_new(cls, nixparent, h5parent, name, type_, data_type, shape,
compression):
newentity = super(DataArray, cls)._create_new(nixparent, h5parent,
name, type_)
datacompr = False
if compression == Compression.DeflateNormal:
datacompr = True
newentity._h5group.create_dataset("data", shape, data_type, datacompr)
return newentity
def _read_data(self, sl=None):
coeff = self.polynom_coefficients
origin = self.expansion_origin
sup = super(DataArray, self)
if len(coeff) or origin:
if not origin:
origin = 0.0
# when there are coefficients, convert the dtype of the returned
# data array to double
data = sup._read_data(sl).astype(DataType.Double)
util.apply_polynomial(coeff, origin, data)
else:
data = sup._read_data(sl)
return data
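# Note on the calibration above: a minimal worked example, assuming
# util.apply_polynomial evaluates sum(c[i] * (x - origin)**i) in place
# (consistent with the polynom_coefficients/expansion_origin docstrings
# below). With coefficients (0.0, 2.0) and origin 1.0, a stored raw
# value of 3.0 would be returned as 0.0 + 2.0 * (3.0 - 1.0) = 4.0.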
@property
def sources(self):
"""
A property containing all Sources referenced by the DataArray. Sources
can be obtained by index or their id. Sources can be removed from the
list, but removing a referenced Source will not remove it from the
file. New Sources can be added using the append method of the list.
This is a read only attribute.
"""
if self._sources is None:
self._sources = SourceLinkContainer(self)
return self._sources
def append_set_dimension(self, labels=None):
"""
Append a new SetDimension to the list of existing dimension
descriptors.
:returns: The newly created SetDimension.
:rtype: SetDimension
"""
dimgroup = self._h5group.open_group("dimensions")
index = len(dimgroup) + 1
setdim = SetDimension._create_new(dimgroup, index)
if labels:
setdim.labels = labels
if self._parent._parent.time_auto_update:
self.force_updated_at()
return setdim
def append_sampled_dimension(self, sampling_interval, label=None,
unit=None, offset=None):
"""
Append a new SampledDimension to the list of existing dimension
descriptors.
:param sampling_interval: The sampling interval of the SampledDimension
to create.
:type sampling_interval: float
:returns: The newly created SampledDimension.
:rtype: SampledDimension
"""
dimgroup = self._h5group.open_group("dimensions")
index = len(dimgroup) + 1
smpldim = SampledDimension._create_new(dimgroup, index,
sampling_interval)
if label:
smpldim.label = label
if unit:
smpldim.unit = unit
if offset:
smpldim.offset = offset
if self._parent._parent.time_auto_update:
self.force_updated_at()
return smpldim
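# A minimal usage sketch (hypothetical file/block names; File.open,
# create_block and create_data_array are the standard nixio entry
# points, everything else is defined in this class):
#
#     import nixio
#     f = nixio.File.open("example.nix", nixio.FileMode.Overwrite)
#     b = f.create_block("session", "recording")
#     da = b.create_data_array("voltage", "signal", data=[0.1, 0.2, 0.3])
#     dim = da.append_sampled_dimension(0.001, label="time", unit="s")
#     f.close()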
def append_range_dimension(self, ticks, label=None, unit=None):
"""
Append a new RangeDimension to the list of existing dimension
descriptors.
:param ticks: The ticks of the RangeDimension to create.
:type ticks: list of float
:returns: The newly created RangeDimension.
:rtype: RangeDimension
"""
dimgroup = self._h5group.open_group("dimensions")
index = len(dimgroup) + 1
rdim = RangeDimension._create_new(dimgroup, index, ticks)
if label:
rdim.label = label
rdim.unit = unit
if self._parent._parent.time_auto_update:
self.force_updated_at()
return rdim
def append_data_frame_dimension(self, data_frame, column_idx=None):
"""
Append a new DataFrameDimension to the list of existing dimension
descriptors.
:param data_frame: The referenced DataFrame
:type data_frame: nix.DataFrame
:param column_idx: Index of the referenced column of the DataFrame.
The default column determines the default label,
ticks, and unit of this Dimension.
:type column_idx: int or None
:returns: The newly created DataFrameDimension.
:rtype: DataFrameDimension
"""
dimgroup = self._h5group.open_group("dimensions")
index = len(dimgroup) + 1
dfdim = DataFrameDimension._create_new(dimgroup, index,
data_frame, column_idx)
if self._parent._parent.time_auto_update:
self.force_updated_at()
return dfdim
def append_alias_range_dimension(self):
"""
Append a new RangeDimension that uses the data stored in this
DataArray as ticks. This works only(!) if the DataArray is 1-D and
the stored data is numeric. A ValueError will be raised otherwise.
:returns: The created dimension descriptor.
:rtype: RangeDimension
"""
if (len(self.data_extent) > 1 or
not DataType.is_numeric_dtype(self.dtype)):
raise ValueError("AliasRangeDimensions only allowed for 1D "
"numeric DataArrays.")
if self._dimension_count() > 0:
raise ValueError("Cannot append additional alias dimension. "
"There must only be one!")
dimgroup = self._h5group.open_group("dimensions")
# check if existing unit is SI
if self.unit:
u = self.unit
if not (util.units.is_si(u) or util.units.is_compound(u)):
raise InvalidUnit(
"AliasRangeDimensions are only allowed when SI or "
"composites of SI units are used. "
"Current SI unit is {}".format(u),
"DataArray.append_alias_range_dimension"
)
return RangeDimension._create_new_alias(dimgroup, 1, self)
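# Illustrative sketch (hypothetical names): per the checks above, the
# DataArray must be 1-D and numeric, must not have any other dimension
# descriptor yet, and its unit (if set) must be SI or a compound of SI
# units.
#
#     da = b.create_data_array("ticks", "range", data=[1.0, 2.5, 4.0])
#     da.unit = "s"
#     rdim = da.append_alias_range_dimension()  # ticks come from the data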
def delete_dimensions(self):
"""
Delete all the dimension descriptors for this DataArray.
"""
dimgroup = self._h5group.open_group("dimensions")
ndims = len(dimgroup)
for idx in range(ndims):
del dimgroup[str(idx+1)]
return True
def _dimension_count(self):
return len(self._h5group.open_group("dimensions"))
def _get_dimension_by_pos(self, index):
h5dim = self._h5group.open_group("dimensions").open_group(str(index))
dimtype = h5dim.get_attr("dimension_type")
if DimensionType(dimtype) == DimensionType.Sample:
return SampledDimension(h5dim, index)
elif DimensionType(dimtype) == DimensionType.Range:
return RangeDimension(h5dim, index)
elif DimensionType(dimtype) == DimensionType.Set:
return SetDimension(h5dim, index)
else:
raise TypeError("Invalid Dimension object in file.")
def iter_dimensions(self):
"""
1-based index dimension iterator. The method returns a generator
which returns the index starting from one and the dimensions.
"""
for idx, dim in enumerate(self.dimensions):
yield idx+1, dim
@property
def dtype(self):
"""
The data type of the data stored in the DataArray.
This is a read only property.
:return: DataType
"""
return self._h5group.group["data"].dtype
@property
def polynom_coefficients(self):
"""
The polynomial coefficients for the calibration. By default this is
set to {0.0, 1.0} for a linear calibration with zero offset.
This is a read-write property and can be set to None.
:type: list of float
"""
return tuple(self._h5group.get_data("polynom_coefficients"))
@polynom_coefficients.setter
def polynom_coefficients(self, coeff):
if not coeff:
if self._h5group.has_data("polynom_coefficients"):
del self._h5group["polynom_coefficients"]
else:
dtype = DataType.Double
self._h5group.write_data("polynom_coefficients", coeff, dtype)
if self._parent._parent.time_auto_update:
self.force_updated_at()
@property
def expansion_origin(self):
"""
The expansion origin of the calibration polynomial.
This is a read-write property and can be set to None.
The default value is 0.
:type: float
"""
return self._h5group.get_attr("expansion_origin")
@expansion_origin.setter
def expansion_origin(self, eo):
util.check_attr_type(eo, Number)
self._h5group.set_attr("expansion_origin", eo)
if self._parent._parent.time_auto_update:
self.force_updated_at()
@property
def label(self):
"""
The label of the DataArray. The label corresponds to the label of the
x-axis of a plot. This is a read-write property and can be set to
None.
:type: str
"""
return self._h5group.get_attr("label")
@label.setter
def label(self, l):
util.check_attr_type(l, str)
self._h5group.set_attr("label", l)
if self._parent._parent.time_auto_update:
self.force_updated_at()
@property
def unit(self):
"""
The unit of the values stored in the DataArray. This is a read-write
property and can be set to None.
:type: str
"""
return self._h5group.get_attr("unit")
@unit.setter
def unit(self, u):
if u:
u = util.units.sanitizer(u)
if u == "":
u = None
util.check_attr_type(u, str)
if (self._dimension_count() == 1 and
self.dimensions[0].dimension_type == DimensionType.Range and
self.dimensions[0].is_alias and u is not None):
if not (util.units.is_si(u) or util.units.is_compound(u)):
raise InvalidUnit(
"[{}]: Non-SI units are not allowed if the DataArray "
"has an AliasRangeDimension.".format(u),
"DataArray.unit"
)
self._h5group.set_attr("unit", u)
if self._parent._parent.time_auto_update:
self.force_updated_at()
def get_slice(self, positions, extents=None, mode=DataSliceMode.Index):
datadim = len(self.shape)
if not len(positions) == datadim:
raise IncompatibleDimensions(
"Number of positions given ({}) does not match "
"number of data dimensions ({})".format(
len(positions), datadim
),
"DataArray.get_slice"
)
if extents and not len(extents) == datadim:
raise IncompatibleDimensions(
"Number of extents given ({}) does not match "
"number of data dimensions ({})".format(
len(extents), datadim
),
"DataArray.get_slice"
)
if mode == DataSliceMode.Index:
sl = tuple(slice(p, p+e) for p, e in zip(positions, extents))
return DataView(self, sl)
elif mode == DataSliceMode.Data:
return self._get_slice_bydim(positions, extents)
else:
raise ValueError("Invalid slice mode specified. "
"Supported modes are DataSliceMode.Index and "
"DataSliceMode.Data")
def _get_slice_bydim(self, positions, extents):
dpos, dext = [], []
for dim, pos, ext in zip(self.dimensions, positions, extents):
if dim.dimension_type in (DimensionType.Sample,
DimensionType.Range):
dpos.append(dim.index_of(pos))
dext.append(dim.index_of(pos+ext)-dpos[-1])
elif dim.dimension_type == DimensionType.Set:
dpos.append(int(pos))
dext.append(int(ext))
sl = tuple(slice(p, p+e) for p, e in zip(dpos, dext))
return DataView(self, sl)
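# Usage sketch for the two slicing modes (hypothetical 1-D DataArray
# `da` with a SampledDimension of interval 0.001):
#
#     # Index mode: positions/extents are plain array indices.
#     v1 = da.get_slice((10,), extents=(100,), mode=DataSliceMode.Index)
#     # Data mode: positions/extents are in dimension units (here the
#     # sampled axis), converted to indices via Dimension.index_of as
#     # in _get_slice_bydim above.
#     v2 = da.get_slice((0.01,), extents=(0.1,), mode=DataSliceMode.Data)
#     data = v1[:]  # DataView supports numpy-style indexing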
@property
def data(self):
"""
DEPRECATED DO NOT USE ANYMORE! Returns self
:type: :class:`~nixio.data_array.DataArray`
"""
warnings.warn("Call to deprecated property DataArray.data",
category=DeprecationWarning)
return self
@property
def dimensions(self):
"""
A property containing all dimensions of a DataArray. Dimensions can be
obtained via their index. Adding dimensions is done using the
respective append methods for dimension descriptors.
This is a read only attribute.
:type: Container of dimension descriptors.
"""
if self._dimensions is None:
self._dimensions = DimensionContainer("dimensions", self,
Dimension)
return self._dimensions
# metadata
@property
def metadata(self):
"""
Associated metadata of the entity. Sections attached to the entity via
this attribute can provide additional annotations. This is an optional
read-write property, and can be None if no metadata is available.
:type: Section
"""
if "metadata" | |
"""Complex msg exchange scenarios"""
import misc
import srv_msg
from forge_cfg import world
def _to_list(val):
if val is not None:
if not isinstance(val, list):
return [val]
return val
#########################################################################
# DHCPv4
def _send_discover(chaddr=None, client_id=None, giaddr=None, req_opts=None):
if chaddr is not None:
srv_msg.client_sets_value('Client', 'chaddr', chaddr)
if client_id is not None:
srv_msg.client_does_include_with_value('client_id', client_id)
if giaddr is not None:
srv_msg.network_variable('source_port', '67')
srv_msg.network_variable('source_address', giaddr)
srv_msg.network_variable('destination_address', '$(SRV4_ADDR)')
srv_msg.client_sets_value('Client', 'giaddr', giaddr)
if req_opts:
for opt in req_opts:
srv_msg.client_requests_option(opt)
srv_msg.client_send_msg('DISCOVER')
def send_discover_with_no_answer(chaddr=None, client_id=None, giaddr=None):
misc.test_procedure()
_send_discover(chaddr=chaddr, client_id=client_id, giaddr=giaddr)
srv_msg.send_wait_for_message("MUST", False, "None")
def rebind_with_ack_answer(ciaddr):
misc.test_procedure()
srv_msg.client_sets_value('Client', 'ciaddr', ciaddr)
srv_msg.client_send_msg('REQUEST')
srv_msg.send_wait_for_message('MUST', None, 'ACK')
srv_msg.response_check_include_option('Response', None, '54')
srv_msg.response_check_option_content('Response', '54', None, 'value', '$(SRV4_ADDR)')
# TODO: what else should be checked
def rebind_with_nak_answer(chaddr=None, client_id=None, ciaddr=None):
misc.test_procedure()
if chaddr is not None:
srv_msg.client_sets_value('Client', 'chaddr', chaddr)
if client_id is not None:
srv_msg.client_does_include_with_value('client_id', client_id)
if ciaddr is not None:
srv_msg.client_sets_value('Client', 'ciaddr', ciaddr)
srv_msg.client_send_msg('REQUEST')
srv_msg.send_wait_for_message('MUST', None, 'NAK')
srv_msg.response_check_include_option('Response', None, '54')
srv_msg.response_check_option_content('Response', '54', None, 'value', '$(SRV4_ADDR)')
def send_decline4(requested_addr):
misc.test_procedure()
# srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:22')
# srv_msg.client_does_include_with_value('client_id', '00010203040122')
srv_msg.client_copy_option('server_id')
srv_msg.client_sets_value('Client', 'ciaddr', '0.0.0.0')
srv_msg.client_does_include_with_value('requested_addr', requested_addr)
srv_msg.client_send_msg('DECLINE')
misc.pass_criteria()
srv_msg.send_dont_wait_for_message()
def send_discover_and_check_offer(
chaddr=None, client_id=None, giaddr=None, req_opts=None,
exp_yiaddr=None, exp_client_id=None,
exp_next_server=None, exp_server_hostname=None, exp_boot_file_name=None, exp_option=None, no_exp_option=None,
no_exp_boot_file_name=None):
# send DISCOVER
misc.test_procedure()
_send_discover(chaddr=chaddr, client_id=client_id, giaddr=giaddr, req_opts=req_opts)
# check OFFER
msgs = srv_msg.send_wait_for_message('MUST', None, 'OFFER')
rcvd_yiaddr = msgs[0].yiaddr
if exp_yiaddr is not None:
assert rcvd_yiaddr == exp_yiaddr
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '54')
srv_msg.response_check_option_content('Response', '1', None, 'value', '255.255.255.0')
srv_msg.response_check_option_content('Response', '54', None, 'value', '$(SRV4_ADDR)')
if exp_option:
for opt in exp_option:
srv_msg.response_check_option_content('Response', opt.get("code"), None, 'value', opt.get("data"))
if no_exp_option:
for opt in no_exp_option:
srv_msg.response_check_include_option('Response', 'NOT ', opt.get("code"))
if exp_client_id is not None:
if exp_client_id == 'missing':
srv_msg.response_check_include_option('Response', 'NOT ', '61')
else:
srv_msg.response_check_include_option('Response', None, '61')
srv_msg.response_check_option_content('Response', '61', None, 'value', exp_client_id)
if exp_next_server is not None:
srv_msg.response_check_content('Response', None, 'siaddr', exp_next_server)
if exp_server_hostname is not None:
srv_msg.response_check_content('Response', None, 'sname', exp_server_hostname)
if exp_boot_file_name is not None:
srv_msg.response_check_content('Response', None, 'file', exp_boot_file_name)
if no_exp_boot_file_name is not None:
srv_msg.response_check_content('Response', 'NOT ', 'file', no_exp_boot_file_name)
return rcvd_yiaddr
def send_request_and_check_ack(
chaddr=None, client_id=None, requested_addr=None, ciaddr=None, server_id=None, req_opts=None,
exp_lease_time=None, exp_renew_timer=None, exp_rebind_timer=None,
exp_yiaddr=None, exp_client_id=None,
exp_next_server=None, exp_server_hostname=None, exp_boot_file_name=None,
exp_option=None, no_exp_option=None, no_exp_boot_file_name=None):
# send REQUEST
misc.test_procedure()
if chaddr is not None:
srv_msg.client_sets_value('Client', 'chaddr', chaddr)
if client_id is not None:
srv_msg.client_does_include_with_value('client_id', client_id)
if server_id is not None:
srv_msg.client_copy_option('server_id')
if requested_addr is not None:
srv_msg.client_does_include_with_value('requested_addr', requested_addr)
if ciaddr is not None:
srv_msg.client_sets_value('Client', 'ciaddr', ciaddr)
if req_opts:
for opt in req_opts:
srv_msg.client_requests_option(opt)
srv_msg.client_send_msg('REQUEST')
# check ACK
srv_msg.send_wait_for_message('MUST', None, 'ACK')
if exp_yiaddr is not None:
exp_addr = exp_yiaddr
elif requested_addr is not None:
exp_addr = requested_addr
elif ciaddr is not None:
exp_addr = ciaddr
else:
exp_addr = None
if exp_addr is not None:
srv_msg.response_check_content('Response', None, 'yiaddr', exp_addr)
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_option_content('Response', '1', None, 'value', '255.255.255.0')
srv_msg.response_check_include_option('Response', None, '54')
srv_msg.response_check_option_content('Response', '54', None, 'value', '$(SRV4_ADDR)')
srv_msg.response_check_include_option('Response', None, '51')
if exp_lease_time is not None:
srv_msg.response_check_option_content('Response', '51', None, 'value', exp_lease_time)
if exp_renew_timer is not None:
missing = 'NOT ' if exp_renew_timer == 'missing' else None
srv_msg.response_check_include_option('Response', missing, '58')
if not missing:
srv_msg.response_check_option_content('Response', '58', None, 'value', exp_renew_timer)
if exp_rebind_timer is not None:
missing = 'NOT ' if exp_rebind_timer == 'missing' else None
srv_msg.response_check_include_option('Response', missing, '59')
if not missing:
srv_msg.response_check_option_content('Response', '59', None, 'value', exp_rebind_timer)
if exp_client_id is not None:
if exp_client_id == 'missing':
srv_msg.response_check_include_option('Response', 'NOT ', '61')
else:
srv_msg.response_check_include_option('Response', None, '61')
srv_msg.response_check_option_content('Response', '61', None, 'value', exp_client_id)
if no_exp_boot_file_name is not None:
srv_msg.response_check_content('Response', 'NOT ', 'file', no_exp_boot_file_name)
if exp_next_server is not None:
srv_msg.response_check_content('Response', None, 'siaddr', exp_next_server)
if exp_server_hostname is not None:
srv_msg.response_check_content('Response', None, 'sname', exp_server_hostname)
if exp_boot_file_name is not None:
srv_msg.response_check_content('Response', None, 'file', exp_boot_file_name)
if exp_option:
for opt in exp_option:
srv_msg.response_check_option_content('Response', opt.get("code"), None, 'value', opt.get("data"))
if no_exp_option:
for opt in no_exp_option:
srv_msg.response_check_include_option('Response', 'NOT ', opt.get("code"))
def get_address4(chaddr=None, client_id=None, giaddr=None, req_opts=None,
exp_yiaddr=None, exp_lease_time=None, exp_renew_timer=None, exp_rebind_timer=None,
exp_client_id=None,
exp_next_server=None, exp_server_hostname=None, exp_boot_file_name=None, exp_option=None,
no_exp_option=None, no_exp_boot_file_name=None):
# send DISCOVER and check OFFER
rcvd_yiaddr = send_discover_and_check_offer(
chaddr=chaddr, client_id=client_id, giaddr=giaddr, req_opts=_to_list(req_opts),
exp_yiaddr=exp_yiaddr, exp_client_id=exp_client_id,
exp_next_server=exp_next_server, exp_server_hostname=exp_server_hostname, exp_boot_file_name=exp_boot_file_name,
exp_option=_to_list(exp_option), no_exp_option=_to_list(no_exp_option),
no_exp_boot_file_name=no_exp_boot_file_name)
# send REQUEST and check ACK
send_request_and_check_ack(
chaddr=chaddr, client_id=client_id, requested_addr=rcvd_yiaddr, server_id=True, req_opts=_to_list(req_opts),
exp_lease_time=exp_lease_time, exp_renew_timer=exp_renew_timer, exp_rebind_timer=exp_rebind_timer,
exp_client_id=exp_client_id,
exp_next_server=exp_next_server, exp_server_hostname=exp_server_hostname, exp_boot_file_name=exp_boot_file_name,
exp_option=_to_list(exp_option), no_exp_option=_to_list(no_exp_option),
no_exp_boot_file_name=no_exp_boot_file_name)
return rcvd_yiaddr
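# Illustrative sketch of how the v4 helpers above compose into a test
# (hypothetical test name and values; assumes a Kea subnet whose
# subnet-mask is 255.255.255.0, which the helpers hard-check, and a
# valid-lifetime of 4000):
def _example_test_dora_exchange():
    yiaddr = get_address4(chaddr='ff:01:02:03:ff:04',
                          exp_lease_time=4000)
    # The acquired lease can then be declined to exercise
    # decline/reclamation paths.
    send_decline4(yiaddr)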
#########################################################################
# DHCPv6
DHCPv6_STATUS_CODES = {
'Success': '0',
'UnspecFail': '1',
'NoAddrsAvail': '2',
'NoBinding': '3',
'NotOnLink': '4',
'UseMulticast': '5'
}
def _check_ia_na_options(exp_ia_na_t1,
exp_ia_na_t2,
exp_ia_na_status_code,
exp_ia_na_iaaddr_addr,
exp_ia_na_iaaddr_preflft,
exp_ia_na_iaaddr_validlft):
srv_msg.response_check_include_option('Response', None, 'IA_NA')
# check IA_NA
if exp_ia_na_t1 is not None:
srv_msg.response_check_option_content('Response', 'IA_NA', None, 'T1', exp_ia_na_t1)
if exp_ia_na_t2 is not None:
srv_msg.response_check_option_content('Response', 'IA_NA', None, 'T2', exp_ia_na_t2)
# check IA_NA/status_code
if exp_ia_na_status_code is not None:
if exp_ia_na_status_code in DHCPv6_STATUS_CODES:
exp_ia_na_status_code = DHCPv6_STATUS_CODES[exp_ia_na_status_code]
elif not exp_ia_na_status_code.isdigit():
raise Exception("exp_ia_na_status_code value '%s' should be a digit or status code name" % exp_ia_na_status_code)
srv_msg.response_check_option_content('Response', 'IA_NA', None, 'sub-option', 'status-code')
srv_msg.response_check_suboption_content('Response', 'status-code', 'IA_NA', None, 'statuscode', exp_ia_na_status_code)
# check IA_NA/IA_address
if exp_ia_na_iaaddr_addr is not None or exp_ia_na_iaaddr_validlft is not None or exp_ia_na_iaaddr_preflft is not None:
srv_msg.response_check_option_content('Response', 'IA_NA', None, 'sub-option', 'IA_address')
if exp_ia_na_iaaddr_addr is not None:
srv_msg.response_check_suboption_content('Response', 'IA_address', 'IA_NA', None, 'addr', exp_ia_na_iaaddr_addr)
if exp_ia_na_iaaddr_preflft is not None:
srv_msg.response_check_suboption_content('Response', 'IA_address', 'IA_NA', None, 'preflft', exp_ia_na_iaaddr_preflft)
if exp_ia_na_iaaddr_validlft is not None:
srv_msg.response_check_suboption_content('Response', 'IA_address', 'IA_NA', None, 'validlft', exp_ia_na_iaaddr_validlft)
def _check_ia_pd_options(exp_ia_pd_iaprefix_prefix=None,
exp_ia_pd_iaprefix_plen=None):
# IA-PD checks
srv_msg.response_check_include_option('Response', None, 'IA_PD')
if exp_ia_pd_iaprefix_prefix is not None:
srv_msg.response_check_option_content('Response', 'IA_PD', None, 'sub-option', 'IA-Prefix')
srv_msg.response_check_suboption_content('Response', 'IA-Prefix', 'IA_PD', None, 'prefix', exp_ia_pd_iaprefix_prefix)
if exp_ia_pd_iaprefix_plen is not None:
srv_msg.response_check_option_content('Response', 'IA_PD', None, 'sub-option', 'IA-Prefix')
srv_msg.response_check_suboption_content('Response', 'IA-Prefix', 'IA_PD', None, 'plen', exp_ia_pd_iaprefix_plen)
def _send_and_check_response(req_ia,
exp_msg_type,
exp_ia_na_t1,
exp_ia_na_t2,
exp_ia_na_status_code,
exp_ia_na_iaaddr_addr,
exp_ia_na_iaaddr_preflft,
exp_ia_na_iaaddr_validlft,
exp_ia_pd_iaprefix_prefix,
exp_ia_pd_iaprefix_plen,
exp_rapid_commit,
exp_option,
no_exp_option):
msgs = srv_msg.send_wait_for_message('MUST', None, exp_msg_type)
if exp_msg_type == 'RELAYREPLY':
srv_msg.response_check_include_option('Response', None, 'relay-msg')
srv_msg.response_check_option_content('Response', 'relay-msg', None, 'Relayed', 'Message')
if req_ia == 'IA-NA':
_check_ia_na_options(exp_ia_na_t1,
exp_ia_na_t2,
exp_ia_na_status_code,
exp_ia_na_iaaddr_addr,
exp_ia_na_iaaddr_preflft,
exp_ia_na_iaaddr_validlft)
if req_ia == 'IA-PD':
_check_ia_pd_options(exp_ia_pd_iaprefix_prefix,
exp_ia_pd_iaprefix_plen)
if exp_rapid_commit:
srv_msg.response_check_include_option('Response', None, 'rapid_commit')
if exp_option:
for opt in exp_option:
srv_msg.response_check_option_content('Response', opt.get("code"), None, 'value', opt.get("data"))
if no_exp_option:
for opt in no_exp_option:
srv_msg.response_check_include_option('Response', 'NOT ', opt.get("code"))
def send_solicit_and_check_response(duid=None, relay_addr=None, req_ia='IA-NA', rapid_commit=False,
interface_id=None,
exp_ia_na_t1=None,
exp_ia_na_t2=None,
exp_ia_na_status_code=None,
exp_ia_na_iaaddr_addr=None,
exp_ia_na_iaaddr_preflft=None,
exp_ia_na_iaaddr_validlft=None,
exp_ia_pd_iaprefix_prefix=None,
exp_ia_pd_iaprefix_plen=None,
req_opts=None,
exp_option=None,
no_exp_option=None):
# send SOLICIT
misc.test_procedure()
srv_msg.client_requests_option('1')
if duid is not None:
srv_msg.client_sets_value('Client', 'DUID', duid)
#if client_id is not None:
# srv_msg.client_does_include_with_value('client_id', client_id)
srv_msg.client_does_include('Client', None, 'client-id')
if req_ia is not None:
srv_msg.client_does_include('Client', None, req_ia)
if req_opts is not None:
for opt in req_opts:
srv_msg.client_requests_option(opt)
if rapid_commit:
srv_msg.client_does_include('Client', None, 'rapid-commit')
srv_msg.client_send_msg('SOLICIT')
# add relay agent stuff
if relay_addr is not None:
srv_msg.client_sets_value('RelayAgent', 'linkaddr', relay_addr)
if interface_id is not None:
srv_msg.client_sets_value('RelayAgent', 'ifaceid', interface_id)
srv_msg.client_does_include('RelayAgent', None, 'interface-id')
if relay_addr is not None or interface_id is not None:
srv_msg.create_relay_forward()
# check response
if relay_addr is not None or interface_id is not None:
exp_msg_type = 'RELAYREPLY'
elif rapid_commit:
exp_msg_type = 'REPLY'
else:
exp_msg_type = 'ADVERTISE'
_send_and_check_response(req_ia,
exp_msg_type,
exp_ia_na_t1,
exp_ia_na_t2,
exp_ia_na_status_code,
exp_ia_na_iaaddr_addr,
exp_ia_na_iaaddr_preflft,
exp_ia_na_iaaddr_validlft,
exp_ia_pd_iaprefix_prefix,
exp_ia_pd_iaprefix_plen,
rapid_commit,
exp_option,
no_exp_option)
# srv_msg.response_check_include_option('Response', None, '1')
# srv_msg.response_check_include_option('Response', None, '54')
# srv_msg.response_check_option_content('Response', '1', None, 'value', '255.255.255.0')
# srv_msg.response_check_option_content('Response', '54', None, 'value', '$(SRV4_ADDR)')
# if exp_client_id is not None:
# if exp_client_id == 'missing':
# srv_msg.response_check_include_option('Response', 'NOT ', '61')
# else:
# srv_msg.response_check_include_option('Response', None, '61')
# srv_msg.response_check_option_content('Response', '61', None, 'value', exp_client_id)
#return rcvd_yiaddr
return None
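# Illustrative v6 counterpart (hypothetical DUID and address; assumes a
# configured IA_NA pool containing 2001:db8:1::5): SOLICIT/ADVERTISE
# followed by REQUEST/REPLY, reusing the IA_NA and server-id from the
# previous exchange via client_copy_option.
def _example_test_sarr_exchange():
    duid = '00:03:00:01:f6:f5:f4:f3:f2:01'
    send_solicit_and_check_response(duid=duid,
                                    exp_ia_na_iaaddr_addr='2001:db8:1::5')
    send_request_and_check_reply(duid=duid, req_ia='IA-NA',
                                 exp_ia_na_iaaddr_addr='2001:db8:1::5')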
def send_request_and_check_reply(duid=None,
req_ia=None,
interface_id=None,
exp_ia_na_t1=None,
exp_ia_na_t2=None,
exp_ia_na_status_code=None,
exp_ia_na_iaaddr_addr=None,
exp_ia_na_iaaddr_preflft=None,
exp_ia_na_iaaddr_validlft=None,
exp_ia_pd_iaprefix_prefix=None,
exp_ia_pd_iaprefix_plen=None,
req_opts=None,
exp_option=None,
no_exp_option=None):
# send REQUEST
misc.test_procedure()
world.sender_type = "Client"
if duid is not None:
srv_msg.client_sets_value('Client', 'DUID', duid)
# if client_id is not None:
# srv_msg.client_does_include_with_value('client_id', client_id)
# if server_id is not None:
# srv_msg.client_copy_option('server_id')
# if requested_addr is not None:
# srv_msg.client_does_include_with_value('requested_addr', requested_addr)
# if ciaddr is not None:
# srv_msg.client_sets_value('Client', 'ciaddr', ciaddr)
# srv_msg.client_requests_option('1')
if req_ia == 'IA-NA':
srv_msg.client_copy_option('IA_NA')
if req_ia == 'IA-PD':
srv_msg.client_copy_option('IA_PD')
srv_msg.client_copy_option('server-id')
#srv_msg.client_save_option('server-id')
#srv_msg.client_add_saved_option('DONT ')
srv_msg.client_does_include('Client', None, 'client-id')
if req_opts is not None:
for opt in req_opts:
srv_msg.client_requests_option(opt)
srv_msg.client_send_msg('REQUEST')
if interface_id is not None:
srv_msg.client_sets_value('RelayAgent', 'ifaceid', interface_id)
srv_msg.client_does_include('RelayAgent', None, 'interface-id')
srv_msg.create_relay_forward()
# srv_msg.response_check_include_option('Response', None, '1')
# srv_msg.response_check_option_content('Response', '1', None, 'value', '255.255.255.0')
# srv_msg.response_check_include_option('Response', None, '54')
# srv_msg.response_check_option_content('Response', '54', None, 'value', '$(SRV4_ADDR)')
# srv_msg.response_check_include_option('Response', None, '51')
# if exp_renew_timer is not None:
# missing = 'NOT ' if exp_renew_timer == 'missing' else None
# srv_msg.response_check_include_option('Response', missing, '58')
# if not missing:
# srv_msg.response_check_option_content('Response', '58', None, 'value', exp_renew_timer)
# if exp_rebind_timer is not None:
# missing = 'NOT ' if exp_rebind_timer == 'missing' else None
# srv_msg.response_check_include_option('Response', missing, '59')
# if not missing:
# srv_msg.response_check_option_content('Response', '59', None, 'value', exp_rebind_timer)
# if exp_client_id is not None:
# if exp_client_id == 'missing':
# srv_msg.response_check_include_option('Response', 'NOT ', '61')
if redirects_remaining > 0:
location = (response.getheader('Location')
or response.getheader('location'))
if location is not None:
m = re.compile(r'[\?\&]gsessionid=(\w*)').search(location)
if m is not None:
self.__gsessionid = m.group(1)
# Make a recursive call with the gsession ID in the URI to follow
# the redirect.
return self.request(method=method, uri=uri, auth_token=auth_token,
http_request=http_request, converter=converter,
desired_class=desired_class,
redirects_remaining=redirects_remaining-1,
**kwargs)
else:
raise error_from_response('302 received without Location header',
response, RedirectError)
else:
raise error_from_response('Too many redirects from server',
response, RedirectError)
elif response.status == 401:
raise error_from_response('Unauthorized - Server responded with',
response, Unauthorized)
elif response.status == 304:
raise error_from_response('Entry Not Modified - Server responded with',
response, NotModified)
# If the server's response was not a 200, 201, 302, or 401, raise an
# exception.
else:
raise error_from_response('Server responded with', response,
RequestError)
Request = request
def request_client_login_token(
self, email, password, source, service=None,
account_type='HOSTED_OR_GOOGLE',
auth_url=atom.http_core.Uri.parse_uri(
'https://www.google.com/accounts/ClientLogin'),
captcha_token=None, captcha_response=None):
service = service or self.auth_service
# Set the target URL.
http_request = atom.http_core.HttpRequest(uri=auth_url, method='POST')
http_request.add_body_part(
gdata.gauth.generate_client_login_request_body(email=email,
password=password, service=service, source=source,
account_type=account_type, captcha_token=captcha_token,
captcha_response=captcha_response),
'application/x-www-form-urlencoded')
# Use the underlying http_client to make the request.
response = self.http_client.request(http_request)
response_body = response.read()
if response.status == 200:
token_string = gdata.gauth.get_client_login_token_string(response_body)
if token_string is not None:
return gdata.gauth.ClientLoginToken(token_string)
else:
raise ClientLoginTokenMissing(
'Received a 200 response to client login request,'
' but no token was present. %s' % (response_body,))
elif response.status == 403:
captcha_challenge = gdata.gauth.get_captcha_challenge(response_body)
if captcha_challenge:
challenge = CaptchaChallenge('CAPTCHA required')
challenge.captcha_url = captcha_challenge['url']
challenge.captcha_token = captcha_challenge['token']
raise challenge
elif response_body.splitlines()[0] == 'Error=BadAuthentication':
raise BadAuthentication('Incorrect username or password')
else:
raise error_from_response('Server responded with a 403 code',
response, RequestError, response_body)
elif response.status == 302:
# Google tries to redirect all bad URLs back to
# http://www.google.<locale>. If a redirect
# attempt is made, assume the user has supplied an incorrect
# authentication URL
raise error_from_response('Server responded with a redirect',
response, BadAuthenticationServiceURL,
response_body)
else:
raise error_from_response('Server responded to ClientLogin request',
response, ClientLoginFailed, response_body)
RequestClientLoginToken = request_client_login_token
def client_login(self, email, password, source, service=None,
account_type='HOSTED_OR_GOOGLE',
auth_url=atom.http_core.Uri.parse_uri(
'https://www.google.com/accounts/ClientLogin'),
captcha_token=None, captcha_response=None):
"""Performs an auth request using the user's email address and password.
In order to modify user specific data and read user private data, your
application must be authorized by the user. One way to demonstrate
authorization is by including a Client Login token in the Authorization
HTTP header of all requests. This method requests the Client Login token
by sending the user's email address, password, the name of the
application, and the service code for the service which will be accessed
by the application. If the username and password are correct, the server
will respond with the client login code and a new ClientLoginToken
object will be set in the client's auth_token member. With the auth_token
set, future requests from this client will include the Client Login
token.
For a list of service names, see
http://code.google.com/apis/gdata/faq.html#clientlogin
For more information on Client Login, see:
http://code.google.com/apis/accounts/docs/AuthForInstalledApps.html
Args:
email: str The user's email address or username.
password: str The password for the user's account.
source: str The name of your application. This can be anything you
like but should give some indication of which app is
making the request.
service: str The service code for the service you would like to access.
For example, 'cp' for contacts, 'cl' for calendar. For a full
list see
http://code.google.com/apis/gdata/faq.html#clientlogin
If you are using a subclass of the gdata.client.GDClient, the
service will usually be filled in for you so you do not need
to specify it. For example see BloggerClient,
SpreadsheetsClient, etc.
account_type: str (optional) The type of account which is being
authenticated. This can be either 'GOOGLE' for a Google
Account, 'HOSTED' for a Google Apps Account, or the
default 'HOSTED_OR_GOOGLE' which will select the Google
Apps Account if the same email address is used for both
a Google Account and a Google Apps Account.
auth_url: str (optional) The URL to which the login request should be
sent.
captcha_token: str (optional) If a previous login attempt was responded
to with a CAPTCHA challenge, this is the token which
identifies the challenge (from the CAPTCHA's URL).
captcha_response: str (optional) If a previous login attempt was
responded to with a CAPTCHA challenge, this is the
response text which was contained in the challenge.
Returns:
None
Raises:
A RequestError or one of its subclasses: BadAuthentication,
BadAuthenticationServiceURL, ClientLoginFailed,
ClientLoginTokenMissing, or CaptchaChallenge
"""
service = service or self.auth_service
self.auth_token = self.request_client_login_token(email, password,
source, service=service, account_type=account_type, auth_url=auth_url,
captcha_token=captcha_token, captcha_response=captcha_response)
ClientLogin = client_login
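# Typical flow, as a hedged sketch (hypothetical credentials; 'cp' is
# the contacts service code per the FAQ linked above, and ContactsClient
# is one of the subclasses mentioned in the docstring):
#
#     client = gdata.contacts.client.ContactsClient()
#     client.client_login('user@example.com', 'password',
#                         source='example-app-v1', service='cp')
#     # Subsequent client.request(...) calls now carry the token.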
def upgrade_token(self, token=None, url=atom.http_core.Uri.parse_uri(
'https://www.google.com/accounts/AuthSubSessionToken')):
"""Asks the Google auth server for a multi-use AuthSub token.
For details on AuthSub, see:
http://code.google.com/apis/accounts/docs/AuthSub.html
Args:
token: gdata.gauth.AuthSubToken or gdata.gauth.SecureAuthSubToken
(optional) If no token is passed in, the client's auth_token member
is used to request the new token. The token object will be modified
to contain the new session token string.
url: str or atom.http_core.Uri (optional) The URL to which the token
upgrade request should be sent. Defaults to:
https://www.google.com/accounts/AuthSubSessionToken
Returns:
The upgraded gdata.gauth.AuthSubToken object.
"""
# Default to using the auth_token member if no token is provided.
if token is None:
token = self.auth_token
# We cannot upgrade a None token.
if token is None:
raise UnableToUpgradeToken('No token was provided.')
if not isinstance(token, gdata.gauth.AuthSubToken):
raise UnableToUpgradeToken(
'Cannot upgrade the token because it is not an AuthSubToken object.')
http_request = atom.http_core.HttpRequest(uri=url, method='GET')
token.modify_request(http_request)
# Use the lower level HttpClient to make the request.
response = self.http_client.request(http_request)
if response.status == 200:
token._upgrade_token(response.read())
return token
else:
raise UnableToUpgradeToken(
'Server responded to token upgrade request with %s: %s' % (
response.status, response.read()))
UpgradeToken = upgrade_token
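# Sketch of upgrading a single-use AuthSub token (assumes auth_token
# was previously set from the token carried in the redirect URL):
#
#     client.auth_token = gdata.gauth.AuthSubToken(single_use_token_str)
#     session_token = client.upgrade_token()
#     # client.auth_token is modified in place and is now multi-use.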
def get_oauth_token(self, scopes, next, consumer_key, consumer_secret=None,
rsa_private_key=None,
url=gdata.gauth.REQUEST_TOKEN_URL):
"""Obtains an OAuth request token to allow the user to authorize this app.
Once this client has a request token, the user can authorize the request
token by visiting the authorization URL in their browser. After being
redirected back to this app at the 'next' URL, this app can then exchange
the authorized request token for an access token.
For more information see the documentation on Google Accounts with OAuth:
http://code.google.com/apis/accounts/docs/OAuth.html#AuthProcess
Args:
scopes: list of strings or atom.http_core.Uri objects which specify the
URL prefixes which this app will be accessing. For example, to access
the Google Calendar API, you would want to use scopes:
['https://www.google.com/calendar/feeds/',
'http://www.google.com/calendar/feeds/']
next: str or atom.http_core.Uri object, The URL which the user's browser
should be sent to after they authorize access to their data. This
should be a URL in your application which will read the token
information from the URL and upgrade the request token to an access
token.
consumer_key: str This is the identifier for this application which you
should have received when you registered your application with Google
to use OAuth.
consumer_secret: str (optional) The shared secret between your app and
Google which provides evidence that this request is coming from you
application and not another app. If present, this library assumes
you want to use an HMAC signature to verify requests. Keep this data
a secret.
rsa_private_key: str (optional) The RSA private key which is used to
generate a digital signature which is checked by Google's server. If
present, this library assumes that you want to use an RSA signature
to verify requests. Keep this data a secret.
url: The URL to which a request for a token should be made. The default
is Google's OAuth request token provider.
"""
http_request = None
if rsa_private_key is not None:
http_request = gdata.gauth.generate_request_for_request_token(
consumer_key, gdata.gauth.RSA_SHA1, scopes,
rsa_key=rsa_private_key, auth_server_url=url, next=next)
elif consumer_secret is not None:
http_request = gdata.gauth.generate_request_for_request_token(
consumer_key, gdata.gauth.HMAC_SHA1, scopes,
consumer_secret=consumer_secret, auth_server_url=url, next=next)
else:
raise MissingOAuthParameters(
'To request an OAuth token, you must provide your consumer secret'
' or your private RSA key.')
response = self.http_client.request(http_request)
response_body = response.read()
if response.status != 200:
raise error_from_response('Unable to obtain OAuth request token',
response, RequestError, response_body)
if rsa_private_key is not None:
return gdata.gauth.rsa_token_from_body(response_body, consumer_key,
rsa_private_key,
gdata.gauth.REQUEST_TOKEN)
elif consumer_secret is not None:
return gdata.gauth.hmac_token_from_body(response_body, consumer_key,
                                        consumer_secret,
                                        gdata.gauth.REQUEST_TOKEN)
| |
0, 0, 0, 0],
[1498, 24.278857, 0, 9999, -9999, 1.0, 100, 1, 105.800802, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1499, 0.328342, 0, 9999, -9999, 1.0, 100, 1, 2.286676, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1500, 0.094494, 0, 9999, -9999, 1.0, 100, 1, 0.154817, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1501, 2.807819, 0, 9999, -9999, 1.0, 100, 1, 8.165333, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1502, 0.050659, 0, 9999, -9999, 1.0, 100, 1, 0.938928, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1503, 10.373562, 0, 9999, -9999, 1.0, 100, 1, 45.972187, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1504, 73.931215, 0, 9999, -9999, 1.0, 100, 1, 188.822836, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1505, 2.696704, 0, 9999, -9999, 1.0, 100, 1, 26.765913, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1506, 4.821477, 0, 9999, -9999, 1.0, 100, 1, 56.406717, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1507, 1.284476, 0, 9999, -9999, 1.0, 100, 1, 15.438042, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1508, 0.009139, 0, 9999, -9999, 1.0, 100, 1, 0.065259, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1510, 8.064673, 0, 9999, -9999, 1.0, 100, 1, 107.008141, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1511, 35.198857, 0, 9999, -9999, 1.0, 100, 1, 155.22192, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1512, 5.406395, 0, 9999, -9999, 1.0, 100, 1, 64.130052, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1513, 3.223999, 0, 9999, -9999, 1.0, 100, 1, 23.051786, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1514, 0.00166, 0, 9999, -9999, 1.0, 100, 1, 0.027711, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1516, 0.00114, 0, 9999, -9999, 1.0, 100, 1, 0.02881, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1517, 0.065001, 0, 9999, -9999, 1.0, 100, 1, 1.286804, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1518, 0.482478, 0, 9999, -9999, 1.0, 100, 1, 0.670542, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1519, 0.033487, 0, 9999, -9999, 1.0, 100, 1, 0.04654, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
])
ppc["branch"] = array([
[586, 1, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[589, 108, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[590, 108, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[593, 112, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[595, 115, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[598, 118, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[599, 119, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[602, 121, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[603, 526, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[607, 127, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[608, 127, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[609, 529, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[612, 493, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[614, 130, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[616, 132, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[617, 133, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[618, 133, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[619, 134, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[624, 14, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[629, 145, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[632, 145, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[637, 148, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[638, 149, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[640, 153, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[641, 155, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[642, 533, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[643, 534, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[647, 536, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[652, 167, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[655, 170, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[663, 178, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[666, 180, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[672, 185, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[676, 19, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[681, 197, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[683, 200, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[687, 202, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[696, 211, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[697, 211, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[698, 212, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[702, 215, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[705, 217, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[707, 219, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[713, 225, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[714, 225, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[716, 226, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[717, 227, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[719, 229, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[722, 545, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[724, 238, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[727, 243, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[728, 244, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[730, 547, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[732, 247, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[735, 253, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[738, 258, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[741, 264, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[742, 264, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[747, 273, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[748, 274, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[749, 274, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[750, 557, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[758, 286, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[761, 288, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[762, 289, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[765, 560, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[767, 292, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[774, 300, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[777, 300, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[778, 300, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[781, 303, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[784, 563, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[785, 501, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[787, 308, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[788, 311, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[789, 565, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[791, 314, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[792, 316, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[795, 319, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[800, 326, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[801, 327, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[802, 327, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[805, 328, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[806, 328, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[808, 329, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[809, 329, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[811, 568, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[814, 570, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[816, 335, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[817, 571, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[821, 338, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[822, 339, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[826, 339, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[834, 572, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[835, 572, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[836, 572, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[837, 350, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[839, 350, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[841, 573, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[843, 352, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[844, 352, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[850, 574, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[851, 575, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[853, 362, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[856, 363, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[857, 365, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[858, 368, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[860, 371, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[865, 375, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[867, 376, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[870, 503, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[872, 378, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[873, 576, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[874, 576, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[875, 381, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[877, 578, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[882, 388, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[883, 388, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[885, 393, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[886, 394, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[889, 397, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[890, 40, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[895, 580, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[896, 581, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[898, 403, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[900, 405, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[902, 405, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[903, 406, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[905, 413, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[906, 414, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[907, 583, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[909, 417, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[917, 43, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[918, 424, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[920, 428, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[921, 428, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[923, 432, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[925, 44, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[931, 439, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[936, 445, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[937, 447, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[939, 450, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[940, 451, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[944, 458, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[950, 462, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[952, 47, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[958, 478, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[959, 478, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[960, 479, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[963, 481, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[968, 486, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[969, 486, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[971, 51, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[973, 506, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[976, 58, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[978, 491, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[982, 62, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[984, 63, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[985, 63, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[986, 64, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[987, 65, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[988, 66, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[993, 67, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[994, 67, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[995, 509, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[997, 510, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[999, 70, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1000, 71, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1002, 71, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1003, 72, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1007, 511, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1010, 79, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1011, 79, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1012, 81, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1027, 218, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1028, 221, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1029, 268, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1030, 269, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1031, 498, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1032, 1, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1034, 4, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1035, 6, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1036, 7, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1037, 8, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1038, 9, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1039, 11, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1040, 14, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1041, 16, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1042, 17, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1043, 19, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1044, 21, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1045, 23, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1046, 25, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1047, 27, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1048, 28, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1049, 29, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1050, 31, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1051, 33, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1052, 34, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1053, 35, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1054, 36, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1055, 38, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1056, 39, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1057, 40, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1058, 41, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1059, 43, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1060, 44, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1061, 45, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1062, 47, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1063, 48, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1064, 49, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1065, 50, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1066, 51, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1067, 53, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1068, 54, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1069, 55, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1070, 57, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1071, 58, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1072, 59, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1073, 60, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1074, 62, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1075, 63, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1076, 64, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1077, 65, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1078, 66, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1079, 67, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1080, 70, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1081, 71, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1082, 72, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1083, 73, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1084, 75, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1085, 76, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1086, 77, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1087, 79, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1088, 80, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1089, 81, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1090, 82, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1091, 83, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1092, 84, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1093, 85, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1094, 88, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1095, 89, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1096, 90, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1097, 91, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1098, 92, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1099, 93, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1100, 97, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1101, 98, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1102, 101, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1103, 102, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1104, 103, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1105, 108, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1106, 109, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1107, 110, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1108, 111, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1109, 112, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1110, 113, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1111, 114, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1112, 115, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1113, 116, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1114, 118, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1115, 119, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1116, 121, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1117, 122, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1118, 126, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1119, 127, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1120, 130, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1121, 131, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1122, 132, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1123, 133, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1124, 134, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1125, 135, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1126, 136, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1127, 137, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1128, 139, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1129, 140, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1130, 141, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1131, 142, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1132, 144, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1133, 145, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1134, 146, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1135, 147, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1136, 148, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1137, 149, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1138, 150, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1139, 151, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1140, 152, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1141, 153, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1142, 154, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1143, 155, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1144, 158, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1146, 162, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1147, 163, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1148, 164, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1149, 166, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1150, 167, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1151, 168, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1152, 169, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1153, 170, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1154, 171, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1155, 172, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1156, 173, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1157, 174, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1158, 175, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1159, 176, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1160, 177, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1161, 178, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1162, 179, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1163, 180, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1164, 181, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1165, 182, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1166, 183, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1167, 185, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1168, 186, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1169, 187, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1170, 188, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1171, 189, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1172, 190, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1173, 192, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1174, 193, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1175, 194, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1176, 196, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1177, 197, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1178, 198, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1179, 199, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1180, 200, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1181, 202, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1182, 203, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1183, 204, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1184, 205, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1185, 206, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1186, 207, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1187, 208, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1188, 209, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1190, 211, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1191, 212, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1192, 213, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1193, 214, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1194, 215, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1195, 216, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1196, 217, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1197, 218, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1198, 219, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1199, 221, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1200, 222, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1201, 223, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1202, 224, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1203, 225, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1204, 226, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1205, 227, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1206, 228, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1207, 229, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1208, 230, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1209, 234, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1210, 235, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1211, 237, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1212, 238, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1213, 239, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1214, 240, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1215, 241, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1216, 242, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1217, 243, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1218, 244, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1219, 247, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1220, 251, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1221, 252, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1222, 253, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1223, 254, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1224, 255, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1225, 256, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1226, 257, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1227, 258, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1228, 260, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1229, 263, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1230, 264, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1231, 266, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1232, 267, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1233, 268, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1235, 271, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1236, 272, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1237, 273, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1238, 274, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1239, 275, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1240, 276, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1241, 278, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1242, 281, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1243, 282, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1244, 283, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1245, 284, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1246, 285, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1247, 286, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1248, 287, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1249, 288, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1250, 289, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1251, 291, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1252, 292, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1253, 293, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1254, 294, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1255, 295, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1256, 296, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1257, 297, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1258, 298, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1259, 299, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1260, 300, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1261, 302, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1262, 303, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1263, 304, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1264, 307, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1265, 308, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1266, 309, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1267, 311, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1268, 312, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1269, 314, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1270, 316, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1271, 317, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1272, 318, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1273, 319, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1274, 321, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1275, 322, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1276, 323, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1277, 324, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1278, 325, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1279, 326, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1280, 327, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1281, 328, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1282, 329, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1283, 331, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1284, 333, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1285, 335, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1286, 337, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1287, 338, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1288, 339, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1289, 340, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1290, 341, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1291, 342, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1292, 343, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1293, 344, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1294, 345, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1295, 346, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1296, 347, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1297, 348, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1298, 350, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1299, 352, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1300, 353, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1301, 354, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1302, 355, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1303, 356, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1304, 357, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1305, 359, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1306, 361, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1307, 362, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1308, 363, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1309, 364, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1310, 365, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1311, 366, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1312, 367, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1313, 368, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1314, 369, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1315, 370, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1316, 371, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1317, 372, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1318, 373, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1319, 374, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1320, 375, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1321, 376, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1322, 377, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1323, 378, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1324, 379, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1325, 381, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1326, 384, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1327, 385, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1328, 386, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1329, 387, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1330, 388, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1331, 390, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1332, 391, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1333, 392, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1334, 393, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1335, 394, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1336, 395, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1337, 396, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1338, 397, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1339, 398, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1340, 399, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1341, 400, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1342, 403, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1343, 404, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1344, 405, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1345, 406, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1346, 407, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1347, 408, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1348, 410, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1349, 411, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1350, 412, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1351, 413, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1352, 414, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1354, 417, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1355, 418, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1356, 419, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1357, 420, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1358, 421, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1359, 422, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1360, 423, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1361, 424, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1362, 425, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1363, 426, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1364, 427, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1365, 428, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1366, 429, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1367, 430, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1368, 431, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1369, 432, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1370, 433, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1371, 434, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1372, 435, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1373, 436, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1374, 437, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1375, 438, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1376, 439, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1377, 440, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1378, 441, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1379, 442, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1380, 443, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1381, 445, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1382, 446, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1383, 447, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1384, 448, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1385, 449, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1386, 450, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1387, 451, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1388, 453, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1389, 454, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1390, 455, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1391, 456, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1392, 457, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1393, 458, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1394, 459, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1395, 460, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1396, 461, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1397, 462, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1398, 463, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1399, 464, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1400, 465, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1401, 466, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1402, 467, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1403, 468, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1404, 469, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1405, 470, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1406, 471, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1407, 472, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1408, 473, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1409, 474, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1411, 476, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1412, 477, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1413, 478, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1414, 479, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1415, 480, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1416, 481, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1417, 482, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1418, 483, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1419, 484, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1420, 485, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1421, 486, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1422, 487, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1423, 488, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1424, 489, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1425, 490, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1426, 491, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1427, 492, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1428, 493, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1429, 494, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1430, 495, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1431, 496, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1432, 497, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1433, 498, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1434, 499, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1435, 500, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1436, 501, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1437, 502, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1438, 503, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1439, 504, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1440, 505, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1441, 506, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1442, 507, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1443, 508, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1444, 509, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1445, 510, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1446, 511, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1447, 512, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1448, 513, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1449, 514, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1450, 515, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1451, 516, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1452, 517, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1453, 518, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1454, 519, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1455, 520, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1456, 521, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1457, 522, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1458, 523, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1459, 524, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1460, 525, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1461, 526, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1462, 527, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1463, 528, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1464, 529, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1465, 530, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1466, 531, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1467, 532, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1468, 533, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1469, 534, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1470, 535, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1471, 536, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1472, 537, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1473, 538, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1474, 539, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1475, 540, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1476, 541, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1477, 542, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1478, 543, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1479, 544, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1480, 545, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1481, 546, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1482, 547, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1483, 548, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1484, 549, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1485, 550, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1486, 551, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1487, 552, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1488, 554, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1489, 555, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1490, 556, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1491, 557, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1492, 558, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1493, 559, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1494, 560, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1495, 561, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1496, 562, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1497, 563, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1498, 564, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1499, 565, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1500, 566, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1501, 567, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1502, 568, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1503, 569, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1504, 570, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1505, 571, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1506, 572, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1507, 573, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1508, 574, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1510, 576, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1511, 577, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1512, 578, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1513, 579, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1514, 580, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1516, 582, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1517, 583, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1518, 584, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1519, 585, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
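# Assumed column layout (MATPOWER-style branch convention; not stated in the source):
#  [fbus, tbus, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax]
# Rows above this point carry placeholder electrical data (r = 0, x = 1e-05,
# ratings 9999), the usual encoding for zero-impedance couplers / bus ties.
# Rows below carry physical parameters: series r and x, shunt b, MVA ratings,
# and per-branch limits in the final columns.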
[1, 490, 0, 0.01433884297520661, 0.151691958358336, 991.0, 991.0, 991.0, 0, 2, 1, -360, 43.375 ],
[3, 4, 0, 0.006291637811634348, 0.903417549506624, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 72.681 ],
[491, 6, 0, 0.011200661157024791, 0.118492839955776, 991.0, 991.0, 991.0, 0, 2, 1, -360, 33.882 ],
[7, 5, 0, 0.005794840720221606, 0.20802058859584005, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 33.471 ],
[8, 9, 0, 0.0024379328254847646, 0.350063268897336, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 28.163 ],
[492, 11, 0, 0.018224793388429753, 0.0482004476327704, 495.0, 495.0, 495.0, 0, 1, 1, -360, 27.565 ],
[11, 493, 0, 0.030286942148760328, 0.08010209706571599, 495.0, 495.0, 495.0, 0, 1, 1, -360, 45.809 ],
[492, 493, 0, 0.04521652892561983, 0.11958747011094399, 495.0, 495.0, 495.0, 0, 1, 1, -360, 68.39 ],
[494, 14, 0, 0.012990743801652892, 0.137430291356512, 991.0, 991.0, 991.0, 0, 2, 1, -360, 39.297 ],
[13, 15, 0, 0.007681959833795014, 0.27576354266704156, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 44.371 ],
[16, 5, 0, 0.006275623268698061, 0.22527950450957998, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 36.248000000000005 ],
[17, 18, 0, 0.04623522622347646, 0.9335989000302801, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 200.291 ],
[17, 12, 0, 0.0056020313942728535, 0.113118303398186, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 24.268 ],
[14, 495, 0, 0.0017957024793388433, 0.018996904156819597, 991.0, 991.0, 991.0, 0, 1, 1, -360, 5.432 ],
[494, 19, 0, 0.010246611570247935, 0.10839986031771602, 991.0, 991.0, 991.0, 0, 1, 1, -360, 30.996 ],
[20, 21, 0, 0.005415685595567867, 0.19440984828307922, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 31.281 ],
[20, 22, 0, 0.0049706544321329645, 0.713737278110032, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 57.42100000000001 ],
[497, 23, 0, 0.002190413223140496, 0.005793146490362, 495.0, 495.0, 495.0, 0, 1, 1, -360, 3.313 ],
[23, 499, 0, 0.020799669421487598, 0.22004164444829602, 991.0, 991.0, 991.0, 0, 1, 1, -360, 62.919 ],
[25, 26, 0, 0.00141845567867036, 0.050919084651523595, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 8.193 ],
[25, 22, 0, 0.0035578254847645433, 0.0319293051869808, 856.0, 856.0, 856.0, 0, 1, 1, -360, 10.275 ],
[23, 27, 0, 0.027738181818181818, 0.073361203699828, 495.0, 495.0, 495.0, 0, 1, 1, -360, 41.95399999999999 ],
[28, 23, 0, 0.012841652892561981, 0.0339632611780132, 495.0, 495.0, 495.0, 0, 1, 1, -360, 19.423 ],
[8, 21, 0, 0.004948753462603878, 0.17764812836304802, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 28.584 ],
[9, 29, 0, 0.002212863573407202, 0.31774552934092004, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 25.563000000000002 ],
[30, 25, 0, 0.019958795013850415, 0.17911796401827998, 856.0, 856.0, 856.0, 0, 1, 1, -360, 57.641000000000005 ],
[31, 32, 0, 0.0299776084949446, 0.605319030583196, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 129.863 ],
[32, 33, 0, 0.016762234533725762, 0.33846927983213604, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 72.61399999999999 ],
[34, 35, 0, 0.001931900826446281, 0.020437759184893597, 991.0, 991.0, 991.0, 0, 2, 1, -360, 5.843999999999999 ],
[35, 36, 0, 0.0008730578512396695, 0.0092361605077588, 991.0, 991.0, 991.0, 0, 2, 1, -360, 2.641 ],
[490, 6, 0, 0.049352066115702475, 0.130525028606764, 495.0, 495.0, 495.0, 0, 1, 1, -360, 74.645 ],
[37, 10, 0, 0.02404639889196676, 0.485553838251812, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 104.169 ],
[10, 38, 0, 0.006848799630657894, 0.13829351176534158, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 29.669 ],
[37, 38, 0, 0.01437834718372576, 1.1613317560186958, 2567.0, 2567.0, 2567.0, 0, 1, 1, -360, 124.574 ],
[39, 40, 0, 0.04521629732222991, 0.913024308337812, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 195.877 ],
[39, 41, 0, 0.017466989843005543, 0.35269996139852006, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 75.667 ],
[42, 41, 0, 0.031145429362880884, 0.6289001042979919, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 134.922 ],
[18, 42, 0, 0.03439750692520776, 0.6945672650962679, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 149.01 ],
[492, 43, 0, 0.01819173553719008, 0.192452068436848, 991.0, 991.0, 991.0, 0, 2, 1, -360, 55.03 ],
[44, 45, 0, 0.02562314049586777, 0.067767398802972, 495.0, 495.0, 495.0, 0, 1, 1, -360, 38.755 ],
[44, 505, 0, 0.006061487603305785, 0.0160312607980052, 495.0, 495.0, 495.0, 0, 1, 1, -360, 9.168 ],
[46, 12, 0, 0.0014741170360110802, 0.2116687641962416, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 17.029 ],
[47, 48, 0, 0.005344182825484765, 0.01199019212302604, 428.0, 428.0, 428.0, 0, 1, 1, -360, 7.7170000000000005 ],
[49, 50, 0, 0.0019151662049861494, 0.0171874439892256, 856.0, 856.0, 856.0, 0, 1, 1, -360, 5.531000000000001 ],
[31, 33, 0, 0.013475992613088641, 0.27211225959163604, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 58.378 ],
[31, 51, 0, 0.003518611495844875, 0.5052381383693519, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 40.647 ],
[52, 53, 0, 0.010464421745152355, 1.5025884408875438, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 120.885 ],
[52, 54, 0, 0.0076126500461911354, 0.1537174637168, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 32.978 ],
[506, 55, 0, 0.012634380165289257, 0.133660287181212, 991.0, 991.0, 991.0, 0, 1, 1, -360, 38.219 ],
[506, 507, 0, 0.044157355371900825, 0.11678619613628, 495.0, 495.0, 495.0, 0, 1, 1, -360, 66.788 ],
[57, 506, 0, 0.004687272727272727, 0.049587095736244, 991.0, 991.0, 991.0, 0, 1, 1, -360, 14.179 ],
[57, 58, 0, 0.014436363636363634, 0.0381809096340232, 495.0, 495.0, 495.0, 0, 1, 1, -360, 21.835 ],
[58, 506, 0, 0.019797685950413223, 0.052360391943288, 495.0, 495.0, 495.0, 0, 1, 1, -360, 29.944000000000003 ],
[59, 60, 0, 0.019407548476454296, 0.174170863885556, 856.0, 856.0, 856.0, 0, 1, 1, -360, 56.049 ],
[508, 62, 0, 0.051111404958677685, 0.03379452026753001, 248.0, 248.0, 248.0, 0, 1, 1, -360, 38.653 ],
[30, 61, 0, 0.03143698060941828, 0.28212765137935203, 856.0, 856.0, 856.0, 0, 1, 1, -360, 90.79 ],
[63, 506, 0, 0.027457190082644623, 0.072618044249872, 495.0, 495.0, 495.0, 0, 1, 1, -360, 41.528999999999996 ],
[13, 64, 0, 0.0014816481994459833, 0.2127501654814608, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 17.116 ],
[65, 66, 0, 0.03778185595567867, 0.7629053006222161, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 163.671 ],
[59, 67, 0, 0.0051880193905817175, 0.046559297286324804, 856.0, 856.0, 856.0, 0, 1, 1, -360, 14.982999999999999 ],
[61, 67, 0, 0.012931440443213295, 0.1160517597580644, 856.0, 856.0, 856.0, 0, 1, 1, -360, 37.346 ],
[68, 69, 0, 0.011149584487534626, 0.4002427745096039, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 64.4 ],
[70, 69, 0, 0.009625346260387812, 0.345526355460808, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 55.596000000000004 ],
[71, 72, 0, 0.008878635734072021, 0.318721276477736, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 51.283 ],
[73, 74, 0, 0.012529547553116345, 0.253001288604392, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 54.278 ],
[37, 75, 0, 0.027459141274238225, 0.5544652029066119, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 118.95299999999999 ],
[72, 75, 0, 0.006688711911357341, 0.240108375006292, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 38.634 ],
[37, 72, 0, 0.036222068328739615, 0.7314094881920841, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 156.914 ],
[76, 77, 0, 0.004683777700831025, 0.6725445900750401, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 54.107 ],
[77, 51, 0, 0.00363183864265928, 0.5214964473447999, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 41.955 ],
[73, 72, 0, 0.025475069252077563, 0.514402082018968, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 110.35799999999999 ],
[18, 40, 0, 0.01302770083102493, 0.26306018504072, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 56.43600000000001 ],
[492, 45, 0, 0.0308703030303719, 0.18370114733484796, 743.0, 743.0, 743.0, 0, 1, 1, -360, 70.03699999999999 ],
[10, 74, 0, 0.030167359187465374, 0.609150547206812, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 130.685 ],
[45, 511, 0, 0.08203371900826446, 0.05424014819960001, 248.0, 248.0, 248.0, 0, 1, 1, -360, 62.038000000000004 ],
[78, 32, 0, 0.013458795013850415, 0.48313777647302397, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 77.738 ],
[79, 80, 0, 0.0038086911357340715, 0.1367226831743568, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 21.999000000000002 ],
[81, 79, 0, 0.010767832409972299, 0.3865388099484561, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 62.195 ],
[34, 82, 0, 0.0015497520661157025, 0.00409874294399768, 495.0, 495.0, 495.0, 0, 1, 1, -360, 2.344 ],
[83, 84, 0, 0.00902611570247934, 0.0238720301499152, 495.0, 495.0, 495.0, 0, 1, 1, -360, 13.652000000000001 ],
[83, 499, 0, 0.04179570247933885, 0.0276350398834796, 248.0, 248.0, 248.0, 0, 1, 1, -360, 31.608 ],
[85, 86, 0, 0.00802354570637119, 0.28802563884886, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 46.343999999999994 ],
[87, 86, 0, 0.01904968836565097, 0.683837154069184, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 110.031 ],
[88, 89, 0, 0.00380297520661157, 0.010058007429140002, 495.0, 495.0, 495.0, 0, 1, 1, -360, 5.752000000000001 ],
[90, 86, 0, 0.012097818559556786, 0.434282055192244, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 69.877 ],
[91, 86, 0, 9.26246537396122e-05, 0.013299992817559201, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 1.07 ],
[86, 92, 0, 0.0001852493074792244, 0.0066499964087796005, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 1.07 ],
[86, 93, 0, 0.008152181440443215, 0.292643346635492, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 47.086999999999996 ],
[94, 86, 0, 0.012883829639889197, 0.46249792780547194, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 74.417 ],
[86, 95, 0, 0.010421052631578947, 0.37409026526870803, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 60.192 ],
[513, 517, 0, 0.0008733884297520661, 0.0023099144321748, 495.0, 495.0, 495.0, 0, 1, 1, -360, 1.321 ],
[97, 66, 0, 0.03812777008310249, 0.34217338998058805, 856.0, 856.0, 856.0, 0, 1, 1, -360, 110.113 ],
[42, 98, 0, 0.003091759002770083, 0.44394630230884, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 35.716 ],
[99, 100, 0, 0.016371537396121884, 0.587698093837988, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 94.56200000000001 ],
[42, 101, 0, 0.008165339335180054, 0.29311568282888, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 47.163000000000004 ],
[102, 42, 0, 0.012403047091412742, 0.44523901189173193, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 71.64 ],
[103, 87, 0, 0.007073060941828254, 0.25390556381756, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 40.854 ],
[104, 103, 0, 0.0028852146814404432, 0.1035721403291428, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 16.665 ],
[105, 87, 0, 0.006406682825484765, 0.22998422159488002, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 37.005 ],
[106, 107, 0, 0.005714219759923823, 0.11538365264216799, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 24.754 ],
[108, 107, 0, 0.0025427631578947367, 0.09127896939786201, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 14.687000000000001 ],
[109, 106, 0, 0.003030470914127424, 0.10878648330773438, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 17.504 ],
[110, 111, 0, 0.019821849030470913, 0.7115558306889919, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 114.491 ],
[87, 112, 0, 0.006135907202216068, 0.220264039928212, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 35.441 ],
[113, 87, 0, 0.003981648199445983, 0.14293141813921081, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 22.998 ],
[87, 85, 0, 0.011046225761772853, 0.3965324494097, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 63.803000000000004 ],
[110, 114, 0, 0.011665339335180056, 0.418757110306188, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 67.37899999999999 ],
[115, 116, 0, 0.007048925619834712, 0.07457124214588401, 991.0, 991.0, 991.0, 0, 1, 1, -360, 21.323 ],
[117, 118, 0, 0.005987534626038782, 0.21493782785077598, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 34.584 ],
[117, 119, 0, 0.0038738746537396117, 0.5562504472696961, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 44.751000000000005 ],
[117, 120, 0, 0.005886686288088643, 0.8452704781039522, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 68.003 ],
[121, 122, 0, 0.0021170360110803325, 0.0759964075574972, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 12.228 ],
[123, 124, 0, 0.0018386426592797783, 0.0660027680945204, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 10.62 ],
[125, 126, 0, 0.004941135734072022, 0.17737467056702802, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 28.54 ],
[127, 119, 0, 0.0029027008310249305, 0.1041998502705648, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 16.766 ],
[118, 128, 0, 0.007397160664819945, 0.265539950057812, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 42.726000000000006 ],
[121, 119, 0, 0.002552458448753463, 0.0916270065931116, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 14.743 ],
[530, 527, 0, 0.022726611570247933, 0.060106736329903994, 495.0, 495.0, 495.0, 0, 1, 1, -360, 34.374 ],
[125, 130, 0, 0.002931440443213297, 0.105231531956442, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 16.932000000000002 ],
[125, 123, 0, 0.0019078081717451524, 0.2739425623421336, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 22.039 ],
[131, 132, 0, 0.0035744459833795014, 0.12831385593973843, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 20.646 ],
[133, 123, 0, 0.003864439058171745, 0.13872389704704202, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 22.320999999999998 ],
[524, 134, 0, 0.008092231404958678, 0.08560847143881999, 991.0, 991.0, 991.0, 0, 1, 1, -360, 24.479 ],
[135, 136, 0, 0.005242901662049862, 0.1882073282678, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 30.283 ],
[123, 131, 0, 0.003138331024930748, 0.1126583971045252, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 18.127 ],
[117, 128, 0, 0.010800034626038782, 0.38769479063117196, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 62.381 ],
[137, 521, 0, 0.013832396694214875, 0.14633421587532003, 991.0, 991.0, 991.0, 0, 2, 1, -360, 41.843 ],
[531, 514, 0, 0.0059504132231404955, 0.035409362037522, 743.0, 743.0, 743.0, 0, 1, 1, -360, 13.5 ],
[139, 521, 0, 0.021257520661157023, 0.05622132386323199, 495.0, 495.0, 495.0, 0, 1, 1, -360, 32.152 ],
[140, 514, 0, 0.018527603305785127, 0.04900131122836401, 495.0, 495.0, 495.0, 0, 1, 1, -360, 28.023000000000003 ],
[522, 141, 0, 0.012168595041322314, 0.032183175718526795, 495.0, 495.0, 495.0, 0, 1, 1, -360, 18.405 ],
[142, 523, 0, 0.007060165289256198, 0.0746901476577608, 991.0, 991.0, 991.0, 0, 2, 1, -360, 21.357 ],
[530, 526, 0, 0.020281652892561983, 0.053640374808152, 495.0, 495.0, 495.0, 0, 1, 1, -360, 30.676 ],
[140, 532, 0, 0.004669090909090909, 0.0123486871461184, 495.0, 495.0, 495.0, 0, 1, 1, -360, 7.062 ],
[142, 144, 0, 0.006678126721756199, 0.0397397958689204, 743.0, 743.0, 743.0, 0, 1, 1, -360, 15.151 ],
[140, 522, 0, 0.020450247933884298, 0.05408627047793199, 495.0, 495.0, 495.0, 0, 1, 1, -360, 30.930999999999997 ],
[145, 146, 0, 0.028527603305785125, 0.07544904460236, 495.0, 495.0, 495.0, 0, 1, 1, -360, 43.148 ],
[147, 523, 0, 0.02461289256198347, 0.0650955220034416, 495.0, 495.0, 495.0, 0, 2, 1, -360, 37.227 ],
[144, 523, 0, 0.008479338842975206, 0.0224259292904064, 495.0, 495.0, 495.0, 0, 1, 1, -360, 12.825 ],
[139, 523, 0, 0.029245619834710742, 0.0193370088934308, 248.0, 248.0, 248.0, 0, 1, 1, -360, 22.116999999999997 ],
[140, 141, 0, 0.008362975206611572, 0.022118173847506, 495.0, 495.0, 495.0, 0, 1, 1, -360, 12.649000000000001 ],
[528, 526, 0, 0.015389090909090908, 0.0407006573227188, 495.0, 495.0, 495.0, 0, 1, 1, -360, 23.276 ],
[528, 148, 0, 0.014306115702479338, 0.0378364333712244, 495.0, 495.0, 495.0, 0, 1, 1, -360, 21.638 ],
[149, 150, 0, 0.013604628099173552, 0.035981157661543604, 495.0, 495.0, 495.0, 0, 1, 1, -360, 20.576999999999998 ],
[145, 528, 0, 0.00320595041322314, 0.0084790121737992, 495.0, 495.0, 495.0, 0, 1, 1, -360, 4.849 ],
[530, 151, 0, 0.013144462809917355, 0.0347641247737036, 495.0, 495.0, 495.0, 0, 1, 1, -360, 19.881 ],
[524, 152, 0, 0.014598347107438016, 0.03860931919944, 495.0, 495.0, 495.0, 0, 1, 1, -360, 22.08 ],
[149, 525, 0, 0.016897190082644627, 0.17875695122823998, 991.0, 991.0, 991.0, 0, 2, 1, -360, 51.114 ],
[139, 514, 0, 0.007824132231404959, 0.020693056313687997, 495.0, 495.0, 495.0, 0, 1, 1, -360, 11.834000000000001 ],
[126, 120, 0, 0.012780297783933518, 0.458781387757004, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 73.819 ],
[530, 153, 0, 0.02254545454545455, 0.059627617060924, 495.0, 495.0, 495.0, 0, 1, 1, -360, 34.1 ],
[528, 147, 0, 0.15786710743801652, 0.104380679149868, 248.0, 248.0, 248.0, 0, 1, 1, -360, 119.387 ],
[528, 154, 0, 0.006528264462809917, 0.017265779790547203, 495.0, 495.0, 495.0, 0, 2, 1, -360, 9.874 ],
[130, 120, 0, 0.01450502077562327, 0.5206947188067639, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 83.781 ],
[528, 155, 0, 0.16064132231404957, 0.1062149715341, 248.0, 248.0, 248.0, 0, 1, 1, -360, 121.485 ],
[524, 533, 0, 0.004432727272727273, 0.0468942356109744, 991.0, 991.0, 991.0, 0, 1, 1, -360, 13.409 ],
[524, 149, 0, 0.0056413223140495865, 0.05968007537478799, 991.0, 991.0, 991.0, 0, 2, 1, -360, 17.065 ],
[154, 150, 0, 0.007539173553719007, 0.0199394052006688, 495.0, 495.0, 495.0, 0, 2, 1, -360, 11.402999999999999 ],
[157, 110, 0, 0.009962084487534625, 0.357614433044424, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 57.541000000000004 ],
[119, 158, 0, 0.0002490189289012004, 0.08045252664623159, 5134.0, 5134.0, 5134.0, 0, 3, 1, -360, 4.315 ],
[159, 60, 0, 0.010967451523545706, 0.0984261617997728, 856.0, 856.0, 856.0, 0, 1, 1, -360, 31.674 ],
[536, 161, 0, 0.021314380165289255, 0.056371704363524, 495.0, 495.0, 495.0, 0, 1, 1, -360, 32.238 ],
[115, 151, 0, 0.00379404958677686, 0.0401376047510724, 991.0, 991.0, 991.0, 0, 1, 1, -360, 11.477 ],
[162, 134, 0, 0.0015910743801652895, 0.016832124393744, 991.0, 991.0, 991.0, 0, 2, 1, -360, 4.813 ],
[115, 526, 0, 0.0037884297520661154, 0.010019537998747198, 495.0, 495.0, 495.0, 0, 1, 1, -360, 5.73 ],
[138, 87, 0, 0.0011838642659279777, 0.16999131006813442, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 13.675999999999998 ],
[123, 163, 0, 0.0022778739612188364, 0.08177009602828919, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 13.157 ],
[112, 164, 0, 0.0008672957063711912, 0.12453516639176802, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 10.019 ],
[112, 165, 0, 0.005989439058171744, 0.21500619230086396, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 34.595 ],
[166, 165, 0, 0.002632790858725762, 0.09451074335350361, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 15.207 ],
[167, 537, 0, 0.00832595041322314, 0.08808100664460242, 991.0, 991.0, 991.0, 0, 2, 1, -360, 25.186 ],
[168, 104, 0, 0.002552458448753463, 0.0916270065931116, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 14.743 ],
[531, 520, 0, 0.016156694214876033, 0.042730794079516396, 495.0, 495.0, 495.0, 0, 1, 1, -360, 24.436999999999998 ],
[139, 520, 0, 0.010682314049586776, 0.0282522993797748, 495.0, 495.0, 495.0, 0, 1, 1, -360, 16.157 ],
[520, 169, 0, 0.0011328925619834712, 0.0119849761681232, 991.0, 991.0, 991.0, 0, 2, 1, -360, 3.427 ],
[168, 105, 0, 0.007340893351800554, 0.26352009133553606, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 42.401 ],
[520, 170, 0, 0.005842644628099174, 0.015452470732151198, 495.0, 495.0, 495.0, 0, 2, 1, -360, 8.837 ],
[171, 89, 0, 0.005505454545454546, 0.058242717567848004, 991.0, 991.0, 991.0, 0, 1, 1, -360, 16.654 ],
[521, 172, 0, 0.006304793388429752, 0.06669899780522001, 991.0, 991.0, 991.0, 0, 1, 1, -360, 19.072 ],
[123, 173, 0, 0.005247403047091413, 0.18836891696656402, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 30.309 ],
[521, 174, 0, 0.013300495867768597, 0.035176796844864404, 495.0, 495.0, 495.0, 0, 1, 1, -360, 20.117 ],
[37, 39, 0, 0.004338873499549862, 0.35044859579205606, 2567.0, 2567.0, 2567.0, 0, 2, 1, -360, 37.592 ],
[530, 175, 0, 0.013128595041322313, 0.0347221581224188, 495.0, 495.0, 495.0, 0, 1, 1, -360, 19.857 ],
[530, 176, 0, 0.005685289256198347, 0.01503630144005, 495.0, 495.0, 495.0, 0, 1, 1, -360, 8.599 ],
[88, 530, 0, 0.006015867768595041, 0.0159106066755372, 495.0, 495.0, 495.0, 0, 1, 1, -360, 9.099 ],
[177, 496, 0, 0.018632066115702478, 0.19711036673178398, 991.0, 991.0, 991.0, 0, 2, 1, -360, 56.361999999999995 ],
[178, 525, 0, 0.03106842975206612, 0.08216895464241199, 495.0, 495.0, 495.0, 0, 1, 1, -360, 46.99100000000001 ],
[179, 493, 0, 0.057079669421487594, 0.15096278779194802, 495.0, 495.0, 495.0, 0, 1, 1, -360, 86.333 ],
[180, 181, 0, 0.041027438016528923, 0.10850827416682, 495.0, 495.0, 495.0, 0, 1, 1, -360, 62.053999999999995 ],
[182, 180, 0, 0.00866314049586777, 0.09164817200545601, 991.0, 991.0, 991.0, 0, 2, 1, -360, 26.206 ],
[179, 181, 0, 0.01957223140495868, 0.051764115772731996, 495.0, 495.0, 495.0, 0, 1, 1, -360, 29.603 ],
[180, 493, 0, 0.06676561983471074, 0.17657993119175203, 495.0, 495.0, 495.0, 0, 1, 1, -360, 100.98299999999999 ],
[183, 30, 0, 0.0024804362880886427, 0.356166349712776, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 28.654 ],
[183, 21, 0, 0.0025647506925207757, 0.36827307214930394, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 29.628 ],
[538, 185, 0, 0.018631404958677687, 0.0123189607681008, 248.0, 248.0, 248.0, 0, 1, 1, -360, 14.09 ],
[538, 89, 0, 0.014509752066115702, 0.038375005396288, 495.0, 495.0, 495.0, 0, 1, 1, -360, 21.945999999999998 ],
[184, 186, 0, 0.0016554709141274237, 0.059427351084826, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 9.562000000000001 ],
[184, 187, 0, 0.002698753462603878, 0.09687863927102919, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 15.588 ],
[520, 172, 0, 0.0034188429752066113, 0.0361682589818792, 991.0, 991.0, 991.0, 0, 2, 1, -360, 10.342 ],
[89, 175, 0, 0.0037309090909090903, 0.0098674088877672, 495.0, 495.0, 495.0, 0, 1, 1, -360, 5.643 ],
[185, 89, 0, 0.005812892561983471, 0.0153737832609196, 495.0, 495.0, 495.0, 0, 1, 1, -360, 8.792 ],
[89, 188, 0, 0.003108760330578513, 0.008221966434607202, 495.0, 495.0, 495.0, 0, 1, 1, -360, 4.702 ],
[189, 190, 0, 0.008599492151454294, 0.17364414688031998, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 37.253 ],
[539, 172, 0, 0.0021570247933884296, 0.022819366646419197, 991.0, 991.0, 991.0, 0, 2, 1, -360, 6.525 ],
[504, 192, 0, 0.0003084297520661157, 0.00326290713886456, 991.0, 991.0, 991.0, 0, 2, 1, -360, 0.9329999999999999 ],
[105, 186, 0, 0.003273372576177285, 0.1175060580379876, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 18.907 ],
[105, 187, 0, 0.0021712257617728533, 0.0779416868808324, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 12.540999999999999 ],
[539, 193, 0, 0.005608595041322314, 0.01483346262541, 495.0, 495.0, 495.0, 0, 1, 1, -360, 8.482999999999999 ],
[187, 194, 0, 4.8649584487534626e-05, 0.0069856037041576, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 0.562 ],
[539, 540, 0, 0.004394710743801653, 0.0116230138006708, 495.0, 495.0, 495.0, 0, 1, 1, -360, 6.647 ],
[539, 196, 0, 0.00332297520661157, 0.008788516227194, 495.0, 495.0, 495.0, 0, 1, 1, -360, 5.026 ],
[197, 540, 0, 0.004737190082644629, 0.012528794024621601, 495.0, 495.0, 495.0, 0, 1, 1, -360, 7.165 ],
[110, 198, 0, 0.00018724030470914128, 0.02688587333118328, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 2.1630000000000003 ],
[197, 539, 0, 0.009172231404958677, 0.024258473063998802, 495.0, 495.0, 495.0, 0, 1, 1, -360, 13.873 ],
[199, 537, 0, 0.03612826446280991, 0.0238877676441712, 248.0, 248.0, 248.0, 0, 1, 1, -360, 27.322 ],
[134, 526, 0, 0.007771239669421488, 0.020553167475975197, 495.0, 495.0, 495.0, 0, 1, 1, -360, 11.754000000000001 ],
[200, 193, 0, 0.0009322314049586776, 0.009862163056380801, 991.0, 991.0, 991.0, 0, 2, 1, -360, 2.82 ],
[4, 201, 0, 0.013726108033240996, 0.49273365914097605, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 79.282 ],
[202, 86, 0, 0.00013365650969529087, 0.00479794133417816, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 0.772 ],
[85, 203, 0, 0.0019011426592797783, 0.2729854600553416, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 21.962 ],
[147, 204, 0, 0.0073874380165289254, 0.0781523963903056, 991.0, 991.0, 991.0, 0, 2, 1, -360, 22.346999999999998 ],
[147, 205, 0, 0.005959669421487603, 0.00394049369636956, 248.0, 248.0, 248.0, 0, 1, 1, -360, 4.507 ],
[123, 206, 0, 0.0005753116343490305, 0.0826091142668064, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 6.646 ],
[537, 207, 0, 0.018456198347107437, 0.048812461297776, 495.0, 495.0, 495.0, 0, 1, 1, -360, 27.915 ],
[165, 208, 0, 0.00414612188365651, 0.14883562055771601, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 23.948 ],
[4, 94, 0, 0.013687673130193905, 0.49135394025941603, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 79.06 ],
[4, 2, 0, 5.2054478301015697e-05, 0.016817654469309, 5134.0, 5134.0, 5134.0, 0, 3, 1, -360, 0.902 ],
[209, 4, 0, 0.0022369286703601107, 0.32120104149338397, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 25.840999999999998 ],
[119, 163, 0, 0.003535145429362881, 0.12690306230914922, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 20.419 ],
[210, 3, 0, 0.0003150969529085873, 0.011311208844832242, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 1.82 ],
[99, 211, 0, 0.0035045013850415513, 0.1258030161741948, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 20.242 ],
[99, 69, 0, 0.021717970914127423, 0.7796219621557, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 125.443 ],
[212, 99, 0, 0.008453774238227147, 0.30346978938770003, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 48.82899999999999 ],
[213, 214, 0, 0.01490115702479339, 0.15764073118032798, 991.0, 991.0, 991.0, 0, 2, 1, -360, 45.076 ],
[510, 215, 0, 0.002174710743801653, 0.09202587186721281, 1981.0, 1981.0, 1981.0, 0, 4, 1, -360, 13.157 ],
[128, 69, 0, 0.010711651662049862, 1.538088234801848, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 123.741 ],
[216, 69, 0, 0.009628462603878117, 1.3825528982351443, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 111.228 ],
[217, 98, 0, 0.0012787396121883656, 0.045903620070299994, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 7.386 ],
[504, 218, 0, 0.027480991735537193, 0.072680994226412, 495.0, 495.0, 495.0, 0, 1, 1, -360, 41.565 ],
[177, 504, 0, 0.07054809917355372, 0.18658373169634002, 495.0, 495.0, 495.0, 0, 1, 1, -360, 106.704 ],
[219, 209, 0, 0.003938798476454294, 0.5655728721401839, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 45.501000000000005 ],
[219, 220, 0, 0.0013026315789473684, 0.1870451326342096, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 15.048 ],
[94, 95, 0, 0.01070740997229917, 0.38436979242743197, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 61.846000000000004 ],
[159, 221, 0, 0.009937153739612188, 0.356719480257712, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 57.397 ],
[34, 161, 0, 0.010965289256198347, 0.116002818645824, 991.0, 991.0, 991.0, 0, 2, 1, -360, 33.17 ],
[222, 221, 0, 0.0046457756232686975, 0.16677196601221997, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 26.834 ],
[211, 52, 0, 0.05267313019390582, 0.472709090515552, 856.0, 856.0, 856.0, 0, 1, 1, -360, 152.12 ],
[215, 223, 0, 0.04873190082644628, 0.128884831985184, 495.0, 495.0, 495.0, 0, 1, 1, -360, 73.707 ],
[224, 215, 0, 0.019086280991735535, 0.050478887076288004, 495.0, 495.0, 495.0, 0, 1, 1, -360, 28.868000000000002 ],
[225, 224, 0, 0.04200925619834711, 0.11110496071615601, 495.0, 495.0, 495.0, 0, 1, 1, -360, 63.538999999999994 ],
[224, 223, 0, 0.031061818181818183, 0.082151468537468, 495.0, 495.0, 495.0, 0, 1, 1, -360, 46.981 ],
[226, 6, 0, 0.06420099173553719, 0.0424492677936932, 248.0, 248.0, 248.0, 0, 1, 1, -360, 48.552 ],
[7, 3, 0, 0.009332929362880887, 0.335029305054692, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 53.907 ],
[216, 227, 0, 0.01989941135734072, 0.7143401282507, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 114.939 ],
[228, 229, 0, 0.010545454545454545, 0.027890337012274, 495.0, 495.0, 495.0, 0, 1, 1, -360, 15.95 ],
[227, 230, 0, 0.003993074792243767, 0.573366419334696, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 46.128 ],
[231, 53, 0, 0.007193213296398893, 1.0328749562310842, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 83.096 ],
[544, 545, 0, 0.013061818181818181, 0.034545548464856, 495.0, 495.0, 495.0, 0, 1, 1, -360, 19.756 ],
[234, 235, 0, 0.04608859504132231, 0.121893887321888, 495.0, 495.0, 495.0, 0, 1, 1, -360, 69.709 ],
[546, 214, 0, 0.057025454545454546, 0.15081940173295602, 495.0, 495.0, 495.0, 0, 1, 1, -360, 86.251 ],
[233, 227, 0, 0.0029001038781163438, 0.1041066260218888, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 16.750999999999998 ],
[237, 238, 0, 0.026324628099173554, 0.06962267451304, 495.0, 495.0, 495.0, 0, 1, 1, -360, 39.816 ],
[212, 100, 0, 0.007955505540166205, 0.285583163531816, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 45.951 ],
[519, 239, 0, 0.01740429752066116, 0.046030422038308406, 495.0, 495.0, 495.0, 0, 1, 1, -360, 26.324 ],
[238, 519, 0, 0.015166280991735538, 0.040111375593995205, 495.0, 495.0, 495.0, 0, 1, 1, -360, 22.939 ],
[213, 240, 0, 0.01665388429752066, 0.04404574915373599, 1200.0, 1200.0, 1200.0, 0, 1, 1, -360, 25.189 ],
[241, 242, 0, 0.009862015235457064, 0.3540221919932281, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 56.963 ],
[70, 241, 0, 0.003819858033240997, 0.5484941897752321, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 44.126999999999995 ],
[509, 213, 0, 0.011363636363636364, 0.120216969880216, 991.0, 991.0, 991.0, 0, 2, 1, -360, 34.375 ],
[68, 243, 0, 0.003611668975069252, 0.1296500701715312, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 20.861 ],
[243, 244, 0, 0.0007699099722991691, 0.027637882270859202, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 4.447 ],
[68, 244, 0, 0.004104051246537396, 0.147325387728876, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 23.705 ],
[544, 547, 0, 0.02418776859504132, 0.255884661882476, 991.0, 991.0, 991.0, 0, 1, 1, -360, 73.168 ],
[245, 227, 0, 0.012676419667590028, 0.45505241780707606, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 73.219 ],
[246, 208, 0, 0.0010155817174515235, 0.0364568961999408, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 5.8660000000000005 ],
[112, 208, 0, 0.0017927631578947367, 0.0643558063672372, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 10.355 ],
[165, 247, 0, 0.0002113919667590028, 0.0075884538459086, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 1.2209999999999999 ],
[537, 549, 0, 0.00032066115702479337, 0.00084807607842936, 495.0, 495.0, 495.0, 0, 1, 1, -360, 0.485 ],
[537, 550, 0, 0.00032198347107438016, 0.0008515732993697601, 495.0, 495.0, 495.0, 0, 1, 1, -360, 0.48700000000000004 ],
[537, 551, 0, 0.0002651239669421488, 0.0007011927988648, 495.0, 495.0, 495.0, 0, 1, 1, -360, 0.401 ],
[110, 251, 0, 0.00023857340720221602, 0.008564200982522441, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 1.3780000000000001 ],
[510, 252, 0, 0.08467702479338843, 0.055987884365424005, 248.0, 248.0, 248.0, 0, 1, 1, -360, 64.03699999999999 ],
[529, 253, 0, 0.04859504132231405, 0.12852286961777998, 495.0, 495.0, 495.0, 0, 1, 1, -360, 73.5 ],
[237, 239, 0, 0.03309421487603306, 0.08752669712542799, 495.0, 495.0, 495.0, 0, 1, 1, -360, 50.055 ],
[254, 238, 0, 0.07815008264462811, 0.05167231372274401, 248.0, 248.0, 248.0, 0, 1, 1, -360, 59.101000000000006 ],
[69, 255, 0, 0.0009369806094182826, 0.134541235754472, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 10.824000000000002 ],
[510, 225, 0, 0.021953719008264466, 0.232250442756508, 991.0, 991.0, 991.0, 0, 1, 1, -360, 66.41 ],
[256, 257, 0, 0.010125619834710746, 0.0267799693631888, 495.0, 495.0, 495.0, 0, 1, 1, -360, 15.315 ],
[258, 190, 0, 0.011717451523545707, 0.10515695255750121, 856.0, 856.0, 856.0, 0, 1, 1, -360, 33.84 ],
[258, 259, 0, 0.015782548476454293, 0.1416387085570408, 856.0, 856.0, 856.0, 0, 1, 1, -360, 45.58 ],
[260, 261, 0, 0.006791031855955679, 0.9751256416231477, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 78.45 ],
[554, 553, 0, 0.17583338842975205, 0.11625986438453201, 248.0, 248.0, 248.0, 0, 1, 1, -360, 132.974 ],
[515, 263, 0, 0.006987107438016529, 0.0739172618295936, 991.0, 991.0, 991.0, 0, 2, 1, -360, 21.136 ],
[14, 264, 0, 0.01700694214876033, 0.17991802858084, 991.0, 991.0, 991.0, 0, 1, 1, -360, 51.446000000000005 ],
[116, 555, 0, 0.0009768595041322315, 0.0103342878835768, 991.0, 991.0, 991.0, 0, 2, 1, -360, 2.955 ],
[151, 116, 0, 0.007244958677685951, 0.0191612735410668, 495.0, 495.0, 495.0, 0, 1, 1, -360, 10.958 ],
[111, 114, 0, 0.008806613573407202, 0.3161358573133961, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 50.867 ],
[77, 111, 0, 0.00288452216066482, 0.41418912211817605, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 33.321999999999996 ],
[266, 525, 0, 0.01042909090909091, 0.027582581569373602, 495.0, 495.0, 495.0, 0, 1, 1, -360, 15.774000000000001 ],
[267, 120, 0, 0.013136945983379503, 0.471584184581432, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 75.87899999999999 ],
[268, 269, 0, 0.0010327272727272726, 0.0027313295556817604, 495.0, 495.0, 495.0, 0, 1, 1, -360, 1.5619999999999998 ],
[556, 271, 0, 0.052289586776859506, 0.0345735262323792, 248.0, 248.0, 248.0, 0, 1, 1, -360, 39.544000000000004 ],
[556, 272, 0, 0.04685355371900827, 0.030979257409249603, 248.0, 248.0, 248.0, 0, 1, 1, -360, 35.433 ],
[529, 273, 0, 0.0034604958677685953, 0.009152227205140799, 495.0, 495.0, 495.0, 0, 1, 1, -360, 5.234 ],
[128, 274, 0, 0.0029350761772853184, 0.1053620459045884, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 16.953 ],
[34, 275, 0, 0.0008290909090909092, 0.00054818938265696, 248.0, 248.0, 248.0, 0, 1, 1, -360, 0.627 ],
[503, 276, 0, 0.006707438016528925, 0.07095861291266, 991.0, 991.0, 991.0, 0, 2, 1, -360, 20.29 ],
[503, 504, 0, 0.06432727272727272, 0.680524223098808, 991.0, 991.0, 991.0, 0, 2, 1, -360, 194.59 ],
[177, 218, 0, 0.04330380165289256, 0.114528740018308, 495.0, 495.0, 495.0, 0, 1, 1, -360, 65.497 ],
[277, 278, 0, 0.007191135734072023, 1.032576638635032, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 83.072 ],
[557, 558, 0, 0.04341289256198347, 0.258338836678648, 743.0, 743.0, 743.0, 0, 1, 1, -360, 98.493 ],
[557, 559, 0, 0.03415867768595042, 0.09034195998366001, 495.0, 495.0, 495.0, 0, 1, 1, -360, 51.665 ],
[559, 558, 0, 0.04474314049586777, 0.11833546501370001, 495.0, 495.0, 495.0, 0, 1, 1, -360, 67.67399999999999 ],
[277, 78, 0, 0.03585768698060942, 0.32180078416049196, 856.0, 856.0, 856.0, 0, 1, 1, -360, 103.557 ],
[277, 279, 0, 0.021390927977839334, 0.191970480441328, 856.0, 856.0, 856.0, 0, 1, 1, -360, 61.777 ],
[78, 279, 0, 0.015811980609418283, 0.1419028439283376, 856.0, 856.0, 856.0, 0, 1, 1, -360, 45.665 ],
[281, 282, 0, 0.0023178670360110803, 0.08320574945862161, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 13.388 ],
[283, 161, 0, 0.036741157024793386, 0.09717203248350399, 495.0, 495.0, 495.0, 0, 2, 1, -360, 55.571000000000005 ],
[268, 161, 0, 0.018883636363636366, 0.199771751868832, 991.0, 991.0, 991.0, 0, 2, 1, -360, 57.123000000000005 ],
[256, 284, 0, 0.010755371900826446, 0.113782083346976, 991.0, 991.0, 991.0, 0, 2, 1, -360, 32.535 ],
[515, 516, 0, 0.04071140495867769, 0.107672438361532, 495.0, 495.0, 495.0, 0, 1, 1, -360, 61.576 ],
[263, 516, 0, 0.0030355371900826445, 0.128452925198488, 1981.0, 1981.0, 1981.0, 0, 2, 1, -360, 18.365 ],
[516, 285, 0, 0.006908429752066116, 0.018271230811372, 495.0, 495.0, 495.0, 0, 1, 1, -360, 10.449000000000002 ],
[63, 286, 0, 0.019088925619834708, 0.050485881518556, 495.0, 495.0, 495.0, 0, 1, 1, -360, 28.872 ],
[287, 516, 0, 0.01732892561983471, 0.011457770111127998, 248.0, 248.0, 248.0, 0, 1, 1, -360, 13.105 ],
[8, 102, 0, 0.015100069252077563, 0.542055501663692, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 87.21799999999999 ],
[8, 101, 0, 0.019246883656509697, 0.69091598202144, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 111.17 ],
[80, 288, 0, 0.007984072022160666, 0.2866086302684072, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 46.11600000000001 ],
[80, 289, 0, 0.0003782317636201524, 0.122198345223416, 5134.0, 5134.0, 5134.0, 0, 4, 1, -360, 6.553999999999999 ],
[276, 560, 0, 0.01778314049586777, 0.047032375838192794, 495.0, 495.0, 495.0, 0, 2, 1, -360, 26.897 ],
[37, 290, 0, 0.005629501385041551, 0.4546919507138321, 2567.0, 2567.0, 2567.0, 0, 2, 1, -360, 48.773999999999994 ],
[290, 74, 0, 0.02071595106187673, 1.673216783321968, 2567.0, 2567.0, 2567.0, 0, 2, 1, -360, 179.483 ],
[512, 291, 0, 0.0053299173553719, 0.056385693247479204, 991.0, 991.0, 991.0, 0, 2, 1, -360, 16.123 ],
[78, 292, 0, 0.0058149815327908595, 0.469673087481408, 2567.0, 2567.0, 2567.0, 0, 2, 1, -360, 50.381 ],
[199, 548, 0, 0.0015530578512396695, 0.00410748599634868, 495.0, 495.0, 495.0, 0, 1, 1, -360, 2.349 ],
[491, 293, 0, 0.014176528925619833, 0.009373426429729999, 248.0, 248.0, 248.0, 0, 1, 1, -360, 10.720999999999998 ],
[4, 294, 0, 9.669321329639889e-05, 0.013884198109531681, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 1.117 ],
[490, 541, 0, 0.050580495867768596, 0.133773946861896, 495.0, 495.0, 495.0, 0, 1, 1, -360, 76.503 ],
[491, 295, 0, 0.010613553719008264, 0.028070443890777202, 495.0, 495.0, 495.0, 0, 1, 1, -360, 16.053 ],
[491, 296, 0, 0.004400661157024794, 0.0116387512948784, 495.0, 495.0, 495.0, 0, 1, 1, -360, 6.656000000000001 ],
[295, 297, 0, 0.020297520661157024, 0.053682341459340005, 495.0, 495.0, 495.0, 0, 1, 1, -360, 30.7 ],
[508, 161, 0, 0.023239669421487603, 0.061463658055360006, 495.0, 495.0, 495.0, 0, 1, 1, -360, 35.15 ],
[117, 123, 0, 0.005876211911357341, 0.21094161505628, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 33.941 ],
[133, 117, 0, 0.004469182825484764, 0.0401081792747688, 856.0, 856.0, 856.0, 0, 1, 1, -360, 12.907 ],
[71, 74, 0, 0.03904524469065097, 0.7884161162841721, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 169.144 ],
[74, 278, 0, 0.0077122576177285325, 1.10740463560792, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 89.09200000000001 ],
[298, 515, 0, 0.021701157024793388, 0.05739464148919599, 495.0, 495.0, 495.0, 0, 1, 1, -360, 32.823 ],
[5, 299, 0, 0.0016232686980609415, 0.058271370400665996, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 9.376 ],
[32, 292, 0, 0.009679362880886427, 0.34746541983297996, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 55.908 ],
[5, 29, 0, 0.00743395083102493, 1.0674425076571843, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 85.87700000000001 ],
[503, 560, 0, 0.015140495867768593, 0.160172719142436, 991.0, 991.0, 991.0, 0, 1, 1, -360, 45.8 ],
[300, 301, 0, 0.004892053324099723, 0.7024509290644521, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 56.513000000000005 ],
[51, 300, 0, 0.002573493767313019, 0.3695284920307039, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 29.729 ],
[244, 302, 0, 0.007714508310249307, 1.107727813004004, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 89.118 ],
[31, 302, 0, 0.004369113573407203, 0.6273619041941161, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 50.472 ],
[51, 282, 0, 0.006288434903047093, 0.9029576432132521, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 72.64399999999999 ],
[303, 304, 0, 8.795013850415512e-05, 0.000789298639172312, 856.0, 856.0, 856.0, 0, 1, 1, -360, 0.254 ],
[305, 304, 0, 0.003881117266849031, 0.0783689646873844, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 16.813 ],
[305, 259, 0, 0.0025625, 0.36794989475177603, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 29.601999999999997 ],
[306, 307, 0, 0.03223268698060942, 0.289268628831688, 856.0, 856.0, 856.0, 0, 1, 1, -360, 93.088 ],
[305, 308, 0, 0.0024272853185595567, 0.0217833994511184, 856.0, 856.0, 856.0, 0, 1, 1, -360, 7.01 ],
[305, 309, 0, 0.011014773776523545, 0.22241441259921202, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 47.716 ],
[310, 309, 0, 0.009565962603878117, 0.343394627639832, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 55.253 ],
[306, 309, 0, 0.035333795013850415, 0.31709917455019604, 856.0, 856.0, 856.0, 0, 1, 1, -360, 102.044 ],
[311, 280, 0, 0.003433691135734072, 0.1232611016590444, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 19.833 ],
[280, 278, 0, 0.009749769159764544, 0.7874838737974121, 2567.0, 2567.0, 2567.0, 0, 1, 1, -360, 84.47200000000001 ],
[311, 32, 0, 0.01205909510619806, 0.9740069506375919, 2567.0, 2567.0, 2567.0, 0, 2, 1, -360, 104.48 ],
[13, 312, 0, 0.0043324965373961214, 0.622104056565324, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 50.049 ],
[313, 314, 0, 0.006092624653739613, 0.218710302449316, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 35.191 ],
[312, 313, 0, 0.00893957756232687, 0.32090893884734, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 51.635 ],
[547, 566, 0, 0.027035702479338848, 0.286013220297816, 991.0, 991.0, 991.0, 0, 1, 1, -360, 81.783 ],
[245, 315, 0, 0.014162569252077564, 0.508401547875772, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 81.803 ],
[312, 316, 0, 8.803670360110802e-05, 0.01264120812658816, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 1.0170000000000001 ],
[312, 314, 0, 0.005339854570637119, 0.191687700220296, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 30.843000000000004 ],
[554, 546, 0, 0.08174743801652892, 0.21620344446439202, 495.0, 495.0, 495.0, 0, 1, 1, -360, 123.64299999999999 ],
[262, 216, 0, 0.042641966759002774, 0.38268554099981195, 856.0, 856.0, 856.0, 0, 1, 1, -360, 123.15 ],
[317, 233, 0, 0.005647276084951523, 0.114031901035644, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 24.464000000000002 ],
[318, 317, 0, 0.008311634349030471, 0.16783161497270002, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 36.006 ],
[231, 52, 0, 0.035263677285318554, 1.2658796434850879, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 203.683 ],
[319, 567, 0, 0.006089586776859504, 0.0644223069721, 991.0, 991.0, 991.0, 0, 1, 1, -360, 18.421 ],
[557, 321, 0, 0.010004628099173555, 0.10583989458750401, 991.0, 991.0, 991.0, 0, 2, 1, -360, 30.264 ],
[277, 65, 0, 0.009430170821779778, 0.7616700793261759, 2567.0, 2567.0, 2567.0, 0, 2, 1, -360, 81.703 ],
[322, 288, 0, 0.006545013850415513, 0.528637424797136, 2567.0, 2567.0, 2567.0, 0, 2, 1, -360, 56.706 ],
[322, 323, 0, 0.0018503000923372577, 0.14944779312484, 2567.0, 2567.0, 2567.0, 0, 2, 1, -360, 16.031 ],
[277, 324, 0, 0.019719529085872576, 0.39818407235049996, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 85.425 ],
[324, 325, 0, 0.01103508771932133, 0.22282459929396403, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 47.803999999999995 ],
[277, 325, 0, 0.008665743305609418, 0.174981914850048, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 37.54 ],
[326, 327, 0, 0.007654214876033058, 0.0202436634226288, 495.0, 495.0, 495.0, 0, 1, 1, -360, 11.577 ],
[328, 326, 0, 0.10300958677685952, 0.068109252150368, 248.0, 248.0, 248.0, 0, 1, 1, -360, 77.90100000000001 ],
[328, 327, 0, 0.09827173553719008, 0.064976616491468, 248.0, 248.0, 248.0, 0, 1, 1, -360, 74.318 ],
[326, 329, 0, 0.028062148760330575, 0.07421802283046801, 495.0, 495.0, 495.0, 0, 1, 1, -360, 42.443999999999996 ],
[568, 329, 0, 0.05699900826446282, 0.15074945731414802, 495.0, 495.0, 495.0, 0, 1, 1, -360, 86.211 ],
[568, 326, 0, 0.03218644628099173, 0.08512585494846397, 495.0, 495.0, 495.0, 0, 1, 1, -360, 48.681999999999995 ],
[332, 78, 0, 0.006471029547541551, 0.522661750455416, 2567.0, 2567.0, 2567.0, 0, 2, 1, -360, 56.065 ],
[333, 306, 0, 0.008580159279778392, 0.308006702824228, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 49.559 ],
[332, 333, 0, 0.007504674515235457, 0.26939943395502003, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 43.347 ],
[332, 334, 0, 0.017124653739612188, 0.15368328149175597, 856.0, 856.0, 856.0, 0, 1, 1, -360, 49.456 ],
[66, 334, 0, 0.030625, 0.27484062260471603, 856.0, 856.0, 856.0, 0, 1, 1, -360, 88.445 ],
[330, 335, 0, 0.00550536703601108, 0.790516769355108, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 63.598 ],
[336, 66, 0, 0.015054362880886425, 0.1351036887216764, 856.0, 856.0, 856.0, 0, 1, 1, -360, 43.477 ],
[330, 336, 0, 0.039036357340720224, 0.350327404269788, 856.0, 856.0, 856.0, 0, 1, 1, -360, 112.73700000000001 ],
[68, 70, 0, 0.016314058171745152, 0.14640868261713597, 856.0, 856.0, 856.0, 0, 1, 1, -360, 47.115 ],
[509, 337, 0, 0.03494082644628099, 0.09241056617056001, 495.0, 495.0, 495.0, 0, 1, 1, -360, 52.848 ],
[324, 288, 0, 0.012627423822714683, 0.11332339674541761, 856.0, 856.0, 856.0, 0, 1, 1, -360, 36.468 ],
[338, 559, 0, 0.009228099173553718, 0.097624922595552, 991.0, 991.0, 991.0, 0, 2, 1, -360, 27.915 ],
[339, 559, 0, 0.03560595041322315, 0.023542417076125203, 248.0, 248.0, 248.0, 0, 1, 1, -360, 26.927 ],
[339, 340, 0, 0.08711537190082644, 0.23040041287850396, 495.0, 495.0, 495.0, 0, 1, 1, -360, 131.762 ],
[559, 340, 0, 0.20983272727272728, 0.138740000599684, 248.0, 248.0, 248.0, 0, 1, 1, -360, 158.686 ],
[341, 292, 0, 0.0009329409048961218, 0.07535316024134399, 2567.0, 2567.0, 2567.0, 0, 1, 1, -360, 8.083 ],
[557, 342, 0, 0.006019834710743802, 0.0636843933534336, 991.0, 991.0, 991.0, 0, 2, 1, -360, 18.21 ],
[558, 343, 0, 0.010650247933884296, 0.11266996708783199, 991.0, 991.0, 991.0, 0, 1, 1, -360, 32.217 ],
[502, 340, 0, 0.021737520661157025, 0.22996326026071198, 991.0, 991.0, 991.0, 0, 2, 1, -360, 65.756 ],
[72, 32, 0, 0.00675502077562327, 0.969954803293024, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 78.03399999999999 ],
[344, 345, 0, 0.0005762927054480609, 0.04654686738645321, 2567.0, 2567.0, 2567.0, 0, 1, 1, -360, 4.993 ],
[346, 47, 0, 0.0011340027700831024, 0.04070792194158799, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 6.55 ],
[46, 47, 0, 0.0008975069252077563, 0.0322183003580208, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 5.184 ],
[346, 345, 0, 0.0007217797783933517, 0.025910126194627202, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 4.169 ],
[347, 328, 0, 0.029905454545454544, 0.07909314882361201, 495.0, 495.0, 495.0, 0, 1, 1, -360, 45.232 ],
[347, 348, 0, 0.04883438016528925, 0.129155866607944, 495.0, 495.0, 495.0, 0, 1, 1, -360, 73.862 ],
[571, 348, 0, 0.041548429752066116, 0.10988617921762801, 495.0, 495.0, 495.0, 0, 1, 1, -360, 62.842 ],
[347, 572, 0, 0.016052231404958678, 0.04245451362512801, 495.0, 495.0, 495.0, 0, 1, 1, -360, 24.279 ],
[571, 570, 0, 0.17379041322314048, 0.11490906279551602, 248.0, 248.0, 248.0, 0, 1, 1, -360, 131.429 ],
[14, 350, 0, 0.02166743801652892, 0.05730546235524, 495.0, 495.0, 495.0, 0, 1, 1, -360, 32.772 ],
[350, 573, 0, 0.026277685950413226, 0.06949852316919598, 495.0, 495.0, 495.0, 0, 1, 1, -360, 39.745 ],
[15, 351, 0, 0.02639265927977839, 0.236857956201204, 856.0, 856.0, 856.0, 0, 1, 1, -360, 76.222 ],
[352, 15, 0, 0.0015260560941828254, 0.219126704094076, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 17.629 ],
[15, 335, 0, 0.0035338758079432133, 1.1417173740880242, 5134.0, 5134.0, 5134.0, 0, 1, 1, -360, 61.235 ],
[232, 227, 0, 5.5747922437673134e-05, 0.000500303468136644, 1200.0, 1200.0, 1200.0, 0, 1, 1, -360, 0.161 ],
[565, 544, 0, 0.0394803305785124, 0.10441652566461601, 495.0, 495.0, 495.0, 0, 1, 1, -360, 59.714 ],
[235, 567, 0, 0.02391404958677686, 0.25298896294275997, 991.0, 991.0, 991.0, 0, 1, 1, -360, 72.34 ],
[567, 286, 0, 0.008068760330578512, 0.34144067500694797, 1981.0, 1981.0, 1981.0, 0, 1, 1, -360, 48.816 ],
[353, 519, 0, 0.007621818181818182, 0.080631926038356, 991.0, 991.0, 991.0, 0, 1, 1, -360, 23.055999999999997 ],
[354, 353, 0, 0.0008436363636363636, 0.00892490784392768, 991.0, 991.0, 991.0, 0, 2, 1, -360, 2.552 ],
[355, 354, 0, 0.0068502479338842966, 0.0181173530898976, 495.0, 495.0, 495.0, 0, 1, 1, -360, 10.360999999999999 ],
[354, 356, 0, 0.01855404958677686, 0.049071255647172, 495.0, 495.0, 495.0, 0, 1, 1, -360, 28.063000000000002 ],
[357, 358, 0, 0.0034823407202216067, 0.5000300103406239, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 40.228 ],
[574, 359, 0, 0.013352066115702478, 0.0353131884615884, 495.0, 495.0, 495.0, 0, 1, 1, -360, 20.195 ],
[235, 575, 0, 0.007459504132231404, 0.0789147905557, 991.0, 991.0, 991.0, 0, 1, 1, -360, 22.565 ],
[167, 361, 0, 0.000616198347107438, 0.0065188198358579995, 991.0, 991.0, 991.0, 0, 1, 1, -360, 1.864 ],
[528, 362, 0, 0.0011960330578512398, 0.012652945368078402, 991.0, 991.0, 991.0, 0, 1, 1, -360, 3.6180000000000003 ],
[363, 344, 0, 0.0002662742382271468, 0.009558592968871479, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 1.538 ],
[259, 364, 0, 0.013069713758102496, 0.26390852570525997, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 56.618 ],
[54, 56, 0, 0.007723337950138504, 0.0693122289241068, 856.0, 856.0, 856.0, 0, 1, 1, -360, 22.305 ],
[365, 364, 0, 0.0049974607571537395, 0.10091058802821559, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 21.649 ],
[231, 366, 0, 0.0013273891966759002, 0.0476500209962672, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 7.667000000000001 ],
[30, 367, 0, 0.01126108033240997, 0.1010613005635992, 856.0, 856.0, 856.0, 0, 1, 1, -360, 32.522 ],
[61, 367, 0, 0.020337603878116343, 0.18251754162067196, 856.0, 856.0, 856.0, 0, 1, 1, -360, 58.735 ],
[254, 368, 0, 0.0004297520661157025, 0.00454638722456732, 991.0, 991.0, 991.0, 0, 1, 1, -360, 1.3 ],
[254, 369, 0, 0.00015999999999999999, 0.00169265493591832, 991.0, 991.0, 991.0, 0, 2, 1, -360, 0.484 ],
[254, 370, 0, 0.0003669421487603306, 0.0038819152455960805, 991.0, 991.0, 991.0, 0, 2, 1, -360, 1.11 ],
[99, 358, 0, 0.0020184383656509696, 0.28982797432374396, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 23.316999999999997 ],
[354, 519, 0, 0.006762644628099174, 0.07154264880985199, 991.0, 991.0, 991.0, 0, 1, 1, -360, 20.457 ],
[571, 371, 0, 0.023726942148760328, 0.06275238397221199, 495.0, 495.0, 495.0, 0, 1, 1, -360, 35.887 ],
[207, 372, 0, 0.002329256198347108, 0.006160354689297601, 495.0, 495.0, 495.0, 0, 1, 1, -360, 3.523 ],
[57, 373, 0, 0.0017725619834710745, 0.0046880246727212796, 495.0, 495.0, 495.0, 0, 1, 1, -360, 2.681 ],
[209, 374, 0, 0.0010122922437673131, 0.0363388121515216, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 5.847 ],
[375, 376, 0, 0.0045364727608518006, 0.0916021467933684, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 19.652 ],
[376, 377, 0, 0.0030886426592797783, 0.062367022394423606, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 13.38 ],
[16, 49, 0, 0.002266101108033241, 0.32538991773524, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 26.178 ],
[318, 377, 0, 0.004755078485685596, 0.0960163149704152, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 20.599 ],
[378, 297, 0, 0.01753917355371901, 0.046387138574374404, 495.0, 495.0, 495.0, 0, 1, 1, -360, 26.528000000000002 ],
[562, 379, 0, 0.01802314049586777, 0.047667121439141605, 495.0, 495.0, 495.0, 0, 1, 1, -360, 27.26 ],
[576, 563, 0, 0.001808264462809917, 0.004782449638150801, 495.0, 495.0, 495.0, 0, 1, 1, -360, 2.735 ],
[576, 381, 0, 0.0034320661157024794, 0.009077036954898, 495.0, 495.0, 495.0, 0, 1, 1, -360, 5.191 ],
[577, 576, 0, 0.06004495867768594, 0.15880530575430396, 495.0, 495.0, 495.0, 0, 1, 1, -360, 90.818 ],
[244, 383, 0, 0.006845567867036011, 0.1382282547912684, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 29.655 ],
[244, 306, 0, 0.02679108956599723, 0.5409756541164079, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 116.059 ],
[383, 306, 0, 0.0300685595567867, 0.269846910348376, 856.0, 856.0, 856.0, 0, 1, 1, -360, 86.838 ],
[380, 306, 0, 0.00025605955678670365, 0.03676764369572, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 2.958 ],
[252, 225, 0, 0.062094545454545444, 0.041056499553586, 248.0, 248.0, 248.0, 0, 1, 1, -360, 46.958999999999996 ],
[220, 76, 0, 0.002772074099722992, 0.398042682239984, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 32.023 ],
[542, 384, 0, 0.007939834710743802, 0.020999063146094, 495.0, 495.0, 495.0, 0, 1, 1, -360, 12.009 ],
[385, 384, 0, 0.053734876033057856, 0.035529141854791196, 248.0, 248.0, 248.0, 0, 1, 1, -360, 40.637 ],
[542, 385, 0, 0.011306115702479337, 0.119608453436296, 991.0, 991.0, 991.0, 0, 2, 1, -360, 34.201 ],
[386, 385, 0, 0.003668760330578512, 0.0388121580140316, 991.0, 991.0, 991.0, 0, 1, 1, -360, 11.097999999999999 ],
[387, 578, 0, 0.015444628099173553, 0.16339016240905604, 991.0, 991.0, 991.0, 0, 1, 1, -360, 46.72 ],
[332, 388, 0, 0.014036184210526315, 0.5038646344377999, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 81.07300000000001 ],
[382, 332, 0, 0.017764369806094183, 0.637697365901468, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 102.60700000000001 ],
[382, 388, 0, 0.00476159972299169, 0.17092976750548, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 27.503 ],
[579, 578, 0, 0.01911074380165289, 0.050543585664, 495.0, 495.0, 495.0, 0, 1, 1, -360, 28.905 ],
[577, 387, 0, 0.07597818181818182, 0.20094506949431204, 495.0, 495.0, 495.0, 0, 1, 1, -360, 114.917 ],
[144, 390, 0, 0.0004277685950413223, 0.0011313509747276, 495.0, 495.0, 495.0, 0, 1, 1, -360, 0.647 ],
[37, 49, 0, 0.008441481994459835, 0.303028527944352, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 48.758 ],
[391, 233, 0, 0.014211218836565096, 0.1275369872004348, 856.0, 856.0, 856.0, 0, 1, 1, -360, 41.042 ],
[392, 310, 0, 0.007035318559556785, 0.06313767618386361, 856.0, 856.0, 856.0, 0, 1, 1, -360, 20.317999999999998 ],
[260, 393, 0, 0.006341412742382271, 0.0569102963692744, 856.0, 856.0, 856.0, 0, 1, 1, -360, 18.314 ],
[394, 230, 0, 0.0007590027700831025, 0.00681158510656168, 856.0, 856.0, 856.0, 0, 1, 1, -360, 2.1919999999999997 ],
[395, 282, 0, 0.008762984764542936, 0.314569689934484, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 50.615 ],
[395, 244, 0, 0.0034046052631578946, 0.12221699007344, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 19.665 ],
[25, 396, 0, 0.008809037396121884, 0.316222866612064, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 50.881 ],
[81, 74, 0, 0.0075207756232686974, 0.26997742429652244, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 43.44 ],
[278, 80, 0, 0.016286011080332407, 0.5846279085788, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 94.068 ],
[81, 278, 0, 0.021054016620498613, 0.755787629231688, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 121.60799999999999 ],
[569, 570, 0, 0.03253950413223141, 0.08605961294018, 495.0, 495.0, 495.0, 0, 1, 1, -360, 49.216 ],
[397, 552, 0, 0.006289586776859504, 0.0166345314104904, 1200.0, 1200.0, 1200.0, 0, 1, 1, -360, 9.513 ],
[542, 398, 0, 0.0005580165289256199, 0.0059033089500572, 991.0, 991.0, 991.0, 0, 1, 1, -360, 1.6880000000000002 ],
[398, 385, 0, 0.021893553719008262, 0.05790348713648401, 495.0, 495.0, 495.0, 0, 1, 1, -360, 33.114000000000004 ],
[399, 499, 0, 0.03266380165289256, 0.021597087927192803, 248.0, 248.0, 248.0, 0, 1, 1, -360, 24.701999999999998 ],
[83, 399, 0, 0.025700495867768593, 0.016992996557050798, 248.0, 248.0, 248.0, 0, 1, 1, -360, 19.436 ],
[498, 400, 0, 0.012134214876033058, 0.032092247974028, 495.0, 495.0, 495.0, 0, 1, 1, -360, 18.352999999999998 ],
[518, 239, 0, 0.04685289256198347, 0.123915281026504, 495.0, 495.0, 495.0, 0, 1, 1, -360, 70.865 ],
[575, 543, 0, 0.0030307438016528923, 0.032062521596058796, 991.0, 991.0, 991.0, 0, 1, 1, -360, 9.168 ],
[401, 360, 0, 0.007957063711911357, 0.071409774520472, 856.0, 856.0, 856.0, 0, 1, 1, -360, 22.98 ],
[580, 581, 0, 0.007134545454545454, 0.018869255592422397, 495.0, 495.0, 495.0, 0, 1, 1, -360, 10.790999999999999 ],
[401, 402, 0, 0.0033434903047091418, 0.030005778188384805, 856.0, 856.0, 856.0, 0, 1, 1, -360, 9.656 ],
[403, 231, 0, 0.009592105263157893, 0.08608327126915, 856.0, 856.0, 856.0, 0, 1, 1, -360, 27.701999999999998 ],
[189, 360, 0, 0.028456024930747923, 0.255375399471348, 856.0, 856.0, 856.0, 0, 1, 1, -360, 82.181 ],
[234, 404, 0, 0.008092561983471074, 0.0214029921648796, 495.0, 495.0, 495.0, 0, 1, 1, -360, 12.24 ],
[235, 404, 0, 0.05107504132231405, 0.13508190749437998, 495.0, 495.0, 495.0, 0, 1, 1, -360, 77.251 ],
[235, 580, 0, 0.000580495867768595, 0.00153527999352772, 495.0, 495.0, 495.0, 0, 1, 1, -360, 0.878 ],
[216, 259, 0, 0.0022115650969529088, 0.079389770210892, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 12.774000000000001 ],
[405, 259, 0, 0.0052832409972299165, 0.1896554115982928, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 30.516 ],
[405, 318, 0, 0.0066348684210526315, 0.23817552558268398, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 38.323 ],
[406, 230, 0, 8.098164819944598e-05, 0.046512685161986804, 6845.0, 6845.0, 6845.0, 0, 1, 1, -360, 1.871 ],
[542, 407, 0, 0.025569586776859506, 0.067625761355152, 495.0, 495.0, 495.0, 0, 1, 1, -360, 38.674 ],
[23, 408, 0, 0.03224528925619835, 0.08528148128033601, 495.0, 495.0, 495.0, 0, 1, 1, -360, 48.771 ],
[577, 348, 0, 0.012999008264462809, 0.13751772188026398, 991.0, 991.0, 991.0, 0, 2, 1, -360, 39.321999999999996 ],
[562, 564, 0, 0.06921520661157024, 0.18305853298686803, 495.0, 495.0, 495.0, 0, 1, 1, -360, 104.68799999999999 ],
[582, 507, 0, 0.006357685950413223, 0.016814638289042002, 495.0, 495.0, 495.0, 0, 1, 1, -360, 9.616 ],
[27, 410, 0, 0.0030042975206611565, 0.007945685980170399, 495.0, 495.0, 495.0, 0, 1, 1, -360, 4.544 ],
[501, 27, 0, 0.003811570247933884, 0.040322957460962, 991.0, 991.0, 991.0, 0, 1, 1, -360, 11.53 ],
[27, 411, 0, 0.004648595041322314, 0.012294480221518, 495.0, 495.0, 495.0, 0, 1, 1, -360, 7.031000000000001 ],
[411, 410, 0, 0.002054214876033058, 0.0054329327333556, 495.0, 495.0, 495.0, 0, 1, 1, -360, 3.1069999999999998 ],
[403, 360, 0, 0.008191481994459833, 0.07351353506655639, 856.0, 856.0, 856.0, 0, 1, 1, -360, 23.656999999999996 ],
[412, 360, 0, 0.016761772853185596, 0.15042664773666, 856.0, 856.0, 856.0, 0, 1, 1, -360, 48.408 ],
[326, 413, 0, 0.012077024793388432, 0.12776397267356798, 991.0, 991.0, 991.0, 0, 2, 1, -360, 36.533 ],
[414, 413, 0, 0.008093223140495867, 0.08561896310149601, 991.0, 991.0, 991.0, 0, 2, 1, -360, 24.482 ],
[6, 297, 0, 0.019472396694214876, 0.0128750188978664, 248.0, 248.0, 248.0, 0, 1, 1, -360, 14.725999999999999 ],
[554, 580, 0, 0.07435371900826447, 0.196648733567264, 495.0, 495.0, 495.0, 0, 1, 1, -360, 112.46 ],
[262, 401, 0, 0.03931232686980609, 0.35280406181043206, 856.0, 856.0, 856.0, 0, 1, 1, -360, 113.53399999999999 ],
[499, 556, 0, 0.04185586776859504, 0.11069928308639199, 495.0, 495.0, 495.0, 0, 2, 1, -360, 63.306999999999995 ],
[224, 229, 0, 0.004135206611570248, 0.0437467367631624, 991.0, 991.0, 991.0, 0, 1, 1, -360, 12.509 ],
[583, 507, 0, 0.024632727272727268, 0.065147980317596, 495.0, 495.0, 495.0, 0, 1, 1, -360, 37.257 ],
[415, 307, 0, 0.015675554016620498, 0.1406784987952448, 856.0, 856.0, 856.0, 0, 1, 1, -360, 45.271 ],
[416, 507, 0, 0.0010555371900826446, 0.011166626467730801, 991.0, 991.0, 991.0, 0, 1, 1, -360, 3.193 ],
[284, 561, 0, 0.015221487603305786, 0.16102953827307598, 991.0, 991.0, 991.0, 0, 1, 1, -360, 46.045 ],
[543, 417, 0, 0.0006614876033057851, 0.027991756419545603, 1981.0, 1981.0, 1981.0, 0, 4, 1, -360, 4.002 ],
[418, 506, 0, 0.0009395041322314049, 0.009939101917118, 991.0, 991.0, 991.0, 0, 1, 1, -360, 2.842 ],
[220, 157, 0, 0.004599549861495845, 0.165112574384632, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 26.566999999999997 ],
[295, 419, 0, 0.0012023140495867769, 0.012719392565946, 991.0, 991.0, 991.0, 0, 1, 1, -360, 3.637 ],
[295, 420, 0, 0.0008003305785123967, 0.008466771900532, 991.0, 991.0, 991.0, 0, 1, 1, -360, 2.421 ],
[541, 62, 0, 0.05133355371900827, 0.0339414035471236, 248.0, 248.0, 248.0, 0, 1, 1, -360, 38.821 ],
[52, 421, 0, 0.00013885041551246538, 0.004984389831631239, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 0.802 ],
[60, 160, 0, 6.128808864265928e-05, 0.000550023067454096, 856.0, 856.0, 856.0, 0, 2, 1, -360, 0.177 ],
[535, 161, 0, 3.735537190082645e-05, 0.00039518596644331203, 991.0, 991.0, 991.0, 0, 2, 1, -360, 0.113 ],
[267, 282, 0, 0.0065652700831024926, 0.235677115717012, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 37.921 ],
[52, 365, 0, 0.007655586334279779, 0.15458444922992, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 33.164 ],
[28, 27, 0, 0.015726942148760328, 0.041594197273402404, 495.0, 495.0, 495.0, 0, 1, 1, -360, 23.787 ],
[30, 201, 0, 0.009128289473684211, 0.327683234253536, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 52.725 ],
[422, 81, 0, 0.0004226685133887349, 0.13655487952674, 5134.0, 5134.0, 5134.0, 0, 6, 1, -360, 7.324 ],
[119, 425, 0, 0.003579120498614958, 0.1284816595874996, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 20.673000000000002 ],
[423, 425, 0, 0.0006518351800554017, 0.0233992864289392, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 3.765 ],
[424, 425, 0, 0.005922957063711911, 0.21261965153389198, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 34.211 ],
[426, 428, 0, 0.013948429752066116, 0.14756174042535197, 991.0, 991.0, 991.0, 0, 2, 1, -360, 42.193999999999996 ],
[427, 428, 0, 0.0002664462809917355, 0.0028187600792304794, 991.0, 991.0, 991.0, 0, 2, 1, -360, 0.8059999999999999 ],
[19, 428, 0, 0.023607603305785128, 0.24974703912892798, 991.0, 991.0, 991.0, 0, 2, 1, -360, 71.413 ],
[45, 429, 0, 0.02562314049586777, 0.067767398802972, 495.0, 495.0, 495.0, 0, 1, 1, -360, 38.755 ],
[44, 429, 0, 5.289256198347107e-05, 0.00013988883767892, 495.0, 495.0, 495.0, 0, 1, 1, -360, 0.08 ],
[505, 429, 0, 0.006012561983471073, 0.015901863623161996, 495.0, 495.0, 495.0, 0, 1, 1, -360, 9.094 ],
[231, 431, 0, 0.011677285318559558, 0.4191859418495199, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 67.44800000000001 ],
[190, 431, 0, 0.009600761772853185, 0.34464383257266795, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 55.45399999999999 ],
[430, 431, 0, 0.0028100761772853187, 0.1008748520662472, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 16.230999999999998 ],
[286, 433, 0, 0.01568694214876033, 0.16595362535967603, 991.0, 991.0, 991.0, 0, 1, 1, -360, 47.453 ],
[432, 433, 0, 0.00010049586776859504, 0.00106315516636076, 991.0, 991.0, 991.0, 0, 1, 1, -360, 0.304 ],
[506, 433, 0, 0.0065904132231404955, 0.06972059669946801, 991.0, 991.0, 991.0, 0, 1, 1, -360, 19.936 ],
[23, 434, 0, 0.02613685950413223, 0.069126069139116, 495.0, 495.0, 495.0, 0, 2, 1, -360, 39.532 ],
[400, 434, 0, 0.008155371900826446, 0.021569110159669603, 495.0, 495.0, 495.0, 0, 2, 1, -360, 12.335 ],
[500, 434, 0, 0.006338512396694216, 0.0167639285853336, 495.0, 495.0, 495.0, 0, 2, 1, -360, 9.587 ],
[32, 436, 0, 0.0044813019390581715, 0.16086776359270402, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 25.884 ],
[435, 436, 0, 0.0006634349030470914, 0.023815688073266, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 3.832 ],
[78, 436, 0, 0.00897680055401662, 0.32224515307884394, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 51.85 ],
[86, 438, 0, 0.014693213296398892, 0.52745036936438, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 84.868 ],
[437, 438, 0, 1.0387811634349031e-05, 0.0003728969948845, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 0.06 ],
[221, 438, 0, 0.002280124653739612, 0.081850890377238, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 13.17 ],
[207, 439, 0, 0.055703801652892564, 0.0368309823503996, 248.0, 248.0, 248.0, 0, 1, 1, -360, 42.126000000000005 ],
[516, 439, 0, 0.05448462809917355, 0.03602487292327441, 248.0, 248.0, 248.0, 0, 1, 1, -360, 41.20399999999999 ],
[513, 439, 0, 0.046726611570247926, 0.0308953241066316, 248.0, 248.0, 248.0, 0, 1, 1, -360, 35.336999999999996 ],
[181, 441, 0, 0.040805289256198356, 0.10792074104825197, 495.0, 495.0, 495.0, 0, 1, 1, -360, 61.718 ],
[440, 441, 0, 0.0001322314049586777, 0.000349722094197784, 495.0, 495.0, 495.0, 0, 1, 1, -360, 0.2 ],
[504, 441, 0, 0.05916099173553719, 0.156467413554364, 495.0, 495.0, 495.0, 0, 1, 1, -360, 89.48100000000001 ],
[135, 442, 0, 0.004956890581717451, 0.177940231009092, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 28.631 ],
[109, 442, 0, 0.0015380886426592797, 0.055213615042649204, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 8.884 ],
[112, 442, 0, 0.0027304362880886425, 0.09801597510545401, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 15.770999999999999 ],
[113, 443, 0, 0.0019885734072022164, 0.07138491472072879, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 11.485999999999999 ],
[132, 443, 0, 0.006788434903047091, 0.24368818615747198, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 39.21 ],
[107, 443, 0, 2.2333795013850418e-05, 0.000801728539002036, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 0.129 ],
[444, 445, 0, 7.877423822714682e-05, 0.00282780221121528, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 0.455 ],
[112, 445, 0, 0.002816135734072022, 0.101092375313206, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 16.266 ],
[109, 445, 0, 0.0014354224376731304, 0.0515281497432104, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 8.291 ],
[119, 447, 0, 0.005212690443213296, 0.74849127803204, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 60.217 ],
[100, 447, 0, 0.0050695117728531865, 0.7279322237145921, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 58.563 ],
[446, 447, 0, 2.9518698060941832e-05, 0.00423859584186224, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 0.341 ],
[124, 448, 0, 6.509695290858726e-05, 0.00233682116794768, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 0.376 ],
[125, 448, 0, 0.00615148891966759, 0.22082338542026803, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 35.531 ],
[131, 448, 0, 3.912742382271468e-05, 0.0014045786807313759, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 0.226 ],
[449, 450, 0, 0.0023614958448753462, 0.08477191683710039, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 13.64 ],
[173, 450, 0, 0.002862361495844876, 0.10275176694050518, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 16.533 ],
[184, 450, 0, 0.004022853185595568, 0.14441057621844403, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 23.236 ],
[144, 451, 0, 0.007672727272727273, 0.020292624515794402, 495.0, 495.0, 495.0, 0, 1, 1, -360, 11.605 ],
[140, 451, 0, 0.006991074380165291, 0.018489807120219602, 495.0, 495.0, 495.0, 0, 1, 1, -360, 10.574000000000002 ],
[514, 451, 0, 0.01149289256198347, 0.030396095817207994, 495.0, 495.0, 495.0, 0, 1, 1, -360, 17.383 ],
[537, 585, 0, 0.05072595041322314, 0.134158641165824, 495.0, 495.0, 495.0, 0, 1, 1, -360, 76.723 ],
[141, 585, 0, 0.007994710743801653, 0.0211441978151932, 495.0, 495.0, 495.0, 0, 1, 1, -360, 12.092 ],
[584, 585, 0, 9.256198347107438e-05, 0.000244805465938352, 495.0, 495.0, 495.0, 0, 1, 1, -360, 0.14 ],
[522, 454, 0, 0.0035008264462809916, 0.0092588924438956, 495.0, 495.0, 495.0, 0, 1, 1, -360, 5.295 ],
[144, 454, 0, 0.00452892561983471, 0.011977981726290799, 495.0, 495.0, 495.0, 0, 1, 1, -360, 6.85 ],
[453, 454, 0, 0.001114710743801653, 0.0029481572540882, 495.0, 495.0, 495.0, 0, 1, 1, -360, 1.686 ],
[199, 456, 0, 0.013063140495867768, 0.0086372614214612, 248.0, 248.0, 248.0, 0, 1, 1, -360, 9.879 ],
[140, 456, 0, 0.005061818181818182, 0.013387361765852802, 495.0, 495.0, 495.0, 0, 2, 1, -360, 7.656000000000001 ],
[455, 456, 0, 0.0011365289256198346, 0.00300586139962416, 495.0, 495.0, 495.0, 0, 2, 1, -360, 1.719 ],
[537, 456, 0, 0.039058512396694216, 0.025825228046024003, 248.0, 248.0, 248.0, 0, 1, 1, -360, 29.538 ],
[538, 457, 0, 0.027927272727272728, 0.0184653265736368, 248.0, 248.0, 248.0, 0, 1, 1, -360, 21.12 ],
[153, 457, 0, 0.030093223140495867, 0.019897438549384, 248.0, 248.0, 248.0, 0, 1, 1, -360, 22.758000000000003 ],
[176, 457, 0, 0.004579173553719009, 0.0030277190305137603, 248.0, 248.0, 248.0, 0, 1, 1, -360, 3.463 ],
[524, 459, 0, 0.004318677685950414, 0.011421923596476799, 495.0, 495.0, 495.0, 0, 1, 1, -360, 6.532 ],
[458, 459, 0, 0.001993388429752066, 0.0052720605700488, 495.0, 495.0, 495.0, 0, 1, 1, -360, 3.015 ],
[134, 459, 0, 0.011813553719008265, 0.031244171895617998, 495.0, 495.0, 495.0, 0, 1, 1, -360, 17.868 ],
[460, 461, 0, 6.611570247933885e-05, 0.000174861047098892, 495.0, 495.0, 495.0, 0, 1, 1, -360, 0.1 ],
[150, 461, 0, 0.008018512396694214, 0.021207147792120403, 495.0, 495.0, 495.0, 0, 1, 1, -360, 12.128 ],
[149, 461, 0, 0.005586115702479339, 0.0147740098693748, 495.0, 495.0, 495.0, 0, 1, 1, -360, 8.449 ],
[521, 463, 0, 0.014348429752066114, 0.009487086110365599, 248.0, 248.0, 248.0, 0, 1, 1, -360, 10.850999999999999 ],
[462, 463, 0, 0.007197355371900825, 0.0047588433967958406, 248.0, 248.0, 248.0, 0, 1, 1, -360, 5.443 ],
[538, 463, 0, 0.012211570247933883, 0.0080742088497664, 248.0, 248.0, 248.0, 0, 1, 1, -360, 9.235 ],
[110, 464, 0, 0.0025753116343490306, 0.0924473799817492, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 14.875 ],
[90, 464, 0, 0.007328947368421053, 0.26309125979076, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 42.332 ],
[165, 464, 0, 0.002152527700831025, 0.0772704722900764, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 12.433 ],
[458, 465, 0, 0.002003305785123967, 0.0052982897270776, 495.0, 495.0, 495.0, 0, 1, 1, -360, 3.03 ],
[134, 465, 0, 0.011838677685950413, 0.031310619093534, 495.0, 495.0, 495.0, 0, 1, 1, -360, 17.906 ],
[524, 465, 0, 0.004293553719008264, 0.0113554763986092, 495.0, 495.0, 495.0, 0, 1, 1, -360, 6.494 ],
[466, 467, 0, 0.0023509349030470914, 0.084392804892244, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 13.579 ],
[110, 467, 0, 0.0025337603878116343, 0.09095579200221118, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 14.635 ],
[165, 467, 0, 0.0022891274238227145, 0.08217406777274441, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 13.222000000000001 ],
[468, 469, 0, 0.0005269421487603305, 0.0013936425453786, 495.0, 495.0, 495.0, 0, 1, 1, -360, 0.797 ],
[541, 469, 0, 0.022390743801652895, 0.05921844221026801, 495.0, 495.0, 495.0, 0, 1, 1, -360, 33.866 ],
[490, 469, 0, 0.028243305785123966, 0.07469714209944801, 495.0, 495.0, 495.0, 0, 1, 1, -360, 42.718 ],
[263, 471, 0, 0.0371900826446281, 0.0245898347482832, 248.0, 248.0, 248.0, 0, 1, 1, -360, 28.125 ],
[470, 471, 0, 0.001570909090909091, 0.0010386746197682802, 248.0, 248.0, 248.0, 0, 1, 1, -360, 1.188 ],
[534, 471, 0, 0.024497190082644622, 0.0161973787927468, 248.0, 248.0, 248.0, 0, 1, 1, -360, 18.526 ],
[136, 472, 0, 0.0007079293628808865, 0.025412930201351602, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 4.0889999999999995 ],
[110, 472, 0, 0.00019511772853185596, 0.0070042485539216805, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 1.127 ],
[251, 472, 0, 4.207063711911357e-05, 0.00151023282928764, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 0.243 ],
[226, 474, 0, 0.017639669421487602, 0.011663231841509601, 248.0, 248.0, 248.0, 0, 1, 1, -360, 13.34 ],
[473, 474, 0, 0.003467107438016529, 0.00916971330986216, 495.0, 495.0, 495.0, 0, 2, 1, -360, 5.244 ],
[257, 474, 0, 0.020264462809917356, 0.053594910935781594, 495.0, 495.0, 495.0, 0, 2, 1, -360, 30.65 ],
[6, 474, 0, 0.08066247933884299, 0.05333349367016, 248.0, 248.0, 248.0, 0, 1, 1, -360, 61.001000000000005 ],
[299, 475, 0, 0.013238227146814403, 0.47521993028123993, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 76.464 ],
[3, 475, 0, 0.0002794321329639889, 0.010030929162389441, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 1.614 ],
[210, 475, 0, 0.0001481994459833795, 0.00531999712702368, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 0.856 ],
[297, 476, 0, 0.0193500826446281, 0.05117658265464801, 495.0, 495.0, 495.0, 0, 1, 1, -360, 29.267 ],
[296, 476, 0, 0.005596694214876033, 0.014801987636898, 495.0, 495.0, 495.0, 0, 1, 1, -360, 8.465 ],
[295, 476, 0, 0.0009474380165289256, 0.00250575880492432, 495.0, 495.0, 495.0, 0, 1, 1, -360, 1.433 ],
[313, 478, 0, 0.008696849030470914, 0.31219557906752804, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 50.233000000000004 ],
[477, 478, 0, 1.5235457063711912e-05, 0.0005469155924977479, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 0.08800000000000001 ],
[245, 478, 0, 0.005264542936288089, 0.188984197007248, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 30.408 ],
[479, 481, 0, 0.028420495867768597, 0.07516576970575199, 495.0, 495.0, 495.0, 0, 1, 1, -360, 42.986000000000004 ],
[565, 481, 0, 0.024842314049586776, 0.065702289836964, 495.0, 495.0, 495.0, 0, 1, 1, -360, 37.574 ],
[480, 481, 0, 7.735537190082645e-05, 0.000204587425105844, 495.0, 495.0, 495.0, 0, 1, 1, -360, 0.11699999999999999 ],
[415, 482, 0, 0.011021814404432133, 0.0989140353680364, 856.0, 856.0, 856.0, 0, 1, 1, -360, 31.831 ],
[56, 482, 0, 0.002630886426592798, 0.0236105947261788, 856.0, 856.0, 856.0, 0, 1, 1, -360, 7.598 ],
[409, 482, 0, 0.0007635041551246537, 0.0068519822810072005, 856.0, 856.0, 856.0, 0, 1, 1, -360, 2.205 ],
[483, 484, 0, 9.037396121883656e-05, 0.000811050963873968, 856.0, 856.0, 856.0, 0, 1, 1, -360, 0.261 ],
[3, 484, 0, 0.010022160664819944, 0.08994275516621358, 856.0, 856.0, 856.0, 0, 1, 1, -360, 28.944000000000003 ],
[301, 484, 0, 0.00966516620498615, 0.08673894848517479, 856.0, 856.0, 856.0, 0, 1, 1, -360, 27.913 ],
[233, 485, 0, 0.01410180055401662, 0.1265550251138996, 856.0, 856.0, 856.0, 0, 1, 1, -360, 40.726 ],
[392, 485, 0, 0.00914819944598338, 0.0820994883738036, 856.0, 856.0, 856.0, 0, 1, 1, -360, 26.42 ],
[391, 485, 0, 8.518005540166207e-05, 0.000764438839512864, 856.0, 856.0, 856.0, 0, 1, 1, -360, 0.24600000000000002 ],
[579, 488, 0, 0.004636473829194215, 0.11036180126571601, 1486.0, 1486.0, 1486.0, 0, 1, 1, -360, 21.038 ],
[486, 488, 0, 0.00016969696969690082, 0.00403929018798184, 1486.0, 1486.0, 1486.0, 0, 1, 1, -360, 0.77 ],
[487, 488, 0, 0.00014567493112954544, 0.00346749456396992, 1486.0, 1486.0, 1486.0, 0, 1, 1, -360, 0.6609999999999999 ],
[270, 489, 0, 0.0001745152354570637, 0.0062646695140596, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 1.008 ],
[331, 489, 0, 0.003002943213296399, 0.10779830627119119, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 17.345 ],
[396, 489, 0, 0.01124792243767313, 0.40377286606072005, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 64.968 ],
[519, 253, 0, 0.013353485337561985, 0.141267767926912, 991.0, 991.0, 991.0, 0, 1, 1, -360, 40.394293146100004 ],
[382, 349, 0, 0.009091647380263157, 1.30547149138788, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 105.02671053600001 ],
[349, 351, 0, 0.0005858117819605263, 0.0841168325920224, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 6.76729770521 ],
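# NOTE (assumed interpretation, not part of the original data): the rows
# below link adjacent bus sections (e.g. 549-550-551, 247-248-249-250)
# with near-zero impedance and identical high ratings (3423); they read
# as busbar couplers / section ties rather than physical lines.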
[459, 465, 0, 1.578788789911157e-05, 0.00016702153987596, 991.0, 991.0, 991.0, 0, 1, 1, -360, 0.047758360894800005 ],
[549, 550, 0, 3.680432518409091e-05, 0.000389356391787088, 991.0, 991.0, 991.0, 0, 1, 1, -360, 0.111333083682 ],
[550, 551, 0, 5.755645674710744e-05, 0.0006088951287918401, 991.0, 991.0, 991.0, 0, 1, 1, -360, 0.17410828165999997 ],
[194, 195, 0, 1.7560672583171745e-05, 0.00252154053805592, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.202860889681 ],
[247, 248, 0, 2.1755213937811637e-05, 0.0031238355819477198, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.25131623141 ],
[2, 294, 0, 2.3531392658518004e-05, 0.003378877444715, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.271834647991 ],
[549, 551, 0, 9.265809538429751e-05, 0.0009802386406577602, 991.0, 991.0, 991.0, 0, 1, 1, -360, 0.28029073853799996 ],
[54, 365, 0, 2.573045189134349e-05, 0.00369464080598484, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.297238180249 ],
[131, 265, 0, 2.7616389041343487e-05, 0.00396544290388756, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.319024526206 ],
[91, 92, 0, 2.8945628197853184e-05, 0.0041563086239824396, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.33437989694200004 ],
[247, 249, 0, 3.098840072160664e-05, 0.00444963074500788, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.357978005136 ],
[186, 191, 0, 3.1591661821191135e-05, 0.00453625312865552, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.36494687735799997 ],
[129, 173, 0, 3.202671277479225e-05, 0.00459872218332188, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.369972585975 ],
[96, 202, 0, 3.5971247867797784e-05, 0.00516511877739804, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.415539855369 ],
[53, 320, 0, 3.784209581142659e-05, 0.00543375421308236, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.437151890814 ],
[24, 396, 0, 4.144748602818559e-05, 0.005951452925597279, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.47880135859800005 ],
[133, 156, 0, 4.431754564044322e-05, 0.0063635653674415605, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.511956287238 ],
[442, 452, 0, 4.483572190450138e-05, 0.006437970402313801, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.517942259441 ],
[445, 452, 0, 4.490753296371191e-05, 0.0064482817668697215, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.518771820797 ],
[247, 250, 0, 4.594910768732687e-05, 0.00659784169268824, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.530804092004 ],
[187, 195, 0, 4.755760376239612e-05, 0.006828805970367921, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.549385438663 ],
[216, 236, 0, 5.03353075283241e-05, 0.00722765701751724, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.581473472567 ],
[244, 389, 0, 5.1633313019736845e-05, 0.007414037889302401, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.596468032004 ],
[394, 406, 0, 5.6346419007686985e-05, 0.008090793734075721, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.650913832377 ],
[442, 445, 0, 6.388070648310249e-05, 0.00917264360085512, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.737949921293 ],
[442, 444, 0, 6.584378362735456e-05, 0.00945452224616264, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.760627388463 ],
[198, 472, 0, 8.37554210498615e-05, 0.0120264578966664, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.967542623967 ],
[464, 467, 0, 8.460287496468144e-05, 0.01214814397621276, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.977332411594 ],
[198, 251, 0, 8.83613182396122e-05, 0.012687819608389479, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 1.0207499483 ],
[112, 143, 0, 9.049653833033241e-05, 0.012994416294241841, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 1.04541601079 ],
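# --- Uniform coupling branches (assumed interpretation) ---
# Every row below shares r = 0, x = 0.005, zero shunt, identical 2000
# ratings, ratio 1.0, status 1 and a -360/360 angle window. Under the
# standard MATPOWER/PYPOWER branch column order (fbus, tbus, r, x, b,
# rateA, rateB, rateC, ratio, angle, status, angmin, angmax) this is the
# usual pattern for nominal low-impedance branches tying the auxiliary
# buses 490-585 into the network, not physical lines.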
[2, 490, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[5, 491, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[10, 492, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[12, 493, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[13, 494, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[15, 495, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[18, 496, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[20, 497, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[22, 498, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[24, 499, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[26, 500, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[30, 501, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[32, 502, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[37, 503, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[42, 504, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[46, 505, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[52, 506, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[56, 507, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[61, 508, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[68, 509, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[69, 510, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[74, 511, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[78, 512, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[86, 513, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[87, 514, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[94, 515, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[95, 516, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[96, 517, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[99, 518, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[100, 519, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[104, 520, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[105, 521, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[106, 522, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[107, 523, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[117, 524, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[120, 525, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[123, 526, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[124, 527, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[125, 528, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[128, 529, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[129, 530, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[138, 531, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[143, 532, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[156, 533, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[157, 534, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[159, 535, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[160, 536, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[165, 537, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[184, 538, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[191, 539, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[195, 540, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[201, 541, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[220, 542, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[231, 543, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[232, 544, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[233, 545, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[236, 546, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[245, 547, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[246, 548, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[248, 549, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[249, 550, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[250, 551, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[259, 552, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[261, 553, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[262, 554, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[265, 555, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[270, 556, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[277, 557, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[279, 558, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[280, 559, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[290, 560, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[301, 561, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[305, 562, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[306, 563, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[310, 564, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[313, 565, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[315, 566, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[320, 567, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[330, 568, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[332, 569, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[334, 570, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[336, 571, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[349, 572, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[351, 573, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[358, 574, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[360, 575, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[380, 576, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[382, 577, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[383, 578, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[389, 579, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[401, 580, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[402, 581, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[409, 582, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[415, 583, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[444, 584, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[452, 585, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ]
])
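# Note on the branch rows above: the 13 columns appear to follow the standard
# PYPOWER/MATPOWER branch-matrix layout, i.e.
#   [fbus, tbus, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax]
# Under that (assumed) reading, [2, 490, 0, 0.005, 0.0, 2000.0, ...] is an
# in-service branch from bus 2 to bus 490 with zero resistance, 0.005 p.u.
# reactance, 2000 MVA ratings and +/-360 degree angle limits.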
ppc["gen_control"] = array([
[586, 1, 0.08658028904199107, 4.329014452099554, 0, 0, 0],
[589, 1, 0.010042676909098597, 0.5021338454549299, 0, 0, 0],
[590, 1, 0.012095775674984046, 0.6047887837492023, 0, 0, 0],
[593, 1, 0.0017666198683200384, 0.08833099341600192, 0, 0, 0],
[595, 1, 1.50560576164933, 75.2802880824665, 0, 0, 0],
[598, 1, 0.0038197186342054878, 0.1909859317102744, 0, 0, 0],
[599, 1, 0.0029602819415092537, 0.1480140970754627, 0, 0, 0],
[602, 1, 0.007830423200121252, 0.39152116000606263, 0, 0, 0],
[603, 1, 1.0997606567649967, 54.98803283824984, 0, 0, 0],
[607, 1, 0.5729577951308232, 28.64788975654116, 0, 0, 0],
[608, 1, 0.0076394372684109755, 0.3819718634205488, 0, 0, 0],
[609, 1, 0.0057932399285449895, 0.2896619964272495, 0, 0, 0],
[612, 1, 0.00954929658551372, 0.477464829275686, 0, 0, 0],
[614, 1, 0.00954929658551372, 0.477464829275686, 0, 0, 0],
[616, 1, 0.0046154933496649645, 0.23077466748324824, 0, 0, 0],
[617, 1, 0.04360845440717932, 2.1804227203589663, 0, 0, 0],
[618, 1, 0.010631550198538607, 0.5315775099269304, 0, 0, 0],
[619, 1, 0.037560566569687294, 1.8780283284843649, 0, 0, 0],
[624, 1, 0.004297183463481174, 0.21485917317405873, 0, 0, 0],
[629, 1, 0.023968734429639437, 1.198436721481972, 0, 0, 0],
[632, 1, 0.01435577586688896, 0.717788793344448, 0, 0, 0],
[637, 1, 0.017093240888069558, 0.854662044403478, 0, 0, 0],
[638, 1, 0.02048324117592693, 1.0241620587963465, 0, 0, 0],
[640, 1, 0.0038197186342054878, 0.1909859317102744, 0, 0, 0],
[641, 1, 0.0040107045659157625, 0.20053522829578813, 0, 0, 0],
[642, 1, 0.00919915571071155, 0.4599577855355775, 0, 0, 0],
[643, 1, 0.27279157245950864, 13.639578622975431, 0, 0, 0],
[647, 1, 0.00445633840657307, 0.2228169203286535, 0, 0, 0],
[652, 1, 0.00746436683100989, 0.37321834155049455, 0, 0, 0],
[655, 1, 0.019576058000303126, 0.9788029000151565, 0, 0, 0],
[663, 1, 0.00238732414637843, 0.1193662073189215, 0, 0, 0],
[666, 1, 0.00919915571071155, 0.4599577855355775, 0, 0, 0],
[672, 1, 0.010536057232683471, 0.5268028616341736, 0, 0, 0],
[676, 1, 0.11777465788800255, 5.888732894400127, 0, 0, 0],
[681, 1, 0.0063821132179850025, 0.31910566089925013, 0, 0, 0],
[683, 1, 0.008753521870054244, 0.4376760935027122, 0, 0, 0],
[687, 1, 0.42303383873825773, 21.151691936912886, 0, 0, 0],
[696, 1, 0.22950142793851305, 11.475071396925653, 0, 0, 0],
[697, 1, 0.0036923946797319715, 0.1846197339865986, 0, 0, 0],
[698, 1, 0.0038197186342054878, 0.1909859317102744, 0, 0, 0],
[702, 1, 0.023363945645890238, 1.168197282294512, 0, 0, 0],
[705, 1, 0.005411268065124442, 0.27056340325622213, 0, 0, 0],
[707, 1, 0.010822536130248884, 0.5411268065124443, 0, 0, 0],
[713, 1, 0.004265352474862795, 0.21326762374313976, 0, 0, 0],
[714, 1, 0.00477464829275686, 0.238732414637843, 0, 0, 0],
[716, 1, 1.5915494309189534e-05, 0.0007957747154594768, 0, 0, 0],
[717, 1, 0.0017507043740108488, 0.08753521870054244, 0, 0, 0],
[719, 1, 0.623250757147862, 31.162537857393104, 0, 0, 0],
[722, 1, 0.006589014644004467, 0.3294507322002233, 0, 0, 0],
[724, 1, 0.0019257748114119334, 0.09628874057059668, 0, 0, 0],
[727, 1, 0.019576058000303126, 0.9788029000151565, 0, 0, 0],
[728, 1, 0.16233804195373325, 8.116902097686662, 0, 0, 0],
[730, 1, 0.10077690996578814, 5.038845498289407, 0, 0, 0],
[732, 1, 0.004647324338283344, 0.2323662169141672, 0, 0, 0],
[735, 1, 0.013496339174192726, 0.6748169587096363, 0, 0, 0],
[738, 1, 0.04408591923645501, 2.2042959618227504, 0, 0, 0],
[741, 1, 0.0340591578216656, 1.7029578910832803, 0, 0, 0],
[742, 1, 0.0028647889756541157, 0.14323944878270578, 0, 0, 0],
[747, 1, 0.0039788735772973835, 0.1989436788648692, 0, 0, 0],
[748, 1, 0.03501408748021698, 1.7507043740108488, 0, 0, 0],
[749, 1, 0.0025464790894703256, 0.12732395447351627, 0, 0, 0],
[750, 1, 0.028902537665488188, 1.4451268832744095, 0, 0, 0],
[758, 1, 0.0058887328944001276, 0.2944366447200064, 0, 0, 0],
[761, 1, 0.004997465213085514, 0.2498732606542757, 0, 0, 0],
[762, 1, 0.3517324242330887, 17.586621211654435, 0, 0, 0],
[765, 1, 0.018780283284843647, 0.9390141642421824, 0, 0, 0],
[767, 1, 0.0035650707252584553, 0.17825353626292276, 0, 0, 0],
[774, 1, 0.010663381187156987, 0.5331690593578494, 0, 0, 0],
[777, 1, 0.012573240504259732, 0.6286620252129866, 0, 0, 0],
[778, 1, 0.004679155326901723, 0.23395776634508614, 0, 0, 0],
[781, 1, 0.4169859509007658, 20.84929754503829, 0, 0, 0],
[784, 1, 0.4058451048843331, 20.292255244216655, 0, 0, 0],
[785, 1, 0.00047746482927568597, 0.0238732414637843, 0, 0, 0],
[787, 1, 0.24764509145098912, 12.382254572549456, 0, 0, 0],
[788, 1, 0.2785211504108168, 13.926057520540843, 0, 0, 0],
[789, 1, 0.0123185925953127, 0.615929629765635, 0, 0, 0],
[791, 1, 0.0031830988618379067, 0.15915494309189535, 0, 0, 0],
[792, 1, 0.009979014931861837, 0.49895074659309185, 0, 0, 0],
[795, 1, 0.004329014452099553, 0.2164507226049777, 0, 0, 0],
[800, 1, 0.0058091554228541795, 0.290457771142709, 0, 0, 0],
[801, 1, 0.007957747154594767, 0.3978873577297384, 0, 0, 0],
[802, 1, 0.07957747154594767, 3.9788735772973833, 0, 0, 0],
[805, 1, 0.44881693951914486, 22.440846975957243, 0, 0, 0],
[806, 1, 0.005697746962689853, 0.2848873481344927, 0, 0, 0],
[808, 1, 0.034616200122487235, 1.7308100061243619, 0, 0, 0],
[809, 1, 0.0039788735772973835, 0.1989436788648692, 0, 0, 0],
[811, 1, 0.0040107045659157625, 0.20053522829578813, 0, 0, 0],
[814, 1, 0.014164789935178685, 0.7082394967589343, 0, 0, 0],
[816, 1, 0.012748310941660816, 0.6374155470830408, 0, 0, 0],
[817, 1, 0.017188733853924696, 0.8594366926962349, 0, 0, 0],
[821, 1, 0.013130282805081364, 0.6565141402540683, 0, 0, 0],
[822, 1, 0.04265352474862795, 2.1326762374313977, 0, 0, 0],
[826, 1, 0.018461973398659858, 0.9230986699329929, 0, 0, 0],
[834, 1, 0.007416620348082323, 0.37083101740411617, 0, 0, 0],
[835, 1, 0.010138169874953733, 0.5069084937476867, 0, 0, 0],
[836, 1, 0.008116902097686661, 0.4058451048843331, 0, 0, 0],
[837, 1, 0.15024226627874918, 7.512113313937459, 0, 0, 0],
[839, 1, 0.011666057328635928, 0.5833028664317964, 0, 0, 0],
[841, 1, 0.0037083101740411615, 0.18541550870205808, 0, 0, 0],
[843, 1, 0.10599719209920229, 5.2998596049601145, 0, 0, 0],
[844, 1, 0.012732395447351627, 0.6366197723675814, 0, 0, 0],
[850, 1, 0.005092958178940651, 0.25464790894703254, 0, 0, 0],
[851, 1, 0.01265281797580568, 0.632640898790284, 0, 0, 0],
[853, 1, 0.0036923946797319715, 0.1846197339865986, 0, 0, 0],
[856, 1, 0.011459155902616463, 0.5729577951308231, 0, 0, 0],
[857, 1, 0.4462704604296745, 22.313523021483725, 0, 0, 0],
[858, 1, 0.01808000153523931, 0.9040000767619655, 0, 0, 0],
[860, 1, 0.0039788735772973835, 0.1989436788648692, 0, 0, 0],
[865, 1, 0.0035014087480216977, 0.17507043740108488, 0, 0, 0],
[867, 1, 0.24478030247533505, 12.239015123766753, 0, 0, 0],
[870, 1, 0.018589297353133374, 0.9294648676566688, 0, 0, 0],
[872, 1, 0.00716197243913529, 0.3580986219567645, 0, 0, 0],
[873, 1, 0.038833806114422456, 1.941690305721123, 0, 0, 0],
[874, 1, 0.006589014644004467, 0.3294507322002233, 0, 0, 0],
[875, 1, 0.007766761222884492, 0.38833806114422464, 0, 0, 0],
[877, 1, 0.007894085177358009, 0.39470425886790045, 0, 0, 0],
[882, 1, 0.005538592019597957, 0.2769296009798979, 0, 0, 0],
[883, 1, 0.005729577951308231, 0.28647889756541156, 0, 0, 0],
[885, 1, 0.15597184423005742, 7.798592211502871, 0, 0, 0],
[886, 1, 0.8186930272647096, 40.93465136323548, 0, 0, 0],
[889, 1, 0.0030239439187460114, 0.15119719593730058, 0, 0, 0],
[890, 1, 0.0076394372684109755, 0.3819718634205488, 0, 0, 0],
[895, 1, 0.0030239439187460114, 0.15119719593730058, 0, 0, 0],
[896, 1, 0.0038197186342054878, 0.1909859317102744, 0, 0, 0],
[898, 1, 0.013464508185574344, 0.6732254092787172, 0, 0, 0],
[900, 1, 0.03584169318429482, 1.7920846592147412, 0, 0, 0],
[902, 1, 0.006207042780583919, 0.31035213902919595, 0, 0, 0],
[903, 1, 0.0031990143561470966, 0.15995071780735484, 0, 0, 0],
[905, 1, 0.021851973686517232, 1.0925986843258617, 0, 0, 0],
[906, 1, 0.010504226244065093, 0.5252113122032547, 0, 0, 0],
[907, 1, 0.02142225534016911, 1.0711127670084555, 0, 0, 0],
[909, 1, 0.005856901905781748, 0.2928450952890874, 0, 0, 0],
[917, 1, 0.005411268065124442, 0.27056340325622213, 0, 0, 0],
[918, 1, 0.012254930618075942, 0.612746530903797, 0, 0, 0],
[920, 1, 0.0020371832715762603, 0.10185916357881303, 0, 0, 0],
[921, 1, 0.019735212943395024, 0.9867606471697512, 0, 0, 0],
[923, 1, 0.023236621691416718, 1.161831084570836, 0, 0, 0],
[925, 1, 0.008276057040778557, 0.4138028520389279, 0, 0, 0],
[931, 1, 0.03455253814525047, 1.7276269072625237, 0, 0, 0],
[936, 1, 0.016615776058793875, 0.8307888029396938, 0, 0, 0],
[937, 1, 0.00477464829275686, 0.238732414637843, 0, 0, 0],
[939, 1, 1.5915494309189534e-05, 0.0007957747154594768, 0, 0, 0],
[940, 1, 0.009421972631040205, 0.47109863155201026, 0, 0, 0],
[944, 1, 0.004042535554534142, 0.2021267777267071, 0, 0, 0],
[950, 1, 0.005092958178940651, 0.25464790894703254, 0, 0, 0],
[952, 1, 0.005045211696013082, 0.2522605848006541, 0, 0, 0],
[958, 1, 0.010615634704229418, 0.530781735211471, 0, 0, 0],
[959, 1, 0.007241549910681238, 0.3620774955340619, 0, 0, 0],
[960, 1, 0.004217605991935227, 0.21088029959676136, 0, 0, 0],
[963, 1, 0.2785211504108168, 13.926057520540843, 0, 0, 0],
[968, 1, 0.017188733853924696, 0.8594366926962349, 0, 0, 0],
[969, 1, 0.018111832523857688, 0.9055916261928845, 0, 0, 0],
[971, 1, 0.0031830988618379067, 0.15915494309189535, 0, 0, 0],
[973, 1, 0.4287634166895661, 21.438170834478306, 0, 0, 0],
[976, 1, 0.008562535938343968, 0.4281267969171984, 0, 0, 0],
[978, 1, 0.0007321127382227185, 0.03660563691113593, 0, 0, 0],
[982, 1, 0.0015756339366097638, 0.07878169683048819, 0, 0, 0],
[984, 1, 0.14801409707546268, 7.400704853773133, 0, 0, 0],
[985, 1, 0.0035014087480216977, 0.17507043740108488, 0, 0, 0],
[986, 1, 0.0017825353626292277, 0.08912676813146138, 0, 0, 0],
[987, 1, 0.02618098813861678, 1.3090494069308392, 0, 0, 0],
[988, 1, 0.0008116902097686662, 0.04058451048843331, 0, 0, 0],
[993, 1, 0.06238873769202297, 3.119436884601149, 0, 0, 0],
[994, 1, 0.010504226244065093, 0.5252113122032547, 0, 0, 0],
[995, 1, 0.0006684507609859605, 0.033422538049298026, 0, 0, 0],
[997, 1, 0.005984225860255264, 0.2992112930127632, 0, 0, 0],
[999, 1, 0.004965634224467135, 0.24828171122335674, 0, 0, 0],
[1000, 1, 0.015597184423005743, 0.7798592211502873, 0, 0, 0],
[1002, 1, 0.0031512678732195276, 0.15756339366097638, 0, 0, 0],
[1003, 1, 0.2864788975654116, 14.32394487827058, 0, 0, 0],
[1007, 1, 0.007416620348082323, 0.37083101740411617, 0, 0, 0],
[1010, 1, 0.238732414637843, 11.93662073189215, 0, 0, 0],
[1011, 1, 0.005952394871636886, 0.2976197435818443, 0, 0, 0],
[1012, 1, 0.9024085273310466, 45.12042636655233, 0, 0, 0],
[1027, 3, 0.003074873500535418, 0.15374367502677092, 2.22, 61.69, 0.004502],
[1028, 2, 0.025464790894703257, 1.273239544735163, 0, 0, 0],
[1029, 2, 0.003819718634205488, 0.19098593171027442, 0, 0, 0],
[1030, 2, 0.06480789282701978, 3.2403946413509894, 0, 0, 0],
[1031, 2, 0.0921316134570364, 4.60658067285182, 0, 0, 0],
[1032, 2, 0.009772775025341927, 0.4886387512670964, 0, 0, 0],
[1034, 2, 0.005364335122251813, 0.26821675611259066, 0, 0, 0],
[1035, 3, 0.00317587127473044, 0.158793563736522, 2.22, 61.69, 0.004502],
[1036, 2, 0.0042795539826391196, 0.21397769913195597, 0, 0, 0],
[1037, 2, 0.0060277734620055035, 0.3013886731002752, 0, 0, 0],
[1038, 2, 0.005462103769994554, 0.2731051884997277, 0, 0, 0],
[1039, 2, 0.008449479506347874, 0.42247397531739384, 0, 0, 0],
[1040, 3, 2.336343608570577e-06, 0.00011681718042852884, 2.22, 61.69, 0.004502],
[1041, 2, 0.012998987840239671, 0.6499493920119837, 0, 0, 0],
[1042, 2, 0.00335501991632689, 0.1677509958163445, 0, 0, 0],
[1043, 3, 0.00027639280106176795, 0.0138196400530884, 2.22, 61.69, 0.004502],
[1044, 3, 0.0023022419250361527, 0.11511209625180763, 2.22, 61.69, 0.004502],
[1045, 2, 0.003936615026511589, 0.19683075132557948, 0, 0, 0],
[1046, 2, 0.00679827557108513, 0.33991377855425653, 0, 0, 0],
[1047, 3, 0.0008294889076348922, 0.04147444538174461, 2.22, 61.69, 0.004502],
[1048, 2, 0.004561818873896339, 0.22809094369481697, 0, 0, 0],
[1049, 2, 0.01870104799381521, 0.9350523996907605, 0, 0, 0],
[1050, 2, 0.0033601814151550304, 0.1680090707577515, 0, 0, 0],
[1051, 2, 0.019380601737792977, 0.969030086889649, 0, 0, 0],
[1052, 3, 0.001315809692296204, 0.06579048461481019, 2.22, 61.69, 0.004502],
[1053, 3, 0.001042024786453249, 0.05210123932266245, 2.22, 61.69, 0.004502],
[1054, 2, 0.017434200209443074, 0.8717100104721537, 0, 0, 0],
[1055, 3, 0.0001818229987415119, 0.009091149937075596, 2.22, 61.69, 0.004502],
[1056, 2, 0.0384482661909012, 1.9224133095450602, 0, 0, 0],
[1057, 2, 0.02718238967557453, 1.3591194837787268, 0, 0, 0],
[1058, 2, 0.06721018861714274, 3.3605094308571375, 0, 0, 0],
[1059, 2, 0.02641152929543176, 1.320576464771588, 0, 0, 0],
[1060, 3, 0.0006590053340983933, 0.03295026670491967, 2.22, 61.69, 0.004502],
[1061, 2, 0.010304492946979937, 0.5152246473489969, 0, 0, 0],
[1062, 3, 0.00018325491392786168, 0.009162745696393085, 2.22, 61.69, 0.004502],
[1063, 3, 0.0005520076745724519, 0.0276003837286226, 2.22, 61.69, 0.004502],
[1064, 2, 0.013355424896304362, 0.667771244815218, 0, 0, 0],
[1065, 2, 0.021608252882636087, 1.0804126441318045, 0, 0, 0],
[1066, 2, 0.008344605676520173, 0.41723028382600863, 0, 0, 0],
[1067, 3, 0.002078788013715776, 0.1039394006857888, 2.22, 61.69, 0.004502],
[1068, 3, 0.00015551718230318853, 0.007775859115159426, 2.22, 61.69, 0.004502],
[1069, 3, 9.718800232405976e-05, 0.004859400116202988, 2.22, 61.69, 0.004502],
[1070, 3, 3.377592945720695e-05, 0.0016887964728603477, 2.22, 61.69, 0.004502],
[1071, 3, 0.0002755733400308117, 0.013778667001540588, 2.22, 61.69, 0.004502],
[1072, 2, 0.007168748144119091, 0.3584374072059546, 0, 0, 0],
[1073, 2, 0.004954025493475761, 0.24770127467378808, 0, 0, 0],
[1074, 2, 0.009778033156939965, 0.48890165784699824, 0, 0, 0],
[1075, 3, 0.0010048055180333312, 0.05024027590166657, 2.22, 61.69, 0.004502],
[1076, 3, 9.4271256803507e-05, 0.00471356284017535, 2.22, 61.69, 0.004502],
[1077, 3, 0.0016628534246063698, 0.08314267123031849, 2.22, 61.69, 0.004502],
[1078, 3, 0.0014461857774017207, 0.07230928887008603, 2.22, 61.69, 0.004502],
[1079, 2, 0.004604543003215469, 0.23022715016077344, 0, 0, 0],
[1080, 2, 0.003461464766403525, 0.17307323832017626, 0, 0, 0],
[1081, 2, 0.025823979083824652, 1.2911989541912325, 0, 0, 0],
[1082, 2, 0.03247105626963941, 1.623552813481971, 0, 0, 0],
[1083, 2, 0.04034141649573272, 2.017070824786636, 0, 0, 0],
[1084, 2, 0.0383703068502718, 1.9185153425135901, 0, 0, 0],
[1085, 2, 0.007239283505967098, 0.3619641752983549, 0, 0, 0],
[1086, 2, 0.01436208920263519, 0.7181044601317595, 0, 0, 0],
[1087, 2, 0.007427186304799236, 0.3713593152399618, 0, 0, 0],
[1088, 3, 0.0023416461987310717, 0.11708230993655358, 2.22, 61.69, 0.004502],
[1089, 2, 0.024474821190373128, 1.2237410595186564, 0, 0, 0],
[1090, 2, 0.005674885746854652, 0.2837442873427326, 0, 0, 0],
[1091, 3, 0.002915330196419503, 0.14576650982097517, 2.22, 61.69, 0.004502],
[1092, 2, 0.003437876146252996, 0.1718938073126498, 0, 0, 0],
[1093, 2, 0.009906140914748767, 0.49530704573743833, 0, 0, 0],
[1094, 3, 0.00023930778294026586, 0.011965389147013294, 2.22, 61.69, 0.004502],
[1095, 3, 1.3047613994501091e-05, 0.0006523806997250545, 2.22, 61.69, 0.004502],
[1096, 2, 0.005379826679377905, 0.2689913339688953, 0, 0, 0],
[1097, 3, 0.0002929164939619051, 0.014645824698095257, 2.22, 61.69, 0.004502],
[1098, 2, 0.004521623727146264, 0.22608118635731317, 0, 0, 0],
[1099, 2, 0.018521637260932335, 0.9260818630466169, 0, 0, 0],
[1100, 3, 8.693874816027041e-07, 4.346937408013521e-05, 2.22, 61.69, 0.004502],
[1101, 2, 0.005343192104787693, 0.2671596052393847, 0, 0, 0],
[1102, 2, 0.02234407998394998, 1.1172039991974991, 0, 0, 0],
[1103, 2, 0.01562148424141561, 0.7810742120707805, 0, 0, 0],
[1104, 3, 1.3172819714966009e-05, 0.0006586409857483004, 2.22, 61.69, 0.004502],
[1105, 3, 0.0001386935566767763, 0.006934677833838815, 2.22, 61.69, 0.004502],
[1106, 3, 0.00014577275883068604, 0.0072886379415343025, 2.22, 61.69, 0.004502],
[1107, 2, 0.004852418696402547, 0.24262093482012728, 0, 0, 0],
[1108, 2, 0.02039874588539438, 1.019937294269719, 0, 0, 0],
[1109, 3, 4.9542410867097304e-05, 0.002477120543354865, 2.22, 61.69, 0.004502],
[1110, 3, 0.00010533237807450261, 0.00526661890372513, 2.22, 61.69, 0.004502],
[1111, 2, 0.005706531882583417, 0.2853265941291709, 0, 0, 0],
[1112, 2, 0.004426690383932842, 0.2213345191966421, 0, 0, 0],
[1113, 3, 0.00022513170529279912, 0.011256585264639957, 2.22, 61.69, 0.004502],
[1114, 3, 0.0008560555102861403, 0.042802775514307015, 2.22, 61.69, 0.004502],
[1115, 2, 0.0032197222090973076, 0.16098611045486538, 0, 0, 0],
[1116, 3, 0.002075453185310181, 0.10377265926550905, 2.22, 61.69, 0.004502],
[1117, 2, 0.005780032679669937, 0.2890016339834969, 0, 0, 0],
[1118, 3, 0.0005554515385863421, 0.027772576929317106, 2.22, 61.69, 0.004502],
[1119, 3, 0.0027536366373517632, 0.13768183186758817, 2.22, 61.69, 0.004502],
[1120, 3, 0.0001538074296570127, 0.007690371482850636, 2.22, 61.69, 0.004502],
[1121, 3, 3.4414977793908876e-05, 0.0017207488896954439, 2.22, 61.69, 0.004502],
[1122, 3, 9.313004041299959e-05, 0.00465650202064998, 2.22, 61.69, 0.004502],
[1123, 3, 9.32225252447514e-05, 0.00466112626223757, 2.22, 61.69, 0.004502],
[1124, 3, 8.201464578534214e-05, 0.004100732289267108, 2.22, 61.69, 0.004502],
[1125, 3, 0.0016436821796102436, 0.08218410898051219, 2.22, 61.69, 0.004502],
[1126, 3, 0.0018560581327172175, 0.09280290663586088, 2.22, 61.69, 0.004502],
[1127, 2, 0.006703391093283916, 0.3351695546641958, 0, 0, 0],
[1128, 3, 0.0001948941120002845, 0.009744705600014225, 2.22, 61.69, 0.004502],
[1129, 3, 0.0003016780123772693, 0.015083900618863466, 2.22, 61.69, 0.004502],
[1130, 3, 6.530151955301432e-05, 0.003265075977650716, 2.22, 61.69, 0.004502],
[1131, 3, 0.00018443373362804407, 0.009221686681402204, 2.22, 61.69, 0.004502],
[1132, 3, 2.2886271300209156e-05, 0.0011443135650104578, 2.22, 61.69, 0.004502],
[1133, 3, 4.5810964480308454e-05, 0.002290548224015423, 2.22, 61.69, 0.004502],
[1134, 3, 3.236913111220881e-05, 0.0016184565556104404, 2.22, 61.69, 0.004502],
[1135, 3, 0.0005167964323996007, 0.025839821619980042, 2.22, 61.69, 0.004502],
[1136, 3, 2.5636662405410735e-05, 0.0012818331202705368, 2.22, 61.69, 0.004502],
[1137, 3, 0.00023357652984116472, 0.011678826492058236, 2.22, 61.69, 0.004502],
[1138, 3, 7.98498118498449e-05, 0.003992490592492246, 2.22, 61.69, 0.004502],
[1139, 3, 0.0012619566606414858, 0.0630978330320743, 2.22, 61.69, 0.004502],
[1140, 3, 0.0018073289497007397, 0.09036644748503699, 2.22, 61.69, 0.004502],
[1141, 2, 0.0076053500901520025, 0.38026750450760016, 0, 0, 0],
[1142, 3, 7.73959943559724e-05, 0.00386979971779862, 2.22, 61.69, 0.004502],
[1143, 3, 0.0016067873237582107, 0.08033936618791054, 2.22, 61.69, 0.004502],
[1144, 2, 0.00334399697192306, 0.16719984859615303, 0, 0, 0],
[1146, 3, 5.4833151376821656e-05, 0.002741657568841083, 2.22, 61.69, 0.004502],
[1147, 3, 0.002909588342312674, 0.14547941711563372, 2.22, 61.69, 0.004502],
[1148, 3, 0.0011233492673683868, 0.05616746336841934, 2.22, 61.69, 0.004502],
[1149, 3, 0.0005447417794635118, 0.02723708897317559, 2.22, 61.69, 0.004502],
[1150, 3, 0.0002306193019977063, 0.011530965099885314, 2.22, 61.69, 0.004502],
[1151, 3, 0.0008299047575760064, 0.04149523787880033, 2.22, 61.69, 0.004502],
[1152, 3, 7.417749437366368e-06, 0.0003708874718683184, 2.22, 61.69, 0.004502],
[1153, 3, 4.37920348658174e-06, 0.000218960174329087, 2.22, 61.69, 0.004502],
[1154, 3, 1.0225677287248534e-05, 0.0005112838643624266, 2.22, 61.69, 0.004502],
[1155, 3, 3.879887736397654e-05, 0.001939943868198827, 2.22, 61.69, 0.004502],
[1156, 3, 0.0010200134924871187, 0.05100067462435595, 2.22, 61.69, 0.004502],
[1157, 3, 0.00027719360593007886, 0.013859680296503944, 2.22, 61.69, 0.004502],
[1158, 3, 6.640198284893194e-05, 0.003320099142446597, 2.22, 61.69, 0.004502],
[1159, 3, 0.0008593149079194712, 0.04296574539597356, 2.22, 61.69, 0.004502],
[1160, 2, 0.015175599618213626, 0.7587799809106813, 0, 0, 0],
[1161, 3, 0.001608317428775011, 0.08041587143875056, 2.22, 61.69, 0.004502],
[1162, 2, 0.031984361657767045, 1.5992180828883522, 0, 0, 0],
[1163, 2, 0.021010485834812704, 1.0505242917406352, 0, 0, 0],
[1164, 2, 0.018183478445661972, 0.9091739222830987, 0, 0, 0],
[1165, 2, 0.003640738012495192, 0.18203690062475963, 0, 0, 0],
[1166, 2, 0.005301588846150501, 0.26507944230752506, 0, 0, 0],
[1167, 3, 0.00032173361521807824, 0.016086680760903912, 2.22, 61.69, 0.004502],
[1168, 3, 8.56746647323757e-05, 0.004283733236618785, 2.22, 61.69, 0.004502],
[1169, 3, 0.00017327803824915608, 0.008663901912457804, 2.22, 61.69, 0.004502],
[1170, 3, 1.6933420442211857e-05, 0.000846671022110593, 2.22, 61.69, 0.004502],
[1171, 3, 0.0005748603194505088, 0.02874301597252544, 2.22, 61.69, 0.004502],
[1172, 3, 0.00018443985354437561, 0.009221992677218781, 2.22, 61.69, 0.004502],
[1173, 2, 0.01618626952698487, 0.8093134763492436, 0, 0, 0],
[1174, 3, 8.021928882473966e-05, 0.004010964441236983, 2.22, 61.69, 0.004502],
[1175, 3, 5.445989361520192e-05, 0.002722994680760096, 2.22, 61.69, 0.004502],
[1176, 3, 1.4783581244732665e-05, 0.0007391790622366333, 2.22, 61.69, 0.004502],
[1177, 3, 0.0017745146198091144, 0.08872573099045572, 2.22, 61.69, 0.004502],
[1178, 3, 0.00020168108435446162, 0.010084054217723081, 2.22, 61.69, 0.004502],
[1179, 3, 8.316119408334767e-05, 0.004158059704167384, 2.22, 61.69, 0.004502],
[1180, 3, 4.3834108298364086e-05, 0.002191705414918204, 2.22, 61.69, 0.004502],
[1181, 2, 0.00545834972439398, 0.272917486219699, 0, 0, 0],
[1182, 2, 0.006322880792722177, 0.3161440396361089, 0, 0, 0],
[1183, 3, 0.0024333246840658566, 0.12166623420329284, 2.22, 61.69, 0.004502],
[1184, 3, 0.00026859021396164037, 0.013429510698082018, 2.22, 61.69, 0.004502],
[1185, 3, 0.0007221796423758263, 0.036108982118791315, 2.22, 61.69, 0.004502],
[1186, 3, 0.0024774929167619207, 0.12387464583809603, 2.22, 61.69, 0.004502],
[1187, 3, 0.0006248151564821885, 0.031240757824109424, 2.22, 61.69, 0.004502],
[1188, 2, 0.011440868435801076, 0.5720434217900537, 0, 0, 0],
[1190, 2, 0.008560072385215577, 0.4280036192607789, 0, 0, 0],
[1191, 2, 0.0038330920716145105, 0.19165460358072556, 0, 0, 0],
[1192, 3, 0.000624063467954237, 0.03120317339771185, 2.22, 61.69, 0.004502],
[1193, 3, 6.947815367894934e-05, 0.003473907683947467, 2.22, 61.69, 0.004502],
[1194, 3, 0.000256964892583124, 0.0128482446291562, 2.22, 61.69, 0.004502],
[1195, 3, 1.1141854949920964e-05, 0.0005570927474960482, 2.22, 61.69, 0.004502],
[1196, 2, 0.010230349597894291, 0.5115174798947145, 0, 0, 0],
[1197, 2, 0.005767282789943071, 0.2883641394971536, 0, 0, 0],
[1198, 3, 0.002534966273924786, 0.12674831369623932, 2.22, 61.69, 0.004502],
[1199, 2, 0.012822920004466005, 0.6411460002233003, 0, 0, 0],
[1200, 2, 0.0035658606694853635, 0.1782930334742682, 0, 0, 0],
[1201, 3, 0.0007611009713161243, 0.038055048565806215, 2.22, 61.69, 0.004502],
[1202, 3, 0.0014194829353529731, 0.07097414676764867, 2.22, 61.69, 0.004502],
[1203, 2, 0.005555429787358444, 0.2777714893679222, 0, 0, 0],
[1204, 3, 0.0030266063343556363, 0.15133031671778183, 2.22, 61.69, 0.004502],
[1205, 3, 2.3205910643854753e-05, 0.0011602955321927378, 2.22, 61.69, 0.004502],
[1206, 3, 0.00011069474644339766, 0.005534737322169883, 2.22, 61.69, 0.004502],
[1207, 3, 0.00010350861794424089, 0.005175430897212044, 2.22, 61.69, 0.004502],
[1208, 3, 9.345445217179975e-05, 0.004672722608589988, 2.22, 61.69, 0.004502],
[1209, 3, 3.356328110301583e-05, 0.0016781640551507917, 2.22, 61.69, 0.004502],
[1210, 3, 0.0004892458573625359, 0.024462292868126796, 2.22, 61.69, 0.004502],
[1211, 3, 0.0011462484513341364, 0.057312422566706815, 2.22, 61.69, 0.004502],
[1212, 2, 0.005203565741426465, 0.2601782870713233, 0, 0, 0],
[1213, 2, 0.0036505499187602444, 0.18252749593801224, 0, 0, 0],
[1214, 3, 0.00015044086223803754, 0.0075220431119018775, 2.22, 61.69, 0.004502],
[1215, 3, 7.654799543184605e-05, 0.0038273997715923026, 2.22, 61.69, 0.004502],
[1216, 2, 0.0025980154229842014, 0.1299007711492101, 0, 0, 0],
[1217, 3, 0.0016051045195969729, 0.08025522597984863, 2.22, 61.69, 0.004502],
[1218, 3, 4.0950816488115195e-05, 0.00204754082440576, 2.22, 61.69, 0.004502],
[1219, 3, 0.0007855588922898729, 0.03927794461449365, 2.22, 61.69, 0.004502],
[1220, 3, 0.001947919590347708, 0.0973959795173854, 2.22, 61.69, 0.004502],
[1221, 2, 0.01908382726469443, 0.9541913632347216, 0, 0, 0],
[1222, 2, 0.013436354905899806, 0.6718177452949904, 0, 0, 0],
[1223, 3, 0.0001838392909285677, 0.009191964546428387, 2.22, 61.69, 0.004502],
[1224, 2, 0.004170828516521499, 0.20854142582607496, 0, 0, 0],
[1225, 3, 0.0022238071565315737, 0.1111903578265787, 2.22, 61.69, 0.004502],
[1226, 3, 0.0002535566380389208, 0.012677831901946041, 2.22, 61.69, 0.004502],
[1227, 3, 0.0011129900410750567, 0.05564950205375283, 2.22, 61.69, 0.004502],
[1228, 3, 0.00019234621639044032, 0.009617310819522017, 2.22, 61.69, 0.004502],
[1229, 2, 0.00326230849376, 0.16311542468800003, 0, 0, 0],
[1230, 3, 5.633590875939478e-05, 0.002816795437969739, 2.22, 61.69, 0.004502],
[1231, 3, 0.001837728066564351, 0.09188640332821756, 2.22, 61.69, 0.004502],
[1232, 2, 0.0033135228268258853, 0.16567614134129427, 0, 0, 0],
[1233, 2, 0.03662908231521014, 1.831454115760507, 0, 0, 0],
[1235, 3, 0.0005753349157073776, 0.028766745785368877, 2.22, 61.69, 0.004502],
[1236, 2, 0.005234608320670995, 0.26173041603354974, 0, 0, 0],
[1237, 3, 0.0009298092078685558, 0.04649046039342779, 2.22, 61.69, 0.004502],
[1238, 2, 0.00873349739064102, 0.43667486953205104, 0, 0, 0],
[1239, 3, 0.0001443666373276477, 0.007218331866382386, 2.22, 61.69, 0.004502],
[1240, 2, 0.021613910382114798, 1.08069551910574, 0, 0, 0],
[1241, 2, 0.024532881090784327, 1.2266440545392163, 0, 0, 0],
[1242, 3, 0.001089840847144528, 0.0544920423572264, 2.22, 61.69, 0.004502],
[1243, 2, 0.004104248459975932, 0.2052124229987966, 0, 0, 0],
[1244, 2, 0.020592901244747865, 1.0296450622373932, 0, 0, 0],
[1245, 3, 0.0005144458090049472, 0.025722290450247362, 2.22, 61.69, 0.004502],
[1246, 2, 0.003636870278584459, 0.18184351392922293, 0, 0, 0],
[1247, 3, 0.0013899571448864774, 0.06949785724432388, 2.22, 61.69, 0.004502],
[1248, 2, 0.005854245631350222, 0.2927122815675111, 0, 0, 0],
[1249, 2, 0.004846915908139961, 0.24234579540699805, 0, 0, 0],
[1250, 3, 0.0019627317861894665, 0.09813658930947333, 2.22, 61.69, 0.004502],
[1251, 3, 0.0014899668826355728, 0.07449834413177864, 2.22, 61.69, 0.004502],
[1252, 3, 0.0009477821555247328, 0.047389107776236644, 2.22, 61.69, 0.004502],
[1253, 2, 0.004106369053307717, 0.20531845266538587, 0, 0, 0],
[1254, 2, 0.005238024431161238, 0.2619012215580619, 0, 0, 0],
[1255, 3, 0.0002430881191708174, 0.01215440595854087, 2.22, 61.69, 0.004502],
[1256, 3, 0.0009607764830526361, 0.048038824152631804, 2.22, 61.69, 0.004502],
[1257, 2, 0.005662916214121937, 0.28314581070609685, 0, 0, 0],
[1258, 2, 0.014991588973313675, 0.7495794486656838, 0, 0, 0],
[1259, 2, 0.00695753592752513, 0.34787679637625657, 0, 0, 0],
[1260, 3, 0.0012839803779623614, 0.06419901889811806, 2.22, 61.69, 0.004502],
[1261, 2, 0.009975116408949375, 0.4987558204474688, 0, 0, 0],
[1262, 3, 3.0364756627642744e-05, 0.0015182378313821372, 2.22, 61.69, 0.004502],
[1263, 3, 2.087578084141728e-05, 0.001043789042070864, 2.22, 61.69, 0.004502],
[1264, 2, 0.00260783494651335, 0.13039174732566752, 0, 0, 0],
[1265, 3, 0.0002973288100213264, 0.014866440501066322, 2.22, 61.69, 0.004502],
[1266, 2, 0.0059815616312556405, 0.2990780815627821, 0, 0, 0],
[1267, 3, 0.002512674942558201, 0.12563374712791006, 2.22, 61.69, 0.004502],
[1268, 3, 0.00010891372042828892, 0.005445686021414446, 2.22, 61.69, 0.004502],
[1269, 3, 0.00017998003182560332, 0.008999001591280167, 2.22, 61.69, 0.004502],
[1270, 3, 0.0015555866745856353, 0.07777933372928177, 2.22, 61.69, 0.004502],
[1271, 3, 0.0023078587919887488, 0.11539293959943743, 2.22, 61.69, 0.004502],
[1272, 3, 7.708125005687788e-05, 0.0038540625028438942, 2.22, 61.69, 0.004502],
[1273, 3, 0.00013809561181086458, 0.006904780590543229, 2.22, 61.69, 0.004502],
[1274, 2, 0.0033801727100761705, 0.1690086355038085, 0, 0, 0],
[1275, 2, 0.006307329492962109, 0.3153664746481055, 0, 0, 0],
[1276, 3, 0.001633288835647369, 0.08166444178236844, 2.22, 61.69, 0.004502],
[1277, 2, 0.004176942042758357, 0.20884710213791788, 0, 0, 0],
[1278, 2, 0.010850406134369231, 0.5425203067184615, 0, 0, 0],
[1279, 3, 1.1463493624723957e-07, 5.731746812361978e-06, 2.22, 61.69, 0.004502],
[1280, 3, 1.868718859531834e-05, 0.0009343594297659171, 2.22, 61.69, 0.004502],
[1281, 3, 9.093116541113913e-05, 0.004546558270556957, 2.22, 61.69, 0.004502],
[1282, 3, 0.00014093297147014944, 0.007046648573507473, 2.22, 61.69, 0.004502],
[1283, 2, 0.08261824948992594, 4.130912474496298, 0, 0, 0],
[1284, 3, 0.0015717663769534826, 0.07858831884767413, 2.22, 61.69, 0.004502],
[1285, 3, 9.75940596002738e-05, 0.00487970298001369, 2.22, 61.69, 0.004502],
[1286, 3, 0.00056207927347984, 0.028103963673991997, 2.22, 61.69, 0.004502],
[1287, 2, 0.005933272587501368, 0.29666362937506835, 0, 0, 0],
[1288, 2, 0.00944760882155904, 0.472380441077952, 0, 0, 0],
[1289, 2, 0.011723304434111076, 0.5861652217055537, 0, 0, 0],
[1290, 3, 0.0003120693634598793, 0.015603468172993969, 2.22, 61.69, 0.004502],
[1291, 2, 0.0062575490505418305, 0.31287745252709154, 0, 0, 0],
[1292, 3, 0.002653563231501149, 0.13267816157505744, 2.22, 61.69, 0.004502],
[1293, 3, 0.00015292290721046804, 0.007646145360523402, 2.22, 61.69, 0.004502],
[1294, 3, 0.0003436110439431119, 0.017180552197155596, 2.22, 61.69, 0.004502],
[1295, 3, 0.00037392918854889465, 0.01869645942744473, 2.22, 61.69, 0.004502],
[1296, 3, 0.0010457992969089768, 0.052289964845448844, 2.22, 61.69, 0.004502],
[1297, 2, 0.008463031765096106, 0.42315158825480526, 0, 0, 0],
[1298, 3, 0.00013751056653147346, 0.006875528326573674, 2.22, 61.69, 0.004502],
[1299, 3, 8.042050285098182e-05, 0.004021025142549092, 2.22, 61.69, 0.004502],
[1300, 3, 0.001511593201166196, 0.07557966005830981, 2.22, 61.69, 0.004502],
[1301, 2, 0.0038746782543149596, 0.193733912715748, 0, 0, 0],
[1302, 3, 0.0003104985267932093, 0.015524926339660468, 2.22, 61.69, 0.004502],
[1303, 3, 0.00027600750632746427, 0.013800375316373212, 2.22, 61.69, 0.004502],
[1304, 3, 0.0005078864968649606, 0.02539432484324803, 2.22, 61.69, 0.004502],
[1305, 3, 1.749097506605957e-07, 8.745487533029786e-06, 2.22, 61.69, 0.004502],
[1306, 3, 0.00011631130798083146, 0.005815565399041573, 2.22, 61.69, 0.004502],
[1307, 3, 1.9031130574577255e-05, 0.0009515565287288628, 2.22, 61.69, 0.004502],
[1308, 3, 0.00020870441847665842, 0.010435220923832922, 2.22, 61.69, 0.004502],
[1309, 3, 0.00010482675430356303, 0.0052413377151781515, 2.22, 61.69, 0.004502],
[1310, 3, 5.147051905645713e-05, 0.0025735259528228568, 2.22, 61.69, 0.004502],
[1311, 3, 0.0007546493032032542, 0.037732465160162716, 2.22, 61.69, 0.004502],
[1312, 2, 0.016696303623916272, 0.8348151811958137, 0, 0, 0],
[1313, 3, 0.0016210816774095515, 0.08105408387047756, 2.22, 61.69, 0.004502],
[1314, 3, 0.0006172994939148042, 0.030864974695740212, 2.22, 61.69, 0.004502],
[1315, 3, 0.000380566510448486, 0.019028325522424304, 2.22, 61.69, 0.004502],
[1316, 3, 9.740975241149951e-05, 0.004870487620574976, 2.22, 61.69, 0.004502],
[1317, 3, 0.0015252502049763412, 0.07626251024881707, 2.22, 61.69, 0.004502],
[1318, 3, 6.290104010825409e-05, 0.003145052005412705, 2.22, 61.69, 0.004502],
[1319, 3, 0.001127343871228203, 0.05636719356141015, 2.22, 61.69, 0.004502],
[1320, 3, 0.0012259126879493251, 0.061295634397466256, 2.22, 61.69, 0.004502],
[1321, 3, 1.025741798764967e-05, 0.0005128708993824835, 2.22, 61.69, 0.004502],
[1322, 3, 5.594974899273161e-05, 0.002797487449636581, 2.22, 61.69, 0.004502],
[1323, 2, 0.012675857799799822, 0.6337928899899912, 0, 0, 0],
[1324, 3, 0.00040653674535257717, 0.020326837267628857, 2.22, 61.69, 0.004502],
[1325, 2, 0.003627520579853253, 0.18137602899266267, 0, 0, 0],
[1326, 2, 0.0036242041289439157, 0.1812102064471958, 0, 0, 0],
[1327, 2, 0.0032338308031027566, 0.16169154015513784, 0, 0, 0],
[1328, 3, 0.0010226241895011407, 0.05113120947505704, 2.22, 61.69, 0.004502],
[1329, 2, 0.007079557010034116, 0.3539778505017058, 0, 0, 0],
[1330, 3, 0.0019182008434651947, 0.09591004217325974, 2.22, 61.69, 0.004502],
[1331, 3, 1.841349064624893e-05, 0.0009206745323124464, 2.22, 61.69, 0.004502],
[1332, 3, 0.0009497182757176749, 0.04748591378588375, 2.22, 61.69, 0.004502],
[1333, 3, 0.002360221646613738, 0.1180110823306869, 2.22, 61.69, 0.004502],
[1334, 3, 3.341080749010565e-05, 0.0016705403745052828, 2.22, 61.69, 0.004502],
[1335, 3, 0.00011853400646654089, 0.005926700323327045, 2.22, 61.69, 0.004502],
[1336, 3, 0.001449982244023167, 0.07249911220115836, 2.22, 61.69, 0.004502],
[1337, 2, 0.007722987880773172, 0.3861493940386586, 0, 0, 0],
[1338, 3, 4.583863270992302e-05, 0.002291931635496151, 2.22, 61.69, 0.004502],
[1339, 3, 0.0006421253879349708, 0.032106269396748544, 2.22, 61.69, 0.004502],
[1340, 2, 0.004462598113304154, 0.22312990566520774, 0, 0, 0],
[1341, 2, 0.013083384367936227, 0.6541692183968114, 0, 0, 0],
[1342, 3, 2.7612064279465152e-05, 0.0013806032139732577, 2.22, 61.69, 0.004502],
[1343, 3, 2.9331857710053014e-05, 0.0014665928855026507, 2.22, 61.69, 0.004502],
[1344, 3, 1.2445550750348109e-05, 0.0006222775375174054, 2.22, 61.69, 0.004502],
[1345, 3, 0.00014463036339991251, 0.007231518169995625, 2.22, 61.69, 0.004502],
[1346, 2, 0.013669449762218379, 0.6834724881109189, 0, 0, 0],
[1347, 2, 0.02636344185792537, 1.3181720928962688, 0, 0, 0],
[1348, 3, 0.0014456315404578254, 0.07228157702289127, 2.22, 61.69, 0.004502],
[1349, 3, 0.0026962338610516797, 0.13481169305258398, 2.22, 61.69, 0.004502],
[1350, 3, 6.046050411995944e-06, 0.0003023025205997972, 2.22, 61.69, 0.004502],
[1351, 3, 4.4227584989846377e-07, 2.2113792494923187e-05, 2.22, 61.69, 0.004502],
[1352, 3, 2.3229448219825517e-05, 0.001161472410991276, 2.22, 61.69, 0.004502],
[1354, 3, 3.996640048876418e-06, 0.00019983200244382086, 2.22, 61.69, 0.004502],
[1355, 3, 5.462032020538979e-05, 0.0027310160102694903, 2.22, 61.69, 0.004502],
[1356, 2, 0.004678278776831856, 0.23391393884159278, 0, 0, 0],
[1357, 2, 0.003594349677217709, 0.17971748386088549, 0, 0, 0],
[1358, 3, 8.033805037290232e-06, 0.00040169025186451157, 2.22, 61.69, 0.004502],
[1359, 2, 0.004496673943395517, 0.22483369716977586, 0, 0, 0],
[1360, 3, 0.0010909105792324338, 0.054545528961621695, 2.22, 61.69, 0.004502],
[1361, 2, 0.0040238936307783425, 0.20119468153891715, 0, 0, 0],
[1362, 2, 0.005036121783141224, 0.2518060891570612, 0, 0, 0],
[1363, 3, 2.301886324440155e-06, 0.00011509431622200775, 2.22, 61.69, 0.004502],
[1364, 3, 3.887723536233725e-06, 0.00019438617681168623, 2.22, 61.69, 0.004502],
[1365, 3, 2.8999446623259055e-08, 1.449972331162953e-06, 2.22, 61.69, 0.004502],
[1366, 3, 7.830373844390861e-05, 0.003915186922195431, 2.22, 61.69, 0.004502],
[1367, 3, 0.0027924620350495274, 0.13962310175247636, 2.22, 61.69, 0.004502],
[1368, 3, 0.00011593822539128845, 0.005796911269564423, 2.22, 61.69, 0.004502],
[1369, 3, 0.0005073133310147165, 0.025365666550735824, 2.22, 61.69, 0.004502],
[1370, 3, 1.4656649091035632e-05, 0.0007328324545517816, 2.22, 61.69, 0.004502],
[1371, 2, 0.005205462164069383, 0.26027310820346916, 0, 0, 0],
[1372, 2, 0.012284634505654547, 0.6142317252827274, 0, 0, 0],
[1373, 3, 0.0022409179594482334, 0.11204589797241167, 2.22, 61.69, 0.004502],
[1374, 2, 0.006889508467327262, 0.3444754233663631, 0, 0, 0],
[1375, 2, 0.003897629175102736, 0.1948814587551368, 0, 0, 0],
[1376, 2, 0.011218109707548912, 0.5609054853774457, 0, 0, 0],
[1377, 2, 0.01492085689824784, 0.7460428449123921, 0, 0, 0],
[1378, 2, 0.01566275025445262, 0.783137512722631, 0, 0, 0],
[1379, 3, 5.1310566028095876e-05, 0.002565528301404794, 2.22, 61.69, 0.004502],
[1380, 3, 7.724465320438908e-05, 0.003862232660219454, 2.22, 61.69, 0.004502],
[1381, 3, 6.446222679588771e-05, 0.003223111339794386, 2.22, 61.69, 0.004502],
[1382, 2, 0.008838822964419164, 0.4419411482209583, 0, 0, 0],
[1383, 2, 0.006991449967869686, 0.34957249839348425, 0, 0, 0],
[1384, 3, 0.0002972463393517766, 0.014862316967588829, 2.22, 61.69, 0.004502],
[1385, 3, 7.92302201959824e-06, 0.0003961511009799121, 2.22, 61.69, 0.004502],
[1386, 3, 4.2899112828393286e-05, 0.002144955641419664, 2.22, 61.69, 0.004502],
[1387, 3, 0.00022240699424911273, 0.011120349712455638, 2.22, 61.69, 0.004502],
[1388, 3, 5.909025672850305e-05, 0.0029545128364251525, 2.22, 61.69, 0.004502],
[1389, 3, 1.3594135764164036e-05, 0.0006797067882082019, 2.22, 61.69, 0.004502],
[1390, 3, 0.00023763846235409512, 0.011881923117704758, 2.22, 61.69, 0.004502],
[1391, 3, 3.321367742134543e-05, 0.0016606838710672715, 2.22, 61.69, 0.004502],
[1392, 3, 0.0012290826914265437, 0.06145413457132718, 2.22, 61.69, 0.004502],
[1393, 3, 8.763130962106806e-05, 0.004381565481053403, 2.22, 61.69, 0.004502],
[1394, 3, 6.862035771367977e-05, 0.003431017885683988, 2.22, 61.69, 0.004502],
# digsby/src/gui/pref/prefcontrols.py
'''
Utility functions for building GUI controls bound to the preference dictionary.
'''
from __future__ import with_statement
from wx import Choice, EXPAND, LEFT, EVT_CHOICE, BOTTOM, ALIGN_CENTER_VERTICAL, ALL, \
EVT_LEFT_DOWN, EVT_LEFT_UP, HORIZONTAL,VERTICAL, Rect, RectS
from wx import StaticBox, StaticBoxSizer, StaticText, TOP, Window
import wx, re
from util.primitives.error_handling import traceguard
from util.primitives.funcs import autoassign, isiterable, do
from util.primitives.misc import clamp
import logging; log = logging.getLogger("prefs")
from itertools import izip
from gui.toolbox import wx_prop
from gui.textutil import CopyFont
from gui.anylists import AnyList
from gui.validators import LengthLimit
from common import profile
from config import platformName
#log.setLevel(logging.DEBUG)
info = log.info; warning = log.warning
__metaclass__ = type
# matcher for formatting strings like %2(name.subname)d
_fmtmatcher = re.compile(r'%(\d*)\(([A-Za-z_]?[A-Za-z_0-9\.]+[A-Za-z_0-9])\)([sdf])')
typechar_to_type = dict(f=float, d=int)
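# Illustrative sketch of what _fmtmatcher captures: for a caption such as
# 'Keep logs for %3(logging.days)d days' (a hypothetical pref name) it yields
# the field width, the dotted pref name and the type character, which
# typechar_to_type then maps to a Python type:
#   >>> _fmtmatcher.search('Keep logs for %3(logging.days)d days').groups()
#   ('3', 'logging.days', 'd')
#   >>> typechar_to_type['d']
#   <type 'int'>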
from contextlib import contextmanager
sentinel = object() # unique "no default" marker used by get_pref below (an assumption: Digsby normally injects this as a builtin)
class NullEvtHandler(wx.EvtHandler):
def AddPendingEvent(self, evt):
print 'SSSHHH secret', evt
@contextmanager
def secret(ctrl):
ctrl.PushEventHandler(NullEvtHandler())
try:
yield
finally:
ctrl.PopEventHandler()
wx.Control.secret = secret
# These functions are the only interface to the outside preferences
def pref_mapping(key):
if not isinstance(key, str):
raise TypeError('prefname must be a string')
if key.startswith('local.'):
key = key[len('local.'):]
mapping = profile.localprefs
else:
mapping = profile.prefs
return key, mapping
def mark_pref(prefname, value):
'Change a preference!'
if not isinstance(prefname, str):
raise TypeError('prefname must be a string')
prefname, mapping = pref_mapping(prefname)
# Set and log the change
mapping[prefname] = value
log.info('%s ---> %s', prefname, mapping[prefname])
def get_pref(prefname, default=sentinel):
'Get a preference!'
prefname, mapping = pref_mapping(prefname)
try:
v = mapping[prefname]
except KeyError, e:
if default is sentinel:
try:
return profile.defaultprefs[prefname]
except KeyError:
raise e
else: v = default
return v
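# Usage sketch (hypothetical pref keys, assuming a populated profile):
#   mark_pref('appearance.skin', 'silverblue')      # writes to profile.prefs
#   mark_pref('local.proxy.port', 8080)             # 'local.' routes to profile.localprefs
#   port = get_pref('local.proxy.port', default=0)  # falls back to 0 if unset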
def get_prefs():
return profile.prefs
def get_localprefs():
return profile.localprefs
def wx_evt_type(component):
if isinstance(component, wx.Sizer):
component = component.Children[0].Window
if type(component) is not type:
component = type(component)
types = {
wx.CheckBox : wx.EVT_CHECKBOX,
}
return types[component]
#
# GUI elements
#
class PrefsPanel(wx.Panel):
'Simple backing wxPanel for pref controls.'
def __init__(self, parent):
wx.Panel.__init__(self, parent, size=(0,0), pos=(-50,50))
if 'wxMac' not in wx.PlatformInfo:
self.BackgroundColour = wx.WHITE
class PrefGroup(object):
SPACING = 4
def __init__(self, title, *elems):
if isinstance(title, tuple):
title, prefix = title
else:
prefix = ''
autoassign(self, locals())
if 'wxMSW' in wx.PlatformInfo:
# this implementation uses a wx.StaticBox
def build(self, parent):
box = StaticBox(parent, -1, self.title)
bsizer = StaticBoxSizer(box, VERTICAL)
self.add_to_sizer(parent, bsizer)
return bsizer
else:
# this implementation does not
def build(self, parent):
sz = VSizer()
text = StaticText(parent, -1, self.title)
text.SetBold()
sz.Add(text)
self.add_to_sizer(parent, sz)
return sz
def __call__(self, parent):
return self.build(parent)
def add_to_sizer(self, parent, sz):
for elem in self.elems:
if callable(elem):
if hasattr(elem, 'func_code') and elem.func_code.co_argcount == 2:
elem = elem(parent, self.prefix)
else:
elem = elem(parent)
sz.Add(elem, 0 if len(self.elems) > 1 else 1, EXPAND | ALL, self.SPACING)
class EnabledGroup(PrefGroup):
SPACING = 3
def __init__(self, title, required, *elems):
PrefGroup.__init__(self, title, *elems)
try:
self.req_cmp, self.req_cb = required
except TypeError:
self.req_cmp = required
self.req_cb = lambda v, *a: v
def build(self, parent):
sz = VSizer()
if self.title:
text = StaticText(parent, -1, self.title)
text.SetBold()
sz.Add(text)
if callable(self.req_cmp):
if self.req_cmp.func_code.co_argcount == 2:
self.req_cmp = self.req_cmp(parent, self.prefix)
else:
self.req_cmp = self.req_cmp(parent)
sz.Add(self.req_cmp, 0, EXPAND | TOP, 7)
spc_sz = HSizer()
sz.Add(spc_sz,0, EXPAND)
spc_sz.AddSpacer(int(self.req_cmp.Size.height*1.5))
cmp_sz = VSizer()
spc_sz.Add(cmp_sz,1)
self.add_to_sizer(parent, cmp_sz)
return sz
def add_to_sizer(self, parent, sz):
components = [] # collection of wxObjects
for elem in self.elems:
if callable(elem):
if elem.func_code.co_argcount == 2:
elem = elem(parent, self.prefix)
else:
elem = elem(parent)
sz.Add(elem, 0 if len(self.elems) > 1 else 1, EXPAND | TOP, border = self.SPACING)
components.append(elem)
if components:
def callback(e):
for c in components:
c.Enabled = self.req_cb(self.req_cmp.GetValue(), c)
if e: e.Skip()
self.req_cmp.Bind(wx_evt_type(self.req_cmp), callback)
callback(None)
return sz
class OffsetGroup(PrefGroup):
SPACING = 3
def __init__(self, title, header, *elems):
PrefGroup.__init__(self, title, *elems)
self.header = header
def build(self, parent):
sz = VSizer()
if self.title:
text = StaticText(parent, -1, self.title)
text.SetBold()
sz.Add(text)
sz.Add(self.header, 0, EXPAND | TOP, 7)
spc_sz = HSizer()
sz.Add(spc_sz,0, EXPAND)
spc_sz.AddSpacer(int(self.header.Size.height*1.5))
cmp_sz = VSizer()
spc_sz.Add(cmp_sz,1)
self.add_to_sizer(parent, cmp_sz)
return sz
def add_to_sizer(self, parent, sz):
components = [] # collection of wxObjects
for elem in self.elems:
sz.Add(elem, 0 if len(self.elems) > 1 else 1, EXPAND | TOP, border = self.SPACING)
components.append(elem)
return sz
def MakeEnabledSizer(baseclass):
class EnabledSizer(baseclass):
def __init__(self, *a, **k):
try:
baseclass.__init__(self, *a, **k)
except:
import sys; print >> sys.stderr, baseclass
raise
self._enabled = True
def SetEnabled(self, val=True):
res = self._enabled != val
for child in self.Children:
if child.Window:
child.Window.Enabled = val
self._enabled = val
return res
def SetSelection(self, i):
do(c.Window.SetSelection(i) for c in self.Children if hasattr(getattr(c, 'Window', None), 'SetSelection'))
def GetEnabled(self):
return self._enabled
Enable = SetEnabled
Enabled = property(GetEnabled, SetEnabled)
return EnabledSizer
def HSizer(cls = wx.BoxSizer, *a, **k):
'Returns a horizontal box sizer.'
return MakeEnabledSizer(cls)(HORIZONTAL, *a, **k)
def VSizer(cls = wx.BoxSizer, *a, **k):
'Returns a vertical box sizer.'
return MakeEnabledSizer(cls)(VERTICAL, *a, **k)
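# Usage sketch: these behave like ordinary box sizers but gain an Enabled
# property that toggles every child window at once (hypothetical children):
#   row = HSizer()
#   row.Add(wx.CheckBox(panel, -1, 'example'), 0)
#   row.Enabled = False # greys out the checkbox and any sibling windows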
def PlusMinus():
def buildplusminus(panel):
h = HSizer()
h.AddStretchSpacer(1)
plus = wx.Button(panel, wx.ID_NEW, '+', size=(30,15))
minus = wx.Button(panel, wx.ID_DELETE, '-', size=(30,15))
h.GetButtons = lambda p=plus, m=minus: (p, m)
h.Add(plus, 0, wx.ALL | wx.ALIGN_RIGHT)
h.Add(minus, 0, wx.ALL | wx.ALIGN_RIGHT)
return h
return buildplusminus
class Text(wx.TextCtrl):
'Text field. Updates its preference upon losing focus.'
def __init__(self, parent, prefname, name='', size=wx.DefaultSize, validator=None, _type=None, style = 0):
wx.TextCtrl.__init__(self, parent, size=size, validator=(validator or LengthLimit(2048)), style = style)
if not isinstance(prefname, basestring):
prefix, prefname = prefname
else:
prefix = ''
self.validator = validator
self.type = _type
self.prefix = prefix
self.prefname = prefname
self._style = style
if self.validator is not None:
self.Bind(wx.EVT_CHAR, self.validator.OnChar)
self.Bind(wx.EVT_TEXT, self.validator.OnText)
if self.pname.startswith('local.'):
self.prefname = self.pname[len('local.'):]
self.prefix = ''
self.mapping = profile.localprefs
else:
self.mapping = profile.prefs
self.mapping.link(self.pname, self.pref_changed)
self._secret = 0 # depth counter, renamed from self.secret so it does not shadow the secret() context manager below
self.Bind(wx.EVT_KILL_FOCUS, self.changed)
def changed(self, e):
self.mark_pref(self.Value)
def pref_changed(self, val):
self.SetValue(unicode(val))
@property
def pname(self):
'Glues a prefix on a name, if it exists.'
if self.prefname.startswith('local.'):
return self.prefname
return self.prefix + '.' + self.prefname if self.prefix else self.prefname
def mark_pref(self, val):
'''
Set the value in the preferences dictionary.
'''
if self.pname not in self.mapping:
raise KeyError("Trying to set a preference that doesn't exist: %s" % self.pname)
if self.type is not None:
try:
val = self.type(val)
except Exception:
log.error('"pref[%r] = %r" : value does not match %r, not setting it', self.pname, val, self.type)
return
if self._secret:
self.mapping.secret_set(self.pname, val)
else:
self.mapping[self.pname] = val
log.debug('%s ---> %s', self.pname, val)
def get_pref(self):
return self.mapping[self.pname]
@contextmanager
def secret(self):
self.PushEventHandler(NullEvtHandler())
self._secret += 1
try:
yield self
finally:
self._secret -= 1
self.PopEventHandler()
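# Usage sketch: silence pref-change side effects while updating the control
# programmatically (hypothetical control and value):
#   with textctrl.secret():
#       textctrl.SetValue(u'temporary value')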
def Button(label,callback=None):
def makeButton(parent,prefix = ''):
button = wx.Button(parent,-1,label, style=wx.BU_EXACTFIT)
if callback:
button.Bind(wx.EVT_BUTTON,lambda e: callback(button))
return button
return makeButton
def Label(text):
def makeStext(parent,prefix=''):
return SText(parent,text)
return makeStext
WindowClass = getattr(wx, '_Window', wx.Window)
def SText(parent, text):
if not isinstance(parent, WindowClass):
raise TypeError('first arg to SText must be a wx.Window')
return StaticText(parent, -1, text)
def Check(prefname, caption, callback = None, default = False, help = None):
'''
A checkbox for a single preference.
Optionally, a %2(some.pref)d type string in the caption will be turned
into an additional textbox-linked-preference.
'''
if callback is None:
callback = lambda *a, **k: None
if caption.find('%') != -1:
def my_check(parent, prefix = ''):
p = wx.Panel(parent)
s = caption
textctrl = None
checkPref = pname(prefix, prefname) if prefname is not None else None
checkbox = boundcheckbox(p, s[:s.find('%')], checkPref,
callback=(lambda v: (textctrl.Enable(v), callback(v))),
default = default)
elems = [ checkbox ]
match = _fmtmatcher.search(s)
if match:
n, name, type = match.groups()
textctrl = Text(p, pname(prefix, name), name,
size = (int(n) * 8 +10,-1),
validator = validators[type](),
_type=typechar_to_type.get(type, lambda x:x))
textctrl.SetMaxLength(long(n))
textctrl.Enable(checkbox.GetValue())
elems += [textctrl]
elems += [wx.StaticText(p, -1, s[match.span()[1]:])]
hz = p.Sizer = HSizer()
hz.check = checkbox
hz.AddMany([(elem, 0, ALIGN_CENTER_VERTICAL) for elem in elems])
return p
else:
def my_check(parent, prefix='', default = default):
return boundcheckbox(parent, caption, pname(prefix, prefname), callback, default = default, help = help)
return my_check
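# Usage sketch (hypothetical pref names): a plain checkbox, and a caption that
# embeds a textbox bound to a second pref via the %N(name)d syntax above:
#   PrefGroup('Logging',
#             Check('logging.enabled', 'Keep logs for %3(logging.days)d days'))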
def CheckChoice(checkprefname, choiceprefname, text, choices, allow_custom = False, max_width=None):
'Checkbox with dropdown after.'
def build(parent, prefix = ''):
choice = Choice(choiceprefname, choices, allow_custom = allow_custom, max_width=max_width)(parent, prefix)
choice.Enable(bool(get_pref(pname(prefix,checkprefname))))
check = Check(checkprefname, text,
lambda v: choice.Enable(v))(parent, prefix)
sz = HSizer()
sz.Add(check, 0, EXPAND | ALIGN_CENTER_VERTICAL)
sz.Add(choice, 0, EXPAND | ALIGN_CENTER_VERTICAL)
sz.Bind = check.Bind
sz.GetValue = check.GetValue
sz.SetValue = check.SetValue
return sz
return build
class _AutoCombo(wx.ComboBox):
def __init__(self, *a, **k):
k['style'] = k.get('style', 0) | wx.CB_DROPDOWN #| wx.CB_READONLY
if k.get('validator', None) is None:
k['validator'] = LengthLimit(1024)
wx.ComboBox.__init__(self, *a, **k)
self._items = self.Items
self._modifying = False
self.last_selection = self.Value
def bind_evts(self):
self.Bind(wx.EVT_TEXT, self.keypressed)
def keypressed(self, e):
e.Skip()
if self._modifying: return
bad_selection = False
self._modifying = True
oldval = self.Value
newitems = filter(lambda
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 15 11:15:29 2020
@author:
Dr. <NAME>
European Space Agency (ESA)
European Space Research and Technology Centre (ESTEC)
Keplerlaan 1, 2201 AZ Noordwijk, The Netherlands
Email: <EMAIL>
GitHub: mnguenther
Twitter: m_n_guenther
Web: www.mnguenther.com
"""
from __future__ import print_function, division, absolute_import
#::: modules
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from astropy.timeseries import LombScargle
from statsmodels.graphics.tsaplots import plot_acf
#::: my modules
from ..exoworlds_rdx.lightcurves.lightcurve_tools import plot_phase_folded_lightcurve
from allesfitter.time_series import clean, sigma_clip, slide_clip
#::: plotting settings
import seaborn as sns
sns.set(context='paper', style='ticks', palette='deep', font='sans-serif', font_scale=1.5, color_codes=True)
sns.set_style({"xtick.direction": "in","ytick.direction": "in"})
sns.set_context(rc={'lines.markeredgewidth': 1})
###############################################################################
#::: run a periodogram via astropy to get the dominant period and FAP
###############################################################################
def estimate_period(time, y, y_err, clip=True, plot=True, **kwargs):
"""
Run a Lomb-Scargle Periodogram to find periodic signals. It's recommended
to use the allesfitter.time_series functions sigma_clip and slide_clip beforehand.
Parameters
----------
time : array of float
e.g. time array (usually in days)
y : array of float
e.g. flux or RV array (usually as normalized flux or RV in km/s)
y_err : array of float
e.g. flux or RV error array (usually as normalized flux or RV in km/s)
clip : bool, optional
Automatically clip the input data with sigma_clip(low=4, high=4)
and slide_clip(window_length=1, low=4, high=4). The default is True.
plot : bool, optional
To plot or not, that is the question. The default is True.
**kwargs : collection of keyword arguments
May contain 'minperiod' and 'maxperiod' (in days) to bound the period search;
the defaults are 10 times the cadence and the total time span, respectively.
Returns
-------
best_period : float
The best period found.
FAP : float
The false alarm probability for the best period.
fig : matplotlib.figure object, optional
The summary figure. Only returned if plot is True.
"""
#==========================================================================
#::: clean the inputs
#==========================================================================
time, y, y_err = clean(time, y, y_err)
plot_bool = plot
if clip:
y = sigma_clip(time, y, low=4, high=4)
y = slide_clip(time, y, window_length=1, low=4, high=4)
time, y, y_err = clean(time, y, y_err)
#==========================================================================
#::: handle inputs
#==========================================================================
cadence = np.nanmedian(np.diff(time))
if 'minperiod' not in kwargs: kwargs['minperiod'] = 10. * cadence
if 'maxperiod' not in kwargs: kwargs['maxperiod'] = time[-1]-time[0]
minfreq = 1./kwargs['maxperiod']
maxfreq = 1./kwargs['minperiod']
#==========================================================================
#::: now do the periodogram
#==========================================================================
ls = LombScargle(time, y) #set up the astropy Lomb-Scargle periodogram for the input series
frequency, power = ls.autopower(minimum_frequency=minfreq, maximum_frequency=maxfreq) #Determine the LS periodogram
best_power = np.nanmax(power)
best_frequency = frequency[np.argmax(power)]
best_period = 1./best_frequency
FAP=ls.false_alarm_probability(best_power) #Calculate the FAP for the highest peak in the power array
#==========================================================================
#::: plot
#==========================================================================
def plot():
peak_loc=round(float(1./best_frequency),2)
FAP_probabilities = [0.5, 0.1, 0.01] #FAP probabilities whose power levels will be marked (i.e. 50%, 10% and 1%)
FAP_levels=ls.false_alarm_level(FAP_probabilities) #Get corresponding LS Power values
fig, axes = plt.subplots(4, 1, figsize=[10,15], tight_layout=True)
#::: plot the periodogram
ax = axes[0]
ax.semilogx(1./frequency,power,color='b')
ax.plot(peak_loc, best_power, marker='d', markersize=12, color='r')
ax.text(peak_loc*1.2,best_power*0.95,'Peak Period: '+str(peak_loc)+' days')
ax.text(peak_loc*1.2,best_power*0.85,'FAP: '+str(FAP))
ax.hlines(FAP_levels, kwargs['minperiod'], kwargs['maxperiod'], color='grey', lw=1)
ax.text(kwargs['maxperiod'], FAP_levels[0],'50% FAP ', ha='right')
ax.text(kwargs['maxperiod'], FAP_levels[1],'10% FAP ', ha='right')
ax.text(kwargs['maxperiod'], FAP_levels[2],'1% FAP ', ha='right')
ax.set(xlabel='Period (days)', ylabel='L-S power')
ax.tick_params(axis='both',which='major')
#::: plot the phase-folded data
ax = axes[1]
plot_phase_folded_lightcurve(time, y, period=1./best_frequency, epoch=0, ax=ax)
ax.set(ylim=[np.nanmin(y), np.nanmax(y)], ylabel='Data (clipped; phased)')
#::: plot the phase-folded data, zoomed
ax = axes[2]
plot_phase_folded_lightcurve(time, y, period=1./best_frequency, epoch=0, ax=ax)
ax.set(ylabel='Data (clipped; phased; y-zoom)')
#::: plot the autocorrelation of the data
ax = axes[3]
plot_acf(pd.Series(y, index=time), ax=ax, lags=np.linspace(start=1,stop=2*best_period/cadence,num=100,dtype=int))
ax.set(xlabel='Lag', ylabel='Autocorrelation', title='')
return fig
#==========================================================================
#::: return
#==========================================================================
if plot:
fig = make_fig()
return best_period, FAP, fig
else:
return best_period, FAP
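#==========================================================================
#::: usage sketch (illustrative; not part of the original module)
#==========================================================================
#::: A hedged example of recovering an injected signal with estimate_period.
#::: The 3.3-day sinusoid, its amplitude, and the noise level are made-up
#::: values for illustration; numpy (np) is assumed to be imported above.
def _demo_estimate_period():
    time = np.arange(0., 27.4, 2. / 60. / 24.)  #~27 days at 2 min cadence
    y = 1. + 1e-3 * np.sin(2. * np.pi * time / 3.3)  #injected 3.3 day signal
    y += 1e-4 * np.random.randn(len(time))  #white noise
    y_err = 1e-4 * np.ones(len(time))  #constant error bars
    best_period, FAP = estimate_period(time, y, y_err, plot=False)
    print(best_period, FAP)  #expect a best period near 3.3 days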
# ###############################################################################
# #::: run a periodogram via astropy to get the dominant period and FAP
# ###############################################################################
def estimate_period_old(time, y, y_err, periodogram_kwargs=None, astropy_kwargs=None, wotan_kwargs=None, options=None):
'''
Parameters
----------
time : array of float
e.g. time array (usually in days)
y : array of float
e.g. flux or RV array
y_err : array of float
e.g. flux or RV error array
periodogram_kwargs : None or dictionary, optional
May contain the keys 'minperiod' and 'maxperiod' (in days).
The default is None, which evaluates to 10x the cadence and the full time span, respectively.
astropy_kwargs : None or dictionary, optional
May contain the key 'sigma' for the global sigma clipping.
The default is None, which evaluates to sigma=5.
wotan_kwargs : None or dictionary, optional
May contain the key 'slide_clip', itself a dictionary with the keys 'window_length', 'low' and 'high'.
The default is None, which evaluates to a 1 day window and 5 sigma bounds.
options : None or dictionary, optional
The default is None, which will evaluate to:
options = {}
options['show_plot'] = True #show a plot in the terminal?
options['save_plot'] = True #save a plot?
options['fname_plot'] = 'periodogram' #filename of the plot
options['outdir'] = '.' #output directory for the plot
If a dictionary is given, it may contain and overwrite all these keys.
Returns
-------
best_period : float
The best period found (in days).
FAP : float
The false alarm probability for the best period.
axes : array of matplotlib axes, optional
Only returned if options['return_plot'] is True.
'''
#==========================================================================
#::: handle inputs
#==========================================================================
cadence = np.nanmedian(np.diff(time))
if periodogram_kwargs is None: periodogram_kwargs = {}
if 'minperiod' not in periodogram_kwargs: periodogram_kwargs['minperiod'] = 10. * cadence
if 'maxperiod' not in periodogram_kwargs: periodogram_kwargs['maxperiod'] = time[-1]-time[0]
if astropy_kwargs is None: astropy_kwargs = {}
if 'sigma' not in astropy_kwargs: astropy_kwargs['sigma'] = 5
if wotan_kwargs is None: wotan_kwargs = {}
if 'slide_clip' not in wotan_kwargs: wotan_kwargs['slide_clip'] = {}
if 'window_length' not in wotan_kwargs['slide_clip']: wotan_kwargs['slide_clip']['window_length'] = 1.
if 'low' not in wotan_kwargs['slide_clip']: wotan_kwargs['slide_clip']['low'] = 5
if 'high' not in wotan_kwargs['slide_clip']: wotan_kwargs['slide_clip']['high'] = 5
if options is None: options = {}
if 'show_plot' not in options: options['show_plot'] = False
if 'save_plot' not in options: options['save_plot'] = False
if 'return_plot' not in options: options['return_plot'] = False
if 'fname_plot' not in options: options['fname_plot'] = 'periodogram'
if 'outdir' not in options: options['outdir'] = '.'
minfreq = 1./periodogram_kwargs['maxperiod']
maxfreq = 1./periodogram_kwargs['minperiod']
#==========================================================================
#::: first, a global 5 sigma clip
#==========================================================================
ff = sigma_clip(time, np.ma.masked_invalid(y), low=astropy_kwargs['sigma'], high=astropy_kwargs['sigma']) #astropy wants masked arrays
# ff = np.array(ff.filled(np.nan)) #use NaN instead of masked arrays, because masked arrays drive me crazy
#==========================================================================
#::: fast slide clip (1 day, 5 sigma) [replaces Wotan's slow slide clip]
#==========================================================================
try: ff = slide_clip(time, ff, **wotan_kwargs['slide_clip'])
except Exception: print('Fast slide clip failed and was skipped.')
#==========================================================================
#::: now do the periodogram
#==========================================================================
ind_notnan = np.where(~np.isnan(time*ff*y_err))
ls = LombScargle(time[ind_notnan], ff[ind_notnan]) #analyze the clipped time series with astropy's Lomb-Scargle periodogram
frequency, power = ls.autopower(minimum_frequency=minfreq, maximum_frequency=maxfreq) #Determine the LS periodogram
best_power = np.nanmax(power)
best_frequency = frequency[np.argmax(power)]
FAP=ls.false_alarm_probability(best_power) #Calculate the FAP for the highest peak in the power array
#==========================================================================
#::: plots
#==========================================================================
if options['show_plot'] or options['save_plot'] or options['return_plot']:
peak_loc=round(float(1./best_frequency),2)
FAP_probabilities = [0.5, 0.1, 0.01] #Enter FAP values you want to determine
FAP_levels=ls.false_alarm_level(FAP_probabilities) #Get corresponding LS Power values
fig, axes = plt.subplots(4, 1, figsize=[10,15], tight_layout=True)
# axes = np.atleast_1d(axes)
# ax = axes[0]
# ind_clipped = np.where(np.isnan(ff))[0]
# ax.plot(time[ind_clipped], flux[ind_clipped], 'r.', rasterized=True)
# ax.plot(time, ff, 'b.', rasterized=True)
# ax.set(xlabel='Time (BJD)', ylabel='Flux')
# ax = axes[1]
# ax.plot(time, ff, 'b.', rasterized=True)
# ax.set(xlabel='Time (BJD)', ylabel='Flux (clipped)')
ax = axes[0]
ax.semilogx(1./frequency,power,color='b')
ax.plot(peak_loc, best_power, marker='d', markersize=12, color='r')
ax.text(peak_loc*1.2,best_power*0.95,'Peak Period: '+str(peak_loc)+' days')
ax.text(peak_loc*1.2,best_power*0.85,'FAP: '+str(FAP))
ax.hlines(FAP_levels, periodogram_kwargs['minperiod'], periodogram_kwargs['maxperiod'], color='grey', lw=1)
ax.text(periodogram_kwargs['maxperiod'], FAP_levels[0],'50% FAP ', ha='right')
ax.text(periodogram_kwargs['maxperiod'], FAP_levels[1],'10% FAP ', ha='right')
ax.text(periodogram_kwargs['maxperiod'], FAP_levels[2],'1% FAP ', ha='right')
ax.set(xlabel='Period (days)', ylabel='L-S power')
ax.tick_params(axis='both',which='major')
# ax.text(peak_loc*1.2,best_power*0.75,'std_old:'+str(std_old*1e3)[0:4]+' --> '+'std_new:'+str(std_new*1e3)[0:4])
ax = axes[1]
plot_phase_folded_lightcurve(time, ff, period=1./best_frequency, epoch=0, ax=ax)
ax.set(ylim=[np.nanmin(ff), np.nanmax(ff)], ylabel='Data (clipped; phased)')
ax = axes[2]
plot_phase_folded_lightcurve(time, ff, period=1./best_frequency, epoch=0, ax=ax)
ax.set(ylabel='Data (clipped; phased; y-zoom)')
#::: plot the autocorrelation of the data
ax = axes[3]
plot_acf(pd.Series(ff[ind_notnan], index=time[ind_notnan]), ax=ax, lags=np.linspace(start=1,stop=2000,num=100,dtype=int))
ax.set(xlabel='Lag', ylabel='Autocorrelation', title='')
if options['save_plot']:
if not os.path.exists(options['outdir']): os.makedirs(options['outdir'])
fig.savefig(os.path.join(options['outdir'],options['fname_plot']+'.pdf'), bbox_inches='tight')
if options['show_plot']:
plt.show()
else:
plt.close(fig)
if options['return_plot'] is True:
return 1./best_frequency, FAP, axes
else:
return 1./best_frequency, FAP
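#::: usage sketch (illustrative; not part of the original module): the
#::: deprecated variant takes nested option dictionaries instead of **kwargs,
#::: e.g. with made-up period bounds and an assumed output directory:
#
#   best_period, FAP = estimate_period_old(
#       time, y, y_err,
#       periodogram_kwargs={'minperiod': 0.5, 'maxperiod': 20.},
#       options={'show_plot': True, 'save_plot': True, 'outdir': 'results'})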
###############################################################################
#::: estimate a good window length for spline knots, running median etc.
###############################################################################
# def estimate_window_length(time, flux, flux_err, periodogram_kwargs=None, wotan_kwargs=None, options=None):
#     window_length_min = 12./24. #at least 12h to not destroy planets
#     window_length_max = 1. #at most 1 day
#     cadence = np.median(np.diff(time))
#     best_period, FAP = estimate_period(time, flux, flux_err)
#     if best_period < 100.*cadence:
#         window_length = best_period/10.
#         return np.min([np.max([window_length, window_length_min]), window_length_max])
#     else:
#         warnings.warn('Returning None. Best period was found to be '
#                       + str(best_period*24*60) + ' min., but cadence is only '
#                       + str(cadence*24*60) + ' min.')
#         return None
###############################################################################
#::: remove periodic trends
###############################################################################
# def estimate_trend(time, flux, flux_err):
# iterations = 3
# wotan_kwargs = {'slide_clip':{}, 'flatten':{}}
# wotan_kwargs['slide_clip']['window_length'] = 1.
# wotan_kwargs['slide_clip']['low'] = 3.
# wotan_kwargs['slide_clip']['high'] = 3.
# wotan_kwargs['flatten']['method']
# Generated from LTLfFormulaParser.g4 by ANTLR 4.9
# encoding: utf-8
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\26")
buf.write("\u00e5\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23\t\23")
buf.write("\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31")
buf.write("\t\31\4\32\t\32\3\2\3\2\3\2\3\3\3\3\3\4\3\4\3\4\7\4=\n")
buf.write("\4\f\4\16\4@\13\4\3\5\3\5\3\5\7\5E\n\5\f\5\16\5H\13\5")
buf.write("\3\6\3\6\3\6\7\6M\n\6\f\6\16\6P\13\6\3\7\3\7\3\7\7\7U")
buf.write("\n\7\f\7\16\7X\13\7\3\b\3\b\3\b\7\b]\n\b\f\b\16\b`\13")
buf.write("\b\3\t\3\t\3\t\7\te\n\t\f\t\16\th\13\t\3\n\3\n\3\n\7\n")
buf.write("m\n\n\f\n\16\np\13\n\3\13\3\13\3\13\7\13u\n\13\f\13\16")
buf.write("\13x\13\13\3\f\3\f\3\f\7\f}\n\f\f\f\16\f\u0080\13\f\3")
buf.write("\r\5\r\u0083\n\r\3\r\3\r\3\16\5\16\u0088\n\16\3\16\3\16")
buf.write("\3\17\5\17\u008d\n\17\3\17\3\17\3\20\5\20\u0092\n\20\3")
buf.write("\20\3\20\3\21\3\21\5\21\u0098\n\21\3\21\3\21\3\21\3\21")
buf.write("\5\21\u009e\n\21\3\22\3\22\5\22\u00a2\n\22\3\23\3\23\3")
buf.write("\24\3\24\3\24\7\24\u00a9\n\24\f\24\16\24\u00ac\13\24\3")
buf.write("\25\3\25\3\25\7\25\u00b1\n\25\f\25\16\25\u00b4\13\25\3")
buf.write("\26\3\26\3\26\7\26\u00b9\n\26\f\26\16\26\u00bc\13\26\3")
buf.write("\27\3\27\3\27\7\27\u00c1\n\27\f\27\16\27\u00c4\13\27\3")
buf.write("\30\3\30\3\30\7\30\u00c9\n\30\f\30\16\30\u00cc\13\30\3")
buf.write("\31\5\31\u00cf\n\31\3\31\3\31\5\31\u00d3\n\31\3\31\3\31")
buf.write("\3\31\3\31\5\31\u00d9\n\31\3\32\7\32\u00dc\n\32\f\32\16")
buf.write("\32\u00df\13\32\3\32\3\32\5\32\u00e3\n\32\3\32\2\2\33")
buf.write("\2\4\6\b\n\f\16\20\22\24\26\30\32\34\36 \"$&(*,.\60\62")
buf.write("\2\2\2\u00e6\2\64\3\2\2\2\4\67\3\2\2\2\69\3\2\2\2\bA\3")
buf.write("\2\2\2\nI\3\2\2\2\fQ\3\2\2\2\16Y\3\2\2\2\20a\3\2\2\2\22")
buf.write("i\3\2\2\2\24q\3\2\2\2\26y\3\2\2\2\30\u0082\3\2\2\2\32")
buf.write("\u0087\3\2\2\2\34\u008c\3\2\2\2\36\u0091\3\2\2\2 \u009d")
buf.write("\3\2\2\2\"\u00a1\3\2\2\2$\u00a3\3\2\2\2&\u00a5\3\2\2\2")
buf.write("(\u00ad\3\2\2\2*\u00b5\3\2\2\2,\u00bd\3\2\2\2.\u00c5\3")
buf.write("\2\2\2\60\u00d8\3\2\2\2\62\u00e2\3\2\2\2\64\65\5\4\3\2")
buf.write("\65\66\7\2\2\3\66\3\3\2\2\2\678\5\6\4\28\5\3\2\2\29>\5")
buf.write("\b\5\2:;\7\16\2\2;=\5\b\5\2<:\3\2\2\2=@\3\2\2\2><\3\2")
buf.write("\2\2>?\3\2\2\2?\7\3\2\2\2@>\3\2\2\2AF\5\n\6\2BC\7\17\2")
buf.write("\2CE\5\n\6\2DB\3\2\2\2EH\3\2\2\2FD\3\2\2\2FG\3\2\2\2G")
buf.write("\t\3\2\2\2HF\3\2\2\2IN\5\f\7\2JK\7\20\2\2KM\5\f\7\2LJ")
buf.write("\3\2\2\2MP\3\2\2\2NL\3\2\2\2NO\3\2\2\2O\13\3\2\2\2PN\3")
buf.write("\2\2\2QV\5\16\b\2RS\7\21\2\2SU\5\16\b\2TR\3\2\2\2UX\3")
buf.write("\2\2\2VT\3\2\2\2VW\3\2\2\2W\r\3\2\2\2XV\3\2\2\2Y^\5\20")
buf.write("\t\2Z[\7\22\2\2[]\5\20\t\2\\Z\3\2\2\2]`\3\2\2\2^\\\3\2")
buf.write("\2\2^_\3\2\2\2_\17\3\2\2\2`^\3\2\2\2af\5\22\n\2bc\7\3")
buf.write("\2\2ce\5\22\n\2db\3\2\2\2eh\3\2\2\2fd\3\2\2\2fg\3\2\2")
buf.write("\2g\21\3\2\2\2hf\3\2\2\2in\5\24\13\2jk\7\5\2\2km\5\24")
buf.write("\13\2lj\3\2\2\2mp\3\2\2\2nl\3\2\2\2no\3\2\2\2o\23\3\2")
buf.write("\2\2pn\3\2\2\2qv\5\26\f\2rs\7\6\2\2su\5\26\f\2tr\3\2\2")
buf.write("\2ux\3\2\2\2vt\3\2\2\2vw\3\2\2\2w\25\3\2\2\2xv\3\2\2\2")
buf.write("y~\5\30\r\2z{\7\4\2\2{}\5\30\r\2|z\3\2\2\2}\u0080\3\2")
buf.write("\2\2~|\3\2\2\2~\177\3\2\2\2\177\27\3\2\2\2\u0080~\3\2")
buf.write("\2\2\u0081\u0083\7\7\2\2\u0082\u0081\3\2\2\2\u0082\u0083")
buf.write("\3\2\2\2\u0083\u0084\3\2\2\2\u0084\u0085\5\32\16\2\u0085")
buf.write("\31\3\2\2\2\u0086\u0088\7\b\2\2\u0087\u0086\3\2\2\2\u0087")
buf.write("\u0088\3\2\2\2\u0088\u0089\3\2\2\2\u0089\u008a\5\34\17")
buf.write("\2\u008a\33\3\2\2\2\u008b\u008d\7\t\2\2\u008c\u008b\3")
buf.write("\2\2\2\u008c\u008d\3\2\2\2\u008d\u008e\3\2\2\2\u008e\u008f")
buf.write("\5\36\20\2\u008f\35\3\2\2\2\u0090\u0092\7\n\2\2\u0091")
buf.write("\u0090\3\2\2\2\u0091\u0092\3\2\2\2\u0092\u0093\3\2\2\2")
buf.write("\u0093\u0094\5 \21\2\u0094\37\3\2\2\2\u0095\u009e\5\"")
buf.write("\22\2\u0096\u0098\7\23\2\2\u0097\u0096\3\2\2\2\u0097\u0098")
buf.write("\3\2\2\2\u0098\u0099\3\2\2\2\u0099\u009a\7\24\2\2\u009a")
buf.write("\u009b\5\4\3\2\u009b\u009c\7\25\2\2\u009c\u009e\3\2\2")
buf.write("\2\u009d\u0095\3\2\2\2\u009d\u0097\3\2\2\2\u009e!\3\2")
buf.write("\2\2\u009f\u00a2\7\13\2\2\u00a0\u00a2\5$\23\2\u00a1\u009f")
buf.write("\3\2\2\2\u00a1\u00a0\3\2\2\2\u00a2#\3\2\2\2\u00a3\u00a4")
buf.write("\5&\24\2\u00a4%\3\2\2\2\u00a5\u00aa\5(\25\2\u00a6\u00a7")
buf.write("\7\16\2\2\u00a7\u00a9\5(\25\2\u00a8\u00a6\3\2\2\2\u00a9")
buf.write("\u00ac\3\2\2\2\u00aa\u00a8\3\2\2\2\u00aa\u00ab\3\2\2\2")
buf.write("\u00ab\'\3\2\2\2\u00ac\u00aa\3\2\2\2\u00ad\u00b2\5*\26")
buf.write("\2\u00ae\u00af\7\17\2\2\u00af\u00b1\5*\26\2\u00b0\u00ae")
buf.write("\3\2\2\2\u00b1\u00b4\3\2\2\2\u00b2\u00b0\3\2\2\2\u00b2")
buf.write("\u00b3\3\2\2\2\u00b3)\3\2\2\2\u00b4\u00b2\3\2\2\2\u00b5")
buf.write("\u00ba\5,\27\2\u00b6\u00b7\7\20\2\2\u00b7\u00b9\5,\27")
buf.write("\2\u00b8\u00b6\3\2\2\2\u00b9\u00bc\3\2\2\2\u00ba\u00b8")
buf.write("\3\2\2\2\u00ba\u00bb\3\2\2\2\u00bb+\3\2\2\2\u00bc\u00ba")
buf.write("\3\2\2\2\u00bd\u00c2\5.\30\2\u00be\u00bf\7\21\2\2\u00bf")
buf.write("\u00c1\5.\30\2\u00c0\u00be\3\2\2\2\u00c1\u00c4\3\2\2\2")
buf.write("\u00c2\u00c0\3\2\2\2\u00c2\u00c3\3\2\2\2\u00c3-\3\2\2")
buf.write("\2\u00c4\u00c2\3\2\2\2\u00c5\u00ca\5\60\31\2\u00c6\u00c7")
buf.write("\7\22\2\2\u00c7\u00c9\5\60\31\2\u00c8\u00c6\3\2\2\2\u00c9")
buf.write("\u00cc\3\2\2\2\u00ca\u00c8\3\2\2\2\u00ca\u00cb\3\2\2\2")
buf.write("\u00cb/\3\2\2\2\u00cc\u00ca\3\2\2\2\u00cd\u00cf\7\23\2")
buf.write("\2\u00ce\u00cd\3\2\2\2\u00ce\u00cf\3\2\2\2\u00cf\u00d0")
buf.write("\3\2\2\2\u00d0\u00d9\5\62\32\2\u00d1\u00d3\7\23\2\2\u00d2")
buf.write("\u00d1\3\2\2\2\u00d2\u00d3\3\2\2\2\u00d3\u00d4\3\2\2\2")
buf.write("\u00d4\u00d5\7\24\2\2\u00d5\u00d6\5$\23\2\u00d6\u00d7")
buf.write("\7\25\2\2\u00d7\u00d9\3\2\2\2\u00d8\u00ce\3\2\2\2\u00d8")
buf.write("\u00d2\3\2\2\2\u00d9\61\3\2\2\2\u00da\u00dc\7\13\2\2\u00db")
buf.write("\u00da\3\2\2\2\u00dc\u00df\3\2\2\2\u00dd\u00db\3\2\2\2")
buf.write("\u00dd\u00de\3\2\2\2\u00de\u00e3\3\2\2\2\u00df\u00dd\3")
buf.write("\2\2\2\u00e0\u00e3\7\f\2\2\u00e1\u00e3\7\r\2\2\u00e2\u00dd")
buf.write("\3\2\2\2\u00e2\u00e0\3\2\2\2\u00e2\u00e1\3\2\2\2\u00e3")
buf.write("\63\3\2\2\2\34>FNV^fnv~\u0082\u0087\u008c\u0091\u0097")
buf.write("\u009d\u00a1\u00aa\u00b2\u00ba\u00c2\u00ca\u00ce\u00d2")
buf.write("\u00d8\u00dd\u00e2")
return buf.getvalue()
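# Illustrative usage sketch (not part of the ANTLR-generated output). It
# assumes the companion lexer module `LTLfFormulaParserLexer`, which ANTLR
# normally emits next to this parser, and an operator spelling ("G", "F",
# "->") that is defined by that lexer rather than shown here:
#
#   from antlr4 import InputStream, CommonTokenStream
#   from LTLfFormulaParserLexer import LTLfFormulaParserLexer
#
#   def parse_ltlf(formula):
#       lexer = LTLfFormulaParserLexer(InputStream(formula))
#       parser = LTLfFormulaParserParser(CommonTokenStream(lexer))
#       return parser.start()  # 'start' is the grammar's root rule
#
#   tree = parse_ltlf("G(request -> F grant)")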
class LTLfFormulaParserParser ( Parser ):
grammarFileName = "LTLfFormulaParser.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
literalNames = [ ]
symbolicNames = [ "<INVALID>", "WEAKUNTIL", "UNTIL", "RELEASE", "STRONGRELEASE",
"GLOBALLY", "EVENTUALLY", "WEAKNEXT", "NEXT", "NAME",
"TRUE", "FALSE", "DOUBLEIMPLY", "IMPLY", "XOR", "OR",
"AND", "NOT", "LPAREN", "RPAREN", "WS" ]
RULE_start = 0
RULE_expression = 1
RULE_doubleImplicationTemp = 2
RULE_implicationTemp = 3
RULE_xorTemp = 4
RULE_orTemp = 5
RULE_andTemp = 6
RULE_release = 7
RULE_weakUntil = 8
RULE_strongRelease = 9
RULE_until = 10
RULE_globally = 11
RULE_eventually = 12
RULE_weakNext = 13
RULE_next_ = 14
RULE_notTemp = 15
RULE_ltlfAtom = 16
RULE_propositionalFormula = 17
RULE_doubleImplicationProp = 18
RULE_implicationProp = 19
RULE_xorProp = 20
RULE_orProp = 21
RULE_andProp = 22
RULE_notProp = 23
RULE_atom = 24
ruleNames = [ "start", "expression", "doubleImplicationTemp", "implicationTemp",
"xorTemp", "orTemp", "andTemp", "release", "weakUntil",
"strongRelease", "until", "globally", "eventually", "weakNext",
"next_", "notTemp", "ltlfAtom", "propositionalFormula",
"doubleImplicationProp", "implicationProp", "xorProp",
"orProp", "andProp", "notProp", "atom" ]
EOF = Token.EOF
WEAKUNTIL=1
UNTIL=2
RELEASE=3
STRONGRELEASE=4
GLOBALLY=5
EVENTUALLY=6
WEAKNEXT=7
NEXT=8
NAME=9
TRUE=10
FALSE=11
DOUBLEIMPLY=12
IMPLY=13
XOR=14
OR=15
AND=16
NOT=17
LPAREN=18
RPAREN=19
WS=20
def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.9")
self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
self._predicates = None
class StartContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def expression(self):
return self.getTypedRuleContext(LTLfFormulaParserParser.ExpressionContext,0)
def EOF(self):
return self.getToken(LTLfFormulaParserParser.EOF, 0)
def getRuleIndex(self):
return LTLfFormulaParserParser.RULE_start
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStart" ):
listener.enterStart(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStart" ):
listener.exitStart(self)
def start(self):
localctx = LTLfFormulaParserParser.StartContext(self, self._ctx, self.state)
self.enterRule(localctx, 0, self.RULE_start)
try:
self.enterOuterAlt(localctx, 1)
self.state = 50
self.expression()
self.state = 51
self.match(LTLfFormulaParserParser.EOF)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def doubleImplicationTemp(self):
return self.getTypedRuleContext(LTLfFormulaParserParser.DoubleImplicationTempContext,0)
def getRuleIndex(self):
return LTLfFormulaParserParser.RULE_expression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterExpression" ):
listener.enterExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitExpression" ):
listener.exitExpression(self)
def expression(self):
localctx = LTLfFormulaParserParser.ExpressionContext(self, self._ctx, self.state)
self.enterRule(localctx, 2, self.RULE_expression)
try:
self.enterOuterAlt(localctx, 1)
self.state = 53
self.doubleImplicationTemp()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DoubleImplicationTempContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def implicationTemp(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(LTLfFormulaParserParser.ImplicationTempContext)
else:
return self.getTypedRuleContext(LTLfFormulaParserParser.ImplicationTempContext,i)
def DOUBLEIMPLY(self, i:int=None):
if i is None:
return self.getTokens(LTLfFormulaParserParser.DOUBLEIMPLY)
else:
return self.getToken(LTLfFormulaParserParser.DOUBLEIMPLY, i)
def getRuleIndex(self):
return LTLfFormulaParserParser.RULE_doubleImplicationTemp
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDoubleImplicationTemp" ):
listener.enterDoubleImplicationTemp(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDoubleImplicationTemp" ):
listener.exitDoubleImplicationTemp(self)
def doubleImplicationTemp(self):
localctx = LTLfFormulaParserParser.DoubleImplicationTempContext(self, self._ctx, self.state)
self.enterRule(localctx, 4, self.RULE_doubleImplicationTemp)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 55
self.implicationTemp()
self.state = 60
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==LTLfFormulaParserParser.DOUBLEIMPLY:
self.state = 56
self.match(LTLfFormulaParserParser.DOUBLEIMPLY)
self.state = 57
self.implicationTemp()
self.state = 62
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ImplicationTempContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def xorTemp(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(LTLfFormulaParserParser.XorTempContext)
else:
return self.getTypedRuleContext(LTLfFormulaParserParser.XorTempContext,i)
def IMPLY(self, i:int=None):
if i is None:
return self.getTokens(LTLfFormulaParserParser.IMPLY)
else:
return self.getToken(LTLfFormulaParserParser.IMPLY, i)
def getRuleIndex(self):
return LTLfFormulaParserParser.RULE_implicationTemp
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterImplicationTemp" ):
listener.enterImplicationTemp(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitImplicationTemp" ):
listener.exitImplicationTemp(self)
def implicationTemp(self):
localctx = LTLfFormulaParserParser.ImplicationTempContext(self, self._ctx, self.state)
self.enterRule(localctx, 6, self.RULE_implicationTemp)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 63
self.xorTemp()
self.state = 68
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==LTLfFormulaParserParser.IMPLY:
self.state = 64
self.match(LTLfFormulaParserParser.IMPLY)
self.state = 65
self.xorTemp()
self.state = 70
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class XorTempContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def orTemp(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(LTLfFormulaParserParser.OrTempContext)
else:
return self.getTypedRuleContext(LTLfFormulaParserParser.OrTempContext,i)
def XOR(self, i:int=None):
if i is None:
return self.getTokens(LTLfFormulaParserParser.XOR)
else:
return self.getToken(LTLfFormulaParserParser.XOR, i)
def getRuleIndex(self):
return LTLfFormulaParserParser.RULE_xorTemp
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterXorTemp" ):
listener.enterXorTemp(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitXorTemp" ):
listener.exitXorTemp(self)
def xorTemp(self):
localctx = LTLfFormulaParserParser.XorTempContext(self, self._ctx, self.state)
self.enterRule(localctx, 8, self.RULE_xorTemp)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 71
self.orTemp()
self.state = 76
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==LTLfFormulaParserParser.XOR:
self.state = 72
self.match(LTLfFormulaParserParser.XOR)
self.state = 73
self.orTemp()
self.state = 78
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class OrTempContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def andTemp(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(LTLfFormulaParserParser.AndTempContext)
else:
return self.getTypedRuleContext(LTLfFormulaParserParser.AndTempContext,i)
def OR(self, i:int=None):
if i is None:
return self.getTokens(LTLfFormulaParserParser.OR)
else:
return self.getToken(LTLfFormulaParserParser.OR, i)
def getRuleIndex(self):
return LTLfFormulaParserParser.RULE_orTemp
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterOrTemp" ):
listener.enterOrTemp(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitOrTemp" ):
listener.exitOrTemp(self)
def orTemp(self):
localctx = LTLfFormulaParserParser.OrTempContext(self, self._ctx, self.state)
self.enterRule(localctx, 10, self.RULE_orTemp)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 79
self.andTemp()
self.state = 84
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==LTLfFormulaParserParser.OR:
self.state = 80
self.match(LTLfFormulaParserParser.OR)
self.state = 81
self.andTemp()
self.state = 86
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class AndTempContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def release(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(LTLfFormulaParserParser.ReleaseContext)
else:
return self.getTypedRuleContext(LTLfFormulaParserParser.ReleaseContext,i)
def AND(self, i:int=None):
if i is None:
return self.getTokens(LTLfFormulaParserParser.AND)
else:
return self.getToken(LTLfFormulaParserParser.AND, i)
def getRuleIndex(self):
return LTLfFormulaParserParser.RULE_andTemp
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAndTemp" ):
listener.enterAndTemp(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAndTemp" ):
listener.exitAndTemp(self)
def andTemp(self):
localctx = LTLfFormulaParserParser.AndTempContext(self, self._ctx, self.state)
self.enterRule(localctx, 12, self.RULE_andTemp)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 87
self.release()
self.state = 92
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==LTLfFormulaParserParser.AND:
self.state = 88
self.match(LTLfFormulaParserParser.AND)
self.state = 89
self.release()
self.state = 94
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ReleaseContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def weakUntil(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(LTLfFormulaParserParser.WeakUntilContext)
else:
return self.getTypedRuleContext(LTLfFormulaParserParser.WeakUntilContext,i)
def WEAKUNTIL(self, i:int=None):
if i is None:
return self.getTokens(LTLfFormulaParserParser.WEAKUNTIL)
else:
return self.getToken(LTLfFormulaParserParser.WEAKUNTIL, i)
def getRuleIndex(self):
return LTLfFormulaParserParser.RULE_release
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRelease" ):
listener.enterRelease(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRelease" ):
listener.exitRelease(self)
def release(self):
localctx = LTLfFormulaParserParser.ReleaseContext(self, self._ctx, self.state)
self.enterRule(localctx, 14, self.RULE_release)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 95
self.weakUntil()
self.state = 100
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==LTLfFormulaParserParser.WEAKUNTIL:
self.state = 96
self.match(LTLfFormulaParserParser.WEAKUNTIL)
self.state = 97
self.weakUntil()
self.state = 102
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class WeakUntilContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def strongRelease(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(LTLfFormulaParserParser.StrongReleaseContext)
else:
return self.getTypedRuleContext(LTLfFormulaParserParser.StrongReleaseContext,i)
def RELEASE(self, i:int=None):
if i is None:
return self.getTokens(LTLfFormulaParserParser.RELEASE)
else:
return self.getToken(LTLfFormulaParserParser.RELEASE, i)
def getRuleIndex(self):
return LTLfFormulaParserParser.RULE_weakUntil
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterWeakUntil" ):
listener.enterWeakUntil(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitWeakUntil" ):
listener.exitWeakUntil(self)
def weakUntil(self):
localctx = LTLfFormulaParserParser.WeakUntilContext(self, self._ctx, self.state)
self.enterRule(localctx, 16, self.RULE_weakUntil)
self._la = 0 # Token type
current CUDS object.
The result is a list if more than one CUDS object is
returned.
"""
check_arguments(Cuds, *args)
old_objects = self.get(*[arg.uid for arg in args])
if len(args) == 1:
old_objects = [old_objects]
if any(x is None for x in old_objects):
message = 'Cannot update because cuds_object not added.'
raise ValueError(message)
result = list()
for arg, old_cuds_object in zip(args, old_objects):
if arg.session is self.session:
raise ValueError("Please provide CUDS objects from a "
"different session to update()")
# Updates all instances
result.append(self._recursive_store(arg, old_cuds_object))
if len(args) == 1:
return result[0]
return result
def remove(self,
*args: Union["Cuds", UUID, URIRef],
rel: OntologyRelationship = cuba.activeRelationship,
oclass: OntologyClass = None):
"""Remove elements from the CUDS object.
Expected calls are remove(), remove(*uids/Cuds),
remove(rel), remove(oclass), remove(*uids/Cuds, rel),
remove(rel, oclass)
Args:
args (Union[Cuds, UUID, URIRef]): UUIDs of the elements to remove
or the elements themselves.
rel (OntologyRelationship, optional): Only remove cuds_object
which are connected by subclass of given relationship.
Defaults to cuba.activeRelationship.
oclass (OntologyClass, optional): Only remove elements which are a
subclass of the given ontology class. Defaults to None.
Raises:
RuntimeError: No CUDS object removed, because specified CUDS
objects are not in the container of the current CUDS object
directly.
"""
uids = [arg.uid if isinstance(arg, Cuds) else arg for arg in args]
# Get mapping from uids to connecting relationships
_, relationship_mapping = self._get(*uids, rel=rel,
oclass=oclass, return_mapping=True)
if not relationship_mapping:
raise RuntimeError("Did not remove any Cuds object, "
"because none matched your filter.")
uid_relationships = list(relationship_mapping.items())
# load all the neighbors to delete and remove inverse relationship
neighbors = self.session.load(
*[uid for uid, _ in uid_relationships])
for uid_relationship, neighbor in zip(uid_relationships,
neighbors):
uid, relationships = uid_relationship
for relationship in relationships:
self._remove_direct(relationship, uid)
neighbor._remove_inverse(relationship, self.uid)
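# Illustrative sketch (not part of the original class): typical remove()
# calls, assuming `c` is a CUDS container and `n` one of its children:
#
#   c.remove(n)                            # by object
#   c.remove(n.uid)                        # by uid
#   c.remove(rel=cuba.activeRelationship)  # all actively connected children
#   c.remove(oclass=n.oclass)              # all children of one class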
def iter(self,
*uids: Union[UUID, URIRef],
rel: OntologyRelationship = cuba.activeRelationship,
oclass: OntologyClass = None,
return_rel: bool = False) -> Iterator["Cuds"]:
"""Iterate over the contained elements.
Only iterate over objects of a given type, uid or oclass.
Expected calls are iter(), iter(*uids), iter(rel),
iter(oclass), iter(*uids, rel), iter(rel, oclass).
If uids are specified:
The position of each element in the result is determined by the
position of the corresponding uid in the given list of
uids. In this case, the result can contain None values if a
given uid is not a child of this cuds_object.
If no uids are specified:
The result is ordered randomly.
Args:
uids (Union[UUID, URIRef]): uids of the elements.
rel (OntologyRelationship, optional): Only return cuds_object
which are connected by subclass of given relationship.
Defaults to cuba.activeRelationship.
oclass (OntologyClass, optional): Only return elements which are a
subclass of the given ontology class. Defaults to None.
return_rel (bool, optional): Whether to return the connecting
relationship. Defaults to False.
Returns:
Iterator[Cuds]: The queried objects.
"""
if return_rel:
collected_uids, mapping = self._get(*uids, rel=rel, oclass=oclass,
return_mapping=True)
else:
collected_uids = self._get(*uids, rel=rel, oclass=oclass)
result = self._load_cuds_objects(collected_uids)
for r in result:
if not return_rel:
yield r
else:
yield from ((r, m) for m in mapping[r.uid])
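# Illustrative sketch (not part of the original class): iterating children,
# optionally with the connecting relationship, assuming `c` is a populated
# CUDS container:
#
#   for child in c.iter():
#       print(child.uid)
#   for child, rel in c.iter(return_rel=True):
#       print(child.uid, rel)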
def _recursive_store(self, new_cuds_object, old_cuds_object=None):
"""Recursively store cuds_object and all its children.
One-way relationships and dangling references are fixed.
Args:
new_cuds_object (Cuds): The Cuds object to store recursively.
old_cuds_object (Cuds, optional): The old version of the
CUDS object. Defaults to None.
Returns:
Cuds: The added CUDS object.
"""
# add new_cuds_object to self and replace old_cuds_object
queue = [(self, new_cuds_object, old_cuds_object)]
uids_stored = {new_cuds_object.uid, self.uid}
missing = dict()
result = None
while queue:
# Store copy in registry
add_to, new_cuds_object, old_cuds_object = queue.pop(0)
if new_cuds_object.uid in missing:
del missing[new_cuds_object.uid]
old_cuds_object = clone_cuds_object(old_cuds_object)
new_child_getter = new_cuds_object
new_cuds_object = create_from_cuds_object(new_cuds_object,
add_to.session)
# fix the connections to the neighbors
add_to._fix_neighbors(new_cuds_object, old_cuds_object,
add_to.session, missing)
result = result or new_cuds_object
for outgoing_rel in new_cuds_object._neighbors:
# do not recursively add parents
if not outgoing_rel.is_subclass_of(cuba.activeRelationship):
continue
# add children not already added
for child_uid in \
new_cuds_object._neighbors[outgoing_rel]:
if child_uid not in uids_stored:
new_child = new_child_getter.get(
child_uid, rel=outgoing_rel)
old_child = self.session.load(child_uid).first()
queue.append((new_cuds_object, new_child, old_child))
uids_stored.add(new_child.uid)
# perform the deletion
for uid in missing:
for cuds_object, rel in missing[uid]:
del cuds_object._neighbors[rel][uid]
if not cuds_object._neighbors[rel]:
del cuds_object._neighbors[rel]
return result
@staticmethod
def _fix_neighbors(new_cuds_object, old_cuds_object, session, missing):
"""Fix all the connections of the neighbors of a Cuds object.
That CUDS is going to be replaced later.
Behavior when neighbors change:
- new_cuds_object has parents, that weren't parents of old_cuds_object.
- the parents are already stored in the session of old_cuds_object.
- they are not already stored in the session of old_cuds_object.
--> Add references between new_cuds_object and the parents that are
already in the session.
--> Delete references between new_cuds_object and parents that are
not available.
- new_cuds_object has children, that weren't
children of old_cuds_object.
--> add/update them recursively.
- A parent of old_cuds_object is no longer a parent of new_cuds_object.
--> Add a relationship between that parent and the new cuds_object.
- A child of old_cuds_object is no longer a child of new_cuds_object.
--> Remove the relationship between child and new_cuds_object.
Args:
new_cuds_object (Cuds): Cuds object that will replace the old one.
old_cuds_object (Cuds, optional): Cuds object that will be
replaced by a new one. Can be None if the new Cuds object does
not replace any object.
session (Session): The session where the adjustments should take
place.
missing (Dict): dictionary that will be populated with connections
to objects, that are currently not available in the new session.
The recursive add might add it later.
"""
old_cuds_object = old_cuds_object or None
# get the parents that got parents after adding the new Cuds
new_parent_diff = get_neighbor_diff(
new_cuds_object, old_cuds_object, mode="non-active")
# get the neighbors that were neighbors
# before adding the new cuds_object
old_neighbor_diff = get_neighbor_diff(old_cuds_object,
new_cuds_object)
# Load all the cuds_objects of the session
cuds_objects = iter(session.load(
*[uid for uid, _ in
new_parent_diff + old_neighbor_diff]))
# Perform the fixes
Cuds._fix_new_parents(new_cuds_object=new_cuds_object,
new_parents=cuds_objects,
new_parent_diff=new_parent_diff,
missing=missing)
Cuds._fix_old_neighbors(new_cuds_object=new_cuds_object,
old_cuds_object=old_cuds_object,
old_neighbors=cuds_objects,
old_neighbor_diff=old_neighbor_diff)
@staticmethod
def _fix_new_parents(new_cuds_object, new_parents,
new_parent_diff: List[Tuple[Union[UUID, URIRef],
OntologyRelationship]],
missing):
"""Fix the relationships of the added Cuds objects.
Fixes relationships to the parents of the added Cuds object.
Args:
new_cuds_object (Cuds): The added Cuds object.
new_parents (Iterator[Cuds]): The new parents of the added CUDS
object.
new_parent_diff (List[Tuple[Union[UUID, URIRef], OntologyRelationship]]):
The uids of the new parents and the relations they are
connected with.
missing (dict): dictionary that will be populated with connections
to objects, that are currently not available in the new
session. The recursive_add might add it later.
"""
# Iterate over the new parents
for (parent_uid, relationship), parent in zip(new_parent_diff,
new_parents):
if relationship.is_subclass_of(cuba.activeRelationship):
continue
inverse = relationship.inverse
# Delete connection to parent if parent is not present
if parent is None:
if parent_uid not in missing:
missing[parent_uid] = list()
missing[parent_uid].append((new_cuds_object,
relationship))
continue
# Add the inverse to the parent
if inverse not in parent._neighbors:
parent._neighbors[inverse] = {}
parent._neighbors[inverse][new_cuds_object.uid] = \
new_cuds_object.oclasses
@staticmethod
def _fix_old_neighbors(new_cuds_object, old_cuds_object,
old_neighbors,
old_neighbor_diff: List[Tuple[Union[UUID, URIRef],
OntologyRelationship]]):
"""Fix the relationships of the added Cuds objects.
Fixes relationships to Cuds object that were previously neighbors.
Args:
new_cuds_object (Cuds): The added Cuds object
old_cuds_object (Cuds, optional): The Cuds object that is going
to be replaced
old_neighbors (Iterator[Cuds]): The Cuds object that were neighbors
before the replacement.
old_neighbor_diff: The uids of the old neighbors and the
relations they are connected with.
"""
# iterate over all old neighbors.
for (neighbor_uid, relationship), neighbor \
in zip(old_neighbor_diff, old_neighbors):
inverse = relationship.inverse
# delete the inverse if neighbors are children
if relationship.is_subclass_of(cuba.activeRelationship):
if inverse in neighbor._neighbors:
neighbor._remove_direct(inverse,
new_cuds_object.uid)
# if neighbor is parent, add missing relationships
else:
if relationship not in new_cuds_object._neighbors:
new_cuds_object._neighbors[relationship] = {}
for (uid, oclasses), parent in \
zip(old_cuds_object._neighbors[relationship].items(),
neighbor._neighbors):
if parent is not None:
new_cuds_object \
._neighbors[relationship][uid] = oclasses
def _add_direct(self, cuds_object, rel):
"""Add an cuds_object with a specific relationship.
Args:
cuds_object (Cuds): CUDS object to be added
rel (OntologyRelationship): relationship with the cuds_object to
add.
"""
# First element, create set
if rel not in self._neighbors.keys():
self._neighbors[rel] = \
{cuds_object.uid: cuds_object.oclasses}
# Element not already there
"""\
This implements a command line interpreter (CLI) for the concur API.
OAuth data is kept in a JSON file, for easy portability between different
programming languages.
Currently, the initialization of OAuth requires the user to copy a URL
into a web browser, then copy the URL of the resulting page back to this
script.
"""
copyright = """
Copyright (c) 2013 <NAME> <<EMAIL>>
All Rights Reserved.
Licensed under the Academic Free License (AFL 3.0)
http://opensource.org/licenses/afl-3.0
"""
from argparse import ArgumentParser
from cmd import Cmd as _Cmd
from datetime import datetime
from functools import wraps as _wraps
from pprint import pprint as _pprint
import json as _json
import re
from ValidateElements import *
try:
from concur import ConcurClient, ConcurAPIError
except ImportError:
from sys import path
from os.path import join, normpath
# Try looking in the parent of this script's directory.
path.insert(0, normpath(join(path[0], '..')))
from concur import ConcurClient, ConcurAPIError
import concur._xml2json as x2j
def mk_parser(*args):
parser = ArgumentParser(
prog='',
description='',
add_help=False,
)
head_is_string = lambda lst: isinstance(lst[0], basestring)
args = list(args)
args.append(None) # sentinel value
while args:
dest = args.pop(0)
if dest is None:
break
flags = {} if head_is_string(args) else args.pop(0)
if isinstance(dest, basestring):
dest = (dest,)
parser.add_argument(*dest, **flags)
return parser
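# Illustrative sketch (not in the original script): each positional argument
# to mk_parser is an argparse destination (or a tuple of option flags),
# optionally followed by a dict of add_argument() options, e.g.:
#
#   p = mk_parser('path', {'nargs': '+'},
#                 ('-v', '--verbose'), {'action': 'store_true'})
#   ns = p.parse_args('a b -v'.split())  # ns.path == ['a', 'b'], ns.verbose == True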
def _syntax(parser, dont_split=False, f=None):
'''Decorator that accepts an ArgumentParser, then mutates a
function that is accepts a string to instead accept a Namespace.'''
if f is None:
from copy import copy
from functools import partial
return partial(_syntax, copy(parser), dont_split)
parser.prog = f.func_name[3:]
parser.description = f.func_doc or 'No description available'
f.func_doc = parser.format_help()
@_wraps(f)
def wrapper(self, line):
args = [line] if dont_split else line.split()
return f(self, parser.parse_args(args))
return wrapper
def _get(filename, default):
from os.path import expanduser
return expanduser(filename if filename else default)
def _set(name, definition, dict, allow_creation=True):
'''Helper function to set a value in a hash'''
def show(item):
'''Helper function to display a key-value pair'''
if isinstance(item[1], basestring):
print '%s: %s' % item
if name is None:
for item in sorted(dict.items()):
show(item)
elif len(definition) == 0:
try:
show((name, dict[name]))
except KeyError:
pass
elif allow_creation or isinstance(dict.get(name), basestring):
dict[name] = ' '.join(definition)
else:
print 'unknown key %r' % name
def _unset(names, dict):
'''Helper function to remove a value from a hash'''
for name in names:
try:
del dict[name]
except KeyError:
pass
no_args = mk_parser()
filename = mk_parser('filename', {'nargs':'?'})
value = mk_parser('value', {'nargs':'?'})
define = mk_parser('name', {'nargs':'?'}, 'definition', {'nargs':'*'})
undefine = mk_parser('names', {'nargs':'+'})
key_value = lambda x: x.split('=', 1) # turn 'foo=bar' into ('foo', 'bar')
http_request = mk_parser('path', {'nargs':'+'},
('-o', '--options'), {'nargs':'*', 'type': key_value, 'default': ()})
options = mk_parser('options', {'nargs':'*', 'type': key_value, 'default': ()})
class ConcurCmd(_Cmd):
config_file = '~/.concur_cli.rc'
oauth_file = '~/concur_oauth.json'
def __init__(self, config_file=None):
'''Initializes the interpreter.'''
self.client = ConcurClient()
self.aliases = {}
self.open_files = []
self.do_load(self.config_file)
return _Cmd.__init__(self)
def onecmd(self, line):
try:
return _Cmd.onecmd(self, line)
except ConcurAPIError as error:
print "%s: %s" % (type(error).__name__, error[0])
print error[1]
except Exception as error:
print "%s: %s" % (type(error).__name__, error)
import traceback
traceback.print_exc()
def default(self, line):
'''Handle aliases.'''
parts = line.split(None, 1)
if len(parts) > 0 and parts[0] in self.aliases:
newline = self.aliases[parts[0]]
if len(parts) > 1:
newline += ' ' + parts[1]
return self.onecmd(newline)
return _Cmd.default(self, line)
# Simple commands
@_syntax(no_args)
def do_quit(self, namespace):
'''Exits the interpreter.'''
return True
@_syntax(no_args)
def do_copyright(self, namespace):
'''Displays copyright and licensing information.'''
print copyright
@_syntax(no_args)
def do_examples(self, namespace):
'''Displays example commands.'''
print '''\
These are some commands to try.
\tget_Forms
\tget_Forms FormCode=RPTINFO
\tget_Fields FormId=n5oqVNsQ$soy2ftQuy$sU9oHBDNCFPyPQr9
\tcreate_report Name=MMMM+Expenses Purpose=All+expenses+for+MMM,+YYYY Comment=Includes+Client+Meetings. UserDefinedDate=YYYY-MM-DD+HH:MM:SS.0
\tget expense expensereport v2.0 Reports -o status=ACTIVE ReportCurrency=USD
\tget expense expensereport v2.0 report <ReportID>'''
@_syntax(value, dont_split=True)
def do_note(self, namespace):
'''Comment.'''
pass
@_syntax(value, dont_split=True)
def do_echo(self, namespace):
'''Displays information to the user.'''
print namespace.value
# Commands related to aliases.
@_syntax(define)
def do_alias(self, namespace):
'''Manage aliases.'''
_set(namespace.name, namespace.definition, self.aliases)
@_syntax(undefine)
def do_unalias(self, namespace):
'''Delete aliases.'''
_unset(namespace.names, self.aliases)
@_syntax(filename, dont_split=True)
def do_save(self, namespace):
'''Save the current configuration as a list of commands.'''
config_file = _get(namespace.filename, self.config_file)
with open(config_file, 'w') as config:
for item in self.aliases.items():
print >>config, 'alias %s %s' % item
#print >>config, 'oload %s' % self.oauth_file # TODO
@_syntax(filename, dont_split=True)
def do_load(self, namespace):
'''Run commands from a file.'''
from os.path import exists, expanduser, join
config_file = _get(namespace.filename, self.config_file)
if config_file in self.open_files:
print 'already processing %s' % config_file
return
if exists(config_file):
self.open_files.append(config_file)
with open(config_file, 'r') as config:
for line in config:
self.onecmd(line)
self.open_files.pop()
# Commands related to OAuth.
@_syntax(value, dont_split=True)
def do_client_id(self, namespace):
'''Displays or sets the value.'''
if namespace.value:
self.client.client_id = namespace.value
elif self.client.client_id:
print 'client_id =', self.client.client_id
else:
print 'The client id is not set.'
@_syntax(value, dont_split=True)
def do_client_secret(self, namespace):
'''Displays or sets the value.'''
if namespace.value:
self.client.client_secret = namespace.value
elif self.client.client_secret:
print 'client_secret =', self.client.client_secret
else:
print 'The client secret is not set.'
@_syntax(value, dont_split=True)
def do_access_token(self, namespace):
'''Displays or sets the value.'''
from urlparse import urlparse, parse_qs
client = self.client
if namespace.value:
parts = urlparse(namespace.value)
code = parse_qs(parts.query)['code'][0]
client.access_token = client.get_oauth_token(code)
elif client.access_token:
print 'access_token =', client.access_token
else:
print 'The access token is not set.'
print 'Enter the URL below in a web browser and follow the instructions.'
print ' ', client.build_oauth_url()
print 'Once the web browser redirects, copy the complete URL and'
print 'use it to re-run this command.'
@_syntax(filename, dont_split=True)
def do_osave(self, namespace):
'''Saves OAuth information into a JSON file.'''
oauth_file = _get(namespace.filename, self.oauth_file)
with open(oauth_file, 'w') as fp:
_json.dump(self.client.__dict__, fp)
@_syntax(filename, dont_split=True)
def do_oload(self, namespace):
'''Loads OAuth information from a JSON file.'''
from os.path import exists, expanduser, join
oauth_file = _get(namespace.filename, self.oauth_file)
if exists(oauth_file):
with open(oauth_file, 'r') as fp:
self.client.__dict__.update(_json.load(fp))
# Commands related to the REST API.
@_syntax(http_request)
def do_get(self, namespace):
'''Issues an HTTP GET request'''
_pprint(self.client.get('/'.join(namespace.path), **dict(namespace.options)))
@_syntax(http_request)
def do_post(self, namespace):
'''Issues an HTTP POST request'''
_pprint(self.client.post('/'.join(namespace.path))) #, **namespace.options))
# Commands specific to Concur.
@_syntax(options)
def do_create_report(self, namespace):
'''Creates a new expense report'''
_pprint(self.client.post(
'expense/expensereport/v1.1/Report',
Report=validate_report_elements(namespace.options),
_xmlns='http://www.concursolutions.com/api/expense/expensereport/2011/03',
))
@_syntax(options)
def do_quickexpense(self, namespace):
'''Creates a new quick expense'''
_pprint(self.client.post(
'expense/expensereport/v1.0/quickexpense/',
Report=validate_quickexpense_elements(namespace.options),
_xmlns='http://www.concursolutions.com/api/expense/expensereport/2010/09',
))
# See also: https://developer.concur.com/api-documentation/draft-documentation/attendee-resource-draft/attendee-resource-get-draft
@_syntax(options)
def do_get_attendees_by_id(self, namespace):
'''Get attendees_by_id''' # TODO
options = validate_attendees_by_id(namespace.options)
_pprint(self.client.get(
'expense/v2.0/attendees/{attendees id}' % options,
))
# See also: https://developer.concur.com/api-documentation/draft-documentation/e-receipt-service-developer-preview/e-receipt-or-e-invoice-res
@_syntax(options)
def do_get_e_receiptandinvoice_by_id(self, namespace):
'''Get e-receiptandinvoice_by_id''' # TODO
options = validate_e_receiptandinvoice_by_id(namespace.options)
_pprint(self.client.get(
'e-receiptandinvoice/v1.0/{e-receiptandinvoice id}' % options,
))
# See also: https://developer.concur.com/api-documentation/new-portal-format/expense-report-web-service-new-format/company-card-transaction-0
@_syntax(options)
def do_get_CardCharges(self, namespace):
'''Get CardCharges''' # TODO
options = validate_CardCharges(namespace.options)
_pprint(self.client.get(
'expense/expensereport/v1.1/CardCharges' % options,
))
# See also: https://developer.concur.com/api-documentation/new-portal-format/expense-report-web-service-new-format/expense-delegator-resour-0
@_syntax(options)
def do_get_Delegators(self, namespace):
'''Get Delegators''' # TODO
options = validate_Delegators(namespace.options)
_pprint(self.client.get(
'expense/expensereport/v1.1/Delegators' % options,
))
# See also: https://developer.concur.com/api-documentation/new-portal-format/expense-report-web-service-new-format/expense-entry-attendee-r-0
@_syntax(options)
def do_get_Attendees(self, namespace):
'''Get Attendees''' # TODO
options = validate_Attendees(namespace.options)
_pprint(self.client.get(
'expense/expensereport/v1.1/report/{report id}/entry/{entry id}/Attendees' % options,
))
@_syntax(options)
def do_get_Attendees_by_id(self, namespace):
'''Get Attendees_by_id''' # TODO
options = validate_Attendees_by_id(namespace.options)
_pprint(self.client.get(
'expense/expensereport/v1.1/report/{report id}/entry/{entry id}/Attendees/{Attendees id}' % options,
))
# See also: https://developer.concur.com/api-documentation/new-portal-format/expense-report-web-service-new-format/expense-entry-attendee-r-1
@_syntax(options)
def do_post_Attendees(self, namespace):
'''Post Attendees''' # TODO
options = validate_Attendees(namespace.options)
_pprint(self.client.post(
'expense/expensereport/v1.1/report/{report id}/entry/{entry id}/Attendees' % options,
RootTag=options, # TODO
_xmlns='http://www.concursolutions.com/api/expense/expensereport/2011/03', # TODO
))
@_syntax(options)
def do_post_Attendees_1(self, namespace):
'''Post Attendees_1''' # TODO
options = validate_Attendees_1(namespace.options)
_pprint(self.client.post(
'expense/expensereport/v1.1/report/{report id}/entry/{entry id}/Attendees' % options,
RootTag=options, # TODO
_xmlns='http://www.concursolutions.com/api/expense/expensereport/2011/03', # TODO
))
# See also: https://developer.concur.com/api-documentation/new-portal-format/expense-report-web-service-new-format/expense-entry-itemizatio-0
@_syntax(options)
def do_post_Itemization(self, namespace):
'''Post Itemization''' # TODO
options = validate_Itemization(namespace.options)
_pprint(self.client.post(
'expense/expensereport/v1.1/report/{report id}/entry/{entry id}/Itemization' % options,
RootTag=options, # TODO
_xmlns='http://www.concursolutions.com/api/expense/expensereport/2011/03', # TODO
))
# See also: https://developer.concur.com/api-documentation/new-portal-format/expense-report-web-service-new-format/expense-entry-resource/exp
@_syntax(options)
def do_get_entry_by_id(self, namespace):
'''Get entry_by_id''' # TODO
options = validate_entry_by_id(namespace.options)
_pprint(self.client.get(
'expense/expensereport/v1.1/report/{report id}/entry/{entry id}' % options,
))
# See also: https://developer.concur.com/api-documentation/new-portal-format/expense-report-web-service-new-format/expense-report-header-re-0
@_syntax(options)
def do_post_report(self, namespace):
'''Post report''' # TODO
options = validate_report(namespace.options)
_pprint(self.client.post(
'expense/expensereport/v1.1/api/expense/expensereport/v1.1/report' % options,
RootTag=options, # TODO
_xmlns='http://www.concursolutions.com/api/expense/expensereport/2011/03', # TODO
))
@_syntax(options)
def do_post_batch(self, namespace):
'''Post batch''' # TODO
options = validate_batch(namespace.options)
_pprint(self.client.post(
'expense/expensereport/v1.1/api/expense/expensereport/v1.1/report/batch' % options,
RootTag=options, # TODO
_xmlns='http://www.concursolutions.com/api/expense/expensereport/2011/03', # TODO
))
# See also: https://developer.concur.com/api-documentation/new-portal-format/travel-profile-web-service-new-format/form-payment-resource/form
@_syntax(options)
def do_get_fop(self, namespace):
'''Get fop''' # TODO
options = validate_fop(namespace.options)
_pprint(self.client.get(
'travelprofile/v1.0/fop' % options,
))
# See also: https://developer.concur.com/api-documentation/new-portal-format/travel-profile-web-service-new-format/loyalty-program-resource/l
@_syntax(options)
def do_post_loyalty(self, namespace):
'''Post loyalty''' # TODO
options = validate_loyalty(namespace.options)
_pprint(self.client.post(
'travelprofile/v1.0/loyalty' % options,
RootTag=options, # TODO
_xmlns='http://www.concursolutions.com/api/expense/expensereport/2011/03', # TODO
))
@_syntax(options)
def do_post_loyalty_1(self, namespace):
'''Post loyalty_1''' # TODO
options = validate_loyalty_1(namespace.options)
_pprint(self.client.post(
'travelprofile/v1.0/loyalty' % options,
RootTag=options, # TODO
_xmlns='http://www.concursolutions.com/api/expense/expensereport/2011/03', # TODO
))
# See also: https://developer.concur.com/api-documentation/oauth-20-0
@_syntax(options)
def do_get_User(self, namespace):
'''Get User''' # TODO
options = validate_User(namespace.options)
_pprint(self.client.get(
'user/v1.0/User' % options,
))
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Base classes and helpers.
"""
from urllib.parse import (
parse_qs,
urlparse,
)
from .exceptions import ConfigurationError
from .meta import deprecated
READ_ACCESS = "READ"
WRITE_ACCESS = "WRITE"
DRIVER_BOLT = "DRIVER_BOLT"
DRIVER_NEO4J = "DRIVER_NEO4J"
SECURITY_TYPE_NOT_SECURE = "SECURITY_TYPE_NOT_SECURE"
SECURITY_TYPE_SELF_SIGNED_CERTIFICATE = "SECURITY_TYPE_SELF_SIGNED_CERTIFICATE"
SECURITY_TYPE_SECURE = "SECURITY_TYPE_SECURE"
URI_SCHEME_BOLT = "bolt"
URI_SCHEME_BOLT_SELF_SIGNED_CERTIFICATE = "bolt+ssc"
URI_SCHEME_BOLT_SECURE = "bolt+s"
URI_SCHEME_NEO4J = "neo4j"
URI_SCHEME_NEO4J_SELF_SIGNED_CERTIFICATE = "neo4j+ssc"
URI_SCHEME_NEO4J_SECURE = "neo4j+s"
URI_SCHEME_BOLT_ROUTING = "bolt+routing"
# TODO: 6.0 - remove TRUST constants
TRUST_SYSTEM_CA_SIGNED_CERTIFICATES = "TRUST_SYSTEM_CA_SIGNED_CERTIFICATES" # Default
TRUST_ALL_CERTIFICATES = "TRUST_ALL_CERTIFICATES"
SYSTEM_DATABASE = "system"
DEFAULT_DATABASE = None  # Must be a non-string hashable value
# TODO: This class is not tested
class Auth:
"""Container for auth details.
:param scheme: specifies the type of authentication, examples: "basic",
"kerberos"
:type scheme: str
:param principal: specifies who is being authenticated
:type principal: str or None
:param credentials: authenticates the principal
:type credentials: str or None
:param realm: specifies the authentication provider
:type realm: str or None
    :param parameters: extra keyword parameters passed along to the
authentication provider
:type parameters: Dict[str, Any]
"""
def __init__(self, scheme, principal, credentials, realm=None, **parameters):
self.scheme = scheme
# Neo4j servers pre 4.4 require the principal field to always be
        # present. Therefore, we transmit it even if it's an empty string.
if principal is not None:
self.principal = principal
if credentials:
self.credentials = credentials
if realm:
self.realm = realm
if parameters:
self.parameters = parameters
# For backwards compatibility
AuthToken = Auth
def basic_auth(user, password, realm=None):
"""Generate a basic auth token for a given user and password.
This will set the scheme to "basic" for the auth token.
    :param user: user name, this will set the principal
:type user: str
:param password: current password, this will set the credentials
:type password: str
:param realm: specifies the authentication provider
:type realm: str or None
:return: auth token for use with :meth:`GraphDatabase.driver` or
:meth:`AsyncGraphDatabase.driver`
:rtype: :class:`neo4j.Auth`
"""
return Auth("basic", user, password, realm)
def kerberos_auth(base64_encoded_ticket):
"""Generate a kerberos auth token with the base64 encoded ticket.
This will set the scheme to "kerberos" for the auth token.
:param base64_encoded_ticket: a base64 encoded service ticket, this will set
the credentials
:type base64_encoded_ticket: str
:return: auth token for use with :meth:`GraphDatabase.driver` or
:meth:`AsyncGraphDatabase.driver`
:rtype: :class:`neo4j.Auth`
"""
return Auth("kerberos", "", base64_encoded_ticket)
def bearer_auth(base64_encoded_token):
"""Generate an auth token for Single-Sign-On providers.
This will set the scheme to "bearer" for the auth token.
:param base64_encoded_token: a base64 encoded authentication token generated
by a Single-Sign-On provider.
:type base64_encoded_token: str
:return: auth token for use with :meth:`GraphDatabase.driver` or
:meth:`AsyncGraphDatabase.driver`
:rtype: :class:`neo4j.Auth`
"""
return Auth("bearer", None, base64_encoded_token)
def custom_auth(principal, credentials, realm, scheme, **parameters):
"""Generate a custom auth token.
:param principal: specifies who is being authenticated
:type principal: str or None
:param credentials: authenticates the principal
:type credentials: str or None
:param realm: specifies the authentication provider
:type realm: str or None
:param scheme: specifies the type of authentication
:type scheme: str or None
    :param parameters: extra keyword parameters passed along to the
authentication provider
:type parameters: Dict[str, Any]
:return: auth token for use with :meth:`GraphDatabase.driver` or
:meth:`AsyncGraphDatabase.driver`
:rtype: :class:`neo4j.Auth`
"""
return Auth(scheme, principal, credentials, realm, **parameters)
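# A minimal usage sketch (hedged): the URI, the credentials, and the
# `GraphDatabase` import below are illustrative assumptions, not part of
# this module.
#
#     from neo4j import GraphDatabase, basic_auth
#
#     driver = GraphDatabase.driver(
#         "neo4j://localhost:7687",
#         auth=basic_auth("neo4j", "password"),
#     )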
class Bookmark:
"""A Bookmark object contains an immutable list of bookmark string values.
.. deprecated:: 5.0
`Bookmark` will be removed in version 6.0.
Use :class:`Bookmarks` instead.
:param values: ASCII string values
"""
    @deprecated("Use the `Bookmarks` class instead.")
def __init__(self, *values):
if values:
bookmarks = []
for ix in values:
try:
if ix:
ix.encode("ascii")
bookmarks.append(ix)
except UnicodeEncodeError as e:
                    raise ValueError("The value {} is not ASCII".format(ix)) from e
self._values = frozenset(bookmarks)
else:
self._values = frozenset()
def __repr__(self):
"""
:return: repr string with sorted values
"""
return "<Bookmark values={{{}}}>".format(", ".join(["'{}'".format(ix) for ix in sorted(self._values)]))
def __bool__(self):
return bool(self._values)
@property
def values(self):
"""
:return: immutable list of bookmark string values
:rtype: frozenset
"""
return self._values
class Bookmarks:
"""Container for an immutable set of bookmark string values.
    Bookmarks are used to causally chain sessions.
See :meth:`Session.last_bookmarks` or :meth:`AsyncSession.last_bookmarks`
for more information.
Use addition to combine multiple Bookmarks objects::
bookmarks3 = bookmarks1 + bookmarks2
"""
def __init__(self):
self._raw_values = frozenset()
def __repr__(self):
"""
:return: repr string with sorted values
"""
return "<Bookmarks values={{{}}}>".format(
", ".join(map(repr, sorted(self._raw_values)))
)
def __bool__(self):
return bool(self._raw_values)
def __add__(self, other):
if isinstance(other, Bookmarks):
if not other:
return self
ret = self.__class__()
ret._raw_values = self._raw_values | other._raw_values
return ret
return NotImplemented
@property
def raw_values(self):
"""The raw bookmark values.
You should not need to access them unless you want to serialize
bookmarks.
:return: immutable list of bookmark string values
:rtype: frozenset[str]
"""
return self._raw_values
@classmethod
def from_raw_values(cls, values):
"""Create a Bookmarks object from a list of raw bookmark string values.
You should not need to use this method unless you want to deserialize
bookmarks.
:param values: ASCII string values (raw bookmarks)
:type values: Iterable[str]
"""
obj = cls()
bookmarks = []
for value in values:
if not isinstance(value, str):
raise TypeError("Raw bookmark values must be str. "
"Found {}".format(type(value)))
bookmarks.append(value)
obj._raw_values = frozenset(bookmarks)
return obj
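# Sketch of the addition semantics documented above: `+` unions the
# underlying frozensets (the bookmark strings here are illustrative).
#
#     bm1 = Bookmarks.from_raw_values(["bookmark-1"])
#     bm2 = Bookmarks.from_raw_values(["bookmark-2"])
#     combined = bm1 + bm2
#     assert combined.raw_values == frozenset({"bookmark-1", "bookmark-2"})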
class ServerInfo:
""" Represents a package of information relating to a Neo4j server.
"""
def __init__(self, address, protocol_version):
self._address = address
self._protocol_version = protocol_version
self._metadata = {}
@property
def address(self):
""" Network address of the remote server.
"""
return self._address
@property
def protocol_version(self):
""" Bolt protocol version with which the remote server
communicates. This is returned as a :class:`.Version`
object, which itself extends a simple 2-tuple of
(major, minor) integers.
"""
return self._protocol_version
@property
def agent(self):
""" Server agent string by which the remote server identifies
itself.
"""
return self._metadata.get("server")
@property
def connection_id(self):
""" Unique identifier for the remote server connection.
"""
return self._metadata.get("connection_id")
def update(self, metadata):
""" Update server information with extra metadata. This is
typically drawn from the metadata received after successful
connection initialisation.
"""
self._metadata.update(metadata)
class Version(tuple):
def __new__(cls, *v):
return super().__new__(cls, v)
def __repr__(self):
return "{}{}".format(self.__class__.__name__, super().__repr__())
def __str__(self):
return ".".join(map(str, self))
def to_bytes(self):
b = bytearray(4)
for i, v in enumerate(self):
if not 0 <= i < 2:
raise ValueError("Too many version components")
if isinstance(v, list):
b[-i - 1] = int(v[0] % 0x100)
b[-i - 2] = int((v[0] - v[-1]) % 0x100)
else:
b[-i - 1] = int(v % 0x100)
return bytes(b)
@classmethod
def from_bytes(cls, b):
b = bytearray(b)
if len(b) != 4:
raise ValueError("Byte representation must be exactly four bytes")
if b[0] != 0 or b[1] != 0:
raise ValueError("First two bytes must contain zero")
return Version(b[-1], b[-2])
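# Round-trip sketch for the 4-byte encoding above: the last byte carries the
# major version and the second-to-last the minor, so Version(4, 3) maps to
# 00 00 03 04.
#
#     assert Version(4, 3).to_bytes() == b"\x00\x00\x03\x04"
#     assert Version.from_bytes(b"\x00\x00\x03\x04") == Version(4, 3)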
def parse_neo4j_uri(uri):
parsed = urlparse(uri)
if parsed.username:
raise ConfigurationError("Username is not supported in the URI")
if parsed.password:
raise ConfigurationError("Password is not supported in the URI")
if parsed.scheme == URI_SCHEME_BOLT_ROUTING:
raise ConfigurationError("Uri scheme {!r} have been renamed. Use {!r}".format(parsed.scheme, URI_SCHEME_NEO4J))
elif parsed.scheme == URI_SCHEME_BOLT:
driver_type = DRIVER_BOLT
security_type = SECURITY_TYPE_NOT_SECURE
elif parsed.scheme == URI_SCHEME_BOLT_SELF_SIGNED_CERTIFICATE:
driver_type = DRIVER_BOLT
security_type = SECURITY_TYPE_SELF_SIGNED_CERTIFICATE
elif parsed.scheme == URI_SCHEME_BOLT_SECURE:
driver_type = DRIVER_BOLT
security_type = SECURITY_TYPE_SECURE
elif parsed.scheme == URI_SCHEME_NEO4J:
        driver_type = DRIVER_NEO4J
security_type = SECURITY_TYPE_NOT_SECURE
elif parsed.scheme == URI_SCHEME_NEO4J_SELF_SIGNED_CERTIFICATE:
        driver_type = DRIVER_NEO4J
security_type = SECURITY_TYPE_SELF_SIGNED_CERTIFICATE
elif parsed.scheme == URI_SCHEME_NEO4J_SECURE:
        driver_type = DRIVER_NEO4J
security_type = SECURITY_TYPE_SECURE
else:
raise ConfigurationError("URI scheme {!r} is not supported. Supported URI schemes are {}. Examples: bolt://host[:port] or neo4j://host[:port][?routing_context]".format(
parsed.scheme,
[
URI_SCHEME_BOLT,
URI_SCHEME_BOLT_SELF_SIGNED_CERTIFICATE,
URI_SCHEME_BOLT_SECURE,
URI_SCHEME_NEO4J,
URI_SCHEME_NEO4J_SELF_SIGNED_CERTIFICATE,
URI_SCHEME_NEO4J_SECURE
]
))
return driver_type, security_type, parsed
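# Example of the scheme dispatch above (sketch; the host is illustrative):
#
#     driver_type, security_type, parsed = parse_neo4j_uri(
#         "neo4j+s://example.com:7687")
#     assert driver_type == DRIVER_NEO4J
#     assert security_type == SECURITY_TYPE_SECURE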
def check_access_mode(access_mode):
if access_mode is None:
return WRITE_ACCESS
if access_mode not in (READ_ACCESS, WRITE_ACCESS):
msg = "Unsupported access mode {}".format(access_mode)
raise ConfigurationError(msg)
return access_mode
def parse_routing_context(query):
""" Parse the query portion of a URI to generate a routing context dictionary.
"""
if not query:
return {}
context = {}
parameters = parse_qs(query, True)
for key in parameters:
        value_list = parameters[key]
        if len(value_list) != 1:
            raise ConfigurationError("Duplicated query parameters with key '%s', value '%s' found in query string '%s'" % (key, value_list, query))
        value = value_list[0]
        if not value:
            raise ConfigurationError("Invalid parameters: '%s=%s' in query string '%s'." % (key, value, query))
        context[key] = value
    return context
_responds(RESULT_SUCCESS, showDict)
class CMD_ShowAddExisting(ApiCall):
_help = {"desc": "add a show in sickbeard with an existing folder",
"requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"},
"location": {"desc": "full path to the existing folder for the show"}
},
"optionalParameters": {"initial": {"desc": "initial quality for the show"},
"archive": {"desc": "archive quality for the show"},
"flatten_folders": {"desc": "flatten subfolders for the show"}
}
}
def __init__(self, args, kwargs):
# required
self.location, args = self.check_params(args, kwargs, "location", None, True, "string", [])
self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", [])
# optional
self.initial, args = self.check_params(args, kwargs, "initial", None, False, "list", ["sdtv", "sddvd", "hdtv", "rawhdtv", "fullhdtv", "hdwebdl", "fullhdwebdl", "hdbluray", "fullhdbluray", "unknown"])
self.archive, args = self.check_params(args, kwargs, "archive", None, False, "list", ["sddvd", "hdtv", "rawhdtv", "fullhdtv", "hdwebdl", "fullhdwebdl", "hdbluray", "fullhdbluray"])
self.flatten_folders, args = self.check_params(args, kwargs, "flatten_folders", str(sickbeard.FLATTEN_FOLDERS_DEFAULT), False, "bool", [])
# super, missing, help
ApiCall.__init__(self, args, kwargs)
def run(self):
""" add a show in sickbeard with an existing folder """
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid))
if showObj:
return _responds(RESULT_FAILURE, msg="An existing tvdbid already exists in the database")
if not ek.ek(os.path.isdir, self.location):
return _responds(RESULT_FAILURE, msg='Not a valid location')
tvdbName = None
tvdbResult = CMD_SickBeardSearchTVDB([], {"tvdbid": self.tvdbid}).run()
if tvdbResult['result'] == result_type_map[RESULT_SUCCESS]:
if not tvdbResult['data']['results']:
return _responds(RESULT_FAILURE, msg="Empty results returned, check tvdbid and try again")
if len(tvdbResult['data']['results']) == 1 and 'name' in tvdbResult['data']['results'][0]:
tvdbName = tvdbResult['data']['results'][0]['name']
if not tvdbName:
return _responds(RESULT_FAILURE, msg="Unable to retrieve information from tvdb")
quality_map = {'sdtv': Quality.SDTV,
'sddvd': Quality.SDDVD,
'hdtv': Quality.HDTV,
'rawhdtv': Quality.RAWHDTV,
'fullhdtv': Quality.FULLHDTV,
'hdwebdl': Quality.HDWEBDL,
'fullhdwebdl': Quality.FULLHDWEBDL,
'hdbluray': Quality.HDBLURAY,
'fullhdbluray': Quality.FULLHDBLURAY,
'unknown': Quality.UNKNOWN}
#use default quality as a failsafe
newQuality = int(sickbeard.QUALITY_DEFAULT)
iqualityID = []
aqualityID = []
if self.initial:
for quality in self.initial:
iqualityID.append(quality_map[quality])
if self.archive:
for quality in self.archive:
aqualityID.append(quality_map[quality])
if iqualityID or aqualityID:
newQuality = Quality.combineQualities(iqualityID, aqualityID)
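            # Hedged sketch of what combineQualities is assumed to return
            # (initial qualities in the low 16 bits, archive in the high 16):
            #     Quality.combineQualities([Quality.SDTV], [Quality.HDTV])
            #     == Quality.SDTV | (Quality.HDTV << 16)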
sickbeard.showQueueScheduler.action.addShow(int(self.tvdbid), self.location, SKIPPED, newQuality, int(self.flatten_folders),"fr", int(sickbeard.SUBTITLES_DEFAULT), sickbeard.AUDIO_SHOW_DEFAULT) #@UndefinedVariable
return _responds(RESULT_SUCCESS, {"name": tvdbName}, tvdbName + " has been queued to be added")
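# Example request for this command (sketch; host, port, API key, and values
# are illustrative):
#     http://localhost:8081/api/<apikey>/?cmd=show.addexisting&tvdbid=73739&location=/tv/MyShow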
class CMD_ShowAddNew(ApiCall):
_help = {"desc": "add a new show to sickbeard",
"requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"}
},
"optionalParameters": {"initial": {"desc": "initial quality for the show"},
"location": {"desc": "base path for where the show folder is to be created"},
"archive": {"desc": "archive quality for the show"},
"flatten_folders": {"desc": "flatten subfolders for the show"},
"status": {"desc": "status of missing episodes"},
"lang": {"desc": "the 2 letter lang abbreviation id"}
}
}
valid_languages = {
'el': 20, 'en': 7, 'zh': 27, 'it': 15, 'cs': 28, 'es': 16, 'ru': 22,
'nl': 13, 'pt': 26, 'no': 9, 'tr': 21, 'pl': 18, 'fr': 17, 'hr': 31,
'de': 14, 'da': 10, 'fi': 11, 'hu': 19, 'ja': 25, 'he': 24, 'ko': 32,
'sv': 8, 'sl': 30}
def __init__(self, args, kwargs):
# required
self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", [])
# optional
self.location, args = self.check_params(args, kwargs, "location", None, False, "string", [])
self.initial, args = self.check_params(args, kwargs, "initial", None, False, "list", ["sdtv", "sddvd", "hdtv", "rawhdtv", "fullhdtv", "hdwebdl", "fullhdwebdl", "hdbluray", "fullhdbluray", "unknown"])
self.archive, args = self.check_params(args, kwargs, "archive", None, False, "list", ["sddvd", "hdtv", "rawhdtv", "fullhdtv", "hdwebdl", "fullhdwebdl", "hdbluray", "fullhdbluray"])
self.flatten_folders, args = self.check_params(args, kwargs, "flatten_folders", str(sickbeard.FLATTEN_FOLDERS_DEFAULT), False, "bool", [])
self.status, args = self.check_params(args, kwargs, "status", None, False, "string", ["wanted", "skipped", "archived", "ignored"])
self.lang, args = self.check_params(args, kwargs, "lang", "fr", False, "string", self.valid_languages.keys())
# super, missing, help
ApiCall.__init__(self, args, kwargs)
def run(self):
""" add a show in sickbeard with an existing folder """
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid))
if showObj:
return _responds(RESULT_FAILURE, msg="An existing tvdbid already exists in database")
if not self.location:
if sickbeard.ROOT_DIRS != "":
root_dirs = sickbeard.ROOT_DIRS.split('|')
root_dirs.pop(0)
default_index = int(sickbeard.ROOT_DIRS.split('|')[0])
self.location = root_dirs[default_index]
else:
return _responds(RESULT_FAILURE, msg="Root directory is not set, please provide a location")
if not ek.ek(os.path.isdir, self.location):
return _responds(RESULT_FAILURE, msg="'" + self.location + "' is not a valid location")
quality_map = {'sdtv': Quality.SDTV,
'sddvd': Quality.SDDVD,
'hdtv': Quality.HDTV,
'rawhdtv': Quality.RAWHDTV,
'fullhdtv': Quality.FULLHDTV,
'hdwebdl': Quality.HDWEBDL,
'fullhdwebdl': Quality.FULLHDWEBDL,
'hdbluray': Quality.HDBLURAY,
'fullhdbluray': Quality.FULLHDBLURAY,
'unknown': Quality.UNKNOWN}
# use default quality as a failsafe
newQuality = int(sickbeard.QUALITY_DEFAULT)
iqualityID = []
aqualityID = []
if self.initial:
for quality in self.initial:
iqualityID.append(quality_map[quality])
if self.archive:
for quality in self.archive:
aqualityID.append(quality_map[quality])
if iqualityID or aqualityID:
newQuality = Quality.combineQualities(iqualityID, aqualityID)
# use default status as a failsafe
newStatus = sickbeard.STATUS_DEFAULT
if self.status:
            # convert the string status to an int
for status in statusStrings.statusStrings:
if statusStrings[status].lower() == str(self.status).lower():
self.status = status
break
#TODO: check if obsolete
            if self.status not in statusStrings.statusStrings:
raise ApiError("Invalid Status")
            # only allow the status options we want: WANTED (3), SKIPPED (5), ARCHIVED (6), IGNORED (7)
if int(self.status) not in (3, 5, 6, 7):
return _responds(RESULT_FAILURE, msg="Status prohibited")
newStatus = self.status
tvdbName = None
tvdbResult = CMD_SickBeardSearchTVDB([], {"tvdbid": self.tvdbid}).run()
if tvdbResult['result'] == result_type_map[RESULT_SUCCESS]:
if not tvdbResult['data']['results']:
return _responds(RESULT_FAILURE, msg="Empty results returned, check tvdbid and try again")
if len(tvdbResult['data']['results']) == 1 and 'name' in tvdbResult['data']['results'][0]:
tvdbName = tvdbResult['data']['results'][0]['name']
if not tvdbName:
return _responds(RESULT_FAILURE, msg="Unable to retrieve information from tvdb")
# moved the logic check to the end in an attempt to eliminate empty directory being created from previous errors
showPath = ek.ek(os.path.join, self.location, helpers.sanitizeFileName(tvdbName))
# don't create show dir if config says not to
if sickbeard.ADD_SHOWS_WO_DIR:
logger.log(u"Skipping initial creation of " + showPath + " due to config.ini setting")
else:
dir_exists = helpers.makeDir(showPath)
if not dir_exists:
logger.log(u"API :: Unable to create the folder " + showPath + ", can't add the show", logger.ERROR)
return _responds(RESULT_FAILURE, {"path": showPath}, "Unable to create the folder " + showPath + ", can't add the show")
else:
helpers.chmodAsParent(showPath)
sickbeard.showQueueScheduler.action.addShow(int(self.tvdbid), showPath, newStatus, newQuality, int(self.flatten_folders), self.lang, int(sickbeard.SUBTITLES_DEFAULT), sickbeard.AUDIO_SHOW_DEFAULT) #@UndefinedVariable
return _responds(RESULT_SUCCESS, {"name": tvdbName}, tvdbName + " has been queued to be added")
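# Example request for this command (sketch; values are illustrative; list
# parameters such as "initial" are assumed to be pipe-separated):
#     http://localhost:8081/api/<apikey>/?cmd=show.addnew&tvdbid=73739&status=wanted&lang=en&initial=sdtv|hdtv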
class CMD_ShowCache(ApiCall):
_help = {"desc": "check sickbeard's cache to see if the banner or poster image for a show is valid",
"requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"}
}
}
def __init__(self, args, kwargs):
# required
self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", [])
# optional
# super, missing, help
ApiCall.__init__(self, args, kwargs)
def run(self):
""" check sickbeard's cache to see if the banner or poster image for a show is valid """
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid))
if not showObj:
return _responds(RESULT_FAILURE, msg="Show not found")
#TODO: catch if cache dir is missing/invalid.. so it doesn't break show/show.cache
#return {"poster": 0, "banner": 0}
cache_obj = image_cache.ImageCache()
has_poster = 0
has_banner = 0
if ek.ek(os.path.isfile, cache_obj.poster_path(showObj.tvdbid)):
has_poster = 1
if ek.ek(os.path.isfile, cache_obj.banner_path(showObj.tvdbid)):
has_banner = 1
return _responds(RESULT_SUCCESS, {"poster": has_poster, "banner": has_banner})
class CMD_ShowDelete(ApiCall):
_help = {"desc": "delete a show in sickbeard",
"requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"},
}
}
def __init__(self, args, kwargs):
# required
self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", [])
# optional
# super, missing, help
ApiCall.__init__(self, args, kwargs)
def run(self):
""" delete a show in sickbeard """
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid))
if not showObj:
return _responds(RESULT_FAILURE, msg="Show not found")
if sickbeard.showQueueScheduler.action.isBeingAdded(showObj) or sickbeard.showQueueScheduler.action.isBeingUpdated(showObj): #@UndefinedVariable
return _responds(RESULT_FAILURE, msg="Show can not be deleted while being added or updated")
showObj.deleteShow()
return _responds(RESULT_SUCCESS, msg=str(showObj.name) + " has been deleted")
class CMD_ShowGetQuality(ApiCall):
_help = {"desc": "get quality setting for a show in sickbeard",
"requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"}
}
}
def __init__(self, args, kwargs):
# required
self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", [])
# optional
# super, missing, help
ApiCall.__init__(self, args, kwargs)
def run(self):
""" get quality setting for a show in sickbeard """
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid))
if not showObj:
return _responds(RESULT_FAILURE, msg="Show not found")
anyQualities, bestQualities = _mapQuality(showObj.quality)
return _responds(RESULT_SUCCESS, {"initial": anyQualities, "archive": bestQualities})
class CMD_ShowGetPoster(ApiCall):
_help = {"desc": "get the poster stored for a show in sickbeard",
"requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"}
}
}
def __init__(self, args, kwargs):
# required
self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", [])
# optional
# super, missing, help
ApiCall.__init__(self, args, kwargs)
def run(self):
""" get the poster for a show in sickbeard """
return {'outputType': 'image', 'image': webserve.WebInterface().showPoster(self.tvdbid, 'poster')}
class CMD_ShowGetBanner(ApiCall):
_help = {"desc": "get the banner stored for a show in sickbeard",
Keys or strings
rpc: datastore.RPC to use for this request.
Raises:
TransactionFailedError, if the Delete could not be committed.
"""
rpc = GetRpcFromKwargs(kwargs)
keys, multiple = NormalizeAndTypeCheckKeys(keys)
if multiple and not keys:
return
req = datastore_pb.DeleteRequest()
req.key_list().extend([key._Key__reference for key in keys])
tx = _MaybeSetupTransaction(req, keys)
try:
_MakeSyncCall(
'datastore_v3', 'Delete', req, datastore_pb.DeleteResponse(), rpc)
except apiproxy_errors.ApplicationError, err:
raise _ToDatastoreError(err)
class Entity(dict):
"""A datastore entity.
Includes read-only accessors for app id, kind, and primary key. Also
provides dictionary-style access to properties.
"""
def __init__(self, kind, parent=None, _app=None, name=None, id=None,
unindexed_properties=[], namespace=None, **kwds):
"""Constructor. Takes the kind and transaction root, which cannot be
changed after the entity is constructed, and an optional parent. Raises
BadArgumentError or BadKeyError if kind is invalid or parent is not an
existing Entity or Key in the datastore.
Args:
# this entity's kind
kind: string
# if provided, this entity's parent. Its key must be complete.
parent: Entity or Key
# if provided, this entity's name.
name: string
# if provided, this entity's id.
id: integer
# if provided, a sequence of property names that should not be indexed
# by the built-in single property indices.
unindexed_properties: list or tuple of strings
namespace: string
# if provided, overrides the default namespace_manager setting.
"""
ref = entity_pb.Reference()
_app = datastore_types.ResolveAppId(_app)
ref.set_app(_app)
_namespace = kwds.pop('_namespace', None)
if kwds:
raise datastore_errors.BadArgumentError(
'Excess keyword arguments ' + repr(kwds))
if namespace is None:
namespace = _namespace
elif _namespace is not None:
raise datastore_errors.BadArgumentError(
"Must not set both _namespace and namespace parameters.")
datastore_types.ValidateString(kind, 'kind',
datastore_errors.BadArgumentError)
if parent is not None:
parent = _GetCompleteKeyOrError(parent)
if _app != parent.app():
raise datastore_errors.BadArgumentError(
" %s doesn't match parent's app %s" %
(_app, parent.app()))
if namespace is None:
namespace = parent.namespace()
elif namespace != parent.namespace():
raise datastore_errors.BadArgumentError(
" %s doesn't match parent's namespace %s" %
(namespace, parent.namespace()))
ref.CopyFrom(parent._Key__reference)
namespace = datastore_types.ResolveNamespace(namespace)
datastore_types.SetNamespace(ref, namespace)
last_path = ref.mutable_path().add_element()
last_path.set_type(kind.encode('utf-8'))
if name is not None and id is not None:
raise datastore_errors.BadArgumentError(
"Cannot set both name and id on an Entity")
if name is not None:
datastore_types.ValidateString(name, 'name')
last_path.set_name(name.encode('utf-8'))
if id is not None:
datastore_types.ValidateInteger(id, 'id')
last_path.set_id(id)
self.set_unindexed_properties(unindexed_properties)
self.__key = Key._FromPb(ref)
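  # Hedged usage sketch (kind, name, and property values are illustrative):
  #     e = Entity('Person', name='alice')
  #     e['age'] = 30
  #     e['tags'] = ['a', 'b']   # a multi-valued property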
def app(self):
"""Returns the name of the application that created this entity, a
string or None if not set.
"""
return self.__key.app()
def namespace(self):
"""Returns the namespace of this entity, a string or None.
"""
return self.__key.namespace()
def kind(self):
"""Returns this entity's kind, a string.
"""
return self.__key.kind()
def is_saved(self):
"""Returns if this entity has been saved to the datastore
"""
last_path = self.__key._Key__reference.path().element_list()[-1]
return ((last_path.has_name() ^ last_path.has_id()) and
self.__key.has_id_or_name())
def key(self):
"""Returns this entity's primary key, a Key instance.
"""
return self.__key
def parent(self):
"""Returns this entity's parent, as a Key. If this entity has no parent,
returns None.
"""
return self.key().parent()
def entity_group(self):
"""Returns this entity's entity group as a Key.
    Note that the returned Key will be incomplete if this is a root entity
and its key is incomplete.
"""
return self.key().entity_group()
def unindexed_properties(self):
"""Returns this entity's unindexed properties, as a frozenset of strings."""
return getattr(self, '_Entity__unindexed_properties', [])
def set_unindexed_properties(self, unindexed_properties):
unindexed_properties, multiple = NormalizeAndTypeCheck(unindexed_properties, basestring)
if not multiple:
raise datastore_errors.BadArgumentError(
'unindexed_properties must be a sequence; received %s (a %s).' %
(unindexed_properties, typename(unindexed_properties)))
for prop in unindexed_properties:
datastore_types.ValidateProperty(prop, None)
self.__unindexed_properties = frozenset(unindexed_properties)
def __setitem__(self, name, value):
"""Implements the [] operator. Used to set property value(s).
If the property name is the empty string or not a string, raises
BadPropertyError. If the value is not a supported type, raises
BadValueError.
"""
datastore_types.ValidateProperty(name, value)
dict.__setitem__(self, name, value)
def setdefault(self, name, value):
"""If the property exists, returns its value. Otherwise sets it to value.
If the property name is the empty string or not a string, raises
BadPropertyError. If the value is not a supported type, raises
BadValueError.
"""
datastore_types.ValidateProperty(name, value)
return dict.setdefault(self, name, value)
def update(self, other):
"""Updates this entity's properties from the values in other.
If any property name is the empty string or not a string, raises
BadPropertyError. If any value is not a supported type, raises
BadValueError.
"""
for name, value in other.items():
self.__setitem__(name, value)
def copy(self):
"""The copy method is not supported.
"""
raise NotImplementedError('Entity does not support the copy() method.')
def ToXml(self):
"""Returns an XML representation of this entity. Atom and gd:namespace
properties are converted to XML according to their respective schemas. For
more information, see:
http://www.atomenabled.org/developers/syndication/
http://code.google.com/apis/gdata/common-elements.html
This is *not* optimized. It shouldn't be used anywhere near code that's
performance-critical.
"""
xml = u'<entity kind=%s' % saxutils.quoteattr(self.kind())
if self.__key.has_id_or_name():
xml += ' key=%s' % saxutils.quoteattr(str(self.__key))
xml += '>'
if self.__key.has_id_or_name():
xml += '\n <key>%s</key>' % self.__key.ToTagUri()
properties = self.keys()
if properties:
properties.sort()
xml += '\n ' + '\n '.join(self._PropertiesToXml(properties))
xml += '\n</entity>\n'
return xml
def _PropertiesToXml(self, properties):
""" Returns a list of the XML representations of each of the given
properties. Ignores properties that don't exist in this entity.
Arg:
properties: string or list of strings
Returns:
list of strings
"""
xml_properties = []
for propname in properties:
if not self.has_key(propname):
continue
propname_xml = saxutils.quoteattr(propname)
values = self[propname]
if not isinstance(values, list):
values = [values]
proptype = datastore_types.PropertyTypeName(values[0])
proptype_xml = saxutils.quoteattr(proptype)
escaped_values = self._XmlEscapeValues(propname)
open_tag = u'<property name=%s type=%s>' % (propname_xml, proptype_xml)
close_tag = u'</property>'
xml_properties += [open_tag + val + close_tag for val in escaped_values]
return xml_properties
def _XmlEscapeValues(self, property):
""" Returns a list of the XML-escaped string values for the given property.
Raises an AssertionError if the property doesn't exist.
Arg:
property: string
Returns:
list of strings
"""
assert self.has_key(property)
xml = []
values = self[property]
if not isinstance(values, list):
values = [values]
for val in values:
if hasattr(val, 'ToXml'):
xml.append(val.ToXml())
else:
if val is None:
xml.append('')
else:
xml.append(saxutils.escape(unicode(val)))
return xml
def ToPb(self):
"""Converts this Entity to its protocol buffer representation.
Returns:
entity_pb.Entity
"""
return self._ToPb(False)
def _ToPb(self, mark_key_as_saved=True):
"""Converts this Entity to its protocol buffer representation. Not
intended to be used by application developers.
Returns:
entity_pb.Entity
"""
pb = entity_pb.EntityProto()
pb.mutable_key().CopyFrom(self.key()._ToPb())
last_path = pb.key().path().element_list()[-1]
if mark_key_as_saved and last_path.has_name() and last_path.has_id():
last_path.clear_id()
group = pb.mutable_entity_group()
if self.__key.has_id_or_name():
root = pb.key().path().element(0)
group.add_element().CopyFrom(root)
properties = self.items()
properties.sort()
    for (name, values) in properties:
      prop_pbs = datastore_types.ToPropertyPb(name, values)
      if not isinstance(prop_pbs, list):
        prop_pbs = [prop_pbs]
      for prop in prop_pbs:
if (prop.meaning() in datastore_types._RAW_PROPERTY_MEANINGS or
name in self.unindexed_properties()):
pb.raw_property_list().append(prop)
else:
pb.property_list().append(prop)
if pb.property_size() > _MAX_INDEXED_PROPERTIES:
raise datastore_errors.BadRequestError(
'Too many indexed properties for entity %r.' % self.key())
return pb
@staticmethod
def FromPb(pb):
"""Static factory method. Returns the Entity representation of the
given protocol buffer (datastore_pb.Entity).
Args:
pb: datastore_pb.Entity or str encoding of a datastore_pb.Entity
Returns:
Entity: the Entity representation of pb
"""
if isinstance(pb, str):
real_pb = entity_pb.EntityProto()
real_pb.ParseFromString(pb)
pb = real_pb
return Entity._FromPb(pb, require_valid_key=False)
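  # Round-trip sketch (assumes `e` is an Entity with a complete key):
  #     pb = e.ToPb()
  #     e2 = Entity.FromPb(pb)
  #     assert e2.kind() == e.kind()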
@staticmethod
def _FromPb(pb, require_valid_key=True):
"""Static factory method. Returns the Entity representation of the
given protocol buffer (datastore_pb.Entity). Not intended to be used by
application developers.
The Entity PB's key must be complete. If it isn't, an AssertionError is
raised.
Args:
# a protocol buffer Entity
pb: datastore_pb.Entity
Returns:
# the Entity representation of the argument
Entity
"""
assert pb.key().path().element_size() > 0
last_path = pb.key().path().element_list()[-1]
if require_valid_key:
assert last_path.has_id() ^ last_path.has_name()
if last_path.has_id():
assert last_path.id() != 0
else:
assert last_path.has_name()
assert last_path.name()
unindexed_properties = [p.name() for p in pb.raw_property_list()]
if pb.key().has_name_space():
namespace = pb.key().name_space()
else:
namespace = ''
e = Entity(unicode(last_path.type().decode('utf-8')),
unindexed_properties=unindexed_properties,
_app=pb.key().app(), namespace=namespace)
ref = e.__key._Key__reference
ref.CopyFrom(pb.key())
temporary_values = {}
for prop_list in (pb.property_list(), pb.raw_property_list()):
for prop in prop_list:
try:
value = datastore_types.FromPropertyPb(prop)
        except (AssertionError, AttributeError, TypeError, ValueError), err:
raise datastore_errors.Error(
'Property %s is corrupt in the datastore:\n%s' %
(prop.name(), traceback.format_exc()))
multiple = prop.multiple()
if multiple:
value = [value]
name = prop.name()
cur_value = temporary_values.get(name)
if cur_value is None:
temporary_values[name] = value
elif not multiple or not isinstance(cur_value, list):
raise datastore_errors.Error(
'Property %s is corrupt in the datastore; it has multiple '
'values, but is not marked as multiply valued.' % name)
else:
cur_value.extend(value)
'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.80, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 90%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.90, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 100%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 1.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# ],
# {},
# ),
# 'ggr2-!giraffe-azure': (
# [
# {'label': 'Giraffe ! NMS 0%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 10%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.10, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 20%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.20, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 30%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.30, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 40%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.40, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 50%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.50, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 60%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.60, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 70%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.70, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 80%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.80, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 90%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.90, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 100%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 1.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# ],
# {},
# ),
# 'ggr2-!zebra-azure': (
# [
# {'label': 'Zebra ! NMS 0%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 10%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.10, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 20%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.20, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 30%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.30, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 40%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.40, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 50%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.50, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 60%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.60, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 70%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.70, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 80%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.80, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 90%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.90, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 100%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 1.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# ],
# {},
# ),
# 'lynx': (
# [
# {'label': 'Lynx NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['lynx'])},
# {'label': 'Lynx NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['lynx'])},
# {'label': 'Lynx NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['lynx'])},
# {'label': 'Lynx NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['lynx'])},
# {'label': 'Lynx NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['lynx'])},
# {'label': 'Lynx NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['lynx'])},
# {'label': 'Lynx NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['lynx'])},
# {'label': 'Lynx NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['lynx'])},
# {'label': 'Lynx NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['lynx'])},
# {'label': 'Lynx NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['lynx'])},
# {'label': 'Lynx NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['lynx'])},
# ],
# {},
# ),
# 'jaguar': (
# [
# {'label': 'Jaguar NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['jaguar'])},
# {'label': 'Jaguar NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['jaguar'])},
# {'label': 'Jaguar NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['jaguar'])},
# {'label': 'Jaguar NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['jaguar'])},
# {'label': 'Jaguar NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['jaguar'])},
# {'label': 'Jaguar NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['jaguar'])},
# {'label': 'Jaguar NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['jaguar'])},
# {'label': 'Jaguar NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['jaguar'])},
# {'label': 'Jaguar NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['jaguar'])},
# {'label': 'Jaguar NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['jaguar'])},
# {'label': 'Jaguar NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['jaguar'])},
# ],
# {},
# ),
# '!jaguar': (
# [
# {'label': 'Jaguar NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['!jaguar'])},
# {'label': 'Jaguar NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['!jaguar'])},
# {'label': 'Jaguar NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['!jaguar'])},
# ## Importing necessary modules
#Our old friends
import numpy as np
import tensorflow as tf
#Our Model class
from Model import *
#Collect datasets
from datasets import *
#Set random seed
np.random.seed(912)
def train_model(data, labels, params):
"""Train a model.
Args:
data (numpy array): all of the data including the test set
labels (numpy array): all of the corresponding labels
params (dict): a dictionary of model parameters
"""
    #reset the graph first, then set the graph-level seed; seeding before the
    #reset would be discarded along with the old graph
    tf.reset_default_graph()
    tf.set_random_seed(912)
#Construct the model graph
model = Model.get_model(data, labels, params)
with tf.Session() as sess:
#Set the tensorflow random seed
tf.set_random_seed(912)
model.train(sess)
def get_num_model_params(data, labels, params):
"""
Prints the number of trainable parameters in a model.
Args:
data (np array) : dataset to construct a model with
labels (np array) : labels to construct a model with
params (dict) : dictionary for the parameters defining the model.
"""
#reset_graph
tf.reset_default_graph()
#Build the model
model = Model.get_model(data, labels, params)
with tf.Session() as sess:
#initialize variables
sess.run(tf.global_variables_initializer())
total_parameters = 0
print("Filename {}".format(params['filename']))
for variable in tf.trainable_variables():
# shape is an array of tf.Dimension
shape = variable.get_shape()
            variable_parameters = 1
            for dim in shape:
                variable_parameters *= dim.value
            total_parameters += variable_parameters
print("Total params: {}".format(total_parameters))
def train_single_models(total_images, total_labels, image_shape, test_length, aug_params, data_dir, base_name):
"""Trains a collection of individual models according to hardcoded parameters.
Args:
total_images (numpy array): all images including the test set
total_labels (numpy array): all labels for the images
image_shape (length 3 list): the shape of the images
test_length (int): number of samples to be set aside for testing
aug_params (dict): dictionary of data augmentation settings
data_dir (str): directory to store data
base_name (str): filename prefix to indicate the dataset
"""
#Testing parameters
params = { 'act': tf.nn.relu,
'data_dir' : os.path.join(data_dir, base_name, 'single'),
'start_filter' : 2,
'num_bricks': 3,
'num_times' : 2,
'batch_size' : 128,
'epochs' : 11,
'image_shape' : image_shape,
'learning_rate' : 0.5**8,
'data_augmentation' : None,
'truncate_data' : True,
'valid_num' : test_length,
'shuffle' : True,
'filename' : base_name + '_single_' + 'test',
'regularization_rate' : 0.1**4,
'model_type' : 'single'}
#Run test single model
#train_model(total_images, total_labels, params)
if base_name != 'notMNIST':
params['epochs'] = 80
else:
params['epochs'] = 40
params['truncate_data'] = False
params['data_augmentation'] = aug_params
#ST model parameters
params['start_filter'] = 8
params['num_bricks'] = 3
params['num_times'] = 1
params['filename'] = base_name + '_single_' + 'shallow_and_thin'
#Run shallow and thin single model
train_model(total_images, total_labels, params)
#New medium model params
params['start_filter'] = 16
params['num_bricks'] = 3
params['num_times'] = 2
params['filename'] = base_name + '_single_' + 'medium_'
#Run medium model
train_model(total_images, total_labels, params)
#New deep model params
params['num_times'] = 3
params['filename'] = base_name + '_single_'+ '3_times_'
train_model(total_images, total_labels, params)
def train_collab_models(total_images, total_labels, image_shape, test_length, aug_params, data_dir, base_name):
"""Trains a collection of collaborative and traditional ensembles according to hardcoded parameters
Args:
total_images (numpy array): all images including the test set
total_labels (numpy array): all labels for the images
image_shape (length 3 list): the shape of the images
test_length (int): number of samples to be set aside for testing
aug_params (dict): dictionary of data augmentation settings
data_dir (str): directory to store data
base_name (str): filename prefix to indicate the dataset
"""
#Testing parameters
test_params = { 'act': tf.nn.relu,
'data_dir' : os.path.join(data_dir, base_name, 'collab'),
'start_filter' : 4,
'num_bricks': 3,
'num_times' : 2,
'batch_size' : 128,
'epochs' : 11,
'image_shape' : image_shape,
'learning_rate' : 0.5**8,
'data_augmentation' : None,
'truncate_data' : True,
'valid_num' : test_length,
'shuffle' : True,
'filename' : base_name + '_collab_' + 'test',
'regularization_rate' : 0.1**4,
'model_type' : 'collab'}
#No data augmentation on test. The point is to check convergence and with a truncated dataset augmentation will
#hurt convergence
params = test_params.copy() #has the same general settings even if we don't use most of the parameters because
#they are passed in the list of params
# params['list_of_params'] = [test_params]*3
# train_model(total_images, total_labels, params)
st_params = params.copy()
st_params['start_filter'] = 8
st_params['num_bricks'] = 3
st_params['num_times'] = 1
medium_params = st_params.copy()
#New medium model params
medium_params['start_filter'] = 16
medium_params['num_bricks'] = 3
medium_params['num_times'] = 2
#Add prefix to prevent overwriting log files
# params['filename'] = base_name + '_collab_' + 'truncated_test_ensemble_0.1x0'
params['list_of_params'] = [st_params, medium_params]
#Mixed collab
if base_name != 'notMNIST':
params['epochs'] = 80
else:
params['epochs'] = 40
#Test run with truncated data
#train_model(total_images, total_labels, params)
# params['collab_method'] = 'L2'
# params['collab_weight'] = 0.1**2
# params['filename'] = base_name + '_collab_' + 'truncated_test_L2_0.1x2'
# train_model(total_images, total_labels, params)
#ACTUAL RUN
#First real run
params['truncate_data'] = False
params['data_augmentation'] = aug_params
#Run ensemble
params['collab_method'] = 'None'
params['collab_weight'] = 0
params['filename'] = base_name + '_collab_' + 'shallow_and_thin_and_medium_ensemble'
print("Training Ensemble")
train_model(total_images, total_labels, params)
#Run collab models
for collab_exp in (0,1,2,4):
for collab_method in ("cross_ent", "L1", "L2"):
params['collab_weight'] = 0.1**collab_exp
params['collab_method'] = collab_method
# rename for compatibility with filename parser
if collab_method == 'cross_ent':
collab_method = 'CE'
params['filename'] = base_name + '_collab_' + 'shallow_and_thin_and_medium_' + collab_method + '_0.1x' + str(collab_exp)
print("Training CollabEnsemble")
train_model(total_images, total_labels, params)
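# For example, following the string construction above, collab_exp=2 with
# collab_method='cross_ent' on MNIST trains with collab_weight 0.01 and logs
# to 'MNIST_collab_shallow_and_thin_and_medium_CE_0.1x2'.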
def train_models(total_images, total_labels, image_shape, test_length, aug_params, data_dir, base_name):
"""This trains the single, ensemble, and collaborative models according to the passed values."""
# train_single_models(total_images, total_labels, image_shape, test_length, aug_params, data_dir, base_name)
train_collab_models(total_images, total_labels, image_shape, test_length, aug_params, data_dir, base_name)
def train_mnist(data_dir):
"""Train on the MNIST dataset and generate sample image file.
Args:
data_dir(str) : directory to store the log data.
"""
#set data augmentation params
aug_params = {
'rotation_range' : 00.0, #\pm rotation in degrees
'width_shift_range' : 0.125,#\pmFraction of image to shift
'height_shift_range' : 0.125,#\pmFraction of image to shift
'shear_range' : 0.0, #\pm Shear angle in radians
'zoom_range' : 0.0,
'channel_shift_range' : 0.0,#\pm uniform shift in values in each channel
'fill_mode' : 'nearest',
'horizontal_flip' : False,
'vertical_flip' : False,
'rescale' : None}
total_images, total_labels, image_shape, test_length = get_mnist()
label_names = { 0 : '0', 1 : '1', 2 : '2', 3 : '3', 4 : '4', 5 : '5', 6 : '6', 7 : '7', 8 : '8', 9 : '9'}
show_data(total_images, total_labels, label_names, shape = image_shape, title = "MNIST_Sample_Images",
aug_params = None, output_dir = os.path.join('..', 'latex', 'images'))
train_models(total_images, total_labels, image_shape, test_length, None, data_dir, 'MNIST')
def train_notmnist(data_dir):
"""Train on the notMNIST dataset and generate sample image file.
Args:
data_dir(str) : directory to store the log data.
"""
#set data augmentation params
aug_params = {
'rotation_range' : 00.0, #\pm rotation in degrees
'width_shift_range' : 0.125,#\pmFraction of image to shift
'height_shift_range' : 0.125,#\pmFraction of image to shift
'shear_range' : 0.0, #\pm Shear angle in radians
'zoom_range' : 0.0,
'channel_shift_range' : 0.0,#\pm uniform shift in values in each channel
'fill_mode' : 'nearest',
'horizontal_flip' : False,
'vertical_flip' : False,
'rescale' : None}
total_images, total_labels, image_shape, test_length = get_notmnist()
label_names = { 0 : 'A', 1 : 'B', 2 : 'C', 3 : 'D', 4 : 'E', 5 : 'F', 6 : 'G', 7 : 'H', 8 : 'I', 9 : 'J'}
show_data(total_images, total_labels, label_names, shape = image_shape, title = "notMNIST_Sample_Images",
aug_params = None, output_dir = os.path.join('..', 'latex', 'images'))
train_models(total_images, total_labels, image_shape, test_length, None, data_dir, 'notMNIST')
def train_cifar10(data_dir):
"""Train on the cifar10 dataset
Args:
data_dir(str) : directory to store the log data.
"""
#set data augmentation params
aug_params = {
'rotation_range' : 00.0, #\pm rotation in degrees
'width_shift_range' : 0.125,#\pmFraction of image to shift
'height_shift_range' : 0.125,#\pmFraction of image to shift
'shear_range' : 0.0, #\pm Shear angle in radians
'zoom_range' : 0.0,
'channel_shift_range' : 0.0,#\pm uniform shift in values in each channel
'fill_mode' : 'nearest',
'horizontal_flip' : True,
'vertical_flip' : False,
'rescale' : None}
    label_names = { 0 : 'airplane', 1 : 'automobile', 2 : 'bird', 3 : 'cat', 4 : 'deer', 5 : 'dog', 6 : 'frog', 7 : 'horse', 8 : 'ship', 9 : 'truck'}
in h_pos_goal:
h_filter_goal[(pos - 1) * atom_num:pos * atom_num] = True
del pos
goal_cont_h = np.logical_and(goal_contacts, h_filter_goal)
h_pos_init = parse_top_for_h(topol_file_init)
h_filter_init = np.zeros(atom_num * atom_num, dtype=np.bool)
for pos in h_pos_init:
h_filter_init[(pos - 1) * atom_num:pos * atom_num] = True
del pos
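# Indexing sketch for the filters above: contacts are stored as a flattened
# atom_num x atom_num boolean matrix, so a 1-based hydrogen position `pos`
# marks its entire row, i.e. the slice [(pos - 1) * atom_num, pos * atom_num).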
# usually h_filter_init is the same as h_filter_goal since they share same force field
if np.sum(np.logical_xor(h_filter_init, h_filter_goal)) > 0:
print('Warning, H positions in init and goal are different')
del h_pos_goal, h_pos_init
cpu_pool = mp.Pool(mp.cpu_count())
goal_contacts_and_sum = np.sum(goal_contacts)
goal_contacts_xor_sum = get_native_contacts(goal_prot_only, [goal_xtc], ndx_file_goal, goal_contacts,
atom_num, cont_dist, np.logical_xor, pool=cpu_pool)[0]
if goal_contacts_xor_sum != 0:
raise Exception('goal.gro XOR goal.xtc is not 0 - they are different')
else:
del goal_contacts_xor_sum
goal_contacts_and_h_sum = get_native_contacts(goal_prot_only, [goal_xtc], ndx_file_goal, goal_cont_h,
atom_num, cont_dist, np.logical_and, pool=cpu_pool)[0]
# nat_contacts = np.sum(logic_fun(goal_contacts, init_contacts))
if not os.path.exists(init_xtc) or not os.path.exists(goal_xtc) or \
not os.path.exists(topol_file_init) or not os.path.exists(ndx_file_init):
    print('Copy initial and final state into prot_dir')
    exit('Copy initial and final state into prot_dir')
work_dir = os.path.join(os.getcwd(), 'work_dir') # either /dev/shm or os.getcwd()
# counter = 0
# work_dir = os.path.join('/dev/shm', 'work_dir_{}'.format(counter)) # either /dev/shm or os.getcwd()
# while os.path.exists(work_dir):
# counter += 1
# work_dir = os.path.join('/dev/shm', 'work_dir_{}'.format(counter)) # either /dev/shm or os.getcwd()
# del counter
if not os.path.exists(work_dir):
os.makedirs(work_dir)
print('Work dir: ', work_dir)
if not os.path.exists(past_dir):
os.makedirs(past_dir)
print('Past dir: ', past_dir)
simulation_temp = 350
print('Information about the protein:\nIt contains {} atoms and {} hydrogen contacts'
      '\n{} phipsi angles are going to be used for the angle distance'
'\nthere are {} protein-protein contacts with distance {}A\nand {} protein-protein-h contacts with distance {}A.'
'\nSimulation temp is set to {}K'
''.format(atom_num, np.sum(goal_cont_h), angl_num, goal_contacts_and_sum, cont_dist,
goal_contacts_and_h_sum, cont_dist, simulation_temp))
seed_start = 0
seed_list = list(range(seed_start, tot_seeds+seed_start))
del seed_start
seed_dirs = get_seed_dirs(work_dir, seed_list, simulation_temp)
# rm_seed_dirs(seed_dirs)
if os.path.exists(os.path.join(os.getcwd(), 'local.comp')):
use_mpi = False
else:
use_mpi = True
scheduler = False
if scheduler:
use_mpi = True
core_map = 16
nomp = 2
hostnames = False
else:
nomp = False
if use_mpi:
hostnames, core_map = parse_hostnames(tot_seeds)
else:
cpu_map = create_core_mapping(nseeds=tot_seeds)
hostnames = False
metric_names = ['BBRMSD', 'AARMSD', 'ANGL', 'AND_H', 'AND', 'XOR']
metric_allowed_sc = {'BBRMSD': 15, 'AARMSD': 20, 'ANGL': 10, 'AND_H': 5, 'AND': 5, 'XOR': 10}
metrics_sequence = ['AARMSD', 'BBRMSD']
metric_rules = define_rules()
cur_metric = 0
cur_metric_name = metrics_sequence[cur_metric]
guiding_metric = 0  # main metric to track global progress
num_metrics = len(metric_names)
an_file = 'ambient.noise'
err_mult = 0.8
tol_error = check_precomputed_noize(an_file)
noize_file = None
if tol_error is None:
goal_nz = os.path.join(prot_dir, 'folded_for_noise.gro')
if hostnames:
noize_file = gen_file_for_amb_noize(work_dir, seed_list, seed_dirs, ndx_file_goal,
topol_file_goal, goal_nz, hostnames, core_map)
else:
# noize_file = gen_file_for_amb_noize(work_dir, goal_nz, seed_list, seed_dirs, ndx_file_goal, topol_file_goal, goal_nz)
noize_file = gen_file_for_amb_noize(work_dir, seed_list, seed_dirs, ndx_file_goal, topol_file_goal, goal_nz)
# metric order (cf. metric_names): BBRMSD, AARMSD, ANGL, AND_H, AND, XOR
if tol_error is None or len(tol_error) < num_metrics:
if noize_file is None:
noize_file = 'noise.xtc'
goal_nz = os.path.join(prot_dir, 'folded_for_noise.gro')
goal_prot_only_nz = os.path.join(prot_dir, 'goal_prot_nz.gro')
goal_prot_only_nz_bb = os.path.join(prot_dir, 'goal_prot_nz_bb.xtc')
noize_file_bb = os.path.join(prot_dir, 'goal_bb_nz.xtc')
gmx_trjconv(f=goal_nz, o=goal_prot_only_nz, n=ndx_file_goal, s=goal_nz)
gmx_trjconv(f=goal_prot_only_nz, o=goal_prot_only_nz_bb, n=goal_bb_ndx, s=goal_nz)
goal_angle_file_nz = os.path.join(prot_dir, 'goal_angle_nz.dat')
goal_sincos_file_nz = os.path.join(prot_dir, 'goal_sincos_nz.dat')
goal_bb_xtc_nz = os.path.join(prot_dir, 'goal_bb_nz.xtc')
gmx_trjconv(f=goal_nz, o=goal_bb_xtc_nz, n=goal_bb_ndx, s=goal_nz)
gmx_trjconv(f=noize_file, o=noize_file_bb, n=goal_bb_ndx, s=goal_nz)
goal_xtc_nz = os.path.join(prot_dir, 'goal_nz.xtc')
gmx_trjconv(f=goal_nz, o=goal_xtc_nz, n=ndx_file_goal)
get_bb_to_angle_mdsctk(x=goal_bb_xtc_nz, o=goal_angle_file_nz)
get_angle_to_sincos_mdsctk(i=goal_angle_file_nz, o=goal_sincos_file_nz)
with open(goal_sincos_file_nz, 'rb') as file:
initial_1d_array = np.frombuffer(file.read(), dtype=np.float64, count=-1)
goal_angles_nz = np.reshape(initial_1d_array, (-1, angl_num * 2))[0]
del file, initial_1d_array
goal_ind_nz = get_contat_profile_mdsctk(goal_prot_only, goal_xtc, ndx_file_goal, cont_dist)[1:] # first is total num of contacts
goal_contacts_nz = np.zeros(atom_num * atom_num, dtype=bool)
goal_contacts_nz[goal_ind_nz] = True
del goal_ind_nz
h_pos_goal_nz = parse_top_for_h(topol_file_goal)
h_filter_goal_nz = np.zeros(atom_num * atom_num, dtype=bool)
for pos in h_pos_goal_nz:
h_filter_goal_nz[(pos - 1) * atom_num:pos * atom_num] = True
del h_pos_goal_nz, pos
goal_cont_h_nz = np.logical_and(goal_contacts_nz, h_filter_goal_nz)
goal_contacts_and_h_sum_nz = get_native_contacts(goal_prot_only_nz, [goal_xtc_nz], ndx_file_goal, goal_cont_h_nz,
atom_num, cont_dist, np.logical_and, pool=cpu_pool)[0]
goal_contacts_and_sum_nz = np.sum(goal_contacts_nz)
err_node_info = compute_init_metric(past_dir, tot_seeds, noize_file, noize_file_bb, angl_num,
goal_angles_nz, goal_prot_only_nz, ndx_file_goal, goal_cont_h_nz, atom_num, cont_dist,
h_filter_goal_nz, goal_contacts_nz, goal_contacts_and_h_sum_nz, goal_contacts_and_sum_nz,
goal_conf_files)
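# the tolerable "ambient noise" per metric is the best (minimum) distance any
# seed reached while simulating the already-folded state, scaled down by err_mult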
tol_error = dict()
for metr_name in metric_names:
tol_error[metr_name] = min([node['{}_to_goal'.format(metr_name)] for node in err_node_info]) * err_mult
save_an_file(an_file, tol_error, metric_names)
del err_node_info, metr_name
del an_file, noize_file
print('Done measuring ambient noise for folded state at {}K.\n'
'Min result for {} seeds was multiplied by {}.\n'
'BBRMSD noise was {:0.5f}A\n'
'AARMSD noise was {:0.5f}A\n'
'PhiPsi angle noise was {:0.5f}\n'
'Contact distance noise with AND logical function for H contacts was {:.3f}\n'
'Contact distance noise with AND logical function was {:.3f}\n'
'Contact distance noise with XOR logical function was {:.3f}\n'
''.format(simulation_temp, tot_seeds, err_mult, tol_error['BBRMSD'], tol_error['AARMSD'], tol_error['ANGL'], tol_error['AND_H'],
tol_error['AND'], tol_error['XOR']))
del err_mult
node_info = compute_init_metric(past_dir, 1, init_xtc, init_xtc_bb, angl_num, goal_angles, init_prot_only,
ndx_file_init, goal_cont_h, atom_num, cont_dist, h_filter_init, goal_contacts,
goal_contacts_and_h_sum, goal_contacts_and_sum, goal_conf_files)
print('Done measuring distance from initial state at {}K.\n'
'BBRMSD dist: {:0.5f}A\n'
'AARMSD dist: {:0.5f}A\n'
'PhiPsi angle difference: {:0.5f}\n'
'H contact disagreement (AND_H): {} of {}\n'
'All contact disagreement (AND): {} of {}\n'
'All contact disagreement (XOR): {}\n'.format(simulation_temp,
node_info['BBRMSD_to_goal'],
node_info['AARMSD_to_goal'],
node_info['ANGL_to_goal'],
node_info['AND_H_to_goal'], goal_contacts_and_h_sum,
node_info['AND_to_goal'], goal_contacts_and_sum,
node_info['XOR_to_goal']))
print('Unfolded to noise ratio:\n'
'BBRMSD : {:.5f}\n'
'AARMSD : {:.5f}\n'
'PhiPsi angles: {:.5f}\n'
'H contact (AND_H) disagreement: {:.5f}\n'
'All contact (AND) disagreement: {:.5f}\n'
'All contact disagreement (XOR): {:.5f}\n'.format(node_info['BBRMSD_to_goal'] / tol_error['BBRMSD'] if tol_error['BBRMSD'] != 0 else float('inf'),
node_info['AARMSD_to_goal'] / tol_error['AARMSD'] if tol_error['AARMSD'] != 0 else float('inf'),
node_info['ANGL_to_goal'] / tol_error['ANGL'] if tol_error['ANGL'] != 0 else float('inf'),
node_info['AND_H_to_goal']/tol_error['AND_H'] if tol_error['AND_H'] != 0 else float('inf'),
node_info['AND_to_goal'] / tol_error['AND'] if tol_error['AND'] != 0 else float('inf'),
node_info['XOR_to_goal'] / tol_error['XOR'] if tol_error['XOR'] != 0 else float('inf')))
# part of code used to study relation between contact distance and noise
# f.write(
# '{} \n'.format(' '.join(str(elem) for elem in [cont_dist, node_info['AND_H_to_goal'], goal_contacts_and_h_sum,
# node_info['AND_H_to_goal'] / goal_contacts_and_h_sum, node_info['AND_to_goal'],
# goal_contacts_and_sum,
# node_info['AND_to_goal'] / goal_contacts_and_sum, node_info['XOR_to_goal'],
# node_info['AND_H_to_goal'] / tol_error['AND_H'],
# node_info['AND_to_goal'] / tol_error['AND'],
# node_info['XOR_to_goal'] / tol_error['XOR']])))
# print('done writing the file')
# exit(22)
# name_2_digest_map = dict()
# digest_2_name_map = dict()
# name_2_digest_map['s'] = get_digest('s')
cur_hash_name = get_digest('s')
# digest_2_name_map[name_2_digest_map['s']] = 's'
main_dict = dict()
main_dict[cur_hash_name] = node_info
open_queue = list()
heapq.heappush(open_queue, (node_info['{}_to_goal'.format(metric_names[0])], 0, cur_hash_name)) # metric_val, attempts, name
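# heapq orders tuples lexicographically, so the node with the smallest value of
# the first metric is expanded first; the attempt count breaks ties between nodes
# with equal metric values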
init_metr = {'BBRMSD': node_info['BBRMSD_to_goal'], 'AARMSD': node_info['AARMSD_to_goal'], 'ANGL': node_info['ANGL_to_goal'],
'AND_H': node_info['AND_H_to_goal'], 'AND': node_info['AND_to_goal'], 'XOR': node_info['XOR_to_goal']}
cp2(init_xtc[:-4] + '.gro', os.path.join(past_dir, cur_hash_name + '.gro'))
cp2(init_xtc[:-4] + '.xtc', os.path.join(past_dir, cur_hash_name + '.xtc'))
# copy_queue.put_nowait((init_xtc[:-4] + '.gro', os.path.join(past_dir, name_2_digest_map['s'] + '.gro')))
# copy_queue.put_nowait((init_xtc[:-4] + '.xtc', os.path.join(past_dir, name_2_digest_map['s'] + '.xtc')))
# copy_queue.put_nowait(None)
visited_queue = list()
skipped_counter = 0
combined_pg = os.path.join(work_dir, "out.xtc")
combined_pg_bb = os.path.join(work_dir, "out_bb.xtc")
temp_xtc_file = os.path.join(work_dir, "temp.xtc")
temp_xtc_file_bb = os.path.join(work_dir, "temp_bb.xtc")
loop_start = time.perf_counter()
# info_form_str = 'n:{}\db_input_thread:{:.4f}\tg:{:.4f}\ts:{}\tq:{}\tv:{}\tl:{:.2f}s\tc:{:.2f}s'
info_form_str = 'o_q:{:<5} v_q:{:<3} s:{:<3} grm:{:6.2f} gan:{:6.2f} gah:{:<4} gad:{:<4} gxo:{:<4} ' \
't:{:5.2f}s gbrb:{:.3f} gbr:{:.3f} gba:{:.3f} gc:{:<2} ns:{:3.1f} sc:{}'
# node_info['rmds_total'], node_info['rmds_to_goal'], skipped_counter, len(open_queue), len(visited_queue),
# loop_end - loop_start, best_so_far, global_best_so_far, greed_count, greed_mult, seed_change_counter,
# node_info['nat_cont_to_goal']))
# info_form_str.format(len(open_queue), len(visited_queue), skipped_counter, node_info['RMSD_to_goal'],
# node_info['ANGL_to_goal'], node_info['AND_H_to_goal'],
# node_info['AND_to_goal']), node_info['XOR_to_goal'], loop_end - loop_start, best_so_far[1],
# best_so_far[0], greed_count, greed_mult, seed_change_counter)
under_form_str = '{}_{}'
greed_mult = 1.0
greed_count = 0
# con, dbname = get_db_con(tot_seeds)
# insert_into_main_stor(con, node_info, greed_count, name_2_digest_map['s'], 's')
db_input_queue.put_nowait((insert_into_main_stor, (node_info, greed_count, cur_hash_name, 's')))
node_max_att = 4
seed_change_counter = 0
# change_metrics_limit = 3 # how many seed changes(20 iter per change) with no problems we have to have to change cur metricss
# search LMA in the code
# seed_change_limit = 1000
# local_minimum_counter = 0
# local_minim_names = list()
# nmr_structure_switch = 2 # 0 for nmr, 1 for relaxed, 2 for heated
best_so_far = {metr: node_info['{}_to_goal'.format(metr)] for metr in metric_names}
print(best_so_far)
best_so_far_name = {metr: cur_hash_name for metr in metric_names}
# global_best_so_far = best_so_far
Path(combined_pg).touch()
Path(combined_pg_bb).touch()
Path(temp_xtc_file).touch()
Path(temp_xtc_file_bb).touch()
if os.path.exists('./local_min.xtc'):
os.remove(('./local_min.xtc'))
compute_all_at_once = True
counter_since_seed_changed = 0
recover = False # STOP! Before changing this toggle, read below:
# 1. Make a backup of your pickles.
# 2. Remember the number of the last good db - this name should always be the last one.
# This functionality was never properly tested, and backups may overwrite the last good state.
# Backups rely on time and number of steps; if your I/O is too fast or too slow, everything may go wrong. Thus, do the pickle backup.
if recover:
# file: configure_machine/bootstrap_bevy_member_here.py
#!/usr/bin/env python3
# encoding: utf-8
"""
A utility program to install a SaltStack minion, and optionally, a master with cloud controller.
arguments: add one or more file_roots and pillar_roots entries. Spaces are not permitted.
--file_roots=/absolute/path/to/directory1,{path}relative/to/directory2
--pillar_roots={path}relative/dir
Maintenance command-line switches:
--no-sudo = Do not attempt to run with elevated privileges, use the present level
--no-read-settings = Do not read an existing BEVY_SETTINGS_FILE
"""
import subprocess, os, getpass, socket, platform, ipaddress, sys, time, shutil, tempfile, traceback
from pathlib import Path, PurePosixPath, PureWindowsPath
from urllib.request import urlopen
try:
import yaml # actually imports the PyYAML module
import ifaddr
if platform.system() != 'Linux':
import passlib
except ImportError:
print('\nERROR: Python3 setup incomplete. You are missing required prerequisite modules.')
if platform.system() == 'Windows':
print('Try something like: "py -3 -m pip install pyyaml ifaddr passlib"')
print('If "pip" is not found, you may need to exit and re-open your console window.')
elif platform.system() == 'Darwin': # MacOS
print('Try something like: "sudo -H pip3 install pyyaml ifaddr passlib"')
else: # Linux
print('Try something like: "sudo -H pip3 install pyyaml ifaddr"')
print('If you are using Ubuntu (Debian, etc), you may need to "sudo apt install python3-pip" first.')
print('Then re-run your command.')
sys.exit(10) # Windows ERROR_BAD_ENVIRONMENT
# import my helper modules
#noinspection PyUnresolvedReferences
from helpers import pwd_hash, sudo, salt_call_local, provisioner
# # # # #
# This program attempts to establish a DRY single source of truth in the following files . . .
SRV_ROOT = '/srv' if platform.system()!='Darwin' else '/opt/saltdata' # MacOS 10.15+ prohibits use of /srv
BEVY_SETTINGS_FILE_NAME = SRV_ROOT + '/pillar/01_bevy_settings.sls' # the default Salt location
temp_settings_name = os.path.join(tempfile.gettempdir(), 'salt-bevy_my_settings.conf')
VAGRANT_PROJECTS_ROOT = '/projects'
#
# Normal minions will receive their settings from the Bevy Master.
# If the Bevy Master is a stand-alone server, it might be a "good idea" to connect its /srv directory to
# the /srv directory on your Workstation using a deployment engine such as PyCharm's.
#
# .. A given machine in the Bevy could be a Workstation, a bevy_master (perhaps as a local VM on a workstation),
# or a bevy minion which is a headless server for some service (perhaps also as a local VM).
# Any of these (except a local VM) might very possibly already have been a minion of some other Salt Master
# before our bevy arrives on the scene. We may want to preserve that minion's connection.
# We will attempt to detect that situation, and we will use the setting "additional_minion_tag" (which may contain
# "" or a string literal "2") to allow both minions to operate side-by-side.
# It theoretically might work to have "additional_minion_tag" be any of the values "3" through "Z",
# if we were running three or more minions, but that situation would be really weird.
# # # # #
MY_SETTINGS_FILE_NAME = '/etc/salt-bevy/my_settings.conf' # settings specific to the current machine
MINIMUM_SALT_VERSION = "2018.3.0" # ... as a string... the month will be converted to an integer below
SALT_BOOTSTRAP_URL = "http://bootstrap.saltstack.com/stable/bootstrap-salt.sh"
SALT_DOWNLOAD_SOURCE = "stable"
SALT_SRV_ROOT = SRV_ROOT + '/salt'
SALT_PILLAR_ROOT = SRV_ROOT + '/pillar'
# the path to write the bootstrap Salt Minion configuration file
GUEST_MASTER_CONFIG_FILE = SRV_ROOT + '/bevymaster_config/minion'
GUEST_MINION_CONFIG_FILE = SRV_ROOT + '/guest_config/minion'
WINDOWS_GUEST_CONFIG_FILE = SRV_ROOT + '/windows_config/minion'
MAC_GUEST_CONFIG_FILE = SRV_ROOT + '/macos_config/minion'
DEFAULT_VAGRANT_PREFIX = '172.17' # first two bytes of Vagrant private network
DEFAULT_VAGRANT_NETWORK = '172.17.0.0/16' # Vagrant private network
minimum_salt_version = MINIMUM_SALT_VERSION.split('.')
# noinspection PyTypeChecker
minimum_salt_version[1] = int(minimum_salt_version[1]) # use numeric compare of month field
this_file = Path(__file__).resolve() # the absolute path name of this program's source
user_ssh_key_file_directory = this_file.parent.parent / 'bevy_srv/salt/ssh_keys' # TODO: move this
argv = [s.strip() for s in sys.argv]
if '--help' in argv:
print(__doc__)
exit()
settings = {} # global variable for entire Bevy settings dictionary
my_settings = {} # settings dictionary for this machine only
user_name = ''
def minion_tag():
'''
Tests my_settings['additional_minion_tag'].
Returns "" for the primary minion (tag is blank or "none")
or "2" for a second minion running on the same machine,
or possibly some other value ("3"?) if three minions were running, but who would do that?
:return: '' or '2'
'''
amt = my_settings.get('additional_minion_tag', '')
if amt.lower() == 'none':
return ''
return amt
def my_salt_config_file_path():
if platform.system() == 'Windows': # TODO: minion_tag not yet actually supported
return Path('c:\\salt\\conf{}\\minion.d\\00_bevy_boot.conf'.format(minion_tag()))
# else:
return Path('/etc/salt{}/minion.d/00_bevy_boot.conf'.format(minion_tag()))
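# (e.g. /etc/salt/minion.d/00_bevy_boot.conf for the primary minion, or
# /etc/salt2/minion.d/00_bevy_boot.conf when additional_minion_tag is "2")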
def write_my_config_file():
conf_file_path = my_salt_config_file_path()
for other_file in conf_file_path.parent.glob('[0-9][0-9]*.conf'):
print('(deleting old configuration file -->', other_file)
other_file.unlink() # delete other (competing) configuration files.
write_config_file(conf_file_path,
is_master=my_settings.get('master', False),
virtual=False, # NOTE: may be a true lie if interactive on a virtual machine.
platform=platform.system(),
master_host=my_settings.get('master_host', False))
def set_file_owned_by(filename, username):
if sys.platform != 'win32': # not on Windows
if sudo.isUserAdmin(): # must have elevated privileges
try:
shutil.chown(str(filename), username, 'staff')
shutil.chown(str(filename.parent), username, 'staff')
except (OSError, AttributeError):
pass
def print_names_of_other_bevy_files():
# scan for other stored bevy definitions in the settings directory (i.e. /srv/pillar/01_bevy_settings.sls.*)
bsf = Path(BEVY_SETTINGS_FILE_NAME)
found_bevys = [bsfx.suffix[1:] for bsfx in bsf.parent.glob('*') if bsfx.stem == bsf.name]
if found_bevys:
print('I found the following saved bevys in your {} directory:\n {}'.format(
Path(BEVY_SETTINGS_FILE_NAME).parent, found_bevys))
def read_bevy_settings_files(context=None, try_temp=False) -> (str, bool): # (new_bevy_name, changed)
"""
This procedure is a complex mess.
It retrieves remembered state for the bevy from disk storage.
There are two files, one for the bevy in general, stored in a pillar file (in YAML format),
and one for only this computer, stored in /etc, in a .conf file, in YAML format.
In addition, there may be any number of other remembered bevys, with each pair of settings files
stored in the same directory, but with a filename extension of the bevy name.
So, the stored values for my workstation for "bevy01" would be in "/etc/salt-bevy/my_settings.conf.bevy01".
The user will be able to switch between bevys by reading the other settings and storing away the existing ones.
The name of another bevy may be passed as a command-line parameter, or in the context dictionary.
If no bevy name is passed, the user will be prompted for one, with the existing bevy as the default.
:param context: a dictionary passed from the normal invocation to the elevated invocation
:param try_temp: See if a previous run stored unique settings someplace other than /etc
:return: a tuple (the new 'bevy' name, was it different than the last run)
many values are returned as the global dictionaries "settings" and "my_settings".
"""
global settings
global my_settings
def read_settings_file(provision_file_name, description="", try_temp=False):
prov_file = Path(provision_file_name)
try:
print("(Trying to read {} settings from '{}')".format(description, prov_file))
with prov_file.open() as provision_file:
stored_settings = yaml.safe_load(provision_file.read()) or {}
except (OSError, yaml.YAMLError) as e:
if try_temp and isinstance(e, FileNotFoundError): # recursive call for trouble writing in /etc directory
stored_settings = read_settings_file(try_temp, 'temporary unique', try_temp=False)
else:
print("Unable to read previous values from {} because {}.".format(provision_file_name, e))
stored_settings = {}
return stored_settings
local_temp_settings_name = context.get('temp_settings_name', temp_settings_name)
arg_bevy_name = new_bevy = ''
# read in the present bevy settings. Will usually be the one we want
settings = read_settings_file(BEVY_SETTINGS_FILE_NAME, "shared") # settings for entire bevy
my_settings = read_settings_file(MY_SETTINGS_FILE_NAME, "unique", try_temp=local_temp_settings_name) # settings for only this machine
try: # this will happen when program has been re-run with elevated privilege
arg_bevy_name = context['bevy']
except (TypeError, KeyError): # the exception will occur at least half of the time
# scan for bevy name in arguments. It will be the only thing without a "--"
for item in argv[1:]:
if not item.startswith('--'):
arg_bevy_name = item
present_bevy = settings.get('bevy', '')
# get the bevy name the operator wants to use today
while True:
if arg_bevy_name in ['', 'sls', 'conf']: # was not (correctly) defined on command line or context
default = settings.get('bevy', 'bevy01')
print()
print('HINT: Use "local" as your bevy name for masterless operation of Salt on a single machine...')
# ask the user for a new bevy name.
new_bevy = input("Name your bevy: [{}]:".format(default)) or default
if new_bevy.lower() in ['sls', 'conf']:
print('Sorry. "sls" and "conf" are forbidden bevy names.')
continue
else: # was defined on command line or context['bevy']
print('(using bevy name "{}" from context or command line)'.format(arg_bevy_name))
new_bevy = arg_bevy_name
print()
# switch to the new bevy if needed, storing updates if requested
if new_bevy == present_bevy:
return present_bevy, False
# should not normally get here on an elevated pass
new_settings = read_settings_file(BEVY_SETTINGS_FILE_NAME + '.' + new_bevy)
from __future__ import print_function
import torch
import torch.nn as nn
from torch.nn import functional as F
from utils.metric import AverageMeter, Timer
import numpy as np
from models.resnet import BiasLayer
from .default import NormalNN, accumulate_acc, loss_fn_kd, Teacher
import copy
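# NOTE: LWF_MC and the BiC branch below reference get_one_hot() and TeacherBiC,
# which are not imported in this file; presumably they live alongside Teacher in
# .default. A minimal stand-in for get_one_hot, consistent with how it is called
# (integer targets -> one-hot float matrix on the GPU), would be:
def get_one_hot(target, num_class):
    # one row per sample, one column per class; 1.0 at each sample's target index
    one_hot = torch.zeros(target.shape[0], num_class).cuda()
    one_hot = one_hot.scatter(1, target.long().view(-1, 1), 1.0)
    return one_hot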
class LWF(NormalNN):
def __init__(self, learner_config):
super(LWF, self).__init__(learner_config)
self.previous_teacher = None
self.replay = False
self.past_tasks = []
self.bic_layers = None
self.ete_flag = False
##########################################
# MODEL TRAINING #
##########################################
def learn_batch(self, train_loader, train_dataset, model_save_dir, val_loader=None):
# try to load model
need_train = True
if not self.overwrite:
try:
self.load_model(model_save_dir)
need_train = False
except:
pass
# train
if need_train:
if self.reset_optimizer: # Reset optimizer before learning each task
self.log('Optimizer is reset!')
self.init_optimizer()
# data weighting
self.data_weighting(train_dataset)
# Evaluate the performance of current task
self.log('Epoch:{epoch:.0f}/{total:.0f}'.format(epoch=0,total=self.config['schedule'][-1]))
if val_loader is not None:
self.validation(val_loader)
losses = [AverageMeter() for l in range(3)]
acc = AverageMeter()
batch_time = AverageMeter()
batch_timer = Timer()
for epoch in range(self.config['schedule'][-1]):
self.epoch=epoch
if epoch > 0: self.scheduler.step()
for param_group in self.optimizer.param_groups:
self.log('LR:', param_group['lr'])
batch_timer.tic()
for i, (x, y, task) in enumerate(train_loader):
# verify in train mode
self.model.train()
# send data to gpu
if self.gpu:
x = x.cuda()
y = y.cuda()
# if KD
if self.replay:
allowed_predictions = list(range(self.last_valid_out_dim))
y_hat, _ = self.previous_teacher.generate_scores(x, allowed_predictions=allowed_predictions)
else:
y_hat = None
# model update - training data
loss, loss_class, loss_distill, output= self.update_model(x, y, y_hat)
# measure elapsed time
batch_time.update(batch_timer.toc())
# measure accuracy and record loss
y = y.detach()
accumulate_acc(output, y, task, acc, topk=(self.top_k,))
losses[0].update(loss, y.size(0))
losses[1].update(loss_class, y.size(0))
losses[2].update(loss_distill, y.size(0))
batch_timer.tic()
# eval update
self.log('Epoch:{epoch:.0f}/{total:.0f}'.format(epoch=self.epoch+1,total=self.config['schedule'][-1]))
self.log(' * Loss {loss.avg:.3f} | Train Acc {acc.avg:.3f}'.format(loss=losses[0],acc=acc))
self.log(' * Class Loss {loss.avg:.3f} | KD Loss {lossb.avg:.3f}'.format(loss=losses[1],lossb=losses[2]))
# Evaluate the performance of current task
if val_loader is not None:
self.validation(val_loader)
# reset
losses = [AverageMeter() for l in range(3)]
acc = AverageMeter()
self.model.eval()
self.past_tasks.append(np.arange(self.last_valid_out_dim,self.valid_out_dim))
self.last_valid_out_dim = self.valid_out_dim
self.first_task = False
# ETE (end-to-end)
if self.ete_flag:
# for eval
if self.previous_teacher is not None:
self.previous_previous_teacher = self.previous_teacher
# new teacher
teacher = Teacher(solver=self.model)
self.previous_teacher = copy.deepcopy(teacher)
# Extend memory
self.task_count += 1
if self.memory_size > 0:
train_dataset.update_coreset_ete(self.memory_size, np.arange(self.last_valid_out_dim), teacher)
# LwF (no bias-correction layers)
elif self.bic_layers is None:
# Extend memory
self.task_count += 1
if self.memory_size > 0:
train_dataset.update_coreset(self.memory_size, np.arange(self.last_valid_out_dim))
# for eval
if self.previous_teacher is not None:
self.previous_previous_teacher = self.previous_teacher
# new teacher
teacher = Teacher(solver=self.model)
self.previous_teacher = copy.deepcopy(teacher)
if len(self.config['gpuid']) > 1:
self.previous_linear = copy.deepcopy(self.model.module.last)
else:
self.previous_linear = copy.deepcopy(self.model.last)
# BiC (bias correction)
else:
# for eval
if self.previous_teacher is not None:
self.previous_previous_teacher = self.previous_teacher
# new teacher
teacher = TeacherBiC(solver=self.model, bic_layers = self.bic_layers)
self.previous_teacher = copy.deepcopy(teacher)
# Extend memory
self.task_count += 1
if self.memory_size > 0:
train_dataset.update_coreset_ic(self.memory_size, np.arange(self.last_valid_out_dim), teacher)
self.replay = True
try:
return batch_time.avg
except:
return None
def update_model(self, inputs, targets, target_KD = None):
total_loss = torch.zeros((1,), requires_grad=True).cuda()
# classification loss
if self.dw:
dw_cls = self.dw_k[targets.long()]
else:
dw_cls = self.dw_k[-1 * torch.ones(targets.size()).long()]
logits = self.forward(inputs)
loss_class = self.criterion(logits, targets.long(), dw_cls)
total_loss += loss_class
# KD
if target_KD is not None:
dw_KD = self.dw_k[-1 * torch.ones(len(target_KD),).long()]
logits_KD = logits
loss_distill = loss_fn_kd(logits_KD, target_KD, dw_KD, np.arange(self.last_valid_out_dim).tolist(), self.DTemp)
total_loss += self.mu * loss_distill
else:
loss_distill = torch.zeros((1,), requires_grad=True).cuda()
self.optimizer.zero_grad()
total_loss.backward()
self.optimizer.step()
return total_loss.detach(), loss_class.detach(), loss_distill.detach(), logits
##########################################
# MODEL UTILS #
##########################################
def combine_data(self, data):
x, y = [],[]
for i in range(len(data)):
x.append(data[i][0])
y.append(data[i][1])
x, y = torch.cat(x), torch.cat(y)
return x, y
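# For reference, a minimal sketch of a temperature-scaled KD loss consistent
# with how loss_fn_kd is called in update_model above (the actual implementation
# is imported from .default and may differ; 'allowed' is the list of class
# indices being distilled, T the distillation temperature):
def _loss_fn_kd_sketch(logits, teacher_scores, dw, allowed, T):
    log_p = F.log_softmax(logits[:, allowed] / T, dim=1)  # student, softened
    # assumes generate_scores() already restricted teacher_scores to 'allowed'
    q = F.softmax(teacher_scores / T, dim=1)              # teacher, softened
    return (-(q * log_p).sum(dim=1) * dw).mean() * (T ** 2)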
class LWF_MC(LWF):
def __init__(self, learner_config):
super(LWF_MC, self).__init__(learner_config)
def update_model(self, inputs, targets, target_KD = None):
# get output
logits = self.forward(inputs)
# KD
if target_KD is not None:
target = get_one_hot(targets, self.valid_out_dim)
target_KD = F.softmax(target_KD, dim=1)
target[:, :self.last_valid_out_dim] = target_KD
log_logits = F.log_softmax(logits, dim=1)
KD_loss_unnorm = -(target * log_logits)
KD_loss_unnorm = KD_loss_unnorm.sum(dim=1)
total_loss = KD_loss_unnorm.mean()
else:
dw_cls = self.dw_k[-1 * torch.ones(targets.size()).long()]
total_loss = self.criterion(logits, targets.long(), dw_cls)
self.optimizer.zero_grad()
total_loss.backward()
self.optimizer.step()
return total_loss.detach(), total_loss.detach(), torch.zeros((1,), requires_grad=True).cuda().detach(), logits
class ETE(LWF):
def __init__(self, learner_config):
super(ETE, self).__init__(learner_config)
self.ete_flag = True
def update_model(self, inputs, targets, target_KD = None):
# classification loss
dw_cls = self.dw_k[-1 * torch.ones(targets.size()).long()]
logits = self.forward(inputs)
loss_class = self.criterion(logits, targets.long(), dw_cls)
total_loss = loss_class
# KD
if target_KD is not None:
dw_KD = self.dw_k[-1 * torch.ones(len(target_KD),).long()]
logits_KD = logits
for task_l in self.past_tasks:
loss_distill = loss_fn_kd(logits_KD, target_KD, dw_KD, task_l.tolist(), self.DTemp)
total_loss += self.mu * loss_distill * (len(task_l) / self.last_valid_out_dim)
else:
loss_distill = torch.zeros((1,), requires_grad=True).cuda()
self.optimizer.zero_grad()
total_loss.backward()
self.optimizer.step()
return total_loss.detach(), loss_class.detach(), loss_distill.detach(), logits
def update_model_b(self, inputs, targets, target_KD = None, target_KD_B = None):
# classification loss
dw_cls = self.dw_k[-1 * torch.ones(targets.size()).long()]
logits = self.forward(inputs)
loss_class = self.criterion(logits, targets.long(), dw_cls)
total_loss = loss_class
# KD
if target_KD is not None:
dw_KD = self.dw_k[-1 * torch.ones(len(target_KD),).long()]
logits_KD = logits
for task_l in self.past_tasks:
loss_distill = loss_fn_kd(logits_KD, target_KD, dw_KD, task_l.tolist(), self.DTemp)
total_loss += self.mu * loss_distill * (len(task_l) / self.valid_out_dim)
# current task
loss_distill = loss_fn_kd(logits_KD[:, self.last_valid_out_dim:self.valid_out_dim], target_KD, dw_KD, np.arange(self.valid_out_dim-self.last_valid_out_dim), self.DTemp)
total_loss += self.mu * loss_distill * ((self.valid_out_dim-self.last_valid_out_dim) / self.valid_out_dim)
else:
loss_distill = torch.zeros((1,), requires_grad=True).cuda()
self.optimizer.zero_grad()
total_loss.backward()
self.optimizer.step()
return total_loss.detach(), loss_class.detach(), loss_distill.detach(), logits
def learn_batch(self, train_loader, train_dataset, model_save_dir, val_loader=None):
if self.task_count == 0:
return super(ETE, self).learn_batch(train_loader, train_dataset, model_save_dir, val_loader)
# try to load model
need_train = True
if not self.overwrite:
try:
self.load_model(model_save_dir)
need_train = False
except:
pass
# train
if need_train:
if self.reset_optimizer: # Reset optimizer before learning each task
self.log('Optimizer is reset!')
self.init_optimizer()
# data weighting
self.data_weighting(train_dataset)
# Evaluate the performance of current task
self.log('Epoch:{epoch:.0f}/{total:.0f}'.format(epoch=0,total=self.config['schedule'][-1]))
if val_loader is not None:
self.validation(val_loader)
losses = [AverageMeter() for l in range(3)]
acc = AverageMeter()
batch_time = AverageMeter()
batch_timer = Timer()
for epoch in range(self.config['schedule'][-1]):
self.epoch=epoch
if epoch > 0: self.scheduler.step()
for param_group in self.optimizer.param_groups:
self.log('LR:', param_group['lr'])
batch_timer.tic()
for i, (x, y, task) in enumerate(train_loader):
# verify in train mode
self.model.train()
# send data to gpu
if self.gpu:
x = x.cuda()
y = y.cuda()
# if KD
if self.replay:
allowed_predictions = list(range(self.last_valid_out_dim))
y_hat, _ = self.previous_teacher.generate_scores(x, allowed_predictions=allowed_predictions)
else:
y_hat = None
# model update - training data
loss, loss_class, loss_distill, output= self.update_model(x, y, y_hat)
# measure elapsed time
batch_time.update(batch_timer.toc())
# measure accuracy and record loss
y = y.detach()
accumulate_acc(output, y, task, acc, topk=(self.top_k,))
losses[0].update(loss, y.size(0))
losses[1].update(loss_class, y.size(0))
losses[2].update(loss_distill, y.size(0))
batch_timer.tic()
# eval update
self.log('Epoch:{epoch:.0f}/{total:.0f}'.format(epoch=self.epoch+1,total=self.config['schedule'][-1]))
self.log(' * Loss {loss.avg:.3f} | Train Acc {acc.avg:.3f}'.format(loss=losses[0],acc=acc))
self.log(' * Class Loss {loss.avg:.3f} | KD Loss {lossb.avg:.3f}'.format(loss=losses[1],lossb=losses[2]))
# Evaluate the performance of current task
if val_loader is not None:
self.validation(val_loader)
# reset
losses = [AverageMeter() for l in range(3)]
acc = AverageMeter()
# new teacher
teacher = Teacher(solver=self.model)
self.current_teacher = copy.deepcopy(teacher)
# Extend memory
self.task_count += 1
if self.memory_size > 0:
train_dataset.update_coreset_ete(self.memory_size, np.arange(self.valid_out_dim), teacher)
# train
if need_train:
# part b
# dataset tune
train_dataset.load_dataset(train_dataset.t, train=True)
train_dataset.append_coreset(only=True)
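# balanced fine-tuning stage: temporarily scale the LR down by 100x while
# building a fresh optimizer/scheduler for the final layer only, then restore it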
self.config['lr'] = self.config['lr'] / 1e2
if len(self.config['gpuid']) > 1:
self.optimizer, self.scheduler = self.new_optimizer(self.model.module.last)
else:
self.optimizer, self.scheduler = self.new_optimizer(self.model.last)
self.config['lr'] = self.config['lr'] * 1e2
# Evaluate the performance of current task
self.log('Balanced Epoch:{epoch:.0f}/{total:.0f}'.format(epoch=0,total=self.config['schedule'][-1]))
if val_loader is not None:
self.validation(val_loader)
losses = [AverageMeter() for l in range(3)]
acc = AverageMeter()
batch_time = AverageMeter()
batch_timer = Timer()
for epoch in range(self.config['schedule'][-1]):
self.epoch=epoch
if epoch > 0: self.scheduler.step()
for param_group in self.optimizer.param_groups:
self.log('LR:', param_group['lr'])
batch_timer.tic()
for i, (x, y, task) in enumerate(train_loader):
# verify in train mode
self.model.train()
# send data to gpu
if self.gpu:
x = x.cuda()
y = y.cuda()
# if KD
if self.replay:
allowed_predictions = list(range(self.last_valid_out_dim))
y_hat, _ = self.previous_teacher.generate_scores(x, allowed_predictions=allowed_predictions)
y_hat_b, _ = self.current_teacher.generate_scores(x, allowed_predictions=np.arange(self.last_valid_out_dim, self.valid_out_dim))
else:
y_hat = None
# model update - training data
loss, loss_class, loss_distill, output= self.update_model_b(x, y, y_hat, y_hat_b)
# measure elapsed time
batch_time.update(batch_timer.toc())
# measure accuracy and record loss
y = y.detach()
accumulate_acc(output, y, task, acc, topk=(self.top_k,))
losses[0].update(loss, y.size(0))
losses[1].update(loss_class, y.size(0))
losses[2].update(loss_distill, y.size(0))
batch_timer.tic()
# eval update
self.log('Balanced Epoch:{epoch:.0f}/{total:.0f}'.format(epoch=self.epoch+1,total=self.config['schedule'][-1]))
self.log(' * Loss {loss.avg:.3f} | Train Acc {acc.avg:.3f}'.format(loss=losses[0],acc=acc))
self.log(' * Class Loss {loss.avg:.3f} | KD Loss {lossb.avg:.3f}'.format(loss=losses[1],lossb=losses[2]))
# Evaluate the performance of current task
if val_loader is not None:
self.validation(val_loader)
# reset
losses = [AverageMeter() for l in range(3)]
acc = AverageMeter()
# dataset final
train_dataset.load_dataset(train_dataset.t, train=True)
train_dataset.append_coreset(only=False)
self.model.eval()
self.past_tasks.append(np.arange(self.last_valid_out_dim,self.valid_out_dim))
self.last_valid_out_dim = self.valid_out_dim
self.first_task = False
# repo: grlee77/nipype
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""SPM wrappers for preprocessing data
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
__docformat__ = 'restructuredtext'
# Standard library imports
from copy import deepcopy
import os
# Third-party imports
import numpy as np
# Local imports
from nipype.interfaces.base import (OutputMultiPath, TraitedSpec, isdefined,
traits, InputMultiPath, File)
from nipype.interfaces.spm.base import (SPMCommand, scans_for_fname,
func_is_3d, Info,
scans_for_fnames, SPMCommandInputSpec)
from nipype.utils.filemanip import (fname_presuffix, filename_to_list,
list_to_filename, split_filename)
class SliceTimingInputSpec(SPMCommandInputSpec):
in_files = InputMultiPath(traits.Either(traits.List(File(exists=True)),
File(exists=True)), field='scans',
desc='list of filenames to apply slice timing',
mandatory=True, copyfile=False)
num_slices = traits.Int(field='nslices',
desc='number of slices in a volume',
mandatory=True)
time_repetition = traits.Float(field='tr',
desc=('time between volume acquisitions '
'(start to start time)'),
mandatory=True)
time_acquisition = traits.Float(field='ta',
desc=('time of volume acquisition. usually '
'calculated as TR-(TR/num_slices)'),
mandatory=True)
slice_order = traits.List(traits.Int(), field='so',
desc='1-based order in which slices are acquired',
mandatory=True)
ref_slice = traits.Int(field='refslice',
desc='1-based Number of the reference slice',
mandatory=True)
out_prefix = traits.String('a', field='prefix', usedefault=True,
desc='slicetimed output prefix')
class SliceTimingOutputSpec(TraitedSpec):
timecorrected_files = OutputMultiPath(traits.Either(traits.List(File(exists=True)),
File(exists=True)),
desc='slice time corrected files')
class SliceTiming(SPMCommand):
"""Use spm to perform slice timing correction.
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=19
Examples
--------
>>> from nipype.interfaces.spm import SliceTiming
>>> st = SliceTiming()
>>> st.inputs.in_files = 'functional.nii'
>>> st.inputs.num_slices = 32
>>> st.inputs.time_repetition = 6.0
>>> st.inputs.time_acquisition = 6. - 6./32.
>>> st.inputs.slice_order = range(32,0,-1)
>>> st.inputs.ref_slice = 1
>>> st.run() # doctest: +SKIP
"""
input_spec = SliceTimingInputSpec
output_spec = SliceTimingOutputSpec
_jobtype = 'temporal'
_jobname = 'st'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt == 'in_files':
return scans_for_fnames(filename_to_list(val),
keep4d=False,
separate_sessions=True)
return super(SliceTiming, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['timecorrected_files'] = []
filelist = filename_to_list(self.inputs.in_files)
for f in filelist:
if isinstance(f, list):
run = [fname_presuffix(in_f, prefix=self.inputs.out_prefix) for in_f in f]
else:
run = fname_presuffix(f, prefix=self.inputs.out_prefix)
outputs['timecorrected_files'].append(run)
return outputs
class RealignInputSpec(SPMCommandInputSpec):
in_files = InputMultiPath(traits.Either(traits.List(File(exists=True)),
File(exists=True)), field='data',
mandatory=True, copyfile=True,
desc='list of filenames to realign')
jobtype = traits.Enum('estwrite', 'estimate', 'write',
desc='one of: estimate, write, estwrite',
usedefault=True)
quality = traits.Range(low=0.0, high=1.0, field='eoptions.quality',
desc='0.1 = fast, 1.0 = precise')
fwhm = traits.Range(low=0.0, field='eoptions.fwhm',
desc='gaussian smoothing kernel width')
separation = traits.Range(low=0.0, field='eoptions.sep',
desc='sampling separation in mm')
register_to_mean = traits.Bool(True, field='eoptions.rtm',
mandatory=True, usedefault=True,
desc='Indicate whether realignment is done to the mean image')
weight_img = File(exists=True, field='eoptions.weight',
desc='filename of weighting image')
interp = traits.Range(low=0, high=7, field='eoptions.interp',
desc='degree of b-spline used for interpolation')
wrap = traits.List(traits.Int(), minlen=3, maxlen=3,
field='eoptions.wrap',
desc='Check if interpolation should wrap in [x,y,z]')
write_which = traits.ListInt([2, 1], field='roptions.which',
minlen=2, maxlen=2, usedefault=True,
desc='determines which images to reslice')
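# per _list_outputs below: write_which[0] > 0 -> reslice all images,
# write_which[1] > 0 -> also write a mean image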
write_interp = traits.Range(low=0, high=7, field='roptions.interp',
desc='degree of b-spline used for interpolation')
write_wrap = traits.List(traits.Int(), minlen=3, maxlen=3,
field='roptions.wrap',
desc='Check if interpolation should wrap in [x,y,z]')
write_mask = traits.Bool(field='roptions.mask',
desc='True/False mask output image')
out_prefix = traits.String('r', field='roptions.prefix', usedefault=True,
desc='realigned output prefix')
class RealignOutputSpec(TraitedSpec):
mean_image = File(exists=True, desc='Mean image file from the realignment')
modified_in_files = OutputMultiPath(traits.Either(traits.List(File(exists=True)),
File(exists=True)),
desc='Copies of all files passed to in_files.\
Headers will have been modified to align all\
images with the first, or optionally to first\
do that, extract a mean image, and re-align to\
that mean image.')
realigned_files = OutputMultiPath(traits.Either(traits.List(File(exists=True)),
File(exists=True)),
desc='If jobtype is write or estwrite, these will be the\
resliced files. Otherwise, they will be copies of\
in_files that have had their headers rewritten.')
realignment_parameters = OutputMultiPath(File(exists=True),
desc='Estimated translation and rotation parameters')
class Realign(SPMCommand):
"""Use spm_realign for estimating within modality rigid body alignment
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=25
Examples
--------
>>> import nipype.interfaces.spm as spm
>>> realign = spm.Realign()
>>> realign.inputs.in_files = 'functional.nii'
>>> realign.inputs.register_to_mean = True
>>> realign.run() # doctest: +SKIP
"""
input_spec = RealignInputSpec
output_spec = RealignOutputSpec
_jobtype = 'spatial'
_jobname = 'realign'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt == 'in_files':
return scans_for_fnames(val,
keep4d=True,
separate_sessions=True)
return super(Realign, self)._format_arg(opt, spec, val)
def _parse_inputs(self):
"""validate spm realign options if set to None ignore
"""
einputs = super(Realign, self)._parse_inputs()
return [{'%s' % (self.inputs.jobtype): einputs[0]}]
def _list_outputs(self):
outputs = self._outputs().get()
resliced_all = self.inputs.write_which[0] > 0
resliced_mean = self.inputs.write_which[1] > 0
if isdefined(self.inputs.in_files):
outputs['realignment_parameters'] = []
for imgf in self.inputs.in_files:
if isinstance(imgf, list):
tmp_imgf = imgf[0]
else:
tmp_imgf = imgf
outputs['realignment_parameters'].append(fname_presuffix(tmp_imgf,
prefix='rp_',
suffix='.txt',
use_ext=False))
if not isinstance(imgf, list) and func_is_3d(imgf):
break
if self.inputs.jobtype == "estimate":
outputs['realigned_files'] = self.inputs.in_files
if self.inputs.jobtype == "estimate" or self.inputs.jobtype == "estwrite":
outputs['modified_in_files'] = self.inputs.in_files
if self.inputs.jobtype == "write" or self.inputs.jobtype == "estwrite":
if isinstance(self.inputs.in_files[0], list):
first_image = self.inputs.in_files[0][0]
else:
first_image = self.inputs.in_files[0]
if resliced_mean:
outputs['mean_image'] = fname_presuffix(first_image, prefix='mean')
if resliced_all:
outputs['realigned_files'] = []
for idx, imgf in enumerate(filename_to_list(self.inputs.in_files)):
realigned_run = []
if isinstance(imgf, list):
for i, inner_imgf in enumerate(filename_to_list(imgf)):
newfile = fname_presuffix(inner_imgf,
prefix=self.inputs.out_prefix)
if os.path.exists(newfile):
realigned_run.append(newfile)
continue
if (idx == 0) and (i == 0) and \
func_is_3d(inner_imgf):
realigned_run.append(fname_presuffix(inner_imgf,
prefix=''))
else:
realigned_run = fname_presuffix(imgf,
prefix=self.inputs.out_prefix)
if (idx == 0) and func_is_3d(imgf):
realigned_run = fname_presuffix(imgf, prefix='')
outputs['realigned_files'].append(realigned_run)
return outputs
class CoregisterInputSpec(SPMCommandInputSpec):
target = File(exists=True, field='ref', mandatory=True,
desc='reference file to register to', copyfile=False)
source = InputMultiPath(File(exists=True), field='source',
desc='file to register to target', copyfile=True,
mandatory=True)
jobtype = traits.Enum('estwrite', 'estimate', 'write',
desc='one of: estimate, write, estwrite',
usedefault=True)
apply_to_files = InputMultiPath(File(exists=True), field='other',
desc='files to apply transformation to',
copyfile=True)
cost_function = traits.Enum('mi', 'nmi', 'ecc', 'ncc',
field='eoptions.cost_fun',
desc="""cost function, one of: 'mi' - Mutual Information,
'nmi' - Normalised Mutual Information,
'ecc' - Entropy Correlation Coefficient,
'ncc' - Normalised Cross Correlation""")
fwhm = traits.List(traits.Float(), minlen=2, maxlen=2,
field='eoptions.fwhm',
desc='gaussian smoothing kernel width (mm)')
separation = traits.List(traits.Float(), field='eoptions.sep',
desc='sampling separation in mm')
tolerance = traits.List(traits.Float(), field='eoptions.tol',
desc='acceptable tolerance for each of 12 params')
write_interp = traits.Range(low=0, high=7, field='roptions.interp',
desc='degree of b-spline used for interpolation')
write_wrap = traits.List(traits.Int(), minlen=3, maxlen=3,
field='roptions.wrap',
desc='Check if interpolation should wrap in [x,y,z]')
write_mask = traits.Bool(field='roptions.mask',
desc='True/False mask output image')
out_prefix = traits.String('r', field='roptions.prefix', usedefault=True,
desc='coregistered output prefix')
class CoregisterOutputSpec(TraitedSpec):
coregistered_source = OutputMultiPath(File(exists=True),
desc='Coregistered source files')
coregistered_files = OutputMultiPath(File(exists=True),
desc='Coregistered other files')
class Coregister(SPMCommand):
"""Use spm_coreg for estimating cross-modality rigid body alignment
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=39
Examples
--------
>>> import nipype.interfaces.spm as spm
>>> coreg = spm.Coregister()
>>> coreg.inputs.target = 'functional.nii'
>>> coreg.inputs.source = 'structural.nii'
>>> coreg.run() # doctest: +SKIP
"""
input_spec = CoregisterInputSpec
output_spec = CoregisterOutputSpec
_jobtype = 'spatial'
_jobname = 'coreg'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt == 'target' or (opt == 'source' and self.inputs.jobtype != "write"):
return scans_for_fnames(filename_to_list(val),
keep4d=True)
if opt == 'apply_to_files':
return np.array(filename_to_list(val), dtype=object)
if opt == 'source' and self.inputs.jobtype == "write":
if isdefined(self.inputs.apply_to_files):
return scans_for_fnames(val+self.inputs.apply_to_files)
else:
return scans_for_fnames(val)
return super(Coregister, self)._format_arg(opt, spec, val)
def _parse_inputs(self):
"""validate spm coregister options if set to None ignore
"""
if self.inputs.jobtype == "write":
einputs = super(Coregister, self)._parse_inputs(skip=('jobtype', 'apply_to_files'))
else:
einputs = super(Coregister, self)._parse_inputs(skip=('jobtype',))
jobtype = self.inputs.jobtype
return [{'%s' % (jobtype): einputs[0]}]
def _list_outputs(self):
outputs = self._outputs().get()
if self.inputs.jobtype == "estimate":
if isdefined(self.inputs.apply_to_files):
outputs['coregistered_files'] = self.inputs.apply_to_files
outputs['coregistered_source'] = self.inputs.source
elif self.inputs.jobtype == "write" or self.inputs.jobtype == "estwrite":
if isdefined(self.inputs.apply_to_files):
outputs['coregistered_files'] = []
for imgf in filename_to_list(self.inputs.apply_to_files):
outputs['coregistered_files'].append(fname_presuffix(imgf, prefix=self.inputs.out_prefix))
outputs['coregistered_source'] = []
for imgf in filename_to_list(self.inputs.source):
outputs['coregistered_source'].append(fname_presuffix(imgf, prefix=self.inputs.out_prefix))
return outputs
class NormalizeInputSpec(SPMCommandInputSpec):
template = File(exists=True, field='eoptions.template',
desc='template file to normalize to',
mandatory=True, xor=['parameter_file'],
copyfile=False)
source = InputMultiPath(File(exists=True), field='subj.source',
desc='file to normalize to template',
xor=['parameter_file'],
mandatory=True, copyfile=True)
jobtype = traits.Enum('estwrite', 'est', 'write', usedefault=True,
desc='Estimate, Write or do both')
apply_to_files = InputMultiPath(traits.Either(File(exists=True),
traits.List(File(exists=True))),
field='subj.resample',
desc='files to apply transformation to',
copyfile=True)
parameter_file = File(field='subj.matname', mandatory=True,
xor=['source', 'template'],
desc='normalization parameter file*_sn.mat', copyfile=False)
source_weight = File(field='subj.wtsrc',
desc='name of weighting image for source', copyfile=False)
template_weight = File(field='eoptions.weight',
desc='name of weighting image for template', copyfile=False)
source_image_smoothing = traits.Float(field='eoptions.smosrc',
desc='source smoothing')
template_image_smoothing = traits.Float(field='eoptions.smoref',
desc='template smoothing')
affine_regularization_type = traits.Enum('mni', 'size', 'none',
field='eoptions.regtype',
desc='mni, size, none')
DCT_period_cutoff = traits.Float(field='eoptions.cutoff',
desc='Cutoff of for DCT bases')
nonlinear_iterations = traits.Int(field='eoptions.nits',
desc='Number of iterations of nonlinear warping')
nonlinear_regularization = traits.Float(field='eoptions.reg',
desc='the amount of the regularization for the nonlinear part of the normalization')
write_preserve = traits.Bool(field='roptions.preserve',
desc='True/False warped images are modulated')
write_bounding_box = traits.List(traits.List(traits.Float(), minlen=3,
maxlen=3),
field='roptions.bb', minlen=2, maxlen=2,
desc='3x2-element list of lists')
write_voxel_sizes = traits.List(traits.Float(), field='roptions.vox',
minlen=3, maxlen=3,
desc='3-element list')
write_interp = traits.Range(low=0, high=7, field='roptions.interp',
desc='degree of b-spline used for interpolation')
"""An optical ray tracing module with a function for dealing with spherical
surfaces as well as planar surfaces, and also with opaque detector surfaces.
Module is also capable of plotting ray paths and surfaces, tgogether with an
illustration of the distribution of rays at the output. Function can be used to
evaluate the image quality of a give optical design, and a function for
optimising the image quality by adjusting radii of survature."""
USER = "<NAME>"
USER_ID = "bbvw84"
import numpy
import matplotlib.pyplot as pyplot
import matplotlib.path as pypath
import matplotlib.patches as patches
import matplotlib.hatch as hatch
import sys
#Mini-project original code
def refraction_2d (incident_rays, planar_surface):
'''A function which can calculate the location of a refracted ray and also
the angle the refracted ray makes with the vertical. Inputs required are
arrays for both the incident rays and the planar surface.'''
#Working out the angle the plane is tilted at:
vertical_1 = planar_surface[0] - planar_surface[2]
horizontal_1 = planar_surface[1] - planar_surface[3]
angle_plane = numpy.arctan((vertical_1/horizontal_1))
#print ap
#gradients
#plane
gradient_plane = (planar_surface[3] - planar_surface[1])/(planar_surface[2] - planar_surface[0])
#incident ray
gradient_ray = numpy.tan((numpy.pi / 2) - incident_rays[:,2])
#y-intercepts
#planar equation
intercept_plane = (- planar_surface[0] * gradient_plane) + planar_surface[1]
#incident ray
intercept_ray = (- incident_rays[:,0] * gradient_ray) + incident_rays[:,1]
#point of intersection with plane
#x-coord
x_intersect_plane = (intercept_plane - intercept_ray)/(gradient_ray - gradient_plane)
#y-coord
y_intersect_plane = ((intercept_ray * gradient_plane) - (intercept_plane * gradient_ray)) / (gradient_plane - gradient_ray)
#angle of incidence - corrected
angle_incidence = incident_rays[:,2] - (numpy.pi / 2) - angle_plane
numpy.seterr(all = 'ignore') #ignore those pesky runtime errors
critical_angle = numpy.arcsin((planar_surface[5] / planar_surface[4]))
#Critical angle function - corrected
if planar_surface[5] >= planar_surface[4]:
tir_possible = False
else:
tir_possible = True
#function terminator if the angles given are greater than the critical angle
if tir_possible and numpy.any(angle_incidence >= critical_angle):
raise Exception("at least one incident ray exceeds the critical angle")
#else:
#print "Incident angles are valid, function will proceed."
#Corrected code for the angle of refraction
refracted_angles = numpy.arcsin(planar_surface[4]*numpy.sin(angle_incidence)/planar_surface[5]) # Snell's law
refracted_angles = refracted_angles + (numpy.pi/2.0) + angle_plane # transform output ray angles to be clockwise from vertical
#Outputting final array for refracted rays
final_ray = numpy.array([x_intersect_plane, y_intersect_plane, refracted_angles]) #the rays are shown vertically
final_ray = numpy.rot90(final_ray, 3) #next two lines of code are to make it look the same as the initial incident rays array
refracted_rays = numpy.fliplr(final_ray)
"""
print "refracted_rays: arse"
print refracted_rays
"""
return refracted_rays
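# Example usage (array layouts inferred from the indexing above):
# incident_rays: one row per ray -> [x, y, angle clockwise from vertical]
# planar_surface: [x1, y1, x2, y2, n_incident, n_refracted]
# rays = numpy.array([[0.0, 0.5, numpy.pi / 2], [0.0, -0.5, numpy.pi / 2]])
# surface = numpy.array([4.0, 1.0, 5.0, -1.0, 1.0, 1.5])
# out = refraction_2d(rays, surface)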
#Task 1
def refraction_2d_sph (incident_rays, spherical_surface):
'''A function which can calculate the location of a ray refracted at a
spherical surface and the angle the refracted ray makes with the vertical.
Inputs required are arrays for both the incident rays and the spherical surface.'''
#gradients
#incident ray
gradient_ray = numpy.tan((numpy.pi / 2.0) - incident_rays[:,2])
#print "gradient_ray", gradient_ray
#y-intercepts
#incident ray
intercept_ray = (incident_rays[:,1] - (gradient_ray * incident_rays[:, 0]))
#print "intercept_ray", intercept_ray
numpy.seterr(all = 'ignore') #ignore those pesky runtime errors
critical_angle = numpy.arcsin((spherical_surface[5] / spherical_surface[4]))
#Critical angle function - corrected
if spherical_surface[5] >= spherical_surface[4]:
tir_possible = False
else:
tir_possible = True
#(the incidence angles are only known once the surface normal is found,
#so the critical-angle check itself is performed further down)
#Calculating the centre of the circle
if spherical_surface[6] > 0:
circle_centre_c = spherical_surface[2] + numpy.sqrt((spherical_surface[6] ** 2) - (spherical_surface[1] ** 2))
refraction_2d_sph.ccc = circle_centre_c
ccc = circle_centre_c
#print "circle_centre_c", circle_centre_c
else:
ss = spherical_surface
circle_centre_c = ss[2] + (ss[6] / numpy.abs(ss[6])) * numpy.sqrt(((ss[6] ** 2) - (ss[1] ** 2)))
refraction_2d_sph.ccc = circle_centre_c
ccc = circle_centre_c
#Working out the point of intersection with surface
#Quadratic for x coordiante
##Determinant calculator
a = (1 + (gradient_ray ** 2))
#print "a", a
b = (2 * intercept_ray * gradient_ray) - (2 * ccc)
#print "b", b
c = (ccc ** 2) + (intercept_ray ** 2) - (spherical_surface[6] ** 2)
#print "c", c
#Actual Quadratic - using if statement if concace or convex lens
if spherical_surface[6] > 0:
intercept_plane_x = (- b - numpy.sqrt((b ** 2) - (4 * a * c))) / (2 * a)
#print "intercept_plane_x", intercept_plane_x
else:
intercept_plane_x = (- b + numpy.sqrt((b ** 2) - (4 * a * c))) / (2 * a)
#y coordiante
intercept_plane_y = (intercept_plane_x * gradient_ray) + intercept_ray
#print "intercept_plane_y", intercept_plane_y
#Angle of refraction calculations
##Working out a surface normal to the circle at point of intersection
###Gradient from point of intersection to centre of the circle
gradient_radius = (0 - intercept_plane_y) / (ccc - intercept_plane_x)
#print "gradient_radius", gradient_radius
###Gradient normal
gradient_normal = - 1 / gradient_radius
#print "gradient_normal", gradient_normal
###Angle of the normal "plane"
angle_normalplane = numpy.arctan(1/gradient_normal)
#angle_normalplace = numpy.arctan(- 1 / ((ccc - intercept_plane_x)/(0 - intercept_plane_y)))
#numpy.arctan((planar_surface[2]-planar_surface[0])/(planar_surface[3]-planar_surface[1]))
#print "angle_normalplane", angle_normalplane
##Angle refraction calculation now
angle_incidence = incident_rays[:,2] - (numpy.pi / 2) - angle_normalplane
#print "angle_incidence", angle_incidence
#function terminator if any incidence angle exceeds the critical angle
if tir_possible and numpy.any(angle_incidence >= critical_angle):
raise Exception("at least one incident ray exceeds the critical angle")
angle_refracted = numpy.arcsin((spherical_surface[4]*numpy.sin(angle_incidence))/spherical_surface[5])
#print "angle_refracted", angle_refracted
#print angle_refracted
angle_refracted = angle_refracted + (numpy.pi/2.0) + angle_normalplane
#print "angle_refracted", angle_refracted
#Outputting final array for refracted rays
final_ray = numpy.array([intercept_plane_x, intercept_plane_y, angle_refracted]) #the rays are shown vertically
#Next two lines of code are to make it look the same as the initial incident rays array
final_ray = numpy.rot90(final_ray, 3)
refracted_rays = numpy.fliplr(final_ray)
"""
print "refracted_rays:"
print refracted_rays
"""
return refracted_rays
#Task 2
def refraction_2d_det (incident_rays, x_det):
'''A function which propagates incident rays to the planar detector surface
at x = x_det and returns their points of intersection with it. Inputs
required are an array of incident rays and the detector x coordinate.'''
#Creating the final array
refracted_rays = numpy.zeros(incident_rays.shape, dtype=float)
#gradients
#incident ray
gradient_ray = numpy.tan((numpy.pi/2.0) - incident_rays[:,2])
#y-intercepts
#incident ray
intercept_ray = incident_rays[:,1] - (gradient_ray * incident_rays[:, 0])
#point of intersection with plane
#y-coord
y_intersect_plane = (gradient_ray * x_det) + intercept_ray
#print y_intersect_plane
#Outputting final array for refracted rays
refracted_rays[:,0] = x_det
refracted_rays[:,1] = y_intersect_plane
"""
print "refracted_rays:"
print refracted_rays
"""
return refracted_rays
#Task 3
def trace_2d (incident_rays, surface_list):
"""Work"""
#Final array storage
refracted_ray_paths = numpy.zeros((len(surface_list), len(incident_rays), 3), dtype=float)
#print len(surface_list)
refracted_ray_paths[0] = incident_rays
#print refracted_ray_paths
sl = surface_list
#number_times was used as a code verifier i.e. is it running?
number_times = 0
#For loop which goes through every surface specified by surface_list
for i in range(len(surface_list)):
#print i
#print len(i)
#surface_type = i[:][0]
surface_type = surface_list[i][0]
#print surface_type
surface_information = surface_list[i][1]
#print surface_information
#print i[1]
#Making sure the code pulls the previous ray
if i == 0 :
incident_rays = incident_rays
else:
incident_rays = refracted_ray_paths[i-1]
if surface_type == 'PLA': #Planar surface
refracted_ray_paths[i] = refraction_2d(incident_rays,surface_list[i][1])
#print refracted_ray_paths
#print len(i)
#print "success PLA"
number_times = number_times + 1
if surface_type == 'SPH': #Spherical surface
refracted_ray_paths[i] = refraction_2d_sph(incident_rays,surface_list[i][1])
#print refracted_ray_paths
#print len(i)
#print "success SPH"
number_times = number_times + 1
if surface_type == 'DET': #Detector surface
refracted_ray_paths[i] = refraction_2d_det(incident_rays,surface_list[i][1])
#print refracted_ray_paths
#print len(i)
#print "success DET"
number_times = number_times + 1
break #a detector surface terminates the trace
#print "hey look", number_times
"""
else:
if surface_type == 'SPH':
print i[:][0]
refraction_2d_sph(incident_rays,surface_information)
print "success2"
number_times = number_times + 1
"""
#print surface_type
#print surface_information
#print "arse"
#while surface_type != 'DET':
#if len(surface_list) > 1:
#length = len(surface_list)
#print length
#print refracted_ray_paths
#print this_mod(refracted_rays)
#refracted_ray_paths = numpy.zeros(refracted_rays.shape, dtype=float)
return refracted_ray_paths
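# Example surface_list (format inferred from the dispatch above): a list of
# (type, parameters) pairs where type is 'PLA', 'SPH' or 'DET'; a 'DET' entry
# takes just the detector x coordinate and terminates the trace, e.g.
# surfaces = [('SPH', sph_params), ('PLA', pla_params), ('DET', 25.0)]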
#Task 4
def plot_trace_2d (incident_rays, refracted_ray_paths, surface_list):
"""work"""
#Making sure things are defined properly
refracted_ray_paths = trace_2d(incident_rays,surface_list)
rrp = refracted_ray_paths
print rrp
surface_list = surface_list
fig1 = pyplot.figure()
#Plotting the various surfaces and ray paths
for i in range(len(surface_list)):
if surface_list[i][0] == 'SPH': #Plotting spherical surface
#print surface_list[i][1][6]
#ax1 = fig1.add_subplot(111, aspect='equal')
"""
ax1.add_patch(
patches.Circle(
(refraction_2d_sph.ccc, 0), # (x,y)
surface_list[i][1][6], # radius
fill = False
)
)"""
#print surface_list[i][1][0], "asdjhkasnd"
ss = surface_list[i][1]
circle_centre_c = ss[2] + (ss[6] / numpy.abs(ss[6])) * numpy.sqrt(((ss[6] ** 2) - (ss[1] ** 2)))
refraction_2d_sph.ccc = circle_centre_c
#ax2 = fig1.add_subplot(111, aspect='equal')
x = numpy.linspace((circle_centre_c - surface_list[i][1][6]),surface_list[i][1][0],100)
y = numpy.sqrt((surface_list[i][1][6] ** 2) - ((x - circle_centre_c) ** 2))
pyplot.plot(refraction_2d_sph.ccc, 0)
#print refraction_2d_sph.ccc
#print surface_list[i][1][6]
            pyplot.plot(x, (numpy.sqrt((surface_list[i][1][6] ** 2) - ((x - refraction_2d_sph.ccc) ** 2))))
###############################################################
# ubervotebot is a bot made for Telegram and was written by
# <NAME>. It helps you manage polls and show the
# results in a variety of formats. This project was built
# ontop of @yukuku's telebot project.
###############################################################
import StringIO
import json
import logging
import random
import math
import urllib
import urllib2
# for sending images
from PIL import Image, ImageDraw, ImageFont
import multipart
# standard app engine imports
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
import webapp2
with open('TOKEN') as f:
TOKEN = f.read()
BASE_URL = 'https://api.telegram.org/bot' + TOKEN + '/'
# ================================
STATE_DEFAULT = None
STATE_CREATE_POLL_CHOOSE_QUESTION = 'CREATE_POLL_CHOOSE_QUESTION'
STATE_CREATE_POLL_ADD_ANSWER = 'CREATE_POLL_ADD_ANSWER'
STATE_CREATE_POLL_CHOOSE_NUMBER_OF_ANSWERS = 'CREATE_POLL_CHOOSE_NUMBER_OF_ANSWERS'
STATE_DELETE_POLL = 'DELETE_POLL'
STATE_DELETE_POLL_CONFIRM = 'DELETE_POLL_CONFIRM'
STATE_RESULT_CHOOSE_POLL = 'RESULT_CHOOSE_POLL'
STATE_RESULT_CHOOSE_TYPE = 'RESULT_CHOOSE_TYPE'
RESULT_TYPE_LIST = 'list names'
RESULT_TYPE_NUMBERS = 'only count votes'
RESULT_TYPE_GRID = 'grid (like a doodle)'
RESULT_TYPE_BARS = 'bars'
# ================================
class User(ndb.Model):
id = ndb.IntegerProperty()
name = ndb.StringProperty()
surname = ndb.StringProperty()
activePoll = ndb.StringProperty() # the poll id the user is modifying at the moment
activeState = ndb.StringProperty() # what operation is the user currently in
polls = ndb.TextProperty() # stores polls and answers in json: [{...},{...}]
def init(self):
# load
if not self.polls:
self.polls_arr = []
else:
self.polls_arr = json.loads(self.polls)
@classmethod
def get(cls, user_obj=None, id=None):
'''user_obj is the telegram user object that will get used for the id, and when a new user is created.
Use id alternatively'''
if user_obj:
u = cls.query().filter(ndb.GenericProperty('id') == user_obj.get('id')).get()
if not u:
                u = User(name=user_obj.get('first_name'), id=user_obj.get('id'), surname=user_obj.get('last_name')) # Telegram user objects expose the family name as 'last_name'
u.init()
return u
elif id:
u = cls.query().filter(ndb.GenericProperty('id') == id).get()
if u:
u.init()
return u
# nothing was found or could be created
return None
@classmethod
def create_random_poll_id(cls):
o = []
while len(o) < 5:
c = random.randrange(ord('A'), ord('Z') + 1)
o.append(chr(c))
return ''.join(o)
def create_valid_poll_id(self):
'''Generates poll ids until we have found a valid one'''
taken_ids = list(map(lambda x: x.get('id'), self.polls_arr))
next_id = User.create_random_poll_id()
while next_id in taken_ids:
next_id = User.create_random_poll_id()
return next_id
# Find an existing poll
def get_poll(self, id):
for poll in self.polls_arr:
if poll.get('id') == id:
return poll
return None
def get_active_poll(self):
return self.get_poll(self.activePoll)
def delete_active_poll(self):
if self.activePoll:
self.polls_arr.remove(self.get_active_poll())
self.activePoll = None
def get_active_poll_answers(self):
return self.get_active_poll()['answers']
def get_name(self):
'''Pretty print name'''
o = self.name
if self.surname:
o += ' ' + self.surname
return o
# Create and store a new poll
def new_poll(self):
poll = {'id': self.create_valid_poll_id()}
# Initialize arrays, so we can append stuff later
poll['answers'] = []
poll['answered'] = []
poll['owner'] = self.id
self.polls_arr.append(poll)
return poll
def serialize(self):
self.polls = json.dumps(self.polls_arr)
self.put()
# ================================
class MeHandler(webapp2.RequestHandler):
def get(self):
urlfetch.set_default_fetch_deadline(60)
self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getMe'))))
class GetUpdatesHandler(webapp2.RequestHandler):
def get(self):
urlfetch.set_default_fetch_deadline(60)
self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getUpdates'))))
class SetWebhookHandler(webapp2.RequestHandler):
def get(self):
urlfetch.set_default_fetch_deadline(60)
url = self.request.get('url')
if url:
self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'setWebhook', urllib.urlencode({'url': url})))))
class WebhookHandler(webapp2.RequestHandler):
def post(self):
urlfetch.set_default_fetch_deadline(60)
body = json.loads(self.request.body)
logging.info('request body:')
logging.info(body)
self.response.write(json.dumps(body))
update_id = body['update_id']
# Return an inline keyboard for a poll
def get_poll_inline_keyboard(poll, share_button=False):
keys = '[]'
if poll['answers']:
keys = '['
# iterate over answers
for i in range(len(poll['answers'])):
answer = poll['answers'][i]
data = str(poll['owner']) + ';' + str(poll['id']) + ';' + str(i)
# Count how often answer at index i was voted for
voted = 0
for user_answer in poll['answered']:
if user_answer['chosen_answers'] >> i & 1:
voted += 1
keys += '[{"text": "'+answer+' - '+str(voted)+'", "callback_data": "'+data+'"}],'
if share_button:
keys += '[{"text": "share", "switch_inline_query": "'+poll.get('id')+'"}],'
keys = keys[:-1] + ']' # removes the last comma
return '{"inline_keyboard": '+keys+'}'
def telegram_method(name, keyvalues):
# encode strings
encoded = {}
for key in keyvalues:
encoded[key] = keyvalues[key].encode('utf-8')
try:
resp = urllib2.urlopen(BASE_URL + name, urllib.urlencode(encoded)).read()
logging.info(name+' response:')
logging.info(resp)
except Exception, e:
logging.warn(e)
def send_image(img, chat_id, caption=''):
resp = multipart.post_multipart(BASE_URL + 'sendPhoto', [
('chat_id', str(chat_id)),
('caption', caption),
('reply_markup', '{"hide_keyboard": true}')
], [
('photo', 'image.png', img),
])
def count_binary_ones(n):
ones = 0
# number is 0 -> no bits to check
if n == 0:
return 0
# max number of bits we need to check: int(math.log(n, 2))+1
for i in range(int(math.log(n, 2))+1):
if n >> i & 1:
ones += 1
return ones
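        # e.g. count_binary_ones(0b1011) == 3, since bits 0, 1 and 3 are set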
# HANDLE INLINE QUERY
if 'inline_query' in body:
query = body['inline_query']
inline_query_id = query['id']
def send_inline_query_poll_result(poll):
infos = {
'inline_query_id': str(inline_query_id),
'switch_pm_text': 'Create new poll',
'switch_pm_parameter': 'new'
}
if poll:
infos['results'] = '[{"type": "article", "id": "'+poll.get('id')+'", "title": "Click here to send poll", "description": "'+poll['question']+'", "thumb_url": "https://raw.githubusercontent.com/haselkern/ubervotebot/master/gfx/botpic.png", "input_message_content": {"message_text": "'+poll['question']+'"}, "reply_markup": '+get_poll_inline_keyboard(poll)+'}]'
telegram_method('answerInlineQuery', infos)
# find User
user = User.get(query['from'])
user.serialize()
# find poll
query_str = query['query']
poll = user.get_poll(query_str)
send_inline_query_poll_result(poll)
# HANDLE CALLBACK_QUERY (from inline keyboards)
elif 'callback_query' in body:
# to send an update we need: (message_id and chat_id) or (inline_message_id)
inline_message_id = None
try:
message = body['callback_query']['message']
message_id = message.get('message_id')
chat_id = message['chat'].get('id')
except:
inline_message_id = body['callback_query'].get('inline_message_id')
data = body['callback_query']['data']
user = User.get(body['callback_query']['from'])
user.serialize()
# sends a short status that the user will see for a few seconds on the top of the screen
def ticker(msg):
telegram_method('answerCallbackQuery', {
'callback_query_id': str(body['callback_query']['id']),
'text': msg
})
def update_keyboard(poll):
# only show a share button in the chat with the bot
share_button = not 'inline_message_id' in body['callback_query']
infos = {
'text': poll['question'],
'reply_markup': get_poll_inline_keyboard(poll, share_button)
}
if inline_message_id:
infos['inline_message_id'] = inline_message_id
else:
infos['chat_id'] = str(chat_id)
infos['message_id'] = str(message_id)
telegram_method('editMessageText', infos)
data = data.split(';')
data[0] = int(data[0])
data[2] = int(data[2])
try:
# find user the poll belongs to
poll_owner = User.get(id=data[0])
# find poll object
poll = poll_owner.get_poll(data[1])
if not poll:
ticker('This poll is no longer active')
return
# get user answer
user_answer = None
for ua in poll['answered']:
if ua.get('user_id') == user.id:
user_answer = ua
if not user_answer:
# append new user
user_answer = {'user_id': user.id, 'chosen_answers': 0}
poll['answered'].append(user_answer)
# chosen_answers is an integer where the bits represent if an answer was chosen or not.
# the rightmost bit represents the answer with index 0
# old answers
ua = user_answer['chosen_answers']
# toggled bit, represents new answers
ua_next = ua ^ (1 << data[2])
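                # e.g. chosen_answers == 0b101 means answers 0 and 2 are chosen;
                # toggling answer 1 gives 0b101 ^ (1 << 1) == 0b111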
# too many answers
if count_binary_ones(ua_next) > poll['max_answers']:
ticker('You cannot select more than ' + str(poll['max_answers']) + ' answers.')
# everything okay, save
else:
user_answer['chosen_answers'] = ua_next
# send feedback
selected_answer = poll['answers'][data[2]]
if ua_next > ua:
ticker('You voted for: ' + selected_answer)
else:
ticker('You took your vote back.')
# update poll display
update_keyboard(poll)
# save poll
poll_owner.serialize()
except Exception, e:
# This exception occurs when we send an update that doesn't change the message or its keyboard
# (or something unforeseen happens)
logging.exception(e)
elif 'chosen_inline_result' in body:
# whatever this is, probably something important
pass
# HANDLE MESSAGES AND COMMANDS
else:
try:
message = body['message']
except:
logging.error('No message found on body: ' + str(body))
return
message_id = message.get('message_id')
date = message.get('date')
text = message.get('text')
fr = message['from']
chat = message['chat']
chat_id = chat['id']
if not text:
logging.info('no text')
return
def reply(msg, keyboard='{"hide_keyboard": true}'):
telegram_method('sendMessage', {
'chat_id': str(chat_id),
'text': msg,
'disable_web_page_preview': 'true',
'reply_markup': keyboard
})
def send_action_photo():
'''Sets status "sending picture" for this bot.'''
telegram_method('sendChatAction', {
'chat_id': str(chat_id),
'action' : 'upload_photo'
})
# get User
user = User.get(fr)
def get_polls_keyboard():
keys = '['
for poll in user.polls_arr:
s = poll['id'] + ": " + poll['question']
keys += '["'+s+'"],'
keys = keys[:-1] + ']'
return '{"keyboard": '+keys+', "one_time_keyboard": true, "resize_keyboard": true}'
if user.activeState == STATE_DEFAULT:
if text == '/start':
# show help
with open("help.txt", "r") as f:
reply(f.read())
elif text == '/new' or text == '/start new':
reply('Okay, a new poll. What should the question be?')
user.activeState = STATE_CREATE_POLL_CHOOSE_QUESTION
elif text == '/delete':
if len(user.polls_arr) > 0:
reply('Choose a poll to delete or /cancel', keyboard=get_polls_keyboard())
user.activeState = STATE_DELETE_POLL
else:
reply('You have no polls to delete.')
elif text == '/results':
if len(user.polls_arr) > 0:
reply('Choose a poll to show results from or /cancel', keyboard=get_polls_keyboard())
user.activeState = STATE_RESULT_CHOOSE_POLL
else:
reply('You have no polls you could show results from. Create one with /new')
else:
# show help
with open('help.txt', 'r') as f:
reply(f.read())
elif user.activeState == STATE_RESULT_CHOOSE_POLL:
if text == '/cancel':
user.activeState = STATE_DEFAULT
reply('Okay, no results will be shown.')
elif text.startswith('/'):
            reply('Unrecognized command. Choose a poll or /cancel')
# config_system/generator/generate.py
# Copyright (c) 2015 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import sys
import os
sys.path.insert(1, os.path.join(os.path.dirname(__file__), '../utils'))
import argparse
import json
import re
import string
import config_reader
import selection
import function_defined
import assign_func
import rich_template
import file_utils
IDENTIFIER_REGEX = '\\A[A-Za-z][A-Za-z0-9_]{0,127}\\Z'
class GenState(object):
def __init__ (self):
self._subst = {}
self._config_options = []
self._constants = []
self._platform_includes = []
self._aprinter_includes = set()
self._objects = {}
self._singleton_objects = {}
self._finalize_actions = []
self._global_code = []
self._init_calls = []
self._final_init_calls = []
self._global_resources = []
self._modules_exprs = []
self._extra_sources = []
self._need_millisecond_clock = False
self._have_hw_millisecond_clock = False
self._defines = []
self._include_dirs = []
def add_subst (self, key, val, indent=-1):
self._subst[key] = {'val':val, 'indent':indent}
def add_config (self, name, dtype, value, is_constant=False, is_complex=False):
properties = []
if is_constant:
properties.append('ConfigPropertyConstant')
properties_str = 'ConfigProperties<{}>'.format(', '.join(properties))
if dtype == 'double':
config_option_str = 'APRINTER_CONFIG_OPTION_DOUBLE({}, {}, {})'.format(name, value, properties_str)
elif is_complex:
config_option_str = 'APRINTER_CONFIG_OPTION_COMPLEX({}, {}, APRINTER_WRAP_COMPLEX_VALUE({}, {}), {})'.format(name, dtype, dtype, value, properties_str)
else:
config_option_str = 'APRINTER_CONFIG_OPTION_SIMPLE({}, {}, {}, {})'.format(name, dtype, value, properties_str)
self._config_options.append(config_option_str)
return name
def add_float_config (self, name, value, **kwargs):
return self.add_config(name, 'double', format_cpp_float(value), **kwargs)
def add_bool_config (self, name, value, **kwargs):
return self.add_config(name, 'bool', 'true' if value else 'false', **kwargs)
def add_mac_addr_config (self, name, value, **kwargs):
assert len(value) == 6
val_str = '(ConfigTypeMacAddress{{{{{}}}}})'.format(', '.join('0x{:02x}'.format(x) for x in value))
return self.add_config(name, 'ConfigTypeMacAddress', val_str, is_complex=True, **kwargs)
def add_ip_addr_config (self, name, value, **kwargs):
assert len(value) == 4
val_str = '(ConfigTypeIpAddress{{{{{}}}}})'.format(', '.join('{}'.format(x) for x in value))
return self.add_config(name, 'ConfigTypeIpAddress', val_str, is_complex=True, **kwargs)
def add_float_constant (self, name, value):
self._constants.append({'type':'using', 'name':name, 'value':'AMBRO_WRAP_DOUBLE({})'.format(format_cpp_float(value))})
return name
def add_typedef (self, name, value):
self._constants.append({'type':'using', 'name':name, 'value':value})
return name
def add_int_constant (self, dtype, name, value):
if dtype == 'int':
c_type = 'int'
c_init = str(value)
else:
m = re.match('\\A(u?)int(8|16|32|64)\\Z', dtype)
assert m
u = m.group(1)
b = m.group(2)
c_type = '{}_t'.format(dtype)
c_init = '{}INT{}_C({})'.format(u.upper(), b, value)
self._constants.append({
'type': 'static {} const'.format(c_type),
'name': name,
'value': c_init,
})
return name
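        # e.g. (hypothetical call) add_int_constant('uint32', 'MaxSteps', 7)
        # registers the constant: static uint32_t const MaxSteps = UINT32_C(7);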
def add_platform_include (self, inc_file):
self._platform_includes.append(inc_file)
def add_include (self, inc_file):
self._aprinter_includes.add(inc_file)
def add_aprinter_include (self, inc_file):
self.add_include('aprinter/'+inc_file)
def register_objects (self, kind, config, key):
if kind not in self._objects:
self._objects[kind] = {}
for obj_config in config.iter_list_config(key, max_count=20):
name = obj_config.get_string('Name')
if name in self._objects[kind]:
obj_config.path().error('Duplicate {} name'.format(kind))
self._objects[kind][name] = obj_config
def get_object (self, kind, config, key):
name = config.get_string(key)
if kind not in self._objects or name not in self._objects[kind]:
config.key_path(key).error('Nonexistent {} specified'.format(kind))
return self._objects[kind][name]
def register_singleton_object (self, kind, value):
assert kind not in self._singleton_objects
self._singleton_objects[kind] = value
return value
def get_singleton_object (self, kind, allow_none=False):
have = kind in self._singleton_objects
assert allow_none or have
return self._singleton_objects[kind] if have else None
def add_global_code (self, priority, code):
self._global_code.append({'priority':priority, 'code':code})
def add_isr (self, isr):
self.add_global_code(-1, isr)
def add_init_call (self, priority, init_call):
self._init_calls.append({'priority':priority, 'init_call':init_call})
def add_final_init_call (self, priority, init_call):
self._final_init_calls.append({'priority':priority, 'init_call':init_call})
def add_finalize_action (self, action):
self._finalize_actions.append(action)
def add_global_resource (self, priority, name, expr, context_name=None, code_before=None, code_before_program=None,
extra_program_child=None, is_fast_event_root=False, use_instance=False):
code = ''
if code_before is not None:
code += '{}\n'.format(code_before)
if use_instance:
code += 'APRINTER_MAKE_INSTANCE({}, ({}))\n'.format(name, expr.build(indent=0))
else:
code += 'using {} = {};\n'.format(name, expr.build(indent=0))
self._global_resources.append({
'priority': priority,
'name': name,
'context_name':context_name,
'code': code,
'code_before_program': code_before_program,
'extra_program_child': extra_program_child,
'is_fast_event_root': is_fast_event_root,
})
def add_module (self):
index = len(self._modules_exprs)
self._modules_exprs.append(None)
return GenPrinterModule(self, index)
def add_extra_source (self, base, path):
self._extra_sources.append({'base': base, 'path': path})
def set_need_millisecond_clock (self):
self._need_millisecond_clock = True
def set_have_hw_millisecond_clock (self):
self._have_hw_millisecond_clock = True
def add_define (self, name, value=''):
self._defines.append({'name': name, 'value': str(value)})
def add_include_dir (self, base, path):
self._include_dirs.append({'base': base, 'path': path})
def finalize (self):
for action in reversed(self._finalize_actions):
action()
for so in self._singleton_objects.itervalues():
if hasattr(so, 'finalize'):
so.finalize()
global_resources = sorted(self._global_resources, key=lambda x: x['priority'])
program_children = []
program_children.extend(gr['name'] for gr in global_resources)
program_children.extend(gr['extra_program_child'] for gr in global_resources if gr['extra_program_child'] is not None)
self.add_subst('GENERATED_WARNING', 'WARNING: This file was automatically generated!')
self.add_subst('EXTRA_CONSTANTS', ''.join('{} {} = {};\n'.format(c['type'], c['name'], c['value']) for c in self._constants))
self.add_subst('ConfigOptions', ''.join('{}\n'.format(c) for c in self._config_options))
self.add_subst('PLATFORM_INCLUDES', ''.join('#include <{}>\n'.format(inc) for inc in self._platform_includes))
self.add_subst('AprinterIncludes', ''.join('#include <{}>\n'.format(inc) for inc in sorted(self._aprinter_includes)))
self.add_subst('GlobalCode', ''.join('{}\n'.format(gc['code']) for gc in sorted(self._global_code, key=lambda x: x['priority'])))
self.add_subst('InitCalls', ''.join(' {}\n'.format(ic['init_call']) for ic in sorted(self._init_calls, key=lambda x: x['priority'])))
self.add_subst('GlobalResourceExprs', ''.join(gr['code'] for gr in global_resources))
self.add_subst('GlobalResourceContextAliases', ''.join(' using {} = ::{};\n'.format(gr['context_name'], gr['name']) for gr in global_resources if gr['context_name'] is not None))
self.add_subst('GlobalResourceProgramChildren', ',\n'.join(' {}'.format(pc_name) for pc_name in program_children))
self.add_subst('GlobalResourceInit', ''.join(' {}::init(c);\n'.format(gr['name']) for gr in global_resources))
self.add_subst('FinalInitCalls', ''.join(' {}\n'.format(ic['init_call']) for ic in sorted(self._final_init_calls, key=lambda x: x['priority'])))
self.add_subst('CodeBeforeProgram', ''.join('{}\n'.format(gr['code_before_program']) for gr in global_resources if gr['code_before_program'] is not None))
def get_subst (self):
res = {}
for (key, subst) in self._subst.iteritems():
val = subst['val']
indent = subst['indent']
res[key] = val if type(val) is str else val.build(indent)
return res
class GenPrinterModule(object):
def __init__ (self, gen, index):
self._gen = gen
self._index = index
@property
def index (self):
return self._index
def set_expr (self, expr):
self._gen._modules_exprs[self._index] = expr
class GenConfigReader(config_reader.ConfigReader):
def get_int_constant (self, key):
return str(self.get_int(key))
def get_bool_constant (self, key):
return 'true' if self.get_bool(key) else 'false'
def get_float_constant (self, key):
return format_cpp_float(self.get_float(key))
def get_identifier (self, key, validate=None):
val = self.get_string(key)
if not re.match(IDENTIFIER_REGEX, val):
self.key_path(key).error('Incorrect format.')
if validate is not None and not validate(val):
self.key_path(key).error('Custom validation failed.')
return val
def get_id_char (self, key):
val = self.get_string(key)
if val not in string.ascii_uppercase:
self.key_path(key).error('Incorrect format.')
return val
def get_mac_addr (self, key):
val = self.get_string(key)
br = '([0-9A-Fa-f]{1,2})'
mac_re = '\\A{}:{}:{}:{}:{}:{}\\Z'.format(br, br, br, br, br, br)
m = re.match(mac_re, val)
if not m:
self.key_path(key).error('Incorrect format.')
return [int(m.group(i), 16) for i in range(1, 7)]
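    # e.g. get_mac_addr on '00:1A:2b:3C:4d:5E' yields [0, 26, 43, 60, 77, 94]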
def get_ip_addr (self, key):
val = self.get_string(key)
br = '([0-9]{1,3})'
ip_re = '\\A{}\\.{}\\.{}\\.{}\\Z'.format(br, br, br, br)
m = re.match(ip_re, val)
if not m:
self.key_path(key).error('Incorrect format A.')
ints = [int(m.group(i), 10) for i in range(1, 5)]
if any(d > 255 for d in ints):
self.key_path(key).error('Incorrect format B.')
return ints
def do_selection (self, key, sel_def):
for config in self.enter_config(key):
try:
result = sel_def.run(config.get_string('_compoundName'), config)
except selection.SelectionError:
config.path().error('Unknown choice.')
return result
def do_list (self, key, elem_cb, min_count=-1, max_count=-1):
elems = []
for (i, config) in enumerate(self.iter_list_config(key, min_count=min_count, max_count=max_count)):
elems.append(elem_cb(config, i))
return TemplateList(elems)
def do_keyed_list (self, count, elems_key, elem_key_prefix, elem_cb):
elems = []
elems_config = self.get_config(elems_key)
for i in range(count):
elem_config = elems_config.get_config('{}{}'.format(elem_key_prefix, i))
elems.append(elem_cb(elem_config, i))
return TemplateList(elems)
def do_enum (self, key, mapping):
val = self.get_string(key)
if val not in mapping:
self.key_path(key).error('Incorrect choice.')
return mapping[val]
class TemplateExpr(object):
def __init__ (self, name, args):
self._name = name
self._args = args
def append_arg(self, arg):
self._args.append(arg)
def build (self, indent):
if indent == -1 or len(self._args) == 0:
initiator = ''
separator = ', '
terminator = ''
child_indent = -1
else:
initiator = '\n' + ' ' * (indent + 1)
separator = ',' + initiator
terminator = '\n' + ' ' * indent
child_indent = indent + 1
        return '{}<{}{}{}>'.format(self._name, initiator, separator.join(_build_template_arg(arg, child_indent) for arg in self._args), terminator)
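    # e.g. (hypothetical args, assuming _build_template_arg passes plain strings
    # through) TemplateExpr('List', ['A', 'B']).build(-1) gives 'List<A, B>';
    # with indent=0 each argument is placed on its own indented line instead.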
class=pluginurl align=absmiddle title="%s" src="images/pluginurl.png"></a>' %
(p.group(1).replace('"', ''), p.group(1).replace('"', '')), output)
if output.endswith(" </A>"):
output = output[:-11]
return output
def format_exception():
import traceback
return traceback.format_exc()
# Debug logging directly to the dedicated web GUI log. The log format is
# equal to the cmc.log format. The format is:
# 2015-02-09 11:42:47 [5] Started 20 cmk helpers in 1.105 ms.
# <date> <time> [<lvl>] <msg>
# the levels of the syslog format are used:
# LOG_EMERG 0 /* system is unusable */
# LOG_ALERT 1 /* action must be taken immediately */
# LOG_CRIT 2 /* critical conditions */
# LOG_ERR 3 /* error conditions */
# LOG_WARNING 4 /* warning conditions */
# LOG_NOTICE 5 /* normal but significant condition */
# LOG_INFO 6 /* informational */
# LOG_DEBUG 7 /* debug-level messages */
def logger(level, msg):
if type(msg) == unicode:
msg = msg.encode('utf-8')
elif type(msg) != str:
msg = repr(msg)
log_file = defaults.log_dir + '/web.log'
    file(log_file, 'a').close() # make sure the log file exists before locking
aquire_lock(log_file)
try:
file(log_file, 'a').write('%s [%d] [%d] %s\n' %
(time.strftime('%Y-%m-%d %H:%M:%S'), level, os.getpid(), msg))
finally:
release_lock(log_file)
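# Example (illustrative message): logger(4, 'Invalid filter settings')
# appends a line like:
# 2015-02-09 11:42:47 [4] [1234] Invalid filter settings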
# Escape/strip unwanted chars from (user provided) strings to
# use them in livestatus queries. Prevent injections of livestatus
# protocol related chars or strings
def lqencode(s):
# It is not enough to strip off \n\n, because one might submit "\n \n",
# which is also interpreted as termination of the last query and beginning
# of the next query.
return s.replace('\n', '')
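# Example: lqencode('up\ndown') returns 'updown', so an injected newline can
# no longer terminate the current query early.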
def saveint(x):
try:
return int(x)
except:
return 0
def tryint(x):
try:
return int(x)
except:
return x
def isint(i):
try:
int(i)
return True
except:
return False
def set_is_disjoint(a, b):
for elem in a:
if elem in b:
return False
return True
# Functions for locking files. All locks must be freed if a request
# has terminated (in good or in bad manner). Currently only exclusive
# locks are implemented and they always will wait for ever.
g_aquired_locks = []
g_locked_paths = []
def aquire_lock(path):
if path in g_locked_paths:
return # No recursive locking
    # Create file (and base dir) for locking if not existent yet
make_nagios_directory(os.path.dirname(path))
fd = os.open(path, os.O_RDONLY | os.O_CREAT)
# Handle the case where the file has been renamed in the meantime
while True:
fcntl.flock(fd, fcntl.LOCK_EX)
fd_new = os.open(path, os.O_RDONLY | os.O_CREAT)
if os.path.sameopenfile(fd, fd_new):
os.close(fd_new)
break
else:
os.close(fd)
fd = fd_new
g_aquired_locks.append((path, fd))
g_locked_paths.append(path)
def release_lock(path):
if path not in g_locked_paths:
return # no unlocking needed
for lock_path, fd in g_aquired_locks:
if lock_path == path:
fcntl.flock(fd, fcntl.LOCK_UN)
os.close(fd)
g_aquired_locks.remove((lock_path, fd))
g_locked_paths.remove(path)
def have_lock(path):
return path in g_locked_paths
def release_all_locks():
global g_aquired_locks, g_locked_paths
for path, fd in g_aquired_locks:
os.close(fd)
g_aquired_locks = []
g_locked_paths = []
regex_cache = {}
def regex(r):
rx = regex_cache.get(r)
if rx:
return rx
try:
rx = re.compile(r)
except Exception, e:
raise MKConfigError(_("Invalid regular expression '%s': %s") % (r, e))
regex_cache[r] = rx
return rx
def escape_regex_chars(text):
escaped = ""
for c in text:
if c in '().^$[]{}+*\\':
escaped += '\\'
escaped += c
return escaped
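# Example: escape_regex_chars('a.b*') returns 'a\.b\*'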
# Splits a word into sequences of numbers and non-numbers.
# Creates a tuple from these where the numbers are converted
# to the int datatype. That way a natural sort can be
# implemented.
def num_split(s):
parts = []
for part in re.split('(\d+)', s):
try:
parts.append(int(part))
except ValueError:
parts.append(part)
return tuple(parts)
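# Example: num_split('host10') == ('host', 10, ''), so that
# sorted(['host10', 'host2'], key=num_split) yields ['host2', 'host10']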
def cmp_service_name_equiv(r):
if r == "Check_MK":
return -6
elif r == "Check_MK Agent":
return -5
elif r == "Check_MK Discovery":
return -4
elif r == "Check_MK inventory":
return -3 # FIXME: Remove old name one day
elif r == "Check_MK HW/SW Inventory":
return -2
else:
return 0
def cmp_version(a, b):
if a == None or b == None:
return cmp(a, b)
aa = map(tryint, a.split("."))
bb = map(tryint, b.split("."))
return cmp(aa, bb)
def frexpb(x, base):
exp = int(math.log(x, base))
mantissa = x / base**exp
if mantissa < 1:
mantissa *= base
exp -= 1
return mantissa, exp
def frexp10(x):
return frexpb(x, 10)
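# Example: frexp10(137.56) returns (1.3756, 2), since 137.56 == 1.3756 * 10**2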
def render_scientific(v, precision=3):
if v == 0:
return "0"
elif v < 0:
return "-" + render_scientific(v*-1, precision)
mantissa, exponent = frexp10(float(v))
# Render small numbers without exponent
if exponent >= -3 and exponent <= 4:
return "%%.%df" % max(0, precision - exponent) % v
return "%%.%dfe%%d" % precision % (mantissa, exponent)
# Render a physical value with a precision of p
# digits. Use K (kilo), M (mega), m (milli), µ (micro)
# p is the number of non-zero digits - not the number of
# decimal places.
# Examples for p = 3:
# a: 0.0002234 b: 4,500,000 c: 137.56
# Result:
# a: 223 µ b: 4.50 M c: 138
# Note: if the type of v is integer, then the precision is cut
# down to the precision of the actual number
def physical_precision(v, precision, unit_symbol):
if v < 0:
return "-" + physical_precision(-v, precision, unit_symbol)
scale_symbol, places_after_comma, scale_factor = calculate_physical_precision(v, precision)
scaled_value = float(v) / scale_factor
return (u"%%.%df %%s%%s" % places_after_comma) % (scaled_value, scale_symbol, unit_symbol)
def physical_precision_list(values, precision, unit_symbol):
if not values:
reference = 0
else:
reference = min([ abs(v) for v in values ])
scale_symbol, places_after_comma, scale_factor = calculate_physical_precision(reference, precision)
units = []
scaled_values = []
for value in values:
scaled_value = float(value) / scale_factor
scaled_values.append(("%%.%df" % places_after_comma) % scaled_value)
return "%s%s" % (scale_symbol, unit_symbol), scaled_values
def calculate_physical_precision(v, precision):
if v == 0:
return "", precision - 1, 1
# Splitup in mantissa (digits) an exponent to the power of 10
# -> a: (2.23399998, -2) b: (4.5, 6) c: (1.3756, 2)
mantissa, exponent = frexp10(float(v))
if type(v) == int:
precision = min(precision, exponent + 1)
    # Choose a power where no artificial zero (due to rounding) needs to be
    # placed left of the decimal point.
scale_symbols = {
-5 : "f",
-4 : "p",
-3 : "n",
-2 : u"µ",
-1 : "m",
0 : "",
1 : "K",
2 : "M",
3 : "G",
4 : "T",
5 : "P",
}
scale = 0
while exponent < 0 and scale > -5:
scale -= 1
exponent += 3
# scale, exponent = divmod(exponent, 3)
places_before_comma = exponent + 1
places_after_comma = precision - places_before_comma
while places_after_comma < 0 and scale < 5:
scale += 1
exponent -= 3
places_before_comma = exponent + 1
places_after_comma = precision - places_before_comma
return scale_symbols[scale], places_after_comma, 1000 ** scale
def nic_speed_human_readable(bits_per_second):
if bits_per_second == 10000000:
return "10 Mbit/s"
elif bits_per_second == 100000000:
return "100 Mbit/s"
elif bits_per_second == 1000000000:
return "1 Gbit/s"
elif bits_per_second < 1500:
return "%d bit/s" % bits_per_second
elif bits_per_second < 1000000:
return "%s Kbit/s" % drop_dotzero(bits_per_second / 1000.0, digits=1)
elif bits_per_second < 1000000000:
return "%s Mbit/s" % drop_dotzero(bits_per_second / 1000000.0, digits=2)
else:
return "%s Gbit/s" % drop_dotzero(bits_per_second / 1000000000.0, digits=2)
# Converts a number into a floating point number
# and drop useless zeroes at the end of the fraction
# 45.1 -> "45.1"
# 45.0 -> "45"
def drop_dotzero(v, digits=2):
t = "%%.%df" % digits % v
if "." in t:
return t.rstrip("0").rstrip(".")
else:
return t
# Renders a floating point number with the given number
# of non-zero digits. Example if precision is 3:
# 12.40349034 -> 12.4
# 1.23894859348563478 -> 1.24
# 0.00001239898568978 -> 0.0000124
# 12400000.00230923 -> 12400000
def render_float_with_precision(value, precision):
if value == 0:
return "0"
elif value < 0:
return "-" + render_float_with_precision(-value, precision)
mantissa, exponent = frexp10(float(value))
# exponent + 1 is the number of digits left of the .
# Digits left of . are more than precision -> no fraction.
if exponent + 1 >= precision:
return "%.0f" % value
# Allow so many digits after comma that we have at least 'precision'
# valid non-zero digits
else:
digits = precision - exponent - 1
return "%%.%df" % digits % value
def number_human_readable(n, precision=1, unit="B"):
base = 1024.0
if unit == "Bit":
base = 1000.0
n = float(n)
f = "%." + str(precision) + "f"
if abs(n) > base * base * base:
return (f + "G%s") % (n / (base * base * base), unit)
elif abs(n) > base * base:
return (f + "M%s") % (n / (base * base), unit)
elif abs(n) > base:
return (f + "k%s") % (n / base, unit)
else:
return (f + "%s") % (n, unit)
def percent_human_redable(perc, precision=2, drop_zeroes=True):
    # Assumed completion of a truncated body, reusing drop_dotzero() above
    if drop_zeroes:
        return drop_dotzero(perc, precision) + "%"
    return "%%.%df%%%%" % precision % perc
class HTTPConflict(HTTPError):
    """409 Conflict.
    The 409 Conflict status code indicates that the request could not
    be completed due to a conflict with the current state of the target
    resource. Conflicts are most likely to occur in response to a PUT
    request; in that case, the response representation would likely
    contain information useful for merging the differences based on the
    revision history.
Reference RFC 7231, Section 6.5.8
Keyword Args:
description (str): Human friendly description of the error.
title (str): Error title (default '409 Conflict')
headers (dict): A dict of header names and values to set.
href (str): An href that can be used for more information.
"""
def __init__(self, *args, **kwargs):
super().__init__(409, *args, **kwargs)
class HTTPGone(HTTPError):
"""410 Gone.
The 410 Gone status code indicates that access to the target
resource is no longer available at the origin server and that this
condition is likely to be permanent. If the origin server does not
know, or has no facility to determine, whether or not the condition
is permanent, the status code 404 (Not Found) ought to be used
instead.
The 410 response is primarily intended to assist the task of web
maintenance by notifying the recipient that the resource is
intentionally unavailable and that the server owners desire that
remote links to that resource be removed. Such an event is common
for limited-time, promotional services and for resources belonging to
individuals no longer associated with the origin server's site. It
is not necessary to mark all permanently unavailable resources as
"gone" or to keep the mark for any length of time -- that is left to
the discretion of the server owner.
A 410 response is cacheable by default; i.e., unless otherwise
indicated by the method definition or explicit cache controls (see
Section 4.2.2 of RFC7234).
Reference RFC 7231, Section 6.5.9
Keyword Args:
description (str): Human friendly description of the error.
title (str): Error title (default '410 Gone')
headers (dict): A dict of header names and values to set.
href (str): An href that can be used for more information.
"""
def __init__(self, *args, **kwargs):
super().__init__(410, *args, **kwargs)
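# A minimal usage sketch (the responder and `store` below are illustrative,
# not part of this module):
#
#     def on_get(self, req, resp, item_id):
#         if store.was_permanently_removed(item_id):
#             raise HTTPGone(description='This item was permanently removed.')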
class HTTPLengthRequired(HTTPError):
"""411 Length Required.
The 411 Length Required status code indicates that the server
refuses to accept the request without a defined Content-Length
(Section 3.3.2 of RFC7230). The client MAY repeat the request if
it adds a valid Content-Length header field containing the length of
the message body in the request message.
Reference RFC 7231, Section 6.5.10
Keyword Args:
description (str): Human friendly description of the error.
title (str): Error title (default '411 Length Required')
headers (dict): A dict of header names and values to set.
href (str): An href that can be used for more information.
"""
def __init__(self, description='Content-Length header required.',
**kwargs):
super().__init__(411, description, **kwargs)
class HTTPPreconditionFailed(HTTPError):
"""412 Precondition Failed.
The 412 Precondition Failed status code indicates that one or more
conditions given in the request header fields evaluated to false when
tested on the server. This response code allows the client to place
preconditions on the current resource state (its current
representations and metadata) and, thus, prevent the request method
from being applied if the target resource is in an unexpected state.
Reference RFC 7232, Section 4.2
Keyword Args:
description (str): Human friendly description of the error.
title (str): Error title (default '412 Precondition Failed')
headers (dict): A dict of header names and values to set.
href (str): An href that can be used for more information.
"""
def __init__(self, *args, **kwargs):
super().__init__(412, *args, **kwargs)
class HTTPPayloadTooLarge(HTTPError):
"""413 Payload Too Large.
The 413 Payload Too Large status code indicates that the server is
refusing to process a request because the request payload is larger
than the server is willing or able to process. The server MAY close
the connection to prevent the client from continuing the request.
If the condition is temporary, the server SHOULD generate a
Retry-After header field to indicate that it is temporary and after
what time the client MAY try again.
Reference RFC 7231, Section 6.5.11
Keyword Args:
description (str): Human friendly description of the error.
title (str): Error title (default '413 Payload Too Large')
headers (dict): A dict of header names and values to set.
href (str): An href that can be used for more information.
"""
def __init__(self, *args, **kwargs):
super().__init__(413, *args, **kwargs)
class HTTPUriTooLong(HTTPError):
"""414 URI Too Long.
The 414 URI Too Long status code indicates that the server is
refusing to service the request because the request-target (Section
5.3 of RFC7230) is longer than the server is willing to interpret.
This rare condition is only likely to occur when a client has
improperly converted a POST request to a GET request with long query
information, when the client has descended into a "black hole" of
redirection (e.g., a redirected URI prefix that points to a suffix of
itself) or when the server is under attack by a client attempting to
exploit potential security holes.
Reference RFC 7231, Section 6.5.12
Keyword Args:
description (str): Human friendly description of the error.
title (str): Error title (default '414 URI Too Long')
headers (dict): A dict of header names and values to set.
href (str): An href that can be used for more information.
"""
def __init__(self, *args, **kwargs):
super().__init__(414, *args, **kwargs)
class HTTPUnsupportedMediaType(HTTPError):
"""415 Unsupported Media Type.
The 415 (Unsupported Media Type) status code indicates that the
origin server is refusing to service the request because the payload
is in a format not supported by this method on the target resource.
The format problem might be due to the request's indicated
Content-Type or Content-Encoding, or as a result of inspecting the
data directly.
Reference RFC 7231, Section 6.5.13
Keyword Args:
description (str): Human friendly description of the error.
title (str): Error title (default '415 Unsupported Media Type.')
headers (dict): A dict of header names and values to set.
href (str): An href that can be used for more information.
"""
def __init__(self, *args, **kwargs):
super().__init__(415, *args, **kwargs)
class HTTPRangeNotSatisfiable(HTTPError):
"""416 Range Not Satisfiable.
The 416 (Range Not Satisfiable) status code indicates that none of
the ranges in the request's Range header field (Section 3.1) overlap
the current extent of the selected resource or that the set of ranges
requested has been rejected due to invalid ranges or an excessive
request of small or overlapping ranges.
For byte ranges, failing to overlap the current extent means that the
first-byte-pos of all of the byte-range-spec values were greater than
the current length of the selected representation. When this status
code is generated in response to a byte-range request, the sender
SHOULD generate a Content-Range header field specifying the current
length of the selected representation (Section 4.2).
For example:
HTTP/1.1 416 Range Not Satisfiable
Date: Fri, 20 Jan 2012 15:41:54 GMT
Content-Range: bytes \*/47022
Note: Because servers are free to ignore Range, many
implementations will simply respond with the entire selected
representation in a 200 (OK) response. That is partly because
most clients are prepared to receive a 200 (OK) to complete the
task (albeit less efficiently) and partly because clients might
not stop making an invalid partial request until they have
received a complete representation. Thus, clients cannot depend
on receiving a 416 (Range Not Satisfiable) response even when it
is most appropriate.
Reference RFC 7233, Section 4.4
Keyword Args:
description (str): Human friendly description of the error.
title (str): Error title (default '416 Range Not Satisfiable')
headers (dict): A dict of header names and values to set.
href (str): An href that can be used for more information.
"""
    def __init__(self, resource_length, description=None, title=None,
                 headers=None, **kwargs):
        headers = dict(headers or {})
        headers['Content-Range'] = 'bytes */' + str(resource_length)
        super().__init__(416, description, title, headers, **kwargs)
class HTTPUnprocessableEntity(HTTPError):
"""422 Unprocessable Entity.
The 422 (Unprocessable Entity) status code means the server
understands the content type of the request entity (hence a
415(Unsupported Media Type) status code is inappropriate), and the
syntax of the request entity is correct (thus a 400 (Bad Request)
status code is inappropriate) but was unable to process the contained
instructions. For example, this error condition may occur if an XML
request body contains well-formed (i.e., syntactically correct), but
semantically erroneous, XML instructions.
Reference RFC 4918, Section 11.2
    Keyword Args:
        description (str): Human friendly description of the error.
        title (str): Error title (default '422 Unprocessable Entity')
        headers (dict): A dict of header names and values to set.
        href (str): An href that can be used for more information.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(422, *args, **kwargs)
# coding=utf-8
from __future__ import print_function
import os
from six.moves import xrange as range
import math
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.utils
from torch.autograd import Variable
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from asdl.hypothesis import Hypothesis, GenTokenAction
from asdl.transition_system import ApplyRuleAction, ReduceAction, Action
from common.registerable import Registrable
from components.decode_hypothesis import DecodeHypothesis
from components.action_info import ActionInfo
from components.dataset import Batch
from common.utils import update_args, init_arg_parser
from model import nn_utils
from model.attention_util import AttentionUtil
from model.nn_utils import LabelSmoothing
from model.pointer_net import PointerNet
from transformers import BertTokenizer, BertModel
@Registrable.register('default_parser')
class Parser(nn.Module):
"""Implementation of a semantic parser
The parser translates a natural language utterance into an AST defined under
the ASDL specification, using the transition system described in https://arxiv.org/abs/1810.02720
"""
def __init__(self, args, vocab, transition_system):
super(Parser, self).__init__()
self.args = args
self.vocab = vocab
self.transition_system = transition_system
self.grammar = self.transition_system.grammar
# Embedding layers
# source token embedding
self.src_embed = nn.Embedding(len(vocab.source), args.embed_size)
# embedding table of ASDL production rules (constructors), one for each ApplyConstructor action,
# the last entry is the embedding for Reduce action
self.production_embed = nn.Embedding(len(transition_system.grammar) + 1, args.action_embed_size)
# embedding table for target primitive tokens
self.primitive_embed = nn.Embedding(len(vocab.primitive), args.action_embed_size)
# embedding table for ASDL fields in constructors
self.field_embed = nn.Embedding(len(transition_system.grammar.fields), args.field_embed_size)
# embedding table for ASDL types
self.type_embed = nn.Embedding(len(transition_system.grammar.types), args.type_embed_size)
nn.init.xavier_normal_(self.src_embed.weight.data)
nn.init.xavier_normal_(self.production_embed.weight.data)
nn.init.xavier_normal_(self.primitive_embed.weight.data)
nn.init.xavier_normal_(self.field_embed.weight.data)
nn.init.xavier_normal_(self.type_embed.weight.data)
# LSTMs
if args.lstm == 'lstm':
self.encoder_lstm = nn.LSTM(args.embed_size, int(args.hidden_size / 2), bidirectional=True)
input_dim = args.action_embed_size # previous action
# frontier info
input_dim += args.action_embed_size * (not args.no_parent_production_embed)
input_dim += args.field_embed_size * (not args.no_parent_field_embed)
input_dim += args.type_embed_size * (not args.no_parent_field_type_embed)
input_dim += args.hidden_size * (not args.no_parent_state)
input_dim += args.att_vec_size * (not args.no_input_feed) # input feeding
self.decoder_lstm = nn.LSTMCell(input_dim, args.hidden_size)
elif args.lstm == 'parent_feed':
self.encoder_lstm = nn.LSTM(args.embed_size, int(args.hidden_size / 2), bidirectional=True)
from .lstm import ParentFeedingLSTMCell
input_dim = args.action_embed_size # previous action
# frontier info
input_dim += args.action_embed_size * (not args.no_parent_production_embed)
input_dim += args.field_embed_size * (not args.no_parent_field_embed)
input_dim += args.type_embed_size * (not args.no_parent_field_type_embed)
input_dim += args.att_vec_size * (not args.no_input_feed) # input feeding
self.decoder_lstm = ParentFeedingLSTMCell(input_dim, args.hidden_size)
else:
raise ValueError('Unknown LSTM type %s' % args.lstm)
if args.no_copy is False:
# pointer net for copying tokens from source side
self.src_pointer_net = PointerNet(query_vec_size=args.att_vec_size, src_encoding_size=args.hidden_size)
# given the decoder's hidden state, predict whether to copy or generate a target primitive token
# output: [p(gen(token)) | s_t, p(copy(token)) | s_t]
self.primitive_predictor = nn.Linear(args.att_vec_size, 2)
if args.primitive_token_label_smoothing:
self.label_smoothing = LabelSmoothing(args.primitive_token_label_smoothing, len(self.vocab.primitive), ignore_indices=[0, 1, 2])
# initialize the decoder's state and cells with encoder hidden states
self.decoder_cell_init = nn.Linear(args.hidden_size, args.hidden_size)
# attention: dot product attention
# project source encoding to decoder rnn's hidden space
self.att_src_linear = nn.Linear(args.hidden_size, args.hidden_size, bias=False)
# transformation of decoder hidden states and context vectors before reading out target words
# this produces the `attentional vector` in (Luong et al., 2015)
self.att_vec_linear = nn.Linear(args.hidden_size + args.hidden_size, args.att_vec_size, bias=False)
# bias for predicting ApplyConstructor and GenToken actions
self.production_readout_b = nn.Parameter(torch.FloatTensor(len(transition_system.grammar) + 1).zero_())
self.tgt_token_readout_b = nn.Parameter(torch.FloatTensor(len(vocab.primitive)).zero_())
if args.no_query_vec_to_action_map:
# if there is no additional linear layer between the attentional vector (i.e., the query vector)
# and the final softmax layer over target actions, we use the attentional vector to compute action
# probabilities
assert args.att_vec_size == args.action_embed_size
self.production_readout = lambda q: F.linear(q, self.production_embed.weight, self.production_readout_b)
self.tgt_token_readout = lambda q: F.linear(q, self.primitive_embed.weight, self.tgt_token_readout_b)
else:
# by default, we feed the attentional vector (i.e., the query vector) into a linear layer without bias, and
# compute action probabilities by dot-producting the resulting vector and (GenToken, ApplyConstructor) action embeddings
# i.e., p(action) = query_vec^T \cdot W \cdot embedding
self.query_vec_to_action_embed = nn.Linear(args.att_vec_size, args.embed_size, bias=args.readout == 'non_linear')
if args.query_vec_to_action_diff_map:
# use different linear transformations for GenToken and ApplyConstructor actions
self.query_vec_to_primitive_embed = nn.Linear(args.att_vec_size, args.embed_size, bias=args.readout == 'non_linear')
else:
self.query_vec_to_primitive_embed = self.query_vec_to_action_embed
self.read_out_act = torch.tanh if args.readout == 'non_linear' else nn_utils.identity
self.production_readout = lambda q: F.linear(self.read_out_act(self.query_vec_to_action_embed(q)),
self.production_embed.weight, self.production_readout_b)
self.tgt_token_readout = lambda q: F.linear(self.read_out_act(self.query_vec_to_primitive_embed(q)),
self.primitive_embed.weight, self.tgt_token_readout_b)
# dropout layer
self.dropout = nn.Dropout(args.dropout)
if args.bert_path:
self.tokenizer = BertTokenizer.from_pretrained(args.bert_path)
self.automodel = BertModel.from_pretrained(args.bert_path)
self.linear_mapper = nn.Linear(768, args.embed_size)
self.bert_norm = nn.LayerNorm(768)
if args.cuda:
self.new_long_tensor = torch.cuda.LongTensor
self.new_tensor = torch.cuda.FloatTensor
self.device = 'cuda'
else:
self.new_long_tensor = torch.LongTensor
self.new_tensor = torch.FloatTensor
self.device = 'cpu'
def encode(self, src_sents_var, src_sents_len):
"""Encode the input natural language utterance
Args:
src_sents_var: a variable of shape (src_sent_len, batch_size), representing word ids of the input
src_sents_len: a list of lengths of input source sentences, sorted by descending order
Returns:
src_encodings: source encodings of shape (batch_size, src_sent_len, hidden_size * 2)
last_state, last_cell: the last hidden state and cell state of the encoder,
of shape (batch_size, hidden_size)
"""
# (tgt_query_len, batch_size, embed_size)
# apply word dropout
if self.args.bert_path is None:
if self.training and self.args.word_dropout:
mask = Variable(self.new_tensor(src_sents_var.size()).fill_(1. - self.args.word_dropout).bernoulli().long())
src_sents_var = src_sents_var * mask + (1 - mask) * self.vocab.source.unk_id
src_token_embed = self.src_embed(src_sents_var)
else:
src_token_embed = src_sents_var
packed_src_token_embed = pack_padded_sequence(src_token_embed, src_sents_len)
# src_encodings: (tgt_query_len, batch_size, hidden_size)
src_encodings, (last_state, last_cell) = self.encoder_lstm(packed_src_token_embed)
src_encodings, _ = pad_packed_sequence(src_encodings)
# src_encodings: (batch_size, tgt_query_len, hidden_size)
src_encodings = src_encodings.permute(1, 0, 2)
# (batch_size, hidden_size * 2)
last_state = torch.cat([last_state[0], last_state[1]], 1)
last_cell = torch.cat([last_cell[0], last_cell[1]], 1)
return src_encodings, (last_state, last_cell)
def _bert_encode(self, sequences):
seq_idxs = []
seq_lens = []
token_lo_hi_lists = list()
test_len = 0
for seq in sequences:
test_len = max(test_len, len(seq))
tokens, token_lo_hi_list = generate_inputs(self.tokenizer, seq)
seq_lens.append(len(tokens))
seq_idxs.append(self.tokenizer.convert_tokens_to_ids(tokens))
token_lo_hi_lists.append(token_lo_hi_list)
batch_size = len(seq_lens)
maxlen_seq = max(seq_lens)
hidden_size = self.automodel.config.hidden_size
input_idxs = torch.zeros(batch_size, maxlen_seq).long()
input_mask = torch.zeros(batch_size, maxlen_seq).bool()
for k, (idxs, len_) in enumerate(zip(seq_idxs, seq_lens)):
input_idxs[k, :len_] = torch.LongTensor(idxs)
input_mask[k, :len_] = True
input_idxs = input_idxs.to(self.device)
input_mask = input_mask.to(self.device)
if self.args.finetune_bert:
hidden_batch = self.automodel(input_idxs, input_mask)[0]
else:
with torch.no_grad():
self.automodel.train(mode=False)
hidden_batch = self.automodel(input_idxs, input_mask)[0]
if torch.any(torch.isnan(hidden_batch)):
for seq in sequences:
print(seq)
raise ValueError
# merge subtoken embeddings by mean pool
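        # e.g. if "playing" was tokenized into ["play", "##ing"] occupying rows
        # lo..hi of `hidden`, the token's embedding is the mean of those rows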
output_batch = list()
for bi, hidden in enumerate(hidden_batch):
token_lo_hi_list = token_lo_hi_lists[bi]
output_ = list()
for lo, hi in token_lo_hi_list:
hidden_lo_hi = torch.mean(hidden[lo:hi, :], dim=0, keepdim=True)
if torch.any(torch.isnan(hidden_lo_hi)):
print(sequences[bi])
print(token_lo_hi_list)
raise ValueError
output_.append(hidden_lo_hi)
output_batch.append(torch.cat(output_, dim=0))
output = torch.nn.utils.rnn.pad_sequence(output_batch, batch_first=False, padding_value=0)
if torch.any(torch.isnan(output)):
for seq in sequences:
print(seq)
raise ValueError
assert output.size(0) == test_len
output = self.bert_norm(output)
output = self.linear_mapper(output)
if torch.any(torch.isnan(output)):
for seq in sequences:
print(seq)
raise ValueError
return output
def init_decoder_state(self, enc_last_state, enc_last_cell):
"""Compute the initial decoder hidden state and cell state"""
h_0 = self.decoder_cell_init(enc_last_cell)
h_0 = torch.tanh(h_0)
return h_0, Variable(self.new_tensor(h_0.size()).zero_())
def score(self, examples, return_encode_state=False):
"""Given a list of examples, compute the log-likelihood of generating the target AST
Args:
examples: a batch of examples
return_encode_state: return encoding states of input utterances
output: score for each training example: Variable(batch_size)
"""
batch = Batch(examples, self.grammar, self.vocab, copy=self.args.no_copy is False, cuda=self.args.cuda)
# src_encodings: (batch_size, src_sent_len, hidden_size * 2)
# (last_state, last_cell, dec_init_vec): (batch_size, hidden_size)
if self.args.bert_path:
src_sents_var = self._bert_encode(batch.src_sents)
else:
src_sents_var = batch.src_sents_var
src_encodings, (last_state, last_cell) = self.encode(src_sents_var, batch.src_sents_len)
dec_init_vec = self.init_decoder_state(last_state, last_cell)
# query vectors are sufficient statistics used to compute action probabilities
# query_vectors: (tgt_action_len, batch_size, hidden_size)
# if use supervised attention
if self.args.sup_attention:
query_vectors, att_prob = self.decode(batch, src_encodings, dec_init_vec)
else:
query_vectors = self.decode(batch, src_encodings, dec_init_vec)
# ApplyRule (i.e., ApplyConstructor) action probabilities
# (tgt_action_len, batch_size, grammar_size)
apply_rule_prob = F.softmax(self.production_readout(query_vectors), dim=-1)
# probabilities of target (gold-standard) ApplyRule actions
# (tgt_action_len, batch_size)
tgt_apply_rule_prob = torch.gather(apply_rule_prob, dim=2,
index=batch.apply_rule_idx_matrix.unsqueeze(2)).squeeze(2)
#### compute generation and copying probabilities
# (tgt_action_len, batch_size, primitive_vocab_size)
gen_from_vocab_prob = F.softmax(self.tgt_token_readout(query_vectors), dim=-1)
# (tgt_action_len, batch_size)
tgt_primitive_gen_from_vocab_prob = torch.gather(gen_from_vocab_prob, dim=2,
index=batch.primitive_idx_matrix.unsqueeze(2)).squeeze(2)
if self.args.no_copy:
# mask positions in action_prob that are not used
if self.training and self.args.primitive_token_label_smoothing:
# (tgt_action_len, batch_size)
                # this is actually the negative KL divergence since we will flip the sign later
# tgt_primitive_gen_from_vocab_log_prob = -self.label_smoothing(
# gen_from_vocab_prob.view(-1, gen_from_vocab_prob.size(-1)).log(),
# batch.primitive_idx_matrix.view(-1)).view(-1, len(batch))
tgt_primitive_gen_from_vocab_log_prob = -self.label_smoothing(
gen_from_vocab_prob.log(),
batch.primitive_idx_matrix)
else:
tgt_primitive_gen_from_vocab_log_prob = tgt_primitive_gen_from_vocab_prob.log()
# (tgt_action_len, batch_size)
action_prob = tgt_apply_rule_prob.log() * batch.apply_rule_mask + \
tgt_primitive_gen_from_vocab_log_prob * batch.gen_token_mask
else:
# binary gating probabilities between generating or copying a primitive token
# (tgt_action_len, batch_size, 2)
primitive_predictor = F.softmax(self.primitive_predictor(query_vectors), dim=-1)
# pointer network copying scores over source tokens
# (tgt_action_len, batch_size, src_sent_len)
primitive_copy_prob = self.src_pointer_net(src_encodings, batch.src_token_mask, query_vectors)
            # marginalize over the copy probabilities of tokens that are the same
# (tgt_action_len, batch_size)
tgt_primitive_copy_prob = torch.sum(primitive_copy_prob * batch.primitive_copy_token_idx_mask, dim=-1)
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from datetime import datetime
import pytest
from mock import MagicMock
from intelliflow.api_ext import *
from intelliflow.core.application.application import Application
from intelliflow.core.platform.definitions.compute import (
ComputeFailedSessionState,
ComputeFailedSessionStateType,
ComputeResourceDesc,
ComputeResponse,
ComputeSessionDesc,
ComputeSessionState,
ComputeSuccessfulResponse,
ComputeSuccessfulResponseType,
)
from intelliflow.core.signal_processing import Slot
from intelliflow.core.signal_processing.definitions.metric_alarm_defs import AlarmDimension
from intelliflow.core.signal_processing.signal import *
from intelliflow.mixins.aws.test import AWSTestBase
from intelliflow.utils.test.inlined_compute import NOOPCompute
class TestAWSApplicationAlarmingMetrics(AWSTestBase):
def test_application_alarming_and_metrics_system_metrics_using_all_apis(self):
self.patch_aws_start(glue_catalog_has_all_tables=True)
app = AWSApplication("alarming-sys", self.region)
# SYSTEM METRICS
system_metrics_map = app.get_platform_metrics(HostPlatform.MetricType.SYSTEM)
# check system metrics and see their aliases and sub-dimensions!
for driver_metric_map in system_metrics_map.values():
for metric in driver_metric_map.values():
# dumps metric group ID/alias -> specific MetricNames and other details
assert json.loads(metric.describe())["metric_stats"]
processor_metrics_map = system_metrics_map[ProcessingUnit]
routing_metrics_map = system_metrics_map[RoutingTable]
# 1- use metric IDs/aliases to retrieve them from the map
# 2- and then use 'MetricName' to get a concrete/materialized metric to be used in an Alarm.
# these signals can now be bound to alarms
routing_table_metric_signal = routing_metrics_map["routingTable"]
routing_table_getitem_operation_metric_signal = routing_metrics_map["routingTable.GetItem"]
# the following can be fed directly into an alarm by starting with Statistic as the first dimension
processor_core_metric_signal = processor_metrics_map["processor.core"]
# low level alarm into an underlying driver resource
# raise if write throttling on the RheocerOS routing table exceeds 50 in 15 mins for two datapoints out of 3,
# or route object retrieval latency is more than 500ms.
#
# Please note that this alarm can still be aggregated into a composite alarm (which would still use the same dedupe_str)
# this enables you to partition alarm definitions and then merge them to create a system-wide OE view.
routing_table_alarm = app.create_alarm(
id="routing_table_alarm",
target_metric_or_expression="(m1 > 50 OR m2 > 500)",
metrics={
"m1": routing_table_metric_signal["WriteThrottleEvents"][MetricStatistic.SUM][MetricPeriod.MINUTES(15)],
"m2": routing_table_getitem_operation_metric_signal["SuccessfulRequestLatency"][MetricStatistic.AVERAGE][
MetricPeriod.MINUTES(15)
],
},
number_of_evaluation_periods=3,
number_of_datapoint_periods=2,
comparison_operator=AlarmComparisonOperator.GreaterThanOrEqualToThreshold,
# since we are using logical/conditional operator in metric math, the output series only contains 1s or 0s.
# so threshold will be 1 to detect.
threshold=1,
default_actions=AlarmDefaultActionsMap(
ALARM_ACTIONS=set(),
OK_ACTIONS=set(),
INSUFFICIENT_DATA_ACTIONS=set(),
),
)
# Sev5 on core Processor (paranoid / warning shot for oncall)
# if Processor has an execution Error (message probably sent to DLQ) or its max duration hits 600 x 1000 ms (10 minutes, approaching the 15 min timeout)
processor_alarm = app.create_alarm(
id="processor_alarm",
# refer
# https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/using-metric-math.html
target_metric_or_expression="(m1 > 1 OR m2 > 600000)",
metrics={
"m1": processor_core_metric_signal["Errors"][MetricStatistic.SUM][MetricPeriod.MINUTES(5)],
"m2": processor_core_metric_signal["Duration"][MetricStatistic.MAXIMUM][MetricPeriod.MINUTES(5)],
},
number_of_evaluation_periods=1,
number_of_datapoint_periods=1,
comparison_operator=AlarmComparisonOperator.GreaterThanOrEqualToThreshold,
# since we are using logical/conditional operator in metric math, the output series only contains 1s or 0s.
# so threshold will be 1 to detect.
threshold=1,
default_actions=AlarmDefaultActionsMap(ALARM_ACTIONS=set()),
)
# External METRICS
# import a metric definition from the same account
# generic representation of the metrics from this particular lambda.
external_lambda_metric = app.marshal_external_metric(
external_metric_desc=CWMetric(namespace="AWS/Lambda"), id="lambda_metric", sub_dimensions={"FunctionName": "LambdaFunction"}
)
# import the same metric in a different, more flexible way.
# This shows that the Lambda Error metric can be imported into the system in
# different ways (different IDs, default aliases),
# and also with defaults/overwrites on metric dimensions.
external_lambda_error_metric_on_another_func = app.marshal_external_metric(
external_metric_desc=CWMetric(namespace="AWS/Lambda"),
id="my_test_function_error",
dimension_filter={
"Error": { # only keep 'Error'
MetricStatistic.SUM: { # support SUM only
MetricPeriod.MINUTES(5): {
# restrict the use of this metric with 5 mins period only (in alarms)
"*": {} # (reserved) Any MetricDimension.TIME
}
}
}
},
sub_dimensions={"functionname": "LambdaFunction"},
)
system_failure_alarm = app.create_alarm(
id="system_failure",
# refer
# https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/using-metric-math.html#metric-math-syntax
target_metric_or_expression="SUM(METRICS())",
# will validate whether the metrics are materialized (i.e. whether NAME, Statistic and Period dimensions are material or not).
metrics=[
external_lambda_metric["Error"][MetricStatistic.SUM][MetricPeriod.MINUTES(5)],
external_lambda_error_metric_on_another_func,
],
number_of_evaluation_periods=5,
number_of_datapoint_periods=3,
comparison_operator=AlarmComparisonOperator.GreaterThanOrEqualToThreshold,
threshold=1,
)
# SERIALIZATION: inject serialize/deserialize sequence for enhanced serialization coverage
json_str = app.dev_context.to_json()
dev_context = CoreData.from_json(json_str)
app._dev_context = dev_context
#
app.activate(allow_concurrent_executions=False)
# yields None: external metrics cannot be emitted (note that this is not due to an unmaterialized metric Name)
assert app.platform.diagnostics["my_test_function_error"] is None
assert app.platform.diagnostics["lambda_metric"] is None
self.patch_aws_stop()
def test_application_alarming_and_metrics_custom_metric(self):
self.patch_aws_start(glue_catalog_has_all_tables=True)
app = AWSApplication("alarming-cust", self.region)
# 'id' field here represents the 'Metric Group Id' (metric sub-space); it will be a part of the concrete metric instance
# (in AWS CloudWatch for example) as a sub_dimension ({'MetricGroupId': 'my_custom_spark_error_metric'}) along with
# other sub-dimensions.
# It is also used to retrieve this abstract 'metric' declaration during development and also from Diagnostics
# module at runtime.
# Do the following in your Spark code to emit at runtime for example:
# runtime_platform.diagnostics["my_custom_spark_error_metric"]["Error"].emit(1)
internal_spark_error_metric = app.create_metric(
id="my_custom_spark_error_metric",
dimension_filter={
"Error": { # only keep 'Error' as NAME dimension
"*": { # support all statistic (no filter spec)
"*": {
# support all periods (during the emission sets Storage Resolution to 1)
"*": {} # (reserved) Any MetricDimension.TIME
}
}
}
},
)
# totally separate unrelated metric due to different sub_dimensions (despite identical metric IDs)
internal_spark_error_metric2 = app.create_metric(
id="my_custom_spark_error_metric",
sub_dimensions={"marketplace_id": "1"},
dimension_filter={"Error": {"*": {"*": {"*": {}}}}}, # only allowed NAME dimension
)
internal_spark_error_metric3 = app.create_metric(
id="my_custom_spark_error_metric",
sub_dimensions={"marketplace_id": "3"}
# dimension_filter is not specified, so during emission
# NAME dimension should be specified explicitly
)
# Example for internal / custom metric declaration with sub-dimensions (for further specialization)
# When Metric Name is specified during emission, this would look like this in Cloudwatch:
# {
# "Namespace": "if-alarming-cust-<ACC_ID>-<REGION_ID>",
# "MetricName": "<WHATEVER NAME you'll use during in emit call (e.g 'Error')",
# "Dimensions": {"MetricGroupId": "my_custom_spark_cumulative",
# "my_custom_dim": "ALL"}
# }
internal_spark_metric_ALL = app.create_metric(
id="my_custom_spark_cumulative",
sub_dimensions={"my_custom_dim": "ALL"}
# dimension_filter is not specified, so during emission
# NAME dimension should be specified explicitly
)
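# Hedged sketch (not part of this test): what an equivalent raw CloudWatch
# emission could look like via boto3 for the metric shape documented above;
# the namespace and values are placeholders copied from the comment.
def _raw_cloudwatch_emit_sketch():
    import boto3
    cw = boto3.client("cloudwatch")
    cw.put_metric_data(
        Namespace="if-alarming-cust-<ACC_ID>-<REGION_ID>",
        MetricData=[
            {
                "MetricName": "Error",
                "Dimensions": [
                    {"Name": "MetricGroupId", "Value": "my_custom_spark_cumulative"},
                    {"Name": "my_custom_dim", "Value": "ALL"},
                ],
                "Value": 1.0,
                "StorageResolution": 1,  # per the emission note above
            }
        ],
    )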
# 'id' here is just a key to retrieve this metric declaration from Application and also from Diagnostics at runtime.
# So even if the 'Error' metric name is used when emitting from this, it won't contribute to the same metric from
# 'my_custom_spark_error_metric' above at runtime, because metrics are uniquely defined by Name + sub_dimensions and,
# as mentioned above, 'id' is automatically added to sub_dimensions to differentiate/isolate all of the internal metrics
# from each other.
#
# This can be emitted from your Spark code by doing the following for example:
#
# runtime_platform.diagnostics["my_app_error_metric_def"]["Error"].emit(1)
generic_internal_metric = app.create_metric(id="my_app_error_metric_def")
# ALARM with default ticket action
etl_error_alarm = app.create_alarm(
id="one_or_more_spark_executions_failed",
target_metric_or_expression=internal_spark_error_metric["Error"][MetricStatistic.SUM][MetricPeriod.MINUTES(5)],
number_of_evaluation_periods=1,
number_of_datapoint_periods=1,
comparison_operator=AlarmComparisonOperator.GreaterThanOrEqualToThreshold,
threshold=1,
)
etl_error_alarm_with_all_metric_variants = app.create_alarm(
id="one_or_more_spark_executions_failed_2",
target_metric_or_expression="SUM(METRICS()) > 0",
metrics=[
internal_spark_error_metric["Error"][MetricStatistic.SUM][MetricPeriod.MINUTES(5)],
internal_spark_error_metric2["Error"][MetricStatistic.SUM][MetricPeriod.MINUTES(5)],
internal_spark_error_metric3["Failure"][MetricStatistic.SUM][MetricPeriod.MINUTES(5)],
],
)
generic_internal_alarm = app.create_alarm(
id="generic_error_alarm",
# refer
# https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/using-metric-math.html#metric-math-syntax
target_metric_or_expression="errors > 0 OR failures > 0",
# returns a time series with each point either 1 or 0
metrics={
"errors": generic_internal_metric["MY_CUSTOM_ERROR"][MetricStatistic.SUM][MetricPeriod.MINUTES(5)],
"failures": generic_internal_metric["MY_CUSTOM_FAILURE"][MetricStatistic.SUM][MetricPeriod.MINUTES(5)],
},
number_of_evaluation_periods=1,
number_of_datapoint_periods=1,
comparison_operator=AlarmComparisonOperator.GreaterThanOrEqualToThreshold,
threshold=1,
)
composite_alarm = app.create_composite_alarm(
id="system_monitor", alarm_rule=~(etl_error_alarm["OK"] | generic_internal_alarm["OK"])
)
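# note: the rule above reads as NOT (etl is OK OR generic is OK); by De Morgan,
# ~(a | b) == ~a & ~b, so this composite should go to ALARM only when neither
# child alarm is in the OK state.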
monitor_failure_reactor = app.create_data(
id="system_failure_reactor", inputs=[composite_alarm[AlarmState.ALARM.value]], compute_targets=[NOOPCompute]
)
# show retrieval scheme for internal custom metrics before the activation
assert not app.get_metric("my_custom_spark_error_metric")
assert set(app.get_metric("my_custom_spark_error_metric", context=Application.QueryContext.DEV_CONTEXT)) == {
internal_spark_error_metric,
internal_spark_error_metric2,
internal_spark_error_metric3,
}
matched_metrics = app.get_metric(
"my_custom_spark_error_metric", sub_dimensions={"marketplace_id": "1"}, context=Application.QueryContext.DEV_CONTEXT
)
assert len(matched_metrics) == 1
assert internal_spark_error_metric2 is matched_metrics[0]
# check alarm APIs
assert not app.get_alarm("one_or_more_spark_executions_failed_2")
assert not app.alarm("one_or_more_spark_executions_failed_2")
with pytest.raises(ValueError):
alarm_node = app["one_or_more_spark_executions_failed_2"]
assert set(app.get_alarm("one_or_more_spark_executions_failed_2", context=Application.QueryContext.DEV_CONTEXT)) == {
etl_error_alarm_with_all_metric_variants
}
assert not app.get_alarm(
"one_or_more_spark_executions_failed_2", alarm_type=AlarmType.COMPOSITE, context=Application.QueryContext.DEV_CONTEXT
)
app.activate(allow_concurrent_executions=False)
assert app.get_alarm("one_or_more_spark_executions_failed_2")[0] == etl_error_alarm_with_all_metric_variants
assert app["one_or_more_spark_executions_failed_2"] == etl_error_alarm_with_all_metric_variants
with pytest.raises(ValueError):
alarm_node = app["one_or_more_spark_executions_failed_2" : AlarmType.COMPOSITE]
# multiple internal custom metrics with the same ID
with pytest.raises(ValueError):
metric = app["my_custom_spark_error_metric"]
with pytest.raises(ValueError):
app.metric("my_custom_spark_error_metric")
assert set(app.get_metric("my_custom_spark_error_metric")) == {
internal_spark_error_metric,
internal_spark_error_metric2,
internal_spark_error_metric3,
}
assert internal_spark_error_metric2 is app.metric("my_custom_spark_error_metric", sub_dimensions={"marketplace_id": "1"})
# use Application::__getitem__
assert internal_spark_error_metric2 is app["my_custom_spark_error_metric":{"marketplace_id": "1"}]
assert internal_spark_error_metric3 is app["my_custom_spark_error_metric":{"marketplace_id": "3"}]
# none of the variants of "my_custom_spark_error_metric" have both of these sub-dimensions
assert app.metric("my_custom_spark_error_metric", sub_dimensions={"marketplace_id": "1", "region": "EU"}) is None
assert app.metric("my_custom_spark_error_metric", sub_dimensions={"marketplace_id": "SUB DIMENSION DOES NOT MATCH!"}) is None
with pytest.raises(ValueError):
metric = app["my_custom_spark_error_metric":{"marketplace_id": "RANDOM NONEXISTENT SUB!"}]
with pytest.raises(ValueError):
# there is no metric definition with mp_id 2
metric = app["my_custom_spark_error_metric":{"marketplace_id": "2"}]
# RUNTIME (test emission)
# TEST / MANUAL emission from local Python dev-endpoint
# These calls are actually supposed to be done at runtime (in Spark, Lambda code, etc)
# sub_dimensions=None -> matches all of the variants
# AMBIGUOUS call: any of the metric group variants can be picked here.
# This means that there is no guaranteed way to emit from the 1st variant (which has no sub-dimensions).
assert app.platform.diagnostics.get_internal_metric("my_custom_spark_error_metric").signal in {
internal_spark_error_metric.signal(),
internal_spark_error_metric2.signal(),
internal_spark_error_metric3.signal(),
}
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from __future__ import annotations
import numpy as np
import pandas as pd
from dataclasses import dataclass, fields
from typing import Iterator, List, Optional
import torch
from torch.utils.data import IterableDataset, Dataset
from .dataset import TimeSeries, TimeSeriesDataset
class WeightedIndexIterator:
"""Iterator that caches a number of indices sampled according to given weights.
This gives a great performance speedup, since np.random.choice is the bottleneck
of the data loading. This class samples and caches a certain number of indices and
returns them until new ones need to be sampled.
"""
def __init__(self, weights: np.ndarray, num_cache: int = 1024):
"""
Args:
weights: np.ndarray, containing the sample weights
num_cache: the number of indices to cache
"""
self.weights = weights
self.num_cache = num_cache
def __iter__(self):
self.i = self.num_cache
return self
def __next__(self):
if self.i >= self.num_cache:
self.idx = np.random.choice(len(self.weights), p=self.weights, size=self.num_cache)
self.i = 0
idx = self.idx[self.i]
self.i += 1
return idx
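# Usage sketch (illustrative numbers; not part of the original module): with a
# cache of 4, the ten draws below trigger only three np.random.choice calls.
def _weighted_index_iterator_demo() -> list:
    weights = np.array([0.5, 0.3, 0.2])
    it = iter(WeightedIndexIterator(weights, num_cache=4))
    return [next(it) for _ in range(10)]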
@dataclass
class Triplet:
"""
A triplet is composed of a support set, observed queries, and corresponding (unobserved) future queries.
"""
support_set: List[TimeSeries] # length: support_set_size, length TimeSeries: context_length
query_past: List[TimeSeries] # length: num_queries, length TimeSeries: context_length
query_future: List[TimeSeries] # length: num_queries, length TimeSeries: prediction_length
def __iter__(self):
return (getattr(self, field.name) for field in fields(self))
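# Sketch: __iter__ above lets a Triplet unpack like a tuple (empty lists used
# only for brevity; real triplets hold TimeSeries objects).
def _triplet_unpack_demo() -> None:
    support_set, query_past, query_future = Triplet([], [], [])
    assert (support_set, query_past, query_future) == ([], [], [])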
class TripletDataset(Dataset[Triplet]):
"""
The triplet dataset gets a list of queries and corresponding support set and returns them as triplets.
"""
def __init__(self, queries: TimeSeriesDataset, support_sets: List[List[TimeSeries]]):
self.queries = queries
self.support_sets = support_sets
assert len(queries) == len(
support_sets
), "For each query there must be exactly one support set"
def __len__(self) -> int:
return len(self.queries)
def __getitem__(self, index: int) -> Triplet:
pl = self.queries.prediction_length
ts_l = len(self.queries[index])
query_past = [self.queries[index][0:-pl]]
query_future = [self.queries[index][ts_l - pl : ts_l]]
return Triplet(self.support_sets[index], query_past, query_future)
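# Worked example of the slicing above (illustrative numbers): with
# prediction_length pl = 24 and a query of length 100, query_past covers
# indices [0, 76) and query_future covers [76, 100), i.e. the last pl points
# are held out as the future query.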
class SamplingTripletDataset(IterableDataset[Triplet]): # type: ignore
"""
The sampling triplet dataset randomly samples support sets and past queries
along with their future prediction horizon.
All three sets consist of time series windows sliced from the original time series. The support set time series
end before the prediction horizon begins to avoid time leakage.
The dataset yields infinitely many items. For now, the support set time series length is context_length.
"""
def __init__(
self,
dataset: TimeSeriesDataset,
support_set_size: int,
num_queries: int,
context_length: int,
support_length: int,
prediction_length: int,
catch22_nn: Optional[np.ndarray] = None,
cheat: float = 0.0,
):
"""
Args:
dataset: The dataset to sample from.
support_set_size: The size of the support set.
num_queries: The number of queries.
context_length: The length of the context.
support_length: The length of the support time series.
prediction_length: The length of the prediction.
catch22_nn: Contains for each index its 100 nearest neighbors w.r.t. catch22 distance.
If not None, slices from the closest `support_set_size` time series are chosen as support set.
cheat: The probability with which the query (time series to be predicted), shifted by the prediction length,
is contained in the support set, i.e. the ground truth is in the support set.
"""
# Initialize
super().__init__()
self.dataset = dataset
self.support_set_size = support_set_size
self.num_queries = num_queries
self.context_length = context_length
self.support_length = support_length
self.prediction_length = prediction_length
self.cheat = cheat
assert not cheat or self.num_queries == 1, "Cheat sampling only allows num_queries = 1"
time_series_lengths = np.array([len(s) for s in dataset])
time_series_weights = time_series_lengths / time_series_lengths.sum()
self.index_iterator = iter(WeightedIndexIterator(time_series_weights))
self.catch22_nn = catch22_nn
assert not (
self.catch22_nn is not None and num_queries > 1
), "catch22 support set selection only works with num_queries equal to one"
def __iter__(self) -> Iterator[Triplet]:
while True:
query_past, query_future, cheat_query, query_idx = self._sample_queries()
# We do not use the qsplit option for training
support_set = sample_supps(
supps_size=self.support_set_size,
length=self.support_length,
dataset=self.dataset,
cheat_query=cheat_query[0] if np.random.rand() < self.cheat else None,
index_iterator=self.index_iterator
if self.catch22_nn is None
else iter(self.catch22_nn[query_idx]),
)
yield Triplet(support_set, query_past, query_future)
def _sample_queries(self):
query_past = []
query_future = []
query_cheat = []
for _ in range(self.num_queries):
# First, sample a time series with probability relative to its length
idx = next(self.index_iterator)
series = self.dataset[idx]
# Then, sample a slice with uniform probability
# context should be at least prediction length long
split_point = np.random.choice(
np.arange(self.prediction_length, len(series) - self.prediction_length + 1)
)
prediction = series[split_point : split_point + self.prediction_length]
context_start = max(0, split_point - self.context_length)
context = series[context_start:split_point]
query_past.append(context)
query_future.append(prediction)
# sample the start of the cheat time series
cheat_earliest_start = max(
0,
context_start - self.support_length + self.context_length + self.prediction_length,
)
cheat_start = np.random.choice(np.arange(cheat_earliest_start, context_start + 1))
cheat_end = min(len(series), cheat_start + self.support_length)
query_cheat.append(series[cheat_start:cheat_end])
return query_past, query_future, query_cheat, idx
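# Worked example of the sampling above (illustrative numbers): for a series of
# length 100 with prediction_length 24, split_point is drawn uniformly from
# [24, 76]; the prediction window is series[split_point : split_point + 24]
# and the context is the (up to) context_length points ending at split_point.
# cheat_earliest_start is chosen so that a support_length slice starting there
# still covers the prediction window, i.e. the ground truth ends up inside the
# support set when the cheat branch is taken.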
class SequentialTripletDataset(Dataset[Triplet]): # type: ignore
"""
The sequential triplet dataset traverses the dataset and uses the last prediction length slice as future query.
The support set is sampled randomly. The length of the dataset is the number of time series
divided by the number of queries.
"""
def __init__(
self,
dataset: TimeSeriesDataset,
support_set_size: int,
num_queries: int,
context_length: int,
support_length: int,
prediction_length: int,
support_dataset: Optional[TimeSeriesDataset] = None,
seed: Optional[int] = None,
catch22_nn: Optional[np.ndarray] = None,
cheat: bool = False,
):
"""
Args:
dataset: The dataset to sample from.
support_set_size: The size of the support set.
num_queries: The number of queries.
context_length: The length of the context.
support_length: The length of the support time series.
prediction_length: The length of the prediction.
support_dataset: The dataset to choose the support set from. If not provided `dataset` is used.
This is used to choose the support set for the test split from the val split (technical reasons).
seed: The random seed for sampling the support set
catch22_nn: Contains for each index its 100 nearest neighbors w.r.t. catch22 distance.
If not None, slices from the closest `support_set_size` time series are chosen as support set.
cheat: If true, the query (time series to be predicted) shifted by the prediction length
is contained in the support set, i.e. the ground truth is in the support set.
"""
super().__init__()
self.dataset = dataset
self.support_set_size = support_set_size
self.num_queries = num_queries
self.context_length = context_length
self.support_length = support_length
self.prediction_length = prediction_length
self.cheat = cheat
assert not cheat or self.num_queries == 1, "Cheat sampling only allows num_queries = 1"
self.support_dataset = support_dataset or dataset
self.seed = seed
if catch22_nn is None:
time_series_lengths = np.array([len(s) for s in self.support_dataset])
time_series_weights = time_series_lengths / time_series_lengths.sum()
self.index_iterator = iter(WeightedIndexIterator(time_series_weights))
self.catch22_nn = catch22_nn
assert not (
self.catch22_nn is not None and num_queries > 1
), "catch22 support set selection only works with num_queries equal to one"
def __len__(self) -> int:
return len(self.dataset) // self.num_queries
def __getitem__(self, index: int) -> Triplet:
start = index * self.num_queries
end = start + self.num_queries
query_past, query_future = zip(
*(self._last_slice(self.dataset[i]) for i in range(start, end))
)
# support time series should end before earliest start of future queries
q_split = min(q.start_date for q in query_future)
query = self.dataset[index]
cheat_query = query[max(0, len(query) - self.support_length) : len(query)]
support_set = sample_supps(
supps_size=self.support_set_size,
length=self.support_length,
dataset=self.support_dataset,
q_split=q_split,
# TODO: The seeding does not work anymore, we could use torch again in the validation split
# but this will be slower
# seed=self.seed + index if self.seed else None,
cheat_query=cheat_query if np.random.rand() < self.cheat else None,
index_iterator=self.index_iterator
if self.catch22_nn is None
else iter(self.catch22_nn[query_past[0].item_id]),
)
return Triplet(support_set, query_past, query_future)
def _last_slice(self, series: TimeSeries):
split_point = len(series) - self.dataset.prediction_length
prediction = series[split_point : split_point + self.prediction_length]
context = series[max(0, split_point - self.context_length) : split_point]
return context, prediction
def sample_supps(
supps_size: int,
length: int,
dataset: TimeSeriesDataset,
index_iterator: Iterator,
q_split: Optional[pd.Timestamp] = None,
# seed: Optional[int] = None,
cheat_query: Optional[TimeSeries] = None,
):
"""
Args:
supps_size: The number of support time series
length: The length of the support time series slice
dataset: The dataset to sample the support set from.
"void SetRGB__6DialogUcUcUc(struct Dialog *this, unsigned char R, unsigned char G, unsigned char B)")
del_items(0x80083CF8)
SetType(0x80083CF8, "void SetBack__6Dialogi(struct Dialog *this, int Type)")
del_items(0x80083D00)
SetType(0x80083D00, "void SetBorder__6Dialogi(struct Dialog *this, int Type)")
del_items(0x80083D08)
SetType(0x80083D08, "void ___6Dialog(struct Dialog *this, int __in_chrg)")
del_items(0x80083D30)
SetType(0x80083D30, "struct Dialog *__6Dialog(struct Dialog *this)")
del_items(0x80083D8C)
SetType(0x80083D8C, "unsigned short GetDown__C4CPad(struct CPad *this)")
del_items(0x80083DB4)
SetType(0x80083DB4, "unsigned short GetUp__C4CPad(struct CPad *this)")
del_items(0x80083DDC)
SetType(0x80083DDC, "unsigned char CheckActive__4CPad(struct CPad *this)")
del_items(0x80083DE8)
SetType(0x80083DE8, "unsigned long ReadPadStream__Fv()")
del_items(0x80083F00)
SetType(0x80083F00, "void PAD_Handler__Fv()")
del_items(0x800840B8)
SetType(0x800840B8, "struct CPad *PAD_GetPad__FiUc(int PadNum, unsigned char both)")
del_items(0x80084154)
SetType(0x80084154, "void NewVal__4CPadUs(struct CPad *this, unsigned short New)")
del_items(0x8008428C)
SetType(0x8008428C, "void BothNewVal__4CPadUsUs(struct CPad *this, unsigned short New, unsigned short New2)")
del_items(0x800843E8)
SetType(0x800843E8, "unsigned short Trans__4CPadUs(struct CPad *this, unsigned short PadVal)")
del_items(0x8008450C)
SetType(0x8008450C, "void _GLOBAL__I_Pad0()")
del_items(0x80084544)
SetType(0x80084544, "void SetPadType__4CPadUc(struct CPad *this, unsigned char val)")
del_items(0x8008454C)
SetType(0x8008454C, "unsigned char CheckActive__4CPad_addr_8008454C(struct CPad *this)")
del_items(0x80084558)
SetType(0x80084558, "void SetActive__4CPadUc(struct CPad *this, unsigned char a)")
del_items(0x80084560)
SetType(0x80084560, "void SetBothFlag__4CPadUc(struct CPad *this, unsigned char fl)")
del_items(0x80084568)
SetType(0x80084568, "struct CPad *__4CPadi(struct CPad *this, int PhysStick)")
del_items(0x8008459C)
SetType(0x8008459C, "void Flush__4CPad(struct CPad *this)")
del_items(0x800845C0)
SetType(0x800845C0, "void Set__7FontTab(struct FontTab *this)")
del_items(0x8008465C)
SetType(0x8008465C, "void InitPrinty__Fv()")
del_items(0x800846E4)
SetType(0x800846E4, "void SetTextDat__5CFontP7TextDat(struct CFont *this, struct TextDat *NewDat)")
del_items(0x800846EC)
SetType(0x800846EC, "int PrintChar__5CFontUsUscUcUcUc(struct CFont *this, unsigned short Cx, unsigned short Cy, char C, int R, int G, int B)")
del_items(0x8008486C)
SetType(0x8008486C, "int Print__5CFontiiPc8TXT_JUSTP4RECTUcUcUc(struct CFont *this, int X, int Y, char *Str, enum TXT_JUST Justify, struct RECT *TextWindow, int R, int G, int B)")
del_items(0x80084E8C)
SetType(0x80084E8C, "int GetStrWidth__5CFontPc(struct CFont *this, char *Str)")
del_items(0x80084F40)
SetType(0x80084F40, "void SetChar__5CFontiUs(struct CFont *this, int ch, unsigned short Frm)")
del_items(0x80084FA4)
SetType(0x80084FA4, "int SetOTpos__5CFonti(struct CFont *this, int OT)")
del_items(0x80084FB0)
SetType(0x80084FB0, "void ClearFont__5CFont(struct CFont *this)")
del_items(0x80084FD4)
SetType(0x80084FD4, "bool IsDefined__5CFontUc(struct CFont *this, unsigned char C)")
del_items(0x80084FF4)
SetType(0x80084FF4, "int GetCharFrameNum__5CFontc(struct CFont *this, char ch)")
del_items(0x8008500C)
SetType(0x8008500C, "int GetCharWidth__5CFontc(struct CFont *this, char ch)")
del_items(0x80085064)
SetType(0x80085064, "void Init__5CFont(struct CFont *this)")
del_items(0x80085098)
SetType(0x80085098, "struct FRAME_HDR *GetFr__7TextDati_addr_80085098(struct TextDat *this, int FrNum)")
del_items(0x800850B4)
SetType(0x800850B4, "unsigned char TrimCol__Fs(short col)")
del_items(0x800850EC)
SetType(0x800850EC, "struct POLY_GT4 *DialogPrint__Fiiiiiiiiii(int Frm, int X, int Y, int SW, int SH, int UW, int UH, int UOfs, int VOfs, int Trans)")
del_items(0x80085A64)
SetType(0x80085A64, "struct POLY_G4 *GetDropShadowG4__FUcUcUcUcUcUcUcUcUcUcUcUc(unsigned char r0, unsigned char g0, unsigned char b0, unsigned char r1, int g1, int b1, int r2, int g2, int b2, int r3, int g3, int b3)")
del_items(0x80085B9C)
SetType(0x80085B9C, "void DropShadows__Fiiii(int x, int y, int w, int h)")
del_items(0x80085E40)
SetType(0x80085E40, "void InitDialog__Fv()")
del_items(0x80085F78)
SetType(0x80085F78, "void GetSizes__6Dialog(struct Dialog *this)")
del_items(0x800861D0)
SetType(0x800861D0, "void Back__6Dialogiiii(struct Dialog *this, int DX, int DY, int DW, int DH)")
del_items(0x80087390)
SetType(0x80087390, "void Line__6Dialogiii(struct Dialog *this, int DX, int DY, int DW)")
del_items(0x800875A8)
SetType(0x800875A8, "struct PAL *GetPal__7TextDati_addr_800875A8(struct TextDat *this, int PalNum)")
del_items(0x800875C4)
SetType(0x800875C4, "struct FRAME_HDR *GetFr__7TextDati_addr_800875C4(struct TextDat *this, int FrNum)")
del_items(0x800875E0)
SetType(0x800875E0, "void ATT_DoAttract__Fv()")
del_items(0x80087730)
SetType(0x80087730, "void CreatePlayersFromFeData__FR9FE_CREATE(struct FE_CREATE *CStruct)")
del_items(0x800877CC)
SetType(0x800877CC, "void UpdateSel__FPUsUsPUc(unsigned short *Col, unsigned short Add, unsigned char *Count)")
del_items(0x8008780C)
SetType(0x8008780C, "void CycleSelCols__Fv()")
del_items(0x8008799C)
SetType(0x8008799C, "int FindTownCreature__7CBlocksi(struct CBlocks *this, int GameEqu)")
del_items(0x80087A10)
SetType(0x80087A10, "int FindCreature__7CBlocksi(struct CBlocks *this, int MgNum)")
del_items(0x80087A64)
SetType(0x80087A64, "struct CBlocks *__7CBlocksiiiii(struct CBlocks *this, int BgId, int ObjId, int ItemId, int Level, int List)")
del_items(0x80087BB8)
SetType(0x80087BB8, "void SetTownersGraphics__7CBlocks(struct CBlocks *this)")
del_items(0x80087BF0)
SetType(0x80087BF0, "void SetMonsterGraphics__7CBlocksii(struct CBlocks *this, int Level, int List)")
del_items(0x80087CB8)
SetType(0x80087CB8, "void ___7CBlocks(struct CBlocks *this, int __in_chrg)")
del_items(0x80087D40)
SetType(0x80087D40, "void DumpGt4s__7CBlocks(struct CBlocks *this)")
del_items(0x80087DA8)
SetType(0x80087DA8, "void DumpRects__7CBlocks(struct CBlocks *this)")
del_items(0x80087E10)
SetType(0x80087E10, "void SetGraphics__7CBlocksPP7TextDatPii(struct CBlocks *this, struct TextDat **TDat, int *pId, int Id)")
del_items(0x80087E6C)
SetType(0x80087E6C, "void DumpGraphics__7CBlocksPP7TextDatPi(struct CBlocks *this, struct TextDat **TDat, int *Id)")
del_items(0x80087EBC)
SetType(0x80087EBC, "void PrintBlockOutline__7CBlocksiiiii(struct CBlocks *this, int x, int y, int r, int g, int b)")
del_items(0x80088208)
SetType(0x80088208, "void Load__7CBlocksi(struct CBlocks *this, int Id)")
del_items(0x800882B4)
SetType(0x800882B4, "void MakeRectTable__7CBlocks(struct CBlocks *this)")
del_items(0x80088388)
SetType(0x80088388, "void MakeGt4Table__7CBlocks(struct CBlocks *this)")
del_items(0x80088490)
SetType(0x80088490, "void MakeGt4__7CBlocksP8POLY_GT4P9FRAME_HDR(struct CBlocks *this, struct POLY_GT4 *GT4, struct FRAME_HDR *Fr)")
del_items(0x800885CC)
SetType(0x800885CC, "struct CBlock *GetBlock__7CBlocksi(struct CBlocks *this, int num)")
del_items(0x80088644)
SetType(0x80088644, "void Print__7CBlocks(struct CBlocks *this)")
del_items(0x8008866C)
SetType(0x8008866C, "void SetXY__7CBlocksii(struct CBlocks *this, int nx, int ny)")
del_items(0x80088694)
SetType(0x80088694, "void GetXY__7CBlocksPiT1(struct CBlocks *this, int *nx, int *ny)")
del_items(0x800886AC)
SetType(0x800886AC, "void PrintMap__7CBlocksii(struct CBlocks *this, int x, int y)")
del_items(0x80089BD0)
SetType(0x80089BD0, "void PrintGameSprites__7CBlocksiiiii(struct CBlocks *this, int ThisXPos, int ThisYPos, int OtPos, int ScrX, int ScrY)")
del_items(0x80089D40)
SetType(0x80089D40, "void PrintGameSprites__7CBlocksP8map_infoiiiiiii(struct CBlocks *this, struct map_info *piece, int OtPos, int ScrX, int ScrY, int R, int G, int B)")
del_items(0x8008AAB8)
SetType(0x8008AAB8, "void PrintSprites__7CBlocksP8map_infoiiiiiii(struct CBlocks *this, struct map_info *piece, int OtPos, int ScrX, int ScrY, int R, int G, int B)")
del_items(0x8008B17C)
SetType(0x8008B17C, "void PrintSprites__7CBlocksiiiii(struct CBlocks *this, int ThisXPos, int ThisYPos, int OtPos, int ScrX, int ScrY)")
del_items(0x8008B2EC)
SetType(0x8008B2EC, "int ScrToWorldX__7CBlocksii(struct CBlocks *this, int sx, int sy)")
del_items(0x8008B300)
SetType(0x8008B300, "int ScrToWorldY__7CBlocksii(struct CBlocks *this, int sx, int sy)")
del_items(0x8008B314)
SetType(0x8008B314, "void SetScrollTarget__7CBlocksii(struct CBlocks *this, int x, int y)")
del_items(0x8008B3D8)
SetType(0x8008B3D8, "void DoScroll__7CBlocks(struct CBlocks *this)")
del_items(0x8008B440)
SetType(0x8008B440, "void SetPlayerPosBlocks__7CBlocksiii(struct CBlocks *this, int PlayerNum, int bx, int by)")
del_items(0x8008B4E0)
SetType(0x8008B4E0, "void GetScrXY__7CBlocksR4RECTiiii(struct CBlocks *this, struct RECT *R, int x, int y, int sxoff, int syoff)")
del_items(0x8008B5B4)
SetType(0x8008B5B4, "void ShadScaleSkew__7CBlocksP8POLY_FT4(struct POLY_FT4 *Ft4)")
del_items(0x8008B634)
SetType(0x8008B634, "int WorldToScrX__7CBlocksii(struct CBlocks *this, int x, int y)")
del_items(0x8008B63C)
SetType(0x8008B63C, "int WorldToScrY__7CBlocksii(struct CBlocks *this, int x, int y)")
del_items(0x8008B650)
SetType(0x8008B650, "struct CBlocks *BL_GetCurrentBlocks__Fv()")
del_items(0x8008B65C)
SetType(0x8008B65C, "void PRIM_GetPrim__FPP8POLY_FT4_addr_8008B65C(struct POLY_FT4 **Prim)")
del_items(0x8008B6D8)
SetType(0x8008B6D8, "int GetHighlightCol__FiPiUsUsUs(int Index, int *SelList, unsigned short P1Col, unsigned short P2Col, int P12Col)")
del_items(0x8008B720)
SetType(0x8008B720, "struct POLY_FT4 *PRIM_GetCopy__FP8POLY_FT4(struct POLY_FT4 *Prim)")
del_items(0x8008B75C)
SetType(0x8008B75C, "int GetHighlightCol__FiPcUsUsUs(int Index, char *SelList, unsigned short P1Col, unsigned short P2Col, int P12Col)")
del_items(0x8008B7A4)
SetType(0x8008B7A4, "void PRIM_GetPrim__FPP8POLY_GT4_addr_8008B7A4(struct POLY_GT4 **Prim)")
del_items(0x8008B820)
SetType(0x8008B820, "void PRIM_GetPrim__FPP7LINE_F2(struct LINE_F2 **Prim)")
del_items(0x8008B89C)
SetType(0x8008B89C, "void PRIM_CopyPrim__FP8POLY_FT4T0(struct POLY_FT4 *Dest, struct POLY_FT4 *Source)")
del_items(0x8008B8C4)
SetType(0x8008B8C4, "int GetCreature__14TownToCreaturei(struct TownToCreature *this, int GameCreature)")
del_items(0x8008B8E0)
SetType(0x8008B8E0, "void SetItemGraphics__7CBlocksi(struct CBlocks *this, int Id)")
del_items(0x8008B908)
SetType(0x8008B908, "void SetObjGraphics__7CBlocksi(struct CBlocks *this, int Id)")
del_items(0x8008B930)
SetType(0x8008B930, "void DumpItems__7CBlocks(struct CBlocks *this)")
del_items(0x8008B954)
SetType(0x8008B954, "void DumpObjs__7CBlocks(struct CBlocks *this)")
del_items(0x8008B978)
SetType(0x8008B978, "void DumpMonsters__7CBlocks(struct CBlocks *this)")
del_items(0x8008B9A0)
SetType(0x8008B9A0, "int GetNumOfBlocks__7CBlocks(struct CBlocks *this)")
del_items(0x8008B9AC)
SetType(0x8008B9AC, "void CopyToGt4__9LittleGt4P8POLY_GT4(struct LittleGt4 *this, struct POLY_GT4 *Gt4)")
del_items(0x8008BA44)
SetType(0x8008BA44, "void InitFromGt4__9LittleGt4P8POLY_GT4ii(struct LittleGt4 *this, struct POLY_GT4 *Gt4, int nw, int nh)")
del_items(0x8008BAD4)
SetType(0x8008BAD4, "int GetNumOfFrames__7TextDatii(struct TextDat *this, int Creature, int Action)")
del_items(0x8008BB0C)
SetType(0x8008BB0C, "struct CCreatureHdr *GetCreature__7TextDati_addr_8008BB0C(struct TextDat *this, int Creature)")
del_items(0x8008BB84)
SetType(0x8008BB84, "int GetNumOfCreatures__7TextDat_addr_8008BB84(struct TextDat *this)")
del_items(0x8008BB98)
SetType(0x8008BB98, "void SetFileInfo__7TextDatPC13CTextFileInfoi_addr_8008BB98(struct TextDat *this, struct CTextFileInfo *NewInfo, int NewTexNum)")
del_items(0x8008BBA4)
SetType(0x8008BBA4, "struct PAL *GetPal__7TextDati_addr_8008BBA4(struct TextDat *this, int PalNum)")
del_items(0x8008BBC0)
SetType(0x8008BBC0, "struct FRAME_HDR *GetFr__7TextDati_addr_8008BBC0(struct TextDat *this, int FrNum)")
del_items(0x8008BBDC)
SetType(0x8008BBDC, "bool OVR_IsMemcardOverlayBlank__Fv()")
del_items(0x8008BC08)
SetType(0x8008BC08, "void OVR_LoadPregame__Fv()")
del_items(0x8008BC30)
SetType(0x8008BC30, "void OVR_LoadFrontend__Fv()")
del_items(0x8008BC58)
SetType(0x8008BC58, "void OVR_LoadGame__Fv()")
del_items(0x8008BC80)
SetType(0x8008BC80, "void OVR_LoadFmv__Fv()")
del_items(0x8008BCA8)
SetType(0x8008BCA8, "void OVR_LoadMemcard__Fv()")
del_items(0x8008BCD4)
SetType(0x8008BCD4, "void ClearOutOverlays__Fv()")
del_items(0x8008BD2C)
SetType(0x8008BD2C, "void ClearOut__7Overlay(struct Overlay *this)")
del_items(0x8008BDF0)
SetType(0x8008BDF0, "void Load__7Overlay(struct Overlay *this)")
del_items(0x8008BE60)
SetType(0x8008BE60, "enum OVER_TYPE OVR_GetCurrentOverlay__Fv()")
del_items(0x8008BE6C)
SetType(0x8008BE6C, "void LoadOver__FR7Overlay(struct Overlay *Ovr)")
del_items(0x8008BEC0)
SetType(0x8008BEC0, "void _GLOBAL__I_OVR_Open__Fv()")
del_items(0x8008C030)
SetType(0x8008C030, "enum OVER_TYPE GetOverType__7Overlay(struct Overlay *this)")
del_items(0x8008C03C)
SetType(0x8008C03C, "void StevesDummyPoll__Fv()")
del_items(0x8008C044)
SetType(0x8008C044, "void Lambo__Fv()")
del_items(0x8008C04C)
SetType(0x8008C04C, "struct CPlayer *__7CPlayerbi(struct CPlayer *this, bool Town, int mPlayerNum)")
del_items(0x8008C130)
SetType(0x8008C130, "void ___7CPlayer(struct CPlayer *this, int __in_chrg)")
del_items(0x8008C188)
SetType(0x8008C188, "void Load__7CPlayeri(struct CPlayer *this, int Id)")
del_items(0x8008C1E4)
SetType(0x8008C1E4, "void SetBlockXY__7CPlayerR7CBlocksR12PlayerStruct(struct CPlayer *this, struct CBlocks *Bg, struct PlayerStruct *Plr)")
del_items(0x8008C330)
SetType(0x8008C330, "void SetScrollTarget__7CPlayerR12PlayerStructR7CBlocks(struct CPlayer *this, struct PlayerStruct *Plr, struct CBlocks *Bg)")
del_items(0x8008C75C)
SetType(0x8008C75C, "int GetNumOfSpellAnims__FR12PlayerStruct(struct PlayerStruct *Plr)")
del_items(0x8008C7DC)
SetType(0x8008C7DC, "void Print__7CPlayerR12PlayerStructR7CBlocks(struct CPlayer *this, struct PlayerStruct *Plr, struct CBlocks *Bg)")
del_items(0x8008CCD0)
SetType(0x8008CCD0, "int FindAction__7CPlayerR12PlayerStruct(struct CPlayer *this, struct PlayerStruct *Plr)")
del_items(0x8008CD4C)
SetType(0x8008CD4C, "enum PACTION FindActionEnum__7CPlayerR12PlayerStruct(struct CPlayer *this, struct PlayerStruct *Plr)")
del_items(0x8008CDC8)
SetType(0x8008CDC8, "void Init__7CPlayer(struct CPlayer *this)")
del_items(0x8008CDD0)
SetType(0x8008CDD0, "void Dump__7CPlayer(struct CPlayer *this)")
del_items(0x8008CDD8)
SetType(0x8008CDD8, "void PRIM_GetPrim__FPP8POLY_FT4_addr_8008CDD8(struct POLY_FT4 **Prim)")
del_items(0x8008CE54)
SetType(0x8008CE54, "struct POLY_FT4 *PRIM_GetCopy__FP8POLY_FT4_addr_8008CE54(struct POLY_FT4 *Prim)")
del_items(0x8008CE90)
SetType(0x8008CE90, "void PRIM_CopyPrim__FP8POLY_FT4T0_addr_8008CE90(struct POLY_FT4 *Dest, struct POLY_FT4 *Source)")
del_items(0x8008CEB8)
SetType(0x8008CEB8, "int GetPlrOt__7CBlocksi(struct CBlocks *this, int PlayerNum)")
del_items(0x8008CECC)
SetType(0x8008CECC, "void SetDecompArea__7TextDatiiii(struct TextDat *this, int nDecX, int nDecY, int nPalX, int nPalY)")
del_items(0x8008CEE4)
SetType(0x8008CEE4, "int GetNumOfFrames__7TextDatii_addr_8008CEE4(struct TextDat *this, int Creature, int Action)")
del_items(0x8008CF1C)
SetType(0x8008CF1C, "int GetNumOfActions__7TextDati(struct TextDat *this, int Creature)")
del_items(0x8008CF40)
SetType(0x8008CF40, "struct CCreatureHdr *GetCreature__7TextDati_addr_8008CF40(struct TextDat *this, int Creature)")
del_items(0x8008CFB8)
SetType(0x8008CFB8, "int GetNumOfCreatures__7TextDat_addr_8008CFB8(struct TextDat *this)")
del_items(0x8008CFCC)
SetType(0x8008CFCC, "void SetFileInfo__7TextDatPC13CTextFileInfoi_addr_8008CFCC(struct TextDat *this, struct CTextFileInfo *NewInfo, int NewTexNum)")
del_items(0x8008CFD8)
SetType(0x8008CFD8, "void PROF_Open__Fv()")
del_items(0x8008D018)
SetType(0x8008D018, "bool PROF_State__Fv()")
del_items(0x8008D024)
SetType(0x8008D024, "void PROF_On__Fv()")
del_items(0x8008D034)
SetType(0x8008D034, "void PROF_Off__Fv()")
del_items(0x8008D040)
SetType(0x8008D040, "void PROF_CpuEnd__Fv()")
del_items(0x8008D070)
SetType(0x8008D070, "void PROF_CpuStart__Fv()")
del_items(0x8008D094)
SetType(0x8008D094, "void PROF_DrawStart__Fv()")
del_items(0x8008D0B8)
SetType(0x8008D0B8, "void PROF_DrawEnd__Fv()")
del_items(0x8008D0E8)
SetType(0x8008D0E8, "void PROF_Draw__FPUl(unsigned long *Ot)")
del_items(0x8008D2DC)
SetType(0x8008D2DC, "void PROF_Restart__Fv()")
del_items(0x8008D2FC)
SetType(0x8008D2FC, "void PSX_WndProc__FUilUl(unsigned int Msg, long wParam, unsigned long lParam)")
del_items(0x8008D3F4)
SetType(0x8008D3F4, "void PSX_PostWndProc__FUilUl(unsigned int Msg, long wParam, unsigned long lParam)")
del_items(0x8008D4A4)
SetType(0x8008D4A4, "void GoBackLevel__Fv()")
del_items(0x8008D51C)
SetType(0x8008D51C, "void GoWarpLevel__Fv()")
del_items(0x8008D554)
SetType(0x8008D554, "void PostLoadGame__Fv()")
del_items(0x8008D5F0)
SetType(0x8008D5F0, "void GoLoadGame__Fv()")
del_items(0x8008D678)
SetType(0x8008D678, "void PostNewLevel__Fv()")
del_items(0x8008D714)
SetType(0x8008D714, "void GoNewLevel__Fv()")
del_items(0x8008D768)
SetType(0x8008D768, "void PostGoBackLevel__Fv()")
del_items(0x8008D800)
SetType(0x8008D800, "void GoForwardLevel__Fv()")
del_items(0x8008D858)
SetType(0x8008D858, "void PostGoForwardLevel__Fv()")
del_items(0x8008D8F0)
SetType(0x8008D8F0, "void GoNewGame__Fv()")
del_items(0x8008D940)
SetType(0x8008D940, "void PostNewGame__Fv()")
del_items(0x8008D978)
SetType(0x8008D978, "void LevelToLevelInit__Fv()")
del_items(0x8008D9D0)
SetType(0x8008D9D0, "unsigned int GetPal__6GPaneli(struct GPanel *this, int Frm)")
del_items(0x8008DA14)
SetType(0x8008DA14, "struct GPanel *__6GPaneli(struct GPanel *this, int Ofs)")
del_items(0x8008DA6C)
SetType(0x8008DA6C, "void DrawFlask__6GPanelP7PanelXYP12PlayerStruct(struct GPanel *this, struct PanelXY *XY, struct PlayerStruct *Plr)")
del_items(0x8008DEEC)
SetType(0x8008DEEC, "void DrawSpeedBar__6GPanelP7PanelXYP12PlayerStruct(struct GPanel *this, struct PanelXY *XY, struct PlayerStruct *Plr)")
del_items(0x8008E370)
SetType(0x8008E370, "void DrawSpell__6GPanelP7PanelXYP12PlayerStruct(struct GPanel *this, struct PanelXY *XY, struct PlayerStruct *Plr)")
del_items(0x8008E4DC)
SetType(0x8008E4DC, "void DrawMsgWindow__6GPanelP7PanelXYP12PlayerStruct(struct GPanel *this, struct PanelXY *XY, struct PlayerStruct *Plr)")
del_items(0x8008E528)
SetType(0x8008E528, "int DrawDurThingy__6GPaneliiP10ItemStructi(struct GPanel *this, int X, int Y, struct ItemStruct *Item, int ItemType)")
del_items(0x8008E8E4)
SetType(0x8008E8E4, "void DrawDurIcon__6GPanelP7PanelXYP12PlayerStruct(struct GPanel *this, struct PanelXY *XY, struct PlayerStruct *Plr)")
del_items(0x8008E9D8)
SetType(0x8008E9D8, "void Print__6GPanelP7PanelXYP12PlayerStruct(struct GPanel *this, struct PanelXY *XY, struct PlayerStruct *Plr)")
del_items(0x8008EADC)
SetType(0x8008EADC, "struct PAL *GetPal__7TextDati_addr_8008EADC(struct TextDat *this, int PalNum)")
del_items(0x8008EAF8)
SetType(0x8008EAF8, "struct FRAME_HDR *GetFr__7TextDati_addr_8008EAF8(struct TextDat *this, int FrNum)")
del_items(0x8008EB14)
SetType(0x8008EB14, "void PrintCDWaitTask__FP4TASK(struct TASK *T)")
del_items(0x8008EBF8)
SetType(0x8008EBF8, "void InitCDWaitIcon__Fv()")
del_items(0x8008EC2C)
SetType(0x8008EC2C, "void STR_Debug__FP6SFXHDRPce(struct SFXHDR *sfh, char *e)")
del_items(0x8008EC40)
SetType(0x8008EC40, "void STR_SystemTask__FP4TASK(struct TASK *T)")
del_items(0x8008EC88)
SetType(0x8008EC88, "void STR_AllocBuffer__Fv()")
del_items(0x8008ECDC)
SetType(0x8008ECDC, "void STR_Init__Fv()")
del_items(0x8008EDA8)
SetType(0x8008EDA8, "struct SFXHDR *STR_InitStream__Fv()")
del_items(0x8008EEE0)
SetType(0x8008EEE0, "struct SFXHDR *STR_PlaySound__FUscic(unsigned short Name, char flag, int volume, char loop)")
del_items(0x8008F01C)
SetType(0x8008F01C, "void STR_setvolume__FP6SFXHDR(struct SFXHDR *sfh)")
del_items(0x8008F074)
SetType(0x8008F074, "void STR_PlaySFX__FP6SFXHDR(struct SFXHDR *sfh)")
del_items(0x8008F180)
SetType(0x8008F180, "void STR_pauseall__Fv()")
del_items(0x8008F1D0)
SetType(0x8008F1D0, "void STR_resumeall__Fv()")
del_items(0x8008F220)
SetType(0x8008F220, "void STR_CloseStream__FP6SFXHDR(struct SFXHDR *sfh)")
del_items(0x8008F28C)
SetType(0x8008F28C, "void STR_SoundCommand__FP6SFXHDRi(struct SFXHDR *sfh, int Command)")
del_items(0x8008F398)
SetType(0x8008F398, "char STR_Command__FP6SFXHDR(struct SFXHDR *sfh)")
del_items(0x8008F544)
SetType(0x8008F544, "void STR_DMAControl__FP6SFXHDR(struct SFXHDR *sfh)")
del_items(0x8008F60C)
SetType(0x8008F60C, "void STR_PlayStream__FP6SFXHDRPUci(struct SFXHDR *sfh, unsigned char *Src, int size)")
del_items(0x8008F7E8)
SetType(0x8008F7E8, "void STR_AsyncWeeTASK__FP4TASK(struct TASK *T)")
del_items(0x8008FAE0)
SetType(0x8008FAE0, "void STR_AsyncTASK__FP4TASK(struct TASK *T)")
del_items(0x8008FF0C)
SetType(0x8008FF0C, "void STR_StreamMainTask__FP6SFXHDRc(struct SFXHDR *sfh, char FileType)")
del_items(0x8009001C)
SetType(0x8009001C, "void SPU_Init__Fv()")
del_items(0x800900EC)
SetType(0x800900EC, "int SND_FindChannel__Fv()")
del_items(0x80090158)
SetType(0x80090158, "void SND_ClearBank__Fv()")
del_items(0x800901D0)
SetType(0x800901D0, "bool SndLoadCallBack__FPUciib(unsigned char *Mem, int ReadSoFar, int Size, bool LastChunk)")
del_items(0x80090248)
SetType(0x80090248, "void SND_LoadBank__Fi(int lvlnum)")
del_items(0x8009037C)
SetType(0x8009037C, "int SND_FindSFX__FUs(unsigned short Name)")
del_items(0x800903D0)
SetType(0x800903D0, "void SND_StopSnd__Fi(int voice)")
del_items(0x800903F4)
SetType(0x800903F4, "int SND_RemapSnd__Fi(int SFXNo)")
del_items(0x80090458)
SetType(0x80090458, "int SND_PlaySnd__FUsiii(unsigned short Name, int vol, int pan, int pitchadj)")
del_items(0x8009060C)
SetType(0x8009060C, "void AS_CallBack0__Fi(int handle)")
del_items(0x80090620)
SetType(0x80090620, "void AS_CallBack1__Fi(int handle)")
del_items(0x80090634)
SetType(0x80090634, "void AS_WasLastBlock__FiP6STRHDRP6SFXHDR(int ah, struct STRHDR
id (str): Room streamId
session_token (str): Session authentication token.
payload (UserId):
Keyword Args:
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a single
number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
SuccessResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
kwargs['session_token'] = \
session_token
kwargs['payload'] = \
payload
return self.call_with_http_info(**kwargs)
self.v1_room_id_membership_add_post = _Endpoint(
settings={
'response_type': (SuccessResponse,),
'auth': [],
'endpoint_path': '/v1/room/{id}/membership/add',
'operation_id': 'v1_room_id_membership_add_post',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'id',
'session_token',
'payload',
],
'required': [
'id',
'session_token',
'payload',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
'session_token':
(str,),
'payload':
(UserId,),
},
'attribute_map': {
'id': 'id',
'session_token': 'sessionToken',
},
'location_map': {
'id': 'path',
'session_token': 'header',
'payload': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__v1_room_id_membership_add_post
)
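# Usage sketch (hedged; instance and payload names are assumptions based on the
# docstring example above, values are placeholders):
#   result = pod_api.v1_room_id_membership_add_post(
#       "<room_stream_id>", "<session_token>", payload)               # synchronous
#   thread = pod_api.v1_room_id_membership_add_post(
#       "<room_stream_id>", "<session_token>", payload, async_req=True)
#   result = thread.get()                                             # asynchronous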
def __v1_room_id_membership_demote_owner_post(
self,
id,
session_token,
payload,
**kwargs
):
"""Demotes room owner. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = pod_api.v1_room_id_membership_demote_owner_post(id, session_token, payload, async_req=True)
>>> result = thread.get()
Args:
id (str): Room streamId
session_token (str): Session authentication token.
payload (UserId):
Keyword Args:
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a single
number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
SuccessResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
kwargs['session_token'] = \
session_token
kwargs['payload'] = \
payload
return self.call_with_http_info(**kwargs)
self.v1_room_id_membership_demote_owner_post = _Endpoint(
settings={
'response_type': (SuccessResponse,),
'auth': [],
'endpoint_path': '/v1/room/{id}/membership/demoteOwner',
'operation_id': 'v1_room_id_membership_demote_owner_post',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'id',
'session_token',
'payload',
],
'required': [
'id',
'session_token',
'payload',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
'session_token':
(str,),
'payload':
(UserId,),
},
'attribute_map': {
'id': 'id',
'session_token': 'sessionToken',
},
'location_map': {
'id': 'path',
'session_token': 'header',
'payload': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__v1_room_id_membership_demote_owner_post
)
def __v1_room_id_membership_promote_owner_post(
self,
id,
session_token,
payload,
**kwargs
):
"""Promotes user to owner. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = pod_api.v1_room_id_membership_promote_owner_post(id, session_token, payload, async_req=True)
>>> result = thread.get()
Args:
id (str): Room streamId
session_token (str): Session authentication token.
payload (UserId):
Keyword Args:
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a single
number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
SuccessResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
kwargs['session_token'] = \
session_token
kwargs['payload'] = \
payload
return self.call_with_http_info(**kwargs)
self.v1_room_id_membership_promote_owner_post = _Endpoint(
settings={
'response_type': (SuccessResponse,),
'auth': [],
'endpoint_path': '/v1/room/{id}/membership/promoteOwner',
'operation_id': 'v1_room_id_membership_promote_owner_post',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'id',
'session_token',
'payload',
],
'required': [
'id',
'session_token',
'payload',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
'session_token':
(str,),
'payload':
(UserId,),
},
'attribute_map': {
'id': 'id',
'session_token': 'sessionToken',
},
'location_map': {
'id': 'path',
'session_token': 'header',
'payload': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__v1_room_id_membership_promote_owner_post
)
def __v1_room_id_membership_remove_post(
self,
id,
session_token,
payload,
**kwargs
):
"""Removes member from an existing room. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = pod_api.v1_room_id_membership_remove_post(id, session_token, payload, async_req=True)
>>> result = thread.get()
Args:
id (str): Room streamId
session_token (str): Session authentication token.
payload (UserId):
Keyword Args:
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a single
number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
SuccessResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
kwargs['session_token'] = \
session_token
kwargs['payload'] = \
payload
return self.call_with_http_info(**kwargs)
self.v1_room_id_membership_remove_post = _Endpoint(
settings={
'response_type': (SuccessResponse,),
'auth': [],
'endpoint_path': '/v1/room/{id}/membership/remove',
'operation_id': 'v1_room_id_membership_remove_post',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'id',
'session_token',
'payload',
],
'required': [
'id',
'session_token',
'payload',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
'session_token':
(str,),
'payload':
(UserId,),
},
'attribute_map': {
'id': 'id',
'session_token': 'sessionToken',
},
'location_map': {
'id': 'path',
'session_token': 'header',
| |
1),
# (val_sub, ":player_slot_index", 1),
# (multiplayer_get_my_player, ":my_player_no"),
# (player_get_slot, ":item_no", ":my_player_no", ":player_slot_index"),
(troop_get_slot, ":item_no", "trp_temp_troop", 0),
(troop_get_slot, ":item_imod", "trp_temp_troop", 10),
(assign, ":target_obj", "$g_inside_obj_1"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_2"),
# (store_add, ":player_slot_index", slot_player_selected_item_indices_begin, 2),
# (val_sub, ":player_slot_index", 1),
# (multiplayer_get_my_player, ":my_player_no"),
# (player_get_slot, ":item_no", ":my_player_no", ":player_slot_index"),
(troop_get_slot, ":item_no", "trp_temp_troop", 1),
(troop_get_slot, ":item_imod", "trp_temp_troop", 11),
(assign, ":target_obj", "$g_inside_obj_2"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_3"),
# (store_add, ":player_slot_index", slot_player_selected_item_indices_begin, 3),
# (val_sub, ":player_slot_index", 1),
# (multiplayer_get_my_player, ":my_player_no"),
# (player_get_slot, ":item_no", ":my_player_no", ":player_slot_index"),
(troop_get_slot, ":item_no", "trp_temp_troop", 2),
(troop_get_slot, ":item_imod", "trp_temp_troop", 12),
(assign, ":target_obj", "$g_inside_obj_3"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_4"),
# (store_add, ":player_slot_index", slot_player_selected_item_indices_begin, 4),
# (val_sub, ":player_slot_index", 1),
# (multiplayer_get_my_player, ":my_player_no"),
# (player_get_slot, ":item_no", ":my_player_no", ":player_slot_index"),
(troop_get_slot, ":item_no", "trp_temp_troop", 3),
(troop_get_slot, ":item_imod", "trp_temp_troop", 13),
(assign, ":target_obj", "$g_inside_obj_4"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_5"),
# (store_add, ":player_slot_index", slot_player_selected_item_indices_begin, 5),
# (val_sub, ":player_slot_index", 1),
# (multiplayer_get_my_player, ":my_player_no"),
# (player_get_slot, ":item_no", ":my_player_no", ":player_slot_index"),
(troop_get_slot, ":item_no", "trp_temp_troop", 4),
(troop_get_slot, ":item_imod", "trp_temp_troop", 14),
(assign, ":target_obj", "$g_inside_obj_5"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_6"),
# (store_add, ":player_slot_index", slot_player_selected_item_indices_begin, 6),
# (val_sub, ":player_slot_index", 1),
# (multiplayer_get_my_player, ":my_player_no"),
# (player_get_slot, ":item_no", ":my_player_no", ":player_slot_index"),
(troop_get_slot, ":item_no", "trp_temp_troop", 5),
(troop_get_slot, ":item_imod", "trp_temp_troop", 15),
(assign, ":target_obj", "$g_inside_obj_6"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_7"),
# (store_add, ":player_slot_index", slot_player_selected_item_indices_begin, 7),
# (val_sub, ":player_slot_index", 1),
# (multiplayer_get_my_player, ":my_player_no"),
# (player_get_slot, ":item_no", ":my_player_no", ":player_slot_index"),
(troop_get_slot, ":item_no", "trp_temp_troop", 6),
(troop_get_slot, ":item_imod", "trp_temp_troop", 16),
(assign, ":target_obj", "$g_inside_obj_7"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_8"),
# (store_add, ":player_slot_index", slot_player_selected_item_indices_begin, 8),
# (val_sub, ":player_slot_index", 1),
# (multiplayer_get_my_player, ":my_player_no"),
# (player_get_slot, ":item_no", ":my_player_no", ":player_slot_index"),
(troop_get_slot, ":item_no", "trp_temp_troop", 7),
(troop_get_slot, ":item_imod", "trp_temp_troop", 17),
(assign, ":target_obj", "$g_inside_obj_8"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_9"),
# (eq, "$g_horses_are_avaliable", 1),
# (store_add, ":player_slot_index", slot_player_selected_item_indices_begin, 9),
# (val_sub, ":player_slot_index", 1),
# (multiplayer_get_my_player, ":my_player_no"),
# (player_get_slot, ":item_no", ":my_player_no", ":player_slot_index"),
(troop_get_slot, ":item_no", "trp_temp_troop", 8),
(troop_get_slot, ":item_imod", "trp_temp_troop", 18),
(assign, ":target_obj", "$g_inside_obj_9"),
(try_end),
(try_begin),
(ge, ":item_no", 0),
(overlay_get_position, pos0, ":target_obj"),
(show_item_details_with_modifier, ":item_no", ":item_imod", pos0, 0),#test
#(multiplayer_get_my_player, ":my_player_no"),
#(player_get_troop_id, ":my_player_troop_no", ":my_player_no"),
# (try_begin),
# (call_script, "script_cf_multiplayer_is_item_default_for_troop", ":item_no", ":my_player_troop_no"),
# (store_item_value, ":item_value", ":item_no"),
# (show_item_details, ":item_no", pos0, ":item_value"),
# (else_try),
# (store_troop_faction, ":my_player_faction_no", ":my_player_troop_no"),
# (store_sub, ":faction_slot", ":my_player_faction_no", npc_kingdoms_begin),
# (val_add, ":faction_slot", slot_item_multiplayer_faction_price_multipliers_begin),
# (item_get_slot, ":price_multiplier", ":item_no", ":faction_slot"),
#(show_item_details, ":item_no", pos0, ":price_multiplier"),
# (try_end),
# (assign, "$g_current_opened_item_details", ":item_no"),
(assign, "$g_current_opened_item_details", ":target_obj"),
(try_end),
(else_try),
(assign, ":item_no", -1),
(try_begin),
(ge, ":object", "$g_presentation_obj_item_select_next"),
(store_sub, ":tested_object", ":object", "$g_presentation_obj_item_select_next"),
(store_mod, ":mod_value", ":tested_object", 2),
(store_sub, ":mod_value", 1, ":mod_value"),
(val_div, ":tested_object", 2),
(store_add, ":cur_slot", multi_data_item_button_indices_begin, ":tested_object"),
(troop_get_slot, ":item_no", "trp_multiplayer_data", ":cur_slot"),
(assign, ":target_obj", ":object"),
(val_add, ":target_obj", ":mod_value"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_1"),
# (store_add, ":player_slot_index", slot_player_selected_item_indices_begin, 1),
# (val_sub, ":player_slot_index", 1),
# (multiplayer_get_my_player, ":my_player_no"),
# (player_get_slot, ":item_no", ":my_player_no", ":player_slot_index"),
(troop_get_slot, ":item_no", "trp_temp_troop", 0),
(assign, ":target_obj", "$g_inside_obj_1"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_2"),
# (store_add, ":player_slot_index", slot_player_selected_item_indices_begin, 2),
# (val_sub, ":player_slot_index", 1),
# (multiplayer_get_my_player, ":my_player_no"),
# (player_get_slot, ":item_no", ":my_player_no", ":player_slot_index"),
(troop_get_slot, ":item_no", "trp_temp_troop", 1),
(assign, ":target_obj", "$g_inside_obj_2"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_3"),
# (store_add, ":player_slot_index", slot_player_selected_item_indices_begin, 3),
# (val_sub, ":player_slot_index", 1),
# (multiplayer_get_my_player, ":my_player_no"),
# (player_get_slot, ":item_no", ":my_player_no", ":player_slot_index"),
(troop_get_slot, ":item_no", "trp_temp_troop", 2),
(assign, ":target_obj", "$g_inside_obj_3"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_4"),
# (store_add, ":player_slot_index", slot_player_selected_item_indices_begin, 4),
# (val_sub, ":player_slot_index", 1),
# (multiplayer_get_my_player, ":my_player_no"),
# (player_get_slot, ":item_no", ":my_player_no", ":player_slot_index"),
(troop_get_slot, ":item_no", "trp_temp_troop", 3),
(assign, ":target_obj", "$g_inside_obj_4"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_5"),
# (store_add, ":player_slot_index", slot_player_selected_item_indices_begin, 5),
# (val_sub, ":player_slot_index", 1),
# (multiplayer_get_my_player, ":my_player_no"),
# (player_get_slot, ":item_no", ":my_player_no", ":player_slot_index"),
(troop_get_slot, ":item_no", "trp_temp_troop", 4),
(assign, ":target_obj", "$g_inside_obj_5"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_6"),
# (store_add, ":player_slot_index", slot_player_selected_item_indices_begin, 6),
# (val_sub, ":player_slot_index", 1),
# (multiplayer_get_my_player, ":my_player_no"),
# (player_get_slot, ":item_no", ":my_player_no", ":player_slot_index"),
(troop_get_slot, ":item_no", "trp_temp_troop", 5),
(assign, ":target_obj", "$g_inside_obj_6"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_7"),
# (store_add, ":player_slot_index", slot_player_selected_item_indices_begin, 7),
# (val_sub, ":player_slot_index", 1),
# (multiplayer_get_my_player, ":my_player_no"),
# (player_get_slot, ":item_no", ":my_player_no", ":player_slot_index"),
(troop_get_slot, ":item_no", "trp_temp_troop", 6),
(assign, ":target_obj", "$g_inside_obj_7"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_8"),
# (store_add, ":player_slot_index", slot_player_selected_item_indices_begin, 8),
# (val_sub, ":player_slot_index", 1),
# (multiplayer_get_my_player, ":my_player_no"),
# (player_get_slot, ":item_no", ":my_player_no", ":player_slot_index"),
(troop_get_slot, ":item_no", "trp_temp_troop", 7),
(assign, ":target_obj", "$g_inside_obj_8"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_9"),
# (eq, "$g_horses_are_avaliable", 1),
# (store_add, ":player_slot_index", slot_player_selected_item_indices_begin, 9),
# (val_sub, ":player_slot_index", 1),
# (multiplayer_get_my_player, ":my_player_no"),
# (player_get_slot, ":item_no", ":my_player_no", ":player_slot_index"),
(troop_get_slot, ":item_no", "trp_temp_troop", 8),
(assign, ":target_obj", "$g_inside_obj_9"),
(try_end),
(try_begin),
# (eq, "$g_current_opened_item_details", ":item_no"),
(eq, "$g_current_opened_item_details", ":target_obj"),
(close_item_details),
(assign, "$g_current_opened_item_details", -1),
(try_end),
(try_end),
(else_try),
(assign, "$g_close_equipment_selection", 0),
(presentation_set_duration, 0),
(try_end),
]),
(ti_on_presentation_event_state_change,
[(store_trigger_param_1, ":object"),
#(store_trigger_param_2, ":value"),
# (multiplayer_get_my_player, ":my_player_no"),
# (player_get_troop_id, ":my_troop_no", ":my_player_no"),
(try_begin),
(eq, "$g_close_equipment_selection", 0),
(try_begin),
(eq, "$g_presentation_state", 0),
#clear item select popup
(try_for_range, ":cur_slot", multi_data_item_button_indices_begin, multi_data_item_button_indices_end),
(troop_set_slot, "trp_multiplayer_data", ":cur_slot", -1),
(try_end),
(assign, "$coop_num_available_items", 0),
(try_begin),
(eq, ":object", "$g_presentation_obj_item_select_1"),
(assign, "$g_presentation_state", 1),
(multiplayer_send_3_int_to_server, multiplayer_event_coop_send_to_server, coop_event_player_get_selected_item_types, itp_type_one_handed_wpn, itp_type_goods),
(presentation_set_duration, 0),
# (start_presentation, "prsnt_coop_item_select"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_2"),
(assign, "$g_presentation_state", 2),
(multiplayer_send_3_int_to_server, multiplayer_event_coop_send_to_server, coop_event_player_get_selected_item_types, itp_type_one_handed_wpn, itp_type_goods),
(presentation_set_duration, 0),
# (start_presentation, "prsnt_coop_item_select"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_3"),
(assign, "$g_presentation_state", 3),
(multiplayer_send_3_int_to_server, multiplayer_event_coop_send_to_server, coop_event_player_get_selected_item_types, itp_type_one_handed_wpn, itp_type_goods),
(presentation_set_duration, 0),
# (start_presentation, "prsnt_coop_item_select"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_4"),
(assign, "$g_presentation_state", 4),
(multiplayer_send_3_int_to_server, multiplayer_event_coop_send_to_server, coop_event_player_get_selected_item_types, itp_type_one_handed_wpn, itp_type_goods),
(presentation_set_duration, 0),
# (start_presentation, "prsnt_coop_item_select"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_5"),
(assign, "$g_presentation_state", 5),
(multiplayer_send_3_int_to_server, multiplayer_event_coop_send_to_server, coop_event_player_get_selected_item_types, itp_type_head_armor, itp_type_body_armor),
(presentation_set_duration, 0),
# (start_presentation, "prsnt_coop_item_select"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_6"),
(assign, "$g_presentation_state", 6),
(multiplayer_send_3_int_to_server, multiplayer_event_coop_send_to_server, coop_event_player_get_selected_item_types, itp_type_body_armor, itp_type_foot_armor),
(presentation_set_duration, 0),
# (start_presentation, "prsnt_coop_item_select"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_7"),
(assign, "$g_presentation_state", 7),
(multiplayer_send_3_int_to_server, multiplayer_event_coop_send_to_server, coop_event_player_get_selected_item_types, itp_type_foot_armor, itp_type_hand_armor),
(presentation_set_duration, 0),
# (start_presentation, "prsnt_coop_item_select"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_8"),
(assign, "$g_presentation_state", 8),
(multiplayer_send_3_int_to_server, multiplayer_event_coop_send_to_server, coop_event_player_get_selected_item_types, itp_type_hand_armor, itp_type_pistol),
(presentation_set_duration, 0),
# (start_presentation, "prsnt_coop_item_select"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_9"),
# (eq, "$g_horses_are_avaliable", 1),
(assign, "$g_presentation_state", 9),
(multiplayer_send_3_int_to_server, multiplayer_event_coop_send_to_server, coop_event_player_get_selected_item_types, itp_type_horse, itp_type_one_handed_wpn),
(presentation_set_duration, 0),
# (start_presentation, "prsnt_coop_item_select"),
(try_end),
(else_try),
(gt, "$g_presentation_state", 0),
(store_sub, ":tested_object", ":object", "$g_presentation_obj_item_select_next"),
(val_div, ":tested_object", 2),
(assign, ":end_cond", multi_data_item_button_indices_end),
(try_for_range, ":cur_slot", multi_data_item_button_indices_begin, ":end_cond"),
(neg|troop_slot_eq, "trp_multiplayer_data", ":cur_slot", -1),
(store_sub, ":button_id", ":cur_slot", multi_data_item_button_indices_begin),
(eq, ":tested_object", ":button_id"),
(troop_get_slot, ":item_no", "trp_multiplayer_data", ":cur_slot"),
# (store_add, ":player_slot_index", slot_player_selected_item_indices_begin, "$g_presentation_state"),
# (val_sub, ":player_slot_index", 1),
# (player_set_slot, ":my_player_no", ":player_slot_index", ":item_no"),
(store_add, ":imod_slot", ":cur_slot", 100),
(troop_get_slot, ":imod", "trp_temp_troop", ":imod_slot"),
(store_sub, ":cur_item_index", "$g_presentation_state", 1),
(store_add, ":cur_imod_index", ":cur_item_index", 10),
(troop_set_slot, "trp_temp_troop", ":cur_item_index", ":item_no"),
(troop_set_slot, "trp_temp_troop", ":cur_imod_index", ":imod"),
(troop_get_slot, ":party_inventory_slot", "trp_temp_troop", ":cur_slot"),
(store_sub, ":player_slot","$g_presentation_state", 1),
# (assign, reg5, ":object"),
# (assign, reg6, ":tested_object"),
# (assign, reg7, ":button_id"),
# (assign, reg8, ":cur_slot"),
# (assign, reg9, "$g_presentation_obj_item_select_next"),
# (str_store_item_name, s40, ":item_no"),
# (assign, reg3, ":party_inventory_slot"),
# (assign, reg2, ":player_slot"),
# (display_message, "@selected {reg2} / {reg3} .. {reg5} {reg6} {reg7} {reg8} {reg9} .. {s40}"),
(ge, ":party_inventory_slot", 10), #dont ask for item not in party inventory
(multiplayer_send_4_int_to_server, multiplayer_event_coop_send_to_server, coop_event_player_ask_for_selected_item, ":player_slot", ":item_no", ":party_inventory_slot"),
# (player_get_gold, ":player_gold", ":my_player_no"),
# (call_script, "script_multiplayer_calculate_cur_selected_items_cost", ":my_player_no", 1),
# (overlay_set_text, "$g_presentation_obj_item_select_12", "str_total_item_cost_reg0"),
# (try_begin),
# (ge, ":player_gold", reg0),
# (overlay_set_color, "$g_presentation_obj_item_select_12", 0xFFFFFF),
# (else_try),
# (overlay_set_color, "$g_presentation_obj_item_select_12", 0xFF0000),
# (try_end),
(assign, ":end_cond", 0), #break
(try_end),
(assign, "$g_presentation_state", 0),
(presentation_set_duration, 0),
(try_begin), #if server does not trigger presentation reload
(eq, ":end_cond", multi_data_item_button_indices_end), #if loop above failed
(start_presentation, "prsnt_coop_item_select"),
(try_end),
(try_end),
(try_begin),
(eq, ":object", "$g_presentation_obj_item_select_11"), #done button
(presentation_set_duration, 0),
(try_end),
# (try_begin),
# (eq, ":object", "$g_presentation_obj_item_select_10"),
# (call_script, "script_multiplayer_set_default_item_selections_for_troop", ":my_troop_no"),
# (presentation_set_duration, 0),
# (assign, "$g_presentation_state", 0),
# (start_presentation, "prsnt_coop_item_select"),
# (else_try),
# (eq, ":object", "$g_presentation_obj_item_select_11"),
# (call_script, "script_multiplayer_send_item_selections"),
# (presentation_set_duration, 0),
# (try_end),
(else_try),
(assign, "$g_close_equipment_selection", 0),
(presentation_set_duration, 0),
(try_end),
]),
(ti_on_presentation_mouse_press,
[(store_trigger_param_1, ":object"),
(store_trigger_param_2, ":mouse_state"),
(try_begin),
(eq, "$g_close_equipment_selection", 0),
(try_begin),
(eq, ":mouse_state", 1), #right click (clears the item slot)
(try_begin),
(eq, "$g_presentation_state", 0),
#(multiplayer_get_my_player, ":my_player_no"),
(try_begin),
(eq, ":object", "$g_presentation_obj_item_select_1"),
# (store_add, ":selected_item_index", slot_player_selected_item_indices_begin, 0),
# (player_get_slot, ":item_remove", ":my_player_no", ":selected_item_index"),
# (player_set_slot, ":my_player_no", ":selected_item_index", -1),
(troop_get_slot, ":item_remove", "trp_temp_troop", 0),
(troop_set_slot, "trp_temp_troop", 0, -1),
(multiplayer_send_3_int_to_server, multiplayer_event_coop_send_to_server, coop_event_player_remove_selected_item, 0, ":item_remove"),
(presentation_set_duration, 0),
(assign, "$g_presentation_state", 0),
(start_presentation, "prsnt_coop_item_select"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_2"),
# (store_add, ":selected_item_index", slot_player_selected_item_indices_begin, 1),
# (player_get_slot, ":item_remove", ":my_player_no", ":selected_item_index"),
# (player_set_slot, ":my_player_no", ":selected_item_index", -1),
(troop_get_slot, ":item_remove", "trp_temp_troop", 1),
(troop_set_slot, "trp_temp_troop", 1, -1),
(multiplayer_send_3_int_to_server, multiplayer_event_coop_send_to_server, coop_event_player_remove_selected_item, 1, ":item_remove"),
(presentation_set_duration, 0),
(assign, "$g_presentation_state", 0),
(start_presentation, "prsnt_coop_item_select"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_3"),
# (store_add, ":selected_item_index", slot_player_selected_item_indices_begin, 2),
# (player_get_slot, ":item_remove", ":my_player_no", ":selected_item_index"),
# (player_set_slot, ":my_player_no", ":selected_item_index", -1),
(troop_get_slot, ":item_remove", "trp_temp_troop", 2),
(troop_set_slot, "trp_temp_troop", 2, -1),
(multiplayer_send_3_int_to_server, multiplayer_event_coop_send_to_server, coop_event_player_remove_selected_item, 2, ":item_remove"),
(presentation_set_duration, 0),
(assign, "$g_presentation_state", 0),
(start_presentation, "prsnt_coop_item_select"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_4"),
# (store_add, ":selected_item_index", slot_player_selected_item_indices_begin, 3),
# (player_get_slot, ":item_remove", ":my_player_no", ":selected_item_index"),
# (player_set_slot, ":my_player_no", ":selected_item_index", -1),
(troop_get_slot, ":item_remove", "trp_temp_troop", 3),
| |
combinations of queries to it were seen. For the purposes of calculating these metrics, query strings and path parameters are both
considered query arguments, so that both <tt>http://example.com/foo?bar</tt> and <tt>http://example.com/foo;baz=bat</tt> would be collapsed
into the URL above (contributing to the 250 figure).</p>
<p>The graph next to it shows how accesses to the queries are distributed; If there is a high peak on the left and a short tail <img style="background-color: #eee;" src='data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAFAAAAAUCAYAAAAa2LrXAAABCklEQVR4nGL8//8/g6Ki4v/79+8zMowCkgEAAAD//2IaaAcMdQAAAAD//xoNQAoBAAAA//8aDUAKAQAAAP//Gg1ACgEAAAD//xoNQAoBAAAA//+CByAjI8P/gXTIUAUAAAAA//8aTYEUAgAAAAD//xoNQAoBAAAA//9CCUBFRcXRbEwiAAAAAP//wkiBo4FIGgAAAAD//xrNwhQCAAAAAP//whqAo6mQeAAAAAD//xpNgRQCAAAAAP//whmAo6mQOAAAAAD//8KbAkcDkTAAAAAA//8imIVHAxE/AAAAAP//IqoMHA1E3AAAAAD//2IhViFyII6OXiMAAAAA//8arYUpBAAAAAD//wMAgZ0V1hUT8dEAAAAASUVORK5CYII=' title='most popular: 27% of accesses'/>, it means that
most accesses went to a few query terms, while if there is a low peak and a long tail <img style="background-color: #eee;" src='data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAFAAAAAUCAYAAAAa2LrXAAABBElEQVR4nGL8//8/g6Ki4v/79+8zMowCkgEAAAD//2IaaAcMdQAAAAD//xoNQAoBAAAA//8aDUAKAQAAAP//Gg1ACgEAAAD//xoNQAoBAAAA//8aDUAKAQAAAP//Gg1ACgEAAAD//2KBMRQVFf/D2KNtQuIBAAAA//8aTYEUAgAAAAD//xoNQAoBAAAA//8aDUAKAQAAAP//whqAyOXhKMAPAAAAAP//YsElQc9AfPDgPoOCgiK9rCMZ4KtUAQAAAP//Gs3CFAIAAAAA//8aDUAKAQAAAP//Gg1ACgEAAAD//xoNQCIAvvoAAAAA//8aDUAKAQAAAP//Gg1ACgEAAAD//wMAXUMU7UJYLdQAAAAASUVORK5CYII=' title='most popular: 4% of accesses'/>, it means that the queries were distributed over a larger
set of request-URIs. Mousing over the graph will show how much of the total accesses went to the most popular query, as a percentage.</p>
<p>In general, a large query diversity means that traffic to a particular service is more difficult to cache; as the query diversity number
approaches the number of accesses, there is less for the cache to exploit, and the hit rate will go down. For example, 1,000 accesses spread over
250 distinct queries can achieve at best a 75% hit rate, because each distinct query has to be fetched at least once. However, if a reasonable
amount of traffic goes to the most popular query terms, it is still possible to achieve a decent hit rate.</p>
"""
print """
<h3>hits</h3>
<p>This column shows the percentage of hits for this URL on the left, and a graph representing their distribution on the right.</p>
<p>In these results, a <em>hit</em> is a response that is served without needing to contact another server. Hits are very fast
and do not cause any load on the systems behind the cache; high hit percentages are desirable.</p>
<dl>
<dt>negative hits (red)</dt>
<dd><em>Cached errors</em>. Certain error response status codes are cached, so that the
server isn't overwhelmed. A large number indicates that a lot of errors are coming from your service.</dd>
<dt>disk hits (grey)</dt>
<dd><em>Hits served from disk</em>. Fast, but slower than from memory. A large number indicates that
this service isn't 'hot' enough to be memory cached consistently.</dd>
<dt>memory hits (green)</dt>
<dd><em>Hits served from memory</em>. Fastest possible service.</dd>
<dt>stale hits (yellow)</dt>
<dd><em>Hits served stale</em>, usually because of mechanisms like <tt>stale-while-revalidate</tt>.
A small number indicates that these mechanisms are working correctly. A large number indicates that the service
is taking a long time to revalidate, and/or has a small freshness lifetime.</dd>
</dl>
<h3>misses</h3>
<p>This column shows the percentage of misses for this URL on the left, and a graph representing their distribution on the right.</p>
<p>In these results, a <em>miss</em> is a response that requires some communication with an upstream server -- usually the
origin server, or another cache. Misses are slower and cause load on other systems, and so are less desirable. However, they are
unavoidable (the cache has to fill somehow), and in some situations are desirable because the back-end system has to be contacted.</p>
<dl>
<dt>client no-cache (yellow)</dt>
<dd><em>Client asked for a fresh copy</em>, with <tt>Pragma</tt> and/or <tt>Cache-Control</tt> request headers.
A large number indicates misconfigured and/or aggressive clients.</dd>
<dt>no validator (grey)</dt>
<dd><em>No validator present in the cached response, so the cache was forced to fetch a new copy</em>. Validators
like <tt>Last-Modified</tt> and <tt>ETag</tt> on responses allow caches to ask if a response has changed, rather
than getting a whole new one. This can save bandwidth if responses are large, and in some situations can avoid
server-side computation. A large number indicates that this feature is not being used.</dd>
<dt>validate unsuccessful (yellow)</dt>
<dd><em>Validator present, but validation unsuccessful</em>. A validator was present in the cached response, but
the server sent a new copy when contacted. A large number indicates that the server doesn't support validation (even
though it is sending response headers that can be used as validators), or that the responses are changing quickly on the
server.</dd>
<dt>validate successful (green)</dt>
<dd><em>Validator present, validation successful</em>. A validator was present in the cached response,
and the server indicated that the same response can be used; a new response was not fetched. A large number indicates
that the server is sending validators and responding to validating requests.</dd>
</dl>
<h3>miss msec</h3>
<p>This column shows the median miss time on the left, and a one-second wide histogram of miss times on the right, both in
millisecond units. This indicates how quickly upstream servers are able to send a response.</p>
<p>Note that the times shown are measured from the first request byte <tt>read()</tt> to the last response byte <tt>write()</tt>;
therefore, slow clients can inflate this number if TCP buffers are filled. If the median is '1000', it indicates that the median
is outside the measured range, and is likely to be greater.</p>
<h3>kbytes</h3>
<p>This column shows the median size (in kilobytes) of successful (2xx) responses served to clients, as well as a 256k-wide
histogram of response sizes on the right.</p>
<p>Note that the sizes shown are actual bytes served, including headers, compression, etc. If the median is '256', it indicates
that the median is at least that amount.</p>
<h3>status codes</h3>
<p>This column shows the distribution of status codes served to clients. They include:</p>
<dl>
<dt>0xx (grey)</dt>
<dd><em>No response status code sent</em> -- usually because the client has disconnected (i.e., aborted before
any response headers have been sent). Large numbers can indicate client timeouts due to server-side delays.</dd>
<dt>1xx (white)</dt>
<dd><em>Informational</em> -- rare HTTP-related messages. Mostly harmless, but should not be seen in large numbers.</dd>
<dt>2xx (green)</dt>
<dd><em>Success</em> -- normal, successful responses.</dd>
<dt>3xx (blue)</dt>
<dd><em>Redirects, etc.</em> -- redirections to other URLs, <tt>300 Multiple Choices</tt> and <tt>304 Not Modified</tt>
responses. Large numbers usually indicate either redirections or 304's due to client-initiated validation (see <em>misses</em>).</dd>
<dt>4xx (yellow)</dt>
<dd><em>Client errors</em> -- problems with the request; e.g., <tt>401 Unauthorized</tt>,
<tt>403 Forbidden</tt>, <tt>404 Not Found</tt>. A large number indicates that clients are making bad requests often.</dd>
<dt>5xx (red)</dt>
<dd><em>Server errors</em> -- problems on the origin server and/or upstream proxies.
A large number indicates that there are frequent upstream failures.</dd>
</dl>
</div>
</body></html>"""
def hashUrl(url):
return hashlib.md5(url).digest()
TOKEN = r'(?:[^\(\)<>@,;:\\"/\[\]\?={} \t]+?)'
QUOTED_STRING = r'(?:"(?:\\"|[^"])*")'
PARAMETER = r'(?:%(TOKEN)s(?:=(?:%(TOKEN)s|%(QUOTED_STRING)s))?)' % locals()
LINK = r'<[^>]*>\s*(?:;\s*%(PARAMETER)s?\s*)*' % locals()
COMMA = r'(?:\s*(?:,\s*)+)'
LINK_SPLIT = r'%s(?=%s|\s*$)' % (LINK, COMMA)
link_splitter = re.compile(LINK_SPLIT)
def _splitstring(instr, item, split):
if not instr:
return []
return [ h.strip() for h in re.findall(r'%s(?=%s|\s*$)' % (item, split), instr)]
def _unquotestring(instr):
if instr[0] == instr[-1] == '"':
instr = instr[1:-1]
instr = re.sub(r'\\(.)', r'\1', instr)
return instr
def parse_link(instr):
out = {}
if not instr:
return out
for link in [h.strip() for h in link_splitter.findall(instr)]:
url, params = link.split(">", 1)
url = url[1:]
param_dict = {}
for param in _splitstring(params, PARAMETER, r"\s*;\s*"):
try:
a, v = param.split("=", 1)
param_dict[a.lower()] = _unquotestring(v)
except ValueError:
param_dict[param.lower()] = None
out[url] = param_dict
return out
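# A minimal usage sketch (not part of the original tool): parse_link() turns
# an RFC 5988 Link header value into a dict keyed by URL. The header value
# below is hypothetical.
def _example_parse_link():
    header = '</page/2>; rel="next", </page/1>; rel="prev"'
    links = parse_link(header)
    # links == {'/page/2': {'rel': 'next'}, '/page/1': {'rel': 'prev'}}
    return links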
# freaking RHL doesn't do Python greater than 2.3, so we
# have to do some of it ourselves. Hmph.
from itertools import islice, repeat, count, imap, izip
from heapq import heapify, heappop
def tee(iterable):
def gen(next, data={}, cnt=[0]):
for i in count():
if i == cnt[0]:
item = data[i] = next()
cnt[0] += 1
else:
item = data.pop(i)
yield item
it = iter(iterable)
return (gen(it.next), gen(it.next))
def nsmallest(n, iterable, key=None):
in1, in2 = tee(iterable)
it = izip(imap(key, in1), count(), in2) # decorate
h = list(it)
heapify(h)
result = map(heappop, repeat(h, min(n, len(h))))
# return map(itemgetter(2), result) # undecorate
return [x[2] for x in result]
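# A minimal usage sketch (not part of the original tool): nsmallest() returns
# the n items whose key values are smallest -- here, the least-popular keys.
def _example_nsmallest():
    popularity = {'a': 5, 'b': 1, 'c': 3}
    # the two keys with the lowest popularity -> ['b', 'c']
    return nsmallest(2, popularity.keys(), key=popularity.get)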
class CacheDict(UserDict):
def __init__(self, urls, max_size=1000, trim_to=.8, **args):
UserDict.__init__(self, **args)
self.urls = urls
self.max_size = max_size
self.trim_size = max_size * trim_to
def __getitem__(self, key):
return self.data[key][0]
def __setitem__(self, key, value):
self.data[key] = (value, len(self.data))
if len(self.data) > self.max_size:
self.trim()
def trim(self):
expired = nsmallest(self.max_size - int(self.trim_size), self.data, self.getkey)
for key in expired:
del self.data[key]
def getkey(self, key):
return self.urls[hashUrl(key)]
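# A minimal usage sketch (hypothetical values, not part of the original tool):
# CacheDict holds at most max_size entries; on overflow, trim() evicts the
# least-popular entries until roughly trim_to * max_size remain, judging
# popularity by the hashed-URL counts in `urls`.
def _example_cachedict():
    url = 'http://example.com/'
    urls = {hashUrl(url): 12}  # hashed URL -> popularity, as built elsewhere
    cache = CacheDict(urls, max_size=1000, trim_to=.8)
    cache[url] = {'hits': 3}
    return cache[url]  # -> {'hits': 3}; the insertion index stays internal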
def usage():
print """\
Usage: %s [-d] [-n num] [-q] logfile
-d Debug parse errors
-n num Number of URLs to report (default: 100)
-q Use the query string as part of the URI
logfile Squid access log, or '-' for STDIN
""" % sys.argv[0]
sys.exit(1)
if __name__ == '__main__':
import getopt
opts, args = getopt.getopt(sys.argv[1:], "dqn:")
opts = dict(opts)
| |
"xbrli:booleanItemType",
"xbrli:dateItemType", "num:percentItemType",
"xbrli:anyURIItemType"]
# There is type-checking we can do for these unitless types but we'll handle
# it elsewhere
required_type = self.get_concept(concept).get_details("type_name")
if required_type in unitlessTypes:
if unit_id is None:
return True
else:
raise OBUnitException(
"Unit {} given for unitless concept {} ({})".format(
unit_id, concept, required_type))
if required_type.startswith("solar-types:"):
print("I don't know how to validate {} yet, skipping for now".format(required_type))
return True
# TODO what other required_types might we get here?
if unit_id is None:
raise OBUnitException(
"No unit given for concept {}, requires type {}".format(
concept, required_type))
unit = self.tu.get_unit(unit_id)
if not unit:
raise OBNotFoundException(
"There is no unit with unit_id={} in the taxonomy."
.format(unit_id))
# TODO: utr.xml has unqualified type names, e.g. "frequencyItemType" and we're looking
# for a qualified type name e.g. "num-us:frequencyItemType". Should we assume that if
# it matches the part after the colon, then it's a match? Or do we need to validate the
# fully-qualified name?
if required_type.split(":")[-1] == unit.item_type:
return True
else:
# TODO raise here?
return False
# Unit has fields: unit_id, unit_name, ns_unit, item_type,
# item_type_date, symbol, definition, base_standard, status, version_date
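# A usage sketch for the validator above (hypothetical concept/unit names,
# not taken from the source):
#
#   self._is_valid_unit("solar:SystemDCCapacity", "kW")  # True if the concept's
#                                                        # type matches the unit
#   self._is_valid_unit("solar:SomeBooleanFlag", None)   # True: unitless concept
#   self._is_valid_unit("solar:SystemDCCapacity", None)  # raises OBUnitException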
def set(self, concept_name, value, **kwargs):
"""
Adds a fact to the document.
The concept_name and the context together identify the fact to set,
and the value will be stored for that fact. If concept_name and context
match a fact already in the document, the old fact will be overwritten.
Otherwise, a new fact is created.
Args:
unit_name : string
a string naming a unit
precision : int
number of places past the decimal point (for decimal type only)
context : Context
can be specified by kwargs duration, instant and entity
duration : str or dict
"forever" or {"start": <datetime>, "end": <datetime>}
instant : datetime
entity : str
entity name
*Axis = <value> (not implemented)
the name of any Axis in a table in this entrypoint
"""
if "unit_name" in kwargs:
unit_name = kwargs.pop("unit_name")
valid_unit_name = self.tu.is_unit(unit_name)
else:
unit_name = None
valid_unit_name = False
if "precision" in kwargs:
precision = kwargs.pop("precision")
if not self.is_concept_writable(concept_name):
raise OBConceptException(
"{} is not a writeable concept".format(concept_name))
concept = self.get_concept(concept_name)
if "context" in kwargs:
context = kwargs.pop("context")
elif len(list(kwargs.keys())) > 0:
# turn the remaining keyword args into a Context object -- this
# is just syntactic sugar to make this method easier to call.
# TODO this block will not work
period = concept.get_details("period_type")
if period not in kwargs and period in self._default_context:
kwargs[period.value] = self._default_context[period]
context = Context(**kwargs)
else:
context = None
# Use default values, if any have been set, to fill in missing fields of context:
if len(self._default_context) > 0:
context = self._fill_in_context_from_defaults(context, concept)
if not self.validate_context(concept_name, context):
raise OBContextException(
"Insufficient context for {}".format(concept_name))
# Check unit type:
if not self._dev_validation_off and not self._is_valid_unit(concept_name, unit_name):
raise OBUnitException(
"{} is not a valid unit name for {}".format(unit_name, concept_name))
# check datatype of given value against concept
if not self._dev_validation_off and not concept.validate_datatype(value):
raise OBTypeException(
"{} is the wrong datatype for {}".format(value, concept_name))
table = self.get_table_for_concept(concept_name)
context = table.store_context(context) # dedupes, assigns ID
f = Fact(concept_name, context, unit_name, value)
# TODO pass in decimals? Fact expects decimals and "precision" is slightly different
# self.facts is nested dict keyed first on table then on context ID
# and finally on concept:
if not table.get_name() in self.facts:
self.facts[table.get_name()] = {}
if not context.get_id() in self.facts[table.get_name()]:
self.facts[table.get_name()][context.get_id()] = {}
# TODO simplify above with defaultdict
self.facts[table.get_name()][context.get_id()][concept_name] = f
# Or: we could keep facts in a flat list, and get() could look them
# up by getting context from hypercube and getting fact from context
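# A minimal usage sketch (hypothetical concept, unit, and entity names; `doc`
# stands for an instance of this class): storing one fact with a unit and an
# instant context built from keyword args:
#
#   doc.set("solar:SystemDCCapacity", 1500.0,
#           unit_name="kW",
#           entity="JUPITER",
#           instant=datetime.datetime(2018, 1, 1))
#
# Calling set() again with the same concept and context overwrites the fact.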
def get(self, concept, context=None):
"""
Returns the value of a fact previously set. The concept
and context together identify the fact to read.
"""
# look up the facts we have
# complain if no value for concept
# complain if context is needed and not provided
# TODO support case where no context passed in? (use default
# context I guess?)
# TODO support getting by kwargs instead of getting by context?
# TODO if only some fields of the context are given, try matching
# on just those fields and disregarding the rest -- e.g. if no
# period is given, return a fact that matches the other fields, whatever
# its duration is?
# TODO a function that returns multiple, i.e. all facts for given
# concept regardless of context, or vice versa.
table = self.get_table_for_concept(concept)
context = table.lookup_context(context)
if table.get_name() in self.facts:
if context.get_id() in self.facts[table.get_name()]:
if concept in self.facts[table.get_name()][context.get_id()]:
return self.facts[table.get_name()][context.get_id()][concept]
return None
def get_all_facts(self):
"""
Returns a flattened list of Fact instances -- all of the facts that
have been set in this document so far.
"""
all_facts = []
for table_dict in list(self.facts.values()):
for context_dict in list(table_dict.values()):
for fact in list(context_dict.values()):
all_facts.append(fact)
return all_facts
def _make_unit_tag(self, unit_id):
"""
Return an XML tag for a unit (such as kw, kwh, etc). Fact tags can
reference this unit tag.
"""
# See http://www.xbrl.org/utr/utr.xml
unit = Element("unit", attrib={"id": unit_id})
measure = SubElement(unit, "measure")
measure.text = "units:{}".format(unit_id)
# because http://www.xbrl.org/2009/utr is included as xmlns:units
return unit
def _toXML_tag(self):
"""
Returns an XML tag which is the root of an XML tree representing
the entire document contents (all contexts, units, and facts) in XML form.
"""
# The root element:
xbrl = Element("xbrl", attrib = self.namespaces)
# Add "link:schemaRef" for the taxonomy that goes with this document:
link = SubElement(xbrl, "link:schemaRef",
attrib = {"xlink:href": self.taxonomy_name,
"xlink:type": "simple"})
# Add a context tag for each context we want to reference:
for hypercube in list(self._tables.values()):
tags = hypercube._toXML()
for tag in tags:
xbrl.append(tag)
facts = self.get_all_facts()
required_units = set([fact.unit for fact in self.get_all_facts() \
if fact.unit is not None])
for unit in required_units:
# Add a unit tag defining each unit we want to reference:
xbrl.append(self._make_unit_tag(unit))
for fact in self.get_all_facts():
xbrl.append( fact._toXML() )
return xbrl
def to_XML(self, filename):
"""
Exports XBRL as XML to the given filename.
To ensure future support use the method with the same name and functionality in Parser.
"""
xbrl = self._toXML_tag()
tree = xml.etree.ElementTree.ElementTree(xbrl)
# Apparently every XML file should start with this, which ElementTree
# doesn't do:
# <?xml version="1.0" encoding="utf-8"?>
tree.write(filename)
def to_XML_string(self):
"""
Returns XBRL as an XML string.
To ensure future support use the method with the same name and functionality in Parser.
"""
xbrl = self._toXML_tag()
return xml.etree.ElementTree.tostring(xbrl).decode()
def to_JSON(self, filename):
"""
Exports XBRL as JSON to the given filename.
To ensure future support use the method with the same name and functionality in Parser.
"""
outfile = open(filename, "w")
outfile.write(self.to_JSON_string())
outfile.close()
def to_JSON_string(self):
"""
Returns XBRL as a JSON string
To ensure future support use the method with the same name and functionality in Parser.
"""
masterJsonObj = {
"documentType": "http://www.xbrl.org/WGWD/YYYY-MM-DD/xbrl-json",
"prefixes": self.namespaces,
"dtsReferences": [],
"facts": {}
}
masterJsonObj["dtsReferences"].append({
"type": "schema",
"href": self.taxonomy_name
})
facts = self.get_all_facts()
for fact in facts:
masterJsonObj["facts"][fact.id] = fact._toJSON()
return json.dumps(masterJsonObj)
def set_default_context(self, dictionary):
"""
Dictionary can have keys: "entity", PeriodType.instant, PeriodType.duration, and also
axes.
Sets these values as the defaults for this document. These values
are used to fill in any fields that are missing from any contexts
passed into set(). For example, if you set the "entity" as a default
on the document, then after that you can leave "entity" out of your
contexts and the default entity will be used.
"""
self._default_context.update(dictionary)
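# A minimal usage sketch (hypothetical values): once defaults are set, later
# set() calls may omit those context fields entirely:
#
#   doc.set_default_context({"entity": "JUPITER",
#                            PeriodType.instant: datetime.datetime.now()})
#   doc.set("solar:SystemDCCapacity", 1500.0, unit_name="kW")
#
# The missing entity and instant are filled in from the defaults.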
def _fill_in_context_from_defaults(self, context, concept):
"""
context: None, or a Context object that may be missing some fields
concept: a Concept object (used to determine what fields are required)
Returns a Context object that has had all its required fields filled in
from the default context (see set_default_context()) if possible.
"""
period = concept.get_details("period_type") # PeriodType.instant or PeriodType.duration
if context is None:
# Create context from default entity and default period:
| |
we should exit the loop.
guard_val = eval_arg(self.guard, context)
if (self.loop_type.lower() == "until"):
guard_val = (not guard_val)
if (not guard_val):
break
# Execute the loop body.
done = False
context.goto_executed = False
for s in self.body:
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug('WHILE loop eval statement: %r' % s)
if (not isinstance(s, VBA_Object)):
continue
s.eval(context=context)
# Has 'Exit Do' (or another exit statement) been called?
if (not context.loop_stack[-1]):
# Yes we have. Stop this loop.
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("WHILE loop: exited loop with 'Exit For'")
done = True
break
# Was there an error that will make us jump to an error handler?
if (context.must_handle_error()):
done = True
break
context.clear_error()
# Did we just run a GOTO? If so we should not run the
# statements after the GOTO.
#if (isinstance(s, Goto_Statement)):
if (context.goto_executed):
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("GOTO executed. Go to next loop iteration.")
break
# Finished with the loop due to 'Exit Do' or an error?
if (done):
break
# Does it look like this might be an infinite loop? Check this by
# seeing if any changes have been made to the variables in the loop
# guard.
curr_guard_vals = _get_guard_variables(self, context)
if (curr_guard_vals == old_guard_vals):
num_no_change += 1
if (num_no_change >= context.max_static_iters):
log.warn("Possible infinite While loop detected. Exiting loop.")
break
else:
num_no_change = 0
# Remove tracking of this loop.
if (len(context.loop_stack) > 0):
context.loop_stack.pop()
if (len(context.loop_object_stack) > 0):
context.loop_object_stack.pop()
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug('WHILE loop: end.')
# Run the error handler if we have one and we broke out of the statement
# loop with an error.
context.handle_error(params)
while_type = CaselessKeyword("While") | CaselessKeyword("Until")
while_clause = Optional(CaselessKeyword("Do").suppress()) + while_type("type") + boolean_expression("guard")
simple_while_statement = while_clause("clause") + Suppress(EOS) + Group(statement_block('body')) \
+ (CaselessKeyword("Loop").suppress() |
CaselessKeyword("Wend").suppress() |
(CaselessKeyword("End").suppress() + CaselessKeyword("While").suppress()))
simple_while_statement.setParseAction(While_Statement)
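# For illustration (inferred from the grammar above, not from the source
# docs): simple_while_statement accepts any of these VBA loop forms,
# capturing the keyword as "type" and the condition as "guard":
#
#   While x < 5 ... Wend
#   Do While x < 5 ... Loop
#   Do Until x >= 5 ... Loop
#   While x < 5 ... End While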
# --- DO statement -----------------------------------------------------------
class Do_Statement(VBA_Object):
def __init__(self, original_str, location, tokens):
super(Do_Statement, self).__init__(original_str, location, tokens)
self.is_loop = True
self.loop_type = tokens.type
self.guard = tokens.guard
if (self.guard is None):
self.guard = True
self.body = tokens[0]
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug('parsed %r as %s' % (self, self.__class__.__name__))
def __repr__(self):
r = "Do\\n" + str(self.body) + "\\n"
r += "Loop " + str(self.loop_type) + " " + str(self.guard)
return r
def to_python(self, context, params=None, indent=0):
"""
Convert this loop to Python code.
"""
# Boilerplate used by the Python.
#boilerplate = _boilerplate_to_python(indent)
indent_str = " " * indent
# Set up doing this for loop in Python.
loop_start = indent_str + "exit_all_loops = False\n"
loop_start += indent_str + "max_errors = " + str(VBA_Object.loop_upper_bound/10000) + "\n"
loop_start += indent_str + "while (True):\n"
loop_start += indent_str + " " * 4 + "if exit_all_loops:\n"
loop_start += indent_str + " " * 8 + "break\n"
loop_start = indent_str + "# Start emulated loop.\n" + loop_start
# Set up initialization of variables used in the loop.
loop_init, prog_var = _loop_vars_to_python(self, context, indent)
# Save the updated variable values.
save_vals = _updated_vars_to_python(self, context, indent)
# Set up the loop body.
loop_str = str(self).replace('"', '\\"').replace("\\n", " :: ")
if (len(loop_str) > 100):
loop_str = loop_str[:100] + " ..."
loop_body = ""
# Report progress.
loop_body += indent_str + " " * 4 + "if (" + prog_var + " % 100) == 0:\n"
loop_body += indent_str + " " * 8 + "safe_print(\"Done \" + str(" + prog_var + ") + \" iterations of Do While loop '" + loop_str + "'\")\n"
loop_body += indent_str + " " * 4 + prog_var + " += 1\n"
# No infinite loops.
loop_body += indent_str + " " * 4 + "if (" + prog_var + " > " + str(VBA_Object.loop_upper_bound) + ") or " + \
"(vm_context.get_general_errors() > max_errors):\n"
loop_body += indent_str + " " * 8 + "raise ValueError('Infinite Loop')\n"
loop_body += to_python(self.body, context, params=params, indent=indent+4, statements=True)
# Simulate the do-while loop by checking the not of the guard and exiting if needed at
# the end of the loop body.
if (self.loop_type.lower() == "until"):
loop_body += indent_str + " " * 4 + "if (" + to_python(self.guard, context) + "):\n"
else:
loop_body += indent_str + " " * 4 + "if (not (" + to_python(self.guard, context) + ")):\n"
loop_body += indent_str + " " * 8 + "break\n"
# Full python code for the loop.
python_code = loop_init + "\n" + \
loop_start + "\n" + \
loop_body + "\n" + \
save_vals + "\n"
# Done.
return python_code
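# For illustration (simplified; the real output also includes the loop
# initialization, progress counter, and infinite-loop guard built above):
# a VBA loop such as
#
#   Do
#       x = x + 1
#   Loop Until x > 5
#
# is emitted roughly as
#
#   while (True):
#       x = x + 1
#       if (x > 5):
#           break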
def eval(self, context, params=None):
# Exit if an exit function statement was previously called.
if (context.exit_func):
return
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug('DO loop: start: ' + str(self))
# Do not bother running loops with empty bodies.
if (len(self.body) == 0):
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("DO loop: empty body. Skipping.")
return
# Assign all const variables first.
do_const_assignments(self.body, context)
# Some loop guards check the readystate value on an object. To simulate this
# we will just go around the loop a small fixed # of times.
max_loop_iters = VBA_Object.loop_upper_bound
if (".readyState" in str(self.guard)):
log.info("Limiting # of iterations of a .readyState loop.")
max_loop_iters = 5
# See if we can convert the loop to Python and directly emulate it.
if (_eval_python(self, context, params=params, add_boilerplate=True)):
return
# Track that the current loop is running.
context.loop_stack.append(True)
context.loop_object_stack.append(self)
# Get the initial values of all the variables that appear in the loop guard.
old_guard_vals = _get_guard_variables(self, context)
# Loop until the loop is broken out of or we violate the loop guard.
num_iters = 0
num_no_change = 0
while (True):
# Break infinite loops.
if (num_iters > max_loop_iters):
log.error("Maximum loop iterations exceeded. Breaking loop.")
break
num_iters += 1
# Execute the loop body.
done = False
context.goto_executed = False
for s in self.body:
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug('DO loop eval statement: %r' % s)
if (not isinstance(s, VBA_Object)):
continue
s.eval(context=context)
# Has 'Exit Do' (or another exit statement) been called?
if (not context.loop_stack[-1]):
# Yes we have. Stop this loop.
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("Do loop: exited loop with 'Exit For'")
done = True
break
# Was there an error that will make us jump to an error handler?
if (context.must_handle_error()):
done = True
break
context.clear_error()
# Did we just run a GOTO? If so we should not run the
# statements after the GOTO.
#if (isinstance(s, Goto_Statement)):
if (context.goto_executed):
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("GOTO executed. Go to next loop iteration.")
break
# Finished with the loop due to 'Exit Do'?
if (done):
break
# Test the loop guard to see if we should exit the loop.
guard_val = eval_arg(self.guard, context)
if (self.loop_type.lower() == "until"):
guard_val = (not guard_val)
if (not guard_val):
break
# Does it look like this might be an infinite loop? Check this by
# seeing if any changes have been made to the variables in the loop
# guard.
curr_guard_vals = _get_guard_variables(self, context)
if (curr_guard_vals == old_guard_vals):
num_no_change += 1
if (num_no_change >= context.max_static_iters):
log.warn("Possible infinite While loop detected. Exiting loop.")
break
else:
num_no_change = 0
# Remove tracking of this loop.
if (len(context.loop_stack) > 0):
context.loop_stack.pop()
if (len(context.loop_object_stack) > 0):
context.loop_object_stack.pop()
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug('DO loop: end.')
# Run the error handler if we have one and we broke out of the statement
# loop with an error.
context.handle_error(params)
simple_do_statement = Suppress(CaselessKeyword("Do")) + Suppress(EOS) + \
Group(statement_block('body')) + \
Suppress(CaselessKeyword("Loop")) + Optional(while_type("type") + boolean_expression("guard"))
simple_do_statement.setParseAction(Do_Statement)
# --- SELECT statement -----------------------------------------------------------
class Select_Statement(VBA_Object):
def __init__(self, original_str, location, tokens):
super(Select_Statement, self).__init__(original_str, location, tokens)
self.select_val = tokens.select_val
self.cases = tokens.cases
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug('parsed %r as %s' % (self, self.__class__.__name__))
def __repr__(self):
r = ""
r += str(self.select_val)
for case in self.cases:
r += str(case)
r += "End Select"
return r
def _to_python_if(self, context, indent, case, first):
"""
Convert a single Select case to a Python if, elif, or else statement.
"""
# Get the value being checked as | |
'\a', end='')
sys.stdout.flush()
except Exception as e:
print(e)
def handler(signal_received, frame):
pretty_print(
'sys0', get_string('sigint_detected')
+ Style.NORMAL + Fore.RESET
+ get_string('goodbye'), 'warning')
_exit(0)
# Enable signal handler
signal(SIGINT, handler)
def load_config():
global username
global donation_level
global avrport
global hashrate_list
global debug
global rig_identifier
global discord_presence
global shuffle_ports
global SOC_TIMEOUT
global ser
global usbi2c_port
global usbi2c_baudrate
if not Path(str(Settings.DATA_DIR) + '/Settings.cfg').is_file():
print(
Style.BRIGHT + get_string('basic_config_tool')
+ Settings.DATA_DIR
+ get_string('edit_config_file_warning'))
print(
Style.RESET_ALL + get_string('dont_have_account')
+ Fore.YELLOW + get_string('wallet') + Fore.RESET
+ get_string('register_warning'))
username = input(
Style.RESET_ALL + Fore.YELLOW
+ get_string('ask_username')
+ Fore.RESET + Style.BRIGHT)
mining_key = input(Style.RESET_ALL + Fore.YELLOW
+ get_string("ask_mining_key")
+ Fore.RESET + Style.BRIGHT)
if not mining_key:
mining_key = "None"
else:
mining_key = b64.b64encode(mining_key.encode("utf-8")).decode('utf-8')
print(Style.RESET_ALL + Fore.YELLOW
+ get_string('ports_message'))
portlist = serial.tools.list_ports.comports(include_links=True)
for port in portlist:
print(Style.RESET_ALL
+ Style.BRIGHT + Fore.RESET
+ ' ' + str(port))
print(Style.RESET_ALL + Fore.YELLOW
+ get_string('ports_notice'))
port_names = []
for port in portlist:
port_names.append(port.device)
usbi2c_port = ''
while True:
current_port = input(
Style.RESET_ALL + Fore.YELLOW
+ get_string('ask_avrport')
+ Fore.RESET + Style.BRIGHT)
if current_port in port_names:
usbi2c_port += current_port
break
else:
print(Style.RESET_ALL + Fore.RED
+ 'Please enter a valid COM port from the list above')
try:
ser.close()
sleep(2)
except:
pass
usbi2c_baudrate = input(
Style.RESET_ALL + Fore.YELLOW
+ str("USBI2C Baudrate (e.g. 115200): ")
+ Fore.RESET + Style.BRIGHT)
Settings.BAUDRATE = int(usbi2c_baudrate)
try:
ser = Serial(usbi2c_port, baudrate=int(Settings.BAUDRATE),
timeout=float(Settings.AVR_TIMEOUT))
sleep(2)
except Exception as e:
pretty_print(
'sys'
+ port_num(usbi2c_port),
get_string('board_connection_error')
+ str(usbi2c_port)
+ get_string('board_connection_error2')
+ Style.NORMAL
+ Fore.RESET
+ f' (avr connection err: {e})',
'error')
raise Exception("USBI2C Adaptor port access failure")
try:
ser.write(bytes(str("scn"+ Settings.USBI2C_EOL),
encoding=Settings.ENCODING))
sleep(1)
debug_output(usbi2c_port + ': Reading I2CS scan result from the board')
result = ser.read_until(b'\n').decode()
ser.flush()
except Exception as e:
debug_output(usbi2c_port + f': USBI2C scan failure: {e}')
raise Exception("USBI2C Adaptor I2CS address scan failure")
finally:
ser.close()
print(Style.RESET_ALL
+ Style.BRIGHT + Fore.RESET
+ ' ' + str(result))
avrport = ''
while True:
current_port = input(
Style.RESET_ALL + Fore.YELLOW
+ 'Enter your I2C slave address (e.g. 8): '
+ Fore.RESET + Style.BRIGHT)
avrport += current_port
confirmation = input(
Style.RESET_ALL + Fore.YELLOW
+ get_string('ask_anotherport')
+ Fore.RESET + Style.BRIGHT)
if confirmation == 'y' or confirmation == 'Y':
avrport += ','
else:
break
Settings.CRC8_EN = input(
Style.RESET_ALL + Fore.YELLOW
+ 'Do you want to turn on the CRC8 feature? (Y/n): '
+ Fore.RESET + Style.BRIGHT)
Settings.CRC8_EN = Settings.CRC8_EN.lower()
if len(Settings.CRC8_EN) == 0: Settings.CRC8_EN = "y"
elif Settings.CRC8_EN != "y": Settings.CRC8_EN = "n"
rig_identifier = input(
Style.RESET_ALL + Fore.YELLOW
+ get_string('ask_rig_identifier')
+ Fore.RESET + Style.BRIGHT)
if rig_identifier == 'y' or rig_identifier == 'Y':
rig_identifier = input(
Style.RESET_ALL + Fore.YELLOW
+ get_string('ask_rig_name')
+ Fore.RESET + Style.BRIGHT)
else:
rig_identifier = 'None'
donation_level = '0'
if osname == 'nt' or osname == 'posix':
donation_level = input(
Style.RESET_ALL + Fore.YELLOW
+ get_string('ask_donation_level')
+ Fore.RESET + Style.BRIGHT)
donation_level = sub(r'\D', '', donation_level)
if donation_level == '':
donation_level = 1
if float(donation_level) > 5:
donation_level = 5
if float(donation_level) < 0:
donation_level = 0
donation_level = int(donation_level)
config["AVR Miner"] = {
'username': username,
'avrport': avrport,
'donate': donation_level,
'language': lang,
'identifier': rig_identifier,
'debug': 'n',
"soc_timeout": 45,
"avr_timeout": 10,
"delay_start": Settings.DELAY_START,
"crc8_en": Settings.CRC8_EN,
"discord_presence": "y",
"periodic_report": 60,
"shuffle_ports": "y",
"mining_key": mining_key,
"usbi2c_port": usbi2c_port,
"usbi2c_baudrate": usbi2c_baudrate}
with open(str(Settings.DATA_DIR)
+ '/Settings.cfg', 'w') as configfile:
config.write(configfile)
avrport = avrport.split(',')
print(Style.RESET_ALL + get_string('config_saved'))
hashrate_list = [0] * len(avrport)
else:
config.read(str(Settings.DATA_DIR) + '/Settings.cfg')
username = config["AVR Miner"]['username']
avrport = config["AVR Miner"]['avrport']
avrport = avrport.replace(" ", "").split(',')
donation_level = int(config["AVR Miner"]['donate'])
debug = config["AVR Miner"]['debug']
rig_identifier = config["AVR Miner"]['identifier']
Settings.SOC_TIMEOUT = int(config["AVR Miner"]["soc_timeout"])
Settings.AVR_TIMEOUT = float(config["AVR Miner"]["avr_timeout"])
Settings.DELAY_START = int(config["AVR Miner"]["delay_start"])
Settings.CRC8_EN = config["AVR Miner"]["crc8_en"]
discord_presence = config["AVR Miner"]["discord_presence"]
shuffle_ports = config["AVR Miner"]["shuffle_ports"]
Settings.REPORT_TIME = int(config["AVR Miner"]["periodic_report"])
hashrate_list = [0] * len(avrport)
usbi2c_port = config["AVR Miner"]['usbi2c_port']
Settings.BAUDRATE = int(config["AVR Miner"]['usbi2c_baudrate'])
def greeting():
global greeting
print(Style.RESET_ALL)
current_hour = strptime(ctime(time())).tm_hour
if current_hour < 12:
greeting = get_string('greeting_morning')
elif current_hour == 12:
greeting = get_string('greeting_noon')
elif current_hour > 12 and current_hour < 18:
greeting = get_string('greeting_afternoon')
elif current_hour >= 18:
greeting = get_string('greeting_evening')
else:
greeting = get_string('greeting_back')
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Fore.YELLOW
+ Style.BRIGHT + '\n Unofficial Duino-Coin USBI2C AVR Miner'
+ Style.RESET_ALL + Fore.MAGENTA
+ f' {Settings.VER}' + Fore.RESET
+ ' 2021-2022')
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Style.NORMAL + Fore.MAGENTA
+ 'https://github.com/JK-Rolling '
+ 'https://github.com/revoxhere/duino-coin')
if lang != "english":
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Style.NORMAL
+ Fore.RESET + lang.capitalize()
+ " translation: " + Fore.MAGENTA
+ get_string("translation_autor"))
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Style.NORMAL
+ Fore.RESET + get_string('avr_on_port')
+ Style.BRIGHT + Fore.YELLOW
+ ' '.join(avrport))
if osname == 'nt' or osname == 'posix':
print(
Style.DIM + Fore.MAGENTA + Settings.BLOCK
+ Style.NORMAL + Fore.RESET
+ get_string('donation_level') + Style.BRIGHT
+ Fore.YELLOW + str(donation_level))
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Style.NORMAL
+ Fore.RESET + get_string('algorithm')
+ Style.BRIGHT + Fore.YELLOW
+ 'DUCO-S1A ⚙ AVR diff')
if rig_identifier != "None":
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Style.NORMAL
+ Fore.RESET + get_string('rig_identifier')
+ Style.BRIGHT + Fore.YELLOW + rig_identifier)
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Style.NORMAL
+ Fore.RESET + str(greeting) + ', '
+ Style.BRIGHT + Fore.YELLOW
+ str(username) + '!\n')
def init_rich_presence():
# Initialize Discord rich presence
global RPC
try:
RPC = Presence(905158274490441808)
RPC.connect()
Thread(target=update_rich_presence).start()
except Exception as e:
#print("Error launching Discord RPC thread: " + str(e))
pass
def update_rich_presence():
startTime = int(time())
while True:
try:
total_hashrate = get_prefix("H/s", sum(hashrate_list), 2)
RPC.update(details="Hashrate: " + str(total_hashrate),
start=mining_start_time,
state=str(shares[0]) + "/"
+ str(shares[0] + shares[1])
+ " accepted shares",
large_image="avrminer",
large_text="Duino-Coin, "
+ "a coin that can be mined with almost everything"
+ ", including AVR boards",
buttons=[{"label": "Visit duinocoin.com",
"url": "https://duinocoin.com"},
{"label": "Join the Discord",
"url": "https://discord.gg/k48Ht5y"}])
except Exception as e:
#print("Error updating Discord RPC thread: " + str(e))
pass
sleep(15)
def pretty_print(sender: str = "sys0",
msg: str = None,
state: str = "success"):
"""
Produces nicely formatted CLI output for messages:
HH:MM:SS |sender| msg
"""
if sender.startswith("net"):
bg_color = Back.BLUE
elif sender.startswith("avr"):
bg_color = Back.MAGENTA
else:
bg_color = Back.GREEN
if state == "success":
fg_color = Fore.GREEN
elif state == "info":
fg_color = Fore.BLUE
elif state == "error":
fg_color = Fore.RED
else:
fg_color = Fore.YELLOW
with thread_lock():
printlock.acquire()
print(Fore.WHITE + datetime.now().strftime(Style.DIM + "%H:%M:%S ")
+ bg_color + Style.BRIGHT + " " + sender + " "
+ Back.RESET + " " + fg_color + msg.strip())
printlock.release()
def share_print(id, type, accept, reject, total_hashrate,
computetime, diff, ping, reject_cause=None):
"""
Produces nicely formatted CLI output for shares:
HH:MM:SS |avrN| ⛏ Accepted 0/0 (100%) ∙ 0.0s ∙ 0 kH/s ⚙ diff 0 k ∙ ping 0ms
"""
try:
diff = get_prefix("", int(diff), 0)
except:
diff = "?"
try:
total_hashrate = get_prefix("H/s", total_hashrate, 2)
except:
total_hashrate = "? H/s"
if type == "accept":
share_str = get_string("accepted")
fg_color = Fore.GREEN
elif type == "block":
share_str = get_string("block_found")
fg_color = Fore.YELLOW
else:
share_str = get_string("rejected")
if reject_cause:
share_str += f"{Style.NORMAL}({reject_cause}) "
fg_color = Fore.RED
with thread_lock():
printlock.acquire()
print(Fore.WHITE + datetime.now().strftime(Style.DIM + "%H:%M:%S ")
+ Fore.WHITE + Style.BRIGHT + Back.MAGENTA + Fore.RESET
+ " avr" + str(id) + " " + Back.RESET
+ fg_color + Settings.PICK + share_str + Fore.RESET
+ str(accept) + "/" + str(accept + reject) + Fore.MAGENTA
+ " (" + str(round(accept / (accept + reject) * 100)) + "%)"
+ Style.NORMAL + Fore.RESET
+ " ∙ " + str("%04.1f" % float(computetime)) + "s"
+ Style.NORMAL + " ∙ " + Fore.BLUE + Style.BRIGHT
+ str(total_hashrate) + Fore.RESET + Style.NORMAL
+ Settings.COG + f" diff {diff} ∙ " + Fore.CYAN
+ f"ping {(int(ping))}ms")
printlock.release()
def usbi2c_write(ser, com, data):
serlock.acquire()
ser.write(bytes(str(str(com)
+ Settings.USBI2C_SEPARATOR
+ "w"
+ Settings.USBI2C_SEPARATOR
+ str(data)
+ Settings.USBI2C_EOL),
encoding=Settings.ENCODING))
serlock.release()
def usbi2c_read(ser, com):
serlock.acquire()
ser.write(bytes(str(str(com)
+ Settings.USBI2C_SEPARATOR
+ "r"
+ Settings.USBI2C_EOL),
encoding=Settings.ENCODING))
data = ser.read_until(b'$').decode().strip(Settings.USBI2C_EOL).split(Settings.USBI2C_SEPARATOR)
serlock.release()
return data
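# Illustrative wire format for the two helpers above (a sketch: the actual
# separator and EOL characters live in Settings and are assumed here to be
# "," and "\n", with b'$' terminating each reply):
#   usbi2c_write(ser, 8, "result") sends b"8,w,result\n"
#   usbi2c_read(ser, 8) sends b"8,r\n", reads up to b'$', strips the EOL
#   and returns the reply split on the separator, e.g. ["8", "ok"]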
def flush_i2c(ser, com, period=2):
# the period argument is not used here and can be ignored
with thread_lock():
usbi2c_write(ser,"fl",com)
sleep(0.1)
def crc8(data):
crc = 0
for i in range(len(data)):
byte = data[i]
for b in range(8):
# standard Dallas/Maxim 1-wire CRC-8 bit update
fb_bit = (crc ^ byte) & 0x01
if fb_bit == 0x01:
crc = crc ^ 0x18
crc = (crc >> 1) & 0x7f
if fb_bit == 0x01:
crc = crc | 0x80
byte = byte >> 1
return crc
def get_abs_path_from_package_name(packagename):
"""Get absolute file path of the package.
In order to retrieve the path, the package module will be imported.
Args:
packagename: The full package name, e.g., package.subpackage.
Returns:
An absolute path or None if path does not exist.
Raises:
TypeError: Wrong input arguments.
"""
utils.check_type(packagename, 'packagename', str)
errors = []
mod = load_module_from_module_name(packagename, errors, reload_mod=False,
include_test_functions=False)
# The __init__ module is not a package, but anything else whose file's name
# ends with __init__.py(c) is.
if not mod or mod.__name__.split('.')[-1] == '__init__':
return None
filename = inspect.getfile(mod)
if filename.endswith('__init__.py'):
return filename[:-len('__init__.py')]
elif filename.endswith('__init__.pyc'):
return filename[:-len('__init__.pyc')]
else:
return None
def get_root_relative_path(path, root):
"""Get the root-relative URL path.
Example:
(path='/home/user/app/dir/templates/tests.py',
root='/home/user/app/dir') -> 'templates/tests.py'
Args:
path: An absolute path of a file or directory.
root: The root directory which equals the websites root path.
Returns:
A string representing the root-relative URL path or None if path is not
relative to root.
Raises:
TypeError: Wrong input arguments.
"""
utils.check_type(path, 'path', str)
utils.check_type(root, 'root', str)
if not root or not os.path.isdir(root):
return None
path_parts = path.split(os.sep)
root_parts = root.split(os.sep)
if not root_parts[-1]:
del root_parts[-1]
if root_parts != path_parts[:len(root_parts)]:
return None
return '/'.join(path_parts[len(root_parts):])
def _wrap_function(func):
"""Wraps a function with a wrapper that discards a single arguments.
This is to allow functions to be used as methods similarly to using
staticmethod, but still contain a reference to the class and return True for
inspect.ismethod calls.
Args:
func: A function that takes no arguments to wrap.
Returns:
A one argument function that wraps the provided function.
"""
return functools.wraps(func)(lambda self: func())
def wrap_test_functions(module):
"""Wraps test functions with a new TestCase subclass.
Args:
module: A module to search for test functions to wrap.
"""
_, _, submodule_name = module.__name__.rpartition('.')
test_case_name = ''.join(
s[:1].upper() + s[1:] for s in submodule_name.split('_'))
test_case_name += 'WrappedTestFunctions' # Prevent name collision.
if hasattr(module, test_case_name):
return
test_functions = {}
for name, obj in module.__dict__.items():
if inspect.isfunction(obj) and name.startswith('test'):
test_functions[name] = _wrap_function(obj)
if test_functions:
test_case = type(test_case_name, (unittest.TestCase,), test_functions)
test_case.__module__ = module.__name__
module.__dict__[test_case_name] = test_case
def load_module_from_module_name(fullname, errors_out=None, reload_mod=False,
include_import_error=False,
include_test_functions=True):
"""Load a module.
Errors which occurred while importing the module are appended to errors_out.
An error is appended as (fullname, error_traceback) tuple.
Args:
fullname: The full module name, e.g., package.subpackage.module.
errors_out: A list to which import error tracebacks are appended, or None
to ignore errors.
reload_mod: Try to remove module before reloading it.
include_import_error: Whether to include an error tuple in case the module
does not exist.
include_test_functions: Whether to wrap test functions into a test case
class. Note that if this is False and the module has already been
imported with include_test_functions=True, then the module will still
have the wrapped test functions from before.
Returns:
The loaded module or None if the module could not be imported.
Raises:
TypeError: Wrong input arguments.
"""
utils.check_type(fullname, 'fullname', str)
utils.check_type(errors_out, 'errors_out', (types.NoneType, list))
utils.check_type(reload_mod, 'reload_mod', bool)
utils.check_type(include_import_error, 'include_import_error', bool)
utils.check_type(include_test_functions, 'include_test_functions', bool)
module = None
try:
loaded_by_import = False
if fullname not in sys.modules:
__import__(fullname)
loaded_by_import = True
module = sys.modules[fullname]
if reload_mod and not loaded_by_import:
module = reload(module)
if include_test_functions:
wrap_test_functions(module)
# pylint: disable-msg=W0703
except:
if errors_out is not None:
if include_import_error:
errors_out.append((fullname, traceback.format_exc()))
else:
# The error should only be noted if the exception was raised from
# within the imported module, rather than being raised because the
# module did not exist. To check this, walk the traceback stack and
# look for a module with __name__ == fullname (or None due to the
# broken module being cleared).
tb = sys.exc_info()[2]
while tb:
if tb.tb_frame.f_globals['__name__'] in [None, fullname]:
errors_out.append((fullname, traceback.format_exc()))
break
tb = tb.tb_next
return module
# TODO(schuppe): too many local variables - pylint: disable-msg=R0914,R0912
def get_module_names_in_package(packagename, module_pattern, depth=0):
"""Get names of all modules in the package that match module_pattern.
Since all modules found at the location of package and below are
considered, a traversal of the entire directory structure is
needed. This can be an expensive operation if your path contains
many subdirectories and/or files.
You can limit the depth of the traversal with the depth argument. 1
means only the first level is considered; 2 means the first and
second levels are considered, and so on. A value of 0 indicates that
the entire directory tree should be traversed.
Args:
packagename: The name of the package, e.g., package.subpackage.
module_pattern: The pattern of modules to look at.
depth: Maximum depth of directory traversal.
Returns:
A list of full names of modules in this package that match the pattern.
Raises:
TypeError: Wrong input arguments.
ValueError: If depth is smaller than 0.
"""
utils.check_type(packagename, 'packagename', str)
utils.check_type(module_pattern, 'module_pattern', str)
utils.check_type(depth, 'depth', int)
if depth < 0:
raise ValueError('"depth" must be at least 0.')
path = get_abs_path_from_package_name(packagename)
if not path:
return []
path_default_depth = len([x for x in path.split(os.sep) if x])
res = []
packagename_split = packagename.split('.')
path_split = path.split(os.sep)
for root, _, files in os.walk(path):
if depth != 0:
current_depth = len([x for x in root.split(os.sep) if x])
if current_depth >= path_default_depth + depth:
continue
for file_ in files:
short_modulename, ext = os.path.splitext(file_)
# Only Python modules should be considered and they should be
# considered only once. This means we have to ensure to not use
# source *and* compiled module files of the same module.
# At first we check if the current file is a sourcefile. If it
# is, no further checks are needed and we go ahead and use it.
if ext != '.py':
if ext != '.pyc':
# If it is not a source file nor a compiled file, we ignore it.
continue
if ext == '.pyc' and os.path.isfile(os.path.join(root, file_[:-1])):
# If it is a compiled file and there is a source file, too,
# we ignore this file, because we are using the source file
# already.
continue
# In addition, only modules matching a certain pattern will be
# loaded.
if re.match(module_pattern, short_modulename):
# The module name = packagename + diff between path and root
# (=subpackage name) + current file's name.
root_split = root.split(os.sep)
if root_split == path_split:
subpackage_split = []
else:
subpackage_split = root_split[len(path_split) - 1:]
module_split = packagename_split + subpackage_split
modulename = '.'.join(module_split + [short_modulename])
res.append(modulename)
res.sort()
return res
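# Illustrative call (hypothetical package layout): with modules pkg/test_foo.py
# and pkg/sub/test_bar.py, get_module_names_in_package('pkg', r'test_.*', depth=1)
# returns ['pkg.test_foo'], while depth=0 would also include 'pkg.sub.test_bar'.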
def _is_prefix(prefix, name):
"""Determines whether one fullname is a prefix of another.
Args:
prefix: The fullname that might be a prefix.
name: The entire fullname.
Returns:
A boolean indicating whether or not the first fullname is a prefix of the
second.
"""
prefix_parts = prefix.split('.') if prefix else []
name_parts = name.split('.') if name else []
return name_parts[:len(prefix_parts)] == prefix_parts
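# Example: _is_prefix('pkg.sub', 'pkg.sub.module') is True, while
# _is_prefix('pkg.su', 'pkg.sub.module') is False, because matching is done
# on whole dot-separated parts rather than on raw string prefixes.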
def _is_in_test_package(fullname, conf):
"""Determines whether the given fullname is in a configured test package.
Args:
fullname: The name of a test object.
conf: The configuration to use.
Returns:
A boolean indicating whether or not the fullname is valid (in one of the
configured test packages).
"""
return any(_is_prefix(p, fullname) for p in conf.test_package_names)
def get_requested_object(fullname, conf):
"""Gets the TestObject with the particular name.
Args:
fullname: Name of the object, e.g. package.module.class.method.
conf: The configuration to use.
Returns:
A TestObject, which might be BadTest if the object cannot be found or
loaded correctly.
Raises:
TypeError: Wrong input arguments.
"""
utils.check_type(fullname, 'fullname', str)
utils.check_type(conf, 'conf', config.Config)
if not fullname:
return Root()
if not _is_in_test_package(fullname, conf):
msg = ('Test object %s is not contained in one of the configured '
'test_package_names in aeta.yaml.' % fullname)
return BadTest(fullname, False, [(fullname, msg)])
errors_out = []
# package or module
module = load_module_from_module_name(
fullname, errors_out, include_import_error=False,
include_test_functions=conf.include_test_functions)
if errors_out:
return BadTest(fullname, True, errors_out)
if module:
if get_abs_path_from_package_name(fullname):
return Package(fullname)
return Module(fullname, module)
elements = fullname.split('.')
# test case class
module = load_module_from_module_name(
'.'.join(elements[:-1]), errors_out, include_import_error=False,
include_test_functions=conf.include_test_functions)
if errors_out:
return BadTest(fullname, True, errors_out)
if module:
cls = getattr(module, elements[-1], None)
if cls and inspect.isclass(cls):
return Class(fullname, cls)
module = load_module_from_module_name(
'.'.join(elements[:-2]), errors_out, include_import_error=False,
include_test_functions=conf.include_test_functions)
if errors_out:
return BadTest(fullname, True, errors_out)
import logging
import os
import copy
import mxnet as mx
from .classifier import Classifier
from .dataset import get_dataset
from .nets import *
from .pipeline import train_image_classification
from .utils import *
from ..base import BaseTask, compile_scheduler_options, create_scheduler
from ...core import *
from ...core.loss import *
from ...core.optimizer import *
from ...scheduler.resource import get_cpu_count, get_gpu_count
from ...utils import update_params
__all__ = ['ImageClassification']
logger = logging.getLogger(__name__)
class ImageClassification(BaseTask):
"""AutoGluon Task for classifying images based on their content
"""
Classifier = Classifier
@staticmethod
def Dataset(path=None, name=None, train=True, input_size=224, crop_ratio=0.875, *args, **kwargs):
"""Dataset for AutoGluon image classification tasks.
May either be a :class:`autogluon.task.image_classification.ImageFolderDataset`, :class:`autogluon.task.image_classification.RecordDataset`,
or a popular dataset already built into AutoGluon ('mnist', 'fashionmnist', 'cifar10', 'cifar100', 'imagenet').
Parameters
----------
path : str, optional
The data location. If using :class:`ImageFolderDataset`,
the image folder `path/to/the/folder` should be provided.
If using :class:`RecordDataset`, the `path/to/*.rec` should be provided.
name : str, optional
Which built-in dataset to use, will override all other options if specified.
The options are: 'mnist', 'fashionmnist', 'cifar', 'cifar10', 'cifar100', 'imagenet'
train : bool, optional, default = True
Whether this dataset should be used for training or validation.
input_size : int
The input image size.
crop_ratio : float
Center crop ratio (for evaluation only).
Returns
-------
Dataset object that can be passed to `task.fit()`, which is actually an :class:`autogluon.space.AutoGluonObject`.
To interact with such an object yourself, you must first call `Dataset.init()` to instantiate the object in Python.
"""
if name is None:
if path is None:
raise ValueError("Either `path` or `name` must be present in Dataset(). "
"If `name` is provided, it will override the rest of the arguments.")
return get_dataset(path=path, train=train, name=name,
input_size=input_size, crop_ratio=crop_ratio,
*args, **kwargs)
@staticmethod
def fit(dataset,
net=Categorical('ResNet50_v1b', 'ResNet18_v1b'),
optimizer=NAG(
learning_rate=Real(1e-3, 1e-2, log=True),
wd=Real(1e-4, 1e-3, log=True),
multi_precision=False
),
loss=SoftmaxCrossEntropyLoss(),
split_ratio=0.8,
batch_size=64,
input_size=224,
epochs=20,
final_fit_epochs=None,
ensemble=1,
metric='accuracy',
nthreads_per_trial=60,
ngpus_per_trial=1,
hybridize=True,
scheduler_options=None,
search_strategy='random',
search_options=None,
plot_results=False,
verbose=False,
time_limits=None,
resume=False,
output_directory='checkpoint/',
visualizer='none',
num_trials=2,
dist_ip_addrs=None,
auto_search=True,
lr_config=Dict(
lr_mode='cosine',
lr_decay=0.1,
lr_decay_period=0,
lr_decay_epoch='40,80',
warmup_lr=0.0,
warmup_epochs=0
),
tricks=Dict(
last_gamma=False,
use_pretrained=True,
use_se=False,
mixup=False,
mixup_alpha=0.2,
mixup_off_epoch=0,
label_smoothing=False,
no_wd=False,
teacher_name=None,
temperature=20.0,
hard_weight=0.5,
batch_norm=False,
use_gn=False),
**kwargs):
# TODO: ensemble and hybridize are not in docstring
"""
Fit image classification models to a given dataset.
Parameters
----------
dataset : str or :meth:`autogluon.task.ImageClassification.Dataset`
Training dataset containing images and their associated class labels.
Popular image datasets built into AutoGluon can be used by specifying their name as a string (options: ‘mnist’, ‘fashionmnist’, ‘cifar’, ‘cifar10’, ‘cifar100’, ‘imagenet’).
input_size : int
Size of images in the dataset (pixels).
net : str or :class:`autogluon.space.Categorical`
Which existing neural network models to consider as candidates.
optimizer : str or :class:`autogluon.space.AutoGluonObject`
Which optimizers to consider as candidates for learning the neural network weights.
batch_size : int
How many images to group in each mini-batch during gradient computations in training.
epochs: int
How many epochs to train the neural networks for at most.
final_fit_epochs: int, default None
Final fit epochs, the same number of epochs will be used as during the HPO if not specified.
metric : str or callable object
Evaluation metric by which predictions will be ultimately evaluated on test data.
loss : `mxnet.gluon.loss`
Loss function used during training of the neural network weights.
num_trials : int
Maximal number of hyperparameter configurations to try out.
split_ratio : float, default = 0.8
Fraction of dataset to use for training (rest of data is held-out for tuning hyperparameters).
The final returned model may be fit to all of the data (after hyperparameters have been selected).
time_limits : int
Approximately how long `fit()` should run for (wallclock time in seconds).
`fit()` will stop training new models after this amount of time has elapsed (but models which have already started training will continue to completion).
nthreads_per_trial : int
How many CPUs to use in each trial (ie. single training run of a model).
ngpus_per_trial : int
How many GPUs to use in each trial (ie. single training run of a model).
output_directory : str
Checkpoints of the search state are written to
os.path.join(output_directory, 'exp1.ag')
scheduler_options : dict
Extra arguments passed to __init__ of scheduler, to configure the
orchestration of training jobs during hyperparameter-tuning.
search_strategy : str
Which hyperparameter search algorithm to use.
Options include: 'random' (random search), 'skopt' (SKopt Bayesian optimization), 'grid' (grid search), 'hyperband' (Hyperband), 'rl' (reinforcement learner)
search_options : dict
Auxiliary keyword arguments to pass to the searcher that performs hyperparameter optimization.
resume : bool
If True, the hyperparameter search is started from state loaded from
os.path.join(output_directory, 'exp1.ag')
dist_ip_addrs : list
List of IP addresses corresponding to remote workers, in order to leverage distributed computation.
verbose : bool
Whether or not to print out intermediate information during training.
plot_results : bool
Whether or not to generate plots summarizing training process.
visualizer : str
Describes method to visualize training progress during `fit()`. Options: ['mxboard', 'tensorboard', 'none'].
auto_search : bool
If True, enables automatic suggestion of network types and hyper-parameter ranges adaptively based on provided dataset.
Returns
-------
:class:`autogluon.task.image_classification.Classifier` object which can make predictions on new data and summarize what happened during `fit()`.
Examples
--------
>>> from autogluon import ImageClassification as task
>>> dataset = task.Dataset(path='data/train')
>>> classifier = task.fit(dataset,
>>>                       net=ag.space.Categorical('resnet18_v1', 'resnet34_v1'),
>>>                       time_limits=time_limits,
>>>                       ngpus_per_trial=1,
>>>                       num_trials=4)
>>> test_data = task.Dataset('~/data/test', train=False)
>>> test_acc = classifier.evaluate(test_data)
Bag-of-tricks options used on the image classification dataset:
lr_config
----------
lr_mode : type=str, default='cosine'.
describes how learning rate should be adjusted over the course of training. Options include: 'cosine', 'poly', 'step'.
lr_decay : type=float, default=0.1.
decay rate of the learning rate.
lr_decay_period : type=int, default=0.
interval for periodic learning rate decays. 0 disables periodic decay.
lr_decay_epoch : type=str, default='40,80'.
epochs at which the learning rate decays.
warmup_lr : type=float, default=0.0.
starting warmup learning rate.
warmup_epochs : type=int, default=0.
number of warmup epochs.
tricks
----------
last_gamma : type=bool, default=False.
whether to init gamma of the last BN layer in each bottleneck to 0.
use_pretrained : type=bool, default=True.
enable using a pretrained model from gluon.
use_se : type=bool, default=False.
whether to use SE layers in resnext.
mixup : type=bool, default=False.
whether to train the model with mix-up.
mixup_alpha : type=float, default=0.2.
beta distribution parameter for mixup sampling.
mixup_off_epoch : type=int, default=0.
how many of the last epochs to train without mixup.
label_smoothing : type=bool, default=False.
whether to use label smoothing in training.
no_wd : type=bool, default=False.
whether to remove weight decay on bias, and beta/gamma for batchnorm layers.
teacher_name : type=str, default=None.
teacher model for distillation training.
temperature : type=float, default=20.0.
temperature parameter for the distillation teacher model.
hard_weight : type=float, default=0.5.
weight for the loss of the one-hot label in distillation training.
batch_norm : type=bool, default=False.
whether to enable batch normalization in vgg.
use_gn : type=bool, default=False.
whether to use group norm.
"""
checkpoint = os.path.join(output_directory, 'exp1.ag')
if auto_search:
# The strategies can be injected here, for example: automatic suggest some hps
# based on the dataset statistics
net = auto_suggest_network(dataset, net)
nthreads_per_trial = min(nthreads_per_trial, get_cpu_count())
ngpus_per_trial = min(ngpus_per_trial, get_gpu_count())
final_fit_epochs = final_fit_epochs if final_fit_epochs else epochs
train_image_classification.register_args(
dataset=dataset,
net=net,
optimizer=optimizer,
loss=loss,
metric=metric,
num_gpus=ngpus_per_trial,
split_ratio=split_ratio,
batch_size=batch_size,
input_size=input_size,
epochs=epochs,
final_fit_epochs=final_fit_epochs,
verbose=verbose,
num_workers=nthreads_per_trial,
hybridize=hybridize,
final_fit=False,
tricks=tricks,
lr_config=lr_config
)
# Backward compatibility:
grace_period = kwargs.get('grace_period')
if grace_period is not None:
if scheduler_options is None:
scheduler_options = {'grace_period': grace_period}
else:
assert 'grace_period' not in scheduler_options, \
"grace_period appears both in scheduler_options and as direct argument"
logger.warning(
"grace_period is deprecated, use "
"scheduler_options={'grace_period': ...} instead")
scheduler_options = copy.copy(scheduler_options)
scheduler_options['grace_period'] = grace_period
scheduler_options = compile_scheduler_options(
scheduler_options=scheduler_options,
search_strategy=search_strategy,
search_options=search_options,
nthreads_per_trial=nthreads_per_trial,
ngpus_per_trial=ngpus_per_trial,
checkpoint=checkpoint,
num_trials=num_trials,
time_out=time_limits,
resume=resume,
visualizer=visualizer,
time_attr='epoch',
reward_attr='classification_reward',
dist_ip_addrs=dist_ip_addrs,
epochs=epochs)
results = BaseTask.run_fit(
train_image_classification, search_strategy, scheduler_options,
plot_results=plot_results)
args = sample_config(train_image_classification.args, results['best_config'])
kwargs = {'num_classes': results['num_classes'], 'ctx': mx.cpu(0)}
model = get_network(args.net, **kwargs)
multi_precision = optimizer.kwvars['multi_precision'] if 'multi_precision' in optimizer.kwvars else False
update_params(model, results.pop('model_params'), multi_precision)
if ensemble > 1:
models = [model]
scheduler = create_scheduler(
train_image_classification, search_strategy, scheduler_options)
for i in range(1, ensemble):
resultsi = scheduler.run_with_config(results['best_config'])
for group in new_answer_groups:
new_rule_specs = []
for rule_spec in group['rule_specs']:
if is_valid_math_equation(
rule_spec['inputs']['x']):
new_rule_specs.append(rule_spec)
group['rule_specs'] = new_rule_specs
# Otherwise, if at least one rule_input is an algebraic
# expression, we remove all other rule inputs that are
# numeric expressions.
elif exp_domain.TYPE_VALID_ALGEBRAIC_EXPRESSION in (
types_of_inputs):
new_interaction_id = (
exp_domain.TYPE_VALID_ALGEBRAIC_EXPRESSION)
for group in new_answer_groups:
new_rule_specs = []
for rule_spec in group['rule_specs']:
if is_valid_algebraic_expression(
rule_spec['inputs']['x']):
new_rule_specs.append(rule_spec)
group['rule_specs'] = new_rule_specs
else:
new_interaction_id = (
exp_domain.TYPE_VALID_NUMERIC_EXPRESSION)
# Removing answer groups that have no rule specs left after
# the filtration done above.
new_answer_groups = [
answer_group for answer_group in new_answer_groups if (
len(answer_group['rule_specs']) != 0)]
# Removing feedback keys, from voiceovers_mapping and
# translations_mapping, that correspond to the rules that
# got deleted.
old_answer_groups_feedback_keys = [
answer_group['outcome'][
'feedback']['content_id'] for answer_group in (
question_state_dict[
'interaction']['answer_groups'])]
new_answer_groups_feedback_keys = [
answer_group['outcome'][
'feedback']['content_id'] for answer_group in (
new_answer_groups)]
content_ids_to_delete = set(
old_answer_groups_feedback_keys) - set(
new_answer_groups_feedback_keys)
for content_id in content_ids_to_delete:
if content_id in question_state_dict['recorded_voiceovers'][
'voiceovers_mapping']:
del question_state_dict['recorded_voiceovers'][
'voiceovers_mapping'][content_id]
if content_id in question_state_dict[
'written_translations']['translations_mapping']:
del question_state_dict['written_translations'][
'translations_mapping'][content_id]
question_state_dict['interaction']['id'] = new_interaction_id
question_state_dict['interaction']['answer_groups'] = (
new_answer_groups)
if question_state_dict['interaction']['solution']:
correct_answer = question_state_dict['interaction'][
'solution']['correct_answer']['ascii']
correct_answer = exp_domain.clean_math_expression(
correct_answer)
question_state_dict['interaction'][
'solution']['correct_answer'] = correct_answer
return question_state_dict
@classmethod
def _convert_state_v35_dict_to_v36_dict(cls, question_state_dict):
"""Converts from version 35 to 36. Version 35 adds translation support
for interaction customization arguments. This migration converts
customization arguments whose schemas have been changed from unicode to
SubtitledUnicode or html to SubtitledHtml. It also populates missing
customization argument keys on all interactions, removes extra
customization arguments, normalizes customization arguments against
its schema, and changes PencilCodeEditor's customization argument
name from initial_code to initialCode.
Args:
question_state_dict: dict. A dict where each key-value pair
represents respectively, a state name and a dict used to
initialize a State domain object.
Returns:
dict. The converted question_state_dict.
"""
max_existing_content_id_index = -1
translations_mapping = question_state_dict[
'written_translations']['translations_mapping']
for content_id in translations_mapping:
# Find maximum existing content_id index.
content_id_suffix = content_id.split('_')[-1]
# Possible values of content_id_suffix are a digit, or
# 'outcome' (from 'default_outcome'). If the content_id_suffix
# is not a digit, we disregard it here.
if content_id_suffix.isdigit():
max_existing_content_id_index = max(
max_existing_content_id_index,
int(content_id_suffix)
)
# Move 'html' field to 'translation' field and set 'data_format'
# to 'html' for all WrittenTranslations.
for lang_code in translations_mapping[content_id]:
translations_mapping[
content_id][lang_code]['data_format'] = 'html'
translations_mapping[
content_id][lang_code]['translation'] = (
translations_mapping[content_id][lang_code]['html'])
del translations_mapping[content_id][lang_code]['html']
interaction_id = question_state_dict['interaction']['id']
if interaction_id is None:
question_state_dict['next_content_id_index'] = (
max_existing_content_id_index + 1)
return question_state_dict
class ContentIdCounter:
"""This helper class is used to keep track of
next_content_id_index and new_content_ids, and provides a
function to generate new content_ids.
"""
def __init__(self, next_content_id_index):
"""Initializes a ContentIdCounter object.
Args:
next_content_id_index: int. The next content id index.
"""
self.new_content_ids = []
self.next_content_id_index = next_content_id_index
def generate_content_id(self, content_id_prefix):
"""Generate a new content_id from the prefix provided and
the next content id index.
Args:
content_id_prefix: str. The prefix of the content_id.
Returns:
str. The generated content_id.
"""
content_id = '%s%i' % (
content_id_prefix,
self.next_content_id_index)
self.next_content_id_index += 1
self.new_content_ids.append(content_id)
return content_id
content_id_counter = (
ContentIdCounter(max_existing_content_id_index + 1))
ca_dict = question_state_dict['interaction']['customization_args']
if (interaction_id == 'PencilCodeEditor' and
'initial_code' in ca_dict):
ca_dict['initialCode'] = ca_dict['initial_code']
del ca_dict['initial_code']
# Retrieve a cached version (state schema v35) of
# interaction_specs.json to ensure that this migration remains
# stable even when interaction_specs.json is changed.
ca_specs = [
domain.CustomizationArgSpec(
ca_spec_dict['name'],
ca_spec_dict['description'],
ca_spec_dict['schema'],
ca_spec_dict['default_value']
) for ca_spec_dict in (
interaction_registry.Registry
.get_all_specs_for_state_schema_version(36)[
interaction_id]['customization_arg_specs']
)
]
for ca_spec in ca_specs:
schema = ca_spec.schema
ca_name = ca_spec.name
content_id_prefix = 'ca_%s_' % ca_name
# We only have to migrate unicode to SubtitledUnicode or
# list of html to list of SubtitledHtml. No interactions
# were changed from html to SubtitledHtml.
is_subtitled_unicode_spec = (
schema['type'] == schema_utils.SCHEMA_TYPE_CUSTOM and
schema['obj_type'] ==
schema_utils.SCHEMA_OBJ_TYPE_SUBTITLED_UNICODE)
is_subtitled_html_list_spec = (
schema['type'] == schema_utils.SCHEMA_TYPE_LIST and
schema['items']['type'] ==
schema_utils.SCHEMA_TYPE_CUSTOM and
schema['items']['obj_type'] ==
schema_utils.SCHEMA_OBJ_TYPE_SUBTITLED_HTML)
if is_subtitled_unicode_spec:
# Default is a SubtitledHtml dict or SubtitledUnicode dict.
new_value = copy.deepcopy(ca_spec.default_value)
# If available, assign value to html or unicode_str.
if ca_name in ca_dict:
new_value['unicode_str'] = ca_dict[ca_name]['value']
# Assign content_id.
new_value['content_id'] = (
content_id_counter
.generate_content_id(content_id_prefix)
)
ca_dict[ca_name] = {'value': new_value}
elif is_subtitled_html_list_spec:
new_value = []
if ca_name in ca_dict:
# Assign values to html fields.
for html in ca_dict[ca_name]['value']:
new_value.append({
'html': html, 'content_id': None
})
else:
# Default is a list of SubtitledHtml dict.
new_value.extend(copy.deepcopy(ca_spec.default_value))
# Assign content_ids.
for subtitled_html_dict in new_value:
subtitled_html_dict['content_id'] = (
content_id_counter
.generate_content_id(content_id_prefix)
)
ca_dict[ca_name] = {'value': new_value}
elif ca_name not in ca_dict:
ca_dict[ca_name] = {'value': ca_spec.default_value}
(
customization_args_util
.validate_customization_args_and_values(
'interaction',
interaction_id,
ca_dict,
ca_specs)
)
question_state_dict['next_content_id_index'] = (
content_id_counter.next_content_id_index)
for new_content_id in content_id_counter.new_content_ids:
question_state_dict[
'written_translations'][
'translations_mapping'][new_content_id] = {}
question_state_dict[
'recorded_voiceovers'][
'voiceovers_mapping'][new_content_id] = {}
return question_state_dict
@classmethod
def _convert_state_v36_dict_to_v37_dict(cls, question_state_dict):
"""Converts from version 36 to 37. Version 37 changes all rules with
type CaseSensitiveEquals to Equals.
Args:
question_state_dict: dict. A dict where each key-value pair
represents respectively, a state name and a dict used to
initialize a State domain object.
Returns:
dict. The converted question_state_dict.
"""
if question_state_dict['interaction']['id'] == 'TextInput':
answer_group_dicts = question_state_dict[
'interaction']['answer_groups']
for answer_group_dict in answer_group_dicts:
for rule_spec_dict in answer_group_dict['rule_specs']:
if rule_spec_dict['rule_type'] == 'CaseSensitiveEquals':
rule_spec_dict['rule_type'] = 'Equals'
return question_state_dict
@classmethod
def _convert_state_v37_dict_to_v38_dict(cls, question_state_dict):
"""Converts from version 37 to 38. Version 38 adds a customization arg
for the Math interactions that allows creators to specify the letters
that would be displayed to the learner.
Args:
question_state_dict: dict. A dict where each key-value pair
represents respectively, a state name and a dict used to
initialize a State domain object.
Returns:
dict. The converted question_state_dict.
"""
if question_state_dict['interaction']['id'] in (
'AlgebraicExpressionInput', 'MathEquationInput'):
variables = set()
for group in question_state_dict[
'interaction']['answer_groups']:
for rule_spec in group['rule_specs']:
rule_input = rule_spec['inputs']['x']
for variable in expression_parser.get_variables(
rule_input):
# Replacing greek letter names with greek symbols.
if len(variable) > 1:
variable = (
constants.GREEK_LETTER_NAMES_TO_SYMBOLS[
variable])
variables.add(variable)
customization_args = question_state_dict[
'interaction']['customization_args']
customization_args.update({
'customOskLetters': {
'value': sorted(variables)
}
})
return question_state_dict
@classmethod
def _convert_state_v38_dict_to_v39_dict(cls, question_state_dict):
"""Converts from version 38 to 39. Version 39 adds a new
customization arg to NumericExpressionInput interaction which allows
creators to modify the placeholder text.
Args:
question_state_dict: dict. A dict where each key-value pair
represents respectively, a state name and a dict used to
initialize a State domain object.
Returns:
dict. The converted question_state_dict.
"""
if question_state_dict['interaction']['id'] == 'NumericExpressionInput':
customization_args = question_state_dict[
'interaction']['customization_args']
customization_args.update({
'placeholder': {
'value': {
'content_id': 'ca_placeholder_0',
'unicode_str': (
'Type an expression here, using only numbers.')
}
}
})
question_state_dict['written_translations']['translations_mapping'][
'ca_placeholder_0'] = {}
question_state_dict['recorded_voiceovers']['voiceovers_mapping'][
'ca_placeholder_0'] = {}
return question_state_dict
@classmethod
def _convert_state_v39_dict_to_v40_dict(cls, question_state_dict):
"""Converts from version 39 to 40. Version 40 converts TextInput rule
inputs from NormalizedString to SetOfNormalizedString.
Args:
question_state_dict: dict. A dict where each key-value pair
represents respectively, a state name and a dict used to
initialize a State domain object.
Returns:
dict. The converted question_state_dict.
"""
if question_state_dict['interaction']['id'] == 'TextInput':
answer_group_dicts = question_state_dict[
'interaction']['answer_groups']
for answer_group_dict in answer_group_dicts:
rule_type_to_inputs = collections.defaultdict(set)
for rule_spec_dict in answer_group_dict['rule_specs']:
rule_type = rule_spec_dict['rule_type']
rule_inputs = rule_spec_dict['inputs']['x']
rule_type_to_inputs[rule_type].add(rule_inputs)
answer_group_dict['rule_specs'] = [{
'rule_type': rule_type,
'inputs': {'x': list(rule_type_to_inputs[rule_type])}
} for rule_type in rule_type_to_inputs]
return question_state_dict
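# Illustrative effect of the consolidation above: two TextInput rule specs
#   {'rule_type': 'Equals', 'inputs': {'x': 'cat'}}
#   {'rule_type': 'Equals', 'inputs': {'x': 'dog'}}
# collapse into a single spec
#   {'rule_type': 'Equals', 'inputs': {'x': ['cat', 'dog']}}
# (the order of the inputs may vary, since they are collected in a set).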
@classmethod
def _convert_state_v40_dict_to_v41_dict(cls, question_state_dict):
"""Converts from version 40 to 41. Version 41 adds
TranslatableSetOfUnicodeString and TranslatableSetOfNormalizedString
objects to RuleSpec domain objects to allow for translations.
Args:
question_state_dict: dict. A dict where each key-value pair
represents respectively, a state name and a dict used to
initialize a State domain object.
Returns:
dict. The converted question_state_dict.
"""
class ContentIdCounter:
"""This helper class is used to keep track of
next_content_id_index and new_content_ids, and provides a
function to generate new content_ids.
"""
def __init__(self, next_content_id_index):
"""Initializes a ContentIdCounter object.
Args:
next_content_id_index: int. The next content id index.
"""
self.new_content_ids = []
self.next_content_id_index = next_content_id_index
def generate_content_id(self, content_id_prefix):
"""Generate a new content_id from the prefix provided and
the next content id index.
Args:
content_id_prefix: str. The prefix of the content_id.
Returns:
str. The generated content_id.
"""
content_id = '%s%i' % (
content_id_prefix,
self.next_content_id_index)
self.next_content_id_index += 1
self.new_content_ids.append(content_id)
return content_id
# As of Jan 2021, which is when this migration is to be run, only
| |
import pyaudio
import wave
import sys
import os
import subprocess
from keeb_async import KeyboardThread
from sig_proc import SigProc
import numpy as np
import matplotlib.pyplot as plt
import time
import asyncio
class AudioStim:
def __init__(self, send_stim_data, set_stim_mode):
#audio
self.CHUNK = 1024
self.wf = None #wave file in memory
self.p = None # pyaudio instance
self.stream = None #pyaudio output audio stream
self.audio_data = None #raw song data
self.audio_sf = None #Hz, audio signal sampling frequency
#stim
self.send_stim_data = send_stim_data #where to send our stim chunks to
self.set_stim_mode = set_stim_mode #when to start / stop playing stim, configuration mode
self.stim_chunk_time = 1000 # milliseconds, period of chunk time
self.stim_sf = 20 #Hz, brain stim signal sampling frequency
self.latency_adjust = 0 #signed int to adjust the latency by self.latency_step_size
self.latency_step_size = 5 #milliseconds, quantized to the nearest sample
self.stim_track = None #the values to send to our stimulator, at stim_sf
self.stim_offset = 0 #our current offset in our stim array
self.num_samples_per_stim_packet = int((self.stim_chunk_time / 1000) * self.stim_sf) #how many samples to send per chunk for stim
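# e.g. with the defaults above: (1000 ms / 1000) * 20 Hz = 20 samples per stim packet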
#delay config
self.space_receive_times = list()
self.audio_bt_delay = 0.115
self.stim_bt_delay = 0.105
#signal processing
self.sig_proc = SigProc()
self.stft_window = 100 #short time fourier transform window size in milliseconds
self.kick_drum_band = (40, 800) #frequency range in Hz
#setup keyboard thread
self.keeb_thread = KeyboardThread(self.audio_config_handle_keyboard_input) #a thread which listens for keyboard commands and throws an event on this thread when one is seen
def kill(self):
self.keeb_thread.kill()
def download_youtube_video(self, url):
print("Downloading youtube video...")
command = "youtube-dlc -f 251 {}".format(url)
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
out, err = process.communicate()
process.wait()
if process.returncode != 0: #0 is success
print("youtube-dlc failed to get this video. Is youtube-vlc installed (see README)")
sys.exit()
out = out.decode("utf-8").split("\n")
video_name = None
for line in out:
dl_str = "Destination: "
already_have_str = " has already been downloaded"
ah_start_string = "[download] "
file_end = ".webm"
dl_loc = line.find(dl_str)
ah_loc = line.find(already_have_str)
if dl_loc != -1:
video_name = line[dl_loc+len(dl_str):]
video_name = video_name[:video_name.find(file_end)+len(file_end)]
fixed_video_name = video_name.replace("(", "_").replace(")", "_").replace(" ", "_").replace("-", "_")
mv_cmd = "mv \"{}\" {}".format(video_name, fixed_video_name)
print(mv_cmd)
video_name = fixed_video_name
process = subprocess.Popen(mv_cmd, shell=True, stdout=subprocess.PIPE)
process.wait()
print("process return code:")
print(process.returncode)
if process.returncode != 0: #0 is success
print("mv command failed. Exiting.")
sys.exit()
elif ah_loc != -1:
video_name = line[len(ah_start_string):]
video_name = video_name[:video_name.find(file_end)+len(file_end)]
return video_name
def open_audio_file(self, filename):
if "http" in filename: #if http, it's a link, and we parse it
if "youtube" in filename:
filename = self.download_youtube_video(filename)
if filename is None:
print("Failed to get Youtube video. Exiting.")
sys.exit()
filename_no_ext, file_extension = os.path.splitext(filename)
#if the passed in file isn't a wav, convert it to one
if file_extension != "wav":
if not os.path.exists(filename_no_ext + ".wav"):
command = "ffmpeg -i '{}' {}.wav".format(filename, filename_no_ext.strip(" ").strip("-"))
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
process.wait()
if process.returncode != 0: #0 is success
print("Audio file format not supported, use WAV, MP3, WEBM, etc. Exiting.")
sys.exit()
#use wav file instead
filename = filename_no_ext + ".wav"
#read in the audio data
self.wf = wave.open(filename, 'rb')
self.audio_data = self.wf.readframes(self.CHUNK)
self.audio_sf = self.wf.getframerate()
#open pyaudio and open an output stream
self.p = pyaudio.PyAudio()
self.stream = self.p.open(format=self.p.get_format_from_width(self.wf.getsampwidth()),
channels=self.wf.getnchannels(),
rate=self.wf.getframerate(),
output=True)
def generate_stim_track(self):
#get audio data in numpy format
audio_left, audio_right = self.get_numpy_audio()
#do sliding window Short Time Fourier Transform
window_size = int((self.stft_window / 1000) * self.audio_sf) #divide by 1000, because we are using milliseconds
step_size = window_size
audio = audio_left + audio_right
self.sig_proc.sf = self.wf.getframerate()
stft_freqs, stft_ps = self.sig_proc.sliding_window_psd(audio, window_size, step_size=step_size)
#plot one of the fourier transforms
#plt.plot(stft_freqs[30], stft_ps[30])
#plt.show()
#get a time series signal representing the power of the input frequency band
kick_drum_power = self.sig_proc.get_band_power_series(stft_freqs, stft_ps, *self.kick_drum_band)
#problem: the period (1 / sampling frequency) of the power series equals the window size, so we resample to fit whatever sampling frequency our stim system is using
current_sf = 1 / (self.stft_window / 1000)
kick_drum_power = self.sig_proc.resample_signal(kick_drum_power, current_sf, self.stim_sf)
#threshold that power
kick_drum_low_threshold = 0.24
kick_drum_high_threshold = 0.24
kick_drum_power[kick_drum_power <= kick_drum_low_threshold] = 0
kick_drum_power[kick_drum_power > kick_drum_high_threshold] = 1
#kick_drum_power[(kick_drum_power < kick_drum_high_threshold) & (kick_drum_power > kick_drum_low_threshold)] = 0.5
#flip from 1 to -1
flip = True
series = -1
series_count = 3
for i, val in enumerate(kick_drum_power):
if val == 0 and ((series == -1) or (series > series_count)):
if series > series_count: #if we were just in a series of 1's and now a zero, change flip and end the series
flip = not flip
series = -1
else: #val is 1, or we are running in a series
if flip:
kick_drum_power[i] = -1.0
series += 1
#now that we've thresholded the values and made them negative and positive, we reset by normalizing
kick_drum_power = self.sig_proc.normalize(kick_drum_power)
kick_drum_power[kick_drum_power > 0.8] = 0.8
kick_drum_power[kick_drum_power < 0.2] = 0.2
#set the stim track from the previous calculations
self.stim_track = kick_drum_power
def get_numpy_audio(self):
"""
Convert audio wav file to numpy array.
return tuple with (audio_left, audio_right), as numpy arrays
"""
samples = self.wf.getnframes()
audio = self.wf.readframes(samples)
self.wf.rewind()
# Convert buffer to float32 using NumPy
audio_as_np_int16 = np.frombuffer(audio, dtype=np.int16)
audio_as_np_float32 = audio_as_np_int16.astype(np.float32)
# Normalise float32 array so that values are between -1.0 and +1.0
max_int16 = 2**15
audio_normalised = audio_as_np_float32 / max_int16
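# e.g. an int16 sample of 16384 becomes 16384 / 32768 = 0.5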
channels = self.wf.getnchannels()
audio_stereo = np.empty((int(len(audio_normalised)/channels), channels))
audio_stereo[:,0] = audio_normalised[range(0,len(audio_normalised),2)]
audio_stereo[:,1] = audio_normalised[range(1,len(audio_normalised),2)]
return audio_stereo[:,0], audio_stereo[:,1]
def add_stim_offset(self, phase_shift_seconds):
"""
Add a positive or negative phase shift to the stim track signal
"""
#self.stim_offset += max(0, int(self.stim_sf * phase_shift_seconds)) #does not allow for negative phase shift at start of song
self.stim_offset += int(self.stim_sf * phase_shift_seconds)
#if stim_offset is negative, then we must insert new values at the beginning of the stim track to create a phase delay
if self.stim_offset < 0:
self.stim_track = np.concatenate((np.zeros(-self.stim_offset), self.stim_track)) #prepend zeros to cause a phase offset at the beginning of the song
self.stim_offset = 0
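# Example: with stim_sf = 20, add_stim_offset(-0.5) moves the offset by
# -10 samples, so 10 zeros are prepended to the stim track (a 0.5 s delay).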
def send_next_stim_chunk(self):
#send stim packet
self.send_stim_data(self.stim_track[self.stim_offset:self.stim_offset+self.num_samples_per_stim_packet])
self.stim_offset = self.stim_offset + self.num_samples_per_stim_packet
def audio_delay_config(self):
"""
Figure out the total delay between PyAudio playing audio on laptop and user hearing that audio on their bluetooth headphones/speaker.
"""
#first, load the diract delta measure/impulse sound we are playing to the user
self.open_audio_file("./Closed-Hi-Hat-2.wav")
#take over the keyboard control to listen for space bar
self.set_keeb_callback(self.audio_config_handle_keyboard_input)
#then, play that impulse every n seconds
impulse_freq = 1.5 #Hz
impulse_period = 1 / impulse_freq
audio_play_times = list()
self.space_receive_times = list()
print("\n\nPress the space bar in time with the beat 15 times, like you're playing a drum on the space bar...")
time.sleep(3)
for i in range(15):
#play the impulse, record the time we play it
curr_time = time.time()
audio_play_times.append(curr_time)
#get the audio and send it out to be played
self.wf.rewind() #go back to beginning of sound
self.audio_data = self.wf.readframes(self.CHUNK)
self.stream.write(self.audio_data)
#delay our period before we play the next impulse
time.sleep(impulse_period)
#compute the average delay
drop_n = 3 #get rid of first n as that's when the user was adjusting to the speed of the rhythm
avg_delay = np.mean(np.abs(np.array(audio_play_times[drop_n:]) - np.array(self.space_receive_times[drop_n:])))
self.audio_bt_delay = avg_delay
print("AUDIO DELAY IS: {}".format(self.audio_bt_delay))
self.close_audio()
def stim_delay_config(self):
"""
Calculate delay between stim track on laptop and current output on stim device.
"""
self.space_receive_times = []
light_flash_times = []
self.set_stim_mode("config")
#take over the keyboard control to listen for space bar
self.set_keeb_callback(self.stim_config_handle_keyboard_input)
print("\n\n Press the space bar in time with the flashing light on your device 15 times")
# flashes at a known frequency
for _ in range(15):
light_flash_times.append(time.time())
to_send = np.zeros(20)
to_send[:2] = 1
self.send_stim_data(to_send)
time.sleep(1)
drop_n = 3 #get rid of first n as that's when the user was adjusting to the speed of the rhythm
avg_delay = np.mean(np.abs(np.array(light_flash_times[drop_n:]) - np.array(self.space_receive_times[drop_n:])))
self.stim_bt_delay = avg_delay
print("STIM DELAY CONFIG IS: {}".format(self.stim_bt_delay))
self.set_stim_mode("inactive")
def set_keeb_callback(self, callback):
self.keeb_thread.set_keeb_callback(callback)
def play_handle_keyboard_input(self, key):
if key == 200: #up arrow
self.latency_adjust += 1
elif key == 201: #down arrow
self.latency_adjust -= 1
print(self.latency_adjust)
def stim_config_handle_keyboard_input(self, key):
self.space_receive_times.append(time.time())
def audio_config_handle_keyboard_input(self, key):
self.space_receive_times.append(time.time())
def close_audio(self):
self.stream.stop_stream()
self.stream.close()
self.p.terminate()
def play_music_plus_plus(self):
if self.audio_data is None:
print("Called `play_music_plus_plus()` without any data loaded. Please load a song first.")
return
#set the starting phase adjustment
def load_splits():  # hypothetical name; this loader pairs with the `create_splits´ function
'''
The function loads the split datasets from files (created by the `create_splits´ function).
We assume to have those files in a fixed path, hence no file name as input is needed.
'''
# loading train set...
train_set = []
with open(PREFIX_COLAB + f"dataset/splits_frac_{FRAC}/train_set.txt", 'r') as f:
for line in f.readlines():
train_set.append(line.split('\t'))
# loading validation set...
val_set = []
with open(PREFIX_COLAB + f"dataset/splits_frac_{FRAC}/val_set.txt", 'r') as f:
for line in f.readlines():
val_set.append(line.split('\t'))
# loading test set...
test_set = []
with open(PREFIX_COLAB + f"dataset/splits_frac_{FRAC}/test_set.txt", 'r') as f:
for line in f.readlines():
test_set.append(line.split('\t'))
return train_set, val_set, test_set
def tokenize_sequence(sequence):
'''
The function tokenizes a given sequence using an approach similar to that of the `tokenize´ function.
It returns a list of tokens.
'''
# first, we split the sequence by spaces
sequence = sequence.split()
# initialize the tokens' list
tokenized_sequence = []
for token in sequence:
# we handle the case of `...´
if '...' in token:
if token[0:3] == '...':
tokenized_sequence.append('...')
tokenized_sequence.append(token.strip('...'))
elif token[-3:] == '...':
tokenized_sequence.append(token.strip('...'))
tokenized_sequence.append('...')
else:
wl, wr = token.split('...')
tokenized_sequence.append(wl)
tokenized_sequence.append('...')
tokenized_sequence.append(wr)
continue
# we handle the other symbols
symbols = [',',';','.',':','-','_','"','?','!','<','>','=','(',')','[',']','$','/','%','*','+','@']
# we use a flag to manage tokens without any symbol
there_is_a_symbol = False
for elem in symbols:
# if the token is just one symbol...
if elem == token:
tokenized_sequence.append(elem)
there_is_a_symbol = True
break
# if the token contains a symbol...
# We assume at most one symbol for each token, possibly repeated at the beginning and at the end
# of the token
if elem in token:
# same symbol at both the beginning and the end
# (checked before the single-end cases, which would otherwise shadow it)
if elem == token[0] and elem == token[-1]:
tokenized_sequence.append(elem)
tokenized_sequence.append(token.strip(elem))
tokenized_sequence.append(elem)
# symbol at the beginning
elif elem == token[0]:
tokenized_sequence.append(elem)
tokenized_sequence.append(token[1:])
# symbol at the end
elif elem == token[-1]:
tokenized_sequence.append(token[0:-1])
tokenized_sequence.append(elem)
# symbol inside the token
else:
# it could be the case of: `It's´ or `trade-off´
tokenized_sequence.append(token)
there_is_a_symbol = True
break
# if the token does not contain any symbol...
if not there_is_a_symbol:
tokenized_sequence.append(token)
return tokenized_sequence
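# Expected behaviour (sketch): tokenize_sequence('Hello, world...') -> ['Hello', ',', 'world', '...']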
def ids_sequence(tokenized_sequence, vocab, max_sequence_length, with_pad=True):
'''
We used this function to convert tokens into ids.
Args:
- tokenized_sequence: an already tokenized sentence
- vocab: the referred vocabulary
- max_sequence_length: the length of the longest sentence in the batch
- with_pad: boolean to add or remove padding
Returns: a list containing the ids of the sentence, the <sos> and <eos> ids, and optionally the padding ids.
'''
# initialize the list with the <sos> id
ids_sequence = [vocab['<sos>'][0]]
# converting tokens into ids
for token in tokenized_sequence:
if token in vocab:
ids_sequence.append(vocab[token][0])
else:
# if a token is out of vocabulary...
ids_sequence.append(vocab['<unk>'][0])
# append the final <eos> id
ids_sequence.append(vocab['<eos>'][0])
if with_pad:
# fill the gap with padding
while len(ids_sequence) < max_sequence_length+2:
ids_sequence.append(vocab['<pad>'][0])
return ids_sequence
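# Usage sketch (assumes a vocab that maps each token to a tuple whose first
# element is the token id):
#   vocab = {'<sos>': (0,), '<eos>': (1,), '<pad>': (2,), '<unk>': (3,), 'hi': (4,)}
#   ids_sequence(['hi', 'there'], vocab, max_sequence_length=4)
#   -> [0, 4, 3, 1, 2, 2]  # <sos>, hi, <unk> for 'there', <eos>, 2x <pad>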
class Encoder(torch.nn.Module):
'''
We use an LSTM to map the input sequence to a fixed dimensional vector.
Specifically, the encoder consists of one Embedding module, one Dropout module and one LSTM module.
'''
def __init__(self, input_vocab, embedding_dim, hidden_dim, n_layers, dropout_prob, bidirectional, attention, pretrained_embeddings=None):
super().__init__()
self.n_layers = n_layers
self.hidden_dim = hidden_dim
self.bidirectional = bidirectional
self.attention = attention
self.padding_idx = input_vocab['<pad>'][0]
# case of no pretrained embedding
if pretrained_embeddings is None:
self.embedding = torch.nn.Embedding(len(input_vocab), embedding_dim)
# case of pretrained embedding
else:
self.embedding = torch.nn.Embedding.from_pretrained(pretrained_embeddings, freeze=True, padding_idx=self.padding_idx)
self.dropout = torch.nn.Dropout(dropout_prob)
# case of no bidirectional lstm
if self.bidirectional is False:
self.lstm = torch.nn.LSTM(embedding_dim, hidden_dim, n_layers, dropout=dropout_prob)
# case of bidirectional lstm
else:
self.lstm = torch.nn.LSTM(embedding_dim, hidden_dim, n_layers, dropout=dropout_prob, bidirectional=True)
def forward(self, input_batch, input_batch_len):
'''
The Encoder takes one input batch and its sequence lengths (created by the `create_minibatch´ function)
and returns outputs, hidden and cell tensors.
'''
# input_batch.shape = [max_seq_len,batch_size]
# input_batch_len.shape = [batch_size]
# applying embedding and dropout layers
input_batch = self.dropout(self.embedding(input_batch))
# input_batch.shape = [max_seq_len,batch_size,embedding_dim]
# gather the data avoiding to process padding
input_batch = torch.nn.utils.rnn.pack_padded_sequence(input_batch, input_batch_len.cpu(), batch_first=False)
# input_batch becomes a `PackedSequence´ object
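# NOTE: pack_padded_sequence defaults to enforce_sorted=True, so it assumes the
# batch is sorted by decreasing sequence length; pass enforce_sorted=False if
# the minibatches are not pre-sorted that way.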
# applying lstm layer
outputs, (hidden, cell) = self.lstm(input_batch)
# outputs is still a `PackedSequence´ object
# hidden and cell shape = [n_layers*n_directions,batch_size,hidden_dim]
# fix the dimensions in case of bidirectional lstm
if self.bidirectional:
# hidden[0,:,:] and hidden[1,:,:] are 2D tensor so dim=1 refers to the hidden_dim
hidden = torch.cat([hidden[0,:,:], hidden[1,:,:]], dim=1).unsqueeze(0)
cell = torch.cat([cell[0,:,:], cell[1,:,:]], dim=1).unsqueeze(0)
# hidden and cell shape = [1,batch_size,2*hidden_dim]
# padding `outputs´ in case of attention mechanism (it becomes a tensor)
if self.attention:
outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(outputs, padding_value=self.padding_idx)
# outputs.shape = [max_seq_len,batch_size,hidden_dim]
return outputs, hidden, cell
class Attention(torch.nn.Module):
'''
Global attention mechanism based on two linear layers.
'''
def __init__(self, encoder_hidden_dim, decoder_hidden_dim):
super().__init__()
self.attn_hidden_vector = torch.nn.Linear(encoder_hidden_dim + decoder_hidden_dim, decoder_hidden_dim)
self.attn_scoring_fn = torch.nn.Linear(decoder_hidden_dim, 1, bias=False)
def forward(self, hidden, encoder_outputs):
'''
We use the hidden representation of the predicted token and the outputs
of the encoder to compute the attention weights.
'''
# hidden.shape = [1,batch_size,decoder_hidden_dim] so we repeat it in dim 0 in order to concatenate
hidden = hidden.repeat(encoder_outputs.shape[0], 1, 1)
# hidden.shape = [max_seq_len,batch_size,decoder_hidden_dim]
# torch.cat((hidden, encoder_outputs), dim=2).shape = [max_seq_len,batch_size,encoder_hidden_dim+decoder_hidden_dim]
att_hidden = torch.tanh(self.attn_hidden_vector(torch.cat((hidden, encoder_outputs), dim=2)))
# att_hidden.shape = [max_seq_len,batch_size,decoder_hidden_dim]
att_scoring_vector = self.attn_scoring_fn(att_hidden).squeeze(2)
# att_scoring_vector.shape = [max_seq_len,batch_size]
att_scoring_vector = att_scoring_vector.permute(1, 0)
# att_scoring_vector.shape = [batch_size,max_seq_len]
return torch.nn.functional.softmax(att_scoring_vector, dim=1)
# softmax(att_scoring_vector, dim=1).shape = [batch_size,max_seq_len]
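# A minimal shape check (hypothetical sizes): with max_seq_len = 7, batch_size = 2,
# and both hidden dims equal to 512, Attention(512, 512) maps hidden of shape
# [1,2,512] and encoder_outputs of shape [7,2,512] to weights of shape [2,7],
# with each row summing to 1.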
class OneStepDecoder(torch.nn.Module):
'''
OneStepDecoder takes the previous predicted word and returns the next one.
It consists of one Embedding module, one Dropout module, one LSTM module and one Linear module.
'''
def __init__(self, target_vocab, embedding_dim, decoder_hidden_dim, n_layers, dropout_prob, encoder_bidirectional, attention=None, encoder_hidden_dim=None):
super().__init__()
# the final Linear layer scores every word in the target vocabulary, hence we need its size
self.target_vocab_size = len(target_vocab)
self.encoder_bidirectional = encoder_bidirectional
self.attention = attention
self.embedding = torch.nn.Embedding(self.target_vocab_size, embedding_dim)
self.dropout = torch.nn.Dropout(dropout_prob)
# case of no bidirectional encoder...
if self.encoder_bidirectional is False:
# case of attention...
if self.attention is not None:
self.lstm = torch.nn.LSTM(encoder_hidden_dim + embedding_dim, decoder_hidden_dim, n_layers, dropout=dropout_prob)
self.linear = torch.nn.Linear(encoder_hidden_dim + decoder_hidden_dim + embedding_dim, self.target_vocab_size)
# basic case...
else:
self.lstm = torch.nn.LSTM(embedding_dim, decoder_hidden_dim, n_layers, dropout=dropout_prob)
self.linear = torch.nn.Linear(decoder_hidden_dim, self.target_vocab_size)
# case of bidirectional encoder...
else:
self.lstm = torch.nn.LSTM(embedding_dim, 2 * decoder_hidden_dim, n_layers, dropout=dropout_prob)
self.linear = torch.nn.Linear(2 * decoder_hidden_dim, self.target_vocab_size)
def forward(self, target_tokens, hidden, cell, encoder_outputs):
'''
OneStepDecoder takes one target token for each sequence of the batch, plus
the outcomes of the encoder, and returns a score for every word in the
target vocabulary, from which the next predicted word is chosen.
'''
# target_tokens.shape = [batch_size]
# hidden and cell shape = [n_layers,batch_size,n_directions*hidden_dim]
# encoder_outputs is a padded tensor when attention is used, otherwise a `PackedSequence´ object (unused here)
# Since the OneStepDecoder refers to a single word and the embedding
# layer accepts 2D tensors as input, we add a dummy dimension of size one
target_tokens = target_tokens.unsqueeze(0)
# target_tokens.shape = [1,batch_size]
embedding_layer = self.dropout(self.embedding(target_tokens))
# embedding_layer.shape = [1,batch_size,embedding_dim]
# case of attention...
if self.attention is not None:
# Calculate the attention weights; unsqueeze adds the dummy dim needed by torch.bmm below
attention_weights = self.attention(hidden, encoder_outputs).unsqueeze(1)
# attention_weights.shape = [batch_size,1,max_seq_len]
encoder_outputs = encoder_outputs.permute(1, 0, 2)
# encoder_outputs.shape = [batch_size,max_seq_len,hidden_dim]
context_vector = torch.bmm(attention_weights, encoder_outputs)
# context_vector.shape = [batch_size,1,hidden_dim]
context_vector = context_vector.permute(1, 0, 2)
# context_vector.shape = [1,batch_size,hidden_dim]
# embedding_layer.shape = [1,batch_size,embedding_dim]
attention_vector = torch.cat((embedding_layer, context_vector), dim=2)
# attention_vector.shape = [1,batch_size,embedding_dim+hidden_dim]
output, (hidden, cell) = self.lstm(attention_vector, (hidden, cell))
# output.shape = [1,batch_size,hidden_dim]
# hidden and cell shape = [1,batch_size,hidden_dim]
# we remove the first dimension to apply the Linear module
# torch.cat((output.squeeze(0), context_vector.squeeze(0), embedding_layer.squeeze(0)), dim=1).shape = [batch_size,hidden_dim+hidden_dim+embedding_dim]
linear = self.linear(torch.cat((output.squeeze(0), context_vector.squeeze(0), embedding_layer.squeeze(0)), dim=1))
# linear.shape = [batch_size,target_vocab_size]
# we remove the second dimension to plot the weights later
attention_weights = attention_weights.squeeze(1)
# attention_weights.shape = [batch_size,max_seq_len]
# case of no attention...
else:
output, (hidden, cell) = self.lstm(embedding_layer, (hidden, cell))
# output.shape = [1,batch_size,n_directions*hidden_dim]
# hidden and cell shape = [n_layers,batch_size,n_directions*hidden_dim]
# we remove the first dimension to apply the Linear module
linear = self.linear(output.squeeze(0))
# linear.shape = [batch_size,target_vocab_size]
attention_weights = None # dummy return
return linear, hidden, cell, attention_weights
class Decoder(torch.nn.Module):
'''
We use another LSTM to decode the target sequence from a vector.
We recursively call the OneStepDecoder.
'''
def __init__(self, one_step_decoder):
super().__init__()
self.one_step_decoder = one_step_decoder
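    # A minimal sketch of the forward pass implied by the docstring above
    # (hypothetical: the real loop and its teacher-forcing policy are not shown
    # in this excerpt).
    def forward(self, target_batch, hidden, cell, encoder_outputs):
        # target_batch.shape = [max_seq_len,batch_size]
        max_seq_len, batch_size = target_batch.shape
        vocab_size = self.one_step_decoder.target_vocab_size
        predictions = torch.zeros(max_seq_len, batch_size, vocab_size,
                                  device=target_batch.device)
        # the first row of target_batch holds the <sos> tokens, so start there
        tokens = target_batch[0, :]
        for t in range(1, max_seq_len):
            linear, hidden, cell, _ = self.one_step_decoder(tokens, hidden, cell,
                                                            encoder_outputs)
            predictions[t] = linear
            # plain teacher forcing: feed the ground-truth token back in
            tokens = target_batch[t, :]
        return predictions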
<filename>cwmud/core/shells.py
# -*- coding: utf-8 -*-
"""Shell management and client input processing."""
# Part of Clockwork MUD Server (https://github.com/whutch/cwmud)
# :copyright: (c) 2008 - 2017 <NAME>
# :license: MIT (https://github.com/whutch/cwmud/blob/master/LICENSE.txt)
from weakref import WeakValueDictionary
from . import const
from .commands import Command
from .logs import get_logger
from .utils import joins
from .utils.exceptions import AlreadyExists
from .utils.mixins import (HasFlags, HasFlagsMeta, HasParent, HasParentMeta,
HasWeaks, HasWeaksMeta)
log = get_logger("shells")
class ShellManager:
"""A manager for shell registration.
This is a convenience manager and is not required for the server to
function. All of its functionality can be achieved by subclassing,
instantiating, and referencing shells directly.
"""
def __init__(self):
"""Create a new shell manager."""
self._shells = {}
def __contains__(self, shell):
return shell in self._shells
def __getitem__(self, shell):
return self._shells[shell]
def register(self, shell):
"""Register a shell.
This method can be used to decorate a Shell class.
:param Shell shell: The shell to be registered
:returns Shell: The registered shell
:raises AlreadyExists: If a shell with that class name already exists
:raises TypeError: If the supplied or decorated class is not a
subclass of Shell
"""
if not isinstance(shell, type) or not issubclass(shell, Shell):
raise TypeError("must be subclass of Shell to register")
name = shell.__name__
if name in self._shells:
raise AlreadyExists(name, self._shells[name], shell)
self._shells[name] = shell
return shell
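# For example (hypothetical shell class), a manager can be used as a decorator:
#
#     SHELLS = ShellManager()
#
#     @SHELLS.register
#     class CharacterShell(Shell):
#         """A hypothetical shell for play sessions."""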
class _ShellMeta(HasFlagsMeta, HasWeaksMeta, HasParentMeta):
def __init__(cls, name, bases, namespace):
super().__init__(name, bases, namespace)
cls._verbs = WeakValueDictionary()
cls._truncated_verbs = WeakValueDictionary()
class Shell(HasFlags, HasWeaks, HasParent, metaclass=_ShellMeta):
"""A shell for processing client input."""
# These are overridden in the metaclass; they are only declared here
# to avoid a lot of unresolved reference errors in IDE introspection.
_verbs = None
_truncated_verbs = None
state = const.STATE_CONNECTED
# Delimiters should be a pair of equal-length strings that contain
# opening and closing delimiter characters. A delimiter at any given index
# in the first string will be the opening delimiter that will pair with a
# closing delimiter at the same index in the second string. This allows
# shells to delimit arguments using non-equal pairs such as braces,
# brackets, and parentheses.
delimiters = ("\"'`", "\"'`")
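    # For example, a hypothetical subclass that also pairs parentheses and
    # brackets as argument delimiters could declare:
    #
    #     delimiters = ("\"'`([", "\"'`)]")
    #
    # so that `(two words)´ parses as the single argument `two words´.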
def __init__(self):
"""Create a new shell."""
super().__init__()
@property
def session(self):
"""Return the current session for this shell."""
return self._get_weak("session")
@session.setter
def session(self, new_session):
"""Set the current session for this shell.
If `new_session` is not None, this shell's init method
will be called.
:param Session new_session: The session tied to this shell
:returns None:
"""
self._set_weak("session", new_session)
if new_session is not None:
self.init()
def init(self):
"""Initialize this shell for the session.
This method is called when the shell is assigned; override it to do
anything prior to the initial prompt.
"""
return
def get_prompt(self):
"""Generate the current prompt for this shell."""
return "^y>^~ "
@staticmethod
def _validate_verb(verb):
if not verb or not isinstance(verb, str):
raise ValueError(joins("invalid verb:", repr(verb)))
if len(verb) > 1 and not verb.isalpha():
raise ValueError("non-shortcut verbs can only contain letters")
@classmethod
def add_verbs(cls, command, *verbs, truncate=True):
"""Add verbs to this shell that trigger a given command.
:param Command command: The command that will be executed
:param str verbs: A sequence of verbs that trigger the command
:param bool truncate: Whether to add truncated verbs to the shell
:returns None:
:raises TypeError: If the given command is not a Command subclass
:raises ValueError: If any of the verbs are not valid verbs
"""
if not isinstance(command, type) or not issubclass(command, Command):
raise TypeError("cannot add verbs for non-Command class")
# This first pass is validation only: we don't want to add one verb
# only to find out that another is bad and then have to clean up.
for verb in verbs:
cls._validate_verb(verb)
if verb in cls._verbs and verb not in cls._truncated_verbs:
raise AlreadyExists(verb, cls._verbs[verb], command)
for verb in verbs:
verb = verb.lower()
cls._verbs[verb] = command
if verb in cls._truncated_verbs:
del cls._truncated_verbs[verb]
if truncate:
# Check all the shorter forms and add them if they're valid.
# We're trading off the memory of a bigger dict to store the
# truncated entries versus the CPU time it would take to search
# a store of full commands for a partial match.
verb = verb[:-1]
while verb:
if verb not in cls._verbs:
# First come, first served. If you want a command to
# have truncated priority over another, register it
# first.
cls._verbs[verb] = command
cls._truncated_verbs[verb] = command
verb = verb[:-1]
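    # For example (hypothetical command class), `MyShell.add_verbs(LookCommand,
    # "look")´ also maps "loo", "lo", and "l" to LookCommand, unless another
    # command already claimed one of those truncations.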
@classmethod
def remove_verbs(cls, *verbs):
"""Remove verbs from this shell.
:param str verbs: A sequence of verbs to remove
:returns None:
"""
for verb in verbs:
command = cls._verbs.get(verb)
if command:
del cls._verbs[verb]
verb = verb[:-1]
while verb:
if cls._verbs.get(verb) is command:
del cls._verbs[verb]
verb = verb[:-1]
@classmethod
def get_command(cls, verb):
"""Get a command in this shell by its verb.
:param str verb: The verb of the command to get
:returns Command|None: The command with that verb or None
"""
return cls._verbs.get(verb)
@classmethod
def find_command(cls, verb):
"""Find a command in this shell's lineage by its verb.
Will return the first command found, as multiple stores may have
different commands using the same verb.
:param str verb: The verb of the command to search for
:returns Command|None: The command with that verb or None
"""
verb = verb.lower()
for shell in cls.get_lineage():
command = shell.get_command(verb)
if command:
return command
@classmethod
def _one_argument(cls, data):
"""Parse a single argument from data.
This always returns exactly two values; if there is no remaining data
after parsing one argument, the second value will be an empty string.
If there was no data worth parsing, both values will be empty strings.
:param str data: The data to get an argument from
:returns tuple<str,str>: The parsed argument and any remaining data
"""
# Dump leading whitespace.
data = data.lstrip()
# Is there anything left to parse?
if not data:
return "", ""
if data[0] in cls.delimiters[0]:
# This is a delimited string, so read until it ends or data does.
delimiter = cls.delimiters[0].index(data[0])
delimiter_end = cls.delimiters[1][delimiter]
closed = False
try:
# Does this delimited string have a closing delimiter?
end = data.index(delimiter_end, 1)
closed = True
except ValueError:
# No it doesn't, so read everything.
end = len(data)
arg = data[1:end]
if closed:
end += 1
if not arg:
# It was an empty delimited string, start over and
# look for a new argument.
return cls._one_argument(data[end:])
else:
# Not a delimited string, so read until whitespace or a delimiter.
end = 1
data_end = len(data)
stop_on = " \n\r\t" + cls.delimiters[0]
while end < data_end:
if data[end] in stop_on:
break
end += 1
arg = data[:end]
# One way or another, we found an argument.
return arg, data[end:]
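    # For example, `_one_argument('say "hello there" loudly')´ returns
    # ("say", ' "hello there" loudly'), and parsing that remainder in turn
    # yields ("hello there", " loudly").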
@classmethod
def _iter_arguments(cls, data):
while data:
arg, data = cls._one_argument(data)
if arg:
yield arg
@classmethod
def _get_arguments(cls, data, max_args=-1):
"""Parse data into a list of arguments.
Any un-parsed arguments (either because max was reached or because a
delimiter was opened and not closed) will be returned as the last
element of the resulting list.
:param str data: The data to be broken down into arguments
:param int max_args: The maximum number of arguments to parse,
if less than zero, all arguments are parsed
:returns list: The parsed arguments
"""
args = []
while data and max_args != 0:
arg, data = cls._one_argument(data)
if arg:
args.append(arg)
if max_args > 0:
max_args -= 1
if data:
args.append(data)
return args
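    # For example, `_get_arguments("give bob 'the rusty sword'", max_args=1)´
    # returns ["give", " bob 'the rusty sword'"]: one parsed argument plus the
    # un-parsed remainder appended verbatim as the last element.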
def parse(self, data):
"""Parse input from the client session.
:param str data: The data to be parsed
:returns None:
"""
if not data:
return
command = None
if not data[0].isalpha():
# Check for verb shortcuts.
command = self.find_command(data[0])
if command:
# We found a shortcut, everything else is part of an argument.
data = data[1:]
else:
# No shortcut, so find a verb.
arg, data = self._one_argument(data)
command = self.find_command(arg)
if command:
if command.no_parse:
# Let this command do its own argument parsing.
args = [data]
else:
args = self._get_arguments(data)
try:
instance = command(self.session, args)
r += math.sqrt(imps_and_chrates[w, 0] * imps_and_chrates[w, 1])
s = sum(imps_and_chrates[:, 1])
idxs_and_value_ratios.sort(key=itemgetter(0))
rem_bandwidth = bandwidth
for w in range(len(idxs_and_value_ratios)):
if (imps_and_chrates[idxs_and_value_ratios[w][1], 0] * imps_and_chrates[idxs_and_value_ratios[w][1], 1] \
/ (imps_and_chrates[idxs_and_value_ratios[w][1], 1] + min_crawl_rate)**2 <= (r / (rem_bandwidth + s ))**2):
r -= math.sqrt(imps_and_chrates[idxs_and_value_ratios[w][1], 0] \
* imps_and_chrates[idxs_and_value_ratios[w][1], 1])
s -= imps_and_chrates[idxs_and_value_ratios[w][1], 1]
crawl_rates[idxs_and_value_ratios[w][1]] = min_crawl_rate
rem_bandwidth -= crawl_rates[idxs_and_value_ratios[w][1]]
else:
# NOTE: this clause kicks in at every iteration after some iteration M. It doesn't alternate with the clause above.
crawl_rates[idxs_and_value_ratios[w][1]] = \
math.sqrt(imps_and_chrates[idxs_and_value_ratios[w][1], 0] \
* imps_and_chrates[idxs_and_value_ratios[w][1], 1]) \
* (rem_bandwidth + s) / r - imps_and_chrates[idxs_and_value_ratios[w][1], 1]
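# In the else branch above, the assigned rate follows the closed-form KKT
# solution rate_w = sqrt(mu_w * Delta_w) * (B + S) / R - Delta_w, where mu_w is
# the source's importance, Delta_w its change rate, B the remaining bandwidth,
# and S and R the running sums maintained above (notation assumed from the
# NeurIPS-2019 paper).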
# Implements the RL version of the LambdaCrawl family of algorithms.
#
# See Algorithm 4 in the NeurIPS-2019 paper. As solver_x, the algorithm for handling incomplete-change-observation sources, the
# implementation can use LambdaCrawl itself, its approximate version LambdaCrawlApprox, or LambdaCrawlBinary, which minimizes the
# binary policy cost.
def LambdaLearnAndCrawl_X(solver_x, importances_incompl_obs, ch_rates_incompl_obs_actual, epsilon_incompl_obs, \
importances_compl_obs, ch_rates_compl_obs_actual, bandwidth, epsilon_learn, epoch_length, suffix_len, horizon):
start_total = time.time()
curr_time = 0
changes_incompl_obs = np.empty(len(importances_incompl_obs), dtype=object)
for i in range(len(changes_incompl_obs)): changes_incompl_obs[i] = []
crawls_incompl_obs = np.empty(len(importances_incompl_obs), dtype=object)
for i in range(len(crawls_incompl_obs)): crawls_incompl_obs[i] = []
observations_incompl_obs = np.empty(len(importances_incompl_obs), dtype=object)
for i in range(len(observations_incompl_obs)): observations_incompl_obs[i] = []
changes_compl_obs = np.empty(len(importances_compl_obs), dtype=object)
for i in range(len(changes_compl_obs)): changes_compl_obs[i] = []
crawls_compl_obs = np.empty(len(importances_compl_obs), dtype=object)
for i in range(len(crawls_compl_obs)): crawls_compl_obs[i] = []
observations_compl_obs = changes_compl_obs # since the observation sequence is complete, we observe every change
ch_rates_incompl_obs_est = [1.0] * len(ch_rates_incompl_obs_actual)
ch_rates_compl_obs_est = [1.0] * len(ch_rates_compl_obs_actual)
idealized_policy_costs_harmonic = []
idealized_policy_costs_binary = []
while curr_time < horizon:
start_sch = time.time()
# Compute the optimal policy parameters given the current change rate estimates.
crawl_rates_incompl_obs, crawl_probs_compl_obs = solver_x(importances_incompl_obs, ch_rates_incompl_obs_est, \
epsilon_incompl_obs, importances_compl_obs, ch_rates_compl_obs_est, bandwidth)
# Apply Propositions 1 & 4 from the NeurIPS-2019 paper to find the (asymptotic) harmonic policy cost.
idealized_policy_cost_harmonic = HarmonicPolicyCost(crawl_rates_incompl_obs, crawl_probs_compl_obs, importances_incompl_obs,\
ch_rates_incompl_obs_actual, importances_compl_obs, ch_rates_compl_obs_actual)
# Apply Equations 12 & 13 from the NeurIPS-2019 paper's supplement to find the (asymptotic) binary policy cost.
idealized_policy_cost_binary = BinaryPolicyCost(crawl_rates_incompl_obs, crawl_probs_compl_obs, importances_incompl_obs,\
ch_rates_incompl_obs_actual, importances_compl_obs, ch_rates_compl_obs_actual)
print("Idealized harmonic policy cost: ", idealized_policy_cost_harmonic)
idealized_policy_costs_harmonic.append(idealized_policy_cost_harmonic)
idealized_policy_costs_binary.append(idealized_policy_cost_binary)
end_sch = time.time()
start_ext = time.time()
# Advance the simulation by one epoch: simulate source changes for sources of both types until the end of the epoch; then, given
# these changes, simulate crawls from the current policy using the policy parameters computed above; and finally generate
# observations, given the generated changes and crawls. Note that for extending the observation history of complete-observation
# sources, the scheduling policy doesn't matter.
ExtendChangeHistory(changes_incompl_obs, ch_rates_incompl_obs_actual, curr_time, epoch_length)
ExtendChangeHistory(changes_compl_obs, ch_rates_compl_obs_actual, curr_time, epoch_length)
ExtendCrawlHistory_IncomplObs(crawls_incompl_obs, crawl_rates_incompl_obs, curr_time, epoch_length)
ExtendCrawlHistory_ComplObs(changes_compl_obs, crawls_compl_obs, crawl_probs_compl_obs, curr_time, epoch_length)
ExtendObsHistory_IncomplObs(changes_incompl_obs, crawls_incompl_obs, observations_incompl_obs, curr_time, epoch_length)
end_ext = time.time()
curr_time = min(curr_time + epoch_length, horizon)
start_learn = time.time()
# Re-learn the change rates from the extended observation data. There is actually no need to learn rates for the
# incomplete-observation sources when we use LambdaLearnAndCrawlApprox, but we learn them anyway.
LearnChRates(observations_incompl_obs, ch_rates_incompl_obs_est, observations_compl_obs, ch_rates_compl_obs_est, \
epsilon_learn, math.inf, curr_time)
end_learn = time.time()
#print('\tLearning took {} seconds'.format(end_learn - start_learn))
end_total = time.time()
total_time = end_total - start_total
print('RL took {} seconds in total'.format(total_time))
policy_cost = EvalMixedTrace(importances_incompl_obs, changes_incompl_obs, crawls_incompl_obs, importances_compl_obs, changes_compl_obs, \
crawls_compl_obs, horizon)
return policy_cost, total_time, idealized_policy_costs_harmonic, idealized_policy_costs_binary
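# A hypothetical invocation (all values illustrative), with LambdaCrawl as the
# solver for incomplete-change-observation sources:
#
#     cost, secs, h_costs, b_costs = LambdaLearnAndCrawl_X(
#         LambdaCrawl,
#         importances_io, ch_rates_io, 0.001,  # incomplete-observation sources
#         importances_co, ch_rates_co,         # complete-observation sources
#         bandwidth=100.0, epsilon_learn=0.001,
#         epoch_length=100.0, suffix_len=10, horizon=1000.0)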
# Implements LambdaLearnAndCrawl. See LambdaLearnAndCrawl_X for details.
def LambdaLearnAndCrawl(importances_incompl_obs, ch_rates_incompl_obs_actual, epsilon_incompl_obs, \
importances_compl_obs, ch_rates_compl_obs_actual, bandwidth, epsilon_learn, epoch_length, suffix_len, horizon):
print("Running LambdaLearnAndCrawl...")
return LambdaLearnAndCrawl_X(LambdaCrawl, importances_incompl_obs, ch_rates_incompl_obs_actual, epsilon_incompl_obs, \
importances_compl_obs, ch_rates_compl_obs_actual, bandwidth, epsilon_learn, epoch_length, suffix_len, horizon)
# Implements LambdaLearnAndCrawlApprox. See LambdaLearnAndCrawl_X for details.
def LambdaLearnAndCrawlApprox(importances_incompl_obs, ch_rates_incompl_obs_actual, epsilon_incompl_obs, \
importances_compl_obs, ch_rates_compl_obs_actual, bandwidth, epsilon_learn, epoch_length, suffix_len, horizon):
print("Running LambdaLearnAndCrawlApprox...")
return LambdaLearnAndCrawl_X(LambdaCrawlApprox, importances_incompl_obs, ch_rates_incompl_obs_actual, epsilon_incompl_obs, \
importances_compl_obs, ch_rates_compl_obs_actual, bandwidth, epsilon_learn, epoch_length, suffix_len, horizon)
# Implements LambdaLearnAndCrawlBinary. See LambdaLearnAndCrawl_X for details.
def LambdaLearnAndCrawlBinary(importances_incompl_obs, ch_rates_incompl_obs_actual, epsilon_incompl_obs, \
importances_compl_obs, ch_rates_compl_obs_actual, bandwidth, epsilon_learn, epoch_length, suffix_len, horizon):
print("Running LambdaLearnAndCrawlBinary...")
# LambdaCrawlBinary doesn't know how to deal with pages with complete change observations, so put all pages into the "incomplete
# change observations" bucket.
importances_incompl_obs_all = np.concatenate((importances_incompl_obs, importances_compl_obs))
ch_rates_incompl_obs_actual_all = np.concatenate((ch_rates_incompl_obs_actual, ch_rates_compl_obs_actual))
return LambdaLearnAndCrawl_X(LambdaCrawlBinary_Epsilon, importances_incompl_obs_all, ch_rates_incompl_obs_actual_all, epsilon_incompl_obs, \
[], [], bandwidth, epsilon_learn, epoch_length, suffix_len, horizon)
# Computes the n-th Harmonic number.
def Harmonic(n):
# Not very efficient for large n, but we don't expect n to be large under LambdaCrawl
return sum(1/k for k in range(1, n + 1))
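# For example, Harmonic(3) = 1 + 1/2 + 1/3 ≈ 1.8333.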
# Computes the time-averaged harmonic penalty for a LambdaCrawl policy given a history of changes for a mixed set of sources with complete and
# incomplete change observations.
#
# Note that we need the history of changes themselves, not of observations, to do this evaluation. See EvalTrace for more details.
def EvalMixedTrace(importances_incompl_obs, changes_incompl_obs, crawls_incompl_obs, importances_compl_obs, changes_compl_obs, \
crawls_compl_obs, horizon):
return (len(importances_incompl_obs) * EvalTrace(importances_incompl_obs, changes_incompl_obs, crawls_incompl_obs, horizon) + \
len(importances_compl_obs) * EvalTrace(importances_compl_obs, changes_compl_obs, crawls_compl_obs, horizon)) / (len(importances_incompl_obs) + len(importances_compl_obs))
# Computes the time-averaged harmonic penalty for a LambdaCrawl policy.
def EvalTrace(importances, ch_hists, crawl_hists, horizon):
if (len(importances) == 0):
return 0
# This function returns the harmonic *cost* of a trace, i.e., the lower the value it returns, the better.
J_pi = 0
for w in range(len(importances)):
# If the source never changed during the observation period, its contribution to the penalty is 0
if (not ch_hists[w]):
continue
# Otherwise, if there were changes but no crawls, count the number of changes before the horizon
if (not crawl_hists[w]):
num_changes = 0
for t in range(len(ch_hists[w])):
if (ch_hists[w][t] <= horizon):
num_changes += 1
else:
break
J_pi += (importances[w] * Harmonic(num_changes) / horizon)
continue
curr_num_changes = 0
running_penalty = 0
curr_change_time_idx = 0
curr_crawl_time_idx = 0
while (curr_change_time_idx < len(ch_hists[w]) and ch_hists[w][curr_change_time_idx] <= horizon):
# To continue the loop below we need to have either (a) unprocessed crawls s.t. their crawl times are after the current
# change time (but before the horizon), or (b) no such crawls, but the time horizon hasn't been reached yet.
while (((curr_crawl_time_idx >= len(crawl_hists[w]) or crawl_hists[w][curr_crawl_time_idx] > horizon) and ch_hists[w][curr_change_time_idx] <= horizon) or \
(curr_crawl_time_idx < len(crawl_hists[w]) and crawl_hists[w][curr_crawl_time_idx] <= horizon and ch_hists[w][curr_change_time_idx] <= crawl_hists[w][curr_crawl_time_idx])):
# If the time of a crawl coincides with the time of a change exactly (this can happen in the case of sources with complete
# change observations), don't count this change -- we assume it is picked up immediately. Just advance to the next change
# time.
if (curr_crawl_time_idx >= len(crawl_hists[w]) or \
not(ch_hists[w][curr_change_time_idx] == crawl_hists[w][curr_crawl_time_idx])):
curr_num_changes += 1
if (curr_crawl_time_idx < len(crawl_hists[w]) and \
ch_hists[w][curr_change_time_idx] == crawl_hists[w][curr_crawl_time_idx]):
curr_change_time_idx += 1
break
curr_change_time_idx += 1
if (curr_change_time_idx >= len(ch_hists[w])):
break
# tally the changes we missed
running_penalty += (0 if curr_num_changes == 0 else Harmonic(curr_num_changes))
curr_num_changes = 0
curr_crawl_time_idx += 1
J_pi += (importances[w] * running_penalty / horizon)
return J_pi / len(importances)
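# For example, a single source with importance 1, changes at t = 1, 2, 3, one
# crawl at t = 2.5, and horizon = 4 misses two changes before the crawl and one
# after it, so EvalTrace returns (Harmonic(2) + Harmonic(1)) / 4 = 0.625.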
# Extends the change history for a set of sources from curr_time up to horizon.
#
# To extend the history for a given source, samples change times from the source's Poisson change process with a given rate parameter.
def ExtendChangeHistory(ch_hists, ch_rates, curr_time, horizon):
for w in range(len(ch_hists)):
if (len(ch_hists[w]) == 0):
ch_hists[w].append(np.random.exponential(1.0 / ch_rates[w]))
while (ch_hists[w][-1] < curr_time + horizon):
ch_hists[w].append(ch_hists[w][-1] + np.random.exponential(1.0 / ch_rates[w]))
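# For example, with ch_rates[w] = 2.0 (two expected changes per unit of time),
# inter-change gaps are drawn from an exponential with mean 0.5, so extending an
# empty history over an interval of length 10 appends about 20 change times on
# average.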
# Extends the observation history for sources with incomplete change observations.
#
# This method assumes that both the change and crawl histories for sources with incomplete observations have *already* been extended
# until (or just past) curr_time + horizon. This means that, for each such source, both its change history and its crawl history are
# assumed to contain at least one element.
#
# WARNING: DO NOT CALL THIS METHOD TWICE WITH THE SAME (curr_time, horizon) PAIR. Doing so may re-append existing observations
# and thereby invalidate the observation history.
def ExtendObsHistory_IncomplObs(ch_hists, crawl_hists, observations_incompl_obs, curr_time, horizon):
for w in range(len(observations_incompl_obs)):
# First, find the last crawl in the time interval of interest, [curr_time, curr_time + horizon]. It's possible that the latest
scheduled crawl overall is after curr_time+horizon and therefore hasn't happened yet -- ignore it.