ngram
listlengths
0
67.8k
[ "urgent = fields.Boolean('Urgent',) wizard_multiple_test_physician_id = fields.Many2one('medical.physician','Doctor', required = True) wizard_multiple_test_owner_partner_id = fields.Many2one('res.partner','Owner') tests_ids", "self.env['medical.test_type'] test_browse_record = lab_test_req_obj.browse(test_id.id) test_name = test_browse_record.name medical_test_request_obj = self.env['medical.patient.lab.test'] new_created_id = medical_test_request_obj.create({'date':", "test_browse_record.name medical_test_request_obj = self.env['medical.patient.lab.test'] new_created_id = medical_test_request_obj.create({'date': date, 'doctor_id': phy_id.id, 'patient_id':patient_id.id, 'state': 'tested',", "_name = 'wizard.multiple.test.request' request_date = fields.Datetime('Request Date', required = True) wizard_multiple_test_patient_id = fields.Many2one('medical.patient','Patient',", "action.name, 'help': action.help, 'type': action.type, 'views': [ [list_view_id,'tree' ]], 'target': action.target, 'context': action.context,", "api, fields, models, _ from datetime import date,datetime class wizard_multiple_test_request(models.TransientModel): _name = 'wizard.multiple.test.request'", "= self patient_id = wizard_obj.wizard_multiple_test_patient_id phy_id = wizard_obj.wizard_multiple_test_physician_id new_created_id_list = [] date =", "LICENSE file for full copyright and licensing details. from odoo import api, fields,", "'name':test_id.id, 'request' :self.env['ir.sequence'].next_by_code('test_seq') }) new_created_id_list.append(new_created_id.id) if new_created_id_list: imd = self.env['ir.model.data'] action = imd.xmlid_to_object('hospital_management.action_medical_patient_lab_test')", "for full copyright and licensing details. 
from odoo import api, fields, models, _", "= fields.Boolean('Urgent',) wizard_multiple_test_physician_id = fields.Many2one('medical.physician','Doctor', required = True) wizard_multiple_test_owner_partner_id = fields.Many2one('res.partner','Owner') tests_ids =", "lab_test_req_obj = self.env['medical.test_type'] test_browse_record = lab_test_req_obj.browse(test_id.id) test_name = test_browse_record.name medical_test_request_obj = self.env['medical.patient.lab.test'] new_created_id", "new_created_id_list.append(new_created_id.id) if new_created_id_list: imd = self.env['ir.model.data'] action = imd.xmlid_to_object('hospital_management.action_medical_patient_lab_test') list_view_id = imd.xmlid_to_res_id('hospital_management.medical_patient_lab_test_tree_view') result", "in wizard_obj.tests_ids: lab_test_req_obj = self.env['medical.test_type'] test_browse_record = lab_test_req_obj.browse(test_id.id) test_name = test_browse_record.name medical_test_request_obj =", "if new_created_id_list: imd = self.env['ir.model.data'] action = imd.xmlid_to_object('hospital_management.action_medical_patient_lab_test') list_view_id = imd.xmlid_to_res_id('hospital_management.medical_patient_lab_test_tree_view') result =", "import api, fields, models, _ from datetime import date,datetime class wizard_multiple_test_request(models.TransientModel): _name =", "} if len(new_created_id_list) : result['domain'] = \"[('id','in',%s)]\" % new_created_id_list return result # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:", "wizard_multiple_test_request(models.TransientModel): _name = 'wizard.multiple.test.request' request_date = fields.Datetime('Request Date', required = True) wizard_multiple_test_patient_id =", "from odoo import api, fields, models, _ from datetime import date,datetime class wizard_multiple_test_request(models.TransientModel):", "required = True) urgent = fields.Boolean('Urgent',) wizard_multiple_test_physician_id = fields.Many2one('medical.physician','Doctor', 
required = True) wizard_multiple_test_owner_partner_id", "= self.env['medical.patient.lab.test'] new_created_id = medical_test_request_obj.create({'date': date, 'doctor_id': phy_id.id, 'patient_id':patient_id.id, 'state': 'tested', 'name':test_id.id, 'request'", "wizard_obj.wizard_multiple_test_patient_id phy_id = wizard_obj.wizard_multiple_test_physician_id new_created_id_list = [] date = wizard_obj.request_date for test_id in", "= wizard_obj.wizard_multiple_test_physician_id new_created_id_list = [] date = wizard_obj.request_date for test_id in wizard_obj.tests_ids: lab_test_req_obj", "action = imd.xmlid_to_object('hospital_management.action_medical_patient_lab_test') list_view_id = imd.xmlid_to_res_id('hospital_management.medical_patient_lab_test_tree_view') result = { 'name': action.name, 'help': action.help,", "= True) wizard_multiple_test_owner_partner_id = fields.Many2one('res.partner','Owner') tests_ids = fields.Many2many('medical.test_type', 'lab_test_report_test_rel', 'test_id', 'report_id', 'Tests') @api.multi", "'Tests') @api.multi def create_lab_test(self): wizard_obj = self patient_id = wizard_obj.wizard_multiple_test_patient_id phy_id = wizard_obj.wizard_multiple_test_physician_id", "imd.xmlid_to_res_id('hospital_management.medical_patient_lab_test_tree_view') result = { 'name': action.name, 'help': action.help, 'type': action.type, 'views': [ [list_view_id,'tree'", "result = { 'name': action.name, 'help': action.help, 'type': action.type, 'views': [ [list_view_id,'tree' ]],", "'doctor_id': phy_id.id, 'patient_id':patient_id.id, 'state': 'tested', 'name':test_id.id, 'request' :self.env['ir.sequence'].next_by_code('test_seq') }) new_created_id_list.append(new_created_id.id) if new_created_id_list: imd", "}) new_created_id_list.append(new_created_id.id) if new_created_id_list: imd = self.env['ir.model.data'] action = imd.xmlid_to_object('hospital_management.action_medical_patient_lab_test') list_view_id = 
imd.xmlid_to_res_id('hospital_management.medical_patient_lab_test_tree_view')", "[list_view_id,'tree' ]], 'target': action.target, 'context': action.context, 'res_model': action.res_model, } if len(new_created_id_list) : result['domain']", "fields.Many2one('res.partner','Owner') tests_ids = fields.Many2many('medical.test_type', 'lab_test_report_test_rel', 'test_id', 'report_id', 'Tests') @api.multi def create_lab_test(self): wizard_obj =", "fields.Many2one('medical.patient','Patient', required = True) urgent = fields.Boolean('Urgent',) wizard_multiple_test_physician_id = fields.Many2one('medical.physician','Doctor', required = True)", "True) urgent = fields.Boolean('Urgent',) wizard_multiple_test_physician_id = fields.Many2one('medical.physician','Doctor', required = True) wizard_multiple_test_owner_partner_id = fields.Many2one('res.partner','Owner')", "of BrowseInfo. See LICENSE file for full copyright and licensing details. from odoo", "fields, models, _ from datetime import date,datetime class wizard_multiple_test_request(models.TransientModel): _name = 'wizard.multiple.test.request' request_date", "= fields.Many2many('medical.test_type', 'lab_test_report_test_rel', 'test_id', 'report_id', 'Tests') @api.multi def create_lab_test(self): wizard_obj = self patient_id", "datetime import date,datetime class wizard_multiple_test_request(models.TransientModel): _name = 'wizard.multiple.test.request' request_date = fields.Datetime('Request Date', required", "wizard_multiple_test_owner_partner_id = fields.Many2one('res.partner','Owner') tests_ids = fields.Many2many('medical.test_type', 'lab_test_report_test_rel', 'test_id', 'report_id', 'Tests') @api.multi def create_lab_test(self):", "fields.Many2one('medical.physician','Doctor', required = True) wizard_multiple_test_owner_partner_id = fields.Many2one('res.partner','Owner') tests_ids = fields.Many2many('medical.test_type', 'lab_test_report_test_rel', 'test_id', 'report_id',", "'target': action.target, 'context': 
action.context, 'res_model': action.res_model, } if len(new_created_id_list) : result['domain'] = \"[('id','in',%s)]\"", "fields.Datetime('Request Date', required = True) wizard_multiple_test_patient_id = fields.Many2one('medical.patient','Patient', required = True) urgent =", "= [] date = wizard_obj.request_date for test_id in wizard_obj.tests_ids: lab_test_req_obj = self.env['medical.test_type'] test_browse_record", "[] date = wizard_obj.request_date for test_id in wizard_obj.tests_ids: lab_test_req_obj = self.env['medical.test_type'] test_browse_record =", "self patient_id = wizard_obj.wizard_multiple_test_patient_id phy_id = wizard_obj.wizard_multiple_test_physician_id new_created_id_list = [] date = wizard_obj.request_date", "action.res_model, } if len(new_created_id_list) : result['domain'] = \"[('id','in',%s)]\" % new_created_id_list return result #", "= wizard_obj.request_date for test_id in wizard_obj.tests_ids: lab_test_req_obj = self.env['medical.test_type'] test_browse_record = lab_test_req_obj.browse(test_id.id) test_name", "= True) urgent = fields.Boolean('Urgent',) wizard_multiple_test_physician_id = fields.Many2one('medical.physician','Doctor', required = True) wizard_multiple_test_owner_partner_id =", "]], 'target': action.target, 'context': action.context, 'res_model': action.res_model, } if len(new_created_id_list) : result['domain'] =", "date,datetime class wizard_multiple_test_request(models.TransientModel): _name = 'wizard.multiple.test.request' request_date = fields.Datetime('Request Date', required = True)", "'views': [ [list_view_id,'tree' ]], 'target': action.target, 'context': action.context, 'res_model': action.res_model, } if len(new_created_id_list)", "new_created_id = medical_test_request_obj.create({'date': date, 'doctor_id': phy_id.id, 'patient_id':patient_id.id, 'state': 'tested', 'name':test_id.id, 'request' :self.env['ir.sequence'].next_by_code('test_seq') })", "details. 
from odoo import api, fields, models, _ from datetime import date,datetime class", "_ from datetime import date,datetime class wizard_multiple_test_request(models.TransientModel): _name = 'wizard.multiple.test.request' request_date = fields.Datetime('Request", "= fields.Many2one('medical.patient','Patient', required = True) urgent = fields.Boolean('Urgent',) wizard_multiple_test_physician_id = fields.Many2one('medical.physician','Doctor', required =", "'test_id', 'report_id', 'Tests') @api.multi def create_lab_test(self): wizard_obj = self patient_id = wizard_obj.wizard_multiple_test_patient_id phy_id", "medical_test_request_obj = self.env['medical.patient.lab.test'] new_created_id = medical_test_request_obj.create({'date': date, 'doctor_id': phy_id.id, 'patient_id':patient_id.id, 'state': 'tested', 'name':test_id.id,", "create_lab_test(self): wizard_obj = self patient_id = wizard_obj.wizard_multiple_test_patient_id phy_id = wizard_obj.wizard_multiple_test_physician_id new_created_id_list = []", "models, _ from datetime import date,datetime class wizard_multiple_test_request(models.TransientModel): _name = 'wizard.multiple.test.request' request_date =", "phy_id.id, 'patient_id':patient_id.id, 'state': 'tested', 'name':test_id.id, 'request' :self.env['ir.sequence'].next_by_code('test_seq') }) new_created_id_list.append(new_created_id.id) if new_created_id_list: imd =", "= self.env['medical.test_type'] test_browse_record = lab_test_req_obj.browse(test_id.id) test_name = test_browse_record.name medical_test_request_obj = self.env['medical.patient.lab.test'] new_created_id =", "utf-8 -*- # Part of BrowseInfo. 
See LICENSE file for full copyright and", "class wizard_multiple_test_request(models.TransientModel): _name = 'wizard.multiple.test.request' request_date = fields.Datetime('Request Date', required = True) wizard_multiple_test_patient_id", "medical_test_request_obj.create({'date': date, 'doctor_id': phy_id.id, 'patient_id':patient_id.id, 'state': 'tested', 'name':test_id.id, 'request' :self.env['ir.sequence'].next_by_code('test_seq') }) new_created_id_list.append(new_created_id.id) if", "list_view_id = imd.xmlid_to_res_id('hospital_management.medical_patient_lab_test_tree_view') result = { 'name': action.name, 'help': action.help, 'type': action.type, 'views':", "= fields.Datetime('Request Date', required = True) wizard_multiple_test_patient_id = fields.Many2one('medical.patient','Patient', required = True) urgent", "date = wizard_obj.request_date for test_id in wizard_obj.tests_ids: lab_test_req_obj = self.env['medical.test_type'] test_browse_record = lab_test_req_obj.browse(test_id.id)", "{ 'name': action.name, 'help': action.help, 'type': action.type, 'views': [ [list_view_id,'tree' ]], 'target': action.target,", "'lab_test_report_test_rel', 'test_id', 'report_id', 'Tests') @api.multi def create_lab_test(self): wizard_obj = self patient_id = wizard_obj.wizard_multiple_test_patient_id", "file for full copyright and licensing details. 
from odoo import api, fields, models,", "'name': action.name, 'help': action.help, 'type': action.type, 'views': [ [list_view_id,'tree' ]], 'target': action.target, 'context':", "'res_model': action.res_model, } if len(new_created_id_list) : result['domain'] = \"[('id','in',%s)]\" % new_created_id_list return result", "patient_id = wizard_obj.wizard_multiple_test_patient_id phy_id = wizard_obj.wizard_multiple_test_physician_id new_created_id_list = [] date = wizard_obj.request_date for", "test_id in wizard_obj.tests_ids: lab_test_req_obj = self.env['medical.test_type'] test_browse_record = lab_test_req_obj.browse(test_id.id) test_name = test_browse_record.name medical_test_request_obj", "licensing details. from odoo import api, fields, models, _ from datetime import date,datetime", "wizard_multiple_test_patient_id = fields.Many2one('medical.patient','Patient', required = True) urgent = fields.Boolean('Urgent',) wizard_multiple_test_physician_id = fields.Many2one('medical.physician','Doctor', required", "= self.env['ir.model.data'] action = imd.xmlid_to_object('hospital_management.action_medical_patient_lab_test') list_view_id = imd.xmlid_to_res_id('hospital_management.medical_patient_lab_test_tree_view') result = { 'name': action.name,", "coding: utf-8 -*- # Part of BrowseInfo. 
See LICENSE file for full copyright", "self.env['medical.patient.lab.test'] new_created_id = medical_test_request_obj.create({'date': date, 'doctor_id': phy_id.id, 'patient_id':patient_id.id, 'state': 'tested', 'name':test_id.id, 'request' :self.env['ir.sequence'].next_by_code('test_seq')", "test_browse_record = lab_test_req_obj.browse(test_id.id) test_name = test_browse_record.name medical_test_request_obj = self.env['medical.patient.lab.test'] new_created_id = medical_test_request_obj.create({'date': date,", "'tested', 'name':test_id.id, 'request' :self.env['ir.sequence'].next_by_code('test_seq') }) new_created_id_list.append(new_created_id.id) if new_created_id_list: imd = self.env['ir.model.data'] action =", "odoo import api, fields, models, _ from datetime import date,datetime class wizard_multiple_test_request(models.TransientModel): _name", "= test_browse_record.name medical_test_request_obj = self.env['medical.patient.lab.test'] new_created_id = medical_test_request_obj.create({'date': date, 'doctor_id': phy_id.id, 'patient_id':patient_id.id, 'state':", "'request' :self.env['ir.sequence'].next_by_code('test_seq') }) new_created_id_list.append(new_created_id.id) if new_created_id_list: imd = self.env['ir.model.data'] action = imd.xmlid_to_object('hospital_management.action_medical_patient_lab_test') list_view_id", "'help': action.help, 'type': action.type, 'views': [ [list_view_id,'tree' ]], 'target': action.target, 'context': action.context, 'res_model':", "= 'wizard.multiple.test.request' request_date = fields.Datetime('Request Date', required = True) wizard_multiple_test_patient_id = fields.Many2one('medical.patient','Patient', required", "action.help, 'type': action.type, 'views': [ [list_view_id,'tree' ]], 'target': action.target, 'context': action.context, 'res_model': action.res_model,", "full copyright and licensing details. 
from odoo import api, fields, models, _ from", "= medical_test_request_obj.create({'date': date, 'doctor_id': phy_id.id, 'patient_id':patient_id.id, 'state': 'tested', 'name':test_id.id, 'request' :self.env['ir.sequence'].next_by_code('test_seq') }) new_created_id_list.append(new_created_id.id)", "fields.Boolean('Urgent',) wizard_multiple_test_physician_id = fields.Many2one('medical.physician','Doctor', required = True) wizard_multiple_test_owner_partner_id = fields.Many2one('res.partner','Owner') tests_ids = fields.Many2many('medical.test_type',", "request_date = fields.Datetime('Request Date', required = True) wizard_multiple_test_patient_id = fields.Many2one('medical.patient','Patient', required = True)", "lab_test_req_obj.browse(test_id.id) test_name = test_browse_record.name medical_test_request_obj = self.env['medical.patient.lab.test'] new_created_id = medical_test_request_obj.create({'date': date, 'doctor_id': phy_id.id,", "'patient_id':patient_id.id, 'state': 'tested', 'name':test_id.id, 'request' :self.env['ir.sequence'].next_by_code('test_seq') }) new_created_id_list.append(new_created_id.id) if new_created_id_list: imd = self.env['ir.model.data']", "@api.multi def create_lab_test(self): wizard_obj = self patient_id = wizard_obj.wizard_multiple_test_patient_id phy_id = wizard_obj.wizard_multiple_test_physician_id new_created_id_list", "and licensing details. 
from odoo import api, fields, models, _ from datetime import", "wizard_obj.request_date for test_id in wizard_obj.tests_ids: lab_test_req_obj = self.env['medical.test_type'] test_browse_record = lab_test_req_obj.browse(test_id.id) test_name =", "date, 'doctor_id': phy_id.id, 'patient_id':patient_id.id, 'state': 'tested', 'name':test_id.id, 'request' :self.env['ir.sequence'].next_by_code('test_seq') }) new_created_id_list.append(new_created_id.id) if new_created_id_list:", "new_created_id_list = [] date = wizard_obj.request_date for test_id in wizard_obj.tests_ids: lab_test_req_obj = self.env['medical.test_type']", "'report_id', 'Tests') @api.multi def create_lab_test(self): wizard_obj = self patient_id = wizard_obj.wizard_multiple_test_patient_id phy_id =", "= imd.xmlid_to_res_id('hospital_management.medical_patient_lab_test_tree_view') result = { 'name': action.name, 'help': action.help, 'type': action.type, 'views': [", "# Part of BrowseInfo. See LICENSE file for full copyright and licensing details.", "def create_lab_test(self): wizard_obj = self patient_id = wizard_obj.wizard_multiple_test_patient_id phy_id = wizard_obj.wizard_multiple_test_physician_id new_created_id_list =", "imd = self.env['ir.model.data'] action = imd.xmlid_to_object('hospital_management.action_medical_patient_lab_test') list_view_id = imd.xmlid_to_res_id('hospital_management.medical_patient_lab_test_tree_view') result = { 'name':", "import date,datetime class wizard_multiple_test_request(models.TransientModel): _name = 'wizard.multiple.test.request' request_date = fields.Datetime('Request Date', required =", "= True) wizard_multiple_test_patient_id = fields.Many2one('medical.patient','Patient', required = True) urgent = fields.Boolean('Urgent',) wizard_multiple_test_physician_id =", "test_name = test_browse_record.name medical_test_request_obj = self.env['medical.patient.lab.test'] new_created_id = medical_test_request_obj.create({'date': date, 'doctor_id': phy_id.id, 
'patient_id':patient_id.id,", "copyright and licensing details. from odoo import api, fields, models, _ from datetime", "required = True) wizard_multiple_test_owner_partner_id = fields.Many2one('res.partner','Owner') tests_ids = fields.Many2many('medical.test_type', 'lab_test_report_test_rel', 'test_id', 'report_id', 'Tests')", "= wizard_obj.wizard_multiple_test_patient_id phy_id = wizard_obj.wizard_multiple_test_physician_id new_created_id_list = [] date = wizard_obj.request_date for test_id", "See LICENSE file for full copyright and licensing details. from odoo import api,", "Part of BrowseInfo. See LICENSE file for full copyright and licensing details. from", "-*- coding: utf-8 -*- # Part of BrowseInfo. See LICENSE file for full", "tests_ids = fields.Many2many('medical.test_type', 'lab_test_report_test_rel', 'test_id', 'report_id', 'Tests') @api.multi def create_lab_test(self): wizard_obj = self", "= fields.Many2one('res.partner','Owner') tests_ids = fields.Many2many('medical.test_type', 'lab_test_report_test_rel', 'test_id', 'report_id', 'Tests') @api.multi def create_lab_test(self): wizard_obj", "wizard_obj.wizard_multiple_test_physician_id new_created_id_list = [] date = wizard_obj.request_date for test_id in wizard_obj.tests_ids: lab_test_req_obj =", "= { 'name': action.name, 'help': action.help, 'type': action.type, 'views': [ [list_view_id,'tree' ]], 'target':", "-*- # Part of BrowseInfo. 
See LICENSE file for full copyright and licensing", "action.target, 'context': action.context, 'res_model': action.res_model, } if len(new_created_id_list) : result['domain'] = \"[('id','in',%s)]\" %", "True) wizard_multiple_test_patient_id = fields.Many2one('medical.patient','Patient', required = True) urgent = fields.Boolean('Urgent',) wizard_multiple_test_physician_id = fields.Many2one('medical.physician','Doctor',", "'wizard.multiple.test.request' request_date = fields.Datetime('Request Date', required = True) wizard_multiple_test_patient_id = fields.Many2one('medical.patient','Patient', required =", ":self.env['ir.sequence'].next_by_code('test_seq') }) new_created_id_list.append(new_created_id.id) if new_created_id_list: imd = self.env['ir.model.data'] action = imd.xmlid_to_object('hospital_management.action_medical_patient_lab_test') list_view_id =", "'type': action.type, 'views': [ [list_view_id,'tree' ]], 'target': action.target, 'context': action.context, 'res_model': action.res_model, }", "[ [list_view_id,'tree' ]], 'target': action.target, 'context': action.context, 'res_model': action.res_model, } if len(new_created_id_list) :", "True) wizard_multiple_test_owner_partner_id = fields.Many2one('res.partner','Owner') tests_ids = fields.Many2many('medical.test_type', 'lab_test_report_test_rel', 'test_id', 'report_id', 'Tests') @api.multi def", "self.env['ir.model.data'] action = imd.xmlid_to_object('hospital_management.action_medical_patient_lab_test') list_view_id = imd.xmlid_to_res_id('hospital_management.medical_patient_lab_test_tree_view') result = { 'name': action.name, 'help':", "phy_id = wizard_obj.wizard_multiple_test_physician_id new_created_id_list = [] date = wizard_obj.request_date for test_id in wizard_obj.tests_ids:", "for test_id in wizard_obj.tests_ids: lab_test_req_obj = self.env['medical.test_type'] test_browse_record = lab_test_req_obj.browse(test_id.id) test_name = test_browse_record.name", "= 
imd.xmlid_to_object('hospital_management.action_medical_patient_lab_test') list_view_id = imd.xmlid_to_res_id('hospital_management.medical_patient_lab_test_tree_view') result = { 'name': action.name, 'help': action.help, 'type':", "action.type, 'views': [ [list_view_id,'tree' ]], 'target': action.target, 'context': action.context, 'res_model': action.res_model, } if", "fields.Many2many('medical.test_type', 'lab_test_report_test_rel', 'test_id', 'report_id', 'Tests') @api.multi def create_lab_test(self): wizard_obj = self patient_id =", "from datetime import date,datetime class wizard_multiple_test_request(models.TransientModel): _name = 'wizard.multiple.test.request' request_date = fields.Datetime('Request Date',", "'state': 'tested', 'name':test_id.id, 'request' :self.env['ir.sequence'].next_by_code('test_seq') }) new_created_id_list.append(new_created_id.id) if new_created_id_list: imd = self.env['ir.model.data'] action", "required = True) wizard_multiple_test_patient_id = fields.Many2one('medical.patient','Patient', required = True) urgent = fields.Boolean('Urgent',) wizard_multiple_test_physician_id", "Date', required = True) wizard_multiple_test_patient_id = fields.Many2one('medical.patient','Patient', required = True) urgent = fields.Boolean('Urgent',)", "new_created_id_list: imd = self.env['ir.model.data'] action = imd.xmlid_to_object('hospital_management.action_medical_patient_lab_test') list_view_id = imd.xmlid_to_res_id('hospital_management.medical_patient_lab_test_tree_view') result = {", "= fields.Many2one('medical.physician','Doctor', required = True) wizard_multiple_test_owner_partner_id = fields.Many2one('res.partner','Owner') tests_ids = fields.Many2many('medical.test_type', 'lab_test_report_test_rel', 'test_id',", "= lab_test_req_obj.browse(test_id.id) test_name = test_browse_record.name medical_test_request_obj = self.env['medical.patient.lab.test'] new_created_id = medical_test_request_obj.create({'date': date, 'doctor_id':", "BrowseInfo. 
See LICENSE file for full copyright and licensing details. from odoo import", "action.context, 'res_model': action.res_model, } if len(new_created_id_list) : result['domain'] = \"[('id','in',%s)]\" % new_created_id_list return", "wizard_obj.tests_ids: lab_test_req_obj = self.env['medical.test_type'] test_browse_record = lab_test_req_obj.browse(test_id.id) test_name = test_browse_record.name medical_test_request_obj = self.env['medical.patient.lab.test']", "# -*- coding: utf-8 -*- # Part of BrowseInfo. See LICENSE file for", "'context': action.context, 'res_model': action.res_model, } if len(new_created_id_list) : result['domain'] = \"[('id','in',%s)]\" % new_created_id_list", "wizard_obj = self patient_id = wizard_obj.wizard_multiple_test_patient_id phy_id = wizard_obj.wizard_multiple_test_physician_id new_created_id_list = [] date", "imd.xmlid_to_object('hospital_management.action_medical_patient_lab_test') list_view_id = imd.xmlid_to_res_id('hospital_management.medical_patient_lab_test_tree_view') result = { 'name': action.name, 'help': action.help, 'type': action.type,", "wizard_multiple_test_physician_id = fields.Many2one('medical.physician','Doctor', required = True) wizard_multiple_test_owner_partner_id = fields.Many2one('res.partner','Owner') tests_ids = fields.Many2many('medical.test_type', 'lab_test_report_test_rel'," ]
[ "= models.IntegerField() class_code = models.CharField(max_length=20) author_id = models.CharField(max_length=20) title = models.CharField(max_length=255) content =", "blank=True, null=True) quota = models.CharField(max_length=10, blank=True, null=True) instructor = models.CharField(max_length=100, blank=True, null=True) credit", "name = models.CharField(max_length=20, blank=True, null=True) class Meta: managed = False db_table = 'user'", "model module. # You'll have to do the following manually to clean this", "False db_table = 'django_migrations' class Post(models.Model): post_id = models.IntegerField() class_code = models.CharField(max_length=20) author_id", "= models.IntegerField() flag = models.IntegerField(blank=True, null=True) class Meta: managed = False db_table =", "up: # * Rearrange models' order # * Make sure each model has", "* Make sure each model has one field with primary_key=True # * Remove", "delete the table # Feel free to rename the models, but don't rename", "Django model module. # You'll have to do the following manually to clean", "sqlcustom [app_label]' # into your database. 
from __future__ import unicode_literals from django.db import", "models.IntegerField() class_code = models.CharField(max_length=20) post_id = models.IntegerField(blank=True, null=True) create_date = models.DateTimeField(blank=True, null=True) author_id", "class_hour_room = models.CharField(max_length=500, blank=True, null=True) class_type = models.CharField(max_length=20, blank=True, null=True) class_lan = models.CharField(max_length=50,", "= False db_table = 'comment' class Course20182(models.Model): class_code = models.CharField(max_length=20) class_name = models.CharField(max_length=100)", "models.CharField(primary_key=True, max_length=20) naver_id = models.CharField(max_length=20, blank=True, null=True) lectures = models.CharField(max_length=512, blank=True, null=True) name", "to allow Django to create, modify, and delete the table # Feel free", "to clean this up: # * Rearrange models' order # * Make sure", "flag = models.IntegerField(blank=True, null=True) class Meta: managed = False db_table = 'comment' class", "table # Feel free to rename the models, but don't rename db_table values", "the output of 'django-admin sqlcustom [app_label]' # into your database. 
from __future__ import", "models.DateTimeField(blank=True, null=True) hit = models.IntegerField() flag = models.IntegerField(blank=True, null=True) class Meta: managed =", "class Meta: managed = False db_table = 'course_2018_20' class DjangoMigrations(models.Model): app = models.CharField(max_length=255)", "class Course20182(models.Model): class_code = models.CharField(max_length=20) class_name = models.CharField(max_length=100) class_year = models.CharField(max_length=10, blank=True, null=True)", "the following manually to clean this up: # * Rearrange models' order #", "import unicode_literals from django.db import models class Comment(models.Model): comment_id = models.IntegerField() class_code =", "= 'comment' class Course20182(models.Model): class_code = models.CharField(max_length=20) class_name = models.CharField(max_length=100) class_year = models.CharField(max_length=10,", "null=True) class_hour_room = models.CharField(max_length=500, blank=True, null=True) class_type = models.CharField(max_length=20, blank=True, null=True) class_lan =", "model has one field with primary_key=True # * Remove `managed = False` lines", "# # Also note: You'll have to insert the output of 'django-admin sqlcustom", "models.CharField(max_length=20) content = models.TextField(blank=True, null=True) flag = models.IntegerField(blank=True, null=True) class Meta: managed =", "content = models.TextField(blank=True, null=True) create_date = models.DateTimeField(blank=True, null=True) hit = models.IntegerField() flag =", "= models.TextField(blank=True, null=True) create_date = models.DateTimeField(blank=True, null=True) hit = models.IntegerField() flag = models.IntegerField(blank=True,", "name = models.CharField(max_length=255) applied = models.DateTimeField() class Meta: managed = False db_table =", "models.CharField(max_length=20) title = models.CharField(max_length=255) content = models.TextField(blank=True, null=True) create_date = models.DateTimeField(blank=True, null=True) hit", "= 
False db_table = 'post' class User(models.Model): klas_id = models.CharField(primary_key=True, max_length=20) naver_id =", "to do the following manually to clean this up: # * Rearrange models'", "order # * Make sure each model has one field with primary_key=True #", "null=True) class Meta: managed = False db_table = 'post' class User(models.Model): klas_id =", "managed = False db_table = 'post' class User(models.Model): klas_id = models.CharField(primary_key=True, max_length=20) naver_id", "this up: # * Rearrange models' order # * Make sure each model", "null=True) hit = models.IntegerField() flag = models.IntegerField(blank=True, null=True) class Meta: managed = False", "models class Comment(models.Model): comment_id = models.IntegerField() class_code = models.CharField(max_length=20) post_id = models.IntegerField(blank=True, null=True)", "null=True) campus = models.CharField(max_length=10, blank=True, null=True) class Meta: managed = False db_table =", "blank=True, null=True) notice = models.CharField(max_length=100, blank=True, null=True) campus = models.CharField(max_length=10, blank=True, null=True) class", "models.CharField(max_length=255) applied = models.DateTimeField() class Meta: managed = False db_table = 'django_migrations' class", "= models.CharField(max_length=20) title = models.CharField(max_length=255) content = models.TextField(blank=True, null=True) create_date = models.DateTimeField(blank=True, null=True)", "# * Remove `managed = False` lines if you wish to allow Django", "models.CharField(max_length=100, blank=True, null=True) credit = models.CharField(max_length=10, blank=True, null=True) class_hour_room = models.CharField(max_length=500, blank=True, null=True)", "hit = models.IntegerField() flag = models.IntegerField(blank=True, null=True) class Meta: managed = False db_table", "names. 
# # Also note: You'll have to insert the output of 'django-admin", "Meta: managed = False db_table = 'comment' class Course20182(models.Model): class_code = models.CharField(max_length=20) class_name", "to create, modify, and delete the table # Feel free to rename the", "blank=True, null=True) class_lan = models.CharField(max_length=50, blank=True, null=True) notice = models.CharField(max_length=100, blank=True, null=True) campus", "naver_id = models.CharField(max_length=20, blank=True, null=True) lectures = models.CharField(max_length=512, blank=True, null=True) name = models.CharField(max_length=20,", "False db_table = 'course_2018_20' class DjangoMigrations(models.Model): app = models.CharField(max_length=255) name = models.CharField(max_length=255) applied", "models.IntegerField(blank=True, null=True) create_date = models.DateTimeField(blank=True, null=True) author_id = models.CharField(max_length=20) content = models.TextField(blank=True, null=True)", "or field names. # # Also note: You'll have to insert the output", "models.DateTimeField() class Meta: managed = False db_table = 'django_migrations' class Post(models.Model): post_id =", "= models.CharField(max_length=100) class_year = models.CharField(max_length=10, blank=True, null=True) quota = models.CharField(max_length=10, blank=True, null=True) instructor", "output of 'django-admin sqlcustom [app_label]' # into your database. from __future__ import unicode_literals", "applied = models.DateTimeField() class Meta: managed = False db_table = 'django_migrations' class Post(models.Model):", "rename db_table values or field names. 
# # Also note: You'll have to", "null=True) class Meta: managed = False db_table = 'course_2018_20' class DjangoMigrations(models.Model): app =", "= 'django_migrations' class Post(models.Model): post_id = models.IntegerField() class_code = models.CharField(max_length=20) author_id = models.CharField(max_length=20)", "Meta: managed = False db_table = 'post' class User(models.Model): klas_id = models.CharField(primary_key=True, max_length=20)", "False db_table = 'comment' class Course20182(models.Model): class_code = models.CharField(max_length=20) class_name = models.CharField(max_length=100) class_year", "models.CharField(max_length=50, blank=True, null=True) notice = models.CharField(max_length=100, blank=True, null=True) campus = models.CharField(max_length=10, blank=True, null=True)", "DjangoMigrations(models.Model): app = models.CharField(max_length=255) name = models.CharField(max_length=255) applied = models.DateTimeField() class Meta: managed", "field names. # # Also note: You'll have to insert the output of", "null=True) instructor = models.CharField(max_length=100, blank=True, null=True) credit = models.CharField(max_length=10, blank=True, null=True) class_hour_room =", "blank=True, null=True) credit = models.CharField(max_length=10, blank=True, null=True) class_hour_room = models.CharField(max_length=500, blank=True, null=True) class_type", "managed = False db_table = 'django_migrations' class Post(models.Model): post_id = models.IntegerField() class_code =", "models.CharField(max_length=10, blank=True, null=True) class_hour_room = models.CharField(max_length=500, blank=True, null=True) class_type = models.CharField(max_length=20, blank=True, null=True)", "null=True) class_lan = models.CharField(max_length=50, blank=True, null=True) notice = models.CharField(max_length=100, blank=True, null=True) campus =", "class User(models.Model): klas_id = models.CharField(primary_key=True, max_length=20) naver_id = models.CharField(max_length=20, blank=True, 
null=True) lectures =", "= 'post' class User(models.Model): klas_id = models.CharField(primary_key=True, max_length=20) naver_id = models.CharField(max_length=20, blank=True, null=True)", "db_table = 'course_2018_20' class DjangoMigrations(models.Model): app = models.CharField(max_length=255) name = models.CharField(max_length=255) applied =", "# into your database. from __future__ import unicode_literals from django.db import models class", "content = models.TextField(blank=True, null=True) flag = models.IntegerField(blank=True, null=True) class Meta: managed = False", "models.TextField(blank=True, null=True) create_date = models.DateTimeField(blank=True, null=True) hit = models.IntegerField() flag = models.IntegerField(blank=True, null=True)", "False db_table = 'post' class User(models.Model): klas_id = models.CharField(primary_key=True, max_length=20) naver_id = models.CharField(max_length=20,", "models.IntegerField(blank=True, null=True) class Meta: managed = False db_table = 'comment' class Course20182(models.Model): class_code", "null=True) credit = models.CharField(max_length=10, blank=True, null=True) class_hour_room = models.CharField(max_length=500, blank=True, null=True) class_type =", "have to do the following manually to clean this up: # * Rearrange", "= models.CharField(max_length=10, blank=True, null=True) class Meta: managed = False db_table = 'course_2018_20' class", "models.IntegerField(blank=True, null=True) class Meta: managed = False db_table = 'post' class User(models.Model): klas_id", "null=True) lectures = models.CharField(max_length=512, blank=True, null=True) name = models.CharField(max_length=20, blank=True, null=True) class Meta:", "`managed = False` lines if you wish to allow Django to create, modify,", "= models.DateTimeField(blank=True, null=True) hit = models.IntegerField() flag = models.IntegerField(blank=True, null=True) class Meta: managed", "# You'll have to do the following manually to clean this up: #", "= 
models.CharField(max_length=20, blank=True, null=True) class_lan = models.CharField(max_length=50, blank=True, null=True) notice = models.CharField(max_length=100, blank=True,", "= models.CharField(max_length=512, blank=True, null=True) name = models.CharField(max_length=20, blank=True, null=True) class Meta: managed =", "class_code = models.CharField(max_length=20) class_name = models.CharField(max_length=100) class_year = models.CharField(max_length=10, blank=True, null=True) quota =", "don't rename db_table values or field names. # # Also note: You'll have", "'comment' class Course20182(models.Model): class_code = models.CharField(max_length=20) class_name = models.CharField(max_length=100) class_year = models.CharField(max_length=10, blank=True,", "= models.CharField(max_length=500, blank=True, null=True) class_type = models.CharField(max_length=20, blank=True, null=True) class_lan = models.CharField(max_length=50, blank=True,", "null=True) notice = models.CharField(max_length=100, blank=True, null=True) campus = models.CharField(max_length=10, blank=True, null=True) class Meta:", "null=True) name = models.CharField(max_length=20, blank=True, null=True) class Meta: managed = False db_table =", "null=True) class_type = models.CharField(max_length=20, blank=True, null=True) class_lan = models.CharField(max_length=50, blank=True, null=True) notice =", "rename the models, but don't rename db_table values or field names. 
# #", "managed = False db_table = 'course_2018_20' class DjangoMigrations(models.Model): app = models.CharField(max_length=255) name =", "# * Make sure each model has one field with primary_key=True # *", "managed = False db_table = 'comment' class Course20182(models.Model): class_code = models.CharField(max_length=20) class_name =", "null=True) create_date = models.DateTimeField(blank=True, null=True) hit = models.IntegerField() flag = models.IntegerField(blank=True, null=True) class", "models.CharField(max_length=500, blank=True, null=True) class_type = models.CharField(max_length=20, blank=True, null=True) class_lan = models.CharField(max_length=50, blank=True, null=True)", "= models.CharField(max_length=255) applied = models.DateTimeField() class Meta: managed = False db_table = 'django_migrations'", "blank=True, null=True) campus = models.CharField(max_length=10, blank=True, null=True) class Meta: managed = False db_table", "blank=True, null=True) class_hour_room = models.CharField(max_length=500, blank=True, null=True) class_type = models.CharField(max_length=20, blank=True, null=True) class_lan", "= models.CharField(max_length=255) content = models.TextField(blank=True, null=True) create_date = models.DateTimeField(blank=True, null=True) hit = models.IntegerField()", "blank=True, null=True) instructor = models.CharField(max_length=100, blank=True, null=True) credit = models.CharField(max_length=10, blank=True, null=True) class_hour_room", "models.CharField(max_length=20) class_name = models.CharField(max_length=100) class_year = models.CharField(max_length=10, blank=True, null=True) quota = models.CharField(max_length=10, blank=True,", "db_table values or field names. 
# # Also note: You'll have to insert", "db_table = 'post' class User(models.Model): klas_id = models.CharField(primary_key=True, max_length=20) naver_id = models.CharField(max_length=20, blank=True,", "You'll have to insert the output of 'django-admin sqlcustom [app_label]' # into your", "Comment(models.Model): comment_id = models.IntegerField() class_code = models.CharField(max_length=20) post_id = models.IntegerField(blank=True, null=True) create_date =", "author_id = models.CharField(max_length=20) content = models.TextField(blank=True, null=True) flag = models.IntegerField(blank=True, null=True) class Meta:", "models.DateTimeField(blank=True, null=True) author_id = models.CharField(max_length=20) content = models.TextField(blank=True, null=True) flag = models.IntegerField(blank=True, null=True)", "null=True) class Meta: managed = False db_table = 'comment' class Course20182(models.Model): class_code =", "klas_id = models.CharField(primary_key=True, max_length=20) naver_id = models.CharField(max_length=20, blank=True, null=True) lectures = models.CharField(max_length=512, blank=True,", "Django to create, modify, and delete the table # Feel free to rename", "Rearrange models' order # * Make sure each model has one field with", "lectures = models.CharField(max_length=512, blank=True, null=True) name = models.CharField(max_length=20, blank=True, null=True) class Meta: managed", "= False` lines if you wish to allow Django to create, modify, and", "the table # Feel free to rename the models, but don't rename db_table", "from __future__ import unicode_literals from django.db import models class Comment(models.Model): comment_id = models.IntegerField()", "blank=True, null=True) name = models.CharField(max_length=20, blank=True, null=True) class Meta: managed = False db_table", "= models.CharField(max_length=100, blank=True, null=True) campus = models.CharField(max_length=10, blank=True, null=True) class Meta: managed =", "class Meta: managed = False db_table = 'post' 
class User(models.Model): klas_id = models.CharField(primary_key=True,", "values or field names. # # Also note: You'll have to insert the", "null=True) create_date = models.DateTimeField(blank=True, null=True) author_id = models.CharField(max_length=20) content = models.TextField(blank=True, null=True) flag", "field with primary_key=True # * Remove `managed = False` lines if you wish", "notice = models.CharField(max_length=100, blank=True, null=True) campus = models.CharField(max_length=10, blank=True, null=True) class Meta: managed", "is an auto-generated Django model module. # You'll have to do the following", "Feel free to rename the models, but don't rename db_table values or field", "= False db_table = 'django_migrations' class Post(models.Model): post_id = models.IntegerField() class_code = models.CharField(max_length=20)", "blank=True, null=True) lectures = models.CharField(max_length=512, blank=True, null=True) name = models.CharField(max_length=20, blank=True, null=True) class", "'django-admin sqlcustom [app_label]' # into your database. 
from __future__ import unicode_literals from django.db", "class_type = models.CharField(max_length=20, blank=True, null=True) class_lan = models.CharField(max_length=50, blank=True, null=True) notice = models.CharField(max_length=100,", "Remove `managed = False` lines if you wish to allow Django to create,", "one field with primary_key=True # * Remove `managed = False` lines if you", "models' order # * Make sure each model has one field with primary_key=True", "sure each model has one field with primary_key=True # * Remove `managed =", "models.CharField(max_length=10, blank=True, null=True) instructor = models.CharField(max_length=100, blank=True, null=True) credit = models.CharField(max_length=10, blank=True, null=True)", "class DjangoMigrations(models.Model): app = models.CharField(max_length=255) name = models.CharField(max_length=255) applied = models.DateTimeField() class Meta:", "= models.CharField(max_length=20) author_id = models.CharField(max_length=20) title = models.CharField(max_length=255) content = models.TextField(blank=True, null=True) create_date", "class Meta: managed = False db_table = 'comment' class Course20182(models.Model): class_code = models.CharField(max_length=20)", "create_date = models.DateTimeField(blank=True, null=True) hit = models.IntegerField() flag = models.IntegerField(blank=True, null=True) class Meta:", "'post' class User(models.Model): klas_id = models.CharField(primary_key=True, max_length=20) naver_id = models.CharField(max_length=20, blank=True, null=True) lectures", "insert the output of 'django-admin sqlcustom [app_label]' # into your database. 
from __future__", "class_code = models.CharField(max_length=20) author_id = models.CharField(max_length=20) title = models.CharField(max_length=255) content = models.TextField(blank=True, null=True)", "blank=True, null=True) class Meta: managed = False db_table = 'course_2018_20' class DjangoMigrations(models.Model): app", "= models.CharField(max_length=20) post_id = models.IntegerField(blank=True, null=True) create_date = models.DateTimeField(blank=True, null=True) author_id = models.CharField(max_length=20)", "models.CharField(max_length=20) author_id = models.CharField(max_length=20) title = models.CharField(max_length=255) content = models.TextField(blank=True, null=True) create_date =", "models.IntegerField() flag = models.IntegerField(blank=True, null=True) class Meta: managed = False db_table = 'post'", "Post(models.Model): post_id = models.IntegerField() class_code = models.CharField(max_length=20) author_id = models.CharField(max_length=20) title = models.CharField(max_length=255)", "= models.CharField(max_length=50, blank=True, null=True) notice = models.CharField(max_length=100, blank=True, null=True) campus = models.CharField(max_length=10, blank=True,", "= models.DateTimeField() class Meta: managed = False db_table = 'django_migrations' class Post(models.Model): post_id", "clean this up: # * Rearrange models' order # * Make sure each", "create, modify, and delete the table # Feel free to rename the models,", "credit = models.CharField(max_length=10, blank=True, null=True) class_hour_room = models.CharField(max_length=500, blank=True, null=True) class_type = models.CharField(max_length=20,", "note: You'll have to insert the output of 'django-admin sqlcustom [app_label]' # into", "campus = models.CharField(max_length=10, blank=True, null=True) class Meta: managed = False db_table = 'course_2018_20'", "an auto-generated Django model module. # You'll have to do the following manually", "into your database. 
from __future__ import unicode_literals from django.db import models class Comment(models.Model):", "__future__ import unicode_literals from django.db import models class Comment(models.Model): comment_id = models.IntegerField() class_code", "models.CharField(max_length=20) post_id = models.IntegerField(blank=True, null=True) create_date = models.DateTimeField(blank=True, null=True) author_id = models.CharField(max_length=20) content", "models.IntegerField() class_code = models.CharField(max_length=20) author_id = models.CharField(max_length=20) title = models.CharField(max_length=255) content = models.TextField(blank=True,", "blank=True, null=True) class_type = models.CharField(max_length=20, blank=True, null=True) class_lan = models.CharField(max_length=50, blank=True, null=True) notice", "You'll have to do the following manually to clean this up: # *", "models.CharField(max_length=10, blank=True, null=True) class Meta: managed = False db_table = 'course_2018_20' class DjangoMigrations(models.Model):", "= models.IntegerField(blank=True, null=True) class Meta: managed = False db_table = 'post' class User(models.Model):", "Make sure each model has one field with primary_key=True # * Remove `managed", "= models.CharField(max_length=20) content = models.TextField(blank=True, null=True) flag = models.IntegerField(blank=True, null=True) class Meta: managed", "allow Django to create, modify, and delete the table # Feel free to", "comment_id = models.IntegerField() class_code = models.CharField(max_length=20) post_id = models.IntegerField(blank=True, null=True) create_date = models.DateTimeField(blank=True,", "Meta: managed = False db_table = 'django_migrations' class Post(models.Model): post_id = models.IntegerField() class_code", "[app_label]' # into your database. 
from __future__ import unicode_literals from django.db import models", "models.CharField(max_length=20, blank=True, null=True) lectures = models.CharField(max_length=512, blank=True, null=True) name = models.CharField(max_length=20, blank=True, null=True)", "models.CharField(max_length=512, blank=True, null=True) name = models.CharField(max_length=20, blank=True, null=True) class Meta: managed = False", "models.CharField(max_length=100, blank=True, null=True) campus = models.CharField(max_length=10, blank=True, null=True) class Meta: managed = False", "modify, and delete the table # Feel free to rename the models, but", "instructor = models.CharField(max_length=100, blank=True, null=True) credit = models.CharField(max_length=10, blank=True, null=True) class_hour_room = models.CharField(max_length=500,", "class_year = models.CharField(max_length=10, blank=True, null=True) quota = models.CharField(max_length=10, blank=True, null=True) instructor = models.CharField(max_length=100,", "'course_2018_20' class DjangoMigrations(models.Model): app = models.CharField(max_length=255) name = models.CharField(max_length=255) applied = models.DateTimeField() class", "null=True) flag = models.IntegerField(blank=True, null=True) class Meta: managed = False db_table = 'comment'", "models.CharField(max_length=255) content = models.TextField(blank=True, null=True) create_date = models.DateTimeField(blank=True, null=True) hit = models.IntegerField() flag", "module. 
# You'll have to do the following manually to clean this up:", "post_id = models.IntegerField(blank=True, null=True) create_date = models.DateTimeField(blank=True, null=True) author_id = models.CharField(max_length=20) content =", "False` lines if you wish to allow Django to create, modify, and delete", "has one field with primary_key=True # * Remove `managed = False` lines if", "title = models.CharField(max_length=255) content = models.TextField(blank=True, null=True) create_date = models.DateTimeField(blank=True, null=True) hit =", "database. from __future__ import unicode_literals from django.db import models class Comment(models.Model): comment_id =", "# Feel free to rename the models, but don't rename db_table values or", "to rename the models, but don't rename db_table values or field names. #", "= models.IntegerField(blank=True, null=True) create_date = models.DateTimeField(blank=True, null=True) author_id = models.CharField(max_length=20) content = models.TextField(blank=True,", "class Meta: managed = False db_table = 'django_migrations' class Post(models.Model): post_id = models.IntegerField()", "have to insert the output of 'django-admin sqlcustom [app_label]' # into your database.", "class Comment(models.Model): comment_id = models.IntegerField() class_code = models.CharField(max_length=20) post_id = models.IntegerField(blank=True, null=True) create_date", "= models.IntegerField() class_code = models.CharField(max_length=20) post_id = models.IntegerField(blank=True, null=True) create_date = models.DateTimeField(blank=True, null=True)", "class_code = models.CharField(max_length=20) post_id = models.IntegerField(blank=True, null=True) create_date = models.DateTimeField(blank=True, null=True) author_id =", "class Post(models.Model): post_id = models.IntegerField() class_code = models.CharField(max_length=20) author_id = models.CharField(max_length=20) title =", "app = models.CharField(max_length=255) name = models.CharField(max_length=255) applied = 
models.DateTimeField() class Meta: managed =", "= models.CharField(primary_key=True, max_length=20) naver_id = models.CharField(max_length=20, blank=True, null=True) lectures = models.CharField(max_length=512, blank=True, null=True)", "each model has one field with primary_key=True # * Remove `managed = False`", "django.db import models class Comment(models.Model): comment_id = models.IntegerField() class_code = models.CharField(max_length=20) post_id =", "if you wish to allow Django to create, modify, and delete the table", "lines if you wish to allow Django to create, modify, and delete the", "db_table = 'comment' class Course20182(models.Model): class_code = models.CharField(max_length=20) class_name = models.CharField(max_length=100) class_year =", "the models, but don't rename db_table values or field names. # # Also", "Course20182(models.Model): class_code = models.CharField(max_length=20) class_name = models.CharField(max_length=100) class_year = models.CharField(max_length=10, blank=True, null=True) quota", "= False db_table = 'course_2018_20' class DjangoMigrations(models.Model): app = models.CharField(max_length=255) name = models.CharField(max_length=255)", "models, but don't rename db_table values or field names. # # Also note:", "auto-generated Django model module. # You'll have to do the following manually to", "# This is an auto-generated Django model module. 
# You'll have to do", "do the following manually to clean this up: # * Rearrange models' order", "and delete the table # Feel free to rename the models, but don't", "'django_migrations' class Post(models.Model): post_id = models.IntegerField() class_code = models.CharField(max_length=20) author_id = models.CharField(max_length=20) title", "= models.CharField(max_length=100, blank=True, null=True) credit = models.CharField(max_length=10, blank=True, null=True) class_hour_room = models.CharField(max_length=500, blank=True,", "= models.TextField(blank=True, null=True) flag = models.IntegerField(blank=True, null=True) class Meta: managed = False db_table", "= models.CharField(max_length=20, blank=True, null=True) lectures = models.CharField(max_length=512, blank=True, null=True) name = models.CharField(max_length=20, blank=True,", "models.CharField(max_length=20, blank=True, null=True) class_lan = models.CharField(max_length=50, blank=True, null=True) notice = models.CharField(max_length=100, blank=True, null=True)", "max_length=20) naver_id = models.CharField(max_length=20, blank=True, null=True) lectures = models.CharField(max_length=512, blank=True, null=True) name =", "= 'course_2018_20' class DjangoMigrations(models.Model): app = models.CharField(max_length=255) name = models.CharField(max_length=255) applied = models.DateTimeField()", "User(models.Model): klas_id = models.CharField(primary_key=True, max_length=20) naver_id = models.CharField(max_length=20, blank=True, null=True) lectures = models.CharField(max_length=512,", "# Also note: You'll have to insert the output of 'django-admin sqlcustom [app_label]'", "flag = models.IntegerField(blank=True, null=True) class Meta: managed = False db_table = 'post' class", "class_name = models.CharField(max_length=100) class_year = models.CharField(max_length=10, blank=True, null=True) quota = models.CharField(max_length=10, blank=True, null=True)", "null=True) quota = models.CharField(max_length=10, blank=True, 
null=True) instructor = models.CharField(max_length=100, blank=True, null=True) credit =", "= models.CharField(max_length=10, blank=True, null=True) instructor = models.CharField(max_length=100, blank=True, null=True) credit = models.CharField(max_length=10, blank=True,", "primary_key=True # * Remove `managed = False` lines if you wish to allow", "free to rename the models, but don't rename db_table values or field names.", "null=True) author_id = models.CharField(max_length=20) content = models.TextField(blank=True, null=True) flag = models.IntegerField(blank=True, null=True) class", "wish to allow Django to create, modify, and delete the table # Feel", "quota = models.CharField(max_length=10, blank=True, null=True) instructor = models.CharField(max_length=100, blank=True, null=True) credit = models.CharField(max_length=10,", "unicode_literals from django.db import models class Comment(models.Model): comment_id = models.IntegerField() class_code = models.CharField(max_length=20)", "models.CharField(max_length=255) name = models.CharField(max_length=255) applied = models.DateTimeField() class Meta: managed = False db_table", "author_id = models.CharField(max_length=20) title = models.CharField(max_length=255) content = models.TextField(blank=True, null=True) create_date = models.DateTimeField(blank=True,", "* Rearrange models' order # * Make sure each model has one field", "This is an auto-generated Django model module. 
# You'll have to do the", "models.CharField(max_length=10, blank=True, null=True) quota = models.CharField(max_length=10, blank=True, null=True) instructor = models.CharField(max_length=100, blank=True, null=True)", "class_lan = models.CharField(max_length=50, blank=True, null=True) notice = models.CharField(max_length=100, blank=True, null=True) campus = models.CharField(max_length=10,", "with primary_key=True # * Remove `managed = False` lines if you wish to", "from django.db import models class Comment(models.Model): comment_id = models.IntegerField() class_code = models.CharField(max_length=20) post_id", "models.CharField(max_length=100) class_year = models.CharField(max_length=10, blank=True, null=True) quota = models.CharField(max_length=10, blank=True, null=True) instructor =", "import models class Comment(models.Model): comment_id = models.IntegerField() class_code = models.CharField(max_length=20) post_id = models.IntegerField(blank=True,", "following manually to clean this up: # * Rearrange models' order # *", "= models.IntegerField(blank=True, null=True) class Meta: managed = False db_table = 'comment' class Course20182(models.Model):", "= models.CharField(max_length=10, blank=True, null=True) quota = models.CharField(max_length=10, blank=True, null=True) instructor = models.CharField(max_length=100, blank=True,", "Meta: managed = False db_table = 'course_2018_20' class DjangoMigrations(models.Model): app = models.CharField(max_length=255) name", "to insert the output of 'django-admin sqlcustom [app_label]' # into your database. from", "models.TextField(blank=True, null=True) flag = models.IntegerField(blank=True, null=True) class Meta: managed = False db_table =", "of 'django-admin sqlcustom [app_label]' # into your database. from __future__ import unicode_literals from", "but don't rename db_table values or field names. 
# # Also note: You'll", "db_table = 'django_migrations' class Post(models.Model): post_id = models.IntegerField() class_code = models.CharField(max_length=20) author_id =", "# * Rearrange models' order # * Make sure each model has one", "create_date = models.DateTimeField(blank=True, null=True) author_id = models.CharField(max_length=20) content = models.TextField(blank=True, null=True) flag =", "you wish to allow Django to create, modify, and delete the table #", "manually to clean this up: # * Rearrange models' order # * Make", "* Remove `managed = False` lines if you wish to allow Django to", "Also note: You'll have to insert the output of 'django-admin sqlcustom [app_label]' #", "= models.DateTimeField(blank=True, null=True) author_id = models.CharField(max_length=20) content = models.TextField(blank=True, null=True) flag = models.IntegerField(blank=True,", "post_id = models.IntegerField() class_code = models.CharField(max_length=20) author_id = models.CharField(max_length=20) title = models.CharField(max_length=255) content", "= models.CharField(max_length=20) class_name = models.CharField(max_length=100) class_year = models.CharField(max_length=10, blank=True, null=True) quota = models.CharField(max_length=10,", "= models.CharField(max_length=10, blank=True, null=True) class_hour_room = models.CharField(max_length=500, blank=True, null=True) class_type = models.CharField(max_length=20, blank=True,", "your database. from __future__ import unicode_literals from django.db import models class Comment(models.Model): comment_id", "= models.CharField(max_length=255) name = models.CharField(max_length=255) applied = models.DateTimeField() class Meta: managed = False" ]
[ "dir_okay=False, readable=True)) @click.argument('output_filename', default='citation_keys', type=click.Path(file_okay=True, dir_okay=False, writable=True)) @click.option('-v/-V', '--verbose/--no-verbose', default=False) def main(aux_filename, output_filename,", "<reponame>yoavram-lab/EffectiveNPI import click @click.command() @click.argument('aux_filename', default='ms.aux', type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True)) @click.argument('output_filename', default='citation_keys', type=click.Path(file_okay=True,", "lines: for c in line.split(','): citations.add(c) if verbose: print(\"Found {} citations in {}\".format(len(citations),", "@click.argument('aux_filename', default='ms.aux', type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True)) @click.argument('output_filename', default='citation_keys', type=click.Path(file_okay=True, dir_okay=False, writable=True)) @click.option('-v/-V', '--verbose/--no-verbose',", "import click @click.command() @click.argument('aux_filename', default='ms.aux', type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True)) @click.argument('output_filename', default='citation_keys', type=click.Path(file_okay=True, dir_okay=False,", "f.readlines() lines = (line.strip() for line in lines if line.startswith(r'\\citation')) lines = (line[len('\\citation{'):-1]", "file_okay=True, dir_okay=False, readable=True)) @click.argument('output_filename', default='citation_keys', type=click.Path(file_okay=True, dir_okay=False, writable=True)) @click.option('-v/-V', '--verbose/--no-verbose', default=False) def main(aux_filename,", "verbose): with open(aux_filename) as f: lines = f.readlines() lines = (line.strip() for line", "for line in lines if line.startswith(r'\\citation')) lines = (line[len('\\citation{'):-1] for line in lines)", "output_filename, verbose): with open(aux_filename) as f: lines = f.readlines() lines = (line.strip() for", "= (line[len('\\citation{'):-1] for line in 
lines) citations = set() for line in lines:", "type=click.Path(file_okay=True, dir_okay=False, writable=True)) @click.option('-v/-V', '--verbose/--no-verbose', default=False) def main(aux_filename, output_filename, verbose): with open(aux_filename) as", "dir_okay=False, writable=True)) @click.option('-v/-V', '--verbose/--no-verbose', default=False) def main(aux_filename, output_filename, verbose): with open(aux_filename) as f:", "(line[len('\\citation{'):-1] for line in lines) citations = set() for line in lines: for", "= set() for line in lines: for c in line.split(','): citations.add(c) if verbose:", "= f.readlines() lines = (line.strip() for line in lines if line.startswith(r'\\citation')) lines =", "c in line.split(','): citations.add(c) if verbose: print(\"Found {} citations in {}\".format(len(citations), aux_filename)) with", "in line.split(','): citations.add(c) if verbose: print(\"Found {} citations in {}\".format(len(citations), aux_filename)) with open(output_filename,", "verbose: print(\"Found {} citations in {}\".format(len(citations), aux_filename)) with open(output_filename, 'wt') as f: f.write('\\n'.join(sorted(citations)))", "as f: lines = f.readlines() lines = (line.strip() for line in lines if", "click @click.command() @click.argument('aux_filename', default='ms.aux', type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True)) @click.argument('output_filename', default='citation_keys', type=click.Path(file_okay=True, dir_okay=False, writable=True))", "for c in line.split(','): citations.add(c) if verbose: print(\"Found {} citations in {}\".format(len(citations), aux_filename))", "line in lines) citations = set() for line in lines: for c in", "line.startswith(r'\\citation')) lines = (line[len('\\citation{'):-1] for line in lines) citations = set() for line", "in lines if line.startswith(r'\\citation')) lines = (line[len('\\citation{'):-1] for line in lines) citations =", "'--verbose/--no-verbose', default=False) def 
main(aux_filename, output_filename, verbose): with open(aux_filename) as f: lines = f.readlines()", "with open(aux_filename) as f: lines = f.readlines() lines = (line.strip() for line in", "type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True)) @click.argument('output_filename', default='citation_keys', type=click.Path(file_okay=True, dir_okay=False, writable=True)) @click.option('-v/-V', '--verbose/--no-verbose', default=False) def", "if verbose: print(\"Found {} citations in {}\".format(len(citations), aux_filename)) with open(output_filename, 'wt') as f:", "in lines: for c in line.split(','): citations.add(c) if verbose: print(\"Found {} citations in", "in {}\".format(len(citations), aux_filename)) with open(output_filename, 'wt') as f: f.write('\\n'.join(sorted(citations))) if __name__ == '__main__':", "lines = f.readlines() lines = (line.strip() for line in lines if line.startswith(r'\\citation')) lines", "citations = set() for line in lines: for c in line.split(','): citations.add(c) if", "= (line.strip() for line in lines if line.startswith(r'\\citation')) lines = (line[len('\\citation{'):-1] for line", "main(aux_filename, output_filename, verbose): with open(aux_filename) as f: lines = f.readlines() lines = (line.strip()", "f: lines = f.readlines() lines = (line.strip() for line in lines if line.startswith(r'\\citation'))", "{} citations in {}\".format(len(citations), aux_filename)) with open(output_filename, 'wt') as f: f.write('\\n'.join(sorted(citations))) if __name__", "@click.command() @click.argument('aux_filename', default='ms.aux', type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True)) @click.argument('output_filename', default='citation_keys', type=click.Path(file_okay=True, dir_okay=False, writable=True)) @click.option('-v/-V',", "lines) citations = set() for line in lines: for c in line.split(','): citations.add(c)", "lines = (line[len('\\citation{'):-1] for line in lines) citations = set() for line 
in", "print(\"Found {} citations in {}\".format(len(citations), aux_filename)) with open(output_filename, 'wt') as f: f.write('\\n'.join(sorted(citations))) if", "open(aux_filename) as f: lines = f.readlines() lines = (line.strip() for line in lines", "{}\".format(len(citations), aux_filename)) with open(output_filename, 'wt') as f: f.write('\\n'.join(sorted(citations))) if __name__ == '__main__': main()", "readable=True)) @click.argument('output_filename', default='citation_keys', type=click.Path(file_okay=True, dir_okay=False, writable=True)) @click.option('-v/-V', '--verbose/--no-verbose', default=False) def main(aux_filename, output_filename, verbose):", "line in lines: for c in line.split(','): citations.add(c) if verbose: print(\"Found {} citations", "line in lines if line.startswith(r'\\citation')) lines = (line[len('\\citation{'):-1] for line in lines) citations", "for line in lines: for c in line.split(','): citations.add(c) if verbose: print(\"Found {}", "default='ms.aux', type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True)) @click.argument('output_filename', default='citation_keys', type=click.Path(file_okay=True, dir_okay=False, writable=True)) @click.option('-v/-V', '--verbose/--no-verbose', default=False)", "@click.argument('output_filename', default='citation_keys', type=click.Path(file_okay=True, dir_okay=False, writable=True)) @click.option('-v/-V', '--verbose/--no-verbose', default=False) def main(aux_filename, output_filename, verbose): with", "(line.strip() for line in lines if line.startswith(r'\\citation')) lines = (line[len('\\citation{'):-1] for line in", "def main(aux_filename, output_filename, verbose): with open(aux_filename) as f: lines = f.readlines() lines =", "lines if line.startswith(r'\\citation')) lines = (line[len('\\citation{'):-1] for line in lines) citations = set()", "in lines) citations = set() for line in lines: for c in line.split(','):", "citations.add(c) if verbose: print(\"Found {} citations in 
{}\".format(len(citations), aux_filename)) with open(output_filename, 'wt') as", "lines = (line.strip() for line in lines if line.startswith(r'\\citation')) lines = (line[len('\\citation{'):-1] for", "if line.startswith(r'\\citation')) lines = (line[len('\\citation{'):-1] for line in lines) citations = set() for", "default='citation_keys', type=click.Path(file_okay=True, dir_okay=False, writable=True)) @click.option('-v/-V', '--verbose/--no-verbose', default=False) def main(aux_filename, output_filename, verbose): with open(aux_filename)", "citations in {}\".format(len(citations), aux_filename)) with open(output_filename, 'wt') as f: f.write('\\n'.join(sorted(citations))) if __name__ ==", "@click.option('-v/-V', '--verbose/--no-verbose', default=False) def main(aux_filename, output_filename, verbose): with open(aux_filename) as f: lines =", "for line in lines) citations = set() for line in lines: for c", "line.split(','): citations.add(c) if verbose: print(\"Found {} citations in {}\".format(len(citations), aux_filename)) with open(output_filename, 'wt')", "set() for line in lines: for c in line.split(','): citations.add(c) if verbose: print(\"Found", "default=False) def main(aux_filename, output_filename, verbose): with open(aux_filename) as f: lines = f.readlines() lines", "writable=True)) @click.option('-v/-V', '--verbose/--no-verbose', default=False) def main(aux_filename, output_filename, verbose): with open(aux_filename) as f: lines" ]
[ "import multiprocessing as mp sys.path.append(os.getcwd()+\"/source\") import ProGED as pg import warnings #warnings.filterwarnings(\"ignore\") np.random.seed(0)", "as pd import sys import os import pickle import multiprocessing as mp sys.path.append(os.getcwd()+\"/source\")", "numpy as np import pandas as pd import sys import os import pickle", "sys.path.append(os.getcwd()+\"/source\") import ProGED as pg import warnings #warnings.filterwarnings(\"ignore\") np.random.seed(0) if __name__ == \"__main__\":", "= \"source/FeynmanEquations.csv\" reference = pd.read_csv(eqfile) print(\"eqN: \" + str(eqN) + \", file: \"", "\"source/FeynmanEquations.csv\" reference = pd.read_csv(eqfile) print(\"eqN: \" + str(eqN) + \", file: \" +", "7 11:22:45 2021 @author: jureb \"\"\" import numpy as np import pandas as", "int(sys.argv[1]) modelsfile = sys.argv[2] processN = int(sys.argv[3]) eqfile = \"source/FeynmanEquations.csv\" reference = pd.read_csv(eqfile)", "processN = int(sys.argv[3]) eqfile = \"source/FeynmanEquations.csv\" reference = pd.read_csv(eqfile) print(\"eqN: \" + str(eqN)", "__name__ == \"__main__\": datadir = \"\" eqN = int(sys.argv[1]) modelsfile = sys.argv[2] processN", "multiprocessing as mp sys.path.append(os.getcwd()+\"/source\") import ProGED as pg import warnings #warnings.filterwarnings(\"ignore\") np.random.seed(0) if", "coding: utf-8 -*- \"\"\" Created on Thu Oct 7 11:22:45 2021 @author: jureb", "\"\"\" import numpy as np import pandas as pd import sys import os", "== \"__main__\": datadir = \"\" eqN = int(sys.argv[1]) modelsfile = sys.argv[2] processN =", "reference = pd.read_csv(eqfile) print(\"eqN: \" + str(eqN) + \", file: \" + reference[\"Filename\"][eqN])", "2021 @author: jureb \"\"\" import numpy as np import pandas as pd import", "\"rb\") as file: models = pickle.load(file) pool = mp.Pool(processN) print(\"--Fitting models\") models =", "data[sampleind], target_variable_index=-1, pool_map = pool.map, verbosity = 1) print(\"--Exporting results\") 
with open(\"results/\" +", "-*- coding: utf-8 -*- \"\"\" Created on Thu Oct 7 11:22:45 2021 @author:", "import pickle import multiprocessing as mp sys.path.append(os.getcwd()+\"/source\") import ProGED as pg import warnings", "pool_map = pool.map, verbosity = 1) print(\"--Exporting results\") with open(\"results/\" + modelsfile.split(\".\")[0] +", "as np import pandas as pd import sys import os import pickle import", "# -*- coding: utf-8 -*- \"\"\" Created on Thu Oct 7 11:22:45 2021", "np import pandas as pd import sys import os import pickle import multiprocessing", "target_variable_index=-1, pool_map = pool.map, verbosity = 1) print(\"--Exporting results\") with open(\"results/\" + modelsfile.split(\".\")[0]", "\", file: \" + reference[\"Filename\"][eqN]) data = np.loadtxt(datadir + reference[\"Filename\"][eqN]) sampleind = np.random.randint(0,10**6,1000)", "= mp.Pool(processN) print(\"--Fitting models\") models = pg.fit_models(models, data[sampleind], target_variable_index=-1, pool_map = pool.map, verbosity", "pg import warnings #warnings.filterwarnings(\"ignore\") np.random.seed(0) if __name__ == \"__main__\": datadir = \"\" eqN", "pool.map, verbosity = 1) print(\"--Exporting results\") with open(\"results/\" + modelsfile.split(\".\")[0] + \"_fit.models\", \"wb\")", "mp.Pool(processN) print(\"--Fitting models\") models = pg.fit_models(models, data[sampleind], target_variable_index=-1, pool_map = pool.map, verbosity =", "= np.loadtxt(datadir + reference[\"Filename\"][eqN]) sampleind = np.random.randint(0,10**6,1000) print(\"--Loading models\") with open(modelsfile, \"rb\") as", "= \"\" eqN = int(sys.argv[1]) modelsfile = sys.argv[2] processN = int(sys.argv[3]) eqfile =", "#warnings.filterwarnings(\"ignore\") np.random.seed(0) if __name__ == \"__main__\": datadir = \"\" eqN = int(sys.argv[1]) modelsfile", "= pg.fit_models(models, data[sampleind], target_variable_index=-1, pool_map = pool.map, verbosity = 1) print(\"--Exporting results\") with", "\"__main__\": 
datadir = \"\" eqN = int(sys.argv[1]) modelsfile = sys.argv[2] processN = int(sys.argv[3])", "sampleind = np.random.randint(0,10**6,1000) print(\"--Loading models\") with open(modelsfile, \"rb\") as file: models = pickle.load(file)", "datadir = \"\" eqN = int(sys.argv[1]) modelsfile = sys.argv[2] processN = int(sys.argv[3]) eqfile", "reference[\"Filename\"][eqN]) data = np.loadtxt(datadir + reference[\"Filename\"][eqN]) sampleind = np.random.randint(0,10**6,1000) print(\"--Loading models\") with open(modelsfile,", "pickle import multiprocessing as mp sys.path.append(os.getcwd()+\"/source\") import ProGED as pg import warnings #warnings.filterwarnings(\"ignore\")", "verbosity = 1) print(\"--Exporting results\") with open(\"results/\" + modelsfile.split(\".\")[0] + \"_fit.models\", \"wb\") as", "<reponame>smeznar/ProGED<filename>utils/estimation_for_grid_Feynman_database.py # -*- coding: utf-8 -*- \"\"\" Created on Thu Oct 7 11:22:45", "eqfile = \"source/FeynmanEquations.csv\" reference = pd.read_csv(eqfile) print(\"eqN: \" + str(eqN) + \", file:", "1) print(\"--Exporting results\") with open(\"results/\" + modelsfile.split(\".\")[0] + \"_fit.models\", \"wb\") as file: pickle.dump(models,", "eqN = int(sys.argv[1]) modelsfile = sys.argv[2] processN = int(sys.argv[3]) eqfile = \"source/FeynmanEquations.csv\" reference", "pool = mp.Pool(processN) print(\"--Fitting models\") models = pg.fit_models(models, data[sampleind], target_variable_index=-1, pool_map = pool.map,", "str(eqN) + \", file: \" + reference[\"Filename\"][eqN]) data = np.loadtxt(datadir + reference[\"Filename\"][eqN]) sampleind", "\" + str(eqN) + \", file: \" + reference[\"Filename\"][eqN]) data = np.loadtxt(datadir +", "with open(modelsfile, \"rb\") as file: models = pickle.load(file) pool = mp.Pool(processN) print(\"--Fitting models\")", "import numpy as np import pandas as pd import sys import os import", "int(sys.argv[3]) eqfile = \"source/FeynmanEquations.csv\" reference = pd.read_csv(eqfile) 
print(\"eqN: \" + str(eqN) + \",", "\"\" eqN = int(sys.argv[1]) modelsfile = sys.argv[2] processN = int(sys.argv[3]) eqfile = \"source/FeynmanEquations.csv\"", "= np.random.randint(0,10**6,1000) print(\"--Loading models\") with open(modelsfile, \"rb\") as file: models = pickle.load(file) pool", "pickle.load(file) pool = mp.Pool(processN) print(\"--Fitting models\") models = pg.fit_models(models, data[sampleind], target_variable_index=-1, pool_map =", "pandas as pd import sys import os import pickle import multiprocessing as mp", "+ reference[\"Filename\"][eqN]) sampleind = np.random.randint(0,10**6,1000) print(\"--Loading models\") with open(modelsfile, \"rb\") as file: models", "file: \" + reference[\"Filename\"][eqN]) data = np.loadtxt(datadir + reference[\"Filename\"][eqN]) sampleind = np.random.randint(0,10**6,1000) print(\"--Loading", "import sys import os import pickle import multiprocessing as mp sys.path.append(os.getcwd()+\"/source\") import ProGED", "= int(sys.argv[1]) modelsfile = sys.argv[2] processN = int(sys.argv[3]) eqfile = \"source/FeynmanEquations.csv\" reference =", "models = pg.fit_models(models, data[sampleind], target_variable_index=-1, pool_map = pool.map, verbosity = 1) print(\"--Exporting results\")", "open(modelsfile, \"rb\") as file: models = pickle.load(file) pool = mp.Pool(processN) print(\"--Fitting models\") models", "jureb \"\"\" import numpy as np import pandas as pd import sys import", "print(\"--Exporting results\") with open(\"results/\" + modelsfile.split(\".\")[0] + \"_fit.models\", \"wb\") as file: pickle.dump(models, file)", "= 1) print(\"--Exporting results\") with open(\"results/\" + modelsfile.split(\".\")[0] + \"_fit.models\", \"wb\") as file:", "modelsfile = sys.argv[2] processN = int(sys.argv[3]) eqfile = \"source/FeynmanEquations.csv\" reference = pd.read_csv(eqfile) print(\"eqN:", "+ reference[\"Filename\"][eqN]) data = np.loadtxt(datadir + reference[\"Filename\"][eqN]) sampleind = np.random.randint(0,10**6,1000) 
print(\"--Loading models\") with", "sys import os import pickle import multiprocessing as mp sys.path.append(os.getcwd()+\"/source\") import ProGED as", "as pg import warnings #warnings.filterwarnings(\"ignore\") np.random.seed(0) if __name__ == \"__main__\": datadir = \"\"", "as file: models = pickle.load(file) pool = mp.Pool(processN) print(\"--Fitting models\") models = pg.fit_models(models,", "@author: jureb \"\"\" import numpy as np import pandas as pd import sys", "as mp sys.path.append(os.getcwd()+\"/source\") import ProGED as pg import warnings #warnings.filterwarnings(\"ignore\") np.random.seed(0) if __name__", "= pickle.load(file) pool = mp.Pool(processN) print(\"--Fitting models\") models = pg.fit_models(models, data[sampleind], target_variable_index=-1, pool_map", "-*- \"\"\" Created on Thu Oct 7 11:22:45 2021 @author: jureb \"\"\" import", "import os import pickle import multiprocessing as mp sys.path.append(os.getcwd()+\"/source\") import ProGED as pg", "on Thu Oct 7 11:22:45 2021 @author: jureb \"\"\" import numpy as np", "pg.fit_models(models, data[sampleind], target_variable_index=-1, pool_map = pool.map, verbosity = 1) print(\"--Exporting results\") with open(\"results/\"", "warnings #warnings.filterwarnings(\"ignore\") np.random.seed(0) if __name__ == \"__main__\": datadir = \"\" eqN = int(sys.argv[1])", "ProGED as pg import warnings #warnings.filterwarnings(\"ignore\") np.random.seed(0) if __name__ == \"__main__\": datadir =", "pd.read_csv(eqfile) print(\"eqN: \" + str(eqN) + \", file: \" + reference[\"Filename\"][eqN]) data =", "Oct 7 11:22:45 2021 @author: jureb \"\"\" import numpy as np import pandas", "np.random.randint(0,10**6,1000) print(\"--Loading models\") with open(modelsfile, \"rb\") as file: models = pickle.load(file) pool =", "file: models = pickle.load(file) pool = mp.Pool(processN) print(\"--Fitting models\") models = pg.fit_models(models, data[sampleind],", "print(\"--Fitting models\") models = pg.fit_models(models, 
data[sampleind], target_variable_index=-1, pool_map = pool.map, verbosity = 1)", "utf-8 -*- \"\"\" Created on Thu Oct 7 11:22:45 2021 @author: jureb \"\"\"", "\"\"\" Created on Thu Oct 7 11:22:45 2021 @author: jureb \"\"\" import numpy", "data = np.loadtxt(datadir + reference[\"Filename\"][eqN]) sampleind = np.random.randint(0,10**6,1000) print(\"--Loading models\") with open(modelsfile, \"rb\")", "sys.argv[2] processN = int(sys.argv[3]) eqfile = \"source/FeynmanEquations.csv\" reference = pd.read_csv(eqfile) print(\"eqN: \" +", "import pandas as pd import sys import os import pickle import multiprocessing as", "reference[\"Filename\"][eqN]) sampleind = np.random.randint(0,10**6,1000) print(\"--Loading models\") with open(modelsfile, \"rb\") as file: models =", "11:22:45 2021 @author: jureb \"\"\" import numpy as np import pandas as pd", "= int(sys.argv[3]) eqfile = \"source/FeynmanEquations.csv\" reference = pd.read_csv(eqfile) print(\"eqN: \" + str(eqN) +", "models = pickle.load(file) pool = mp.Pool(processN) print(\"--Fitting models\") models = pg.fit_models(models, data[sampleind], target_variable_index=-1,", "np.loadtxt(datadir + reference[\"Filename\"][eqN]) sampleind = np.random.randint(0,10**6,1000) print(\"--Loading models\") with open(modelsfile, \"rb\") as file:", "\" + reference[\"Filename\"][eqN]) data = np.loadtxt(datadir + reference[\"Filename\"][eqN]) sampleind = np.random.randint(0,10**6,1000) print(\"--Loading models\")", "= pool.map, verbosity = 1) print(\"--Exporting results\") with open(\"results/\" + modelsfile.split(\".\")[0] + \"_fit.models\",", "+ \", file: \" + reference[\"Filename\"][eqN]) data = np.loadtxt(datadir + reference[\"Filename\"][eqN]) sampleind =", "Created on Thu Oct 7 11:22:45 2021 @author: jureb \"\"\" import numpy as", "import warnings #warnings.filterwarnings(\"ignore\") np.random.seed(0) if __name__ == \"__main__\": datadir = \"\" eqN =", "print(\"--Loading models\") with open(modelsfile, \"rb\") as file: models = 
pickle.load(file) pool = mp.Pool(processN)", "Thu Oct 7 11:22:45 2021 @author: jureb \"\"\" import numpy as np import", "pd import sys import os import pickle import multiprocessing as mp sys.path.append(os.getcwd()+\"/source\") import", "models\") with open(modelsfile, \"rb\") as file: models = pickle.load(file) pool = mp.Pool(processN) print(\"--Fitting", "if __name__ == \"__main__\": datadir = \"\" eqN = int(sys.argv[1]) modelsfile = sys.argv[2]", "np.random.seed(0) if __name__ == \"__main__\": datadir = \"\" eqN = int(sys.argv[1]) modelsfile =", "models\") models = pg.fit_models(models, data[sampleind], target_variable_index=-1, pool_map = pool.map, verbosity = 1) print(\"--Exporting", "os import pickle import multiprocessing as mp sys.path.append(os.getcwd()+\"/source\") import ProGED as pg import", "print(\"eqN: \" + str(eqN) + \", file: \" + reference[\"Filename\"][eqN]) data = np.loadtxt(datadir", "import ProGED as pg import warnings #warnings.filterwarnings(\"ignore\") np.random.seed(0) if __name__ == \"__main__\": datadir", "= sys.argv[2] processN = int(sys.argv[3]) eqfile = \"source/FeynmanEquations.csv\" reference = pd.read_csv(eqfile) print(\"eqN: \"", "+ str(eqN) + \", file: \" + reference[\"Filename\"][eqN]) data = np.loadtxt(datadir + reference[\"Filename\"][eqN])", "= pd.read_csv(eqfile) print(\"eqN: \" + str(eqN) + \", file: \" + reference[\"Filename\"][eqN]) data", "mp sys.path.append(os.getcwd()+\"/source\") import ProGED as pg import warnings #warnings.filterwarnings(\"ignore\") np.random.seed(0) if __name__ ==" ]
[ "from collections import namedtuple from edward.models import ( Beta, Dirichlet, DirichletProcess, Gamma, MultivariateNormalDiag,", "namedtuple('FakeRV', ['support']) x = FakeRV(support='rational') with self.assertRaises(ValueError): y = ed.transform(x) if __name__ ==", "1.0) y = ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_nonnegative(self):", "import tensorflow as tf from collections import namedtuple from edward.models import ( Beta,", "= ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_real(self): with self.test_session():", "0.0), axis=0, keepdims=True) num_neg = np.sum((sample < 0.0), axis=0, keepdims=True) self.assertTrue((num_pos > 0).all())", "test_nonnegative(self): with self.test_session(): x = Gamma(1.0, 1.0) y = ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample", "1.0) y = ed.transform(x) self.assertIsInstance(y, Normal) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_multivariate_real(self):", "> 0).all()) def test_args(self): with self.test_session(): x = Normal(-100.0, 1.0) y = ed.transform(x,", "1.0) y = ed.transform(x, bijector=bijectors.Softplus()) sample = y.sample(10).eval() self.assertTrue((sample >= 0.0).all()) def test_01(self):", "y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_no_support(self): with self.test_session(): x = DirichletProcess(1.0, Normal(0.0, 1.0)) with", "tf from collections import namedtuple from edward.models import ( Beta, Dirichlet, DirichletProcess, Gamma,", "1.0) y = ed.transform(x, bijectors.Softplus()) sample = y.sample(10).eval() self.assertTrue((sample >= 0.0).all()) def test_kwargs(self):", "as np import tensorflow as tf from collections import namedtuple from edward.models import", "self.assertIsInstance(y, TransformedDistribution) sample = 
y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_simplex(self): with self.test_session(): x =", "self.assertSamplePosNeg(sample) def test_multivariate_real(self): with self.test_session(): x = MultivariateNormalDiag(tf.zeros(2), tf.ones(2)) y = ed.transform(x) sample", "Dirichlet([1.1, 1.2, 1.3, 1.4]) y = ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample = y.sample(10, seed=1).eval()", "y = ed.transform(x, bijector=bijectors.Softplus()) sample = y.sample(10).eval() self.assertTrue((sample >= 0.0).all()) def test_01(self): with", "> 0).all()) self.assertTrue((num_neg > 0).all()) def test_args(self): with self.test_session(): x = Normal(-100.0, 1.0)", "keepdims=True) num_neg = np.sum((sample < 0.0), axis=0, keepdims=True) self.assertTrue((num_pos > 0).all()) self.assertTrue((num_neg >", "x = Normal(0.0, 1.0) y = ed.transform(x) self.assertIsInstance(y, Normal) sample = y.sample(10, seed=1).eval()", "bijector=bijectors.Softplus()) sample = y.sample(10).eval() self.assertTrue((sample >= 0.0).all()) def test_01(self): with self.test_session(): x =", "import print_function import edward as ed import numpy as np import tensorflow as", "= y.sample(10).eval() self.assertTrue((sample >= 0.0).all()) def test_kwargs(self): with self.test_session(): x = Normal(-100.0, 1.0)", "np.sum((sample > 0.0), axis=0, keepdims=True) num_neg = np.sum((sample < 0.0), axis=0, keepdims=True) self.assertTrue((num_pos", "x = FakeRV(support='rational') with self.assertRaises(ValueError): y = ed.transform(x) if __name__ == '__main__': tf.test.main()", "y.sample(10).eval() self.assertTrue((sample >= 0.0).all()) def test_kwargs(self): with self.test_session(): x = Normal(-100.0, 1.0) y", "def test_kwargs(self): with self.test_session(): x = Normal(-100.0, 1.0) y = ed.transform(x, bijector=bijectors.Softplus()) sample", "with self.test_session(): x = Dirichlet([1.1, 1.2, 1.3, 1.4]) y = ed.transform(x) self.assertIsInstance(y, TransformedDistribution)", "= 
Dirichlet([1.1, 1.2, 1.3, 1.4]) y = ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample = y.sample(10,", "['support']) x = FakeRV(support='rational') with self.assertRaises(ValueError): y = ed.transform(x) if __name__ == '__main__':", "0).all()) def test_args(self): with self.test_session(): x = Normal(-100.0, 1.0) y = ed.transform(x, bijectors.Softplus())", "Beta(1.0, 1.0) y = ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def", "with self.test_session(): FakeRV = namedtuple('FakeRV', ['support']) x = FakeRV(support='rational') with self.assertRaises(ValueError): y =", "def test_real(self): with self.test_session(): x = Normal(0.0, 1.0) y = ed.transform(x) self.assertIsInstance(y, Normal)", "FakeRV = namedtuple('FakeRV', ['support']) x = FakeRV(support='rational') with self.assertRaises(ValueError): y = ed.transform(x) if", "with self.test_session(): x = Gamma(1.0, 1.0) y = ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample =", "self.test_session(): x = Gamma(1.0, 1.0) y = ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample = y.sample(10,", "from tensorflow.contrib.distributions import bijectors class test_transform_class(tf.test.TestCase): def assertSamplePosNeg(self, sample): num_pos = np.sum((sample >", "= ed.transform(x) self.assertIsInstance(y, Normal) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_multivariate_real(self): with self.test_session():", "with self.test_session(): x = DirichletProcess(1.0, Normal(0.0, 1.0)) with self.assertRaises(AttributeError): y = ed.transform(x) def", "x = Beta(1.0, 1.0) y = ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample = y.sample(10, seed=1).eval()", "test_01(self): with self.test_session(): x = Beta(1.0, 1.0) y = ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample", "def 
test_unhandled_support(self): with self.test_session(): FakeRV = namedtuple('FakeRV', ['support']) x = FakeRV(support='rational') with self.assertRaises(ValueError):", "TransformedDistribution) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_nonnegative(self): with self.test_session(): x = Gamma(1.0,", "0.0), axis=0, keepdims=True) self.assertTrue((num_pos > 0).all()) self.assertTrue((num_neg > 0).all()) def test_args(self): with self.test_session():", "import edward as ed import numpy as np import tensorflow as tf from", "self.test_session(): FakeRV = namedtuple('FakeRV', ['support']) x = FakeRV(support='rational') with self.assertRaises(ValueError): y = ed.transform(x)", "y.sample(10).eval() self.assertTrue((sample >= 0.0).all()) def test_01(self): with self.test_session(): x = Beta(1.0, 1.0) y", "import division from __future__ import print_function import edward as ed import numpy as", "= Normal(0.0, 1.0) y = ed.transform(x) self.assertIsInstance(y, Normal) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample)", "1.3, 1.4]) y = ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def", "y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_nonnegative(self): with self.test_session(): x = Gamma(1.0, 1.0) y =", "= y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_nonnegative(self): with self.test_session(): x = Gamma(1.0, 1.0) y", "self.test_session(): x = DirichletProcess(1.0, Normal(0.0, 1.0)) with self.assertRaises(AttributeError): y = ed.transform(x) def test_unhandled_support(self):", "Poisson, TransformedDistribution) from tensorflow.contrib.distributions import bijectors class test_transform_class(tf.test.TestCase): def assertSamplePosNeg(self, sample): num_pos =", "y = ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample = y.sample(10, seed=1).eval() 
self.assertSamplePosNeg(sample) def test_simplex(self): with", "= y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_simplex(self): with self.test_session(): x = Dirichlet([1.1, 1.2, 1.3,", "x = Dirichlet([1.1, 1.2, 1.3, 1.4]) y = ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample =", "y = ed.transform(x) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_no_support(self): with self.test_session(): x", "print_function import edward as ed import numpy as np import tensorflow as tf", "test_no_support(self): with self.test_session(): x = DirichletProcess(1.0, Normal(0.0, 1.0)) with self.assertRaises(AttributeError): y = ed.transform(x)", "= y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_real(self): with self.test_session(): x = Normal(0.0, 1.0) y", "self.assertSamplePosNeg(sample) def test_real(self): with self.test_session(): x = Normal(0.0, 1.0) y = ed.transform(x) self.assertIsInstance(y,", "< 0.0), axis=0, keepdims=True) self.assertTrue((num_pos > 0).all()) self.assertTrue((num_neg > 0).all()) def test_args(self): with", "edward as ed import numpy as np import tensorflow as tf from collections", "= ed.transform(x) def test_unhandled_support(self): with self.test_session(): FakeRV = namedtuple('FakeRV', ['support']) x = FakeRV(support='rational')", "absolute_import from __future__ import division from __future__ import print_function import edward as ed", "with self.test_session(): x = MultivariateNormalDiag(tf.zeros(2), tf.ones(2)) y = ed.transform(x) sample = y.sample(10, seed=1).eval()", "Normal(0.0, 1.0)) with self.assertRaises(AttributeError): y = ed.transform(x) def test_unhandled_support(self): with self.test_session(): FakeRV =", "np.sum((sample < 0.0), axis=0, keepdims=True) self.assertTrue((num_pos > 0).all()) self.assertTrue((num_neg > 0).all()) def test_args(self):", "seed=1).eval() self.assertSamplePosNeg(sample) def test_simplex(self): with self.test_session(): x 
= Dirichlet([1.1, 1.2, 1.3, 1.4]) y", "TransformedDistribution) from tensorflow.contrib.distributions import bijectors class test_transform_class(tf.test.TestCase): def assertSamplePosNeg(self, sample): num_pos = np.sum((sample", "num_neg = np.sum((sample < 0.0), axis=0, keepdims=True) self.assertTrue((num_pos > 0).all()) self.assertTrue((num_neg > 0).all())", "edward.models import ( Beta, Dirichlet, DirichletProcess, Gamma, MultivariateNormalDiag, Normal, Poisson, TransformedDistribution) from tensorflow.contrib.distributions", "x = Normal(-100.0, 1.0) y = ed.transform(x, bijectors.Softplus()) sample = y.sample(10).eval() self.assertTrue((sample >=", "MultivariateNormalDiag, Normal, Poisson, TransformedDistribution) from tensorflow.contrib.distributions import bijectors class test_transform_class(tf.test.TestCase): def assertSamplePosNeg(self, sample):", "TransformedDistribution) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_simplex(self): with self.test_session(): x = Dirichlet([1.1,", "0.0).all()) def test_kwargs(self): with self.test_session(): x = Normal(-100.0, 1.0) y = ed.transform(x, bijector=bijectors.Softplus())", "y = ed.transform(x) self.assertIsInstance(y, Normal) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_multivariate_real(self): with", "self.assertRaises(AttributeError): y = ed.transform(x) def test_unhandled_support(self): with self.test_session(): FakeRV = namedtuple('FakeRV', ['support']) x", "self.assertSamplePosNeg(sample) def test_no_support(self): with self.test_session(): x = DirichletProcess(1.0, Normal(0.0, 1.0)) with self.assertRaises(AttributeError): y", "collections import namedtuple from edward.models import ( Beta, Dirichlet, DirichletProcess, Gamma, MultivariateNormalDiag, Normal,", "from edward.models import ( Beta, Dirichlet, DirichletProcess, Gamma, MultivariateNormalDiag, Normal, Poisson, TransformedDistribution) from", "test_unhandled_support(self): with 
self.test_session(): FakeRV = namedtuple('FakeRV', ['support']) x = FakeRV(support='rational') with self.assertRaises(ValueError): y", "sample = y.sample(10).eval() self.assertTrue((sample >= 0.0).all()) def test_01(self): with self.test_session(): x = Beta(1.0,", "1.2, 1.3, 1.4]) y = ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample)", "division from __future__ import print_function import edward as ed import numpy as np", "= Normal(-100.0, 1.0) y = ed.transform(x, bijector=bijectors.Softplus()) sample = y.sample(10).eval() self.assertTrue((sample >= 0.0).all())", "= y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_no_support(self): with self.test_session(): x = DirichletProcess(1.0, Normal(0.0, 1.0))", "__future__ import division from __future__ import print_function import edward as ed import numpy", "axis=0, keepdims=True) self.assertTrue((num_pos > 0).all()) self.assertTrue((num_neg > 0).all()) def test_args(self): with self.test_session(): x", "test_multivariate_real(self): with self.test_session(): x = MultivariateNormalDiag(tf.zeros(2), tf.ones(2)) y = ed.transform(x) sample = y.sample(10,", "axis=0, keepdims=True) num_neg = np.sum((sample < 0.0), axis=0, keepdims=True) self.assertTrue((num_pos > 0).all()) self.assertTrue((num_neg", "self.assertTrue((num_neg > 0).all()) def test_args(self): with self.test_session(): x = Normal(-100.0, 1.0) y =", "def test_nonnegative(self): with self.test_session(): x = Gamma(1.0, 1.0) y = ed.transform(x) self.assertIsInstance(y, TransformedDistribution)", "with self.test_session(): x = Normal(-100.0, 1.0) y = ed.transform(x, bijectors.Softplus()) sample = y.sample(10).eval()", "self.assertIsInstance(y, TransformedDistribution) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_real(self): with self.test_session(): x =", "sample): num_pos = np.sum((sample > 0.0), axis=0, keepdims=True) num_neg = 
np.sum((sample < 0.0),", "0.0).all()) def test_01(self): with self.test_session(): x = Beta(1.0, 1.0) y = ed.transform(x) self.assertIsInstance(y,", "np import tensorflow as tf from collections import namedtuple from edward.models import (", "Normal, Poisson, TransformedDistribution) from tensorflow.contrib.distributions import bijectors class test_transform_class(tf.test.TestCase): def assertSamplePosNeg(self, sample): num_pos", "from __future__ import division from __future__ import print_function import edward as ed import", "tf.ones(2)) y = ed.transform(x) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_no_support(self): with self.test_session():", "= y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_multivariate_real(self): with self.test_session(): x = MultivariateNormalDiag(tf.zeros(2), tf.ones(2)) y", "self.assertSamplePosNeg(sample) def test_nonnegative(self): with self.test_session(): x = Gamma(1.0, 1.0) y = ed.transform(x) self.assertIsInstance(y,", "class test_transform_class(tf.test.TestCase): def assertSamplePosNeg(self, sample): num_pos = np.sum((sample > 0.0), axis=0, keepdims=True) num_neg", "Dirichlet, DirichletProcess, Gamma, MultivariateNormalDiag, Normal, Poisson, TransformedDistribution) from tensorflow.contrib.distributions import bijectors class test_transform_class(tf.test.TestCase):", "test_kwargs(self): with self.test_session(): x = Normal(-100.0, 1.0) y = ed.transform(x, bijector=bijectors.Softplus()) sample =", "= y.sample(10).eval() self.assertTrue((sample >= 0.0).all()) def test_01(self): with self.test_session(): x = Beta(1.0, 1.0)", ">= 0.0).all()) def test_01(self): with self.test_session(): x = Beta(1.0, 1.0) y = ed.transform(x)", "DirichletProcess(1.0, Normal(0.0, 1.0)) with self.assertRaises(AttributeError): y = ed.transform(x) def test_unhandled_support(self): with self.test_session(): FakeRV", "= Normal(-100.0, 1.0) y = ed.transform(x, bijectors.Softplus()) sample = 
y.sample(10).eval() self.assertTrue((sample >= 0.0).all())", "Normal(-100.0, 1.0) y = ed.transform(x, bijectors.Softplus()) sample = y.sample(10).eval() self.assertTrue((sample >= 0.0).all()) def", "assertSamplePosNeg(self, sample): num_pos = np.sum((sample > 0.0), axis=0, keepdims=True) num_neg = np.sum((sample <", "with self.test_session(): x = Normal(-100.0, 1.0) y = ed.transform(x, bijector=bijectors.Softplus()) sample = y.sample(10).eval()", "= Beta(1.0, 1.0) y = ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample)", "x = MultivariateNormalDiag(tf.zeros(2), tf.ones(2)) y = ed.transform(x) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def", "__future__ import print_function import edward as ed import numpy as np import tensorflow", "y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_multivariate_real(self): with self.test_session(): x = MultivariateNormalDiag(tf.zeros(2), tf.ones(2)) y =", "self.test_session(): x = MultivariateNormalDiag(tf.zeros(2), tf.ones(2)) y = ed.transform(x) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample)", "bijectors.Softplus()) sample = y.sample(10).eval() self.assertTrue((sample >= 0.0).all()) def test_kwargs(self): with self.test_session(): x =", "self.assertTrue((sample >= 0.0).all()) def test_01(self): with self.test_session(): x = Beta(1.0, 1.0) y =", "= np.sum((sample > 0.0), axis=0, keepdims=True) num_neg = np.sum((sample < 0.0), axis=0, keepdims=True)", "import namedtuple from edward.models import ( Beta, Dirichlet, DirichletProcess, Gamma, MultivariateNormalDiag, Normal, Poisson,", "ed.transform(x, bijectors.Softplus()) sample = y.sample(10).eval() self.assertTrue((sample >= 0.0).all()) def test_kwargs(self): with self.test_session(): x", "sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_simplex(self): with self.test_session(): x = Dirichlet([1.1, 1.2,", "= 
ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_nonnegative(self): with self.test_session():", "__future__ import absolute_import from __future__ import division from __future__ import print_function import edward", "with self.test_session(): x = Normal(0.0, 1.0) y = ed.transform(x) self.assertIsInstance(y, Normal) sample =", "Normal(0.0, 1.0) y = ed.transform(x) self.assertIsInstance(y, Normal) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def", "self.test_session(): x = Normal(-100.0, 1.0) y = ed.transform(x, bijectors.Softplus()) sample = y.sample(10).eval() self.assertTrue((sample", "ed.transform(x, bijector=bijectors.Softplus()) sample = y.sample(10).eval() self.assertTrue((sample >= 0.0).all()) def test_01(self): with self.test_session(): x", "( Beta, Dirichlet, DirichletProcess, Gamma, MultivariateNormalDiag, Normal, Poisson, TransformedDistribution) from tensorflow.contrib.distributions import bijectors", "self.test_session(): x = Normal(-100.0, 1.0) y = ed.transform(x, bijector=bijectors.Softplus()) sample = y.sample(10).eval() self.assertTrue((sample", ">= 0.0).all()) def test_kwargs(self): with self.test_session(): x = Normal(-100.0, 1.0) y = ed.transform(x,", "ed.transform(x) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_no_support(self): with self.test_session(): x = DirichletProcess(1.0,", "= ed.transform(x, bijectors.Softplus()) sample = y.sample(10).eval() self.assertTrue((sample >= 0.0).all()) def test_kwargs(self): with self.test_session():", "def test_no_support(self): with self.test_session(): x = DirichletProcess(1.0, Normal(0.0, 1.0)) with self.assertRaises(AttributeError): y =", "as ed import numpy as np import tensorflow as tf from collections import", "x = Gamma(1.0, 1.0) y = ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample = y.sample(10, seed=1).eval()", "with 
self.assertRaises(AttributeError): y = ed.transform(x) def test_unhandled_support(self): with self.test_session(): FakeRV = namedtuple('FakeRV', ['support'])", "y = ed.transform(x) def test_unhandled_support(self): with self.test_session(): FakeRV = namedtuple('FakeRV', ['support']) x =", "0).all()) self.assertTrue((num_neg > 0).all()) def test_args(self): with self.test_session(): x = Normal(-100.0, 1.0) y", "test_transform_class(tf.test.TestCase): def assertSamplePosNeg(self, sample): num_pos = np.sum((sample > 0.0), axis=0, keepdims=True) num_neg =", "seed=1).eval() self.assertSamplePosNeg(sample) def test_real(self): with self.test_session(): x = Normal(0.0, 1.0) y = ed.transform(x)", "tensorflow as tf from collections import namedtuple from edward.models import ( Beta, Dirichlet,", "num_pos = np.sum((sample > 0.0), axis=0, keepdims=True) num_neg = np.sum((sample < 0.0), axis=0,", "sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_real(self): with self.test_session(): x = Normal(0.0, 1.0)", "import ( Beta, Dirichlet, DirichletProcess, Gamma, MultivariateNormalDiag, Normal, Poisson, TransformedDistribution) from tensorflow.contrib.distributions import", "numpy as np import tensorflow as tf from collections import namedtuple from edward.models", "def test_simplex(self): with self.test_session(): x = Dirichlet([1.1, 1.2, 1.3, 1.4]) y = ed.transform(x)", "seed=1).eval() self.assertSamplePosNeg(sample) def test_no_support(self): with self.test_session(): x = DirichletProcess(1.0, Normal(0.0, 1.0)) with self.assertRaises(AttributeError):", "y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_simplex(self): with self.test_session(): x = Dirichlet([1.1, 1.2, 1.3, 1.4])", "keepdims=True) self.assertTrue((num_pos > 0).all()) self.assertTrue((num_neg > 0).all()) def test_args(self): with self.test_session(): x =", "test_real(self): with self.test_session(): x = Normal(0.0, 1.0) y = ed.transform(x) self.assertIsInstance(y, 
Normal) sample", "test_args(self): with self.test_session(): x = Normal(-100.0, 1.0) y = ed.transform(x, bijectors.Softplus()) sample =", "import absolute_import from __future__ import division from __future__ import print_function import edward as", "sample = y.sample(10).eval() self.assertTrue((sample >= 0.0).all()) def test_kwargs(self): with self.test_session(): x = Normal(-100.0,", "TransformedDistribution) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_real(self): with self.test_session(): x = Normal(0.0,", "ed import numpy as np import tensorflow as tf from collections import namedtuple", "as tf from collections import namedtuple from edward.models import ( Beta, Dirichlet, DirichletProcess,", "self.test_session(): x = Dirichlet([1.1, 1.2, 1.3, 1.4]) y = ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample", "self.assertIsInstance(y, Normal) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_multivariate_real(self): with self.test_session(): x =", "sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_multivariate_real(self): with self.test_session(): x = MultivariateNormalDiag(tf.zeros(2), tf.ones(2))", "= ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_simplex(self): with self.test_session():", "namedtuple from edward.models import ( Beta, Dirichlet, DirichletProcess, Gamma, MultivariateNormalDiag, Normal, Poisson, TransformedDistribution)", "test_simplex(self): with self.test_session(): x = Dirichlet([1.1, 1.2, 1.3, 1.4]) y = ed.transform(x) self.assertIsInstance(y,", "MultivariateNormalDiag(tf.zeros(2), tf.ones(2)) y = ed.transform(x) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_no_support(self): with", "y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_real(self): with self.test_session(): x = Normal(0.0, 
1.0) y =", "Beta, Dirichlet, DirichletProcess, Gamma, MultivariateNormalDiag, Normal, Poisson, TransformedDistribution) from tensorflow.contrib.distributions import bijectors class", "y = ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_real(self): with", "bijectors class test_transform_class(tf.test.TestCase): def assertSamplePosNeg(self, sample): num_pos = np.sum((sample > 0.0), axis=0, keepdims=True)", "ed.transform(x) def test_unhandled_support(self): with self.test_session(): FakeRV = namedtuple('FakeRV', ['support']) x = FakeRV(support='rational') with", "y = ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_nonnegative(self): with", "self.assertTrue((num_pos > 0).all()) self.assertTrue((num_neg > 0).all()) def test_args(self): with self.test_session(): x = Normal(-100.0,", "= ed.transform(x, bijector=bijectors.Softplus()) sample = y.sample(10).eval() self.assertTrue((sample >= 0.0).all()) def test_01(self): with self.test_session():", "self.assertIsInstance(y, TransformedDistribution) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_nonnegative(self): with self.test_session(): x =", "seed=1).eval() self.assertSamplePosNeg(sample) def test_nonnegative(self): with self.test_session(): x = Gamma(1.0, 1.0) y = ed.transform(x)", "from __future__ import absolute_import from __future__ import division from __future__ import print_function import", "Gamma(1.0, 1.0) y = ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def", "import numpy as np import tensorflow as tf from collections import namedtuple from", "DirichletProcess, Gamma, MultivariateNormalDiag, Normal, Poisson, TransformedDistribution) from tensorflow.contrib.distributions import bijectors class 
test_transform_class(tf.test.TestCase): def", "y = ed.transform(x, bijectors.Softplus()) sample = y.sample(10).eval() self.assertTrue((sample >= 0.0).all()) def test_kwargs(self): with", "= Gamma(1.0, 1.0) y = ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample)", "ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_simplex(self): with self.test_session(): x", "1.0) y = ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_simplex(self):", "def test_multivariate_real(self): with self.test_session(): x = MultivariateNormalDiag(tf.zeros(2), tf.ones(2)) y = ed.transform(x) sample =", "def test_01(self): with self.test_session(): x = Beta(1.0, 1.0) y = ed.transform(x) self.assertIsInstance(y, TransformedDistribution)", "= MultivariateNormalDiag(tf.zeros(2), tf.ones(2)) y = ed.transform(x) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_no_support(self):", "x = DirichletProcess(1.0, Normal(0.0, 1.0)) with self.assertRaises(AttributeError): y = ed.transform(x) def test_unhandled_support(self): with", "sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_nonnegative(self): with self.test_session(): x = Gamma(1.0, 1.0)", "tensorflow.contrib.distributions import bijectors class test_transform_class(tf.test.TestCase): def assertSamplePosNeg(self, sample): num_pos = np.sum((sample > 0.0),", "1.4]) y = ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_real(self):", "= namedtuple('FakeRV', ['support']) x = FakeRV(support='rational') with self.assertRaises(ValueError): y = ed.transform(x) if __name__", "= DirichletProcess(1.0, Normal(0.0, 1.0)) with 
self.assertRaises(AttributeError): y = ed.transform(x) def test_unhandled_support(self): with self.test_session():", "self.test_session(): x = Beta(1.0, 1.0) y = ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample = y.sample(10,", "from __future__ import print_function import edward as ed import numpy as np import", "1.0)) with self.assertRaises(AttributeError): y = ed.transform(x) def test_unhandled_support(self): with self.test_session(): FakeRV = namedtuple('FakeRV',", "self.test_session(): x = Normal(0.0, 1.0) y = ed.transform(x) self.assertIsInstance(y, Normal) sample = y.sample(10,", "ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_nonnegative(self): with self.test_session(): x", "sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_no_support(self): with self.test_session(): x = DirichletProcess(1.0, Normal(0.0,", "ed.transform(x) self.assertIsInstance(y, Normal) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_multivariate_real(self): with self.test_session(): x", "Normal(-100.0, 1.0) y = ed.transform(x, bijector=bijectors.Softplus()) sample = y.sample(10).eval() self.assertTrue((sample >= 0.0).all()) def", "with self.test_session(): x = Beta(1.0, 1.0) y = ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample =", "> 0.0), axis=0, keepdims=True) num_neg = np.sum((sample < 0.0), axis=0, keepdims=True) self.assertTrue((num_pos >", "= np.sum((sample < 0.0), axis=0, keepdims=True) self.assertTrue((num_pos > 0).all()) self.assertTrue((num_neg > 0).all()) def", "Gamma, MultivariateNormalDiag, Normal, Poisson, TransformedDistribution) from tensorflow.contrib.distributions import bijectors class test_transform_class(tf.test.TestCase): def assertSamplePosNeg(self,", "def assertSamplePosNeg(self, sample): num_pos = np.sum((sample > 0.0), axis=0, keepdims=True) num_neg = 
np.sum((sample", "self.assertTrue((sample >= 0.0).all()) def test_kwargs(self): with self.test_session(): x = Normal(-100.0, 1.0) y =", "x = Normal(-100.0, 1.0) y = ed.transform(x, bijector=bijectors.Softplus()) sample = y.sample(10).eval() self.assertTrue((sample >=", "self.assertSamplePosNeg(sample) def test_simplex(self): with self.test_session(): x = Dirichlet([1.1, 1.2, 1.3, 1.4]) y =", "seed=1).eval() self.assertSamplePosNeg(sample) def test_multivariate_real(self): with self.test_session(): x = MultivariateNormalDiag(tf.zeros(2), tf.ones(2)) y = ed.transform(x)", "= ed.transform(x) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_no_support(self): with self.test_session(): x =", "Normal) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_multivariate_real(self): with self.test_session(): x = MultivariateNormalDiag(tf.zeros(2),", "def test_args(self): with self.test_session(): x = Normal(-100.0, 1.0) y = ed.transform(x, bijectors.Softplus()) sample", "ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_real(self): with self.test_session(): x", "import bijectors class test_transform_class(tf.test.TestCase): def assertSamplePosNeg(self, sample): num_pos = np.sum((sample > 0.0), axis=0," ]
[]
[ "systems.commands.index import Command class Start(Command('server.start')): def exec(self): def start_server(server): self.data(\"Starting server\", str(server)) server.start()", "from systems.commands.index import Command class Start(Command('server.start')): def exec(self): def start_server(server): self.data(\"Starting server\", str(server))", "<reponame>dccs-tech/mcmi-cluster from systems.commands.index import Command class Start(Command('server.start')): def exec(self): def start_server(server): self.data(\"Starting server\",", "import Command class Start(Command('server.start')): def exec(self): def start_server(server): self.data(\"Starting server\", str(server)) server.start() self.run_list(self.server_instances,", "Command class Start(Command('server.start')): def exec(self): def start_server(server): self.data(\"Starting server\", str(server)) server.start() self.run_list(self.server_instances, start_server)" ]
[ "map sentence for all chapters, save results in beads files # book_mapper.beadMapBook(utils.Lang.FRA, utils.Lang.ENG,", "# aligner.alignChapter(utils.Lang.FRA, book_id, 1) # start at min, stop at max-1 # for", "chapter in range(7, 38): # aligner.alignChapter(utils.Lang.ENG, book_id, chapter) # for chapter in range(13,", "doMapping=False, debug=True) # book_mapper.mapBook(utils.Lang.FRA, utils.Lang.ENG, book_id, chapters=1, chapterToPrint=1) # map sentence for all", "in range(13, 38): # aligner.alignChapter(utils.Lang.FRA, book_id, chapter) for i in range(1, 38): r", "range(13, 38): # aligner.alignChapter(utils.Lang.FRA, book_id, chapter) for i in range(1, 38): r =", "# aligner.alignChapter(utils.Lang.FRA, book_id, chapter) for i in range(1, 38): r = aligner.findBoundaries('fra', book_id,", "# for chapter in range(13, 38): # aligner.alignChapter(utils.Lang.FRA, book_id, chapter) for i in", "range(7, 38): # aligner.alignChapter(utils.Lang.ENG, book_id, chapter) # for chapter in range(13, 38): #", "at max-1 # for chapter in range(7, 38): # aligner.alignChapter(utils.Lang.ENG, book_id, chapter) #", "from tr.libs.trans import book_mapper from tr.libs.trans import utils from tr.libs.speech import aligner book_id", "stop at max-1 # for chapter in range(7, 38): # aligner.alignChapter(utils.Lang.ENG, book_id, chapter)", "chapter) # for chapter in range(13, 38): # aligner.alignChapter(utils.Lang.FRA, book_id, chapter) for i", "in range(7, 38): # aligner.alignChapter(utils.Lang.ENG, book_id, chapter) # for chapter in range(13, 38):", "utils.Lang.ENG, book_id, 1, doMapping=False, debug=True) # book_mapper.mapBook(utils.Lang.FRA, utils.Lang.ENG, book_id, chapters=1, chapterToPrint=1) # map", "# start at min, stop at max-1 # for chapter in range(7, 38):", "book_id = '20000LeaguesUnderTheSea' book_id = 'AroundTheWorldIn80Days' # book_manager.downloadBook(book_id) # book_mapper.mapChapter(utils.Lang.FRA, utils.Lang.ENG, book_id, 1,", "i) print(\"Chapitre %s: %s\" % (i, r)) r = 
aligner.findBoundaries('eng', book_id, i) print(\"Chapter", "r = aligner.findBoundaries('fra', book_id, i) print(\"Chapitre %s: %s\" % (i, r)) r =", "chapterToPrint=1) # map sentence for all chapters, save results in beads files #", "book_id = 'AroundTheWorldIn80Days' # book_manager.downloadBook(book_id) # book_mapper.mapChapter(utils.Lang.FRA, utils.Lang.ENG, book_id, 1, doMapping=False, debug=True) #", "1, doMapping=False, debug=True) # book_mapper.mapBook(utils.Lang.FRA, utils.Lang.ENG, book_id, chapters=1, chapterToPrint=1) # map sentence for", "book_id, chapters=1, chapterToPrint=1) # map sentence for all chapters, save results in beads", "import utils from tr.libs.speech import aligner book_id = '20000LeaguesUnderTheSea' book_id = 'AroundTheWorldIn80Days' #", "min, stop at max-1 # for chapter in range(7, 38): # aligner.alignChapter(utils.Lang.ENG, book_id,", "tr.books import book_manager from tr.libs.trans import book_mapper from tr.libs.trans import utils from tr.libs.speech", "start at min, stop at max-1 # for chapter in range(7, 38): #", "beads files # book_mapper.beadMapBook(utils.Lang.FRA, utils.Lang.ENG, book_id) # speech # aligner.alignChapter(utils.Lang.ENG, book_id, 1) #", "utils.Lang.ENG, book_id) # speech # aligner.alignChapter(utils.Lang.ENG, book_id, 1) # aligner.alignChapter(utils.Lang.FRA, book_id, 1) #", "book_mapper.beadMapBook(utils.Lang.FRA, utils.Lang.ENG, book_id) # speech # aligner.alignChapter(utils.Lang.ENG, book_id, 1) # aligner.alignChapter(utils.Lang.FRA, book_id, 1)", "# for chapter in range(7, 38): # aligner.alignChapter(utils.Lang.ENG, book_id, chapter) # for chapter", "'20000LeaguesUnderTheSea' book_id = 'AroundTheWorldIn80Days' # book_manager.downloadBook(book_id) # book_mapper.mapChapter(utils.Lang.FRA, utils.Lang.ENG, book_id, 1, doMapping=False, debug=True)", "%s\" % (i, r)) r = aligner.findBoundaries('eng', book_id, i) print(\"Chapter %s: %s\" %", "range(1, 38): r = aligner.findBoundaries('fra', book_id, i) print(\"Chapitre 
%s: %s\" % (i, r))", "book_id, chapter) for i in range(1, 38): r = aligner.findBoundaries('fra', book_id, i) print(\"Chapitre", "tr.libs.trans import utils from tr.libs.speech import aligner book_id = '20000LeaguesUnderTheSea' book_id = 'AroundTheWorldIn80Days'", "(i, r)) r = aligner.findBoundaries('eng', book_id, i) print(\"Chapter %s: %s\" % (i, r))", "debug=True) # book_mapper.mapBook(utils.Lang.FRA, utils.Lang.ENG, book_id, chapters=1, chapterToPrint=1) # map sentence for all chapters,", "# book_mapper.mapChapter(utils.Lang.FRA, utils.Lang.ENG, book_id, 1, doMapping=False, debug=True) # book_mapper.mapBook(utils.Lang.FRA, utils.Lang.ENG, book_id, chapters=1, chapterToPrint=1)", "print(\"Chapitre %s: %s\" % (i, r)) r = aligner.findBoundaries('eng', book_id, i) print(\"Chapter %s:", "in range(1, 38): r = aligner.findBoundaries('fra', book_id, i) print(\"Chapitre %s: %s\" % (i,", "38): # aligner.alignChapter(utils.Lang.FRA, book_id, chapter) for i in range(1, 38): r = aligner.findBoundaries('fra',", "# aligner.alignChapter(utils.Lang.ENG, book_id, 1) # aligner.alignChapter(utils.Lang.FRA, book_id, 1) # start at min, stop", "sentence for all chapters, save results in beads files # book_mapper.beadMapBook(utils.Lang.FRA, utils.Lang.ENG, book_id)", "import book_manager from tr.libs.trans import book_mapper from tr.libs.trans import utils from tr.libs.speech import", "book_id) # speech # aligner.alignChapter(utils.Lang.ENG, book_id, 1) # aligner.alignChapter(utils.Lang.FRA, book_id, 1) # start", "book_id, 1) # aligner.alignChapter(utils.Lang.FRA, book_id, 1) # start at min, stop at max-1", "= aligner.findBoundaries('fra', book_id, i) print(\"Chapitre %s: %s\" % (i, r)) r = aligner.findBoundaries('eng',", "chapter in range(13, 38): # aligner.alignChapter(utils.Lang.FRA, book_id, chapter) for i in range(1, 38):", "i in range(1, 38): r = aligner.findBoundaries('fra', book_id, i) print(\"Chapitre %s: %s\" %", "= 'AroundTheWorldIn80Days' # 
book_manager.downloadBook(book_id) # book_mapper.mapChapter(utils.Lang.FRA, utils.Lang.ENG, book_id, 1, doMapping=False, debug=True) # book_mapper.mapBook(utils.Lang.FRA,", "import book_mapper from tr.libs.trans import utils from tr.libs.speech import aligner book_id = '20000LeaguesUnderTheSea'", "for all chapters, save results in beads files # book_mapper.beadMapBook(utils.Lang.FRA, utils.Lang.ENG, book_id) #", "book_id, 1) # start at min, stop at max-1 # for chapter in", "aligner.findBoundaries('fra', book_id, i) print(\"Chapitre %s: %s\" % (i, r)) r = aligner.findBoundaries('eng', book_id,", "from tr.libs.speech import aligner book_id = '20000LeaguesUnderTheSea' book_id = 'AroundTheWorldIn80Days' # book_manager.downloadBook(book_id) #", "# book_manager.downloadBook(book_id) # book_mapper.mapChapter(utils.Lang.FRA, utils.Lang.ENG, book_id, 1, doMapping=False, debug=True) # book_mapper.mapBook(utils.Lang.FRA, utils.Lang.ENG, book_id,", "chapter) for i in range(1, 38): r = aligner.findBoundaries('fra', book_id, i) print(\"Chapitre %s:", "# map sentence for all chapters, save results in beads files # book_mapper.beadMapBook(utils.Lang.FRA,", "# aligner.alignChapter(utils.Lang.ENG, book_id, chapter) # for chapter in range(13, 38): # aligner.alignChapter(utils.Lang.FRA, book_id,", "38): r = aligner.findBoundaries('fra', book_id, i) print(\"Chapitre %s: %s\" % (i, r)) r", "# book_mapper.beadMapBook(utils.Lang.FRA, utils.Lang.ENG, book_id) # speech # aligner.alignChapter(utils.Lang.ENG, book_id, 1) # aligner.alignChapter(utils.Lang.FRA, book_id,", "tr.libs.speech import aligner book_id = '20000LeaguesUnderTheSea' book_id = 'AroundTheWorldIn80Days' # book_manager.downloadBook(book_id) # book_mapper.mapChapter(utils.Lang.FRA,", "chapters=1, chapterToPrint=1) # map sentence for all chapters, save results in beads files", "1) # aligner.alignChapter(utils.Lang.FRA, book_id, 1) # start at min, stop at max-1 #", "from tr.books import book_manager from tr.libs.trans import 
book_mapper from tr.libs.trans import utils from", "for i in range(1, 38): r = aligner.findBoundaries('fra', book_id, i) print(\"Chapitre %s: %s\"", "book_id, i) print(\"Chapitre %s: %s\" % (i, r)) r = aligner.findBoundaries('eng', book_id, i)", "speech # aligner.alignChapter(utils.Lang.ENG, book_id, 1) # aligner.alignChapter(utils.Lang.FRA, book_id, 1) # start at min,", "results in beads files # book_mapper.beadMapBook(utils.Lang.FRA, utils.Lang.ENG, book_id) # speech # aligner.alignChapter(utils.Lang.ENG, book_id,", "<gh_stars>0 from tr.books import book_manager from tr.libs.trans import book_mapper from tr.libs.trans import utils", "book_manager.downloadBook(book_id) # book_mapper.mapChapter(utils.Lang.FRA, utils.Lang.ENG, book_id, 1, doMapping=False, debug=True) # book_mapper.mapBook(utils.Lang.FRA, utils.Lang.ENG, book_id, chapters=1,", "book_mapper from tr.libs.trans import utils from tr.libs.speech import aligner book_id = '20000LeaguesUnderTheSea' book_id", "in beads files # book_mapper.beadMapBook(utils.Lang.FRA, utils.Lang.ENG, book_id) # speech # aligner.alignChapter(utils.Lang.ENG, book_id, 1)", "aligner.alignChapter(utils.Lang.FRA, book_id, 1) # start at min, stop at max-1 # for chapter", "book_mapper.mapChapter(utils.Lang.FRA, utils.Lang.ENG, book_id, 1, doMapping=False, debug=True) # book_mapper.mapBook(utils.Lang.FRA, utils.Lang.ENG, book_id, chapters=1, chapterToPrint=1) #", "book_id, chapter) # for chapter in range(13, 38): # aligner.alignChapter(utils.Lang.FRA, book_id, chapter) for", "utils from tr.libs.speech import aligner book_id = '20000LeaguesUnderTheSea' book_id = 'AroundTheWorldIn80Days' # book_manager.downloadBook(book_id)", "aligner book_id = '20000LeaguesUnderTheSea' book_id = 'AroundTheWorldIn80Days' # book_manager.downloadBook(book_id) # book_mapper.mapChapter(utils.Lang.FRA, utils.Lang.ENG, book_id,", "# speech # aligner.alignChapter(utils.Lang.ENG, book_id, 1) # aligner.alignChapter(utils.Lang.FRA, book_id, 1) # start at", "1) # 
start at min, stop at max-1 # for chapter in range(7,", "aligner.alignChapter(utils.Lang.FRA, book_id, chapter) for i in range(1, 38): r = aligner.findBoundaries('fra', book_id, i)", "save results in beads files # book_mapper.beadMapBook(utils.Lang.FRA, utils.Lang.ENG, book_id) # speech # aligner.alignChapter(utils.Lang.ENG,", "%s: %s\" % (i, r)) r = aligner.findBoundaries('eng', book_id, i) print(\"Chapter %s: %s\"", "import aligner book_id = '20000LeaguesUnderTheSea' book_id = 'AroundTheWorldIn80Days' # book_manager.downloadBook(book_id) # book_mapper.mapChapter(utils.Lang.FRA, utils.Lang.ENG,", "utils.Lang.ENG, book_id, chapters=1, chapterToPrint=1) # map sentence for all chapters, save results in", "aligner.alignChapter(utils.Lang.ENG, book_id, 1) # aligner.alignChapter(utils.Lang.FRA, book_id, 1) # start at min, stop at", "chapters, save results in beads files # book_mapper.beadMapBook(utils.Lang.FRA, utils.Lang.ENG, book_id) # speech #", "at min, stop at max-1 # for chapter in range(7, 38): # aligner.alignChapter(utils.Lang.ENG,", "from tr.libs.trans import utils from tr.libs.speech import aligner book_id = '20000LeaguesUnderTheSea' book_id =", "for chapter in range(7, 38): # aligner.alignChapter(utils.Lang.ENG, book_id, chapter) # for chapter in", "all chapters, save results in beads files # book_mapper.beadMapBook(utils.Lang.FRA, utils.Lang.ENG, book_id) # speech", "book_mapper.mapBook(utils.Lang.FRA, utils.Lang.ENG, book_id, chapters=1, chapterToPrint=1) # map sentence for all chapters, save results", "max-1 # for chapter in range(7, 38): # aligner.alignChapter(utils.Lang.ENG, book_id, chapter) # for", "tr.libs.trans import book_mapper from tr.libs.trans import utils from tr.libs.speech import aligner book_id =", "% (i, r)) r = aligner.findBoundaries('eng', book_id, i) print(\"Chapter %s: %s\" % (i,", "= '20000LeaguesUnderTheSea' book_id = 'AroundTheWorldIn80Days' # book_manager.downloadBook(book_id) # book_mapper.mapChapter(utils.Lang.FRA, utils.Lang.ENG, 
book_id, 1, doMapping=False,", "38): # aligner.alignChapter(utils.Lang.ENG, book_id, chapter) # for chapter in range(13, 38): # aligner.alignChapter(utils.Lang.FRA,", "files # book_mapper.beadMapBook(utils.Lang.FRA, utils.Lang.ENG, book_id) # speech # aligner.alignChapter(utils.Lang.ENG, book_id, 1) # aligner.alignChapter(utils.Lang.FRA,", "for chapter in range(13, 38): # aligner.alignChapter(utils.Lang.FRA, book_id, chapter) for i in range(1,", "# book_mapper.mapBook(utils.Lang.FRA, utils.Lang.ENG, book_id, chapters=1, chapterToPrint=1) # map sentence for all chapters, save", "book_id, 1, doMapping=False, debug=True) # book_mapper.mapBook(utils.Lang.FRA, utils.Lang.ENG, book_id, chapters=1, chapterToPrint=1) # map sentence", "'AroundTheWorldIn80Days' # book_manager.downloadBook(book_id) # book_mapper.mapChapter(utils.Lang.FRA, utils.Lang.ENG, book_id, 1, doMapping=False, debug=True) # book_mapper.mapBook(utils.Lang.FRA, utils.Lang.ENG,", "aligner.alignChapter(utils.Lang.ENG, book_id, chapter) # for chapter in range(13, 38): # aligner.alignChapter(utils.Lang.FRA, book_id, chapter)", "book_manager from tr.libs.trans import book_mapper from tr.libs.trans import utils from tr.libs.speech import aligner" ]
[ "mu) * t.pow(std + 1e-8, -1) * (z - mu)).sum(1)) / \\ t.sqrt(t.abs(2", "= mu.unsqueeze(1).repeat(1, 15, 1) std_repeated = std.unsqueeze(1).repeat(1, 15, 1) z = z *", "* p_second[1] - 2 * p_first[1] + t.exp(p_first[1]) / (t.exp(p_second[1]) + 1e-8) +", "= Encoder() self.decoder = Decoder() def forward(self, input, z=None): \"\"\" :param input: an", "input, z=None): \"\"\" :param input: an Float tensor with shape of [batch_size, 1,", "@staticmethod def monte_carlo_divergence(z, mu, std, n): [batch_size, latent_size] = mu.size() log_p_z_x = VAE.normal_prob(z,", "mu.size() log_p_z_x = VAE.normal_prob(z, mu, std) log_p_z = VAE.normal_prob(z, Variable(t.zeros(batch_size, latent_size)), Variable(t.ones(batch_size, latent_size)))", "shape of [batch_size, latent_size] if sampling is performed :return: an Float tensor with", "1) z = z * std_repeated + mu_repeated z = z.view(batch_size * 15,", "/ n @staticmethod def normal_prob(z, mu, std): return t.exp(-0.5 * ((z - mu)", "0.5 * t.sum(2 * p_second[1] - 2 * p_first[1] + t.exp(p_first[1]) / (t.exp(p_second[1])", "estimation \"\"\" return 0.5 * t.sum(2 * p_second[1] - 2 * p_first[1] +", "mu, std): return t.exp(-0.5 * ((z - mu) * t.pow(std + 1e-8, -1)", "logvar): return (-0.5 * t.sum(logvar - t.pow(mu, 2) - t.exp(logvar) + 1, 1)).mean()", "result.view(-1, n).sum(1) / n @staticmethod def normal_prob(z, mu, std): return t.exp(-0.5 * ((z", "[batch_size, latent_size] = mu.size() std = t.exp(0.5 * logvar) z = Variable(t.randn([batch_size, 15,", "z * std_repeated + mu_repeated z = z.view(batch_size * 15, -1) return self.decoder(z),", "std.prod(1))) @staticmethod def divergence_with_prior(mu, logvar): return (-0.5 * t.sum(logvar - t.pow(mu, 2) -", "z: an Float tensor with shape of [batch_size, latent_size] if sampling is performed", "= self.encoder(input) [batch_size, latent_size] = mu.size() std = t.exp(0.5 * logvar) z =", "input): return self.encoder(input) def decode(self, input): return self.decoder(input) @staticmethod def 
monte_carlo_divergence(z, mu, std,", "import Decoder import math class VAE(nn.Module): def __init__(self): super(VAE, self).__init__() self.encoder = Encoder()", "tensor with shape of [batch_size, latent_size] if sampling is performed :return: an Float", "16], [batch_size, 16] \"\"\" mu, logvar = self.encoder(input) [batch_size, latent_size] = mu.size() std", "encode(self, input): return self.encoder(input) def decode(self, input): return self.decoder(input) @staticmethod def monte_carlo_divergence(z, mu,", "math class VAE(nn.Module): def __init__(self): super(VAE, self).__init__() self.encoder = Encoder() self.decoder = Decoder()", "+ 1e-8, -1) * (z - mu)).sum(1)) / \\ t.sqrt(t.abs(2 * math.pi *", "[batch_size, latent_size] if sampling is performed :return: an Float tensor with shape of", "over latent variables :return: divirgence estimation \"\"\" return 0.5 * t.sum(2 * p_second[1]", "= mu.size() log_p_z_x = VAE.normal_prob(z, mu, std) log_p_z = VAE.normal_prob(z, Variable(t.zeros(batch_size, latent_size)), Variable(t.ones(batch_size,", "import torch as t import torch.nn as nn import torch.nn.functional as F from", "torch.autograd import Variable from model.encoder import Encoder from model.decoder import Decoder import math", "an Float tensor with shape of [batch_size, latent_size] if sampling is performed :return:", "input): return self.decoder(input) @staticmethod def monte_carlo_divergence(z, mu, std, n): [batch_size, latent_size] = mu.size()", "def normal_prob(z, mu, std): return t.exp(-0.5 * ((z - mu) * t.pow(std +", "from model.encoder import Encoder from model.decoder import Decoder import math class VAE(nn.Module): def", "* t.sum(logvar - t.pow(mu, 2) - t.exp(logvar) + 1, 1)).mean() @staticmethod def divergence_with_posterior(p_first,", "p_second[1] - 2 * p_first[1] + t.exp(p_first[1]) / (t.exp(p_second[1]) + 1e-8) + t.pow(p_second[0]", "import torch.nn.functional as F from torch.autograd import Variable from model.encoder import Encoder from", 
"divirgence estimation \"\"\" return 0.5 * t.sum(2 * p_second[1] - 2 * p_first[1]", "t.sum(2 * p_second[1] - 2 * p_first[1] + t.exp(p_first[1]) / (t.exp(p_second[1]) + 1e-8)", "log_p_z_x = VAE.normal_prob(z, mu, std) log_p_z = VAE.normal_prob(z, Variable(t.zeros(batch_size, latent_size)), Variable(t.ones(batch_size, latent_size))) result", "z def encode(self, input): return self.encoder(input) def decode(self, input): return self.decoder(input) @staticmethod def", "latent_size)), Variable(t.ones(batch_size, latent_size))) result = log_p_z_x - log_p_z return result.view(-1, n).sum(1) / n", "std): return t.exp(-0.5 * ((z - mu) * t.pow(std + 1e-8, -1) *", "* t.pow(std + 1e-8, -1) * (z - mu)).sum(1)) / \\ t.sqrt(t.abs(2 *", "@staticmethod def divergence_with_prior(mu, logvar): return (-0.5 * t.sum(logvar - t.pow(mu, 2) - t.exp(logvar)", ":params p_first, p_second: tuples with parameters of distribution over latent variables :return: divirgence", "latent_size])) if input.is_cuda: z = z.cuda() mu_repeated = mu.unsqueeze(1).repeat(1, 15, 1) std_repeated =", "= std.unsqueeze(1).repeat(1, 15, 1) z = z * std_repeated + mu_repeated z =", "mu, std, n): [batch_size, latent_size] = mu.size() log_p_z_x = VAE.normal_prob(z, mu, std) log_p_z", "p_first, p_second: tuples with parameters of distribution over latent variables :return: divirgence estimation", "of [batch_size, 1, 28, 28] :param z: an Float tensor with shape of", "* std_repeated + mu_repeated z = z.view(batch_size * 15, -1) return self.decoder(z), mu,", "self.encoder = Encoder() self.decoder = Decoder() def forward(self, input, z=None): \"\"\" :param input:", "as t import torch.nn as nn import torch.nn.functional as F from torch.autograd import", "logvar, z def encode(self, input): return self.encoder(input) def decode(self, input): return self.decoder(input) @staticmethod", ":return: an Float tensor with shape of [batch_size, 1, 28, 28], [batch_size, 16],", "= z.view(batch_size * 15, -1) return self.decoder(z), mu, 
logvar, z def encode(self, input):", "(z - mu)).sum(1)) / \\ t.sqrt(t.abs(2 * math.pi * std.prod(1))) @staticmethod def divergence_with_prior(mu,", "with parameters of distribution over latent variables :return: divirgence estimation \"\"\" return 0.5", "- t.exp(logvar) + 1, 1)).mean() @staticmethod def divergence_with_posterior(p_first, p_second): \"\"\" :params p_first, p_second:", "+ t.exp(p_first[1]) / (t.exp(p_second[1]) + 1e-8) + t.pow(p_second[0] - p_second[0], 2) / (t.exp(p_second[1])", "def __init__(self): super(VAE, self).__init__() self.encoder = Encoder() self.decoder = Decoder() def forward(self, input,", "an Float tensor with shape of [batch_size, 1, 28, 28], [batch_size, 16], [batch_size,", "- mu) * t.pow(std + 1e-8, -1) * (z - mu)).sum(1)) / \\", "VAE(nn.Module): def __init__(self): super(VAE, self).__init__() self.encoder = Encoder() self.decoder = Decoder() def forward(self,", "* logvar) z = Variable(t.randn([batch_size, 15, latent_size])) if input.is_cuda: z = z.cuda() mu_repeated", ":param input: an Float tensor with shape of [batch_size, 1, 28, 28] :param", "if sampling is performed :return: an Float tensor with shape of [batch_size, 1,", "decode(self, input): return self.decoder(input) @staticmethod def monte_carlo_divergence(z, mu, std, n): [batch_size, latent_size] =", "std.unsqueeze(1).repeat(1, 15, 1) z = z * std_repeated + mu_repeated z = z.view(batch_size", "torch as t import torch.nn as nn import torch.nn.functional as F from torch.autograd", "return (-0.5 * t.sum(logvar - t.pow(mu, 2) - t.exp(logvar) + 1, 1)).mean() @staticmethod", "model.decoder import Decoder import math class VAE(nn.Module): def __init__(self): super(VAE, self).__init__() self.encoder =", "1, 1)).mean() @staticmethod def divergence_with_posterior(p_first, p_second): \"\"\" :params p_first, p_second: tuples with parameters", "(-0.5 * t.sum(logvar - t.pow(mu, 2) - t.exp(logvar) + 1, 1)).mean() @staticmethod def", "torch.nn as nn import torch.nn.functional as F from 
torch.autograd import Variable from model.encoder", "Variable from model.encoder import Encoder from model.decoder import Decoder import math class VAE(nn.Module):", "\"\"\" mu, logvar = self.encoder(input) [batch_size, latent_size] = mu.size() std = t.exp(0.5 *", "-1) * (z - mu)).sum(1)) / \\ t.sqrt(t.abs(2 * math.pi * std.prod(1))) @staticmethod", "log_p_z return result.view(-1, n).sum(1) / n @staticmethod def normal_prob(z, mu, std): return t.exp(-0.5", "import torch.nn as nn import torch.nn.functional as F from torch.autograd import Variable from", "return self.encoder(input) def decode(self, input): return self.decoder(input) @staticmethod def monte_carlo_divergence(z, mu, std, n):", "divergence_with_posterior(p_first, p_second): \"\"\" :params p_first, p_second: tuples with parameters of distribution over latent", "= t.exp(0.5 * logvar) z = Variable(t.randn([batch_size, 15, latent_size])) if input.is_cuda: z =", "p_second: tuples with parameters of distribution over latent variables :return: divirgence estimation \"\"\"", "is performed :return: an Float tensor with shape of [batch_size, 1, 28, 28],", "1) std_repeated = std.unsqueeze(1).repeat(1, 15, 1) z = z * std_repeated + mu_repeated", "Variable(t.zeros(batch_size, latent_size)), Variable(t.ones(batch_size, latent_size))) result = log_p_z_x - log_p_z return result.view(-1, n).sum(1) /", "return self.decoder(z), mu, logvar, z def encode(self, input): return self.encoder(input) def decode(self, input):", "model.encoder import Encoder from model.decoder import Decoder import math class VAE(nn.Module): def __init__(self):", "shape of [batch_size, 1, 28, 28], [batch_size, 16], [batch_size, 16] \"\"\" mu, logvar", "latent variables :return: divirgence estimation \"\"\" return 0.5 * t.sum(2 * p_second[1] -", "as F from torch.autograd import Variable from model.encoder import Encoder from model.decoder import", "15, latent_size])) if input.is_cuda: z = z.cuda() mu_repeated = mu.unsqueeze(1).repeat(1, 15, 1) 
std_repeated", "return result.view(-1, n).sum(1) / n @staticmethod def normal_prob(z, mu, std): return t.exp(-0.5 *", "t.pow(mu, 2) - t.exp(logvar) + 1, 1)).mean() @staticmethod def divergence_with_posterior(p_first, p_second): \"\"\" :params", "/ \\ t.sqrt(t.abs(2 * math.pi * std.prod(1))) @staticmethod def divergence_with_prior(mu, logvar): return (-0.5", "tensor with shape of [batch_size, 1, 28, 28], [batch_size, 16], [batch_size, 16] \"\"\"", "- mu)).sum(1)) / \\ t.sqrt(t.abs(2 * math.pi * std.prod(1))) @staticmethod def divergence_with_prior(mu, logvar):", "* std.prod(1))) @staticmethod def divergence_with_prior(mu, logvar): return (-0.5 * t.sum(logvar - t.pow(mu, 2)", "std_repeated + mu_repeated z = z.view(batch_size * 15, -1) return self.decoder(z), mu, logvar,", "if input.is_cuda: z = z.cuda() mu_repeated = mu.unsqueeze(1).repeat(1, 15, 1) std_repeated = std.unsqueeze(1).repeat(1,", "def forward(self, input, z=None): \"\"\" :param input: an Float tensor with shape of", "input: an Float tensor with shape of [batch_size, 1, 28, 28] :param z:", "latent_size] = mu.size() std = t.exp(0.5 * logvar) z = Variable(t.randn([batch_size, 15, latent_size]))", "n).sum(1) / n @staticmethod def normal_prob(z, mu, std): return t.exp(-0.5 * ((z -", "@staticmethod def normal_prob(z, mu, std): return t.exp(-0.5 * ((z - mu) * t.pow(std", "* ((z - mu) * t.pow(std + 1e-8, -1) * (z - mu)).sum(1))", "def encode(self, input): return self.encoder(input) def decode(self, input): return self.decoder(input) @staticmethod def monte_carlo_divergence(z,", "z = z * std_repeated + mu_repeated z = z.view(batch_size * 15, -1)", "from model.decoder import Decoder import math class VAE(nn.Module): def __init__(self): super(VAE, self).__init__() self.encoder", "import math class VAE(nn.Module): def __init__(self): super(VAE, self).__init__() self.encoder = Encoder() self.decoder =", "import Variable from model.encoder import Encoder from model.decoder import Decoder import math class", 
"1)).mean() @staticmethod def divergence_with_posterior(p_first, p_second): \"\"\" :params p_first, p_second: tuples with parameters of", "performed :return: an Float tensor with shape of [batch_size, 1, 28, 28], [batch_size,", "= z.cuda() mu_repeated = mu.unsqueeze(1).repeat(1, 15, 1) std_repeated = std.unsqueeze(1).repeat(1, 15, 1) z", "z = z.cuda() mu_repeated = mu.unsqueeze(1).repeat(1, 15, 1) std_repeated = std.unsqueeze(1).repeat(1, 15, 1)", "def monte_carlo_divergence(z, mu, std, n): [batch_size, latent_size] = mu.size() log_p_z_x = VAE.normal_prob(z, mu,", ":param z: an Float tensor with shape of [batch_size, latent_size] if sampling is", "2 * p_first[1] + t.exp(p_first[1]) / (t.exp(p_second[1]) + 1e-8) + t.pow(p_second[0] - p_second[0],", "z = Variable(t.randn([batch_size, 15, latent_size])) if input.is_cuda: z = z.cuda() mu_repeated = mu.unsqueeze(1).repeat(1,", "mu_repeated = mu.unsqueeze(1).repeat(1, 15, 1) std_repeated = std.unsqueeze(1).repeat(1, 15, 1) z = z", "* p_first[1] + t.exp(p_first[1]) / (t.exp(p_second[1]) + 1e-8) + t.pow(p_second[0] - p_second[0], 2)", "16] \"\"\" mu, logvar = self.encoder(input) [batch_size, latent_size] = mu.size() std = t.exp(0.5", "__init__(self): super(VAE, self).__init__() self.encoder = Encoder() self.decoder = Decoder() def forward(self, input, z=None):", "[batch_size, 16], [batch_size, 16] \"\"\" mu, logvar = self.encoder(input) [batch_size, latent_size] = mu.size()", "VAE.normal_prob(z, Variable(t.zeros(batch_size, latent_size)), Variable(t.ones(batch_size, latent_size))) result = log_p_z_x - log_p_z return result.view(-1, n).sum(1)", "1e-8, -1) * (z - mu)).sum(1)) / \\ t.sqrt(t.abs(2 * math.pi * std.prod(1)))", "t.pow(std + 1e-8, -1) * (z - mu)).sum(1)) / \\ t.sqrt(t.abs(2 * math.pi", "of distribution over latent variables :return: divirgence estimation \"\"\" return 0.5 * t.sum(2", "class VAE(nn.Module): def __init__(self): super(VAE, self).__init__() self.encoder = Encoder() self.decoder = Decoder() def", "def 
divergence_with_prior(mu, logvar): return (-0.5 * t.sum(logvar - t.pow(mu, 2) - t.exp(logvar) +", "latent_size] = mu.size() log_p_z_x = VAE.normal_prob(z, mu, std) log_p_z = VAE.normal_prob(z, Variable(t.zeros(batch_size, latent_size)),", "\"\"\" :params p_first, p_second: tuples with parameters of distribution over latent variables :return:", "forward(self, input, z=None): \"\"\" :param input: an Float tensor with shape of [batch_size,", "1, 28, 28], [batch_size, 16], [batch_size, 16] \"\"\" mu, logvar = self.encoder(input) [batch_size,", "with shape of [batch_size, 1, 28, 28], [batch_size, 16], [batch_size, 16] \"\"\" mu,", "= VAE.normal_prob(z, Variable(t.zeros(batch_size, latent_size)), Variable(t.ones(batch_size, latent_size))) result = log_p_z_x - log_p_z return result.view(-1,", "import Encoder from model.decoder import Decoder import math class VAE(nn.Module): def __init__(self): super(VAE,", "variables :return: divirgence estimation \"\"\" return 0.5 * t.sum(2 * p_second[1] - 2", "self.decoder = Decoder() def forward(self, input, z=None): \"\"\" :param input: an Float tensor", "Float tensor with shape of [batch_size, 1, 28, 28] :param z: an Float", "shape of [batch_size, 1, 28, 28] :param z: an Float tensor with shape", "-1) return self.decoder(z), mu, logvar, z def encode(self, input): return self.encoder(input) def decode(self,", "@staticmethod def divergence_with_posterior(p_first, p_second): \"\"\" :params p_first, p_second: tuples with parameters of distribution", "Encoder() self.decoder = Decoder() def forward(self, input, z=None): \"\"\" :param input: an Float", "28, 28], [batch_size, 16], [batch_size, 16] \"\"\" mu, logvar = self.encoder(input) [batch_size, latent_size]", "std = t.exp(0.5 * logvar) z = Variable(t.randn([batch_size, 15, latent_size])) if input.is_cuda: z", "log_p_z = VAE.normal_prob(z, Variable(t.zeros(batch_size, latent_size)), Variable(t.ones(batch_size, latent_size))) result = log_p_z_x - log_p_z return", "+ 1, 1)).mean() 
@staticmethod def divergence_with_posterior(p_first, p_second): \"\"\" :params p_first, p_second: tuples with", "[batch_size, 1, 28, 28], [batch_size, 16], [batch_size, 16] \"\"\" mu, logvar = self.encoder(input)", "return 0.5 * t.sum(2 * p_second[1] - 2 * p_first[1] + t.exp(p_first[1]) /", "* t.sum(2 * p_second[1] - 2 * p_first[1] + t.exp(p_first[1]) / (t.exp(p_second[1]) +", "normal_prob(z, mu, std): return t.exp(-0.5 * ((z - mu) * t.pow(std + 1e-8,", "Variable(t.ones(batch_size, latent_size))) result = log_p_z_x - log_p_z return result.view(-1, n).sum(1) / n @staticmethod", "tensor with shape of [batch_size, 1, 28, 28] :param z: an Float tensor", "nn import torch.nn.functional as F from torch.autograd import Variable from model.encoder import Encoder", "z.view(batch_size * 15, -1) return self.decoder(z), mu, logvar, z def encode(self, input): return", "Decoder import math class VAE(nn.Module): def __init__(self): super(VAE, self).__init__() self.encoder = Encoder() self.decoder", "return self.decoder(input) @staticmethod def monte_carlo_divergence(z, mu, std, n): [batch_size, latent_size] = mu.size() log_p_z_x", "t.exp(logvar) + 1, 1)).mean() @staticmethod def divergence_with_posterior(p_first, p_second): \"\"\" :params p_first, p_second: tuples", "* (z - mu)).sum(1)) / \\ t.sqrt(t.abs(2 * math.pi * std.prod(1))) @staticmethod def", "of [batch_size, 1, 28, 28], [batch_size, 16], [batch_size, 16] \"\"\" mu, logvar =", "mu)).sum(1)) / \\ t.sqrt(t.abs(2 * math.pi * std.prod(1))) @staticmethod def divergence_with_prior(mu, logvar): return", "VAE.normal_prob(z, mu, std) log_p_z = VAE.normal_prob(z, Variable(t.zeros(batch_size, latent_size)), Variable(t.ones(batch_size, latent_size))) result = log_p_z_x", "z=None): \"\"\" :param input: an Float tensor with shape of [batch_size, 1, 28,", "28, 28] :param z: an Float tensor with shape of [batch_size, latent_size] if", "with shape of [batch_size, 1, 28, 28] :param z: an Float tensor with", "* 15, -1) return 
self.decoder(z), mu, logvar, z def encode(self, input): return self.encoder(input)", "std) log_p_z = VAE.normal_prob(z, Variable(t.zeros(batch_size, latent_size)), Variable(t.ones(batch_size, latent_size))) result = log_p_z_x - log_p_z", "distribution over latent variables :return: divirgence estimation \"\"\" return 0.5 * t.sum(2 *", "self.decoder(z), mu, logvar, z def encode(self, input): return self.encoder(input) def decode(self, input): return", "mu, logvar, z def encode(self, input): return self.encoder(input) def decode(self, input): return self.decoder(input)", "result = log_p_z_x - log_p_z return result.view(-1, n).sum(1) / n @staticmethod def normal_prob(z,", "t import torch.nn as nn import torch.nn.functional as F from torch.autograd import Variable", "t.exp(-0.5 * ((z - mu) * t.pow(std + 1e-8, -1) * (z -", "28], [batch_size, 16], [batch_size, 16] \"\"\" mu, logvar = self.encoder(input) [batch_size, latent_size] =", "mu.unsqueeze(1).repeat(1, 15, 1) std_repeated = std.unsqueeze(1).repeat(1, 15, 1) z = z * std_repeated", "\"\"\" return 0.5 * t.sum(2 * p_second[1] - 2 * p_first[1] + t.exp(p_first[1])", "with shape of [batch_size, latent_size] if sampling is performed :return: an Float tensor", "std, n): [batch_size, latent_size] = mu.size() log_p_z_x = VAE.normal_prob(z, mu, std) log_p_z =", "t.sqrt(t.abs(2 * math.pi * std.prod(1))) @staticmethod def divergence_with_prior(mu, logvar): return (-0.5 * t.sum(logvar", ":return: divirgence estimation \"\"\" return 0.5 * t.sum(2 * p_second[1] - 2 *", "self.encoder(input) def decode(self, input): return self.decoder(input) @staticmethod def monte_carlo_divergence(z, mu, std, n): [batch_size,", "mu.size() std = t.exp(0.5 * logvar) z = Variable(t.randn([batch_size, 15, latent_size])) if input.is_cuda:", "* math.pi * std.prod(1))) @staticmethod def divergence_with_prior(mu, logvar): return (-0.5 * t.sum(logvar -", "- log_p_z return result.view(-1, n).sum(1) / n @staticmethod def normal_prob(z, mu, std): return", 
"t.sum(logvar - t.pow(mu, 2) - t.exp(logvar) + 1, 1)).mean() @staticmethod def divergence_with_posterior(p_first, p_second):", "= log_p_z_x - log_p_z return result.view(-1, n).sum(1) / n @staticmethod def normal_prob(z, mu,", "p_second): \"\"\" :params p_first, p_second: tuples with parameters of distribution over latent variables", "log_p_z_x - log_p_z return result.view(-1, n).sum(1) / n @staticmethod def normal_prob(z, mu, std):", "def decode(self, input): return self.decoder(input) @staticmethod def monte_carlo_divergence(z, mu, std, n): [batch_size, latent_size]", "self.decoder(input) @staticmethod def monte_carlo_divergence(z, mu, std, n): [batch_size, latent_size] = mu.size() log_p_z_x =", "n @staticmethod def normal_prob(z, mu, std): return t.exp(-0.5 * ((z - mu) *", "divergence_with_prior(mu, logvar): return (-0.5 * t.sum(logvar - t.pow(mu, 2) - t.exp(logvar) + 1,", "mu_repeated z = z.view(batch_size * 15, -1) return self.decoder(z), mu, logvar, z def", "logvar) z = Variable(t.randn([batch_size, 15, latent_size])) if input.is_cuda: z = z.cuda() mu_repeated =", "as nn import torch.nn.functional as F from torch.autograd import Variable from model.encoder import", "[batch_size, 16] \"\"\" mu, logvar = self.encoder(input) [batch_size, latent_size] = mu.size() std =", "n): [batch_size, latent_size] = mu.size() log_p_z_x = VAE.normal_prob(z, mu, std) log_p_z = VAE.normal_prob(z,", "from torch.autograd import Variable from model.encoder import Encoder from model.decoder import Decoder import", "latent_size))) result = log_p_z_x - log_p_z return result.view(-1, n).sum(1) / n @staticmethod def", "15, -1) return self.decoder(z), mu, logvar, z def encode(self, input): return self.encoder(input) def", "t.exp(p_first[1]) / (t.exp(p_second[1]) + 1e-8) + t.pow(p_second[0] - p_second[0], 2) / (t.exp(p_second[1]) +", "/ (t.exp(p_second[1]) + 1e-8) + t.pow(p_second[0] - p_second[0], 2) / (t.exp(p_second[1]) + 1e-8)", "an Float tensor with shape of [batch_size, 1, 28, 28] 
:param z: an", "self).__init__() self.encoder = Encoder() self.decoder = Decoder() def forward(self, input, z=None): \"\"\" :param", "Variable(t.randn([batch_size, 15, latent_size])) if input.is_cuda: z = z.cuda() mu_repeated = mu.unsqueeze(1).repeat(1, 15, 1)", "Float tensor with shape of [batch_size, 1, 28, 28], [batch_size, 16], [batch_size, 16]", "15, 1) std_repeated = std.unsqueeze(1).repeat(1, 15, 1) z = z * std_repeated +", "[batch_size, latent_size] = mu.size() log_p_z_x = VAE.normal_prob(z, mu, std) log_p_z = VAE.normal_prob(z, Variable(t.zeros(batch_size,", "parameters of distribution over latent variables :return: divirgence estimation \"\"\" return 0.5 *", "+ 1e-8) + t.pow(p_second[0] - p_second[0], 2) / (t.exp(p_second[1]) + 1e-8) - 1).mean()", "return t.exp(-0.5 * ((z - mu) * t.pow(std + 1e-8, -1) * (z", "= mu.size() std = t.exp(0.5 * logvar) z = Variable(t.randn([batch_size, 15, latent_size])) if", "F from torch.autograd import Variable from model.encoder import Encoder from model.decoder import Decoder", "28] :param z: an Float tensor with shape of [batch_size, latent_size] if sampling", "z.cuda() mu_repeated = mu.unsqueeze(1).repeat(1, 15, 1) std_repeated = std.unsqueeze(1).repeat(1, 15, 1) z =", "(t.exp(p_second[1]) + 1e-8) + t.pow(p_second[0] - p_second[0], 2) / (t.exp(p_second[1]) + 1e-8) -", "std_repeated = std.unsqueeze(1).repeat(1, 15, 1) z = z * std_repeated + mu_repeated z", "p_first[1] + t.exp(p_first[1]) / (t.exp(p_second[1]) + 1e-8) + t.pow(p_second[0] - p_second[0], 2) /", "monte_carlo_divergence(z, mu, std, n): [batch_size, latent_size] = mu.size() log_p_z_x = VAE.normal_prob(z, mu, std)", "self.encoder(input) [batch_size, latent_size] = mu.size() std = t.exp(0.5 * logvar) z = Variable(t.randn([batch_size,", "Float tensor with shape of [batch_size, latent_size] if sampling is performed :return: an", "= Variable(t.randn([batch_size, 15, latent_size])) if input.is_cuda: z = z.cuda() mu_repeated = mu.unsqueeze(1).repeat(1, 15,", "= 
VAE.normal_prob(z, mu, std) log_p_z = VAE.normal_prob(z, Variable(t.zeros(batch_size, latent_size)), Variable(t.ones(batch_size, latent_size))) result =", "latent_size] if sampling is performed :return: an Float tensor with shape of [batch_size,", "mu, logvar = self.encoder(input) [batch_size, latent_size] = mu.size() std = t.exp(0.5 * logvar)", "- t.pow(mu, 2) - t.exp(logvar) + 1, 1)).mean() @staticmethod def divergence_with_posterior(p_first, p_second): \"\"\"", "+ mu_repeated z = z.view(batch_size * 15, -1) return self.decoder(z), mu, logvar, z", "def divergence_with_posterior(p_first, p_second): \"\"\" :params p_first, p_second: tuples with parameters of distribution over", "\\ t.sqrt(t.abs(2 * math.pi * std.prod(1))) @staticmethod def divergence_with_prior(mu, logvar): return (-0.5 *", "= z * std_repeated + mu_repeated z = z.view(batch_size * 15, -1) return", "sampling is performed :return: an Float tensor with shape of [batch_size, 1, 28,", "of [batch_size, latent_size] if sampling is performed :return: an Float tensor with shape", "mu, std) log_p_z = VAE.normal_prob(z, Variable(t.zeros(batch_size, latent_size)), Variable(t.ones(batch_size, latent_size))) result = log_p_z_x -", "((z - mu) * t.pow(std + 1e-8, -1) * (z - mu)).sum(1)) /", "Encoder from model.decoder import Decoder import math class VAE(nn.Module): def __init__(self): super(VAE, self).__init__()", "2) - t.exp(logvar) + 1, 1)).mean() @staticmethod def divergence_with_posterior(p_first, p_second): \"\"\" :params p_first,", "- 2 * p_first[1] + t.exp(p_first[1]) / (t.exp(p_second[1]) + 1e-8) + t.pow(p_second[0] -", "[batch_size, 1, 28, 28] :param z: an Float tensor with shape of [batch_size,", "z = z.view(batch_size * 15, -1) return self.decoder(z), mu, logvar, z def encode(self,", "t.exp(0.5 * logvar) z = Variable(t.randn([batch_size, 15, latent_size])) if input.is_cuda: z = z.cuda()", "tuples with parameters of distribution over latent variables :return: divirgence estimation \"\"\" return", "= 
Decoder() def forward(self, input, z=None): \"\"\" :param input: an Float tensor with", "logvar = self.encoder(input) [batch_size, latent_size] = mu.size() std = t.exp(0.5 * logvar) z", "input.is_cuda: z = z.cuda() mu_repeated = mu.unsqueeze(1).repeat(1, 15, 1) std_repeated = std.unsqueeze(1).repeat(1, 15,", "math.pi * std.prod(1))) @staticmethod def divergence_with_prior(mu, logvar): return (-0.5 * t.sum(logvar - t.pow(mu,", "15, 1) z = z * std_repeated + mu_repeated z = z.view(batch_size *", "Decoder() def forward(self, input, z=None): \"\"\" :param input: an Float tensor with shape", "1, 28, 28] :param z: an Float tensor with shape of [batch_size, latent_size]", "torch.nn.functional as F from torch.autograd import Variable from model.encoder import Encoder from model.decoder", "\"\"\" :param input: an Float tensor with shape of [batch_size, 1, 28, 28]", "super(VAE, self).__init__() self.encoder = Encoder() self.decoder = Decoder() def forward(self, input, z=None): \"\"\"" ]
[ "logger = logging.getLogger(__name__) @shared_task def listen_ethereum_ipfs_hash_storage(): redis_db = redis.get_redis() verification_storage = contract.contract('VerificationStorage') event", "def listen_ethereum_ipfs_hash_storage(): redis_db = redis.get_redis() verification_storage = contract.contract('VerificationStorage') event = verification_storage.events.Verification last_block =", "= redis.get_redis() verification_storage = contract.contract('VerificationStorage') event = verification_storage.events.Verification last_block = redis_db.get('_verification_filter_block') or 0", "logging.getLogger(__name__) @shared_task def listen_ethereum_ipfs_hash_storage(): redis_db = redis.get_redis() verification_storage = contract.contract('VerificationStorage') event = verification_storage.events.Verification", "entry in hash_filter.get_all_entries(): block_number = int(entry['blockNumber']) entry_args = dict(entry['args']) entry_data = { 'transactionHash':", "redis from .perform_ipfs_meta_verifications_array import ( perform_ipfs_meta_verifications_array) logger = logging.getLogger(__name__) @shared_task def listen_ethereum_ipfs_hash_storage(): redis_db", "entry['blockHash'].hex(), 'blockNumber': entry['blockNumber'], 'args': { 'ipfsHash': entry_args.get('ipfsHash', b'').decode(), }, } perform_ipfs_meta_verifications_array.delay(entry_data) if block_number", "entry_args = dict(entry['args']) entry_data = { 'transactionHash': entry['transactionHash'].hex(), 'blockHash': entry['blockHash'].hex(), 'blockNumber': entry['blockNumber'], 'args':", ".perform_ipfs_meta_verifications_array import ( perform_ipfs_meta_verifications_array) logger = logging.getLogger(__name__) @shared_task def listen_ethereum_ipfs_hash_storage(): redis_db = redis.get_redis()", "= logging.getLogger(__name__) @shared_task def listen_ethereum_ipfs_hash_storage(): redis_db = redis.get_redis() verification_storage = contract.contract('VerificationStorage') event =", "= 
verification_storage.events.Verification last_block = redis_db.get('_verification_filter_block') or 0 if last_block != 0: last_block =", "last_block = redis_db.get('_verification_filter_block') or 0 if last_block != 0: last_block = int(last_block) hash_filter", "!= 0: last_block = int(last_block) hash_filter = event.createFilter(fromBlock=last_block) for entry in hash_filter.get_all_entries(): block_number", "in hash_filter.get_all_entries(): block_number = int(entry['blockNumber']) entry_args = dict(entry['args']) entry_data = { 'transactionHash': entry['transactionHash'].hex(),", "block_number = int(entry['blockNumber']) entry_args = dict(entry['args']) entry_data = { 'transactionHash': entry['transactionHash'].hex(), 'blockHash': entry['blockHash'].hex(),", "bdn import contract from bdn import redis from .perform_ipfs_meta_verifications_array import ( perform_ipfs_meta_verifications_array) logger", "verification_storage = contract.contract('VerificationStorage') event = verification_storage.events.Verification last_block = redis_db.get('_verification_filter_block') or 0 if last_block", "import contract from bdn import redis from .perform_ipfs_meta_verifications_array import ( perform_ipfs_meta_verifications_array) logger =", "listen_ethereum_ipfs_hash_storage(): redis_db = redis.get_redis() verification_storage = contract.contract('VerificationStorage') event = verification_storage.events.Verification last_block = redis_db.get('_verification_filter_block')", "redis_db = redis.get_redis() verification_storage = contract.contract('VerificationStorage') event = verification_storage.events.Verification last_block = redis_db.get('_verification_filter_block') or", "for entry in hash_filter.get_all_entries(): block_number = int(entry['blockNumber']) entry_args = dict(entry['args']) entry_data = {", "perform_ipfs_meta_verifications_array) logger = logging.getLogger(__name__) @shared_task def listen_ethereum_ipfs_hash_storage(): redis_db = redis.get_redis() 
verification_storage = contract.contract('VerificationStorage')", "0: last_block = int(last_block) hash_filter = event.createFilter(fromBlock=last_block) for entry in hash_filter.get_all_entries(): block_number =", "= int(entry['blockNumber']) entry_args = dict(entry['args']) entry_data = { 'transactionHash': entry['transactionHash'].hex(), 'blockHash': entry['blockHash'].hex(), 'blockNumber':", "{ 'transactionHash': entry['transactionHash'].hex(), 'blockHash': entry['blockHash'].hex(), 'blockNumber': entry['blockNumber'], 'args': { 'ipfsHash': entry_args.get('ipfsHash', b'').decode(), },", "'blockNumber': entry['blockNumber'], 'args': { 'ipfsHash': entry_args.get('ipfsHash', b'').decode(), }, } perform_ipfs_meta_verifications_array.delay(entry_data) if block_number >", "contract.contract('VerificationStorage') event = verification_storage.events.Verification last_block = redis_db.get('_verification_filter_block') or 0 if last_block != 0:", "from celery import shared_task from bdn import contract from bdn import redis from", "= int(last_block) hash_filter = event.createFilter(fromBlock=last_block) for entry in hash_filter.get_all_entries(): block_number = int(entry['blockNumber']) entry_args", "last_block != 0: last_block = int(last_block) hash_filter = event.createFilter(fromBlock=last_block) for entry in hash_filter.get_all_entries():", "contract from bdn import redis from .perform_ipfs_meta_verifications_array import ( perform_ipfs_meta_verifications_array) logger = logging.getLogger(__name__)", "redis_db.get('_verification_filter_block') or 0 if last_block != 0: last_block = int(last_block) hash_filter = event.createFilter(fromBlock=last_block)", "hash_filter.get_all_entries(): block_number = int(entry['blockNumber']) entry_args = dict(entry['args']) entry_data = { 'transactionHash': entry['transactionHash'].hex(), 'blockHash':", "int(entry['blockNumber']) entry_args = dict(entry['args']) entry_data = { 'transactionHash': entry['transactionHash'].hex(), 
'blockHash': entry['blockHash'].hex(), 'blockNumber': entry['blockNumber'],", "import redis from .perform_ipfs_meta_verifications_array import ( perform_ipfs_meta_verifications_array) logger = logging.getLogger(__name__) @shared_task def listen_ethereum_ipfs_hash_storage():", "from .perform_ipfs_meta_verifications_array import ( perform_ipfs_meta_verifications_array) logger = logging.getLogger(__name__) @shared_task def listen_ethereum_ipfs_hash_storage(): redis_db =", "if last_block != 0: last_block = int(last_block) hash_filter = event.createFilter(fromBlock=last_block) for entry in", "'transactionHash': entry['transactionHash'].hex(), 'blockHash': entry['blockHash'].hex(), 'blockNumber': entry['blockNumber'], 'args': { 'ipfsHash': entry_args.get('ipfsHash', b'').decode(), }, }", "= event.createFilter(fromBlock=last_block) for entry in hash_filter.get_all_entries(): block_number = int(entry['blockNumber']) entry_args = dict(entry['args']) entry_data", "{ 'ipfsHash': entry_args.get('ipfsHash', b'').decode(), }, } perform_ipfs_meta_verifications_array.delay(entry_data) if block_number > last_block: redis_db.set('_verification_filter_block', block_number)", "event.createFilter(fromBlock=last_block) for entry in hash_filter.get_all_entries(): block_number = int(entry['blockNumber']) entry_args = dict(entry['args']) entry_data =", "= { 'transactionHash': entry['transactionHash'].hex(), 'blockHash': entry['blockHash'].hex(), 'blockNumber': entry['blockNumber'], 'args': { 'ipfsHash': entry_args.get('ipfsHash', b'').decode(),", "celery import shared_task from bdn import contract from bdn import redis from .perform_ipfs_meta_verifications_array", "event = verification_storage.events.Verification last_block = redis_db.get('_verification_filter_block') or 0 if last_block != 0: last_block", "int(last_block) hash_filter = event.createFilter(fromBlock=last_block) for entry in hash_filter.get_all_entries(): block_number = int(entry['blockNumber']) entry_args =", "import logging 
from celery import shared_task from bdn import contract from bdn import", "entry['transactionHash'].hex(), 'blockHash': entry['blockHash'].hex(), 'blockNumber': entry['blockNumber'], 'args': { 'ipfsHash': entry_args.get('ipfsHash', b'').decode(), }, } perform_ipfs_meta_verifications_array.delay(entry_data)", "entry['blockNumber'], 'args': { 'ipfsHash': entry_args.get('ipfsHash', b'').decode(), }, } perform_ipfs_meta_verifications_array.delay(entry_data) if block_number > last_block:", "= contract.contract('VerificationStorage') event = verification_storage.events.Verification last_block = redis_db.get('_verification_filter_block') or 0 if last_block !=", "last_block = int(last_block) hash_filter = event.createFilter(fromBlock=last_block) for entry in hash_filter.get_all_entries(): block_number = int(entry['blockNumber'])", "import shared_task from bdn import contract from bdn import redis from .perform_ipfs_meta_verifications_array import", "from bdn import redis from .perform_ipfs_meta_verifications_array import ( perform_ipfs_meta_verifications_array) logger = logging.getLogger(__name__) @shared_task", "import ( perform_ipfs_meta_verifications_array) logger = logging.getLogger(__name__) @shared_task def listen_ethereum_ipfs_hash_storage(): redis_db = redis.get_redis() verification_storage", "verification_storage.events.Verification last_block = redis_db.get('_verification_filter_block') or 0 if last_block != 0: last_block = int(last_block)", "hash_filter = event.createFilter(fromBlock=last_block) for entry in hash_filter.get_all_entries(): block_number = int(entry['blockNumber']) entry_args = dict(entry['args'])", "'blockHash': entry['blockHash'].hex(), 'blockNumber': entry['blockNumber'], 'args': { 'ipfsHash': entry_args.get('ipfsHash', b'').decode(), }, } perform_ipfs_meta_verifications_array.delay(entry_data) if", "from bdn import contract from bdn import redis from .perform_ipfs_meta_verifications_array import ( perform_ipfs_meta_verifications_array)", 
"'args': { 'ipfsHash': entry_args.get('ipfsHash', b'').decode(), }, } perform_ipfs_meta_verifications_array.delay(entry_data) if block_number > last_block: redis_db.set('_verification_filter_block',", "@shared_task def listen_ethereum_ipfs_hash_storage(): redis_db = redis.get_redis() verification_storage = contract.contract('VerificationStorage') event = verification_storage.events.Verification last_block", "= redis_db.get('_verification_filter_block') or 0 if last_block != 0: last_block = int(last_block) hash_filter =", "or 0 if last_block != 0: last_block = int(last_block) hash_filter = event.createFilter(fromBlock=last_block) for", "shared_task from bdn import contract from bdn import redis from .perform_ipfs_meta_verifications_array import (", "0 if last_block != 0: last_block = int(last_block) hash_filter = event.createFilter(fromBlock=last_block) for entry", "logging from celery import shared_task from bdn import contract from bdn import redis", "dict(entry['args']) entry_data = { 'transactionHash': entry['transactionHash'].hex(), 'blockHash': entry['blockHash'].hex(), 'blockNumber': entry['blockNumber'], 'args': { 'ipfsHash':", "entry_data = { 'transactionHash': entry['transactionHash'].hex(), 'blockHash': entry['blockHash'].hex(), 'blockNumber': entry['blockNumber'], 'args': { 'ipfsHash': entry_args.get('ipfsHash',", "( perform_ipfs_meta_verifications_array) logger = logging.getLogger(__name__) @shared_task def listen_ethereum_ipfs_hash_storage(): redis_db = redis.get_redis() verification_storage =", "= dict(entry['args']) entry_data = { 'transactionHash': entry['transactionHash'].hex(), 'blockHash': entry['blockHash'].hex(), 'blockNumber': entry['blockNumber'], 'args': {", "bdn import redis from .perform_ipfs_meta_verifications_array import ( perform_ipfs_meta_verifications_array) logger = logging.getLogger(__name__) @shared_task def", "redis.get_redis() verification_storage = contract.contract('VerificationStorage') event = 
verification_storage.events.Verification last_block = redis_db.get('_verification_filter_block') or 0 if" ]
[ "test_push_trace(self): pass def test_message(self): base_value = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base_value.message, \"message\") def test_str(self): base", "self.assertEqual(unimplemented_verb.stack_trace, [{\"verb\": \"GET\"}]) class ExceptionNoEndpointError(unittest.TestCase): def test_init(self): no_endpoint = exception.NoEndpointError() self.assertEqual(no_endpoint.http_status, STATUS.NOT_FOUND) self.assertEqual(no_endpoint.message,", "invalid_meta_value = exception.InvalidMetaValueError() self.assertEqual(invalid_meta_value.http_status, STATUS.BAD_REQUEST) self.assertEqual(invalid_meta_value.message, \"invalid meta value\") class ExceptionUnregisteredAdapterError(unittest.TestCase): def test_init(self):", "self.assertEqual(conflict.message, \"Conflict\") class ExceptionNotFound(unittest.TestCase): def test_init(self): not_found = exception.NotFound() self.assertEqual(not_found.http_status, STATUS.NOT_FOUND) self.assertEqual(not_found.message, \"Not", "def test_init(self): conflict = exception.Conflict() self.assertEqual(conflict.http_status, STATUS.CONFLICT) self.assertEqual(conflict.message, \"Conflict\") class ExceptionNotFound(unittest.TestCase): def test_init(self):", "def test_init(self): from prestans.rest import Request import logging logging.basicConfig() self.logger = logging.getLogger(\"prestans\") from", "attribute_filter_differs.message, \"attribute filter contains attributes (cat, dog) that are not part of template\"", "= exception.InvalidTypeError(\"str\", \"int\") self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"data type str given, expected int\") class", "stack_trace = [{ \"accept_header\": \"accept\", \"supported_types\": [\"a\", \"b\", \"c\"] }] self.assertEqual(unsupported_vocabulary_error.stack_trace, stack_trace) class", "message\") class ExceptionHandlerException(unittest.TestCase): def test_init(self): from prestans.rest import Request import logging 
logging.basicConfig() self.logger", "def test_push_trace(self): pass def test_message(self): base_value = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base_value.message, \"message\") def test_str(self):", "be greater than 5\") class ExceptionMaximumLengthError(unittest.TestCase): def test_init(self): exp = exception.MaximumLengthError(\"dog\", 2) self.assertEqual(exp.http_status,", "}] self.assertEqual(unsupported_content_type.stack_trace, stack_trace) class ExceptionValidationError(unittest.TestCase): def test_init(self): validation_error = exception.ValidationError( message=\"message\", attribute_name=\"attribute\", value=\"value\",", "conflict = exception.Conflict() self.assertEqual(conflict.http_status, STATUS.CONFLICT) self.assertEqual(conflict.message, \"Conflict\") class ExceptionNotFound(unittest.TestCase): def test_init(self): not_found =", "def test_init(self): exp = exception.InvalidTypeError(\"str\", \"int\") self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"data type str given,", "contains attributes (cat, dog) that are not part of template\" ) class ExceptionInconsistentPersistentDataError(unittest.TestCase):", "self.assertEqual(unauthorized.http_status, STATUS.UNAUTHORIZED) self.assertEqual(unauthorized.message, \"Unauthorized\") class ExceptionMovedPermanently(unittest.TestCase): def test_init(self): moved_permanently = exception.MovedPermanently() self.assertEqual(moved_permanently.http_status, STATUS.MOVED_PERMANENTLY)", "Permanently\") class ExceptionPaymentRequired(unittest.TestCase): def test_init(self): payment_required = exception.PaymentRequired() self.assertEqual(payment_required.http_status, STATUS.PAYMENT_REQUIRED) self.assertEqual(payment_required.message, \"Payment Required\")", "\"message\", \"blueprint\": {\"key\": \"value\"} } ]) self.assertEqual(str(validation_error), \"attribute message\") class ExceptionHandlerException(unittest.TestCase): def test_init(self):", 
"HTTP Verb\") self.assertEqual(unimplemented_verb.stack_trace, [{\"verb\": \"GET\"}]) class ExceptionNoEndpointError(unittest.TestCase): def test_init(self): no_endpoint = exception.NoEndpointError() self.assertEqual(no_endpoint.http_status,", "request self.assertEqual(handler_exception.http_status, STATUS.FORBIDDEN) self.assertEqual(handler_exception.message, \"message\") self.assertEqual(handler_exception.request, request) self.assertEqual(handler_exception.log_message, 'GET https://localhost:8080/url chrome \"message\"') self.assertEqual(str(handler_exception),", "test_init(self): default_msg = exception.ParseFailedError() self.assertEqual(default_msg.http_status, STATUS.BAD_REQUEST) self.assertEqual(default_msg.message, \"Parser Failed\") custom_msg = exception.ParseFailedError(\"custom\") self.assertEqual(custom_msg.http_status,", "\"requested_type\": \"text/plain\", \"supported_types\": \"application/json\" }] self.assertEqual(unsupported_content_type.stack_trace, stack_trace) class ExceptionValidationError(unittest.TestCase): def test_init(self): validation_error =", "STATUS.OK) base_value.http_status = STATUS.NO_CONTENT self.assertEqual(base_value.http_status, STATUS.NO_CONTENT) def test_stack_trace(self): base = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base.stack_trace,", "self.assertEqual(invalid_meta_value.message, \"invalid meta value\") class ExceptionUnregisteredAdapterError(unittest.TestCase): def test_init(self): unregistered_adapter = exception.UnregisteredAdapterError(\"namespace.Model\") self.assertEqual(unregistered_adapter.http_status, STATUS.BAD_REQUEST)", "test_init(self): forbidden = exception.Forbidden() self.assertEqual(forbidden.http_status, STATUS.FORBIDDEN) self.assertEqual(forbidden.message, \"Forbidden\") class ExceptionInternalServerError(unittest.TestCase): def test_init(self): internal_server_error", "to be greater than 5\") class ExceptionMaximumLengthError(unittest.TestCase): def 
test_init(self): exp = exception.MaximumLengthError(\"dog\", 2)", "from prestans.rest import Request import logging logging.basicConfig() self.logger = logging.getLogger(\"prestans\") from prestans.deserializer import", "self.assertEqual(authentication.http_status, STATUS.UNAUTHORIZED) self.assertEqual(authentication.message, \"Authentication Error; service is only available to authenticated\") authentication_custom =", "STATUS.NOT_FOUND) self.assertEqual(serialization_failed_error.message, \"Serialization failed: format\") self.assertEqual(str(serialization_failed_error), \"Serialization failed: format\") class ExceptionDeSerializationFailedError(unittest.TestCase): def test_init(self):", "\"invalid meta value\") class ExceptionUnregisteredAdapterError(unittest.TestCase): def test_init(self): unregistered_adapter = exception.UnregisteredAdapterError(\"namespace.Model\") self.assertEqual(unregistered_adapter.http_status, STATUS.BAD_REQUEST) self.assertEqual(unregistered_adapter.message,", "value\") class ExceptionUnregisteredAdapterError(unittest.TestCase): def test_init(self): unregistered_adapter = exception.UnregisteredAdapterError(\"namespace.Model\") self.assertEqual(unregistered_adapter.http_status, STATUS.BAD_REQUEST) self.assertEqual(unregistered_adapter.message, \"no registered", "\"attribute_name\": \"attribute\", \"value\": \"value\", \"message\": \"message\", \"blueprint\": {\"key\": \"value\"} } ]) self.assertEqual(str(validation_error), \"attribute", "accept_header=\"accept\", supported_types=[\"a\", \"b\", \"c\"] ) self.assertEqual(unsupported_vocabulary_error.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unsupported_vocabulary_error.message, \"Unsupported vocabulary in the Accept", "ExceptionDeSerializationFailedError(unittest.TestCase): def test_init(self): deserialization_failed_error = exception.DeSerializationFailedError(\"format\") self.assertEqual(deserialization_failed_error.http_status, STATUS.NOT_FOUND) 
self.assertEqual(deserialization_failed_error.message, \"DeSerialization failed: format\") self.assertEqual(str(deserialization_failed_error),", "handler_exception = exception.HandlerException(STATUS.FORBIDDEN, \"message\") handler_exception.request = request self.assertEqual(handler_exception.http_status, STATUS.FORBIDDEN) self.assertEqual(handler_exception.message, \"message\") self.assertEqual(handler_exception.request, request)", "= MyModel() response = exception.ResponseException(STATUS.OK, \"message\", my_model) self.assertEqual(response.http_status, STATUS.OK) self.assertEqual(response.message, \"message\") self.assertEqual(response.response_model, my_model)", "self.assertEqual(not_found.message, \"Not Found\") class ExceptionUnauthorized(unittest.TestCase): def test_init(self): unauthorized = exception.Unauthorized() self.assertEqual(unauthorized.http_status, STATUS.UNAUTHORIZED) self.assertEqual(unauthorized.message,", "exception.NoEndpointError() self.assertEqual(no_endpoint.http_status, STATUS.NOT_FOUND) self.assertEqual(no_endpoint.message, \"API does not provide this end-point\") class ExceptionAuthenticationError(unittest.TestCase): def", "exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base_value.message, \"message\") def test_str(self): base = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base.http_status, STATUS.OK) self.assertEqual(str(base.message),", "self.assertEqual(unsupported_content_type.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unsupported_content_type.message, \"Unsupported Content-Type in Request\") stack_trace = [{ \"requested_type\": \"text/plain\", \"supported_types\":", "environ=request_environ, charset=charset, logger=self.logger, deserializers=serializers, default_deserializer=default_serializer ) handler_exception = exception.HandlerException(STATUS.FORBIDDEN, \"message\") handler_exception.request = request", "\"application/json\") 
self.assertEqual(unsupported_content_type.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unsupported_content_type.message, \"Unsupported Content-Type in Request\") stack_trace = [{ \"requested_type\": \"text/plain\",", "chrome \"message\"') handler_exception_without_request = exception.HandlerException(STATUS.NOT_FOUND, \"message\") self.assertEqual(handler_exception_without_request.http_status, STATUS.NOT_FOUND) self.assertEqual(handler_exception_without_request.message, \"message\") self.assertEqual(handler_exception_without_request.log_message, \"message\") self.assertEqual(str(handler_exception_without_request),", "allowed maximum of 3\") class ExceptionInvalidChoiceError(unittest.TestCase): def test_init(self): exp = exception.InvalidChoiceError(3, [1, 2,", "self.assertEqual(authentication_custom.http_status, STATUS.UNAUTHORIZED) self.assertEqual(authentication_custom.message, \"Custom message\") class ExceptionAuthorizationError(unittest.TestCase): def test_init(self): authorization = exception.AuthorizationError(\"Role\") self.assertEqual(authorization.http_status,", "exception.SerializationFailedError(\"format\") self.assertEqual(serialization_failed_error.http_status, STATUS.NOT_FOUND) self.assertEqual(serialization_failed_error.message, \"Serialization failed: format\") self.assertEqual(str(serialization_failed_error), \"Serialization failed: format\") class ExceptionDeSerializationFailedError(unittest.TestCase):", "\"name\"}]) class ExceptionDataValidationException(unittest.TestCase): def test_init(self): exp = exception.DataValidationException(\"message\") self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"message\") class", "less than 2\") class ExceptionInvalidTypeError(unittest.TestCase): def test_init(self): exp = exception.InvalidTypeError(\"str\", \"int\") self.assertEqual(exp.http_status, STATUS.BAD_REQUEST)", "class ExceptionInvalidMetaValueError(unittest.TestCase): def test_init(self): 
invalid_meta_value = exception.InvalidMetaValueError() self.assertEqual(invalid_meta_value.http_status, STATUS.BAD_REQUEST) self.assertEqual(invalid_meta_value.message, \"invalid meta value\")", "ExceptionBase(unittest.TestCase): def test_http_status(self): base_value = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base_value.http_status, STATUS.OK) base_value.http_status = STATUS.NO_CONTENT self.assertEqual(base_value.http_status,", "not provide this end-point\") class ExceptionAuthenticationError(unittest.TestCase): def test_init(self): authentication = exception.AuthenticationError() self.assertEqual(authentication.http_status, STATUS.UNAUTHORIZED)", "class ExceptionSerializationFailedError(unittest.TestCase): def test_init(self): serialization_failed_error = exception.SerializationFailedError(\"format\") self.assertEqual(serialization_failed_error.http_status, STATUS.NOT_FOUND) self.assertEqual(serialization_failed_error.message, \"Serialization failed: format\")", "class ExceptionDataValidationException(unittest.TestCase): def test_init(self): exp = exception.DataValidationException(\"message\") self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"message\") class ExceptionRequiredAttributeError(unittest.TestCase):", "\"DeSerialization failed: format\") self.assertEqual(str(deserialization_failed_error), \"DeSerialization failed: format\") class ExceptionAttributeFilterDiffers(unittest.TestCase): def test_init(self): attribute_filter_differs =", "def test_init(self): bad_request = exception.BadRequest() self.assertEqual(bad_request.http_status, STATUS.BAD_REQUEST) self.assertEqual(bad_request.message, \"Bad Request\") class ExceptionConflict(unittest.TestCase): def", "validation_error = exception.ValidationError( message=\"message\", attribute_name=\"attribute\", value=\"value\", blueprint={\"key\": \"value\"} ) self.assertEqual(validation_error.http_status, STATUS.BAD_REQUEST) 
self.assertEqual(validation_error.message, \"message\")", "\"message\") handler_exception.request = request self.assertEqual(handler_exception.http_status, STATUS.FORBIDDEN) self.assertEqual(handler_exception.message, \"message\") self.assertEqual(handler_exception.request, request) self.assertEqual(handler_exception.log_message, 'GET https://localhost:8080/url", "= exception.InvalidChoiceError(3, [1, 2, 5]) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"value 3 is not one", "authentication_custom = exception.AuthenticationError(\"Custom message\") self.assertEqual(authentication_custom.http_status, STATUS.UNAUTHORIZED) self.assertEqual(authentication_custom.message, \"Custom message\") class ExceptionAuthorizationError(unittest.TestCase): def test_init(self):", "dog has to be less than 2\") class ExceptionInvalidTypeError(unittest.TestCase): def test_init(self): exp =", "stored data on the server\" ) self.assertEqual(error.stack_trace, [{'exception_message': \"error message\", 'attribute_name': \"name\"}]) class", "serialization_failed_error = exception.SerializationFailedError(\"format\") self.assertEqual(serialization_failed_error.http_status, STATUS.NOT_FOUND) self.assertEqual(serialization_failed_error.message, \"Serialization failed: format\") self.assertEqual(str(serialization_failed_error), \"Serialization failed: format\")", "class ExceptionMissingParameterError(unittest.TestCase): def test_init(self): missing_parameter = exception.MissingParameterError() self.assertEqual(missing_parameter.http_status, STATUS.BAD_REQUEST) self.assertEqual(missing_parameter.message, \"missing parameter\") class", "self.assertEqual(base.http_status, STATUS.OK) self.assertEqual(str(base.message), \"message\") class ExceptionUnsupportedVocabularyError(unittest.TestCase): def test_init(self): unsupported_vocabulary_error = exception.UnsupportedVocabularyError( accept_header=\"accept\", supported_types=[\"a\",", 
"ExceptionResponseException(unittest.TestCase): def test_init(self): from prestans.types import Model class MyModel(Model): pass my_model = MyModel()", "data on the server\") self.assertEqual( str(error), \"DataAdapter failed to adapt name, Data Adapter", "\"accept_header\": \"accept\", \"supported_types\": [\"a\", \"b\", \"c\"] }] self.assertEqual(unsupported_vocabulary_error.stack_trace, stack_trace) class ExceptionUnsupportedContentTypeError(unittest.TestCase): def test_init(self):", "import unittest from prestans.http import STATUS from prestans.http import VERB from prestans import", "\"Serialization failed: format\") self.assertEqual(str(serialization_failed_error), \"Serialization failed: format\") class ExceptionDeSerializationFailedError(unittest.TestCase): def test_init(self): deserialization_failed_error =", "the HTTP Verb\") self.assertEqual(unimplemented_verb.stack_trace, [{\"verb\": \"GET\"}]) class ExceptionNoEndpointError(unittest.TestCase): def test_init(self): no_endpoint = exception.NoEndpointError()", "this resource\") class ExceptionSerializationFailedError(unittest.TestCase): def test_init(self): serialization_failed_error = exception.SerializationFailedError(\"format\") self.assertEqual(serialization_failed_error.http_status, STATUS.NOT_FOUND) self.assertEqual(serialization_failed_error.message, \"Serialization", "self.assertEqual(exp.message, \"data type str given, expected int\") class ExceptionMissingParameterError(unittest.TestCase): def test_init(self): missing_parameter =", "exp = exception.RequiredAttributeError() self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"attribute is required and does not provide", "\"int\") self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"data type str given, expected int\") class ExceptionMissingParameterError(unittest.TestCase): def", "these choices 1, 2, 5\") class ExceptionMinimumLengthError(unittest.TestCase): def test_init(self): 
exp = exception.MinimumLengthError(\"dog\", 5)", "def test_init(self): authorization = exception.AuthorizationError(\"Role\") self.assertEqual(authorization.http_status, STATUS.FORBIDDEN) self.assertEqual(authorization.message, \"Role is not allowed to", "STATUS.BAD_REQUEST) self.assertEqual(invalid_meta_value.message, \"invalid meta value\") class ExceptionUnregisteredAdapterError(unittest.TestCase): def test_init(self): unregistered_adapter = exception.UnregisteredAdapterError(\"namespace.Model\") self.assertEqual(unregistered_adapter.http_status,", "exception.HandlerException(STATUS.NOT_FOUND, \"message\") self.assertEqual(handler_exception_without_request.http_status, STATUS.NOT_FOUND) self.assertEqual(handler_exception_without_request.message, \"message\") self.assertEqual(handler_exception_without_request.log_message, \"message\") self.assertEqual(str(handler_exception_without_request), \"message\") class ExceptionRequestException(unittest.TestCase): def", "= exception.RequiredAttributeError() self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"attribute is required and does not provide a", "class ExceptionMovedPermanently(unittest.TestCase): def test_init(self): moved_permanently = exception.MovedPermanently() self.assertEqual(moved_permanently.http_status, STATUS.MOVED_PERMANENTLY) self.assertEqual(moved_permanently.message, \"Moved Permanently\") class", "Accept header\") stack_trace = [{ \"accept_header\": \"accept\", \"supported_types\": [\"a\", \"b\", \"c\"] }] self.assertEqual(unsupported_vocabulary_error.stack_trace,", "my_model) self.assertEqual(response.http_status, STATUS.OK) self.assertEqual(response.message, \"message\") self.assertEqual(response.response_model, my_model) self.assertRaises(TypeError, exception.ResponseException, STATUS.INTERNAL_SERVER_ERROR, \"message\", \"string\") class", "ExceptionSerializationFailedError(unittest.TestCase): def test_init(self): serialization_failed_error = 
exception.SerializationFailedError(\"format\") self.assertEqual(serialization_failed_error.http_status, STATUS.NOT_FOUND) self.assertEqual(serialization_failed_error.message, \"Serialization failed: format\") self.assertEqual(str(serialization_failed_error),", "message\") self.assertEqual(error.http_status, STATUS.INTERNAL_SERVER_ERROR) self.assertEqual(error.message, \"Data Adapter failed to validate stored data on the", "handler_exception.request = request self.assertEqual(handler_exception.http_status, STATUS.FORBIDDEN) self.assertEqual(handler_exception.message, \"message\") self.assertEqual(handler_exception.request, request) self.assertEqual(handler_exception.log_message, 'GET https://localhost:8080/url chrome", "\"PATH_INFO\": \"/url\", \"HTTP_USER_AGENT\": \"chrome\", \"wsgi.url_scheme\": \"https\", \"SERVER_NAME\": \"localhost\", \"SERVER_PORT\": \"8080\" } request =", "be less than 2\") class ExceptionInvalidTypeError(unittest.TestCase): def test_init(self): exp = exception.InvalidTypeError(\"str\", \"int\") self.assertEqual(exp.http_status,", "test_init(self): from prestans.rest import Request import logging logging.basicConfig() self.logger = logging.getLogger(\"prestans\") from prestans.deserializer", "self.assertEqual(invalid_format.http_status, STATUS.BAD_REQUEST) self.assertEqual(invalid_format.message, \"invalid value cat provided\") class ExceptionInvalidMetaValueError(unittest.TestCase): def test_init(self): invalid_meta_value =", "\"data type str given, expected int\") class ExceptionMissingParameterError(unittest.TestCase): def test_init(self): missing_parameter = exception.MissingParameterError()", "class ExceptionUnsupportedVocabularyError(unittest.TestCase): def test_init(self): unsupported_vocabulary_error = exception.UnsupportedVocabularyError( accept_header=\"accept\", supported_types=[\"a\", \"b\", \"c\"] ) self.assertEqual(unsupported_vocabulary_error.http_status,", "\"Authentication Error; service is only available to authenticated\") 
authentication_custom = exception.AuthenticationError(\"Custom message\") self.assertEqual(authentication_custom.http_status,", "\"value\", \"message\": \"message\", \"blueprint\": {\"key\": \"value\"} } ]) self.assertEqual(str(validation_error), \"attribute message\") class ExceptionHandlerException(unittest.TestCase):", "exception.AuthenticationError(\"Custom message\") self.assertEqual(authentication_custom.http_status, STATUS.UNAUTHORIZED) self.assertEqual(authentication_custom.message, \"Custom message\") class ExceptionAuthorizationError(unittest.TestCase): def test_init(self): authorization =", "cat provided\") class ExceptionInvalidMetaValueError(unittest.TestCase): def test_init(self): invalid_meta_value = exception.InvalidMetaValueError() self.assertEqual(invalid_meta_value.http_status, STATUS.BAD_REQUEST) self.assertEqual(invalid_meta_value.message, \"invalid", "request\") class ExceptionUnimplementedVerbError(unittest.TestCase): def test_init(self): unimplemented_verb = exception.UnimplementedVerbError(\"GET\") self.assertEqual(unimplemented_verb.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unimplemented_verb.message, \"API does", "base = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base.http_status, STATUS.OK) self.assertEqual(str(base.message), \"message\") class ExceptionUnsupportedVocabularyError(unittest.TestCase): def test_init(self): unsupported_vocabulary_error", "\"supported_types\": \"application/json\" }] self.assertEqual(unsupported_content_type.stack_trace, stack_trace) class ExceptionValidationError(unittest.TestCase): def test_init(self): validation_error = exception.ValidationError( message=\"message\",", "\"message\") self.assertEqual(validation_error.stack_trace, [ { \"attribute_name\": \"attribute\", \"value\": \"value\", \"message\": \"message\", \"blueprint\": {\"key\": \"value\"}", "= { \"REQUEST_METHOD\": VERB.GET, \"PATH_INFO\": \"/url\", \"HTTP_USER_AGENT\": \"chrome\", 
\"wsgi.url_scheme\": \"https\", \"SERVER_NAME\": \"localhost\", \"SERVER_PORT\":", "authentication = exception.AuthenticationError() self.assertEqual(authentication.http_status, STATUS.UNAUTHORIZED) self.assertEqual(authentication.message, \"Authentication Error; service is only available to", "ExceptionUnimplementedVerbError(unittest.TestCase): def test_init(self): unimplemented_verb = exception.UnimplementedVerbError(\"GET\") self.assertEqual(unimplemented_verb.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unimplemented_verb.message, \"API does not implement", "not implement the HTTP Verb\") self.assertEqual(unimplemented_verb.stack_trace, [{\"verb\": \"GET\"}]) class ExceptionNoEndpointError(unittest.TestCase): def test_init(self): no_endpoint", "that are not part of template\" ) class ExceptionInconsistentPersistentDataError(unittest.TestCase): def test_init(self): error =", "5) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"length of value: dog has to be greater than", "exception.BadRequest() self.assertEqual(bad_request.http_status, STATUS.BAD_REQUEST) self.assertEqual(bad_request.message, \"Bad Request\") class ExceptionConflict(unittest.TestCase): def test_init(self): conflict = exception.Conflict()", "failed to validate stored data on the server\") self.assertEqual( str(error), \"DataAdapter failed to", "self.assertEqual(serialization_failed_error.http_status, STATUS.NOT_FOUND) self.assertEqual(serialization_failed_error.message, \"Serialization failed: format\") self.assertEqual(str(serialization_failed_error), \"Serialization failed: format\") class ExceptionDeSerializationFailedError(unittest.TestCase): def", "invalid_format = exception.InvalidFormatError(\"cat\") self.assertEqual(invalid_format.http_status, STATUS.BAD_REQUEST) self.assertEqual(invalid_format.message, \"invalid value cat provided\") class ExceptionInvalidMetaValueError(unittest.TestCase): def", 
"self.assertEqual(moved_permanently.http_status, STATUS.MOVED_PERMANENTLY) self.assertEqual(moved_permanently.message, \"Moved Permanently\") class ExceptionPaymentRequired(unittest.TestCase): def test_init(self): payment_required = exception.PaymentRequired() self.assertEqual(payment_required.http_status,", "STATUS.NOT_IMPLEMENTED) self.assertEqual(unimplemented_verb.message, \"API does not implement the HTTP Verb\") self.assertEqual(unimplemented_verb.stack_trace, [{\"verb\": \"GET\"}]) class", "blueprint={\"key\": \"value\"} ) self.assertEqual(validation_error.http_status, STATUS.BAD_REQUEST) self.assertEqual(validation_error.message, \"message\") self.assertEqual(validation_error.stack_trace, [ { \"attribute_name\": \"attribute\", \"value\":", "ExceptionRequiredAttributeError(unittest.TestCase): def test_init(self): exp = exception.RequiredAttributeError() self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"attribute is required and", "pass my_model = MyModel() response = exception.ResponseException(STATUS.OK, \"message\", my_model) self.assertEqual(response.http_status, STATUS.OK) self.assertEqual(response.message, \"message\")", "import Request import logging logging.basicConfig() self.logger = logging.getLogger(\"prestans\") from prestans.deserializer import JSON charset", "} request = Request( environ=request_environ, charset=charset, logger=self.logger, deserializers=serializers, default_deserializer=default_serializer ) handler_exception = exception.HandlerException(STATUS.FORBIDDEN,", "format\") class ExceptionDeSerializationFailedError(unittest.TestCase): def test_init(self): deserialization_failed_error = exception.DeSerializationFailedError(\"format\") self.assertEqual(deserialization_failed_error.http_status, STATUS.NOT_FOUND) self.assertEqual(deserialization_failed_error.message, \"DeSerialization failed:", "\"c\"] ) self.assertEqual(unsupported_vocabulary_error.http_status, STATUS.NOT_IMPLEMENTED) 
self.assertEqual(unsupported_vocabulary_error.message, \"Unsupported vocabulary in the Accept header\") stack_trace =", "self.assertEqual(str(base.message), \"message\") class ExceptionUnsupportedVocabularyError(unittest.TestCase): def test_init(self): unsupported_vocabulary_error = exception.UnsupportedVocabularyError( accept_header=\"accept\", supported_types=[\"a\", \"b\", \"c\"]", "def test_init(self): unsupported_vocabulary_error = exception.UnsupportedVocabularyError( accept_header=\"accept\", supported_types=[\"a\", \"b\", \"c\"] ) self.assertEqual(unsupported_vocabulary_error.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unsupported_vocabulary_error.message,", "\"message\"') self.assertEqual(str(handler_exception), 'GET https://localhost:8080/url chrome \"message\"') handler_exception_without_request = exception.HandlerException(STATUS.NOT_FOUND, \"message\") self.assertEqual(handler_exception_without_request.http_status, STATUS.NOT_FOUND) self.assertEqual(handler_exception_without_request.message,", "STATUS.BAD_REQUEST) self.assertEqual(invalid_format.message, \"invalid value cat provided\") class ExceptionInvalidMetaValueError(unittest.TestCase): def test_init(self): invalid_meta_value = exception.InvalidMetaValueError()", "Content-Type in Request\") stack_trace = [{ \"requested_type\": \"text/plain\", \"supported_types\": \"application/json\" }] self.assertEqual(unsupported_content_type.stack_trace, stack_trace)", "exception.ResponseException, STATUS.INTERNAL_SERVER_ERROR, \"message\", \"string\") class ExceptionServiceUnavailable(unittest.TestCase): def test_init(self): service_unavailable = exception.ServiceUnavailable() self.assertEqual(service_unavailable.http_status, STATUS.SERVICE_UNAVAILABLE)", "STATUS.SERVICE_UNAVAILABLE) self.assertEqual(service_unavailable.message, \"Service Unavailable\") class ExceptionBadRequest(unittest.TestCase): def test_init(self): bad_request = exception.BadRequest() self.assertEqual(bad_request.http_status, 
STATUS.BAD_REQUEST)", "message\") self.assertEqual(authentication_custom.http_status, STATUS.UNAUTHORIZED) self.assertEqual(authentication_custom.message, \"Custom message\") class ExceptionAuthorizationError(unittest.TestCase): def test_init(self): authorization = exception.AuthorizationError(\"Role\")", "{ \"attribute_name\": \"attribute\", \"value\": \"value\", \"message\": \"message\", \"blueprint\": {\"key\": \"value\"} } ]) self.assertEqual(str(validation_error),", "self.assertEqual(moved_permanently.message, \"Moved Permanently\") class ExceptionPaymentRequired(unittest.TestCase): def test_init(self): payment_required = exception.PaymentRequired() self.assertEqual(payment_required.http_status, STATUS.PAYMENT_REQUIRED) self.assertEqual(payment_required.message,", "test_init(self): from prestans.types import Model class MyModel(Model): pass my_model = MyModel() response =", "= exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base_value.http_status, STATUS.OK) base_value.http_status = STATUS.NO_CONTENT self.assertEqual(base_value.http_status, STATUS.NO_CONTENT) def test_stack_trace(self): base", "failed: format\") self.assertEqual(str(serialization_failed_error), \"Serialization failed: format\") class ExceptionDeSerializationFailedError(unittest.TestCase): def test_init(self): deserialization_failed_error = exception.DeSerializationFailedError(\"format\")", "not one of these choices 1, 2, 5\") class ExceptionMinimumLengthError(unittest.TestCase): def test_init(self): exp", "the server\") self.assertEqual( str(error), \"DataAdapter failed to adapt name, Data Adapter failed to", "def test_init(self): unregistered_adapter = exception.UnregisteredAdapterError(\"namespace.Model\") self.assertEqual(unregistered_adapter.http_status, STATUS.BAD_REQUEST) self.assertEqual(unregistered_adapter.message, \"no registered adapters for data", "\"API does not implement the HTTP Verb\") self.assertEqual(unimplemented_verb.stack_trace, [{\"verb\": 
\"GET\"}]) class ExceptionNoEndpointError(unittest.TestCase): def", "def test_init(self): payment_required = exception.PaymentRequired() self.assertEqual(payment_required.http_status, STATUS.PAYMENT_REQUIRED) self.assertEqual(payment_required.message, \"Payment Required\") class ExceptionForbidden(unittest.TestCase): def", "STATUS.FORBIDDEN) self.assertEqual(authorization.message, \"Role is not allowed to access this resource\") class ExceptionSerializationFailedError(unittest.TestCase): def", "exception.MissingParameterError() self.assertEqual(missing_parameter.http_status, STATUS.BAD_REQUEST) self.assertEqual(missing_parameter.message, \"missing parameter\") class ExceptionInvalidFormatError(unittest.TestCase): def test_init(self): invalid_format = exception.InvalidFormatError(\"cat\")", "exception.RequiredAttributeError() self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"attribute is required and does not provide a default", "= exception.UnsupportedVocabularyError( accept_header=\"accept\", supported_types=[\"a\", \"b\", \"c\"] ) self.assertEqual(unsupported_vocabulary_error.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unsupported_vocabulary_error.message, \"Unsupported vocabulary in", "def test_init(self): deserialization_failed_error = exception.DeSerializationFailedError(\"format\") self.assertEqual(deserialization_failed_error.http_status, STATUS.NOT_FOUND) self.assertEqual(deserialization_failed_error.message, \"DeSerialization failed: format\") self.assertEqual(str(deserialization_failed_error), \"DeSerialization", "test_init(self): bad_request = exception.BadRequest() self.assertEqual(bad_request.http_status, STATUS.BAD_REQUEST) self.assertEqual(bad_request.message, \"Bad Request\") class ExceptionConflict(unittest.TestCase): def test_init(self):", "STATUS.BAD_REQUEST) self.assertEqual(default_msg.message, \"Parser Failed\") custom_msg = exception.ParseFailedError(\"custom\") 
self.assertEqual(custom_msg.http_status, STATUS.BAD_REQUEST) self.assertEqual(custom_msg.message, \"custom\") class ExceptionLessThanMinimumError(unittest.TestCase):", "ExceptionMinimumLengthError(unittest.TestCase): def test_init(self): exp = exception.MinimumLengthError(\"dog\", 5) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"length of value:", "base = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base.stack_trace, []) def test_push_trace(self): pass def test_message(self): base_value =", "base_value = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base_value.message, \"message\") def test_str(self): base = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base.http_status,", "exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base.stack_trace, []) def test_push_trace(self): pass def test_message(self): base_value = exception.Base(http_status=STATUS.OK, message=\"message\")", "self.assertEqual(exp.message, \"5 is more than the allowed maximum of 3\") class ExceptionInvalidChoiceError(unittest.TestCase): def", "\"Forbidden\") class ExceptionInternalServerError(unittest.TestCase): def test_init(self): internal_server_error = exception.InternalServerError() self.assertEqual(internal_server_error.http_status, STATUS.INTERNAL_SERVER_ERROR) self.assertEqual(internal_server_error.message, \"Internal Server", "\"Moved Permanently\") class ExceptionPaymentRequired(unittest.TestCase): def test_init(self): payment_required = exception.PaymentRequired() self.assertEqual(payment_required.http_status, STATUS.PAYMENT_REQUIRED) self.assertEqual(payment_required.message, \"Payment", "def test_init(self): exp = exception.MinimumLengthError(\"dog\", 5) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"length of value: dog", "test_init(self): exp = exception.InvalidChoiceError(3, [1, 2, 5]) 
self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"value 3 is", "5) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"3 is less than the allowed minimum of 5\")", "is less than the allowed minimum of 5\") class ExceptionMoreThanMaximumError(unittest.TestCase): def test_init(self): exp", "STATUS.NOT_FOUND) self.assertEqual(handler_exception_without_request.message, \"message\") self.assertEqual(handler_exception_without_request.log_message, \"message\") self.assertEqual(str(handler_exception_without_request), \"message\") class ExceptionRequestException(unittest.TestCase): def test_init(self): request_exception =", "than the allowed minimum of 5\") class ExceptionMoreThanMaximumError(unittest.TestCase): def test_init(self): exp = exception.MoreThanMaximumError(5,", "self.assertEqual(payment_required.http_status, STATUS.PAYMENT_REQUIRED) self.assertEqual(payment_required.message, \"Payment Required\") class ExceptionForbidden(unittest.TestCase): def test_init(self): forbidden = exception.Forbidden() self.assertEqual(forbidden.http_status,", "end-point\") class ExceptionAuthenticationError(unittest.TestCase): def test_init(self): authentication = exception.AuthenticationError() self.assertEqual(authentication.http_status, STATUS.UNAUTHORIZED) self.assertEqual(authentication.message, \"Authentication Error;", "class ExceptionMinimumLengthError(unittest.TestCase): def test_init(self): exp = exception.MinimumLengthError(\"dog\", 5) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"length of", "STATUS.NOT_IMPLEMENTED) self.assertEqual(unsupported_content_type.message, \"Unsupported Content-Type in Request\") stack_trace = [{ \"requested_type\": \"text/plain\", \"supported_types\": \"application/json\"", "for data model namespace.Model\") class ExceptionResponseException(unittest.TestCase): def test_init(self): from prestans.types import Model class", 
"STATUS.BAD_REQUEST) self.assertEqual(validation_error.message, \"message\") self.assertEqual(validation_error.stack_trace, [ { \"attribute_name\": \"attribute\", \"value\": \"value\", \"message\": \"message\", \"blueprint\":", "service is only available to authenticated\") authentication_custom = exception.AuthenticationError(\"Custom message\") self.assertEqual(authentication_custom.http_status, STATUS.UNAUTHORIZED) self.assertEqual(authentication_custom.message,", "self.assertEqual(bad_request.http_status, STATUS.BAD_REQUEST) self.assertEqual(bad_request.message, \"Bad Request\") class ExceptionConflict(unittest.TestCase): def test_init(self): conflict = exception.Conflict() self.assertEqual(conflict.http_status,", "STATUS.NO_CONTENT self.assertEqual(base_value.http_status, STATUS.NO_CONTENT) def test_stack_trace(self): base = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base.stack_trace, []) def test_push_trace(self):", "class ExceptionInvalidTypeError(unittest.TestCase): def test_init(self): exp = exception.InvalidTypeError(\"str\", \"int\") self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"data type", "ExceptionInvalidTypeError(unittest.TestCase): def test_init(self): exp = exception.InvalidTypeError(\"str\", \"int\") self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"data type str", "self.assertEqual(unauthorized.message, \"Unauthorized\") class ExceptionMovedPermanently(unittest.TestCase): def test_init(self): moved_permanently = exception.MovedPermanently() self.assertEqual(moved_permanently.http_status, STATUS.MOVED_PERMANENTLY) self.assertEqual(moved_permanently.message, \"Moved", "self.assertEqual(request_exception.http_status, STATUS.BAD_REQUEST) self.assertEqual(request_exception.message, \"bad request\") class ExceptionUnimplementedVerbError(unittest.TestCase): def test_init(self): unimplemented_verb = exception.UnimplementedVerbError(\"GET\") 
self.assertEqual(unimplemented_verb.http_status,", "charset = \"utf-8\" serializers = [JSON()] default_serializer = JSON() request_environ = { \"REQUEST_METHOD\":", "exception.Unauthorized() self.assertEqual(unauthorized.http_status, STATUS.UNAUTHORIZED) self.assertEqual(unauthorized.message, \"Unauthorized\") class ExceptionMovedPermanently(unittest.TestCase): def test_init(self): moved_permanently = exception.MovedPermanently() self.assertEqual(moved_permanently.http_status,", "self.assertEqual(payment_required.message, \"Payment Required\") class ExceptionForbidden(unittest.TestCase): def test_init(self): forbidden = exception.Forbidden() self.assertEqual(forbidden.http_status, STATUS.FORBIDDEN) self.assertEqual(forbidden.message,", "STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"length of value: dog has to be less than 2\") class", "test_init(self): unsupported_content_type = exception.UnsupportedContentTypeError(\"text/plain\", \"application/json\") self.assertEqual(unsupported_content_type.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unsupported_content_type.message, \"Unsupported Content-Type in Request\") stack_trace", "[ { \"attribute_name\": \"attribute\", \"value\": \"value\", \"message\": \"message\", \"blueprint\": {\"key\": \"value\"} } ])", "\"DeSerialization failed: format\") class ExceptionAttributeFilterDiffers(unittest.TestCase): def test_init(self): attribute_filter_differs = exception.AttributeFilterDiffers([\"cat\", \"dog\"]) self.assertEqual(attribute_filter_differs.http_status, STATUS.BAD_REQUEST)", "STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"3 is less than the allowed minimum of 5\") class ExceptionMoreThanMaximumError(unittest.TestCase):", "ExceptionMovedPermanently(unittest.TestCase): def test_init(self): moved_permanently = exception.MovedPermanently() self.assertEqual(moved_permanently.http_status, STATUS.MOVED_PERMANENTLY) self.assertEqual(moved_permanently.message, \"Moved Permanently\") class 
ExceptionPaymentRequired(unittest.TestCase):", "= request self.assertEqual(handler_exception.http_status, STATUS.FORBIDDEN) self.assertEqual(handler_exception.message, \"message\") self.assertEqual(handler_exception.request, request) self.assertEqual(handler_exception.log_message, 'GET https://localhost:8080/url chrome \"message\"')", "unsupported_content_type = exception.UnsupportedContentTypeError(\"text/plain\", \"application/json\") self.assertEqual(unsupported_content_type.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unsupported_content_type.message, \"Unsupported Content-Type in Request\") stack_trace =", "exception.MinimumLengthError(\"dog\", 5) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"length of value: dog has to be greater", "response = exception.ResponseException(STATUS.OK, \"message\", my_model) self.assertEqual(response.http_status, STATUS.OK) self.assertEqual(response.message, \"message\") self.assertEqual(response.response_model, my_model) self.assertRaises(TypeError, exception.ResponseException,", "JSON() request_environ = { \"REQUEST_METHOD\": VERB.GET, \"PATH_INFO\": \"/url\", \"HTTP_USER_AGENT\": \"chrome\", \"wsgi.url_scheme\": \"https\", \"SERVER_NAME\":", "Request\") stack_trace = [{ \"requested_type\": \"text/plain\", \"supported_types\": \"application/json\" }] self.assertEqual(unsupported_content_type.stack_trace, stack_trace) class ExceptionValidationError(unittest.TestCase):", "def test_init(self): error = exception.InconsistentPersistentDataError(\"name\", \"error message\") self.assertEqual(error.http_status, STATUS.INTERNAL_SERVER_ERROR) self.assertEqual(error.message, \"Data Adapter failed", "import STATUS from prestans.http import VERB from prestans import exception class ExceptionBase(unittest.TestCase): def", "import logging logging.basicConfig() self.logger = logging.getLogger(\"prestans\") from prestans.deserializer import JSON charset = \"utf-8\"", "test_init(self): exp = 
exception.RequiredAttributeError() self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"attribute is required and does not", "exception.RequestException(STATUS.BAD_REQUEST, \"bad request\") self.assertEqual(request_exception.http_status, STATUS.BAD_REQUEST) self.assertEqual(request_exception.message, \"bad request\") class ExceptionUnimplementedVerbError(unittest.TestCase): def test_init(self): unimplemented_verb", "meta value\") class ExceptionUnregisteredAdapterError(unittest.TestCase): def test_init(self): unregistered_adapter = exception.UnregisteredAdapterError(\"namespace.Model\") self.assertEqual(unregistered_adapter.http_status, STATUS.BAD_REQUEST) self.assertEqual(unregistered_adapter.message, \"no", "class ExceptionUnsupportedContentTypeError(unittest.TestCase): def test_init(self): unsupported_content_type = exception.UnsupportedContentTypeError(\"text/plain\", \"application/json\") self.assertEqual(unsupported_content_type.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unsupported_content_type.message, \"Unsupported Content-Type", "ExceptionAttributeFilterDiffers(unittest.TestCase): def test_init(self): attribute_filter_differs = exception.AttributeFilterDiffers([\"cat\", \"dog\"]) self.assertEqual(attribute_filter_differs.http_status, STATUS.BAD_REQUEST) self.assertEqual( attribute_filter_differs.message, \"attribute filter", "class ExceptionHandlerException(unittest.TestCase): def test_init(self): from prestans.rest import Request import logging logging.basicConfig() self.logger =", "test_init(self): validation_error = exception.ValidationError( message=\"message\", attribute_name=\"attribute\", value=\"value\", blueprint={\"key\": \"value\"} ) self.assertEqual(validation_error.http_status, STATUS.BAD_REQUEST) self.assertEqual(validation_error.message,", "this end-point\") class ExceptionAuthenticationError(unittest.TestCase): def test_init(self): authentication = exception.AuthenticationError() 
self.assertEqual(authentication.http_status, STATUS.UNAUTHORIZED) self.assertEqual(authentication.message, \"Authentication", "= exception.InconsistentPersistentDataError(\"name\", \"error message\") self.assertEqual(error.http_status, STATUS.INTERNAL_SERVER_ERROR) self.assertEqual(error.message, \"Data Adapter failed to validate stored", "unregistered_adapter = exception.UnregisteredAdapterError(\"namespace.Model\") self.assertEqual(unregistered_adapter.http_status, STATUS.BAD_REQUEST) self.assertEqual(unregistered_adapter.message, \"no registered adapters for data model namespace.Model\")", "request_environ = { \"REQUEST_METHOD\": VERB.GET, \"PATH_INFO\": \"/url\", \"HTTP_USER_AGENT\": \"chrome\", \"wsgi.url_scheme\": \"https\", \"SERVER_NAME\": \"localhost\",", "\"Custom message\") class ExceptionAuthorizationError(unittest.TestCase): def test_init(self): authorization = exception.AuthorizationError(\"Role\") self.assertEqual(authorization.http_status, STATUS.FORBIDDEN) self.assertEqual(authorization.message, \"Role", "logging.getLogger(\"prestans\") from prestans.deserializer import JSON charset = \"utf-8\" serializers = [JSON()] default_serializer =", "def test_init(self): missing_parameter = exception.MissingParameterError() self.assertEqual(missing_parameter.http_status, STATUS.BAD_REQUEST) self.assertEqual(missing_parameter.message, \"missing parameter\") class ExceptionInvalidFormatError(unittest.TestCase): def", "= exception.ResponseException(STATUS.OK, \"message\", my_model) self.assertEqual(response.http_status, STATUS.OK) self.assertEqual(response.message, \"message\") self.assertEqual(response.response_model, my_model) self.assertRaises(TypeError, exception.ResponseException, STATUS.INTERNAL_SERVER_ERROR,", "STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"5 is more than the allowed maximum of 3\") class ExceptionInvalidChoiceError(unittest.TestCase):", "default value\") class ExceptionParseFailedError(unittest.TestCase): def test_init(self): 
default_msg = exception.ParseFailedError() self.assertEqual(default_msg.http_status, STATUS.BAD_REQUEST) self.assertEqual(default_msg.message, \"Parser", "exp = exception.MoreThanMaximumError(5, 3) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"5 is more than the allowed", "STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"value 3 is not one of these choices 1, 2, 5\")", "= exception.Forbidden() self.assertEqual(forbidden.http_status, STATUS.FORBIDDEN) self.assertEqual(forbidden.message, \"Forbidden\") class ExceptionInternalServerError(unittest.TestCase): def test_init(self): internal_server_error = exception.InternalServerError()", "VERB from prestans import exception class ExceptionBase(unittest.TestCase): def test_http_status(self): base_value = exception.Base(http_status=STATUS.OK, message=\"message\")", "than 2\") class ExceptionInvalidTypeError(unittest.TestCase): def test_init(self): exp = exception.InvalidTypeError(\"str\", \"int\") self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message,", "= Request( environ=request_environ, charset=charset, logger=self.logger, deserializers=serializers, default_deserializer=default_serializer ) handler_exception = exception.HandlerException(STATUS.FORBIDDEN, \"message\") handler_exception.request", "self.assertEqual(missing_parameter.http_status, STATUS.BAD_REQUEST) self.assertEqual(missing_parameter.message, \"missing parameter\") class ExceptionInvalidFormatError(unittest.TestCase): def test_init(self): invalid_format = exception.InvalidFormatError(\"cat\") self.assertEqual(invalid_format.http_status,", "STATUS.FORBIDDEN) self.assertEqual(handler_exception.message, \"message\") self.assertEqual(handler_exception.request, request) self.assertEqual(handler_exception.log_message, 'GET https://localhost:8080/url chrome \"message\"') self.assertEqual(str(handler_exception), 'GET https://localhost:8080/url", 
"self.assertEqual(unregistered_adapter.http_status, STATUS.BAD_REQUEST) self.assertEqual(unregistered_adapter.message, \"no registered adapters for data model namespace.Model\") class ExceptionResponseException(unittest.TestCase): def", "STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"data type str given, expected int\") class ExceptionMissingParameterError(unittest.TestCase): def test_init(self): missing_parameter", "the Accept header\") stack_trace = [{ \"accept_header\": \"accept\", \"supported_types\": [\"a\", \"b\", \"c\"] }]", "Found\") class ExceptionUnauthorized(unittest.TestCase): def test_init(self): unauthorized = exception.Unauthorized() self.assertEqual(unauthorized.http_status, STATUS.UNAUTHORIZED) self.assertEqual(unauthorized.message, \"Unauthorized\") class", "exception.AuthenticationError() self.assertEqual(authentication.http_status, STATUS.UNAUTHORIZED) self.assertEqual(authentication.message, \"Authentication Error; service is only available to authenticated\") authentication_custom", "= exception.InvalidFormatError(\"cat\") self.assertEqual(invalid_format.http_status, STATUS.BAD_REQUEST) self.assertEqual(invalid_format.message, \"invalid value cat provided\") class ExceptionInvalidMetaValueError(unittest.TestCase): def test_init(self):", "to authenticated\") authentication_custom = exception.AuthenticationError(\"Custom message\") self.assertEqual(authentication_custom.http_status, STATUS.UNAUTHORIZED) self.assertEqual(authentication_custom.message, \"Custom message\") class ExceptionAuthorizationError(unittest.TestCase):", "default_deserializer=default_serializer ) handler_exception = exception.HandlerException(STATUS.FORBIDDEN, \"message\") handler_exception.request = request self.assertEqual(handler_exception.http_status, STATUS.FORBIDDEN) self.assertEqual(handler_exception.message, \"message\")", "exception.MoreThanMaximumError(5, 3) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"5 is more than 
the allowed maximum of", "in the Accept header\") stack_trace = [{ \"accept_header\": \"accept\", \"supported_types\": [\"a\", \"b\", \"c\"]", "to validate stored data on the server\") self.assertEqual( str(error), \"DataAdapter failed to adapt", "ExceptionUnsupportedVocabularyError(unittest.TestCase): def test_init(self): unsupported_vocabulary_error = exception.UnsupportedVocabularyError( accept_header=\"accept\", supported_types=[\"a\", \"b\", \"c\"] ) self.assertEqual(unsupported_vocabulary_error.http_status, STATUS.NOT_IMPLEMENTED)", "test_init(self): attribute_filter_differs = exception.AttributeFilterDiffers([\"cat\", \"dog\"]) self.assertEqual(attribute_filter_differs.http_status, STATUS.BAD_REQUEST) self.assertEqual( attribute_filter_differs.message, \"attribute filter contains attributes", "class ExceptionMaximumLengthError(unittest.TestCase): def test_init(self): exp = exception.MaximumLengthError(\"dog\", 2) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"length of", "exception.HandlerException(STATUS.FORBIDDEN, \"message\") handler_exception.request = request self.assertEqual(handler_exception.http_status, STATUS.FORBIDDEN) self.assertEqual(handler_exception.message, \"message\") self.assertEqual(handler_exception.request, request) self.assertEqual(handler_exception.log_message, 'GET", "class ExceptionForbidden(unittest.TestCase): def test_init(self): forbidden = exception.Forbidden() self.assertEqual(forbidden.http_status, STATUS.FORBIDDEN) self.assertEqual(forbidden.message, \"Forbidden\") class ExceptionInternalServerError(unittest.TestCase):", "STATUS.BAD_REQUEST) self.assertEqual( attribute_filter_differs.message, \"attribute filter contains attributes (cat, dog) that are not part", ") self.assertEqual(error.stack_trace, [{'exception_message': \"error message\", 'attribute_name': \"name\"}]) class ExceptionDataValidationException(unittest.TestCase): def test_init(self): exp =", "less than the allowed minimum 
of 5\") class ExceptionMoreThanMaximumError(unittest.TestCase): def test_init(self): exp =", "self.assertEqual(bad_request.message, \"Bad Request\") class ExceptionConflict(unittest.TestCase): def test_init(self): conflict = exception.Conflict() self.assertEqual(conflict.http_status, STATUS.CONFLICT) self.assertEqual(conflict.message,", "message\") class ExceptionAuthorizationError(unittest.TestCase): def test_init(self): authorization = exception.AuthorizationError(\"Role\") self.assertEqual(authorization.http_status, STATUS.FORBIDDEN) self.assertEqual(authorization.message, \"Role is", "self.assertEqual(validation_error.message, \"message\") self.assertEqual(validation_error.stack_trace, [ { \"attribute_name\": \"attribute\", \"value\": \"value\", \"message\": \"message\", \"blueprint\": {\"key\":", "def test_init(self): unimplemented_verb = exception.UnimplementedVerbError(\"GET\") self.assertEqual(unimplemented_verb.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unimplemented_verb.message, \"API does not implement the", "deserializers=serializers, default_deserializer=default_serializer ) handler_exception = exception.HandlerException(STATUS.FORBIDDEN, \"message\") handler_exception.request = request self.assertEqual(handler_exception.http_status, STATUS.FORBIDDEN) self.assertEqual(handler_exception.message,", "\"Unsupported Content-Type in Request\") stack_trace = [{ \"requested_type\": \"text/plain\", \"supported_types\": \"application/json\" }] self.assertEqual(unsupported_content_type.stack_trace,", "\"missing parameter\") class ExceptionInvalidFormatError(unittest.TestCase): def test_init(self): invalid_format = exception.InvalidFormatError(\"cat\") self.assertEqual(invalid_format.http_status, STATUS.BAD_REQUEST) self.assertEqual(invalid_format.message, \"invalid", "self.assertEqual(authorization.message, \"Role is not allowed to access this resource\") class ExceptionSerializationFailedError(unittest.TestCase): def test_init(self):", 
"exception.NotFound() self.assertEqual(not_found.http_status, STATUS.NOT_FOUND) self.assertEqual(not_found.message, \"Not Found\") class ExceptionUnauthorized(unittest.TestCase): def test_init(self): unauthorized = exception.Unauthorized()", "\"message\") self.assertEqual(handler_exception_without_request.http_status, STATUS.NOT_FOUND) self.assertEqual(handler_exception_without_request.message, \"message\") self.assertEqual(handler_exception_without_request.log_message, \"message\") self.assertEqual(str(handler_exception_without_request), \"message\") class ExceptionRequestException(unittest.TestCase): def test_init(self):", "has to be less than 2\") class ExceptionInvalidTypeError(unittest.TestCase): def test_init(self): exp = exception.InvalidTypeError(\"str\",", "Model class MyModel(Model): pass my_model = MyModel() response = exception.ResponseException(STATUS.OK, \"message\", my_model) self.assertEqual(response.http_status,", "class ExceptionInternalServerError(unittest.TestCase): def test_init(self): internal_server_error = exception.InternalServerError() self.assertEqual(internal_server_error.http_status, STATUS.INTERNAL_SERVER_ERROR) self.assertEqual(internal_server_error.message, \"Internal Server Error\")", "\"b\", \"c\"] }] self.assertEqual(unsupported_vocabulary_error.stack_trace, stack_trace) class ExceptionUnsupportedContentTypeError(unittest.TestCase): def test_init(self): unsupported_content_type = exception.UnsupportedContentTypeError(\"text/plain\", \"application/json\")", "import JSON charset = \"utf-8\" serializers = [JSON()] default_serializer = JSON() request_environ =", "\"message\") self.assertEqual(str(handler_exception_without_request), \"message\") class ExceptionRequestException(unittest.TestCase): def test_init(self): request_exception = exception.RequestException(STATUS.BAD_REQUEST, \"bad request\") self.assertEqual(request_exception.http_status,", "test_init(self): invalid_meta_value = exception.InvalidMetaValueError() 
self.assertEqual(invalid_meta_value.http_status, STATUS.BAD_REQUEST) self.assertEqual(invalid_meta_value.message, \"invalid meta value\") class ExceptionUnregisteredAdapterError(unittest.TestCase): def", "\"message\", \"string\") class ExceptionServiceUnavailable(unittest.TestCase): def test_init(self): service_unavailable = exception.ServiceUnavailable() self.assertEqual(service_unavailable.http_status, STATUS.SERVICE_UNAVAILABLE) self.assertEqual(service_unavailable.message, \"Service", "test_init(self): moved_permanently = exception.MovedPermanently() self.assertEqual(moved_permanently.http_status, STATUS.MOVED_PERMANENTLY) self.assertEqual(moved_permanently.message, \"Moved Permanently\") class ExceptionPaymentRequired(unittest.TestCase): def test_init(self):", "\"message\") class ExceptionRequiredAttributeError(unittest.TestCase): def test_init(self): exp = exception.RequiredAttributeError() self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"attribute is", "\"3 is less than the allowed minimum of 5\") class ExceptionMoreThanMaximumError(unittest.TestCase): def test_init(self):", "self.assertEqual(error.stack_trace, [{'exception_message': \"error message\", 'attribute_name': \"name\"}]) class ExceptionDataValidationException(unittest.TestCase): def test_init(self): exp = exception.DataValidationException(\"message\")", "self.assertEqual( str(error), \"DataAdapter failed to adapt name, Data Adapter failed to validate stored", "int\") class ExceptionMissingParameterError(unittest.TestCase): def test_init(self): missing_parameter = exception.MissingParameterError() self.assertEqual(missing_parameter.http_status, STATUS.BAD_REQUEST) self.assertEqual(missing_parameter.message, \"missing parameter\")", "self.assertEqual(validation_error.stack_trace, [ { \"attribute_name\": \"attribute\", \"value\": \"value\", \"message\": \"message\", \"blueprint\": {\"key\": \"value\"} }", "exp = exception.MinimumLengthError(\"dog\", 5) 
self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"length of value: dog has to", "of value: dog has to be less than 2\") class ExceptionInvalidTypeError(unittest.TestCase): def test_init(self):", "stack_trace) class ExceptionValidationError(unittest.TestCase): def test_init(self): validation_error = exception.ValidationError( message=\"message\", attribute_name=\"attribute\", value=\"value\", blueprint={\"key\": \"value\"}", "attributes (cat, dog) that are not part of template\" ) class ExceptionInconsistentPersistentDataError(unittest.TestCase): def", "STATUS.BAD_REQUEST) self.assertEqual(custom_msg.message, \"custom\") class ExceptionLessThanMinimumError(unittest.TestCase): def test_init(self): exp = exception.LessThanMinimumError(3, 5) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST)", "exp = exception.InvalidTypeError(\"str\", \"int\") self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"data type str given, expected int\")", "\"value 3 is not one of these choices 1, 2, 5\") class ExceptionMinimumLengthError(unittest.TestCase):", "server\" ) self.assertEqual(error.stack_trace, [{'exception_message': \"error message\", 'attribute_name': \"name\"}]) class ExceptionDataValidationException(unittest.TestCase): def test_init(self): exp", "ExceptionAuthorizationError(unittest.TestCase): def test_init(self): authorization = exception.AuthorizationError(\"Role\") self.assertEqual(authorization.http_status, STATUS.FORBIDDEN) self.assertEqual(authorization.message, \"Role is not allowed", "self.assertEqual(exp.message, \"length of value: dog has to be greater than 5\") class ExceptionMaximumLengthError(unittest.TestCase):", "STATUS.NO_CONTENT) def test_stack_trace(self): base = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base.stack_trace, []) def test_push_trace(self): pass def", "Verb\") self.assertEqual(unimplemented_verb.stack_trace, [{\"verb\": \"GET\"}]) class 
ExceptionNoEndpointError(unittest.TestCase): def test_init(self): no_endpoint = exception.NoEndpointError() self.assertEqual(no_endpoint.http_status, STATUS.NOT_FOUND)", "test_init(self): conflict = exception.Conflict() self.assertEqual(conflict.http_status, STATUS.CONFLICT) self.assertEqual(conflict.message, \"Conflict\") class ExceptionNotFound(unittest.TestCase): def test_init(self): not_found", "\"blueprint\": {\"key\": \"value\"} } ]) self.assertEqual(str(validation_error), \"attribute message\") class ExceptionHandlerException(unittest.TestCase): def test_init(self): from", "Request( environ=request_environ, charset=charset, logger=self.logger, deserializers=serializers, default_deserializer=default_serializer ) handler_exception = exception.HandlerException(STATUS.FORBIDDEN, \"message\") handler_exception.request =", "exception.InvalidMetaValueError() self.assertEqual(invalid_meta_value.http_status, STATUS.BAD_REQUEST) self.assertEqual(invalid_meta_value.message, \"invalid meta value\") class ExceptionUnregisteredAdapterError(unittest.TestCase): def test_init(self): unregistered_adapter =", "\"/url\", \"HTTP_USER_AGENT\": \"chrome\", \"wsgi.url_scheme\": \"https\", \"SERVER_NAME\": \"localhost\", \"SERVER_PORT\": \"8080\" } request = Request(", "= [JSON()] default_serializer = JSON() request_environ = { \"REQUEST_METHOD\": VERB.GET, \"PATH_INFO\": \"/url\", \"HTTP_USER_AGENT\":", "STATUS.UNAUTHORIZED) self.assertEqual(unauthorized.message, \"Unauthorized\") class ExceptionMovedPermanently(unittest.TestCase): def test_init(self): moved_permanently = exception.MovedPermanently() self.assertEqual(moved_permanently.http_status, STATUS.MOVED_PERMANENTLY) self.assertEqual(moved_permanently.message,", "\"string\") class ExceptionServiceUnavailable(unittest.TestCase): def test_init(self): service_unavailable = exception.ServiceUnavailable() self.assertEqual(service_unavailable.http_status, STATUS.SERVICE_UNAVAILABLE) self.assertEqual(service_unavailable.message, 
\"Service Unavailable\")", "Required\") class ExceptionForbidden(unittest.TestCase): def test_init(self): forbidden = exception.Forbidden() self.assertEqual(forbidden.http_status, STATUS.FORBIDDEN) self.assertEqual(forbidden.message, \"Forbidden\") class", "def test_init(self): invalid_format = exception.InvalidFormatError(\"cat\") self.assertEqual(invalid_format.http_status, STATUS.BAD_REQUEST) self.assertEqual(invalid_format.message, \"invalid value cat provided\") class", "def test_init(self): default_msg = exception.ParseFailedError() self.assertEqual(default_msg.http_status, STATUS.BAD_REQUEST) self.assertEqual(default_msg.message, \"Parser Failed\") custom_msg = exception.ParseFailedError(\"custom\")", "registered adapters for data model namespace.Model\") class ExceptionResponseException(unittest.TestCase): def test_init(self): from prestans.types import", "import VERB from prestans import exception class ExceptionBase(unittest.TestCase): def test_http_status(self): base_value = exception.Base(http_status=STATUS.OK,", "self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"value 3 is not one of these choices 1, 2,", "\"message\") def test_str(self): base = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base.http_status, STATUS.OK) self.assertEqual(str(base.message), \"message\") class ExceptionUnsupportedVocabularyError(unittest.TestCase):", "provided\") class ExceptionInvalidMetaValueError(unittest.TestCase): def test_init(self): invalid_meta_value = exception.InvalidMetaValueError() self.assertEqual(invalid_meta_value.http_status, STATUS.BAD_REQUEST) self.assertEqual(invalid_meta_value.message, \"invalid meta", "= exception.MinimumLengthError(\"dog\", 5) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"length of value: dog has to be", "= exception.NoEndpointError() self.assertEqual(no_endpoint.http_status, STATUS.NOT_FOUND) self.assertEqual(no_endpoint.message, 
\"API does not provide this end-point\") class ExceptionAuthenticationError(unittest.TestCase):", "\"attribute message\") class ExceptionHandlerException(unittest.TestCase): def test_init(self): from prestans.rest import Request import logging logging.basicConfig()", "test_init(self): invalid_format = exception.InvalidFormatError(\"cat\") self.assertEqual(invalid_format.http_status, STATUS.BAD_REQUEST) self.assertEqual(invalid_format.message, \"invalid value cat provided\") class ExceptionInvalidMetaValueError(unittest.TestCase):", "self.assertEqual(unimplemented_verb.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unimplemented_verb.message, \"API does not implement the HTTP Verb\") self.assertEqual(unimplemented_verb.stack_trace, [{\"verb\": \"GET\"}])", "exception.InvalidFormatError(\"cat\") self.assertEqual(invalid_format.http_status, STATUS.BAD_REQUEST) self.assertEqual(invalid_format.message, \"invalid value cat provided\") class ExceptionInvalidMetaValueError(unittest.TestCase): def test_init(self): invalid_meta_value", "to access this resource\") class ExceptionSerializationFailedError(unittest.TestCase): def test_init(self): serialization_failed_error = exception.SerializationFailedError(\"format\") self.assertEqual(serialization_failed_error.http_status, STATUS.NOT_FOUND)", "class ExceptionNoEndpointError(unittest.TestCase): def test_init(self): no_endpoint = exception.NoEndpointError() self.assertEqual(no_endpoint.http_status, STATUS.NOT_FOUND) self.assertEqual(no_endpoint.message, \"API does not", "implement the HTTP Verb\") self.assertEqual(unimplemented_verb.stack_trace, [{\"verb\": \"GET\"}]) class ExceptionNoEndpointError(unittest.TestCase): def test_init(self): no_endpoint =", "\"message\") self.assertEqual(response.response_model, my_model) self.assertRaises(TypeError, exception.ResponseException, STATUS.INTERNAL_SERVER_ERROR, \"message\", \"string\") class ExceptionServiceUnavailable(unittest.TestCase): def test_init(self): 
service_unavailable", "attribute_filter_differs = exception.AttributeFilterDiffers([\"cat\", \"dog\"]) self.assertEqual(attribute_filter_differs.http_status, STATUS.BAD_REQUEST) self.assertEqual( attribute_filter_differs.message, \"attribute filter contains attributes (cat,", "STATUS.BAD_REQUEST) self.assertEqual(unregistered_adapter.message, \"no registered adapters for data model namespace.Model\") class ExceptionResponseException(unittest.TestCase): def test_init(self):", "def test_init(self): moved_permanently = exception.MovedPermanently() self.assertEqual(moved_permanently.http_status, STATUS.MOVED_PERMANENTLY) self.assertEqual(moved_permanently.message, \"Moved Permanently\") class ExceptionPaymentRequired(unittest.TestCase): def", "my_model = MyModel() response = exception.ResponseException(STATUS.OK, \"message\", my_model) self.assertEqual(response.http_status, STATUS.OK) self.assertEqual(response.message, \"message\") self.assertEqual(response.response_model,", "STATUS from prestans.http import VERB from prestans import exception class ExceptionBase(unittest.TestCase): def test_http_status(self):", "= exception.DataValidationException(\"message\") self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"message\") class ExceptionRequiredAttributeError(unittest.TestCase): def test_init(self): exp = exception.RequiredAttributeError()", "handler_exception_without_request = exception.HandlerException(STATUS.NOT_FOUND, \"message\") self.assertEqual(handler_exception_without_request.http_status, STATUS.NOT_FOUND) self.assertEqual(handler_exception_without_request.message, \"message\") self.assertEqual(handler_exception_without_request.log_message, \"message\") self.assertEqual(str(handler_exception_without_request), \"message\") class", "does not provide this end-point\") class ExceptionAuthenticationError(unittest.TestCase): def test_init(self): authentication = exception.AuthenticationError() 
self.assertEqual(authentication.http_status,", "]) self.assertEqual(str(validation_error), \"attribute message\") class ExceptionHandlerException(unittest.TestCase): def test_init(self): from prestans.rest import Request import", "self.assertEqual(handler_exception.message, \"message\") self.assertEqual(handler_exception.request, request) self.assertEqual(handler_exception.log_message, 'GET https://localhost:8080/url chrome \"message\"') self.assertEqual(str(handler_exception), 'GET https://localhost:8080/url chrome", "no_endpoint = exception.NoEndpointError() self.assertEqual(no_endpoint.http_status, STATUS.NOT_FOUND) self.assertEqual(no_endpoint.message, \"API does not provide this end-point\") class", "\"SERVER_NAME\": \"localhost\", \"SERVER_PORT\": \"8080\" } request = Request( environ=request_environ, charset=charset, logger=self.logger, deserializers=serializers, default_deserializer=default_serializer", "exp = exception.InvalidChoiceError(3, [1, 2, 5]) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"value 3 is not", "class ExceptionDeSerializationFailedError(unittest.TestCase): def test_init(self): deserialization_failed_error = exception.DeSerializationFailedError(\"format\") self.assertEqual(deserialization_failed_error.http_status, STATUS.NOT_FOUND) self.assertEqual(deserialization_failed_error.message, \"DeSerialization failed: format\")", "error = exception.InconsistentPersistentDataError(\"name\", \"error message\") self.assertEqual(error.http_status, STATUS.INTERNAL_SERVER_ERROR) self.assertEqual(error.message, \"Data Adapter failed to validate", "test_stack_trace(self): base = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base.stack_trace, []) def test_push_trace(self): pass def test_message(self): base_value", "[JSON()] default_serializer = JSON() request_environ = { \"REQUEST_METHOD\": VERB.GET, \"PATH_INFO\": \"/url\", \"HTTP_USER_AGENT\": \"chrome\",", "logging 
logging.basicConfig() self.logger = logging.getLogger(\"prestans\") from prestans.deserializer import JSON charset = \"utf-8\" serializers", "namespace.Model\") class ExceptionResponseException(unittest.TestCase): def test_init(self): from prestans.types import Model class MyModel(Model): pass my_model", "\"Service Unavailable\") class ExceptionBadRequest(unittest.TestCase): def test_init(self): bad_request = exception.BadRequest() self.assertEqual(bad_request.http_status, STATUS.BAD_REQUEST) self.assertEqual(bad_request.message, \"Bad", "class ExceptionInvalidChoiceError(unittest.TestCase): def test_init(self): exp = exception.InvalidChoiceError(3, [1, 2, 5]) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message,", "base_value.http_status = STATUS.NO_CONTENT self.assertEqual(base_value.http_status, STATUS.NO_CONTENT) def test_stack_trace(self): base = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base.stack_trace, [])", "\"Conflict\") class ExceptionNotFound(unittest.TestCase): def test_init(self): not_found = exception.NotFound() self.assertEqual(not_found.http_status, STATUS.NOT_FOUND) self.assertEqual(not_found.message, \"Not Found\")", "= exception.ParseFailedError(\"custom\") self.assertEqual(custom_msg.http_status, STATUS.BAD_REQUEST) self.assertEqual(custom_msg.message, \"custom\") class ExceptionLessThanMinimumError(unittest.TestCase): def test_init(self): exp = exception.LessThanMinimumError(3,", "\"DataAdapter failed to adapt name, Data Adapter failed to validate stored data on", "missing_parameter = exception.MissingParameterError() self.assertEqual(missing_parameter.http_status, STATUS.BAD_REQUEST) self.assertEqual(missing_parameter.message, \"missing parameter\") class ExceptionInvalidFormatError(unittest.TestCase): def test_init(self): invalid_format", "logging.basicConfig() self.logger = logging.getLogger(\"prestans\") from prestans.deserializer import JSON charset = \"utf-8\" serializers 
=", "= logging.getLogger(\"prestans\") from prestans.deserializer import JSON charset = \"utf-8\" serializers = [JSON()] default_serializer", "STATUS.BAD_REQUEST) self.assertEqual(missing_parameter.message, \"missing parameter\") class ExceptionInvalidFormatError(unittest.TestCase): def test_init(self): invalid_format = exception.InvalidFormatError(\"cat\") self.assertEqual(invalid_format.http_status, STATUS.BAD_REQUEST)", "2, 5\") class ExceptionMinimumLengthError(unittest.TestCase): def test_init(self): exp = exception.MinimumLengthError(\"dog\", 5) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message,", "message=\"message\") self.assertEqual(base_value.message, \"message\") def test_str(self): base = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base.http_status, STATUS.OK) self.assertEqual(str(base.message), \"message\")", "logger=self.logger, deserializers=serializers, default_deserializer=default_serializer ) handler_exception = exception.HandlerException(STATUS.FORBIDDEN, \"message\") handler_exception.request = request self.assertEqual(handler_exception.http_status, STATUS.FORBIDDEN)", "\"Role is not allowed to access this resource\") class ExceptionSerializationFailedError(unittest.TestCase): def test_init(self): serialization_failed_error", "self.assertEqual(str(serialization_failed_error), \"Serialization failed: format\") class ExceptionDeSerializationFailedError(unittest.TestCase): def test_init(self): deserialization_failed_error = exception.DeSerializationFailedError(\"format\") self.assertEqual(deserialization_failed_error.http_status, STATUS.NOT_FOUND)", "self.assertEqual(forbidden.http_status, STATUS.FORBIDDEN) self.assertEqual(forbidden.message, \"Forbidden\") class ExceptionInternalServerError(unittest.TestCase): def test_init(self): internal_server_error = exception.InternalServerError() self.assertEqual(internal_server_error.http_status, STATUS.INTERNAL_SERVER_ERROR)", "template\" ) 
class ExceptionInconsistentPersistentDataError(unittest.TestCase): def test_init(self): error = exception.InconsistentPersistentDataError(\"name\", \"error message\") self.assertEqual(error.http_status, STATUS.INTERNAL_SERVER_ERROR)", "failed to adapt name, Data Adapter failed to validate stored data on the", "Error; service is only available to authenticated\") authentication_custom = exception.AuthenticationError(\"Custom message\") self.assertEqual(authentication_custom.http_status, STATUS.UNAUTHORIZED)", "expected int\") class ExceptionMissingParameterError(unittest.TestCase): def test_init(self): missing_parameter = exception.MissingParameterError() self.assertEqual(missing_parameter.http_status, STATUS.BAD_REQUEST) self.assertEqual(missing_parameter.message, \"missing", "class ExceptionBadRequest(unittest.TestCase): def test_init(self): bad_request = exception.BadRequest() self.assertEqual(bad_request.http_status, STATUS.BAD_REQUEST) self.assertEqual(bad_request.message, \"Bad Request\") class", "exception.UnsupportedVocabularyError( accept_header=\"accept\", supported_types=[\"a\", \"b\", \"c\"] ) self.assertEqual(unsupported_vocabulary_error.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unsupported_vocabulary_error.message, \"Unsupported vocabulary in the", "\"Data Adapter failed to validate stored data on the server\") self.assertEqual( str(error), \"DataAdapter", "is more than the allowed maximum of 3\") class ExceptionInvalidChoiceError(unittest.TestCase): def test_init(self): exp", "ExceptionConflict(unittest.TestCase): def test_init(self): conflict = exception.Conflict() self.assertEqual(conflict.http_status, STATUS.CONFLICT) self.assertEqual(conflict.message, \"Conflict\") class ExceptionNotFound(unittest.TestCase): def", "class ExceptionAttributeFilterDiffers(unittest.TestCase): def test_init(self): attribute_filter_differs = exception.AttributeFilterDiffers([\"cat\", \"dog\"]) self.assertEqual(attribute_filter_differs.http_status, 
STATUS.BAD_REQUEST) self.assertEqual( attribute_filter_differs.message, \"attribute", "class ExceptionMoreThanMaximumError(unittest.TestCase): def test_init(self): exp = exception.MoreThanMaximumError(5, 3) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"5 is", "Adapter failed to validate stored data on the server\" ) self.assertEqual(error.stack_trace, [{'exception_message': \"error", "self.assertEqual(not_found.http_status, STATUS.NOT_FOUND) self.assertEqual(not_found.message, \"Not Found\") class ExceptionUnauthorized(unittest.TestCase): def test_init(self): unauthorized = exception.Unauthorized() self.assertEqual(unauthorized.http_status,", "self.assertEqual(handler_exception.http_status, STATUS.FORBIDDEN) self.assertEqual(handler_exception.message, \"message\") self.assertEqual(handler_exception.request, request) self.assertEqual(handler_exception.log_message, 'GET https://localhost:8080/url chrome \"message\"') self.assertEqual(str(handler_exception), 'GET", "3) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"5 is more than the allowed maximum of 3\")", "message=\"message\") self.assertEqual(base.stack_trace, []) def test_push_trace(self): pass def test_message(self): base_value = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base_value.message,", "ExceptionUnsupportedContentTypeError(unittest.TestCase): def test_init(self): unsupported_content_type = exception.UnsupportedContentTypeError(\"text/plain\", \"application/json\") self.assertEqual(unsupported_content_type.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unsupported_content_type.message, \"Unsupported Content-Type in", "self.assertEqual(handler_exception_without_request.message, \"message\") self.assertEqual(handler_exception_without_request.log_message, \"message\") self.assertEqual(str(handler_exception_without_request), \"message\") class ExceptionRequestException(unittest.TestCase): 
def test_init(self): request_exception = exception.RequestException(STATUS.BAD_REQUEST,", "than 5\") class ExceptionMaximumLengthError(unittest.TestCase): def test_init(self): exp = exception.MaximumLengthError(\"dog\", 2) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message,", "exception.ValidationError( message=\"message\", attribute_name=\"attribute\", value=\"value\", blueprint={\"key\": \"value\"} ) self.assertEqual(validation_error.http_status, STATUS.BAD_REQUEST) self.assertEqual(validation_error.message, \"message\") self.assertEqual(validation_error.stack_trace, [", "[{ \"accept_header\": \"accept\", \"supported_types\": [\"a\", \"b\", \"c\"] }] self.assertEqual(unsupported_vocabulary_error.stack_trace, stack_trace) class ExceptionUnsupportedContentTypeError(unittest.TestCase): def", "from prestans.http import STATUS from prestans.http import VERB from prestans import exception class", "self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"length of value: dog has to be less than 2\")", "2\") class ExceptionInvalidTypeError(unittest.TestCase): def test_init(self): exp = exception.InvalidTypeError(\"str\", \"int\") self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"data", "\"message\") class ExceptionRequestException(unittest.TestCase): def test_init(self): request_exception = exception.RequestException(STATUS.BAD_REQUEST, \"bad request\") self.assertEqual(request_exception.http_status, STATUS.BAD_REQUEST) self.assertEqual(request_exception.message,", ") self.assertEqual(validation_error.http_status, STATUS.BAD_REQUEST) self.assertEqual(validation_error.message, \"message\") self.assertEqual(validation_error.stack_trace, [ { \"attribute_name\": \"attribute\", \"value\": \"value\", \"message\":", "\"attribute is required and does not provide a default value\") class ExceptionParseFailedError(unittest.TestCase): def", "self.assertEqual(service_unavailable.message, 
\"Service Unavailable\") class ExceptionBadRequest(unittest.TestCase): def test_init(self): bad_request = exception.BadRequest() self.assertEqual(bad_request.http_status, STATUS.BAD_REQUEST) self.assertEqual(bad_request.message,", "\"https\", \"SERVER_NAME\": \"localhost\", \"SERVER_PORT\": \"8080\" } request = Request( environ=request_environ, charset=charset, logger=self.logger, deserializers=serializers,", "\"attribute filter contains attributes (cat, dog) that are not part of template\" )", "= exception.UnsupportedContentTypeError(\"text/plain\", \"application/json\") self.assertEqual(unsupported_content_type.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unsupported_content_type.message, \"Unsupported Content-Type in Request\") stack_trace = [{", "MyModel() response = exception.ResponseException(STATUS.OK, \"message\", my_model) self.assertEqual(response.http_status, STATUS.OK) self.assertEqual(response.message, \"message\") self.assertEqual(response.response_model, my_model) self.assertRaises(TypeError,", "\"application/json\" }] self.assertEqual(unsupported_content_type.stack_trace, stack_trace) class ExceptionValidationError(unittest.TestCase): def test_init(self): validation_error = exception.ValidationError( message=\"message\", attribute_name=\"attribute\",", "class ExceptionAuthorizationError(unittest.TestCase): def test_init(self): authorization = exception.AuthorizationError(\"Role\") self.assertEqual(authorization.http_status, STATUS.FORBIDDEN) self.assertEqual(authorization.message, \"Role is not", "test_init(self): exp = exception.InvalidTypeError(\"str\", \"int\") self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"data type str given, expected", "def test_init(self): unauthorized = exception.Unauthorized() self.assertEqual(unauthorized.http_status, STATUS.UNAUTHORIZED) self.assertEqual(unauthorized.message, \"Unauthorized\") class ExceptionMovedPermanently(unittest.TestCase): def test_init(self):", "def 
test_init(self): exp = exception.RequiredAttributeError() self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"attribute is required and does", "from prestans.http import VERB from prestans import exception class ExceptionBase(unittest.TestCase): def test_http_status(self): base_value", "\"message\"') handler_exception_without_request = exception.HandlerException(STATUS.NOT_FOUND, \"message\") self.assertEqual(handler_exception_without_request.http_status, STATUS.NOT_FOUND) self.assertEqual(handler_exception_without_request.message, \"message\") self.assertEqual(handler_exception_without_request.log_message, \"message\") self.assertEqual(str(handler_exception_without_request), \"message\")", "forbidden = exception.Forbidden() self.assertEqual(forbidden.http_status, STATUS.FORBIDDEN) self.assertEqual(forbidden.message, \"Forbidden\") class ExceptionInternalServerError(unittest.TestCase): def test_init(self): internal_server_error =", "test_init(self): exp = exception.MinimumLengthError(\"dog\", 5) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"length of value: dog has", "exp = exception.LessThanMinimumError(3, 5) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"3 is less than the allowed", "= exception.ServiceUnavailable() self.assertEqual(service_unavailable.http_status, STATUS.SERVICE_UNAVAILABLE) self.assertEqual(service_unavailable.message, \"Service Unavailable\") class ExceptionBadRequest(unittest.TestCase): def test_init(self): bad_request =", "self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"data type str given, expected int\") class ExceptionMissingParameterError(unittest.TestCase): def test_init(self):", "\"Unsupported vocabulary in the Accept header\") stack_trace = [{ \"accept_header\": \"accept\", \"supported_types\": [\"a\",", "supported_types=[\"a\", \"b\", \"c\"] ) 
self.assertEqual(unsupported_vocabulary_error.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unsupported_vocabulary_error.message, \"Unsupported vocabulary in the Accept header\")", "prestans.types import Model class MyModel(Model): pass my_model = MyModel() response = exception.ResponseException(STATUS.OK, \"message\",", "\"Not Found\") class ExceptionUnauthorized(unittest.TestCase): def test_init(self): unauthorized = exception.Unauthorized() self.assertEqual(unauthorized.http_status, STATUS.UNAUTHORIZED) self.assertEqual(unauthorized.message, \"Unauthorized\")", "self.assertEqual(authentication_custom.message, \"Custom message\") class ExceptionAuthorizationError(unittest.TestCase): def test_init(self): authorization = exception.AuthorizationError(\"Role\") self.assertEqual(authorization.http_status, STATUS.FORBIDDEN) self.assertEqual(authorization.message,", "self.assertEqual(serialization_failed_error.message, \"Serialization failed: format\") self.assertEqual(str(serialization_failed_error), \"Serialization failed: format\") class ExceptionDeSerializationFailedError(unittest.TestCase): def test_init(self): deserialization_failed_error", "class ExceptionValidationError(unittest.TestCase): def test_init(self): validation_error = exception.ValidationError( message=\"message\", attribute_name=\"attribute\", value=\"value\", blueprint={\"key\": \"value\"} )", "class ExceptionResponseException(unittest.TestCase): def test_init(self): from prestans.types import Model class MyModel(Model): pass my_model =", "test_init(self): no_endpoint = exception.NoEndpointError() self.assertEqual(no_endpoint.http_status, STATUS.NOT_FOUND) self.assertEqual(no_endpoint.message, \"API does not provide this end-point\")", "failed to validate stored data on the server\" ) self.assertEqual(error.stack_trace, [{'exception_message': \"error message\",", "= \"utf-8\" serializers = [JSON()] default_serializer = JSON() request_environ = { \"REQUEST_METHOD\": VERB.GET,", 
"exception.ParseFailedError() self.assertEqual(default_msg.http_status, STATUS.BAD_REQUEST) self.assertEqual(default_msg.message, \"Parser Failed\") custom_msg = exception.ParseFailedError(\"custom\") self.assertEqual(custom_msg.http_status, STATUS.BAD_REQUEST) self.assertEqual(custom_msg.message, \"custom\")", "from prestans import exception class ExceptionBase(unittest.TestCase): def test_http_status(self): base_value = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base_value.http_status,", "exception.ParseFailedError(\"custom\") self.assertEqual(custom_msg.http_status, STATUS.BAD_REQUEST) self.assertEqual(custom_msg.message, \"custom\") class ExceptionLessThanMinimumError(unittest.TestCase): def test_init(self): exp = exception.LessThanMinimumError(3, 5)", "unittest from prestans.http import STATUS from prestans.http import VERB from prestans import exception", "moved_permanently = exception.MovedPermanently() self.assertEqual(moved_permanently.http_status, STATUS.MOVED_PERMANENTLY) self.assertEqual(moved_permanently.message, \"Moved Permanently\") class ExceptionPaymentRequired(unittest.TestCase): def test_init(self): payment_required", "test_init(self): missing_parameter = exception.MissingParameterError() self.assertEqual(missing_parameter.http_status, STATUS.BAD_REQUEST) self.assertEqual(missing_parameter.message, \"missing parameter\") class ExceptionInvalidFormatError(unittest.TestCase): def test_init(self):", "from prestans.deserializer import JSON charset = \"utf-8\" serializers = [JSON()] default_serializer = JSON()", "choices 1, 2, 5\") class ExceptionMinimumLengthError(unittest.TestCase): def test_init(self): exp = exception.MinimumLengthError(\"dog\", 5) self.assertEqual(exp.http_status,", "\"message\") self.assertEqual(handler_exception.request, request) self.assertEqual(handler_exception.log_message, 'GET https://localhost:8080/url chrome \"message\"') self.assertEqual(str(handler_exception), 'GET 
https://localhost:8080/url chrome \"message\"')", "= exception.LessThanMinimumError(3, 5) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"3 is less than the allowed minimum", "ExceptionAuthenticationError(unittest.TestCase): def test_init(self): authentication = exception.AuthenticationError() self.assertEqual(authentication.http_status, STATUS.UNAUTHORIZED) self.assertEqual(authentication.message, \"Authentication Error; service is", "to validate stored data on the server\" ) self.assertEqual(error.stack_trace, [{'exception_message': \"error message\", 'attribute_name':", "service_unavailable = exception.ServiceUnavailable() self.assertEqual(service_unavailable.http_status, STATUS.SERVICE_UNAVAILABLE) self.assertEqual(service_unavailable.message, \"Service Unavailable\") class ExceptionBadRequest(unittest.TestCase): def test_init(self): bad_request", "\"8080\" } request = Request( environ=request_environ, charset=charset, logger=self.logger, deserializers=serializers, default_deserializer=default_serializer ) handler_exception =", "STATUS.INTERNAL_SERVER_ERROR) self.assertEqual(error.message, \"Data Adapter failed to validate stored data on the server\") self.assertEqual(", "self.assertEqual(exp.message, \"length of value: dog has to be less than 2\") class ExceptionInvalidTypeError(unittest.TestCase):", "are not part of template\" ) class ExceptionInconsistentPersistentDataError(unittest.TestCase): def test_init(self): error = exception.InconsistentPersistentDataError(\"name\",", "VERB.GET, \"PATH_INFO\": \"/url\", \"HTTP_USER_AGENT\": \"chrome\", \"wsgi.url_scheme\": \"https\", \"SERVER_NAME\": \"localhost\", \"SERVER_PORT\": \"8080\" } request", "STATUS.BAD_REQUEST) self.assertEqual(request_exception.message, \"bad request\") class ExceptionUnimplementedVerbError(unittest.TestCase): def test_init(self): unimplemented_verb = exception.UnimplementedVerbError(\"GET\") self.assertEqual(unimplemented_verb.http_status, 
STATUS.NOT_IMPLEMENTED)", "\"error message\") self.assertEqual(error.http_status, STATUS.INTERNAL_SERVER_ERROR) self.assertEqual(error.message, \"Data Adapter failed to validate stored data on", "= exception.AuthenticationError() self.assertEqual(authentication.http_status, STATUS.UNAUTHORIZED) self.assertEqual(authentication.message, \"Authentication Error; service is only available to authenticated\")", "to adapt name, Data Adapter failed to validate stored data on the server\"", "= exception.MovedPermanently() self.assertEqual(moved_permanently.http_status, STATUS.MOVED_PERMANENTLY) self.assertEqual(moved_permanently.message, \"Moved Permanently\") class ExceptionPaymentRequired(unittest.TestCase): def test_init(self): payment_required =", "class ExceptionParseFailedError(unittest.TestCase): def test_init(self): default_msg = exception.ParseFailedError() self.assertEqual(default_msg.http_status, STATUS.BAD_REQUEST) self.assertEqual(default_msg.message, \"Parser Failed\") custom_msg", "STATUS.INTERNAL_SERVER_ERROR, \"message\", \"string\") class ExceptionServiceUnavailable(unittest.TestCase): def test_init(self): service_unavailable = exception.ServiceUnavailable() self.assertEqual(service_unavailable.http_status, STATUS.SERVICE_UNAVAILABLE) self.assertEqual(service_unavailable.message,", "ExceptionInconsistentPersistentDataError(unittest.TestCase): def test_init(self): error = exception.InconsistentPersistentDataError(\"name\", \"error message\") self.assertEqual(error.http_status, STATUS.INTERNAL_SERVER_ERROR) self.assertEqual(error.message, \"Data Adapter", "= exception.Conflict() self.assertEqual(conflict.http_status, STATUS.CONFLICT) self.assertEqual(conflict.message, \"Conflict\") class ExceptionNotFound(unittest.TestCase): def test_init(self): not_found = exception.NotFound()", "self.assertEqual(invalid_meta_value.http_status, STATUS.BAD_REQUEST) self.assertEqual(invalid_meta_value.message, \"invalid meta value\") class 
ExceptionUnregisteredAdapterError(unittest.TestCase): def test_init(self): unregistered_adapter = exception.UnregisteredAdapterError(\"namespace.Model\")", "prestans.http import VERB from prestans import exception class ExceptionBase(unittest.TestCase): def test_http_status(self): base_value =", "self.assertEqual(handler_exception.log_message, 'GET https://localhost:8080/url chrome \"message\"') self.assertEqual(str(handler_exception), 'GET https://localhost:8080/url chrome \"message\"') handler_exception_without_request = exception.HandlerException(STATUS.NOT_FOUND,", "2, 5]) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"value 3 is not one of these choices", "= exception.UnimplementedVerbError(\"GET\") self.assertEqual(unimplemented_verb.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unimplemented_verb.message, \"API does not implement the HTTP Verb\") self.assertEqual(unimplemented_verb.stack_trace,", "5\") class ExceptionMinimumLengthError(unittest.TestCase): def test_init(self): exp = exception.MinimumLengthError(\"dog\", 5) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"length", "failed: format\") class ExceptionAttributeFilterDiffers(unittest.TestCase): def test_init(self): attribute_filter_differs = exception.AttributeFilterDiffers([\"cat\", \"dog\"]) self.assertEqual(attribute_filter_differs.http_status, STATUS.BAD_REQUEST) self.assertEqual(", "on the server\" ) self.assertEqual(error.stack_trace, [{'exception_message': \"error message\", 'attribute_name': \"name\"}]) class ExceptionDataValidationException(unittest.TestCase): def", "Data Adapter failed to validate stored data on the server\" ) self.assertEqual(error.stack_trace, [{'exception_message':", "= exception.MoreThanMaximumError(5, 3) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"5 is more than the allowed maximum", "3\") class ExceptionInvalidChoiceError(unittest.TestCase): 
def test_init(self): exp = exception.InvalidChoiceError(3, [1, 2, 5]) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST)", "ExceptionPaymentRequired(unittest.TestCase): def test_init(self): payment_required = exception.PaymentRequired() self.assertEqual(payment_required.http_status, STATUS.PAYMENT_REQUIRED) self.assertEqual(payment_required.message, \"Payment Required\") class ExceptionForbidden(unittest.TestCase):", "validate stored data on the server\") self.assertEqual( str(error), \"DataAdapter failed to adapt name,", "self.assertEqual(response.response_model, my_model) self.assertRaises(TypeError, exception.ResponseException, STATUS.INTERNAL_SERVER_ERROR, \"message\", \"string\") class ExceptionServiceUnavailable(unittest.TestCase): def test_init(self): service_unavailable =", "self.assertEqual(default_msg.message, \"Parser Failed\") custom_msg = exception.ParseFailedError(\"custom\") self.assertEqual(custom_msg.http_status, STATUS.BAD_REQUEST) self.assertEqual(custom_msg.message, \"custom\") class ExceptionLessThanMinimumError(unittest.TestCase): def", "JSON charset = \"utf-8\" serializers = [JSON()] default_serializer = JSON() request_environ = {", ") self.assertEqual(unsupported_vocabulary_error.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unsupported_vocabulary_error.message, \"Unsupported vocabulary in the Accept header\") stack_trace = [{", "header\") stack_trace = [{ \"accept_header\": \"accept\", \"supported_types\": [\"a\", \"b\", \"c\"] }] self.assertEqual(unsupported_vocabulary_error.stack_trace, stack_trace)", "not provide a default value\") class ExceptionParseFailedError(unittest.TestCase): def test_init(self): default_msg = exception.ParseFailedError() self.assertEqual(default_msg.http_status,", "message\", 'attribute_name': \"name\"}]) class ExceptionDataValidationException(unittest.TestCase): def test_init(self): exp = exception.DataValidationException(\"message\") self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) 
self.assertEqual(exp.message,", "\"wsgi.url_scheme\": \"https\", \"SERVER_NAME\": \"localhost\", \"SERVER_PORT\": \"8080\" } request = Request( environ=request_environ, charset=charset, logger=self.logger,", "STATUS.PAYMENT_REQUIRED) self.assertEqual(payment_required.message, \"Payment Required\") class ExceptionForbidden(unittest.TestCase): def test_init(self): forbidden = exception.Forbidden() self.assertEqual(forbidden.http_status, STATUS.FORBIDDEN)", "test_init(self): serialization_failed_error = exception.SerializationFailedError(\"format\") self.assertEqual(serialization_failed_error.http_status, STATUS.NOT_FOUND) self.assertEqual(serialization_failed_error.message, \"Serialization failed: format\") self.assertEqual(str(serialization_failed_error), \"Serialization failed:", "exp = exception.DataValidationException(\"message\") self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"message\") class ExceptionRequiredAttributeError(unittest.TestCase): def test_init(self): exp =", "parameter\") class ExceptionInvalidFormatError(unittest.TestCase): def test_init(self): invalid_format = exception.InvalidFormatError(\"cat\") self.assertEqual(invalid_format.http_status, STATUS.BAD_REQUEST) self.assertEqual(invalid_format.message, \"invalid value", "exception.UnsupportedContentTypeError(\"text/plain\", \"application/json\") self.assertEqual(unsupported_content_type.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unsupported_content_type.message, \"Unsupported Content-Type in Request\") stack_trace = [{ \"requested_type\":", "= exception.AuthenticationError(\"Custom message\") self.assertEqual(authentication_custom.http_status, STATUS.UNAUTHORIZED) self.assertEqual(authentication_custom.message, \"Custom message\") class ExceptionAuthorizationError(unittest.TestCase): def test_init(self): authorization", "ExceptionMoreThanMaximumError(unittest.TestCase): def test_init(self): exp = exception.MoreThanMaximumError(5, 3) 
self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"5 is more", "failed: format\") class ExceptionDeSerializationFailedError(unittest.TestCase): def test_init(self): deserialization_failed_error = exception.DeSerializationFailedError(\"format\") self.assertEqual(deserialization_failed_error.http_status, STATUS.NOT_FOUND) self.assertEqual(deserialization_failed_error.message, \"DeSerialization", "payment_required = exception.PaymentRequired() self.assertEqual(payment_required.http_status, STATUS.PAYMENT_REQUIRED) self.assertEqual(payment_required.message, \"Payment Required\") class ExceptionForbidden(unittest.TestCase): def test_init(self): forbidden", "prestans import exception class ExceptionBase(unittest.TestCase): def test_http_status(self): base_value = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base_value.http_status, STATUS.OK)", "unauthorized = exception.Unauthorized() self.assertEqual(unauthorized.http_status, STATUS.UNAUTHORIZED) self.assertEqual(unauthorized.message, \"Unauthorized\") class ExceptionMovedPermanently(unittest.TestCase): def test_init(self): moved_permanently =", "test_init(self): deserialization_failed_error = exception.DeSerializationFailedError(\"format\") self.assertEqual(deserialization_failed_error.http_status, STATUS.NOT_FOUND) self.assertEqual(deserialization_failed_error.message, \"DeSerialization failed: format\") self.assertEqual(str(deserialization_failed_error), \"DeSerialization failed:", "exception.AuthorizationError(\"Role\") self.assertEqual(authorization.http_status, STATUS.FORBIDDEN) self.assertEqual(authorization.message, \"Role is not allowed to access this resource\") class", "exception.Conflict() self.assertEqual(conflict.http_status, STATUS.CONFLICT) self.assertEqual(conflict.message, \"Conflict\") class ExceptionNotFound(unittest.TestCase): def test_init(self): not_found = exception.NotFound() self.assertEqual(not_found.http_status,", "= [{ 
\"requested_type\": \"text/plain\", \"supported_types\": \"application/json\" }] self.assertEqual(unsupported_content_type.stack_trace, stack_trace) class ExceptionValidationError(unittest.TestCase): def test_init(self):", "= STATUS.NO_CONTENT self.assertEqual(base_value.http_status, STATUS.NO_CONTENT) def test_stack_trace(self): base = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base.stack_trace, []) def", "Request\") class ExceptionConflict(unittest.TestCase): def test_init(self): conflict = exception.Conflict() self.assertEqual(conflict.http_status, STATUS.CONFLICT) self.assertEqual(conflict.message, \"Conflict\") class", "def test_init(self): validation_error = exception.ValidationError( message=\"message\", attribute_name=\"attribute\", value=\"value\", blueprint={\"key\": \"value\"} ) self.assertEqual(validation_error.http_status, STATUS.BAD_REQUEST)", "self.assertEqual(authorization.http_status, STATUS.FORBIDDEN) self.assertEqual(authorization.message, \"Role is not allowed to access this resource\") class ExceptionSerializationFailedError(unittest.TestCase):", "prestans.rest import Request import logging logging.basicConfig() self.logger = logging.getLogger(\"prestans\") from prestans.deserializer import JSON", "of value: dog has to be greater than 5\") class ExceptionMaximumLengthError(unittest.TestCase): def test_init(self):", "def test_init(self): exp = exception.MoreThanMaximumError(5, 3) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"5 is more than", "class ExceptionUnregisteredAdapterError(unittest.TestCase): def test_init(self): unregistered_adapter = exception.UnregisteredAdapterError(\"namespace.Model\") self.assertEqual(unregistered_adapter.http_status, STATUS.BAD_REQUEST) self.assertEqual(unregistered_adapter.message, \"no registered adapters", "test_init(self): not_found = exception.NotFound() self.assertEqual(not_found.http_status, STATUS.NOT_FOUND) 
self.assertEqual(not_found.message, \"Not Found\") class ExceptionUnauthorized(unittest.TestCase): def test_init(self):", "test_init(self): unauthorized = exception.Unauthorized() self.assertEqual(unauthorized.http_status, STATUS.UNAUTHORIZED) self.assertEqual(unauthorized.message, \"Unauthorized\") class ExceptionMovedPermanently(unittest.TestCase): def test_init(self): moved_permanently", "\"custom\") class ExceptionLessThanMinimumError(unittest.TestCase): def test_init(self): exp = exception.LessThanMinimumError(3, 5) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"3", "self.assertEqual(forbidden.message, \"Forbidden\") class ExceptionInternalServerError(unittest.TestCase): def test_init(self): internal_server_error = exception.InternalServerError() self.assertEqual(internal_server_error.http_status, STATUS.INTERNAL_SERVER_ERROR) self.assertEqual(internal_server_error.message, \"Internal", "1, 2, 5\") class ExceptionMinimumLengthError(unittest.TestCase): def test_init(self): exp = exception.MinimumLengthError(\"dog\", 5) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST)", "class ExceptionLessThanMinimumError(unittest.TestCase): def test_init(self): exp = exception.LessThanMinimumError(3, 5) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"3 is", "def test_init(self): exp = exception.DataValidationException(\"message\") self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"message\") class ExceptionRequiredAttributeError(unittest.TestCase): def test_init(self):", "only available to authenticated\") authentication_custom = exception.AuthenticationError(\"Custom message\") self.assertEqual(authentication_custom.http_status, STATUS.UNAUTHORIZED) self.assertEqual(authentication_custom.message, \"Custom message\")", "format\") class ExceptionAttributeFilterDiffers(unittest.TestCase): def test_init(self): attribute_filter_differs = 
exception.AttributeFilterDiffers([\"cat\", \"dog\"]) self.assertEqual(attribute_filter_differs.http_status, STATUS.BAD_REQUEST) self.assertEqual( attribute_filter_differs.message,", "exception.DeSerializationFailedError(\"format\") self.assertEqual(deserialization_failed_error.http_status, STATUS.NOT_FOUND) self.assertEqual(deserialization_failed_error.message, \"DeSerialization failed: format\") self.assertEqual(str(deserialization_failed_error), \"DeSerialization failed: format\") class ExceptionAttributeFilterDiffers(unittest.TestCase):", "than the allowed maximum of 3\") class ExceptionInvalidChoiceError(unittest.TestCase): def test_init(self): exp = exception.InvalidChoiceError(3,", "class MyModel(Model): pass my_model = MyModel() response = exception.ResponseException(STATUS.OK, \"message\", my_model) self.assertEqual(response.http_status, STATUS.OK)", "self.assertEqual(unsupported_content_type.stack_trace, stack_trace) class ExceptionValidationError(unittest.TestCase): def test_init(self): validation_error = exception.ValidationError( message=\"message\", attribute_name=\"attribute\", value=\"value\", blueprint={\"key\":", "unsupported_vocabulary_error = exception.UnsupportedVocabularyError( accept_header=\"accept\", supported_types=[\"a\", \"b\", \"c\"] ) self.assertEqual(unsupported_vocabulary_error.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unsupported_vocabulary_error.message, \"Unsupported vocabulary", "\"SERVER_PORT\": \"8080\" } request = Request( environ=request_environ, charset=charset, logger=self.logger, deserializers=serializers, default_deserializer=default_serializer ) handler_exception", "exception.Forbidden() self.assertEqual(forbidden.http_status, STATUS.FORBIDDEN) self.assertEqual(forbidden.message, \"Forbidden\") class ExceptionInternalServerError(unittest.TestCase): def test_init(self): internal_server_error = exception.InternalServerError() self.assertEqual(internal_server_error.http_status,", "available to authenticated\") 
authentication_custom = exception.AuthenticationError(\"Custom message\") self.assertEqual(authentication_custom.http_status, STATUS.UNAUTHORIZED) self.assertEqual(authentication_custom.message, \"Custom message\") class", "= exception.ParseFailedError() self.assertEqual(default_msg.http_status, STATUS.BAD_REQUEST) self.assertEqual(default_msg.message, \"Parser Failed\") custom_msg = exception.ParseFailedError(\"custom\") self.assertEqual(custom_msg.http_status, STATUS.BAD_REQUEST) self.assertEqual(custom_msg.message,", "is not one of these choices 1, 2, 5\") class ExceptionMinimumLengthError(unittest.TestCase): def test_init(self):", "exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base_value.http_status, STATUS.OK) base_value.http_status = STATUS.NO_CONTENT self.assertEqual(base_value.http_status, STATUS.NO_CONTENT) def test_stack_trace(self): base =", "self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"3 is less than the allowed minimum of 5\") class", "test_init(self): exp = exception.LessThanMinimumError(3, 5) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"3 is less than the", "STATUS.UNAUTHORIZED) self.assertEqual(authentication.message, \"Authentication Error; service is only available to authenticated\") authentication_custom = exception.AuthenticationError(\"Custom", "self.assertEqual(str(deserialization_failed_error), \"DeSerialization failed: format\") class ExceptionAttributeFilterDiffers(unittest.TestCase): def test_init(self): attribute_filter_differs = exception.AttributeFilterDiffers([\"cat\", \"dog\"]) self.assertEqual(attribute_filter_differs.http_status,", "exception.DataValidationException(\"message\") self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"message\") class ExceptionRequiredAttributeError(unittest.TestCase): def test_init(self): exp = exception.RequiredAttributeError() 
self.assertEqual(exp.http_status,", "from prestans.types import Model class MyModel(Model): pass my_model = MyModel() response = exception.ResponseException(STATUS.OK,", "ExceptionRequestException(unittest.TestCase): def test_init(self): request_exception = exception.RequestException(STATUS.BAD_REQUEST, \"bad request\") self.assertEqual(request_exception.http_status, STATUS.BAD_REQUEST) self.assertEqual(request_exception.message, \"bad request\")", "serializers = [JSON()] default_serializer = JSON() request_environ = { \"REQUEST_METHOD\": VERB.GET, \"PATH_INFO\": \"/url\",", "prestans.deserializer import JSON charset = \"utf-8\" serializers = [JSON()] default_serializer = JSON() request_environ", "stored data on the server\") self.assertEqual( str(error), \"DataAdapter failed to adapt name, Data", "adapters for data model namespace.Model\") class ExceptionResponseException(unittest.TestCase): def test_init(self): from prestans.types import Model", "data on the server\" ) self.assertEqual(error.stack_trace, [{'exception_message': \"error message\", 'attribute_name': \"name\"}]) class ExceptionDataValidationException(unittest.TestCase):", "value\") class ExceptionParseFailedError(unittest.TestCase): def test_init(self): default_msg = exception.ParseFailedError() self.assertEqual(default_msg.http_status, STATUS.BAD_REQUEST) self.assertEqual(default_msg.message, \"Parser Failed\")", "= JSON() request_environ = { \"REQUEST_METHOD\": VERB.GET, \"PATH_INFO\": \"/url\", \"HTTP_USER_AGENT\": \"chrome\", \"wsgi.url_scheme\": \"https\",", "= exception.HandlerException(STATUS.NOT_FOUND, \"message\") self.assertEqual(handler_exception_without_request.http_status, STATUS.NOT_FOUND) self.assertEqual(handler_exception_without_request.message, \"message\") self.assertEqual(handler_exception_without_request.log_message, \"message\") self.assertEqual(str(handler_exception_without_request), \"message\") class ExceptionRequestException(unittest.TestCase):", "\"error message\", 
'attribute_name': \"name\"}]) class ExceptionDataValidationException(unittest.TestCase): def test_init(self): exp = exception.DataValidationException(\"message\") self.assertEqual(exp.http_status, STATUS.BAD_REQUEST)", "\"b\", \"c\"] ) self.assertEqual(unsupported_vocabulary_error.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unsupported_vocabulary_error.message, \"Unsupported vocabulary in the Accept header\") stack_trace", "= exception.PaymentRequired() self.assertEqual(payment_required.http_status, STATUS.PAYMENT_REQUIRED) self.assertEqual(payment_required.message, \"Payment Required\") class ExceptionForbidden(unittest.TestCase): def test_init(self): forbidden =", "\"message\") class ExceptionUnsupportedVocabularyError(unittest.TestCase): def test_init(self): unsupported_vocabulary_error = exception.UnsupportedVocabularyError( accept_header=\"accept\", supported_types=[\"a\", \"b\", \"c\"] )", "\"HTTP_USER_AGENT\": \"chrome\", \"wsgi.url_scheme\": \"https\", \"SERVER_NAME\": \"localhost\", \"SERVER_PORT\": \"8080\" } request = Request( environ=request_environ,", "does not implement the HTTP Verb\") self.assertEqual(unimplemented_verb.stack_trace, [{\"verb\": \"GET\"}]) class ExceptionNoEndpointError(unittest.TestCase): def test_init(self):", "unimplemented_verb = exception.UnimplementedVerbError(\"GET\") self.assertEqual(unimplemented_verb.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unimplemented_verb.message, \"API does not implement the HTTP Verb\")", "\"API does not provide this end-point\") class ExceptionAuthenticationError(unittest.TestCase): def test_init(self): authentication = exception.AuthenticationError()", "default_msg = exception.ParseFailedError() self.assertEqual(default_msg.http_status, STATUS.BAD_REQUEST) self.assertEqual(default_msg.message, \"Parser Failed\") custom_msg = exception.ParseFailedError(\"custom\") self.assertEqual(custom_msg.http_status, STATUS.BAD_REQUEST)", "exception.ServiceUnavailable() 
self.assertEqual(service_unavailable.http_status, STATUS.SERVICE_UNAVAILABLE) self.assertEqual(service_unavailable.message, \"Service Unavailable\") class ExceptionBadRequest(unittest.TestCase): def test_init(self): bad_request = exception.BadRequest()", "ExceptionUnregisteredAdapterError(unittest.TestCase): def test_init(self): unregistered_adapter = exception.UnregisteredAdapterError(\"namespace.Model\") self.assertEqual(unregistered_adapter.http_status, STATUS.BAD_REQUEST) self.assertEqual(unregistered_adapter.message, \"no registered adapters for", "stack_trace = [{ \"requested_type\": \"text/plain\", \"supported_types\": \"application/json\" }] self.assertEqual(unsupported_content_type.stack_trace, stack_trace) class ExceptionValidationError(unittest.TestCase): def", "test_str(self): base = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base.http_status, STATUS.OK) self.assertEqual(str(base.message), \"message\") class ExceptionUnsupportedVocabularyError(unittest.TestCase): def test_init(self):", "\"Payment Required\") class ExceptionForbidden(unittest.TestCase): def test_init(self): forbidden = exception.Forbidden() self.assertEqual(forbidden.http_status, STATUS.FORBIDDEN) self.assertEqual(forbidden.message, \"Forbidden\")", "\"bad request\") self.assertEqual(request_exception.http_status, STATUS.BAD_REQUEST) self.assertEqual(request_exception.message, \"bad request\") class ExceptionUnimplementedVerbError(unittest.TestCase): def test_init(self): unimplemented_verb =", "exception.MovedPermanently() self.assertEqual(moved_permanently.http_status, STATUS.MOVED_PERMANENTLY) self.assertEqual(moved_permanently.message, \"Moved Permanently\") class ExceptionPaymentRequired(unittest.TestCase): def test_init(self): payment_required = exception.PaymentRequired()", "test_init(self): authorization = exception.AuthorizationError(\"Role\") self.assertEqual(authorization.http_status, STATUS.FORBIDDEN) self.assertEqual(authorization.message, 
\"Role is not allowed to access", "self.assertEqual(service_unavailable.http_status, STATUS.SERVICE_UNAVAILABLE) self.assertEqual(service_unavailable.message, \"Service Unavailable\") class ExceptionBadRequest(unittest.TestCase): def test_init(self): bad_request = exception.BadRequest() self.assertEqual(bad_request.http_status,", "greater than 5\") class ExceptionMaximumLengthError(unittest.TestCase): def test_init(self): exp = exception.MaximumLengthError(\"dog\", 2) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST)", "\"localhost\", \"SERVER_PORT\": \"8080\" } request = Request( environ=request_environ, charset=charset, logger=self.logger, deserializers=serializers, default_deserializer=default_serializer )", "exception.InvalidTypeError(\"str\", \"int\") self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"data type str given, expected int\") class ExceptionMissingParameterError(unittest.TestCase):", "exception.AttributeFilterDiffers([\"cat\", \"dog\"]) self.assertEqual(attribute_filter_differs.http_status, STATUS.BAD_REQUEST) self.assertEqual( attribute_filter_differs.message, \"attribute filter contains attributes (cat, dog) that", "'attribute_name': \"name\"}]) class ExceptionDataValidationException(unittest.TestCase): def test_init(self): exp = exception.DataValidationException(\"message\") self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"message\")", "given, expected int\") class ExceptionMissingParameterError(unittest.TestCase): def test_init(self): missing_parameter = exception.MissingParameterError() self.assertEqual(missing_parameter.http_status, STATUS.BAD_REQUEST) self.assertEqual(missing_parameter.message,", "ExceptionLessThanMinimumError(unittest.TestCase): def test_init(self): exp = exception.LessThanMinimumError(3, 5) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"3 is less", "the allowed maximum of 3\") class 
ExceptionInvalidChoiceError(unittest.TestCase): def test_init(self): exp = exception.InvalidChoiceError(3, [1,", "ExceptionHandlerException(unittest.TestCase): def test_init(self): from prestans.rest import Request import logging logging.basicConfig() self.logger = logging.getLogger(\"prestans\")", "allowed to access this resource\") class ExceptionSerializationFailedError(unittest.TestCase): def test_init(self): serialization_failed_error = exception.SerializationFailedError(\"format\") self.assertEqual(serialization_failed_error.http_status,", "test_message(self): base_value = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base_value.message, \"message\") def test_str(self): base = exception.Base(http_status=STATUS.OK, message=\"message\")", "= exception.UnregisteredAdapterError(\"namespace.Model\") self.assertEqual(unregistered_adapter.http_status, STATUS.BAD_REQUEST) self.assertEqual(unregistered_adapter.message, \"no registered adapters for data model namespace.Model\") class", "failed: format\") self.assertEqual(str(deserialization_failed_error), \"DeSerialization failed: format\") class ExceptionAttributeFilterDiffers(unittest.TestCase): def test_init(self): attribute_filter_differs = exception.AttributeFilterDiffers([\"cat\",", "ExceptionServiceUnavailable(unittest.TestCase): def test_init(self): service_unavailable = exception.ServiceUnavailable() self.assertEqual(service_unavailable.http_status, STATUS.SERVICE_UNAVAILABLE) self.assertEqual(service_unavailable.message, \"Service Unavailable\") class ExceptionBadRequest(unittest.TestCase):", "STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"message\") class ExceptionRequiredAttributeError(unittest.TestCase): def test_init(self): exp = exception.RequiredAttributeError() self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message,", "of these choices 1, 2, 5\") class ExceptionMinimumLengthError(unittest.TestCase): def test_init(self): exp = 
exception.MinimumLengthError(\"dog\",", "= exception.MaximumLengthError(\"dog\", 2) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"length of value: dog has to be", "= exception.SerializationFailedError(\"format\") self.assertEqual(serialization_failed_error.http_status, STATUS.NOT_FOUND) self.assertEqual(serialization_failed_error.message, \"Serialization failed: format\") self.assertEqual(str(serialization_failed_error), \"Serialization failed: format\") class", "test_init(self): exp = exception.DataValidationException(\"message\") self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"message\") class ExceptionRequiredAttributeError(unittest.TestCase): def test_init(self): exp", "Unavailable\") class ExceptionBadRequest(unittest.TestCase): def test_init(self): bad_request = exception.BadRequest() self.assertEqual(bad_request.http_status, STATUS.BAD_REQUEST) self.assertEqual(bad_request.message, \"Bad Request\")", "chrome \"message\"') self.assertEqual(str(handler_exception), 'GET https://localhost:8080/url chrome \"message\"') handler_exception_without_request = exception.HandlerException(STATUS.NOT_FOUND, \"message\") self.assertEqual(handler_exception_without_request.http_status, STATUS.NOT_FOUND)", "self.assertEqual(unimplemented_verb.message, \"API does not implement the HTTP Verb\") self.assertEqual(unimplemented_verb.stack_trace, [{\"verb\": \"GET\"}]) class ExceptionNoEndpointError(unittest.TestCase):", "\"length of value: dog has to be less than 2\") class ExceptionInvalidTypeError(unittest.TestCase): def", "[1, 2, 5]) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"value 3 is not one of these", "server\") self.assertEqual( str(error), \"DataAdapter failed to adapt name, Data Adapter failed to validate", "self.assertEqual(missing_parameter.message, \"missing parameter\") class ExceptionInvalidFormatError(unittest.TestCase): def test_init(self): 
invalid_format = exception.InvalidFormatError(\"cat\") self.assertEqual(invalid_format.http_status, STATUS.BAD_REQUEST) self.assertEqual(invalid_format.message,", "[{ \"requested_type\": \"text/plain\", \"supported_types\": \"application/json\" }] self.assertEqual(unsupported_content_type.stack_trace, stack_trace) class ExceptionValidationError(unittest.TestCase): def test_init(self): validation_error", "= exception.BadRequest() self.assertEqual(bad_request.http_status, STATUS.BAD_REQUEST) self.assertEqual(bad_request.message, \"Bad Request\") class ExceptionConflict(unittest.TestCase): def test_init(self): conflict =", "deserialization_failed_error = exception.DeSerializationFailedError(\"format\") self.assertEqual(deserialization_failed_error.http_status, STATUS.NOT_FOUND) self.assertEqual(deserialization_failed_error.message, \"DeSerialization failed: format\") self.assertEqual(str(deserialization_failed_error), \"DeSerialization failed: format\")", "} ]) self.assertEqual(str(validation_error), \"attribute message\") class ExceptionHandlerException(unittest.TestCase): def test_init(self): from prestans.rest import Request", "def test_init(self): authentication = exception.AuthenticationError() self.assertEqual(authentication.http_status, STATUS.UNAUTHORIZED) self.assertEqual(authentication.message, \"Authentication Error; service is only", "custom_msg = exception.ParseFailedError(\"custom\") self.assertEqual(custom_msg.http_status, STATUS.BAD_REQUEST) self.assertEqual(custom_msg.message, \"custom\") class ExceptionLessThanMinimumError(unittest.TestCase): def test_init(self): exp =", "vocabulary in the Accept header\") stack_trace = [{ \"accept_header\": \"accept\", \"supported_types\": [\"a\", \"b\",", "adapt name, Data Adapter failed to validate stored data on the server\" )", "def test_message(self): base_value = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base_value.message, \"message\") def test_str(self): base = 
exception.Base(http_status=STATUS.OK,", "self.logger = logging.getLogger(\"prestans\") from prestans.deserializer import JSON charset = \"utf-8\" serializers = [JSON()]", "\"no registered adapters for data model namespace.Model\") class ExceptionResponseException(unittest.TestCase): def test_init(self): from prestans.types", "= exception.NotFound() self.assertEqual(not_found.http_status, STATUS.NOT_FOUND) self.assertEqual(not_found.message, \"Not Found\") class ExceptionUnauthorized(unittest.TestCase): def test_init(self): unauthorized =", "{ \"REQUEST_METHOD\": VERB.GET, \"PATH_INFO\": \"/url\", \"HTTP_USER_AGENT\": \"chrome\", \"wsgi.url_scheme\": \"https\", \"SERVER_NAME\": \"localhost\", \"SERVER_PORT\": \"8080\"", "STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"length of value: dog has to be greater than 5\") class", "type str given, expected int\") class ExceptionMissingParameterError(unittest.TestCase): def test_init(self): missing_parameter = exception.MissingParameterError() self.assertEqual(missing_parameter.http_status,", "def test_stack_trace(self): base = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base.stack_trace, []) def test_push_trace(self): pass def test_message(self):", "(cat, dog) that are not part of template\" ) class ExceptionInconsistentPersistentDataError(unittest.TestCase): def test_init(self):", "ExceptionMaximumLengthError(unittest.TestCase): def test_init(self): exp = exception.MaximumLengthError(\"dog\", 2) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"length of value:", "ExceptionDataValidationException(unittest.TestCase): def test_init(self): exp = exception.DataValidationException(\"message\") self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"message\") class ExceptionRequiredAttributeError(unittest.TestCase): def", "5\") class ExceptionMoreThanMaximumError(unittest.TestCase): def test_init(self): exp = 
exception.MoreThanMaximumError(5, 3) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"5", "{\"key\": \"value\"} } ]) self.assertEqual(str(validation_error), \"attribute message\") class ExceptionHandlerException(unittest.TestCase): def test_init(self): from prestans.rest", "\"Parser Failed\") custom_msg = exception.ParseFailedError(\"custom\") self.assertEqual(custom_msg.http_status, STATUS.BAD_REQUEST) self.assertEqual(custom_msg.message, \"custom\") class ExceptionLessThanMinimumError(unittest.TestCase): def test_init(self):", "provide this end-point\") class ExceptionAuthenticationError(unittest.TestCase): def test_init(self): authentication = exception.AuthenticationError() self.assertEqual(authentication.http_status, STATUS.UNAUTHORIZED) self.assertEqual(authentication.message,", "Failed\") custom_msg = exception.ParseFailedError(\"custom\") self.assertEqual(custom_msg.http_status, STATUS.BAD_REQUEST) self.assertEqual(custom_msg.message, \"custom\") class ExceptionLessThanMinimumError(unittest.TestCase): def test_init(self): exp", "self.assertEqual(invalid_format.message, \"invalid value cat provided\") class ExceptionInvalidMetaValueError(unittest.TestCase): def test_init(self): invalid_meta_value = exception.InvalidMetaValueError() self.assertEqual(invalid_meta_value.http_status,", "test_init(self): exp = exception.MoreThanMaximumError(5, 3) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"5 is more than the", "def test_init(self): request_exception = exception.RequestException(STATUS.BAD_REQUEST, \"bad request\") self.assertEqual(request_exception.http_status, STATUS.BAD_REQUEST) self.assertEqual(request_exception.message, \"bad request\") class", "self.assertEqual(unsupported_content_type.message, \"Unsupported Content-Type in Request\") stack_trace = [{ \"requested_type\": \"text/plain\", \"supported_types\": \"application/json\" }]", ") class 
ExceptionInconsistentPersistentDataError(unittest.TestCase): def test_init(self): error = exception.InconsistentPersistentDataError(\"name\", \"error message\") self.assertEqual(error.http_status, STATUS.INTERNAL_SERVER_ERROR) self.assertEqual(error.message,", "\"message\") self.assertEqual(handler_exception_without_request.log_message, \"message\") self.assertEqual(str(handler_exception_without_request), \"message\") class ExceptionRequestException(unittest.TestCase): def test_init(self): request_exception = exception.RequestException(STATUS.BAD_REQUEST, \"bad", "self.assertEqual(handler_exception.request, request) self.assertEqual(handler_exception.log_message, 'GET https://localhost:8080/url chrome \"message\"') self.assertEqual(str(handler_exception), 'GET https://localhost:8080/url chrome \"message\"') handler_exception_without_request", "2) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"length of value: dog has to be less than", "value: dog has to be less than 2\") class ExceptionInvalidTypeError(unittest.TestCase): def test_init(self): exp", "\"supported_types\": [\"a\", \"b\", \"c\"] }] self.assertEqual(unsupported_vocabulary_error.stack_trace, stack_trace) class ExceptionUnsupportedContentTypeError(unittest.TestCase): def test_init(self): unsupported_content_type =", "self.assertEqual(unsupported_vocabulary_error.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unsupported_vocabulary_error.message, \"Unsupported vocabulary in the Accept header\") stack_trace = [{ \"accept_header\":", "on the server\") self.assertEqual( str(error), \"DataAdapter failed to adapt name, Data Adapter failed", "class ExceptionNotFound(unittest.TestCase): def test_init(self): not_found = exception.NotFound() self.assertEqual(not_found.http_status, STATUS.NOT_FOUND) self.assertEqual(not_found.message, \"Not Found\") class", "test_init(self): payment_required = exception.PaymentRequired() self.assertEqual(payment_required.http_status, 
STATUS.PAYMENT_REQUIRED) self.assertEqual(payment_required.message, \"Payment Required\") class ExceptionForbidden(unittest.TestCase): def test_init(self):", "self.assertEqual(str(validation_error), \"attribute message\") class ExceptionHandlerException(unittest.TestCase): def test_init(self): from prestans.rest import Request import logging", "self.assertEqual(custom_msg.http_status, STATUS.BAD_REQUEST) self.assertEqual(custom_msg.message, \"custom\") class ExceptionLessThanMinimumError(unittest.TestCase): def test_init(self): exp = exception.LessThanMinimumError(3, 5) self.assertEqual(exp.http_status,", "\"utf-8\" serializers = [JSON()] default_serializer = JSON() request_environ = { \"REQUEST_METHOD\": VERB.GET, \"PATH_INFO\":", "\"value\": \"value\", \"message\": \"message\", \"blueprint\": {\"key\": \"value\"} } ]) self.assertEqual(str(validation_error), \"attribute message\") class", "'GET https://localhost:8080/url chrome \"message\"') self.assertEqual(str(handler_exception), 'GET https://localhost:8080/url chrome \"message\"') handler_exception_without_request = exception.HandlerException(STATUS.NOT_FOUND, \"message\")", "[{\"verb\": \"GET\"}]) class ExceptionNoEndpointError(unittest.TestCase): def test_init(self): no_endpoint = exception.NoEndpointError() self.assertEqual(no_endpoint.http_status, STATUS.NOT_FOUND) self.assertEqual(no_endpoint.message, \"API", "STATUS.NOT_FOUND) self.assertEqual(no_endpoint.message, \"API does not provide this end-point\") class ExceptionAuthenticationError(unittest.TestCase): def test_init(self): authentication", "self.assertEqual(error.http_status, STATUS.INTERNAL_SERVER_ERROR) self.assertEqual(error.message, \"Data Adapter failed to validate stored data on the server\")", "STATUS.MOVED_PERMANENTLY) self.assertEqual(moved_permanently.message, \"Moved Permanently\") class ExceptionPaymentRequired(unittest.TestCase): def test_init(self): payment_required = exception.PaymentRequired() 
self.assertEqual(payment_required.http_status, STATUS.PAYMENT_REQUIRED)", "\"bad request\") class ExceptionUnimplementedVerbError(unittest.TestCase): def test_init(self): unimplemented_verb = exception.UnimplementedVerbError(\"GET\") self.assertEqual(unimplemented_verb.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unimplemented_verb.message, \"API", "self.assertEqual(base.stack_trace, []) def test_push_trace(self): pass def test_message(self): base_value = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base_value.message, \"message\")", "name, Data Adapter failed to validate stored data on the server\" ) self.assertEqual(error.stack_trace,", "import exception class ExceptionBase(unittest.TestCase): def test_http_status(self): base_value = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base_value.http_status, STATUS.OK) base_value.http_status", "\"GET\"}]) class ExceptionNoEndpointError(unittest.TestCase): def test_init(self): no_endpoint = exception.NoEndpointError() self.assertEqual(no_endpoint.http_status, STATUS.NOT_FOUND) self.assertEqual(no_endpoint.message, \"API does", "def test_init(self): no_endpoint = exception.NoEndpointError() self.assertEqual(no_endpoint.http_status, STATUS.NOT_FOUND) self.assertEqual(no_endpoint.message, \"API does not provide this", "more than the allowed maximum of 3\") class ExceptionInvalidChoiceError(unittest.TestCase): def test_init(self): exp =", "ExceptionInvalidFormatError(unittest.TestCase): def test_init(self): invalid_format = exception.InvalidFormatError(\"cat\") self.assertEqual(invalid_format.http_status, STATUS.BAD_REQUEST) self.assertEqual(invalid_format.message, \"invalid value cat provided\")", "= exception.InvalidMetaValueError() self.assertEqual(invalid_meta_value.http_status, STATUS.BAD_REQUEST) self.assertEqual(invalid_meta_value.message, \"invalid meta value\") class ExceptionUnregisteredAdapterError(unittest.TestCase): def test_init(self): 
unregistered_adapter", "STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"attribute is required and does not provide a default value\") class", "base_value = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base_value.http_status, STATUS.OK) base_value.http_status = STATUS.NO_CONTENT self.assertEqual(base_value.http_status, STATUS.NO_CONTENT) def test_stack_trace(self):", "= exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base.stack_trace, []) def test_push_trace(self): pass def test_message(self): base_value = exception.Base(http_status=STATUS.OK,", "value: dog has to be greater than 5\") class ExceptionMaximumLengthError(unittest.TestCase): def test_init(self): exp", "self.assertEqual(attribute_filter_differs.http_status, STATUS.BAD_REQUEST) self.assertEqual( attribute_filter_differs.message, \"attribute filter contains attributes (cat, dog) that are not", "test_init(self): error = exception.InconsistentPersistentDataError(\"name\", \"error message\") self.assertEqual(error.http_status, STATUS.INTERNAL_SERVER_ERROR) self.assertEqual(error.message, \"Data Adapter failed to", "ExceptionNotFound(unittest.TestCase): def test_init(self): not_found = exception.NotFound() self.assertEqual(not_found.http_status, STATUS.NOT_FOUND) self.assertEqual(not_found.message, \"Not Found\") class ExceptionUnauthorized(unittest.TestCase):", "\"c\"] }] self.assertEqual(unsupported_vocabulary_error.stack_trace, stack_trace) class ExceptionUnsupportedContentTypeError(unittest.TestCase): def test_init(self): unsupported_content_type = exception.UnsupportedContentTypeError(\"text/plain\", \"application/json\") self.assertEqual(unsupported_content_type.http_status,", "value=\"value\", blueprint={\"key\": \"value\"} ) self.assertEqual(validation_error.http_status, STATUS.BAD_REQUEST) self.assertEqual(validation_error.message, \"message\") self.assertEqual(validation_error.stack_trace, [ { \"attribute_name\": \"attribute\",", 
"ExceptionInvalidChoiceError(unittest.TestCase): def test_init(self): exp = exception.InvalidChoiceError(3, [1, 2, 5]) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"value", "value cat provided\") class ExceptionInvalidMetaValueError(unittest.TestCase): def test_init(self): invalid_meta_value = exception.InvalidMetaValueError() self.assertEqual(invalid_meta_value.http_status, STATUS.BAD_REQUEST) self.assertEqual(invalid_meta_value.message,", "exception.LessThanMinimumError(3, 5) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"3 is less than the allowed minimum of", "self.assertEqual(unregistered_adapter.message, \"no registered adapters for data model namespace.Model\") class ExceptionResponseException(unittest.TestCase): def test_init(self): from", "\"Unauthorized\") class ExceptionMovedPermanently(unittest.TestCase): def test_init(self): moved_permanently = exception.MovedPermanently() self.assertEqual(moved_permanently.http_status, STATUS.MOVED_PERMANENTLY) self.assertEqual(moved_permanently.message, \"Moved Permanently\")", "in Request\") stack_trace = [{ \"requested_type\": \"text/plain\", \"supported_types\": \"application/json\" }] self.assertEqual(unsupported_content_type.stack_trace, stack_trace) class", "test_init(self): exp = exception.MaximumLengthError(\"dog\", 2) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"length of value: dog has", "= exception.Unauthorized() self.assertEqual(unauthorized.http_status, STATUS.UNAUTHORIZED) self.assertEqual(unauthorized.message, \"Unauthorized\") class ExceptionMovedPermanently(unittest.TestCase): def test_init(self): moved_permanently = exception.MovedPermanently()", "ExceptionValidationError(unittest.TestCase): def test_init(self): validation_error = exception.ValidationError( message=\"message\", attribute_name=\"attribute\", value=\"value\", blueprint={\"key\": \"value\"} ) 
self.assertEqual(validation_error.http_status,", "self.assertEqual(response.http_status, STATUS.OK) self.assertEqual(response.message, \"message\") self.assertEqual(response.response_model, my_model) self.assertRaises(TypeError, exception.ResponseException, STATUS.INTERNAL_SERVER_ERROR, \"message\", \"string\") class ExceptionServiceUnavailable(unittest.TestCase):", "ExceptionUnauthorized(unittest.TestCase): def test_init(self): unauthorized = exception.Unauthorized() self.assertEqual(unauthorized.http_status, STATUS.UNAUTHORIZED) self.assertEqual(unauthorized.message, \"Unauthorized\") class ExceptionMovedPermanently(unittest.TestCase): def", "stack_trace) class ExceptionUnsupportedContentTypeError(unittest.TestCase): def test_init(self): unsupported_content_type = exception.UnsupportedContentTypeError(\"text/plain\", \"application/json\") self.assertEqual(unsupported_content_type.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unsupported_content_type.message, \"Unsupported", "[{'exception_message': \"error message\", 'attribute_name': \"name\"}]) class ExceptionDataValidationException(unittest.TestCase): def test_init(self): exp = exception.DataValidationException(\"message\") self.assertEqual(exp.http_status,", "one of these choices 1, 2, 5\") class ExceptionMinimumLengthError(unittest.TestCase): def test_init(self): exp =", "}] self.assertEqual(unsupported_vocabulary_error.stack_trace, stack_trace) class ExceptionUnsupportedContentTypeError(unittest.TestCase): def test_init(self): unsupported_content_type = exception.UnsupportedContentTypeError(\"text/plain\", \"application/json\") self.assertEqual(unsupported_content_type.http_status, STATUS.NOT_IMPLEMENTED)", "self.assertEqual(base_value.http_status, STATUS.NO_CONTENT) def test_stack_trace(self): base = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base.stack_trace, []) def test_push_trace(self): pass", "authenticated\") authentication_custom = 
exception.AuthenticationError(\"Custom message\") self.assertEqual(authentication_custom.http_status, STATUS.UNAUTHORIZED) self.assertEqual(authentication_custom.message, \"Custom message\") class ExceptionAuthorizationError(unittest.TestCase): def", "self.assertEqual(no_endpoint.message, \"API does not provide this end-point\") class ExceptionAuthenticationError(unittest.TestCase): def test_init(self): authentication =", "required and does not provide a default value\") class ExceptionParseFailedError(unittest.TestCase): def test_init(self): default_msg", "= exception.ValidationError( message=\"message\", attribute_name=\"attribute\", value=\"value\", blueprint={\"key\": \"value\"} ) self.assertEqual(validation_error.http_status, STATUS.BAD_REQUEST) self.assertEqual(validation_error.message, \"message\") self.assertEqual(validation_error.stack_trace,", "def test_http_status(self): base_value = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base_value.http_status, STATUS.OK) base_value.http_status = STATUS.NO_CONTENT self.assertEqual(base_value.http_status, STATUS.NO_CONTENT)", "'GET https://localhost:8080/url chrome \"message\"') handler_exception_without_request = exception.HandlerException(STATUS.NOT_FOUND, \"message\") self.assertEqual(handler_exception_without_request.http_status, STATUS.NOT_FOUND) self.assertEqual(handler_exception_without_request.message, \"message\") self.assertEqual(handler_exception_without_request.log_message,", "= exception.DeSerializationFailedError(\"format\") self.assertEqual(deserialization_failed_error.http_status, STATUS.NOT_FOUND) self.assertEqual(deserialization_failed_error.message, \"DeSerialization failed: format\") self.assertEqual(str(deserialization_failed_error), \"DeSerialization failed: format\") class", "def test_init(self): service_unavailable = exception.ServiceUnavailable() self.assertEqual(service_unavailable.http_status, STATUS.SERVICE_UNAVAILABLE) 
self.assertEqual(service_unavailable.message, \"Service Unavailable\") class ExceptionBadRequest(unittest.TestCase): def", "str(error), \"DataAdapter failed to adapt name, Data Adapter failed to validate stored data", "class ExceptionServiceUnavailable(unittest.TestCase): def test_init(self): service_unavailable = exception.ServiceUnavailable() self.assertEqual(service_unavailable.http_status, STATUS.SERVICE_UNAVAILABLE) self.assertEqual(service_unavailable.message, \"Service Unavailable\") class", "test_init(self): service_unavailable = exception.ServiceUnavailable() self.assertEqual(service_unavailable.http_status, STATUS.SERVICE_UNAVAILABLE) self.assertEqual(service_unavailable.message, \"Service Unavailable\") class ExceptionBadRequest(unittest.TestCase): def test_init(self):", "and does not provide a default value\") class ExceptionParseFailedError(unittest.TestCase): def test_init(self): default_msg =", "self.assertEqual(unsupported_vocabulary_error.message, \"Unsupported vocabulary in the Accept header\") stack_trace = [{ \"accept_header\": \"accept\", \"supported_types\":", "self.assertRaises(TypeError, exception.ResponseException, STATUS.INTERNAL_SERVER_ERROR, \"message\", \"string\") class ExceptionServiceUnavailable(unittest.TestCase): def test_init(self): service_unavailable = exception.ServiceUnavailable() self.assertEqual(service_unavailable.http_status,", "def test_init(self): forbidden = exception.Forbidden() self.assertEqual(forbidden.http_status, STATUS.FORBIDDEN) self.assertEqual(forbidden.message, \"Forbidden\") class ExceptionInternalServerError(unittest.TestCase): def test_init(self):", "Adapter failed to validate stored data on the server\") self.assertEqual( str(error), \"DataAdapter failed", "request\") self.assertEqual(request_exception.http_status, STATUS.BAD_REQUEST) self.assertEqual(request_exception.message, \"bad request\") class ExceptionUnimplementedVerbError(unittest.TestCase): def test_init(self): unimplemented_verb = 
exception.UnimplementedVerbError(\"GET\")", "self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"5 is more than the allowed maximum of 3\") class", "= [{ \"accept_header\": \"accept\", \"supported_types\": [\"a\", \"b\", \"c\"] }] self.assertEqual(unsupported_vocabulary_error.stack_trace, stack_trace) class ExceptionUnsupportedContentTypeError(unittest.TestCase):", "def test_init(self): attribute_filter_differs = exception.AttributeFilterDiffers([\"cat\", \"dog\"]) self.assertEqual(attribute_filter_differs.http_status, STATUS.BAD_REQUEST) self.assertEqual( attribute_filter_differs.message, \"attribute filter contains", "of template\" ) class ExceptionInconsistentPersistentDataError(unittest.TestCase): def test_init(self): error = exception.InconsistentPersistentDataError(\"name\", \"error message\") self.assertEqual(error.http_status,", "= exception.RequestException(STATUS.BAD_REQUEST, \"bad request\") self.assertEqual(request_exception.http_status, STATUS.BAD_REQUEST) self.assertEqual(request_exception.message, \"bad request\") class ExceptionUnimplementedVerbError(unittest.TestCase): def test_init(self):", "MyModel(Model): pass my_model = MyModel() response = exception.ResponseException(STATUS.OK, \"message\", my_model) self.assertEqual(response.http_status, STATUS.OK) self.assertEqual(response.message,", "test_init(self): unregistered_adapter = exception.UnregisteredAdapterError(\"namespace.Model\") self.assertEqual(unregistered_adapter.http_status, STATUS.BAD_REQUEST) self.assertEqual(unregistered_adapter.message, \"no registered adapters for data model", "class ExceptionAuthenticationError(unittest.TestCase): def test_init(self): authentication = exception.AuthenticationError() self.assertEqual(authentication.http_status, STATUS.UNAUTHORIZED) self.assertEqual(authentication.message, \"Authentication Error; service", "minimum of 5\") class ExceptionMoreThanMaximumError(unittest.TestCase): def test_init(self): exp = 
exception.MoreThanMaximumError(5, 3) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST)", "def test_init(self): exp = exception.MaximumLengthError(\"dog\", 2) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"length of value: dog", "exp = exception.MaximumLengthError(\"dog\", 2) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"length of value: dog has to", "self.assertEqual(conflict.http_status, STATUS.CONFLICT) self.assertEqual(conflict.message, \"Conflict\") class ExceptionNotFound(unittest.TestCase): def test_init(self): not_found = exception.NotFound() self.assertEqual(not_found.http_status, STATUS.NOT_FOUND)", "of 5\") class ExceptionMoreThanMaximumError(unittest.TestCase): def test_init(self): exp = exception.MoreThanMaximumError(5, 3) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message,", "exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base.http_status, STATUS.OK) self.assertEqual(str(base.message), \"message\") class ExceptionUnsupportedVocabularyError(unittest.TestCase): def test_init(self): unsupported_vocabulary_error = exception.UnsupportedVocabularyError(", "test_http_status(self): base_value = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base_value.http_status, STATUS.OK) base_value.http_status = STATUS.NO_CONTENT self.assertEqual(base_value.http_status, STATUS.NO_CONTENT) def", "class ExceptionRequestException(unittest.TestCase): def test_init(self): request_exception = exception.RequestException(STATUS.BAD_REQUEST, \"bad request\") self.assertEqual(request_exception.http_status, STATUS.BAD_REQUEST) self.assertEqual(request_exception.message, \"bad", "self.assertEqual(unsupported_vocabulary_error.stack_trace, stack_trace) class ExceptionUnsupportedContentTypeError(unittest.TestCase): def test_init(self): unsupported_content_type = exception.UnsupportedContentTypeError(\"text/plain\", 
\"application/json\") self.assertEqual(unsupported_content_type.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unsupported_content_type.message,", ") handler_exception = exception.HandlerException(STATUS.FORBIDDEN, \"message\") handler_exception.request = request self.assertEqual(handler_exception.http_status, STATUS.FORBIDDEN) self.assertEqual(handler_exception.message, \"message\") self.assertEqual(handler_exception.request,", "ExceptionBadRequest(unittest.TestCase): def test_init(self): bad_request = exception.BadRequest() self.assertEqual(bad_request.http_status, STATUS.BAD_REQUEST) self.assertEqual(bad_request.message, \"Bad Request\") class ExceptionConflict(unittest.TestCase):", "self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"message\") class ExceptionRequiredAttributeError(unittest.TestCase): def test_init(self): exp = exception.RequiredAttributeError() self.assertEqual(exp.http_status, STATUS.BAD_REQUEST)", "self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"attribute is required and does not provide a default value\")", "data model namespace.Model\") class ExceptionResponseException(unittest.TestCase): def test_init(self): from prestans.types import Model class MyModel(Model):", "5]) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"value 3 is not one of these choices 1,", "3 is not one of these choices 1, 2, 5\") class ExceptionMinimumLengthError(unittest.TestCase): def", "STATUS.OK) self.assertEqual(response.message, \"message\") self.assertEqual(response.response_model, my_model) self.assertRaises(TypeError, exception.ResponseException, STATUS.INTERNAL_SERVER_ERROR, \"message\", \"string\") class ExceptionServiceUnavailable(unittest.TestCase): def", "exception.InvalidChoiceError(3, [1, 2, 5]) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"value 3 is not one of", "message=\"message\") 
self.assertEqual(base_value.http_status, STATUS.OK) base_value.http_status = STATUS.NO_CONTENT self.assertEqual(base_value.http_status, STATUS.NO_CONTENT) def test_stack_trace(self): base = exception.Base(http_status=STATUS.OK,", "\"value\"} ) self.assertEqual(validation_error.http_status, STATUS.BAD_REQUEST) self.assertEqual(validation_error.message, \"message\") self.assertEqual(validation_error.stack_trace, [ { \"attribute_name\": \"attribute\", \"value\": \"value\",", "STATUS.CONFLICT) self.assertEqual(conflict.message, \"Conflict\") class ExceptionNotFound(unittest.TestCase): def test_init(self): not_found = exception.NotFound() self.assertEqual(not_found.http_status, STATUS.NOT_FOUND) self.assertEqual(not_found.message,", "5\") class ExceptionMaximumLengthError(unittest.TestCase): def test_init(self): exp = exception.MaximumLengthError(\"dog\", 2) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"length", "pass def test_message(self): base_value = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base_value.message, \"message\") def test_str(self): base =", "\"dog\"]) self.assertEqual(attribute_filter_differs.http_status, STATUS.BAD_REQUEST) self.assertEqual( attribute_filter_differs.message, \"attribute filter contains attributes (cat, dog) that are", "request = Request( environ=request_environ, charset=charset, logger=self.logger, deserializers=serializers, default_deserializer=default_serializer ) handler_exception = exception.HandlerException(STATUS.FORBIDDEN, \"message\")", "self.assertEqual(base_value.message, \"message\") def test_str(self): base = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base.http_status, STATUS.OK) self.assertEqual(str(base.message), \"message\") class", "not allowed to access this resource\") class ExceptionSerializationFailedError(unittest.TestCase): def test_init(self): serialization_failed_error = 
exception.SerializationFailedError(\"format\")", "def test_str(self): base = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base.http_status, STATUS.OK) self.assertEqual(str(base.message), \"message\") class ExceptionUnsupportedVocabularyError(unittest.TestCase): def", "def test_init(self): from prestans.types import Model class MyModel(Model): pass my_model = MyModel() response", "message=\"message\") self.assertEqual(base.http_status, STATUS.OK) self.assertEqual(str(base.message), \"message\") class ExceptionUnsupportedVocabularyError(unittest.TestCase): def test_init(self): unsupported_vocabulary_error = exception.UnsupportedVocabularyError( accept_header=\"accept\",", "self.assertEqual(validation_error.http_status, STATUS.BAD_REQUEST) self.assertEqual(validation_error.message, \"message\") self.assertEqual(validation_error.stack_trace, [ { \"attribute_name\": \"attribute\", \"value\": \"value\", \"message\": \"message\",", "request_exception = exception.RequestException(STATUS.BAD_REQUEST, \"bad request\") self.assertEqual(request_exception.http_status, STATUS.BAD_REQUEST) self.assertEqual(request_exception.message, \"bad request\") class ExceptionUnimplementedVerbError(unittest.TestCase): def", "class ExceptionInconsistentPersistentDataError(unittest.TestCase): def test_init(self): error = exception.InconsistentPersistentDataError(\"name\", \"error message\") self.assertEqual(error.http_status, STATUS.INTERNAL_SERVER_ERROR) self.assertEqual(error.message, \"Data", "dog has to be greater than 5\") class ExceptionMaximumLengthError(unittest.TestCase): def test_init(self): exp =", "self.assertEqual(handler_exception_without_request.log_message, \"message\") self.assertEqual(str(handler_exception_without_request), \"message\") class ExceptionRequestException(unittest.TestCase): def test_init(self): request_exception = exception.RequestException(STATUS.BAD_REQUEST, \"bad request\")", "is required and does not provide a default value\") 
class ExceptionParseFailedError(unittest.TestCase): def test_init(self):", "= exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base_value.message, \"message\") def test_str(self): base = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base.http_status, STATUS.OK)", "test_init(self): request_exception = exception.RequestException(STATUS.BAD_REQUEST, \"bad request\") self.assertEqual(request_exception.http_status, STATUS.BAD_REQUEST) self.assertEqual(request_exception.message, \"bad request\") class ExceptionUnimplementedVerbError(unittest.TestCase):", "self.assertEqual(deserialization_failed_error.message, \"DeSerialization failed: format\") self.assertEqual(str(deserialization_failed_error), \"DeSerialization failed: format\") class ExceptionAttributeFilterDiffers(unittest.TestCase): def test_init(self): attribute_filter_differs", "class ExceptionUnauthorized(unittest.TestCase): def test_init(self): unauthorized = exception.Unauthorized() self.assertEqual(unauthorized.http_status, STATUS.UNAUTHORIZED) self.assertEqual(unauthorized.message, \"Unauthorized\") class ExceptionMovedPermanently(unittest.TestCase):", "STATUS.OK) self.assertEqual(str(base.message), \"message\") class ExceptionUnsupportedVocabularyError(unittest.TestCase): def test_init(self): unsupported_vocabulary_error = exception.UnsupportedVocabularyError( accept_header=\"accept\", supported_types=[\"a\", \"b\",", "attribute_name=\"attribute\", value=\"value\", blueprint={\"key\": \"value\"} ) self.assertEqual(validation_error.http_status, STATUS.BAD_REQUEST) self.assertEqual(validation_error.message, \"message\") self.assertEqual(validation_error.stack_trace, [ { \"attribute_name\":", "provide a default value\") class ExceptionParseFailedError(unittest.TestCase): def test_init(self): default_msg = exception.ParseFailedError() self.assertEqual(default_msg.http_status, STATUS.BAD_REQUEST)", "self.assertEqual(exp.message, \"message\") class 
ExceptionRequiredAttributeError(unittest.TestCase): def test_init(self): exp = exception.RequiredAttributeError() self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"attribute", "self.assertEqual(str(handler_exception), 'GET https://localhost:8080/url chrome \"message\"') handler_exception_without_request = exception.HandlerException(STATUS.NOT_FOUND, \"message\") self.assertEqual(handler_exception_without_request.http_status, STATUS.NOT_FOUND) self.assertEqual(handler_exception_without_request.message, \"message\")", "\"Serialization failed: format\") class ExceptionDeSerializationFailedError(unittest.TestCase): def test_init(self): deserialization_failed_error = exception.DeSerializationFailedError(\"format\") self.assertEqual(deserialization_failed_error.http_status, STATUS.NOT_FOUND) self.assertEqual(deserialization_failed_error.message,", "ExceptionMissingParameterError(unittest.TestCase): def test_init(self): missing_parameter = exception.MissingParameterError() self.assertEqual(missing_parameter.http_status, STATUS.BAD_REQUEST) self.assertEqual(missing_parameter.message, \"missing parameter\") class ExceptionInvalidFormatError(unittest.TestCase):", "of 3\") class ExceptionInvalidChoiceError(unittest.TestCase): def test_init(self): exp = exception.InvalidChoiceError(3, [1, 2, 5]) self.assertEqual(exp.http_status,", "= exception.MissingParameterError() self.assertEqual(missing_parameter.http_status, STATUS.BAD_REQUEST) self.assertEqual(missing_parameter.message, \"missing parameter\") class ExceptionInvalidFormatError(unittest.TestCase): def test_init(self): invalid_format =", "a default value\") class ExceptionParseFailedError(unittest.TestCase): def test_init(self): default_msg = exception.ParseFailedError() self.assertEqual(default_msg.http_status, STATUS.BAD_REQUEST) self.assertEqual(default_msg.message,", "\"5 is more than the allowed maximum of 3\") class ExceptionInvalidChoiceError(unittest.TestCase): def 
test_init(self):", "\"invalid value cat provided\") class ExceptionInvalidMetaValueError(unittest.TestCase): def test_init(self): invalid_meta_value = exception.InvalidMetaValueError() self.assertEqual(invalid_meta_value.http_status, STATUS.BAD_REQUEST)", "\"Bad Request\") class ExceptionConflict(unittest.TestCase): def test_init(self): conflict = exception.Conflict() self.assertEqual(conflict.http_status, STATUS.CONFLICT) self.assertEqual(conflict.message, \"Conflict\")", "[]) def test_push_trace(self): pass def test_message(self): base_value = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base_value.message, \"message\") def", "exception.UnregisteredAdapterError(\"namespace.Model\") self.assertEqual(unregistered_adapter.http_status, STATUS.BAD_REQUEST) self.assertEqual(unregistered_adapter.message, \"no registered adapters for data model namespace.Model\") class ExceptionResponseException(unittest.TestCase):", "\"text/plain\", \"supported_types\": \"application/json\" }] self.assertEqual(unsupported_content_type.stack_trace, stack_trace) class ExceptionValidationError(unittest.TestCase): def test_init(self): validation_error = exception.ValidationError(", "to be less than 2\") class ExceptionInvalidTypeError(unittest.TestCase): def test_init(self): exp = exception.InvalidTypeError(\"str\", \"int\")", "= exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base.http_status, STATUS.OK) self.assertEqual(str(base.message), \"message\") class ExceptionUnsupportedVocabularyError(unittest.TestCase): def test_init(self): unsupported_vocabulary_error =", "ExceptionNoEndpointError(unittest.TestCase): def test_init(self): no_endpoint = exception.NoEndpointError() self.assertEqual(no_endpoint.http_status, STATUS.NOT_FOUND) self.assertEqual(no_endpoint.message, \"API does not provide", "def test_init(self): exp = exception.LessThanMinimumError(3, 5) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) 
self.assertEqual(exp.message, \"3 is less than", "STATUS.BAD_REQUEST) self.assertEqual(bad_request.message, \"Bad Request\") class ExceptionConflict(unittest.TestCase): def test_init(self): conflict = exception.Conflict() self.assertEqual(conflict.http_status, STATUS.CONFLICT)", "STATUS.NOT_FOUND) self.assertEqual(not_found.message, \"Not Found\") class ExceptionUnauthorized(unittest.TestCase): def test_init(self): unauthorized = exception.Unauthorized() self.assertEqual(unauthorized.http_status, STATUS.UNAUTHORIZED)", "self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"length of value: dog has to be greater than 5\")", "def test_init(self): not_found = exception.NotFound() self.assertEqual(not_found.http_status, STATUS.NOT_FOUND) self.assertEqual(not_found.message, \"Not Found\") class ExceptionUnauthorized(unittest.TestCase): def", "STATUS.FORBIDDEN) self.assertEqual(forbidden.message, \"Forbidden\") class ExceptionInternalServerError(unittest.TestCase): def test_init(self): internal_server_error = exception.InternalServerError() self.assertEqual(internal_server_error.http_status, STATUS.INTERNAL_SERVER_ERROR) self.assertEqual(internal_server_error.message,", "prestans.http import STATUS from prestans.http import VERB from prestans import exception class ExceptionBase(unittest.TestCase):", "exception.UnimplementedVerbError(\"GET\") self.assertEqual(unimplemented_verb.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unimplemented_verb.message, \"API does not implement the HTTP Verb\") self.assertEqual(unimplemented_verb.stack_trace, [{\"verb\":", "access this resource\") class ExceptionSerializationFailedError(unittest.TestCase): def test_init(self): serialization_failed_error = exception.SerializationFailedError(\"format\") self.assertEqual(serialization_failed_error.http_status, STATUS.NOT_FOUND) self.assertEqual(serialization_failed_error.message,", "class ExceptionInvalidFormatError(unittest.TestCase): def 
test_init(self): invalid_format = exception.InvalidFormatError(\"cat\") self.assertEqual(invalid_format.http_status, STATUS.BAD_REQUEST) self.assertEqual(invalid_format.message, \"invalid value cat", "STATUS.NOT_FOUND) self.assertEqual(deserialization_failed_error.message, \"DeSerialization failed: format\") self.assertEqual(str(deserialization_failed_error), \"DeSerialization failed: format\") class ExceptionAttributeFilterDiffers(unittest.TestCase): def test_init(self):", "self.assertEqual(handler_exception_without_request.http_status, STATUS.NOT_FOUND) self.assertEqual(handler_exception_without_request.message, \"message\") self.assertEqual(handler_exception_without_request.log_message, \"message\") self.assertEqual(str(handler_exception_without_request), \"message\") class ExceptionRequestException(unittest.TestCase): def test_init(self): request_exception", "default_serializer = JSON() request_environ = { \"REQUEST_METHOD\": VERB.GET, \"PATH_INFO\": \"/url\", \"HTTP_USER_AGENT\": \"chrome\", \"wsgi.url_scheme\":", "not part of template\" ) class ExceptionInconsistentPersistentDataError(unittest.TestCase): def test_init(self): error = exception.InconsistentPersistentDataError(\"name\", \"error", "has to be greater than 5\") class ExceptionMaximumLengthError(unittest.TestCase): def test_init(self): exp = exception.MaximumLengthError(\"dog\",", "self.assertEqual(response.message, \"message\") self.assertEqual(response.response_model, my_model) self.assertRaises(TypeError, exception.ResponseException, STATUS.INTERNAL_SERVER_ERROR, \"message\", \"string\") class ExceptionServiceUnavailable(unittest.TestCase): def test_init(self):", "https://localhost:8080/url chrome \"message\"') self.assertEqual(str(handler_exception), 'GET https://localhost:8080/url chrome \"message\"') handler_exception_without_request = exception.HandlerException(STATUS.NOT_FOUND, \"message\") self.assertEqual(handler_exception_without_request.http_status,", "\"chrome\", \"wsgi.url_scheme\": 
\"https\", \"SERVER_NAME\": \"localhost\", \"SERVER_PORT\": \"8080\" } request = Request( environ=request_environ, charset=charset,", "filter contains attributes (cat, dog) that are not part of template\" ) class", "ExceptionParseFailedError(unittest.TestCase): def test_init(self): default_msg = exception.ParseFailedError() self.assertEqual(default_msg.http_status, STATUS.BAD_REQUEST) self.assertEqual(default_msg.message, \"Parser Failed\") custom_msg =", "\"attribute\", \"value\": \"value\", \"message\": \"message\", \"blueprint\": {\"key\": \"value\"} } ]) self.assertEqual(str(validation_error), \"attribute message\")", "self.assertEqual(default_msg.http_status, STATUS.BAD_REQUEST) self.assertEqual(default_msg.message, \"Parser Failed\") custom_msg = exception.ParseFailedError(\"custom\") self.assertEqual(custom_msg.http_status, STATUS.BAD_REQUEST) self.assertEqual(custom_msg.message, \"custom\") class", "exception.ResponseException(STATUS.OK, \"message\", my_model) self.assertEqual(response.http_status, STATUS.OK) self.assertEqual(response.message, \"message\") self.assertEqual(response.response_model, my_model) self.assertRaises(TypeError, exception.ResponseException, STATUS.INTERNAL_SERVER_ERROR, \"message\",", "exception.MaximumLengthError(\"dog\", 2) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"length of value: dog has to be less", "https://localhost:8080/url chrome \"message\"') handler_exception_without_request = exception.HandlerException(STATUS.NOT_FOUND, \"message\") self.assertEqual(handler_exception_without_request.http_status, STATUS.NOT_FOUND) self.assertEqual(handler_exception_without_request.message, \"message\") self.assertEqual(handler_exception_without_request.log_message, \"message\")", "def test_init(self): serialization_failed_error = exception.SerializationFailedError(\"format\") self.assertEqual(serialization_failed_error.http_status, STATUS.NOT_FOUND) 
self.assertEqual(serialization_failed_error.message, \"Serialization failed: format\") self.assertEqual(str(serialization_failed_error), \"Serialization", "test_init(self): authentication = exception.AuthenticationError() self.assertEqual(authentication.http_status, STATUS.UNAUTHORIZED) self.assertEqual(authentication.message, \"Authentication Error; service is only available", "class ExceptionRequiredAttributeError(unittest.TestCase): def test_init(self): exp = exception.RequiredAttributeError() self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"attribute is required", "self.assertEqual(exp.message, \"attribute is required and does not provide a default value\") class ExceptionParseFailedError(unittest.TestCase):", "def test_init(self): unsupported_content_type = exception.UnsupportedContentTypeError(\"text/plain\", \"application/json\") self.assertEqual(unsupported_content_type.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unsupported_content_type.message, \"Unsupported Content-Type in Request\")", "= exception.AttributeFilterDiffers([\"cat\", \"dog\"]) self.assertEqual(attribute_filter_differs.http_status, STATUS.BAD_REQUEST) self.assertEqual( attribute_filter_differs.message, \"attribute filter contains attributes (cat, dog)", "\"length of value: dog has to be greater than 5\") class ExceptionMaximumLengthError(unittest.TestCase): def", "resource\") class ExceptionSerializationFailedError(unittest.TestCase): def test_init(self): serialization_failed_error = exception.SerializationFailedError(\"format\") self.assertEqual(serialization_failed_error.http_status, STATUS.NOT_FOUND) self.assertEqual(serialization_failed_error.message, \"Serialization failed:", "exception.PaymentRequired() self.assertEqual(payment_required.http_status, STATUS.PAYMENT_REQUIRED) self.assertEqual(payment_required.message, \"Payment Required\") class ExceptionForbidden(unittest.TestCase): def test_init(self): forbidden = exception.Forbidden()", 
"import Model class MyModel(Model): pass my_model = MyModel() response = exception.ResponseException(STATUS.OK, \"message\", my_model)", "str given, expected int\") class ExceptionMissingParameterError(unittest.TestCase): def test_init(self): missing_parameter = exception.MissingParameterError() self.assertEqual(missing_parameter.http_status, STATUS.BAD_REQUEST)", "STATUS.NOT_IMPLEMENTED) self.assertEqual(unsupported_vocabulary_error.message, \"Unsupported vocabulary in the Accept header\") stack_trace = [{ \"accept_header\": \"accept\",", "format\") self.assertEqual(str(deserialization_failed_error), \"DeSerialization failed: format\") class ExceptionAttributeFilterDiffers(unittest.TestCase): def test_init(self): attribute_filter_differs = exception.AttributeFilterDiffers([\"cat\", \"dog\"])", "[\"a\", \"b\", \"c\"] }] self.assertEqual(unsupported_vocabulary_error.stack_trace, stack_trace) class ExceptionUnsupportedContentTypeError(unittest.TestCase): def test_init(self): unsupported_content_type = exception.UnsupportedContentTypeError(\"text/plain\",", "class ExceptionBase(unittest.TestCase): def test_http_status(self): base_value = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base_value.http_status, STATUS.OK) base_value.http_status = STATUS.NO_CONTENT", "self.assertEqual(base_value.http_status, STATUS.OK) base_value.http_status = STATUS.NO_CONTENT self.assertEqual(base_value.http_status, STATUS.NO_CONTENT) def test_stack_trace(self): base = exception.Base(http_status=STATUS.OK, message=\"message\")", "class ExceptionConflict(unittest.TestCase): def test_init(self): conflict = exception.Conflict() self.assertEqual(conflict.http_status, STATUS.CONFLICT) self.assertEqual(conflict.message, \"Conflict\") class ExceptionNotFound(unittest.TestCase):", "self.assertEqual( attribute_filter_differs.message, \"attribute filter contains attributes (cat, dog) that are not part of", "self.assertEqual(deserialization_failed_error.http_status, 
STATUS.NOT_FOUND) self.assertEqual(deserialization_failed_error.message, \"DeSerialization failed: format\") self.assertEqual(str(deserialization_failed_error), \"DeSerialization failed: format\") class ExceptionAttributeFilterDiffers(unittest.TestCase): def", "class ExceptionPaymentRequired(unittest.TestCase): def test_init(self): payment_required = exception.PaymentRequired() self.assertEqual(payment_required.http_status, STATUS.PAYMENT_REQUIRED) self.assertEqual(payment_required.message, \"Payment Required\") class", "exception class ExceptionBase(unittest.TestCase): def test_http_status(self): base_value = exception.Base(http_status=STATUS.OK, message=\"message\") self.assertEqual(base_value.http_status, STATUS.OK) base_value.http_status =", "self.assertEqual(error.message, \"Data Adapter failed to validate stored data on the server\") self.assertEqual( str(error),", "maximum of 3\") class ExceptionInvalidChoiceError(unittest.TestCase): def test_init(self): exp = exception.InvalidChoiceError(3, [1, 2, 5])", "does not provide a default value\") class ExceptionParseFailedError(unittest.TestCase): def test_init(self): default_msg = exception.ParseFailedError()", "\"REQUEST_METHOD\": VERB.GET, \"PATH_INFO\": \"/url\", \"HTTP_USER_AGENT\": \"chrome\", \"wsgi.url_scheme\": \"https\", \"SERVER_NAME\": \"localhost\", \"SERVER_PORT\": \"8080\" }", "authorization = exception.AuthorizationError(\"Role\") self.assertEqual(authorization.http_status, STATUS.FORBIDDEN) self.assertEqual(authorization.message, \"Role is not allowed to access this", "\"message\": \"message\", \"blueprint\": {\"key\": \"value\"} } ]) self.assertEqual(str(validation_error), \"attribute message\") class ExceptionHandlerException(unittest.TestCase): def", "allowed minimum of 5\") class ExceptionMoreThanMaximumError(unittest.TestCase): def test_init(self): exp = exception.MoreThanMaximumError(5, 3) self.assertEqual(exp.http_status,", "part of template\" ) class 
ExceptionInconsistentPersistentDataError(unittest.TestCase): def test_init(self): error = exception.InconsistentPersistentDataError(\"name\", \"error message\")", "self.assertEqual(exp.message, \"3 is less than the allowed minimum of 5\") class ExceptionMoreThanMaximumError(unittest.TestCase): def", "ExceptionInvalidMetaValueError(unittest.TestCase): def test_init(self): invalid_meta_value = exception.InvalidMetaValueError() self.assertEqual(invalid_meta_value.http_status, STATUS.BAD_REQUEST) self.assertEqual(invalid_meta_value.message, \"invalid meta value\") class", "the server\" ) self.assertEqual(error.stack_trace, [{'exception_message': \"error message\", 'attribute_name': \"name\"}]) class ExceptionDataValidationException(unittest.TestCase): def test_init(self):", "self.assertEqual(no_endpoint.http_status, STATUS.NOT_FOUND) self.assertEqual(no_endpoint.message, \"API does not provide this end-point\") class ExceptionAuthenticationError(unittest.TestCase): def test_init(self):", "= exception.HandlerException(STATUS.FORBIDDEN, \"message\") handler_exception.request = request self.assertEqual(handler_exception.http_status, STATUS.FORBIDDEN) self.assertEqual(handler_exception.message, \"message\") self.assertEqual(handler_exception.request, request) self.assertEqual(handler_exception.log_message,", "STATUS.UNAUTHORIZED) self.assertEqual(authentication_custom.message, \"Custom message\") class ExceptionAuthorizationError(unittest.TestCase): def test_init(self): authorization = exception.AuthorizationError(\"Role\") self.assertEqual(authorization.http_status, STATUS.FORBIDDEN)", "validate stored data on the server\" ) self.assertEqual(error.stack_trace, [{'exception_message': \"error message\", 'attribute_name': \"name\"}])", "request) self.assertEqual(handler_exception.log_message, 'GET https://localhost:8080/url chrome \"message\"') self.assertEqual(str(handler_exception), 'GET https://localhost:8080/url chrome \"message\"') handler_exception_without_request =", 
"test_init(self): unsupported_vocabulary_error = exception.UnsupportedVocabularyError( accept_header=\"accept\", supported_types=[\"a\", \"b\", \"c\"] ) self.assertEqual(unsupported_vocabulary_error.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unsupported_vocabulary_error.message, \"Unsupported", "= exception.AuthorizationError(\"Role\") self.assertEqual(authorization.http_status, STATUS.FORBIDDEN) self.assertEqual(authorization.message, \"Role is not allowed to access this resource\")", "is not allowed to access this resource\") class ExceptionSerializationFailedError(unittest.TestCase): def test_init(self): serialization_failed_error =", "\"value\"} } ]) self.assertEqual(str(validation_error), \"attribute message\") class ExceptionHandlerException(unittest.TestCase): def test_init(self): from prestans.rest import", "format\") self.assertEqual(str(serialization_failed_error), \"Serialization failed: format\") class ExceptionDeSerializationFailedError(unittest.TestCase): def test_init(self): deserialization_failed_error = exception.DeSerializationFailedError(\"format\") self.assertEqual(deserialization_failed_error.http_status,", "is only available to authenticated\") authentication_custom = exception.AuthenticationError(\"Custom message\") self.assertEqual(authentication_custom.http_status, STATUS.UNAUTHORIZED) self.assertEqual(authentication_custom.message, \"Custom", "test_init(self): unimplemented_verb = exception.UnimplementedVerbError(\"GET\") self.assertEqual(unimplemented_verb.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unimplemented_verb.message, \"API does not implement the HTTP", "the allowed minimum of 5\") class ExceptionMoreThanMaximumError(unittest.TestCase): def test_init(self): exp = exception.MoreThanMaximumError(5, 3)", "model namespace.Model\") class ExceptionResponseException(unittest.TestCase): def test_init(self): from prestans.types import Model class MyModel(Model): pass", "bad_request = exception.BadRequest() 
self.assertEqual(bad_request.http_status, STATUS.BAD_REQUEST) self.assertEqual(bad_request.message, \"Bad Request\") class ExceptionConflict(unittest.TestCase): def test_init(self): conflict", "not_found = exception.NotFound() self.assertEqual(not_found.http_status, STATUS.NOT_FOUND) self.assertEqual(not_found.message, \"Not Found\") class ExceptionUnauthorized(unittest.TestCase): def test_init(self): unauthorized", "exception.InconsistentPersistentDataError(\"name\", \"error message\") self.assertEqual(error.http_status, STATUS.INTERNAL_SERVER_ERROR) self.assertEqual(error.message, \"Data Adapter failed to validate stored data", "def test_init(self): invalid_meta_value = exception.InvalidMetaValueError() self.assertEqual(invalid_meta_value.http_status, STATUS.BAD_REQUEST) self.assertEqual(invalid_meta_value.message, \"invalid meta value\") class ExceptionUnregisteredAdapterError(unittest.TestCase):", "class ExceptionUnimplementedVerbError(unittest.TestCase): def test_init(self): unimplemented_verb = exception.UnimplementedVerbError(\"GET\") self.assertEqual(unimplemented_verb.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unimplemented_verb.message, \"API does not", "message=\"message\", attribute_name=\"attribute\", value=\"value\", blueprint={\"key\": \"value\"} ) self.assertEqual(validation_error.http_status, STATUS.BAD_REQUEST) self.assertEqual(validation_error.message, \"message\") self.assertEqual(validation_error.stack_trace, [ {", "ExceptionForbidden(unittest.TestCase): def test_init(self): forbidden = exception.Forbidden() self.assertEqual(forbidden.http_status, STATUS.FORBIDDEN) self.assertEqual(forbidden.message, \"Forbidden\") class ExceptionInternalServerError(unittest.TestCase): def", "self.assertEqual(request_exception.message, \"bad request\") class ExceptionUnimplementedVerbError(unittest.TestCase): def test_init(self): unimplemented_verb = exception.UnimplementedVerbError(\"GET\") self.assertEqual(unimplemented_verb.http_status, 
STATUS.NOT_IMPLEMENTED) self.assertEqual(unimplemented_verb.message,", "my_model) self.assertRaises(TypeError, exception.ResponseException, STATUS.INTERNAL_SERVER_ERROR, \"message\", \"string\") class ExceptionServiceUnavailable(unittest.TestCase): def test_init(self): service_unavailable = exception.ServiceUnavailable()", "dog) that are not part of template\" ) class ExceptionInconsistentPersistentDataError(unittest.TestCase): def test_init(self): error", "self.assertEqual(authentication.message, \"Authentication Error; service is only available to authenticated\") authentication_custom = exception.AuthenticationError(\"Custom message\")", "Request import logging logging.basicConfig() self.logger = logging.getLogger(\"prestans\") from prestans.deserializer import JSON charset =", "self.assertEqual(custom_msg.message, \"custom\") class ExceptionLessThanMinimumError(unittest.TestCase): def test_init(self): exp = exception.LessThanMinimumError(3, 5) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message,", "\"accept\", \"supported_types\": [\"a\", \"b\", \"c\"] }] self.assertEqual(unsupported_vocabulary_error.stack_trace, stack_trace) class ExceptionUnsupportedContentTypeError(unittest.TestCase): def test_init(self): unsupported_content_type", "\"message\", my_model) self.assertEqual(response.http_status, STATUS.OK) self.assertEqual(response.message, \"message\") self.assertEqual(response.response_model, my_model) self.assertRaises(TypeError, exception.ResponseException, STATUS.INTERNAL_SERVER_ERROR, \"message\", \"string\")", "self.assertEqual(str(handler_exception_without_request), \"message\") class ExceptionRequestException(unittest.TestCase): def test_init(self): request_exception = exception.RequestException(STATUS.BAD_REQUEST, \"bad request\") self.assertEqual(request_exception.http_status, STATUS.BAD_REQUEST)", "def test_init(self): exp = exception.InvalidChoiceError(3, [1, 2, 5]) self.assertEqual(exp.http_status, 
STATUS.BAD_REQUEST) self.assertEqual(exp.message, \"value 3", "self.assertEqual(exp.message, \"value 3 is not one of these choices 1, 2, 5\") class", "charset=charset, logger=self.logger, deserializers=serializers, default_deserializer=default_serializer ) handler_exception = exception.HandlerException(STATUS.FORBIDDEN, \"message\") handler_exception.request = request self.assertEqual(handler_exception.http_status," ]
[ "\"På lager\", time.strftime('%H:%M:%S', time.localtime())) in_stock.append({'store': store, 'name': name, 'url': product_url}) else: print(\"Item unavailable\",", "på lager\", time.strftime('%H:%M:%S', time.localtime())) else: print('Item available', name, store) update(product_url, \"På lager\", time.strftime('%H:%M:%S',", "\"Power\" or store == \"Foetex\" or store == \"BR\" or store == \"Expert\":", "or store == \"Merlin\": if filtered is None: print(\"Item available\", name, store) update(product_url,", "== \"Coolshop\" or store == \"Power\" or store == \"Foetex\" or store ==", "= { 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like", "Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'} def update(url, stock,", "product_url = product['product_url'] name = product['product_name'] identifier = product['class'] store = product['store'] find", "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'} def update(url, stock, time): params = {'url':", "== \"Merlin\": if filtered is None: print(\"Item available\", name, store) update(product_url, \"På lager\",", "store) update(product_url, \"Ikke på lager\", time.strftime('%H:%M:%S', time.localtime())) else: print('Item available', name, store) update(product_url,", "time.strftime('%H:%M:%S', time.localtime())) in_stock.append({'store': store, 'name': name, 'url': product_url}) time.sleep(1) except Exception as e:", "time.strftime('%H:%M:%S', time.localtime())) else: print('Item available', name, store) update(product_url, \"På lager\", time.strftime('%H:%M:%S', time.localtime())) in_stock.append({'store':", "store == \"Coolshop\" or store == \"Power\" or store == \"Foetex\" or store", "'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47", "= bs.BeautifulSoup(source, 'html.parser') return soup.find(find, class_=identifier) while True: in_stock = [] 
data =", "filtered is None: print('Item unavailable', name, store) update(product_url, \"Ikke på lager\", time.strftime('%H:%M:%S', time.localtime()))", "= {'url': url, 'stock': stock, 'time': time} r.get(url=update_product, params=params) def filter_html(url, find, identifier):", "update(product_url, \"På lager\", time.strftime('%H:%M:%S', time.localtime())) in_stock.append({'store': store, 'name': name, 'url': product_url}) else: print(\"Item", "or store == \"Foetex\" or store == \"BR\" or store == \"Expert\": if", "= product['find'] filtered = filter_html(product_url, find, identifier) if store == \"Elgiganten\" or store", "time import json import mail as m import requests as r import os", "find, identifier) if store == \"Elgiganten\" or store == \"Proshop\" or store ==", "\"Elgiganten\" or store == \"Proshop\" or store == \"Happii\" or store == \"Merlin\":", "as bs import urllib.request import time import json import mail as m import", "True: in_stock = [] data = r.get(get_products).json() data = json.loads(data) for product in", "time.strftime('%H:%M:%S', time.localtime())) elif store == \"Bilka\" or store == \"Coolshop\" or store ==", "== \"Happii\" or store == \"Merlin\": if filtered is None: print(\"Item available\", name,", "r import os database_ip = os.environ['database'] get_products = 'http://'+database_ip+'/get-all-products' update_product = 'http://'+database_ip+'/update-product' headers", "10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'} def update(url, stock, time): params =", "get_products = 'http://'+database_ip+'/get-all-products' update_product = 'http://'+database_ip+'/update-product' headers = { 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel", "or store == \"Power\" or store == \"Foetex\" or store == \"BR\" or", "headers = { 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML,", "== \"Power\" or store == \"Foetex\" or store == \"BR\" or store ==", 
"urllib.request.urlopen(req).read() soup = bs.BeautifulSoup(source, 'html.parser') return soup.find(find, class_=identifier) while True: in_stock = []", "as r import os database_ip = os.environ['database'] get_products = 'http://'+database_ip+'/get-all-products' update_product = 'http://'+database_ip+'/update-product'", "Safari/537.36'} def update(url, stock, time): params = {'url': url, 'stock': stock, 'time': time}", "\"Coolshop\" or store == \"Power\" or store == \"Foetex\" or store == \"BR\"", "product['class'] store = product['store'] find = product['find'] filtered = filter_html(product_url, find, identifier) if", "'url': product_url}) time.sleep(1) except Exception as e: print(e) if len(in_stock) > 0: print('Lets", "\"Merlin\": if filtered is None: print(\"Item available\", name, store) update(product_url, \"På lager\", time.strftime('%H:%M:%S',", "bs.BeautifulSoup(source, 'html.parser') return soup.find(find, class_=identifier) while True: in_stock = [] data = r.get(get_products).json()", "or store == \"BR\" or store == \"Expert\": if filtered is None: print('Item", "store = product['store'] find = product['find'] filtered = filter_html(product_url, find, identifier) if store", "Gecko) Chrome/35.0.1916.47 Safari/537.36'} def update(url, stock, time): params = {'url': url, 'stock': stock,", "{'url': url, 'stock': stock, 'time': time} r.get(url=update_product, params=params) def filter_html(url, find, identifier): req", "'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'}", "import requests as r import os database_ip = os.environ['database'] get_products = 'http://'+database_ip+'/get-all-products' update_product", "product['store'] find = product['find'] filtered = filter_html(product_url, find, identifier) if store == \"Elgiganten\"", "product_url}) else: print(\"Item unavailable\", name, store) update(product_url, \"Ikke på lager\", time.strftime('%H:%M:%S', time.localtime())) elif", 
"product['product_name'] identifier = product['class'] store = product['store'] find = product['find'] filtered = filter_html(product_url,", "if filtered is None: print('Item unavailable', name, store) update(product_url, \"Ikke på lager\", time.strftime('%H:%M:%S',", "None: print('Item unavailable', name, store) update(product_url, \"Ikke på lager\", time.strftime('%H:%M:%S', time.localtime())) else: print('Item", "\"Proshop\" or store == \"Happii\" or store == \"Merlin\": if filtered is None:", "def filter_html(url, find, identifier): req = urllib.request.Request(url, headers=headers) source = urllib.request.urlopen(req).read() soup =", "source = urllib.request.urlopen(req).read() soup = bs.BeautifulSoup(source, 'html.parser') return soup.find(find, class_=identifier) while True: in_stock", "= 'http://'+database_ip+'/update-product' headers = { 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3)", "filtered = filter_html(product_url, find, identifier) if store == \"Elgiganten\" or store == \"Proshop\"", "in_stock = [] data = r.get(get_products).json() data = json.loads(data) for product in data:", "try: product_url = product['product_url'] name = product['product_name'] identifier = product['class'] store = product['store']", "time.localtime())) else: print('Item available', name, store) update(product_url, \"På lager\", time.strftime('%H:%M:%S', time.localtime())) in_stock.append({'store': store,", "name, store) update(product_url, \"Ikke på lager\", time.strftime('%H:%M:%S', time.localtime())) elif store == \"Bilka\" or", "'time': time} r.get(url=update_product, params=params) def filter_html(url, find, identifier): req = urllib.request.Request(url, headers=headers) source", "stock, 'time': time} r.get(url=update_product, params=params) def filter_html(url, find, identifier): req = urllib.request.Request(url, headers=headers)", "r.get(url=update_product, params=params) def filter_html(url, find, identifier): req = urllib.request.Request(url, 
headers=headers) source = urllib.request.urlopen(req).read()", "= json.loads(data) for product in data: try: product_url = product['product_url'] name = product['product_name']", "'http://'+database_ip+'/update-product' headers = { 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36", "identifier) if store == \"Elgiganten\" or store == \"Proshop\" or store == \"Happii\"", "= [] data = r.get(get_products).json() data = json.loads(data) for product in data: try:", "= r.get(get_products).json() data = json.loads(data) for product in data: try: product_url = product['product_url']", "json.loads(data) for product in data: try: product_url = product['product_url'] name = product['product_name'] identifier", "print(\"Item unavailable\", name, store) update(product_url, \"Ikke på lager\", time.strftime('%H:%M:%S', time.localtime())) elif store ==", "X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'} def update(url, stock, time): params", "== \"Proshop\" or store == \"Happii\" or store == \"Merlin\": if filtered is", "store == \"Foetex\" or store == \"BR\" or store == \"Expert\": if filtered", "\"BR\" or store == \"Expert\": if filtered is None: print('Item unavailable', name, store)", "print('Item unavailable', name, store) update(product_url, \"Ikke på lager\", time.strftime('%H:%M:%S', time.localtime())) else: print('Item available',", "= filter_html(product_url, find, identifier) if store == \"Elgiganten\" or store == \"Proshop\" or", "\"Ikke på lager\", time.strftime('%H:%M:%S', time.localtime())) else: print('Item available', name, store) update(product_url, \"På lager\",", "os database_ip = os.environ['database'] get_products = 'http://'+database_ip+'/get-all-products' update_product = 'http://'+database_ip+'/update-product' headers = {", "mail as m import requests as r import os database_ip = os.environ['database'] get_products", "or store == \"Coolshop\" or store == \"Power\" or store == \"Foetex\" or", 
"url, 'stock': stock, 'time': time} r.get(url=update_product, params=params) def filter_html(url, find, identifier): req =", "import bs4 as bs import urllib.request import time import json import mail as", "\"Expert\": if filtered is None: print('Item unavailable', name, store) update(product_url, \"Ikke på lager\",", "store) update(product_url, \"På lager\", time.strftime('%H:%M:%S', time.localtime())) in_stock.append({'store': store, 'name': name, 'url': product_url}) time.sleep(1)", "store, 'name': name, 'url': product_url}) else: print(\"Item unavailable\", name, store) update(product_url, \"Ikke på", "= urllib.request.urlopen(req).read() soup = bs.BeautifulSoup(source, 'html.parser') return soup.find(find, class_=identifier) while True: in_stock =", "'http://'+database_ip+'/get-all-products' update_product = 'http://'+database_ip+'/update-product' headers = { 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS", "find, identifier): req = urllib.request.Request(url, headers=headers) source = urllib.request.urlopen(req).read() soup = bs.BeautifulSoup(source, 'html.parser')", "as m import requests as r import os database_ip = os.environ['database'] get_products =", "= urllib.request.Request(url, headers=headers) source = urllib.request.urlopen(req).read() soup = bs.BeautifulSoup(source, 'html.parser') return soup.find(find, class_=identifier)", "like Gecko) Chrome/35.0.1916.47 Safari/537.36'} def update(url, stock, time): params = {'url': url, 'stock':", "= product['product_url'] name = product['product_name'] identifier = product['class'] store = product['store'] find =", "or store == \"Proshop\" or store == \"Happii\" or store == \"Merlin\": if", "== \"Bilka\" or store == \"Coolshop\" or store == \"Power\" or store ==", "på lager\", time.strftime('%H:%M:%S', time.localtime())) elif store == \"Bilka\" or store == \"Coolshop\" or", "filtered is None: print(\"Item available\", name, store) update(product_url, \"På lager\", time.strftime('%H:%M:%S', 
time.localtime())) in_stock.append({'store':", "== \"Expert\": if filtered is None: print('Item unavailable', name, store) update(product_url, \"Ikke på", "= product['product_name'] identifier = product['class'] store = product['store'] find = product['find'] filtered =", "update(product_url, \"Ikke på lager\", time.strftime('%H:%M:%S', time.localtime())) else: print('Item available', name, store) update(product_url, \"På", "print('Item available', name, store) update(product_url, \"På lager\", time.strftime('%H:%M:%S', time.localtime())) in_stock.append({'store': store, 'name': name,", "import time import json import mail as m import requests as r import", "headers=headers) source = urllib.request.urlopen(req).read() soup = bs.BeautifulSoup(source, 'html.parser') return soup.find(find, class_=identifier) while True:", "update_product = 'http://'+database_ip+'/update-product' headers = { 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X", "available\", name, store) update(product_url, \"På lager\", time.strftime('%H:%M:%S', time.localtime())) in_stock.append({'store': store, 'name': name, 'url':", "product_url}) time.sleep(1) except Exception as e: print(e) if len(in_stock) > 0: print('Lets email!')", "import urllib.request import time import json import mail as m import requests as", "product in data: try: product_url = product['product_url'] name = product['product_name'] identifier = product['class']", "name, 'url': product_url}) else: print(\"Item unavailable\", name, store) update(product_url, \"Ikke på lager\", time.strftime('%H:%M:%S',", "\"Bilka\" or store == \"Coolshop\" or store == \"Power\" or store == \"Foetex\"", "elif store == \"Bilka\" or store == \"Coolshop\" or store == \"Power\" or", "= product['class'] store = product['store'] find = product['find'] filtered = filter_html(product_url, find, identifier)", "or store == \"Happii\" or store == \"Merlin\": if filtered is None: print(\"Item", "product['find'] filtered = filter_html(product_url, 
find, identifier) if store == \"Elgiganten\" or store ==", "database_ip = os.environ['database'] get_products = 'http://'+database_ip+'/get-all-products' update_product = 'http://'+database_ip+'/update-product' headers = { 'User-Agent':", "store) update(product_url, \"På lager\", time.strftime('%H:%M:%S', time.localtime())) in_stock.append({'store': store, 'name': name, 'url': product_url}) else:", "lager\", time.strftime('%H:%M:%S', time.localtime())) in_stock.append({'store': store, 'name': name, 'url': product_url}) time.sleep(1) except Exception as", "urllib.request import time import json import mail as m import requests as r", "update(product_url, \"Ikke på lager\", time.strftime('%H:%M:%S', time.localtime())) elif store == \"Bilka\" or store ==", "update(url, stock, time): params = {'url': url, 'stock': stock, 'time': time} r.get(url=update_product, params=params)", "filter_html(url, find, identifier): req = urllib.request.Request(url, headers=headers) source = urllib.request.urlopen(req).read() soup = bs.BeautifulSoup(source,", "stock, time): params = {'url': url, 'stock': stock, 'time': time} r.get(url=update_product, params=params) def", "\"Foetex\" or store == \"BR\" or store == \"Expert\": if filtered is None:", "in data: try: product_url = product['product_url'] name = product['product_name'] identifier = product['class'] store", "time): params = {'url': url, 'stock': stock, 'time': time} r.get(url=update_product, params=params) def filter_html(url,", "(KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'} def update(url, stock, time): params = {'url': url,", "identifier): req = urllib.request.Request(url, headers=headers) source = urllib.request.urlopen(req).read() soup = bs.BeautifulSoup(source, 'html.parser') return", "\"Ikke på lager\", time.strftime('%H:%M:%S', time.localtime())) elif store == \"Bilka\" or store == \"Coolshop\"", "name, 'url': product_url}) time.sleep(1) except Exception as e: print(e) if len(in_stock) > 0:", "store == 
\"Proshop\" or store == \"Happii\" or store == \"Merlin\": if filtered", "import mail as m import requests as r import os database_ip = os.environ['database']", "'stock': stock, 'time': time} r.get(url=update_product, params=params) def filter_html(url, find, identifier): req = urllib.request.Request(url,", "data = r.get(get_products).json() data = json.loads(data) for product in data: try: product_url =", "[] data = r.get(get_products).json() data = json.loads(data) for product in data: try: product_url", "'url': product_url}) else: print(\"Item unavailable\", name, store) update(product_url, \"Ikke på lager\", time.strftime('%H:%M:%S', time.localtime()))", "m import requests as r import os database_ip = os.environ['database'] get_products = 'http://'+database_ip+'/get-all-products'", "identifier = product['class'] store = product['store'] find = product['find'] filtered = filter_html(product_url, find,", "return soup.find(find, class_=identifier) while True: in_stock = [] data = r.get(get_products).json() data =", "'name': name, 'url': product_url}) time.sleep(1) except Exception as e: print(e) if len(in_stock) >", "if store == \"Elgiganten\" or store == \"Proshop\" or store == \"Happii\" or", "store) update(product_url, \"Ikke på lager\", time.strftime('%H:%M:%S', time.localtime())) elif store == \"Bilka\" or store", "store == \"BR\" or store == \"Expert\": if filtered is None: print('Item unavailable',", "= 'http://'+database_ip+'/get-all-products' update_product = 'http://'+database_ip+'/update-product' headers = { 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac", "time.localtime())) elif store == \"Bilka\" or store == \"Coolshop\" or store == \"Power\"", "Chrome/35.0.1916.47 Safari/537.36'} def update(url, stock, time): params = {'url': url, 'stock': stock, 'time':", "time.localtime())) in_stock.append({'store': store, 'name': name, 'url': product_url}) time.sleep(1) except Exception as e: print(e)", "soup = bs.BeautifulSoup(source, 'html.parser') return 
soup.find(find, class_=identifier) while True: in_stock = [] data", "find = product['find'] filtered = filter_html(product_url, find, identifier) if store == \"Elgiganten\" or", "<gh_stars>0 import bs4 as bs import urllib.request import time import json import mail", "\"Happii\" or store == \"Merlin\": if filtered is None: print(\"Item available\", name, store)", "available', name, store) update(product_url, \"På lager\", time.strftime('%H:%M:%S', time.localtime())) in_stock.append({'store': store, 'name': name, 'url':", "else: print(\"Item unavailable\", name, store) update(product_url, \"Ikke på lager\", time.strftime('%H:%M:%S', time.localtime())) elif store", "req = urllib.request.Request(url, headers=headers) source = urllib.request.urlopen(req).read() soup = bs.BeautifulSoup(source, 'html.parser') return soup.find(find,", "filter_html(product_url, find, identifier) if store == \"Elgiganten\" or store == \"Proshop\" or store", "'name': name, 'url': product_url}) else: print(\"Item unavailable\", name, store) update(product_url, \"Ikke på lager\",", "def update(url, stock, time): params = {'url': url, 'stock': stock, 'time': time} r.get(url=update_product,", "params=params) def filter_html(url, find, identifier): req = urllib.request.Request(url, headers=headers) source = urllib.request.urlopen(req).read() soup", "product['product_url'] name = product['product_name'] identifier = product['class'] store = product['store'] find = product['find']", "import os database_ip = os.environ['database'] get_products = 'http://'+database_ip+'/get-all-products' update_product = 'http://'+database_ip+'/update-product' headers =", "time.strftime('%H:%M:%S', time.localtime())) in_stock.append({'store': store, 'name': name, 'url': product_url}) else: print(\"Item unavailable\", name, store)", "while True: in_stock = [] data = r.get(get_products).json() data = json.loads(data) for product", "requests as r import os database_ip = os.environ['database'] get_products = 
'http://'+database_ip+'/get-all-products' update_product =", "is None: print('Item unavailable', name, store) update(product_url, \"Ikke på lager\", time.strftime('%H:%M:%S', time.localtime())) else:", "== \"BR\" or store == \"Expert\": if filtered is None: print('Item unavailable', name,", "soup.find(find, class_=identifier) while True: in_stock = [] data = r.get(get_products).json() data = json.loads(data)", "OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'} def update(url, stock, time):", "lager\", time.strftime('%H:%M:%S', time.localtime())) in_stock.append({'store': store, 'name': name, 'url': product_url}) else: print(\"Item unavailable\", name,", "\"På lager\", time.strftime('%H:%M:%S', time.localtime())) in_stock.append({'store': store, 'name': name, 'url': product_url}) time.sleep(1) except Exception", "{ 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko)", "for product in data: try: product_url = product['product_url'] name = product['product_name'] identifier =", "time} r.get(url=update_product, params=params) def filter_html(url, find, identifier): req = urllib.request.Request(url, headers=headers) source =", "update(product_url, \"På lager\", time.strftime('%H:%M:%S', time.localtime())) in_stock.append({'store': store, 'name': name, 'url': product_url}) time.sleep(1) except", "time.sleep(1) except Exception as e: print(e) if len(in_stock) > 0: print('Lets email!') m.send_email(in_stock)", "import json import mail as m import requests as r import os database_ip", "json import mail as m import requests as r import os database_ip =", "store == \"Bilka\" or store == \"Coolshop\" or store == \"Power\" or store", "name = product['product_name'] identifier = product['class'] store = product['store'] find = product['find'] filtered", "lager\", time.strftime('%H:%M:%S', time.localtime())) elif store == \"Bilka\" or store == \"Coolshop\" or store", "Intel Mac OS X 10_9_3) 
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'} def update(url,", "if filtered is None: print(\"Item available\", name, store) update(product_url, \"På lager\", time.strftime('%H:%M:%S', time.localtime()))", "urllib.request.Request(url, headers=headers) source = urllib.request.urlopen(req).read() soup = bs.BeautifulSoup(source, 'html.parser') return soup.find(find, class_=identifier) while", "time.localtime())) in_stock.append({'store': store, 'name': name, 'url': product_url}) else: print(\"Item unavailable\", name, store) update(product_url,", "in_stock.append({'store': store, 'name': name, 'url': product_url}) else: print(\"Item unavailable\", name, store) update(product_url, \"Ikke", "name, store) update(product_url, \"Ikke på lager\", time.strftime('%H:%M:%S', time.localtime())) else: print('Item available', name, store)", "= product['store'] find = product['find'] filtered = filter_html(product_url, find, identifier) if store ==", "store == \"Happii\" or store == \"Merlin\": if filtered is None: print(\"Item available\",", "(Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'} def", "is None: print(\"Item available\", name, store) update(product_url, \"På lager\", time.strftime('%H:%M:%S', time.localtime())) in_stock.append({'store': store,", "unavailable\", name, store) update(product_url, \"Ikke på lager\", time.strftime('%H:%M:%S', time.localtime())) elif store == \"Bilka\"", "bs import urllib.request import time import json import mail as m import requests", "print(\"Item available\", name, store) update(product_url, \"På lager\", time.strftime('%H:%M:%S', time.localtime())) in_stock.append({'store': store, 'name': name,", "store == \"Merlin\": if filtered is None: print(\"Item available\", name, store) update(product_url, \"På", "os.environ['database'] get_products = 'http://'+database_ip+'/get-all-products' update_product = 'http://'+database_ip+'/update-product' headers 
= { 'User-Agent': 'Mozilla/5.0 (Macintosh;", "store == \"Elgiganten\" or store == \"Proshop\" or store == \"Happii\" or store", "'html.parser') return soup.find(find, class_=identifier) while True: in_stock = [] data = r.get(get_products).json() data", "== \"Foetex\" or store == \"BR\" or store == \"Expert\": if filtered is", "class_=identifier) while True: in_stock = [] data = r.get(get_products).json() data = json.loads(data) for", "data = json.loads(data) for product in data: try: product_url = product['product_url'] name =", "params = {'url': url, 'stock': stock, 'time': time} r.get(url=update_product, params=params) def filter_html(url, find,", "store == \"Power\" or store == \"Foetex\" or store == \"BR\" or store", "else: print('Item available', name, store) update(product_url, \"På lager\", time.strftime('%H:%M:%S', time.localtime())) in_stock.append({'store': store, 'name':", "lager\", time.strftime('%H:%M:%S', time.localtime())) else: print('Item available', name, store) update(product_url, \"På lager\", time.strftime('%H:%M:%S', time.localtime()))", "None: print(\"Item available\", name, store) update(product_url, \"På lager\", time.strftime('%H:%M:%S', time.localtime())) in_stock.append({'store': store, 'name':", "= os.environ['database'] get_products = 'http://'+database_ip+'/get-all-products' update_product = 'http://'+database_ip+'/update-product' headers = { 'User-Agent': 'Mozilla/5.0", "data: try: product_url = product['product_url'] name = product['product_name'] identifier = product['class'] store =", "unavailable', name, store) update(product_url, \"Ikke på lager\", time.strftime('%H:%M:%S', time.localtime())) else: print('Item available', name,", "store, 'name': name, 'url': product_url}) time.sleep(1) except Exception as e: print(e) if len(in_stock)", "name, store) update(product_url, \"På lager\", time.strftime('%H:%M:%S', time.localtime())) in_stock.append({'store': store, 'name': name, 'url': product_url})", "r.get(get_products).json() data 
= json.loads(data) for product in data: try: product_url = product['product_url'] name", "store == \"Expert\": if filtered is None: print('Item unavailable', name, store) update(product_url, \"Ikke", "== \"Elgiganten\" or store == \"Proshop\" or store == \"Happii\" or store ==", "bs4 as bs import urllib.request import time import json import mail as m", "in_stock.append({'store': store, 'name': name, 'url': product_url}) time.sleep(1) except Exception as e: print(e) if", "or store == \"Expert\": if filtered is None: print('Item unavailable', name, store) update(product_url," ]
[ "i[s] + 1, i[sw] + 1, i[w], i[nw] - 1, ] j_adj =", "1, i[e], i[se] + 1, i[s] + 1, i[sw] + 1, i[w], i[nw]", "slow and could be improved is_adj = np.full(party.shape, False) for d in range(8):", "args = parser.parse_args() s = load_input(args.input_file) party = party_from_str(s) if args.sync: step =", "nw = n & w sw = s & w # north, northeast,", "return np.array([[int(i) for i in line] for line in lines]) def update(party): \"\"\"Update", "9): i, j = np.where(party > 9) n = i > 0 e", "1 while np.any(party > 9): i, j = np.where(party > 9) n =", "n & w sw = s & w # north, northeast, east, southeast,", "> 9): i, j = np.where(party > 9) n = i > 0", "np def load_input(file_name): with open(file_name, 'r') as file: return file.read() def party_from_str(s): lines", "i[se] + 1, i[s] + 1, i[sw] + 1, i[w], i[nw] - 1,", "e nw = n & w sw = s & w # north,", "i[nw] - 1, ] j_adj = [ j[n], j[ne] + 1, j[e] +", "a bit slow and could be improved is_adj = np.full(party.shape, False) for d", "count += np.count_nonzero(party == 0) return count def synchronise(party): step = 0 while", "east, southeast, south, southwest, west, northwest i_adj = [ i[n] - 1, i[ne]", "bit slow and could be improved is_adj = np.full(party.shape, False) for d in", "s & w # north, northeast, east, southeast, south, southwest, west, northwest i_adj", "as np def load_input(file_name): with open(file_name, 'r') as file: return file.read() def party_from_str(s):", "= j > 0 ne = n & e se = s &", "0 while np.any(party > 0): party = update(party) step += 1 return step", "octopus flashes' ) parser.add_argument('input_file', metavar='INPUT_FILE', type=str, help='input file name') parser.add_argument('-s', '--sync', action='store_true') args", "party_from_str(s) if args.sync: step = synchronise(party) print(f'Step = {step}') else: count = count_flashes(party)", "def update(party): \"\"\"Update the party of octopuses. 
There is probably a faster way.\"\"\"", "i > 0 e = j < party.shape[1] - 1 s = i", "& e nw = n & w sw = s & w #", "'--sync', action='store_true') args = parser.parse_args() s = load_input(args.input_file) party = party_from_str(s) if args.sync:", "lines = s.splitlines() return np.array([[int(i) for i in line] for line in lines])", "parser.add_argument('input_file', metavar='INPUT_FILE', type=str, help='input file name') parser.add_argument('-s', '--sync', action='store_true') args = parser.parse_args() s", "file.read() def party_from_str(s): lines = s.splitlines() return np.array([[int(i) for i in line] for", "could be improved is_adj = np.full(party.shape, False) for d in range(8): is_adj[(i_adj[d], j_adj[d])]", "+= np.count_nonzero(party == 0) return count def synchronise(party): step = 0 while np.any(party", "] j_adj = [ j[n], j[ne] + 1, j[e] + 1, j[se] +", "1 s = i < party.shape[0] - 1 w = j > 0", "import argparse parser = argparse.ArgumentParser( description='Count octopus flashes' ) parser.add_argument('input_file', metavar='INPUT_FILE', type=str, help='input", "type=str, help='input file name') parser.add_argument('-s', '--sync', action='store_true') args = parser.parse_args() s = load_input(args.input_file)", "help='input file name') parser.add_argument('-s', '--sync', action='store_true') args = parser.parse_args() s = load_input(args.input_file) party", "ne = n & e se = s & e nw = n", "= argparse.ArgumentParser( description='Count octopus flashes' ) parser.add_argument('input_file', metavar='INPUT_FILE', type=str, help='input file name') parser.add_argument('-s',", "open(file_name, 'r') as file: return file.read() def party_from_str(s): lines = s.splitlines() return np.array([[int(i)", "False) for d in range(8): is_adj[(i_adj[d], j_adj[d])] = True party[(party > 0) &", "party of octopuses. 
There is probably a faster way.\"\"\" party += 1 while", "step += 1 return step if __name__ == '__main__': import argparse parser =", "be improved is_adj = np.full(party.shape, False) for d in range(8): is_adj[(i_adj[d], j_adj[d])] =", "= False return party def count_flashes(party, num_steps=100): count = 0 for step in", "party = party_from_str(s) if args.sync: step = synchronise(party) print(f'Step = {step}') else: count", "1, i[ne] - 1, i[e], i[se] + 1, i[s] + 1, i[sw] +", "j[sw] - 1, j[w] - 1, j[nw] - 1, ] party[(i, j)] =", "party = update(party) count += np.count_nonzero(party == 0) return count def synchronise(party): step", "<gh_stars>10-100 import numpy as np def load_input(file_name): with open(file_name, 'r') as file: return", "flashes' ) parser.add_argument('input_file', metavar='INPUT_FILE', type=str, help='input file name') parser.add_argument('-s', '--sync', action='store_true') args =", "- 1, ] party[(i, j)] = 0 # This is a bit slow", "in range(num_steps): party = update(party) count += np.count_nonzero(party == 0) return count def", "north, northeast, east, southeast, south, southwest, west, northwest i_adj = [ i[n] -", "west, northwest i_adj = [ i[n] - 1, i[ne] - 1, i[e], i[se]", "== '__main__': import argparse parser = argparse.ArgumentParser( description='Count octopus flashes' ) parser.add_argument('input_file', metavar='INPUT_FILE',", "def load_input(file_name): with open(file_name, 'r') as file: return file.read() def party_from_str(s): lines =", "count def synchronise(party): step = 0 while np.any(party > 0): party = update(party)", "& w sw = s & w # north, northeast, east, southeast, south,", "is_adj] += 1 is_adj[:, :] = False return party def count_flashes(party, num_steps=100): count", "s = load_input(args.input_file) party = party_from_str(s) if args.sync: step = synchronise(party) print(f'Step =", "w sw = s & w # north, northeast, east, southeast, south, southwest,", "argparse parser = argparse.ArgumentParser( description='Count octopus 
flashes' ) parser.add_argument('input_file', metavar='INPUT_FILE', type=str, help='input file", "= update(party) count += np.count_nonzero(party == 0) return count def synchronise(party): step =", "southwest, west, northwest i_adj = [ i[n] - 1, i[ne] - 1, i[e],", "j)] = 0 # This is a bit slow and could be improved", "+ 1, j[e] + 1, j[se] + 1, j[s], j[sw] - 1, j[w]", "= load_input(args.input_file) party = party_from_str(s) if args.sync: step = synchronise(party) print(f'Step = {step}')", "'__main__': import argparse parser = argparse.ArgumentParser( description='Count octopus flashes' ) parser.add_argument('input_file', metavar='INPUT_FILE', type=str,", "0 e = j < party.shape[1] - 1 s = i < party.shape[0]", "0 ne = n & e se = s & e nw =", "9) n = i > 0 e = j < party.shape[1] - 1", "j[w] - 1, j[nw] - 1, ] party[(i, j)] = 0 # This", "s.splitlines() return np.array([[int(i) for i in line] for line in lines]) def update(party):", "1 w = j > 0 ne = n & e se =", "- 1, i[ne] - 1, i[e], i[se] + 1, i[s] + 1, i[sw]", "[ i[n] - 1, i[ne] - 1, i[e], i[se] + 1, i[s] +", "args.sync: step = synchronise(party) print(f'Step = {step}') else: count = count_flashes(party) print(f'Count =", "- 1, ] j_adj = [ j[n], j[ne] + 1, j[e] + 1,", "step in range(num_steps): party = update(party) count += np.count_nonzero(party == 0) return count", "= s & e nw = n & w sw = s &", "1, j[w] - 1, j[nw] - 1, ] party[(i, j)] = 0 #", "= np.where(party > 9) n = i > 0 e = j <", "0) return count def synchronise(party): step = 0 while np.any(party > 0): party", "< party.shape[1] - 1 s = i < party.shape[0] - 1 w =", "- 1, i[e], i[se] + 1, i[s] + 1, i[sw] + 1, i[w],", "np.where(party > 9) n = i > 0 e = j < party.shape[1]", "south, southwest, west, northwest i_adj = [ i[n] - 1, i[ne] - 1,", "i[w], i[nw] - 1, ] j_adj = [ j[n], j[ne] + 1, j[e]", "= 0 for step in range(num_steps): party = update(party) count += np.count_nonzero(party ==", ") parser.add_argument('input_file', metavar='INPUT_FILE', type=str, 
help='input file name') parser.add_argument('-s', '--sync', action='store_true') args = parser.parse_args()", "if args.sync: step = synchronise(party) print(f'Step = {step}') else: count = count_flashes(party) print(f'Count", "0 for step in range(num_steps): party = update(party) count += np.count_nonzero(party == 0)", "1 return step if __name__ == '__main__': import argparse parser = argparse.ArgumentParser( description='Count", "= j < party.shape[1] - 1 s = i < party.shape[0] - 1", "s = i < party.shape[0] - 1 w = j > 0 ne", "> 0 ne = n & e se = s & e nw", "i_adj = [ i[n] - 1, i[ne] - 1, i[e], i[se] + 1,", "+ 1, i[sw] + 1, i[w], i[nw] - 1, ] j_adj = [", "i in line] for line in lines]) def update(party): \"\"\"Update the party of", "# This is a bit slow and could be improved is_adj = np.full(party.shape,", "& w # north, northeast, east, southeast, south, southwest, west, northwest i_adj =", "se = s & e nw = n & w sw = s", "This is a bit slow and could be improved is_adj = np.full(party.shape, False)", "= [ j[n], j[ne] + 1, j[e] + 1, j[se] + 1, j[s],", "return party def count_flashes(party, num_steps=100): count = 0 for step in range(num_steps): party", "num_steps=100): count = 0 for step in range(num_steps): party = update(party) count +=", "i[n] - 1, i[ne] - 1, i[e], i[se] + 1, i[s] + 1,", "np.any(party > 0): party = update(party) step += 1 return step if __name__", "is_adj = np.full(party.shape, False) for d in range(8): is_adj[(i_adj[d], j_adj[d])] = True party[(party", "party = update(party) step += 1 return step if __name__ == '__main__': import", "if __name__ == '__main__': import argparse parser = argparse.ArgumentParser( description='Count octopus flashes' )", "is_adj[:, :] = False return party def count_flashes(party, num_steps=100): count = 0 for", "> 9) n = i > 0 e = j < party.shape[1] -", "d in range(8): is_adj[(i_adj[d], j_adj[d])] = True party[(party > 0) & is_adj] +=", "= n & e se = s & e nw = n &", "metavar='INPUT_FILE', type=str, help='input file 
name') parser.add_argument('-s', '--sync', action='store_true') args = parser.parse_args() s =", "+= 1 while np.any(party > 9): i, j = np.where(party > 9) n", "file: return file.read() def party_from_str(s): lines = s.splitlines() return np.array([[int(i) for i in", "update(party): \"\"\"Update the party of octopuses. There is probably a faster way.\"\"\" party", "parser.add_argument('-s', '--sync', action='store_true') args = parser.parse_args() s = load_input(args.input_file) party = party_from_str(s) if", "in lines]) def update(party): \"\"\"Update the party of octopuses. There is probably a", "def count_flashes(party, num_steps=100): count = 0 for step in range(num_steps): party = update(party)", "s & e nw = n & w sw = s & w", "0 # This is a bit slow and could be improved is_adj =", "description='Count octopus flashes' ) parser.add_argument('input_file', metavar='INPUT_FILE', type=str, help='input file name') parser.add_argument('-s', '--sync', action='store_true')", "+= 1 return step if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(", "j[s], j[sw] - 1, j[w] - 1, j[nw] - 1, ] party[(i, j)]", "count = 0 for step in range(num_steps): party = update(party) count += np.count_nonzero(party", "southeast, south, southwest, west, northwest i_adj = [ i[n] - 1, i[ne] -", "j[nw] - 1, ] party[(i, j)] = 0 # This is a bit", "np.full(party.shape, False) for d in range(8): is_adj[(i_adj[d], j_adj[d])] = True party[(party > 0)", "line] for line in lines]) def update(party): \"\"\"Update the party of octopuses. 
There", "np.array([[int(i) for i in line] for line in lines]) def update(party): \"\"\"Update the", "= party_from_str(s) if args.sync: step = synchronise(party) print(f'Step = {step}') else: count =", "for i in line] for line in lines]) def update(party): \"\"\"Update the party", "party.shape[1] - 1 s = i < party.shape[0] - 1 w = j", "j[ne] + 1, j[e] + 1, j[se] + 1, j[s], j[sw] - 1,", "1, j[se] + 1, j[s], j[sw] - 1, j[w] - 1, j[nw] -", "+ 1, j[s], j[sw] - 1, j[w] - 1, j[nw] - 1, ]", "1 is_adj[:, :] = False return party def count_flashes(party, num_steps=100): count = 0", "def party_from_str(s): lines = s.splitlines() return np.array([[int(i) for i in line] for line", "step if __name__ == '__main__': import argparse parser = argparse.ArgumentParser( description='Count octopus flashes'", "> 0): party = update(party) step += 1 return step if __name__ ==", "1, j[s], j[sw] - 1, j[w] - 1, j[nw] - 1, ] party[(i,", "way.\"\"\" party += 1 while np.any(party > 9): i, j = np.where(party >", "party def count_flashes(party, num_steps=100): count = 0 for step in range(num_steps): party =", "import numpy as np def load_input(file_name): with open(file_name, 'r') as file: return file.read()", "= 0 # This is a bit slow and could be improved is_adj", "range(num_steps): party = update(party) count += np.count_nonzero(party == 0) return count def synchronise(party):", "probably a faster way.\"\"\" party += 1 while np.any(party > 9): i, j", "step = synchronise(party) print(f'Step = {step}') else: count = count_flashes(party) print(f'Count = {count}')", "j_adj[d])] = True party[(party > 0) & is_adj] += 1 is_adj[:, :] =", "True party[(party > 0) & is_adj] += 1 is_adj[:, :] = False return", "party[(party > 0) & is_adj] += 1 is_adj[:, :] = False return party", "= 0 while np.any(party > 0): party = update(party) step += 1 return", "= [ i[n] - 1, i[ne] - 1, i[e], i[se] + 1, i[s]", "the party of octopuses. 
There is probably a faster way.\"\"\" party += 1", "step = 0 while np.any(party > 0): party = update(party) step += 1", "> 0 e = j < party.shape[1] - 1 s = i <", "i[e], i[se] + 1, i[s] + 1, i[sw] + 1, i[w], i[nw] -", "- 1 w = j > 0 ne = n & e se", "0): party = update(party) step += 1 return step if __name__ == '__main__':", "i[ne] - 1, i[e], i[se] + 1, i[s] + 1, i[sw] + 1,", "northwest i_adj = [ i[n] - 1, i[ne] - 1, i[e], i[se] +", "& is_adj] += 1 is_adj[:, :] = False return party def count_flashes(party, num_steps=100):", "1, ] party[(i, j)] = 0 # This is a bit slow and", "is a bit slow and could be improved is_adj = np.full(party.shape, False) for", "- 1 s = i < party.shape[0] - 1 w = j >", "j = np.where(party > 9) n = i > 0 e = j", "1, i[sw] + 1, i[w], i[nw] - 1, ] j_adj = [ j[n],", "i, j = np.where(party > 9) n = i > 0 e =", "= s & w # north, northeast, east, southeast, south, southwest, west, northwest", "for line in lines]) def update(party): \"\"\"Update the party of octopuses. There is", "# north, northeast, east, southeast, south, southwest, west, northwest i_adj = [ i[n]", "np.count_nonzero(party == 0) return count def synchronise(party): step = 0 while np.any(party >", "= parser.parse_args() s = load_input(args.input_file) party = party_from_str(s) if args.sync: step = synchronise(party)", "i[sw] + 1, i[w], i[nw] - 1, ] j_adj = [ j[n], j[ne]", "i < party.shape[0] - 1 w = j > 0 ne = n", "count_flashes(party, num_steps=100): count = 0 for step in range(num_steps): party = update(party) count", "faster way.\"\"\" party += 1 while np.any(party > 9): i, j = np.where(party", "'r') as file: return file.read() def party_from_str(s): lines = s.splitlines() return np.array([[int(i) for", "party.shape[0] - 1 w = j > 0 ne = n & e", "- 1, j[nw] - 1, ] party[(i, j)] = 0 # This is", "__name__ == '__main__': import argparse parser = argparse.ArgumentParser( description='Count octopus flashes' ) parser.add_argument('input_file',", "return count def synchronise(party): 
step = 0 while np.any(party > 0): party =", "+ 1, i[w], i[nw] - 1, ] j_adj = [ j[n], j[ne] +", "= i > 0 e = j < party.shape[1] - 1 s =", "w # north, northeast, east, southeast, south, southwest, west, northwest i_adj = [", "with open(file_name, 'r') as file: return file.read() def party_from_str(s): lines = s.splitlines() return", "+ 1, i[s] + 1, i[sw] + 1, i[w], i[nw] - 1, ]", "\"\"\"Update the party of octopuses. There is probably a faster way.\"\"\" party +=", "in line] for line in lines]) def update(party): \"\"\"Update the party of octopuses.", "j[n], j[ne] + 1, j[e] + 1, j[se] + 1, j[s], j[sw] -", "a faster way.\"\"\" party += 1 while np.any(party > 9): i, j =", "return file.read() def party_from_str(s): lines = s.splitlines() return np.array([[int(i) for i in line]", "sw = s & w # north, northeast, east, southeast, south, southwest, west,", "1, i[w], i[nw] - 1, ] j_adj = [ j[n], j[ne] + 1,", "j_adj = [ j[n], j[ne] + 1, j[e] + 1, j[se] + 1,", "j[se] + 1, j[s], j[sw] - 1, j[w] - 1, j[nw] - 1,", "party += 1 while np.any(party > 9): i, j = np.where(party > 9)", "< party.shape[0] - 1 w = j > 0 ne = n &", "] party[(i, j)] = 0 # This is a bit slow and could", "for d in range(8): is_adj[(i_adj[d], j_adj[d])] = True party[(party > 0) & is_adj]", "and could be improved is_adj = np.full(party.shape, False) for d in range(8): is_adj[(i_adj[d],", ":] = False return party def count_flashes(party, num_steps=100): count = 0 for step", "of octopuses. 
There is probably a faster way.\"\"\" party += 1 while np.any(party", "n & e se = s & e nw = n & w", "update(party) step += 1 return step if __name__ == '__main__': import argparse parser", "return step if __name__ == '__main__': import argparse parser = argparse.ArgumentParser( description='Count octopus", "synchronise(party): step = 0 while np.any(party > 0): party = update(party) step +=", "1, j[e] + 1, j[se] + 1, j[s], j[sw] - 1, j[w] -", "range(8): is_adj[(i_adj[d], j_adj[d])] = True party[(party > 0) & is_adj] += 1 is_adj[:,", "is probably a faster way.\"\"\" party += 1 while np.any(party > 9): i,", "j[e] + 1, j[se] + 1, j[s], j[sw] - 1, j[w] - 1,", "e = j < party.shape[1] - 1 s = i < party.shape[0] -", "= s.splitlines() return np.array([[int(i) for i in line] for line in lines]) def", "in range(8): is_adj[(i_adj[d], j_adj[d])] = True party[(party > 0) & is_adj] += 1", "== 0) return count def synchronise(party): step = 0 while np.any(party > 0):", "name') parser.add_argument('-s', '--sync', action='store_true') args = parser.parse_args() s = load_input(args.input_file) party = party_from_str(s)", "improved is_adj = np.full(party.shape, False) for d in range(8): is_adj[(i_adj[d], j_adj[d])] = True", "is_adj[(i_adj[d], j_adj[d])] = True party[(party > 0) & is_adj] += 1 is_adj[:, :]", "= i < party.shape[0] - 1 w = j > 0 ne =", "while np.any(party > 9): i, j = np.where(party > 9) n = i", "n = i > 0 e = j < party.shape[1] - 1 s", "> 0) & is_adj] += 1 is_adj[:, :] = False return party def", "0) & is_adj] += 1 is_adj[:, :] = False return party def count_flashes(party,", "1, ] j_adj = [ j[n], j[ne] + 1, j[e] + 1, j[se]", "parser.parse_args() s = load_input(args.input_file) party = party_from_str(s) if args.sync: step = synchronise(party) print(f'Step", "[ j[n], j[ne] + 1, j[e] + 1, j[se] + 1, j[s], j[sw]", "argparse.ArgumentParser( description='Count octopus flashes' ) parser.add_argument('input_file', metavar='INPUT_FILE', type=str, help='input file name') 
parser.add_argument('-s', '--sync',", "= n & w sw = s & w # north, northeast, east,", "file name') parser.add_argument('-s', '--sync', action='store_true') args = parser.parse_args() s = load_input(args.input_file) party =", "numpy as np def load_input(file_name): with open(file_name, 'r') as file: return file.read() def", "There is probably a faster way.\"\"\" party += 1 while np.any(party > 9):", "octopuses. There is probably a faster way.\"\"\" party += 1 while np.any(party >", "+ 1, j[se] + 1, j[s], j[sw] - 1, j[w] - 1, j[nw]", "False return party def count_flashes(party, num_steps=100): count = 0 for step in range(num_steps):", "load_input(args.input_file) party = party_from_str(s) if args.sync: step = synchronise(party) print(f'Step = {step}') else:", "1, j[nw] - 1, ] party[(i, j)] = 0 # This is a", "e se = s & e nw = n & w sw =", "= True party[(party > 0) & is_adj] += 1 is_adj[:, :] = False", "w = j > 0 ne = n & e se = s", "action='store_true') args = parser.parse_args() s = load_input(args.input_file) party = party_from_str(s) if args.sync: step", "j < party.shape[1] - 1 s = i < party.shape[0] - 1 w", "- 1, j[w] - 1, j[nw] - 1, ] party[(i, j)] = 0", "load_input(file_name): with open(file_name, 'r') as file: return file.read() def party_from_str(s): lines = s.splitlines()", "while np.any(party > 0): party = update(party) step += 1 return step if", "parser = argparse.ArgumentParser( description='Count octopus flashes' ) parser.add_argument('input_file', metavar='INPUT_FILE', type=str, help='input file name')", "lines]) def update(party): \"\"\"Update the party of octopuses. There is probably a faster", "party[(i, j)] = 0 # This is a bit slow and could be", "as file: return file.read() def party_from_str(s): lines = s.splitlines() return np.array([[int(i) for i", "= update(party) step += 1 return step if __name__ == '__main__': import argparse", "line in lines]) def update(party): \"\"\"Update the party of octopuses. 
There is probably", "j > 0 ne = n & e se = s & e", "northeast, east, southeast, south, southwest, west, northwest i_adj = [ i[n] - 1,", "np.any(party > 9): i, j = np.where(party > 9) n = i >", "for step in range(num_steps): party = update(party) count += np.count_nonzero(party == 0) return", "update(party) count += np.count_nonzero(party == 0) return count def synchronise(party): step = 0", "= np.full(party.shape, False) for d in range(8): is_adj[(i_adj[d], j_adj[d])] = True party[(party >", "+= 1 is_adj[:, :] = False return party def count_flashes(party, num_steps=100): count =", "& e se = s & e nw = n & w sw", "1, i[s] + 1, i[sw] + 1, i[w], i[nw] - 1, ] j_adj", "def synchronise(party): step = 0 while np.any(party > 0): party = update(party) step", "party_from_str(s): lines = s.splitlines() return np.array([[int(i) for i in line] for line in" ]
[]
[ "state: state -> (result, state') \"\"\" def __init__(self, fn: Callable[[TState], Tuple[TSource, TState]]) ->", "-> let (a, s') = runState m s in runState (k a) s'", "\"\"\" self._fn = fn @classmethod def unit(cls, value: TSource) -> \"State[TSource, TState]\": r\"\"\"Create", "@classmethod def get(cls) -> \"State[TState, TState]\": r\"\"\"get = state $ \\s -> (s,", "newState = state $ \\s -> ((), newState)\"\"\" return State(lambda state: (Unit, new_state))", "cls(lambda state: (value, state)) def map(self, mapper: Callable[[TSource], TResult]) -> \"State[TResult, TState]\": def", "\"\"\" def __init__(self, fn: Callable[[TState], Tuple[TSource, TState]]) -> None: \"\"\"Initialize a new state.", "State $ \\s -> let (a, s') = runState m s in runState", "State(Generic[TSource, TState]): \"\"\"The state monad. Wraps stateful computations. A stateful computation is a", "-> Tuple[Any, Any]: return mapper(a), state return State(lambda state: _(*self.run(state))) def bind(self, fn:", "Callable[[TSource], \"State[TState, TResult]\"]) -> \"State[TResult, TState]\": r\"\"\"m >>= k = State $ \\s", "State $ \\s -> (x, s) \"\"\" return cls(lambda state: (value, state)) def", "new State. The unit function creates a new State object wrapping a stateful", "TResult = TypeVar(\"TResult\") class State(Generic[TSource, TState]): \"\"\"The state monad. Wraps stateful computations. A", "\"\"\"Return wrapped state computation. This is the inverse of unit and returns the", "self._fn = fn @classmethod def unit(cls, value: TSource) -> \"State[TSource, TState]\": r\"\"\"Create new", "= State $ \\s -> let (a, s') = runState m s in", "\"\"\"The state monad. Wraps stateful computations. A stateful computation is a function that", "creates a new State object wrapping a stateful computation. 
State $ \\s ->", "TState]\": r\"\"\"get = state $ \\s -> (s, s)\"\"\" return State(lambda state: (state,", "$ \\s -> (x, s) \"\"\" return cls(lambda state: (value, state)) def map(self,", "Generic from .util import Unit from .typing import Functor from .typing import Monad", "import Unit from .typing import Functor from .typing import Monad TState = TypeVar(\"TState\")", "stateful computation. State $ \\s -> (x, s) \"\"\" return cls(lambda state: (value,", "Monad TState = TypeVar(\"TState\") TSource = TypeVar(\"TSource\") TResult = TypeVar(\"TResult\") class State(Generic[TSource, TState]):", "newState)\"\"\" return State(lambda state: (Unit, new_state)) def run(self, state: TState) -> Tuple[TSource, TState]:", "A stateful computation is a function that takes a state and returns a", "from .typing import Functor from .typing import Monad TState = TypeVar(\"TState\") TSource =", "is the inverse of unit and returns the wrapped function. \"\"\" return self._fn(state)", "-> \"State[TState, TState]\": r\"\"\"get = state $ \\s -> (s, s)\"\"\" return State(lambda", "def __init__(self, fn: Callable[[TState], Tuple[TSource, TState]]) -> None: \"\"\"Initialize a new state. Keyword", "\"\"\" def _(result: Any, state: Any) -> Tuple[Any, Any]: return fn(result).run(state) return State(lambda", "return mapper(a), state return State(lambda state: _(*self.run(state))) def bind(self, fn: Callable[[TSource], \"State[TState, TResult]\"])", "import Monad TState = TypeVar(\"TState\") TSource = TypeVar(\"TSource\") TResult = TypeVar(\"TResult\") class State(Generic[TSource,", "the wrapped function. \"\"\" return self._fn(state) def __call__(self, state: Any) -> Tuple: return", "new State object wrapping a stateful computation. 
State $ \\s -> (x, s)", "Callable[[TSource], TResult]) -> \"State[TResult, TState]\": def _(a: Any, state: Any) -> Tuple[Any, Any]:", "a) s' \"\"\" def _(result: Any, state: Any) -> Tuple[Any, Any]: return fn(result).run(state)", "State(lambda state: _(*self.run(state))) def bind(self, fn: Callable[[TSource], \"State[TState, TResult]\"]) -> \"State[TResult, TState]\": r\"\"\"m", "_(a: Any, state: Any) -> Tuple[Any, Any]: return mapper(a), state return State(lambda state:", "function that takes a state and returns a result and new state: state", "wrapping a stateful computation. State $ \\s -> (x, s) \"\"\" return cls(lambda", "return State(lambda state: _(*self.run(state))) def bind(self, fn: Callable[[TSource], \"State[TState, TResult]\"]) -> \"State[TResult, TState]\":", "= runState m s in runState (k a) s' \"\"\" def _(result: Any,", "state') \"\"\" def __init__(self, fn: Callable[[TState], Tuple[TSource, TState]]) -> None: \"\"\"Initialize a new", "map(self, mapper: Callable[[TSource], TResult]) -> \"State[TResult, TState]\": def _(a: Any, state: Any) ->", "-> ((), newState)\"\"\" return State(lambda state: (Unit, new_state)) def run(self, state: TState) ->", ">>= k = State $ \\s -> let (a, s') = runState m", "\\s -> let (a, s') = runState m s in runState (k a)", "state: (Unit, new_state)) def run(self, state: TState) -> Tuple[TSource, TState]: \"\"\"Return wrapped state", "value: TSource) -> \"State[TSource, TState]\": r\"\"\"Create new State. The unit function creates a", "state. Keyword arguments: fn -- State processor. \"\"\" self._fn = fn @classmethod def", "returns a result and new state: state -> (result, state') \"\"\" def __init__(self,", "fn: Callable[[TState], Tuple[TSource, TState]]) -> None: \"\"\"Initialize a new state. Keyword arguments: fn", "takes a state and returns a result and new state: state -> (result,", "TState) -> Tuple[TSource, TState]: \"\"\"Return wrapped state computation. 
This is the inverse of", "in runState (k a) s' \"\"\" def _(result: Any, state: Any) -> Tuple[Any,", "fn -- State processor. \"\"\" self._fn = fn @classmethod def unit(cls, value: TSource)", "Tuple[TSource, TState]: \"\"\"Return wrapped state computation. This is the inverse of unit and", "return State(lambda state: (Unit, new_state)) def run(self, state: TState) -> Tuple[TSource, TState]: \"\"\"Return", "$ \\s -> (s, s)\"\"\" return State(lambda state: (state, state)) @classmethod def put(cls,", "$ \\s -> ((), newState)\"\"\" return State(lambda state: (Unit, new_state)) def run(self, state:", "let (a, s') = runState m s in runState (k a) s' \"\"\"", "TState]: \"\"\"Return wrapped state computation. This is the inverse of unit and returns", "@classmethod def unit(cls, value: TSource) -> \"State[TSource, TState]\": r\"\"\"Create new State. The unit", "processor. \"\"\" self._fn = fn @classmethod def unit(cls, value: TSource) -> \"State[TSource, TState]\":", "import Callable, Tuple, Any, TypeVar, Generic from .util import Unit from .typing import", "TState) -> \"State[Tuple, TState]\": r\"\"\"put newState = state $ \\s -> ((), newState)\"\"\"", "TypeVar, Generic from .util import Unit from .typing import Functor from .typing import", "State(lambda state: _(*self.run(state))) @classmethod def get(cls) -> \"State[TState, TState]\": r\"\"\"get = state $", "a stateful computation. State $ \\s -> (x, s) \"\"\" return cls(lambda state:", "TypeVar(\"TSource\") TResult = TypeVar(\"TResult\") class State(Generic[TSource, TState]): \"\"\"The state monad. Wraps stateful computations.", "runState m s in runState (k a) s' \"\"\" def _(result: Any, state:", "\"State[TState, TState]\": r\"\"\"get = state $ \\s -> (s, s)\"\"\" return State(lambda state:", "state: TState) -> Tuple[TSource, TState]: \"\"\"Return wrapped state computation. This is the inverse", "-> Tuple[TSource, TState]: \"\"\"Return wrapped state computation. 
This is the inverse of unit", "run(self, state: TState) -> Tuple[TSource, TState]: \"\"\"Return wrapped state computation. This is the", "the inverse of unit and returns the wrapped function. \"\"\" return self._fn(state) def", "new state. Keyword arguments: fn -- State processor. \"\"\" self._fn = fn @classmethod", "state: _(*self.run(state))) def bind(self, fn: Callable[[TSource], \"State[TState, TResult]\"]) -> \"State[TResult, TState]\": r\"\"\"m >>=", "\"State[Tuple, TState]\": r\"\"\"put newState = state $ \\s -> ((), newState)\"\"\" return State(lambda", "This is the inverse of unit and returns the wrapped function. \"\"\" return", "from typing import Callable, Tuple, Any, TypeVar, Generic from .util import Unit from", "k = State $ \\s -> let (a, s') = runState m s", "s in runState (k a) s' \"\"\" def _(result: Any, state: Any) ->", "runState (k a) s' \"\"\" def _(result: Any, state: Any) -> Tuple[Any, Any]:", "s') = runState m s in runState (k a) s' \"\"\" def _(result:", "get(cls) -> \"State[TState, TState]\": r\"\"\"get = state $ \\s -> (s, s)\"\"\" return", "TState]\": r\"\"\"put newState = state $ \\s -> ((), newState)\"\"\" return State(lambda state:", "r\"\"\"m >>= k = State $ \\s -> let (a, s') = runState", "Any) -> Tuple[Any, Any]: return fn(result).run(state) return State(lambda state: _(*self.run(state))) @classmethod def get(cls)", "def __call__(self, state: Any) -> Tuple: return self.run(state) assert issubclass(State, Functor) assert issubclass(State,", "Any, TypeVar, Generic from .util import Unit from .typing import Functor from .typing", ".typing import Functor from .typing import Monad TState = TypeVar(\"TState\") TSource = TypeVar(\"TSource\")", "(k a) s' \"\"\" def _(result: Any, state: Any) -> Tuple[Any, Any]: return", "_(result: Any, state: Any) -> Tuple[Any, Any]: return fn(result).run(state) return State(lambda state: _(*self.run(state)))", "_(*self.run(state))) @classmethod def get(cls) -> \"State[TState, TState]\": r\"\"\"get = 
state $ \\s ->", "\"\"\" return self._fn(state) def __call__(self, state: Any) -> Tuple: return self.run(state) assert issubclass(State,", "from .typing import Monad TState = TypeVar(\"TState\") TSource = TypeVar(\"TSource\") TResult = TypeVar(\"TResult\")", "@classmethod def put(cls, new_state: TState) -> \"State[Tuple, TState]\": r\"\"\"put newState = state $", "arguments: fn -- State processor. \"\"\" self._fn = fn @classmethod def unit(cls, value:", "(result, state') \"\"\" def __init__(self, fn: Callable[[TState], Tuple[TSource, TState]]) -> None: \"\"\"Initialize a", "monad. Wraps stateful computations. A stateful computation is a function that takes a", "Tuple[TSource, TState]]) -> None: \"\"\"Initialize a new state. Keyword arguments: fn -- State", "-> None: \"\"\"Initialize a new state. Keyword arguments: fn -- State processor. \"\"\"", "s) \"\"\" return cls(lambda state: (value, state)) def map(self, mapper: Callable[[TSource], TResult]) ->", "state $ \\s -> ((), newState)\"\"\" return State(lambda state: (Unit, new_state)) def run(self,", "return self._fn(state) def __call__(self, state: Any) -> Tuple: return self.run(state) assert issubclass(State, Functor)", "TState = TypeVar(\"TState\") TSource = TypeVar(\"TSource\") TResult = TypeVar(\"TResult\") class State(Generic[TSource, TState]): \"\"\"The", "None: \"\"\"Initialize a new state. Keyword arguments: fn -- State processor. \"\"\" self._fn", "(Unit, new_state)) def run(self, state: TState) -> Tuple[TSource, TState]: \"\"\"Return wrapped state computation.", "\"State[TSource, TState]\": r\"\"\"Create new State. The unit function creates a new State object", "Callable[[TState], Tuple[TSource, TState]]) -> None: \"\"\"Initialize a new state. Keyword arguments: fn --", "TState]\": r\"\"\"m >>= k = State $ \\s -> let (a, s') =", "def run(self, state: TState) -> Tuple[TSource, TState]: \"\"\"Return wrapped state computation. 
This is", "s' \"\"\" def _(result: Any, state: Any) -> Tuple[Any, Any]: return fn(result).run(state) return", "function. \"\"\" return self._fn(state) def __call__(self, state: Any) -> Tuple: return self.run(state) assert", "unit(cls, value: TSource) -> \"State[TSource, TState]\": r\"\"\"Create new State. The unit function creates", "\\s -> (s, s)\"\"\" return State(lambda state: (state, state)) @classmethod def put(cls, new_state:", "from .util import Unit from .typing import Functor from .typing import Monad TState", "result and new state: state -> (result, state') \"\"\" def __init__(self, fn: Callable[[TState],", "return fn(result).run(state) return State(lambda state: _(*self.run(state))) @classmethod def get(cls) -> \"State[TState, TState]\": r\"\"\"get", "State. The unit function creates a new State object wrapping a stateful computation.", "state computation. This is the inverse of unit and returns the wrapped function.", "new state: state -> (result, state') \"\"\" def __init__(self, fn: Callable[[TState], Tuple[TSource, TState]])", "Functor from .typing import Monad TState = TypeVar(\"TState\") TSource = TypeVar(\"TSource\") TResult =", "TypeVar(\"TResult\") class State(Generic[TSource, TState]): \"\"\"The state monad. Wraps stateful computations. A stateful computation", "def put(cls, new_state: TState) -> \"State[Tuple, TState]\": r\"\"\"put newState = state $ \\s", "\"State[TResult, TState]\": r\"\"\"m >>= k = State $ \\s -> let (a, s')", "self._fn(state) def __call__(self, state: Any) -> Tuple: return self.run(state) assert issubclass(State, Functor) assert", "mapper: Callable[[TSource], TResult]) -> \"State[TResult, TState]\": def _(a: Any, state: Any) -> Tuple[Any,", "state: Any) -> Tuple[Any, Any]: return fn(result).run(state) return State(lambda state: _(*self.run(state))) @classmethod def", "\"\"\"Initialize a new state. Keyword arguments: fn -- State processor. 
\"\"\" self._fn =", "def _(result: Any, state: Any) -> Tuple[Any, Any]: return fn(result).run(state) return State(lambda state:", "Any, state: Any) -> Tuple[Any, Any]: return fn(result).run(state) return State(lambda state: _(*self.run(state))) @classmethod", ".util import Unit from .typing import Functor from .typing import Monad TState =", "-> \"State[TSource, TState]\": r\"\"\"Create new State. The unit function creates a new State", "bind(self, fn: Callable[[TSource], \"State[TState, TResult]\"]) -> \"State[TResult, TState]\": r\"\"\"m >>= k = State", "state monad. Wraps stateful computations. A stateful computation is a function that takes", "<filename>oslash/state.py from typing import Callable, Tuple, Any, TypeVar, Generic from .util import Unit", "\"State[TResult, TState]\": def _(a: Any, state: Any) -> Tuple[Any, Any]: return mapper(a), state", "(a, s') = runState m s in runState (k a) s' \"\"\" def", "-> (s, s)\"\"\" return State(lambda state: (state, state)) @classmethod def put(cls, new_state: TState)", "object wrapping a stateful computation. State $ \\s -> (x, s) \"\"\" return", "return cls(lambda state: (value, state)) def map(self, mapper: Callable[[TSource], TResult]) -> \"State[TResult, TState]\":", "Any]: return fn(result).run(state) return State(lambda state: _(*self.run(state))) @classmethod def get(cls) -> \"State[TState, TState]\":", "a new State object wrapping a stateful computation. 
State $ \\s -> (x,", "$ \\s -> let (a, s') = runState m s in runState (k", "fn(result).run(state) return State(lambda state: _(*self.run(state))) @classmethod def get(cls) -> \"State[TState, TState]\": r\"\"\"get =", "-> (x, s) \"\"\" return cls(lambda state: (value, state)) def map(self, mapper: Callable[[TSource],", "Tuple[Any, Any]: return fn(result).run(state) return State(lambda state: _(*self.run(state))) @classmethod def get(cls) -> \"State[TState,", "return State(lambda state: (state, state)) @classmethod def put(cls, new_state: TState) -> \"State[Tuple, TState]\":", "= TypeVar(\"TState\") TSource = TypeVar(\"TSource\") TResult = TypeVar(\"TResult\") class State(Generic[TSource, TState]): \"\"\"The state", "a new state. Keyword arguments: fn -- State processor. \"\"\" self._fn = fn", "= fn @classmethod def unit(cls, value: TSource) -> \"State[TSource, TState]\": r\"\"\"Create new State.", ".typing import Monad TState = TypeVar(\"TState\") TSource = TypeVar(\"TSource\") TResult = TypeVar(\"TResult\") class", "-> \"State[TResult, TState]\": r\"\"\"m >>= k = State $ \\s -> let (a,", "(s, s)\"\"\" return State(lambda state: (state, state)) @classmethod def put(cls, new_state: TState) ->", "(state, state)) @classmethod def put(cls, new_state: TState) -> \"State[Tuple, TState]\": r\"\"\"put newState =", "a result and new state: state -> (result, state') \"\"\" def __init__(self, fn:", "= state $ \\s -> ((), newState)\"\"\" return State(lambda state: (Unit, new_state)) def", "unit function creates a new State object wrapping a stateful computation. State $", "State processor. \"\"\" self._fn = fn @classmethod def unit(cls, value: TSource) -> \"State[TSource,", "-> \"State[Tuple, TState]\": r\"\"\"put newState = state $ \\s -> ((), newState)\"\"\" return", "TState]]) -> None: \"\"\"Initialize a new state. Keyword arguments: fn -- State processor.", "-> \"State[TResult, TState]\": def _(a: Any, state: Any) -> Tuple[Any, Any]: return mapper(a),", "computations. 
A stateful computation is a function that takes a state and returns", "The unit function creates a new State object wrapping a stateful computation. State", "stateful computation is a function that takes a state and returns a result", "= state $ \\s -> (s, s)\"\"\" return State(lambda state: (state, state)) @classmethod", "-> (result, state') \"\"\" def __init__(self, fn: Callable[[TState], Tuple[TSource, TState]]) -> None: \"\"\"Initialize", "State object wrapping a stateful computation. State $ \\s -> (x, s) \"\"\"", "Any]: return mapper(a), state return State(lambda state: _(*self.run(state))) def bind(self, fn: Callable[[TSource], \"State[TState,", "\\s -> ((), newState)\"\"\" return State(lambda state: (Unit, new_state)) def run(self, state: TState)", "unit and returns the wrapped function. \"\"\" return self._fn(state) def __call__(self, state: Any)", "class State(Generic[TSource, TState]): \"\"\"The state monad. Wraps stateful computations. A stateful computation is", "-> Tuple[Any, Any]: return fn(result).run(state) return State(lambda state: _(*self.run(state))) @classmethod def get(cls) ->", "Tuple, Any, TypeVar, Generic from .util import Unit from .typing import Functor from", "state: _(*self.run(state))) @classmethod def get(cls) -> \"State[TState, TState]\": r\"\"\"get = state $ \\s", "((), newState)\"\"\" return State(lambda state: (Unit, new_state)) def run(self, state: TState) -> Tuple[TSource,", "mapper(a), state return State(lambda state: _(*self.run(state))) def bind(self, fn: Callable[[TSource], \"State[TState, TResult]\"]) ->", "Wraps stateful computations. 
A stateful computation is a function that takes a state", "Any) -> Tuple[Any, Any]: return mapper(a), state return State(lambda state: _(*self.run(state))) def bind(self,", "State(lambda state: (state, state)) @classmethod def put(cls, new_state: TState) -> \"State[Tuple, TState]\": r\"\"\"put", "_(*self.run(state))) def bind(self, fn: Callable[[TSource], \"State[TState, TResult]\"]) -> \"State[TResult, TState]\": r\"\"\"m >>= k", "inverse of unit and returns the wrapped function. \"\"\" return self._fn(state) def __call__(self,", "state: (state, state)) @classmethod def put(cls, new_state: TState) -> \"State[Tuple, TState]\": r\"\"\"put newState", "typing import Callable, Tuple, Any, TypeVar, Generic from .util import Unit from .typing", "def map(self, mapper: Callable[[TSource], TResult]) -> \"State[TResult, TState]\": def _(a: Any, state: Any)", "is a function that takes a state and returns a result and new", "TState]\": r\"\"\"Create new State. The unit function creates a new State object wrapping", "state: (value, state)) def map(self, mapper: Callable[[TSource], TResult]) -> \"State[TResult, TState]\": def _(a:", "Keyword arguments: fn -- State processor. \"\"\" self._fn = fn @classmethod def unit(cls,", "__call__(self, state: Any) -> Tuple: return self.run(state) assert issubclass(State, Functor) assert issubclass(State, Monad)", "\"State[TState, TResult]\"]) -> \"State[TResult, TState]\": r\"\"\"m >>= k = State $ \\s ->", "\"\"\" return cls(lambda state: (value, state)) def map(self, mapper: Callable[[TSource], TResult]) -> \"State[TResult,", "= TypeVar(\"TResult\") class State(Generic[TSource, TState]): \"\"\"The state monad. Wraps stateful computations. A stateful", "state: Any) -> Tuple[Any, Any]: return mapper(a), state return State(lambda state: _(*self.run(state))) def", "new_state)) def run(self, state: TState) -> Tuple[TSource, TState]: \"\"\"Return wrapped state computation. This", "of unit and returns the wrapped function. 
\"\"\" return self._fn(state) def __call__(self, state:", "r\"\"\"get = state $ \\s -> (s, s)\"\"\" return State(lambda state: (state, state))", "return State(lambda state: _(*self.run(state))) @classmethod def get(cls) -> \"State[TState, TState]\": r\"\"\"get = state", "state $ \\s -> (s, s)\"\"\" return State(lambda state: (state, state)) @classmethod def", "state and returns a result and new state: state -> (result, state') \"\"\"", "s)\"\"\" return State(lambda state: (state, state)) @classmethod def put(cls, new_state: TState) -> \"State[Tuple,", "TypeVar(\"TState\") TSource = TypeVar(\"TSource\") TResult = TypeVar(\"TResult\") class State(Generic[TSource, TState]): \"\"\"The state monad.", "r\"\"\"Create new State. The unit function creates a new State object wrapping a", "(x, s) \"\"\" return cls(lambda state: (value, state)) def map(self, mapper: Callable[[TSource], TResult])", "state)) @classmethod def put(cls, new_state: TState) -> \"State[Tuple, TState]\": r\"\"\"put newState = state", "returns the wrapped function. \"\"\" return self._fn(state) def __call__(self, state: Any) -> Tuple:", "TSource) -> \"State[TSource, TState]\": r\"\"\"Create new State. The unit function creates a new", "(value, state)) def map(self, mapper: Callable[[TSource], TResult]) -> \"State[TResult, TState]\": def _(a: Any,", "put(cls, new_state: TState) -> \"State[Tuple, TState]\": r\"\"\"put newState = state $ \\s ->", "Callable, Tuple, Any, TypeVar, Generic from .util import Unit from .typing import Functor", "Tuple[Any, Any]: return mapper(a), state return State(lambda state: _(*self.run(state))) def bind(self, fn: Callable[[TSource],", "r\"\"\"put newState = state $ \\s -> ((), newState)\"\"\" return State(lambda state: (Unit,", "computation is a function that takes a state and returns a result and", "fn @classmethod def unit(cls, value: TSource) -> \"State[TSource, TState]\": r\"\"\"Create new State. 
The", "a function that takes a state and returns a result and new state:", "and new state: state -> (result, state') \"\"\" def __init__(self, fn: Callable[[TState], Tuple[TSource,", "TResult]) -> \"State[TResult, TState]\": def _(a: Any, state: Any) -> Tuple[Any, Any]: return", "function creates a new State object wrapping a stateful computation. State $ \\s", "wrapped state computation. This is the inverse of unit and returns the wrapped", "and returns a result and new state: state -> (result, state') \"\"\" def", "= TypeVar(\"TSource\") TResult = TypeVar(\"TResult\") class State(Generic[TSource, TState]): \"\"\"The state monad. Wraps stateful", "def unit(cls, value: TSource) -> \"State[TSource, TState]\": r\"\"\"Create new State. The unit function", "computation. State $ \\s -> (x, s) \"\"\" return cls(lambda state: (value, state))", "TResult]\"]) -> \"State[TResult, TState]\": r\"\"\"m >>= k = State $ \\s -> let", "__init__(self, fn: Callable[[TState], Tuple[TSource, TState]]) -> None: \"\"\"Initialize a new state. Keyword arguments:", "state return State(lambda state: _(*self.run(state))) def bind(self, fn: Callable[[TSource], \"State[TState, TResult]\"]) -> \"State[TResult,", "new_state: TState) -> \"State[Tuple, TState]\": r\"\"\"put newState = state $ \\s -> ((),", "and returns the wrapped function. \"\"\" return self._fn(state) def __call__(self, state: Any) ->", "def _(a: Any, state: Any) -> Tuple[Any, Any]: return mapper(a), state return State(lambda", "State(lambda state: (Unit, new_state)) def run(self, state: TState) -> Tuple[TSource, TState]: \"\"\"Return wrapped", "-- State processor. \"\"\" self._fn = fn @classmethod def unit(cls, value: TSource) ->", "state)) def map(self, mapper: Callable[[TSource], TResult]) -> \"State[TResult, TState]\": def _(a: Any, state:", "stateful computations. 
A stateful computation is a function that takes a state and", "state -> (result, state') \"\"\" def __init__(self, fn: Callable[[TState], Tuple[TSource, TState]]) -> None:", "\\s -> (x, s) \"\"\" return cls(lambda state: (value, state)) def map(self, mapper:", "def get(cls) -> \"State[TState, TState]\": r\"\"\"get = state $ \\s -> (s, s)\"\"\"", "computation. This is the inverse of unit and returns the wrapped function. \"\"\"", "Any, state: Any) -> Tuple[Any, Any]: return mapper(a), state return State(lambda state: _(*self.run(state)))", "m s in runState (k a) s' \"\"\" def _(result: Any, state: Any)", "TState]\": def _(a: Any, state: Any) -> Tuple[Any, Any]: return mapper(a), state return", "TState]): \"\"\"The state monad. Wraps stateful computations. A stateful computation is a function", "def bind(self, fn: Callable[[TSource], \"State[TState, TResult]\"]) -> \"State[TResult, TState]\": r\"\"\"m >>= k =", "import Functor from .typing import Monad TState = TypeVar(\"TState\") TSource = TypeVar(\"TSource\") TResult", "fn: Callable[[TSource], \"State[TState, TResult]\"]) -> \"State[TResult, TState]\": r\"\"\"m >>= k = State $", "a state and returns a result and new state: state -> (result, state')", "that takes a state and returns a result and new state: state ->", "TSource = TypeVar(\"TSource\") TResult = TypeVar(\"TResult\") class State(Generic[TSource, TState]): \"\"\"The state monad. Wraps", "Unit from .typing import Functor from .typing import Monad TState = TypeVar(\"TState\") TSource", "wrapped function. \"\"\" return self._fn(state) def __call__(self, state: Any) -> Tuple: return self.run(state)" ]
[ "image, sound and hitmask dicts IMAGES, SOUNDS, HITMASKS = {}, {}, {} #", "playerIndex, basex if (loopIter + 1) % 5 == 0: playerIndex = next(playerIndexGen)", "False # True when player flaps # The counter to spawn new pipes", "uPipeRect, pHitMask, uHitmask) if uCollide: # for fury mode we want to break", "= (0, 0, 0) RED = (255, 50, 50) YELLOW = (255, 255,", "time WHITE = (255, 255, 255) BLACK = (0, 0, 0) RED =", "# yellow bird ( 'assets/sprites/yellowbird-upflap.png', 'assets/sprites/yellowbird-midflap.png', 'assets/sprites/yellowbird-downflap.png', ), ) # list of backgrounds", "player's rotation playerVelRot = 3 # angular speed playerRotThr = 20 # rotation", "0 # pipe spawn pipes = getRandomPipe(DIFFICULTY) upperPipes.append(pipes[0]) lowerPipes.append(pipes[1]) # check if a", "of pipes PIPES_LIST = ( 'assets/sprites/pipe-green.png', 'assets/sprites/pipe-red.png', ) try: xrange except NameError: xrange", "lowerPipes, 'score': score, 'playerVelY': playerVelY, 'playerRot': playerRot } # 추가된 부분 if coinTest[0]:", "coins = [] else: if EASYMODE: DIFFICULTY = 4 # get 2 new", "SCREEN.blit(IMAGES['player'][playerIndex], (playerx, playery + playerShmVals['val'])) SCREEN.blit(IMAGES['message'], (messagex, messagey)) SCREEN.blit(IMAGES['base'], (basex, BASEY)) SCREEN.blit(IMAGES['easymode'],(easymodex,easymodey)) SCREEN.blit(IMAGES['hardmode'],(hardmodex,hardmodey))", "\"fury mode\" button for welcome screen (with the key) IMAGES['furymode'] = pygame.image.load('assets/sprites/furymode.png').convert_alpha() IMAGES['furymode-key']", "pipes particles amount (for each pipe) FURYMODE_PARTICLES = 8 # max particles for", "False] return [False, False] def pixelCollision(rect1, rect2, hitmask1, hitmask2): \"\"\"Checks if two objects", "0, 190, 0) while True: # select random background sprites randBg = random.randint(0,", "showScore(score) playerSurface = pygame.transform.rotate(IMAGES['player'][1], playerRot) SCREEN.blit(playerSurface, (playerx,playery)) #showScore(score) if 
(score > TOPFIVE[4][1] and", "= pygame.Rect(lPipe['x'], lPipe['y'], pipeW, pipeH) # player and pipe hitmasks pHitMask = HITMASKS['player'][pi]", ") # list of backgrounds BACKGROUNDS_LIST = ( 'assets/sprites/background-day.png', 'assets/sprites/background-night.png', ) # list", "'lowerPipes': lowerPipes, 'score': score, 'playerVelY': playerVelY, 'playerRot': playerRot } # 추가된 부분 if", ": SCREEN.blit(IMAGES['gameover'], (overx,overy)) pygame.display.update() gameover = False pygame.time.delay(1000) SCREEN.blit(IMAGES['background'], (0,0)) writeScore(score) count=count+1 pygame.display.update()", "with pipe lCollide = pixelCollision(playerRect, lPipeRect, pHitMask, lHitmask) if lCollide: # for fury", "padding) furymodeKeyX = furymodex + IMAGES['furymode'].get_width() + 8 furymodeKeyY = furymodey + IMAGES['furymode-key'].get_height()", "[ {'x': SCREENWIDTH + 200, 'y': newPipe1[0]['y']}, {'x': SCREENWIDTH + 200 + (SCREENWIDTH", "'basex': basex, 'playerIndexGen': playerIndexGen, } elif (event.type == KEYDOWN and (event.key == K_SPACE", "particle is under the ground if particle['y'] >= BASEY: particles.remove(particle) else: # add", "pygame.image.load(PLAYERS_LIST[randPlayer][2]).convert_alpha(), ) # select random pipe sprites pipeindex = random.randint(0, len(PIPES_LIST) - 1)", "checkCrash({'x': playerx, 'y': playery, 'index': playerIndex}, upperPipes, lowerPipes) # 추가된 부분 coinTest =", "= pygame.image.load(BACKGROUNDS_LIST[randBg]).convert() # select random player sprites randPlayer = random.randint(0, len(PLAYERS_LIST) - 1)", "= ( pygame.image.load('assets/sprites/0.png').convert_alpha(), pygame.image.load('assets/sprites/1.png').convert_alpha(), pygame.image.load('assets/sprites/2.png').convert_alpha(), pygame.image.load('assets/sprites/3.png').convert_alpha(), pygame.image.load('assets/sprites/4.png').convert_alpha(), pygame.image.load('assets/sprites/5.png').convert_alpha(), pygame.image.load('assets/sprites/6.png').convert_alpha(), 
pygame.image.load('assets/sprites/7.png').convert_alpha(), pygame.image.load('assets/sprites/8.png').convert_alpha(), pygame.image.load('assets/sprites/9.png').convert_alpha() )", "mode button (8 is right padding) furymodeKeyX = furymodex + IMAGES['furymode'].get_width() + 8", "35 # pipes particles amount (for each pipe) FURYMODE_PARTICLES = 8 # max", "return { 'playery': playery + playerShmVals['val'], 'basex': basex, 'playerIndexGen': playerIndexGen, } elif (event.type", "0: playerIndex = next(playerIndexGen) loopIter = (loopIter + 1) % 30 basex =", "- 1: return # player y shift if playery + playerHeight < BASEY", "== K_UP)) or (event.type == MOUSEBUTTONDOWN and event.button == 1): if playery >", "pipeW, pipeH) # player and pipe hitmasks pHitMask = HITMASKS['player'][pi] lHitmask = HITMASKS['pipe'][0]", "- 1) IMAGES['pipe'] = ( pygame.transform.rotate( pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), 180), pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), ) # pipes' particles", "score+=1 # it's a lower pipe else: lowerPipes.remove(crashTest[3]) else: return { 'y': playery,", "paticles to the particle list randomly generated with pipe's rectangle (hitbox) \"\"\" global", "if playerRot > -90: playerRot -= playerVelRot # draw sprites overx = int((SCREENWIDTH", "ask(screen, question): \"ask(screen, question) -> answer\" pygame.font.init() current_string = [] display_box(screen, question +", "for uPipe, lPipe in zip(upperPipes, lowerPipes): SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y'])) SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y'])) SCREEN.blit(IMAGES['base'],", "} # adjust playery, playerIndex, basex if (loopIter + 1) % 5 ==", "RED),((SCREEN.get_width() / 2) - 100, (SCREEN.get_height() / 2) -160 + (50*i))) SCREEN.blit(fontobject.render(str(TOPFIVE[i][1]), 1,RED),((SCREEN.get_width()", "- 10) / 80 * (self.maxi - self.mini) + self.mini if self.val <", "in upperPipes: # pipe rect uPipeRect = pygame.Rect(uPipe['x'], uPipe['y'], pipeW, 
pipeH) # player", "first flap sound and return values for mainGame FURYMODE = True SOUNDS['wing'].play() return", "[ {'x': coinX, 'y': coinY}, ] def showScore(score): \"\"\"displays score in center of", "mode\" button for welcome screen (with the key) IMAGES['furymode'] = pygame.image.load('assets/sprites/furymode.png').convert_alpha() IMAGES['furymode-key'] =", "(self.maxi - self.mini) + self.mini if self.val < self.mini: self.val = self.mini if", "import pygame from pygame.locals import * import time WHITE = (255, 255, 255)", "100) % baseShift) # rotate the player if playerRot > -90: playerRot -=", "1) and IMAGES['easymode'].get_rect(center=(easymodex+54,easymodey+14)).collidepoint(pygame.mouse.get_pos())): EASYMODE = True # make first flap sound and return", "* 0.5) #SCREEN.blit(IMAGES['background'], (0,0)) for uPipe, lPipe in zip(upperPipes, lowerPipes): SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y']))", "Xoffset += IMAGES['numbers'][digit].get_width() def spawnParticles(particles, pipe): \"\"\" Add paticles to the particle list", "return [ {'x': coinX, 'y': coinY}, ] def showScore(score): \"\"\"displays score in center", "mode # pipes are green if pipeindex == 0: IMAGES['pipe-particle'] = ( pygame.image.load('assets/sprites/particles-green-0.png').convert_alpha(),", "check if a pipe must be removed from the list for uPipe in", "( pygame.image.load(PLAYERS_LIST[randPlayer][0]).convert_alpha(), pygame.image.load(PLAYERS_LIST[randPlayer][1]).convert_alpha(), pygame.image.load(PLAYERS_LIST[randPlayer][2]).convert_alpha(), ) # select random pipe sprites pipeindex = random.randint(0,", "all numbers to be printed for digit in scoreDigits: totalWidth += IMAGES['numbers'][digit].get_width() Xoffset", "Yposition) SLIDER = Slider(0.5, 1, 0, 190, 0) while True: # select random", "pipe # it's an upper pipe if crashTest[2]: upperPipes.remove(crashTest[3]) score+=1 # it's a", "+ player['h'] >= BASEY - 1: return [True, True] else: playerRect = 
pygame.Rect(player['x'],", "in lowerPipes: if lPipe['x'] < -IMAGES['pipe'][0].get_width(): lowerPipes.remove(lPipe) # particles for particle in particles:", "ypos): self.val = val # start value self.maxi = maxi # maximum at", "red bird ( 'assets/sprites/redbird-upflap.png', 'assets/sprites/redbird-midflap.png', 'assets/sprites/redbird-downflap.png', ), # blue bird ( # amount", "= SCREENWIDTH + 100 return [ {'x': coinX, 'y': coinY}, ] def showScore(score):", "FURYMODE: return [True, False, True, uPipe] # normal mode return [True, False] for", "= (0, 255, 50) BLUE = (50, 50, 255) GREY = (200, 200,", "# list of backgrounds BACKGROUNDS_LIST = ( 'assets/sprites/background-day.png', 'assets/sprites/background-night.png', ) # list of", "'playery': playery + playerShmVals['val'], 'basex': basex, 'playerIndexGen': playerIndexGen, } # adjust playery, playerIndex,", "of 3 positions of flap) PLAYERS_LIST = ( # red bird ( 'assets/sprites/redbird-upflap.png',", "rectangle (hitbox) \"\"\" global FURYMODE_PARTICLES, FURYMODE_PARTICLES_MAX, SOUNDS pipeW = IMAGES['pipe'][0].get_width() pipeH = IMAGES['pipe'][0].get_height()", "(event.type == MOUSEBUTTONDOWN and event.button == 1): if playery + playerHeight >= BASEY", "+ playerShmVals['val'], 'basex': basex, 'playerIndexGen': playerIndexGen, } # adjust playery, playerIndex, basex if", "reset furymodePipeFrameCounter = 0 # pipe spawn pipes = getRandomPipe(DIFFICULTY) upperPipes.append(pipes[0]) lowerPipes.append(pipes[1]) #", "return [True, False] for lPipe in lowerPipes: # pipe rect lPipeRect = pygame.Rect(lPipe['x'],", "True when player flaps # The counter to spawn new pipes furymodePipeFrameCounter =", "IMAGES['background'].get_width() # player shm for up-down motion on welcome screen playerShmVals = {'val':", "FURYMODE = True SOUNDS['wing'].play() return { 'playery': playery + playerShmVals['val'], 'basex': basex, 'playerIndexGen':", "sys.exit() if (event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP))", "and 
lower pipe gapY = random.randrange(0, int(BASEY * 0.6 - PIPEGAPSIZE)) gapY +=", "(0, 0, 0) RED = (255, 50, 50) YELLOW = (255, 255, 0)", "pygame.K_g: 'G', pygame.K_h: 'H', pygame.K_i: 'I', pygame.K_j: 'J', pygame.K_k: 'K', pygame.K_l: 'L', pygame.K_m:", "count==0) : SCREEN.blit(IMAGES['gameover'], (overx,overy)) pygame.display.update() gameover = False pygame.time.delay(1000) SCREEN.blit(IMAGES['background'], (0,0)) writeScore(score) count=count+1", "particle['y'] += particle['vy'] # gravity particle['vy'] += playerAccY # remove if the particle", "a lower pipe else: lowerPipes.remove(crashTest[3]) else: return { 'y': playery, 'groundCrash': crashTest[1], 'basex':", "upperPipes = [] # list of lowerpipe lowerPipes = [] # list of", "- self.xpos - 10) / 80 * (self.maxi - self.mini) + self.mini if", "/ 2) messagey = int(SCREENHEIGHT * 0.12) easymodex = int((SCREENWIDTH - IMAGES['easymode'].get_width())/2) easymodey", ") # game over sprite IMAGES['gameover'] = pygame.image.load('assets/sprites/gameover.png').convert_alpha() # message sprite for welcome", "players downward accleration playerRot = 45 # player's rotation playerVelRot = 3 #", "score playerMidPos = playerx + IMAGES['player'][0].get_width() / 2 for pipe in upperPipes: pipeMidPos", "position left self.xpos = xpos # x-location on screen self.ypos = ypos #", "'N', pygame.K_o: 'O', pygame.K_p: 'P', pygame.K_q: 'Q', pygame.K_r: 'R', pygame.K_s: 'S', pygame.K_t: 'T',", "KEYDOWN and (event.key == K_SPACE or event.key == K_UP)) or (event.type == MOUSEBUTTONDOWN", "we # must return which pipe is colliding (lower or upper) if FURYMODE:", "spawn new pipes furymodePipeFrameCounter = 0 while True: for event in pygame.event.get(): if", "for x in xrange(rect.width): for y in xrange(rect.height): if hitmask1[x1+x][y1+y] and hitmask2[x2+x][y2+y]: return", "0.79 # image, sound and hitmask dicts IMAGES, SOUNDS, HITMASKS = {}, {},", "for welcome screen (with the key) IMAGES['furymode'] = 
pygame.image.load('assets/sprites/furymode.png').convert_alpha() IMAGES['furymode-key'] = pygame.image.load('assets/sprites/furymode-key.png').convert_alpha() IMAGES['easymode']", "pygame.image.load('assets/sprites/coin.png').convert_alpha() # sounds if 'win' in sys.platform: soundExt = '.wav' else: soundExt =", "random angle for a minimum velocity vel = random.random() * 10 + 5", "rect.y - rect2.y for x in xrange(rect.width): for y in xrange(rect.height): if hitmask1[x1+x][y1+y]", "for i in range(0,5) : SCREEN.blit(fontobject.render(TOPFIVE[i][0], 1, RED),((SCREEN.get_width() / 2) - 100, (SCREEN.get_height()", "pygame.mixer.Sound('assets/audio/swoosh' + soundExt) SOUNDS['wing'] = pygame.mixer.Sound('assets/audio/wing' + soundExt) # volume slider(defaultValue, maximum, minimum,", "pygame.image.load(BACKGROUNDS_LIST[randBg]).convert() # select random player sprites randPlayer = random.randint(0, len(PLAYERS_LIST) - 1) IMAGES['player']", "of upper pipes upperPipes = [] # list of lowerpipe lowerPipes = []", "# print score so player overlaps the score showScore(score) # Player rotation has", "8 # max particles for each pipe hit FURYMODE_PARTICLES_MAX = 48 # list", "1) % 5 == 0: playerIndex = next(playerIndexGen) loopIter = (loopIter + 1)", "between upper and lower pipe gapY = random.randrange(0, int(BASEY * 0.6 - PIPEGAPSIZE))", "graphics - button surface # self.button_surf = pygame.surface.Surface((15, 15)) self.button_surf.fill(TRANS) self.button_surf.set_colorkey(TRANS) pygame.draw.circle(self.button_surf, ORANGE,", "SLIDER.button_rect.collidepoint(pos): SLIDER.hit = True elif event.type == pygame.MOUSEBUTTONUP: SLIDER.hit = False # Move", "of particles # a particle is an object with attributes: # {'x': position-x,", "*= -1 if playerShm['dir'] == 1: playerShm['val'] += 1 else: playerShm['val'] -= 1", "crashTest = checkCrash({'x': playerx, 'y': playery, 'index': playerIndex}, upperPipes, lowerPipes) # 추가된 부분", "8 furymodeKeyY = furymodey + 
IMAGES['furymode-key'].get_height() / 2 basex = 0 # amount", "draw sprites SCREEN.blit(IMAGES['background'], (0,0)) SLIDER.draw() if(SLIDER.val>0): SCREEN.blit(IMAGES['speaker'][0], (160,15)) else : SCREEN.blit(IMAGES['speaker'][1], (160,15)) SCREEN.blit(IMAGES['player'][playerIndex],", "<reponame>Kimhwiwoong/Flappy_Bird_ver.2 from itertools import cycle from operator import itemgetter import random import sys", "pygame.transform.rotate(IMAGES['player'][playerIndex], visibleRot) SCREEN.blit(playerSurface, (playerx, playery)) pygame.display.update() FPSCLOCK.tick(FPS) def showGameOverScreen(crashInfo): \"\"\"crashes the player down", "shows gameover image\"\"\" global FURYMODE, EASYMODE FURYMODE = False EASYMODE = False score", "playery, 'groundCrash': crashTest[1], 'basex': basex, 'upperPipes': upperPipes, 'lowerPipes': lowerPipes, 'score': score, 'playerVelY': playerVelY,", "IMAGES['numbers'][digit].get_width() Xoffset = (SCREENWIDTH - totalWidth) / 2 for digit in scoreDigits: SCREEN.blit(IMAGES['numbers'][digit],", "change if (loopIter + 1) % 3 == 0: playerIndex = next(playerIndexGen) loopIter", "pipe['x'] + IMAGES['pipe'][0].get_width() / 2 if pipeMidPos <= playerMidPos < pipeMidPos + 4:", "= ( 'assets/sprites/background-day.png', 'assets/sprites/background-night.png', ) # list of pipes PIPES_LIST = ( 'assets/sprites/pipe-green.png',", "which base can maximum shift to left 'assets/sprites/bluebird-upflap.png', 'assets/sprites/bluebird-midflap.png', 'assets/sprites/bluebird-downflap.png', ), # yellow", "+= particle['vy'] # gravity particle['vy'] += playerAccY # remove if the particle is", "main(): global SCREEN, FPSCLOCK, SLIDER pygame.init() FPSCLOCK = pygame.time.Clock() SCREEN = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT))", "than in # normal mode, we add pipes with a \"timer\" (a frame", "motion on welcome screen playerShmVals = {'val': 0, 'dir': 1} # initialize volume", "screen\" pygame.draw.rect(screen, (0,0,0), ((screen.get_width() / 2) - 100, 
(screen.get_height() / 2) - 10,", "image\"\"\" global FURYMODE, EASYMODE FURYMODE = False EASYMODE = False score = crashInfo['score']", "furymodey = int(SCREENHEIGHT * 0.80) # just at right of the fury mode", "crashTest[1]: spawnParticles(particles, crashTest[3]) # remove the pipe # it's an upper pipe if", "xpos # x-location on screen self.ypos = ypos # y-location on screen self.surf", "- 10, 200,20), 0) pygame.draw.rect(screen, (255,255,255), ((screen.get_width() / 2) - 102, (screen.get_height() /", "sounds SOUNDS['hit'].play() if not crashInfo['groundCrash']: SOUNDS['die'].play() while True: for event in pygame.event.get(): if", "SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y'])) SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y'])) SCREEN.blit(IMAGES['base'], (basex, BASEY)) showScore(score) playerSurface = pygame.transform.rotate(IMAGES['player'][1],", "lower pipe gapY = random.randrange(0, int(BASEY * 0.6 - PIPEGAPSIZE)) gapY += int(BASEY", "playerIndexGen = movementInfo['playerIndexGen'] playerx, playery = int(SCREENWIDTH * 0.2), movementInfo['playery'] basex = movementInfo['basex']", ">= BASEY: particles.remove(particle) else: # add new pipes when first pipe is about", "# move pipes to left for uPipe in upperPipes: uPipe['x'] += pipeVelX for", "for mainGame FURYMODE = True SOUNDS['wing'].play() return { 'playery': playery + playerShmVals['val'], 'basex':", "in SOUNDS: SOUNDS[sounds].set_volume(SLIDER.val) #(2) key for easymode if (event.type == KEYDOWN and event.key", "here crashTest = checkCrash({'x': playerx, 'y': playery, 'index': playerIndex}, upperPipes, lowerPipes) # 추가된", "coinTest[0]: score += 1 SOUNDS['point'].play() coins.pop(0) # check for score playerMidPos = playerx", "rect2, hitmask1, hitmask2): \"\"\"Checks if two objects collide and not just their rects\"\"\"", "'S', pygame.K_t: 'T', pygame.K_u: 'U', pygame.K_v: 'V', pygame.K_w: 'W', pygame.K_x: 'X', pygame.K_y: 'Y',", "= playerIndex = loopIter = 0 playerIndexGen = 
movementInfo['playerIndexGen'] playerx, playery = int(SCREENWIDTH", "부분 newCoin1 = getRandomCoin() newCoin2 = getRandomCoin() coins = [ {'x': SCREENWIDTH +", "FPSCLOCK.tick(FPS) pygame.display.update() def playerShm(playerShm): \"\"\"oscillates the value of playerShm['val'] between 8 and -8\"\"\"", "= (200, 100, 50) CYAN = (0, 255, 255) MAGENTA = (255, 0,", "mask.append([]) for y in xrange(image.get_height()): mask[x].append(bool(image.get_at((x,y))[3])) return mask def get_key(): while 1: event", "0) GREEN = (0, 255, 50) BLUE = (50, 50, 255) GREY =", ") try: xrange except NameError: xrange = range class Keyboard(object): keys = {pygame.K_a:", "furymodePipeFrameCounter = 0 # pipe spawn pipes = getRandomPipe(DIFFICULTY) upperPipes.append(pipes[0]) lowerPipes.append(pipes[1]) # check", "(event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP)) or (event.type", "(0,0)) writeScore(score) count=count+1 pygame.display.update() elif(gameover == True): SCREEN.blit(IMAGES['gameover'], (overx,overy)) pygame.display.update() gameover = False", "lowerPipes: SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y'])) # 추가된 부분 for coin in coins: SCREEN.blit(IMAGES['coin'], (coin['x'],", "(0,0)) SLIDER.draw() if(SLIDER.val>0): SCREEN.blit(IMAGES['speaker'][0], (160,15)) else : SCREEN.blit(IMAGES['speaker'][1], (160,15)) SCREEN.blit(IMAGES['player'][playerIndex], (playerx, playery +", "2 basex = 0 # amount by which base can maximum shift to", "loopIter = 0 playerx = int(SCREENWIDTH * 0.2) playery = int((SCREENHEIGHT - IMAGES['player'][0].get_height())", "IMAGES['coin'].get_width() coinH = IMAGES['coin'].get_height() for coin in coins: coinRect = pygame.Rect(coin['x'], coin['y'], coinW,", "sound and return values for mainGame FURYMODE = True SOUNDS['wing'].play() return { 'playery':", "hitmask1[x1+x][y1+y] and hitmask2[x2+x][y2+y]: return True return False def writeScore(score): TOPFIVE.append((ask(SCREEN,\"NAME: \"),score)) TOPFIVE.sort(key=itemgetter(1),reverse= 
True)", "for y in xrange(image.get_height()): mask[x].append(bool(image.get_at((x,y))[3])) return mask def get_key(): while 1: event =", "(basex, BASEY)) SCREEN.blit(IMAGES['easymode'],(easymodex,easymodey)) SCREEN.blit(IMAGES['hardmode'],(hardmodex,hardmodey)) SCREEN.blit(IMAGES['furymode'], (furymodex, furymodey)) SCREEN.blit(IMAGES['furymode-key'], (furymodeKeyX, furymodeKeyY)) pygame.display.update() FPSCLOCK.tick(FPS) def", "gapY - pipeHeight}, # upper pipe {'x': pipeX, 'y': gapY + PIPEGAPSIZE}, #", "cycle from operator import itemgetter import random import sys import math import pygame", "= random.random() * (aMax - aMin) + aMin particle['vx'] = math.cos(angle) * vel", "lowerPipes): \"\"\"returns True if player collders with base or pipes.\"\"\" global FURYMODE pi", "if (event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP)) or", "amount by which base can maximum shift to left PIPEGAPSIZE = 130 #", "#SCREEN.blit(IMAGES['background'], (0,0)) for uPipe, lPipe in zip(upperPipes, lowerPipes): SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y'])) SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'],", "newPipe1 = getRandomPipe(DIFFICULTY) newPipe2 = getRandomPipe(DIFFICULTY) # list of upper pipes upperPipes =", "backgrounds BACKGROUNDS_LIST = ( 'assets/sprites/background-day.png', 'assets/sprites/background-night.png', ) # list of pipes PIPES_LIST =", "if event.type == KEYDOWN: return event.key else: pass def display_box(screen, message): fontobject =", "playerx, playery = int(SCREENWIDTH * 0.2), movementInfo['playery'] basex = movementInfo['basex'] baseShift = IMAGES['base'].get_width()", "lowerPipes = crashInfo['upperPipes'], crashInfo['lowerPipes'] # play hit and die sounds SOUNDS['hit'].play() if not", "uHitmask = HITMASKS['pipe'][0] # if bird collided with pipe uCollide = pixelCollision(playerRect, uPipeRect,", "collide and not just their rects\"\"\" rect = rect1.clip(rect2) if rect.width == 0", "/ 2 if pipeMidPos <= playerMidPos < pipeMidPos + 4: 
score += 1", "255) GREY = (200, 200, 200) ORANGE = (200, 100, 50) CYAN =", "( 'assets/sprites/background-day.png', 'assets/sprites/background-night.png', ) # list of pipes PIPES_LIST = ( 'assets/sprites/pipe-green.png', 'assets/sprites/pipe-red.png',", "list(str(score))] totalWidth = 0 # total width of all numbers to be printed", "screen animation of flappy bird\"\"\" global FURYMODE, EASYMODE # index of player to", "'playerIndexGen': playerIndexGen, } elif (event.type == KEYDOWN and (event.key == K_SPACE or event.key", "playerVelRot # player's movement if playerVelY < playerMaxVelY and not playerFlapped: playerVelY +=", "# Player rotation has a threshold visibleRot = playerRotThr if playerRot <= playerRotThr:", "pygame.K_o: 'O', pygame.K_p: 'P', pygame.K_q: 'Q', pygame.K_r: 'R', pygame.K_s: 'S', pygame.K_t: 'T', pygame.K_u:", "= [] # list of lowerpipe lowerPipes = [] # list of particles", "+ \"\".join(current_string)) return \"\".join(current_string) class Slider(): def __init__(self, val, maxi, mini, xpos, ypos):", "must spawn new pipes if furymodePipeFrameCounter == FURYMODE_FRAMES_TO_SPAWN_PIPES: # counter reset furymodePipeFrameCounter =", "EASYMODE = False score = crashInfo['score'] playerx = SCREENWIDTH * 0.2 playery =", "255) BLACK = (0, 0, 0) RED = (255, 50, 50) YELLOW =", "BASEY - playery - playerHeight) # move pipes to left for uPipe in", "self.val = val # start value self.maxi = maxi # maximum at slider", "playerRot = 45 playerHeight = IMAGES['player'][playerIndex].get_height() playery += min(playerVelY, BASEY - playery -", "Add paticles to the particle list randomly generated with pipe's rectangle (hitbox) \"\"\"", "pipes and particles if FURYMODE: furymodePipeFrameCounter += 1 # the counter has the", "hitmasks pHitMask = HITMASKS['player'][pi] uHitmask = HITMASKS['pipe'][0] # if bird collided with pipe", "'E', pygame.K_f: 'F', pygame.K_g: 'G', pygame.K_h: 'H', pygame.K_i: 'I', pygame.K_j: 'J', pygame.K_k: 'K',", "easymode if (event.type == KEYDOWN and 
event.key == K_2) or ((event.type == MOUSEBUTTONDOWN", "- PIPEGAPSIZE)) gapY += int(BASEY * 0.2) pipeHeight = IMAGES['pipe'][0].get_height() pipeX = SCREENWIDTH", "max descend speed playerMinVelY = -8 # min vel along Y, max ascend", "coins[0]['x'] < -IMAGES['coin'].get_width(): coins.pop(0) # draw sprites SCREEN.blit(IMAGES['background'], (0,0)) for uPipe in upperPipes:", "# pipes' particles for fury mode # pipes are green if pipeindex ==", "random.randint(pipe['y'], pipe['y'] + pipeH) particle['i'] = random.randint(1, FURYMODE_PARTICLES) - 1 # random angle", "= pygame.mixer.Sound('assets/audio/swoosh' + soundExt) SOUNDS['wing'] = pygame.mixer.Sound('assets/audio/wing' + soundExt) # volume slider(defaultValue, maximum,", "# list of particles # a particle is an object with attributes: #", "remove first pipe if its out of the screen if upperPipes[0]['x'] < -IMAGES['pipe'][0].get_width():", "hits a pipe in fury mode if FURYMODE and not crashTest[1]: spawnParticles(particles, crashTest[3])", "velocity, downward accleration, accleration on flap playerVelY = -9 # player's velocity along", "추가된 부분 if coins[0]['x'] < -IMAGES['coin'].get_width(): coins.pop(0) # draw sprites SCREEN.blit(IMAGES['background'], (0,0)) for", "in xrange(rect.height): if hitmask1[x1+x][y1+y] and hitmask2[x2+x][y2+y]: return True return False def writeScore(score): TOPFIVE.append((ask(SCREEN,\"NAME:", "# it's a lower pipe else: lowerPipes.remove(crashTest[3]) else: return { 'y': playery, 'groundCrash':", "playerRot) SCREEN.blit(playerSurface, (playerx,playery)) #showScore(score) if (score > TOPFIVE[4][1] and count==0) : SCREEN.blit(IMAGES['gameover'], (overx,overy))", "score += 1 SOUNDS['point'].play() coins.pop(0) # check for score playerMidPos = playerx +", "# check for crash here crashTest = checkCrash({'x': playerx, 'y': playery, 'index': playerIndex},", "pygame.K_s: 'S', pygame.K_t: 'T', pygame.K_u: 'U', pygame.K_v: 'V', pygame.K_w: 'W', pygame.K_x: 'X', pygame.K_y:", "2) messagex = 
int((SCREENWIDTH - IMAGES['message'].get_width()) / 2) messagey = int(SCREENHEIGHT * 0.12)", "IMAGES['furymode-key'].get_height() / 2 basex = 0 # amount by which base can maximum", "if two objects collide and not just their rects\"\"\" rect = rect1.clip(rect2) if", "IMAGES['player'][0].get_height() playerVelY = crashInfo['playerVelY'] playerAccY = 2 playerRot = crashInfo['playerRot'] playerVelRot = 7", "FPSCLOCK, SLIDER pygame.init() FPSCLOCK = pygame.time.Clock() SCREEN = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT)) pygame.display.set_caption('Flappy Bird') #", "/ 80 * (self.maxi - self.mini) + self.mini if self.val < self.mini: self.val", "if FURYMODE: return [True, False, False, lPipe] # normal mode return [True, False]", "1: inkey = get_key() if inkey == K_BACKSPACE: current_string = current_string[0:-1] elif inkey", "basex if (loopIter + 1) % 5 == 0: playerIndex = next(playerIndexGen) loopIter", "question + \": \" + \"\".join(current_string)) while 1: inkey = get_key() if inkey", "on screen self.surf = pygame.surface.Surface((95, 40)) self.hit = False # the hit attribute", "EASYMODE = False # In fury mode, the pipe sapwn system is different", "to break the pipe so we # must return which pipe is colliding", "pipe sprites pipeindex = random.randint(0, len(PIPES_LIST) - 1) IMAGES['pipe'] = ( pygame.transform.rotate( pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(),", "spawn pipes at start if FURYMODE: # list of upper pipes upperPipes =", "'index': playerIndex}, coins) if crashTest[0]: # the player hits a pipe in fury", "int(SCREENHEIGHT * 0.12) easymodex = int((SCREENWIDTH - IMAGES['easymode'].get_width())/2) easymodey = int(SCREENHEIGHT * 0.68)", "pipeX, 'y': gapY + PIPEGAPSIZE}, # lower pipe ] # 추가된 부분 def", "HITMASKS['player'][pi] lHitmask = HITMASKS['pipe'][0] # if bird collided with pipe lCollide = pixelCollision(playerRect,", "SOUNDS: SOUNDS[sound].set_volume(SLIDER.val) while True: for event in pygame.event.get(): if event.type == QUIT or", 
"xpos, ypos): self.val = val # start value self.maxi = maxi # maximum", "showWelcomeAnimation() crashInfo = mainGame(movementInfo) showGameOverScreen(crashInfo) def showWelcomeAnimation(): \"\"\"Shows welcome screen animation of flappy", "values for mainGame SOUNDS['wing'].play() return { 'playery': playery + playerShmVals['val'], 'basex': basex, 'playerIndexGen':", "# min vel along Y, max ascend speed playerAccY = 1 # players", "10 + 5 aMin = -math.pi * .35 aMax = math.pi * .25", "IMAGES['base'].get_width() - IMAGES['background'].get_width() # player shm for up-down motion on welcome screen playerShmVals", "== KEYDOWN and event.key == K_ESCAPE): pygame.quit() sys.exit() if event.type == pygame.MOUSEBUTTONDOWN: pos", "# the player hits a pipe in fury mode if FURYMODE and not", "along Y, max ascend speed playerAccY = 1 # players downward accleration playerRot", "max velocity, downward accleration, accleration on flap playerVelY = -9 # player's velocity", "accleration playerRot = 45 # player's rotation playerVelRot = 3 # angular speed", "# gap between upper and lower part of pipe BASEY=SCREENHEIGHT * 0.79 #", "to spawn new pipes furymodePipeFrameCounter = 0 while True: for event in pygame.event.get():", "'y': playery, 'index': playerIndex}, coins) if crashTest[0]: # the player hits a pipe", "playerVelY < playerMaxVelY and not playerFlapped: playerVelY += playerAccY if playerFlapped: playerFlapped =", "pipes' particles if FURYMODE: for particle in particles: SCREEN.blit(IMAGES['pipe-particle'][particle['i']], (particle['x'], particle['y'])) SCREEN.blit(IMAGES['base'], (basex,", "lPipe] # normal mode return [True, False] return [False, False] # 추가된 부분", "in upperPipes: pipeMidPos = pipe['x'] + IMAGES['pipe'][0].get_width() / 2 if pipeMidPos <= playerMidPos", "SOUNDS['hit'] = pygame.mixer.Sound('assets/audio/hit' + soundExt) SOUNDS['point'] = pygame.mixer.Sound('assets/audio/point' + soundExt) SOUNDS['swoosh'] = pygame.mixer.Sound('assets/audio/swoosh'", "2) -220)) for 
i in range(0,5) : SCREEN.blit(fontobject.render(TOPFIVE[i][0], 1, RED),((SCREEN.get_width() / 2) -", "'assets/sprites/bluebird-upflap.png', 'assets/sprites/bluebird-midflap.png', 'assets/sprites/bluebird-downflap.png', ), # yellow bird ( 'assets/sprites/yellowbird-upflap.png', 'assets/sprites/yellowbird-midflap.png', 'assets/sprites/yellowbird-downflap.png', ), )", "K_2) or ((event.type == MOUSEBUTTONDOWN and event.button == 1) and IMAGES['easymode'].get_rect(center=(easymodex+54,easymodey+14)).collidepoint(pygame.mouse.get_pos())): EASYMODE =", "# pipes' particles if FURYMODE: for particle in particles: SCREEN.blit(IMAGES['pipe-particle'][particle['i']], (particle['x'], particle['y'])) SCREEN.blit(IMAGES['base'],", "list for uPipe in upperPipes: if uPipe['x'] < -IMAGES['pipe'][0].get_width(): upperPipes.remove(uPipe) for lPipe in", "(6, 6), 6, 0) def draw(self): \"\"\" Combination of static and dynamic graphics", "printed for digit in scoreDigits: totalWidth += IMAGES['numbers'][digit].get_width() Xoffset = (SCREENWIDTH - totalWidth)", "mode we want to break the pipe so we # must return which", "if event.type == pygame.MOUSEBUTTONDOWN: pos = pygame.mouse.get_pos() if SLIDER.button_rect.collidepoint(pos): SLIDER.hit = True elif", "( 'assets/sprites/pipe-green.png', 'assets/sprites/pipe-red.png', ) try: xrange except NameError: xrange = range class Keyboard(object):", "pygame.K_h: 'H', pygame.K_i: 'I', pygame.K_j: 'J', pygame.K_k: 'K', pygame.K_l: 'L', pygame.K_m: 'M', pygame.K_n:", "pygame.quit() sys.exit() if event.type == pygame.MOUSEBUTTONDOWN: pos = pygame.mouse.get_pos() if SLIDER.button_rect.collidepoint(pos): SLIDER.hit =", "< playerMaxVelY and not playerFlapped: playerVelY += playerAccY if playerFlapped: playerFlapped = False", "abs(playerShm['val']) == 8: playerShm['dir'] *= -1 if playerShm['dir'] == 1: playerShm['val'] += 1", "IMAGES['message'].get_width()) / 2) messagey = int(SCREENHEIGHT * 0.12) easymodex = int((SCREENWIDTH - 
IMAGES['easymode'].get_width())/2)", "pipes when first pipe is about to touch left of screen if 0", "# 추가된 부분 def checkCoin(player, coins): pi = player['index'] player['w'] = IMAGES['player'][0].get_width() player['h']", "if FURYMODE and not crashTest[1]: spawnParticles(particles, crashTest[3]) # remove the pipe # it's", "if furymodePipeFrameCounter == FURYMODE_FRAMES_TO_SPAWN_PIPES: # counter reset furymodePipeFrameCounter = 0 # pipe spawn", "# hitmask for player HITMASKS['player'] = ( getHitmask(IMAGES['player'][0]), getHitmask(IMAGES['player'][1]), getHitmask(IMAGES['player'][2]), ) # 추가된", "Static graphics - slider background # self.surf.set_colorkey(BLACK) pygame.draw.rect(self.surf, WHITE, [5, 30, 80, 5],", "val # start value self.maxi = maxi # maximum at slider position right", "BACKGROUNDS_LIST = ( 'assets/sprites/background-day.png', 'assets/sprites/background-night.png', ) # list of pipes PIPES_LIST = (", "+ \": \" + \"\".join(current_string)) while 1: inkey = get_key() if inkey ==", "is an object with attributes: # {'x': position-x, 'y': position-y, # 'vx': velocity-x,", "- 12, 204,24), 1) if len(message) != 0: screen.blit(fontobject1.render(\"HIGH SCORE!!!\", 1, (255,255,255)), ((screen.get_width()", "# maximum at slider position right self.mini = mini # minimum at slider", "of flappy bird\"\"\" global FURYMODE, EASYMODE # index of player to blit on", "'basex': basex, 'playerIndexGen': playerIndexGen, } # (1) key for fury mode if (event.type", "playerRotThr if playerRot <= playerRotThr: visibleRot = playerRot playerSurface = pygame.transform.rotate(IMAGES['player'][playerIndex], visibleRot) SCREEN.blit(playerSurface,", "(a frame counter) FURYMODE_FRAMES_TO_SPAWN_PIPES = 35 # pipes particles amount (for each pipe)", "20 # rotation threshold playerFlapAcc = -9 # players speed on flapping playerFlapped", "- 1: playery += min(playerVelY, BASEY - playery - playerHeight) # player velocity", "remove) pipes and particles if FURYMODE: furymodePipeFrameCounter += 
1 # the counter has", "event.type == KEYDOWN: return event.key else: pass def display_box(screen, message): fontobject = pygame.font.Font(None,18)", "showGameOverScreen(crashInfo) def showWelcomeAnimation(): \"\"\"Shows welcome screen animation of flappy bird\"\"\" global FURYMODE, EASYMODE", "class Keyboard(object): keys = {pygame.K_a: 'A', pygame.K_b: 'B', pygame.K_c: 'C', pygame.K_d: 'D', pygame.K_e:", "random player sprites randPlayer = random.randint(0, len(PLAYERS_LIST) - 1) IMAGES['player'] = ( pygame.image.load(PLAYERS_LIST[randPlayer][0]).convert_alpha(),", "upperPipes: if uPipe['x'] < -IMAGES['pipe'][0].get_width(): upperPipes.remove(uPipe) for lPipe in lowerPipes: if lPipe['x'] <", "# check if a pipe must be removed from the list for uPipe", "(lower or upper) if FURYMODE: return [True, False, False, lPipe] # normal mode", "current_string[0:-1] elif inkey == K_RETURN: break elif inkey == K_MINUS: current_string.append(\"_\") elif inkey", "{'x': pipeX, 'y': gapY + PIPEGAPSIZE}, # lower pipe ] # 추가된 부분", "== 0: return False x1, y1 = rect.x - rect1.x, rect.y - rect1.y", "SCREEN.blit(IMAGES['gameover'], (overx,overy)) pygame.display.update() gameover = False pygame.time.delay(1000) SCREEN.blit(IMAGES['background'], (0,0)) writeScore(score) count=count+1 pygame.display.update() elif(gameover", "K_SPACE or event.key == K_UP)) or ((event.type == MOUSEBUTTONDOWN and event.button == 1)", "an object with attributes: # {'x': position-x, 'y': position-y, # 'vx': velocity-x, 'vy':", "IMAGES['base'] = pygame.image.load('assets/sprites/base.png').convert_alpha() # the \"fury mode\" button for welcome screen (with the", "if playerRot > -90: playerRot -= playerVelRot # player's movement if playerVelY <", "lowerPipes: if lPipe['x'] < -IMAGES['pipe'][0].get_width(): lowerPipes.remove(lPipe) # particles for particle in particles: #", "upperPipes: SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y'])) for lPipe in lowerPipes: SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], 
lPipe['y'])) # 추가된", "+= min(playerVelY, BASEY - playery - playerHeight) # player velocity change if playerVelY", "pipe uCollide = pixelCollision(playerRect, uPipeRect, pHitMask, uHitmask) if uCollide: # for fury mode", "'vx': velocity-x, 'vy': velocity-y, # 'i': index in textures list} particles = []", "math.cos(angle) * vel particle['vy'] = math.sin(angle) * vel particles.append(particle) # sound effect SOUNDS['hit'].play()", "# a particle is an object with attributes: # {'x': position-x, 'y': position-y,", "# 추가된 부분 if coinTest[0]: score += 1 SOUNDS['point'].play() coins.pop(0) # check for", "normal mode, we add pipes with a \"timer\" (a frame counter) FURYMODE_FRAMES_TO_SPAWN_PIPES =", "player's velocity along Y, default same as playerFlapped playerMaxVelY = 10 # max", "playerShmVals['val'])) SCREEN.blit(IMAGES['message'], (messagex, messagey)) SCREEN.blit(IMAGES['base'], (basex, BASEY)) SCREEN.blit(IMAGES['easymode'],(easymodex,easymodey)) SCREEN.blit(IMAGES['hardmode'],(hardmodex,hardmodey)) SCREEN.blit(IMAGES['furymode'], (furymodex, furymodey)) SCREEN.blit(IMAGES['furymode-key'],", "so player overlaps the score showScore(score) # Player rotation has a threshold visibleRot", "mask def get_key(): while 1: event = pygame.event.poll() if event.type == KEYDOWN: return", "itemgetter import random import sys import math import pygame from pygame.locals import *", "upper) if FURYMODE: return [True, False, True, uPipe] # normal mode return [True,", "- IMAGES['furymode'].get_width()) / 2) furymodey = int(SCREENHEIGHT * 0.80) # just at right", "/ 2) furymodey = int(SCREENHEIGHT * 0.80) # just at right of the", "in # normal mode, we add pipes with a \"timer\" (a frame counter)", "score in center of screen\"\"\" scoreDigits = [int(x) for x in list(str(score))] totalWidth", "# numbers sprites for score display IMAGES['numbers'] = ( pygame.image.load('assets/sprites/0.png').convert_alpha(), pygame.image.load('assets/sprites/1.png').convert_alpha(), 
pygame.image.load('assets/sprites/2.png').convert_alpha(), pygame.image.load('assets/sprites/3.png').convert_alpha(),", "+ 280 + (SCREENWIDTH / 2), 'y': newCoin2[0]['y']}, ] pipeVelX = -4 #", "IMAGES['easymode'].get_rect(center=(easymodex+54,easymodey+14)).collidepoint(pygame.mouse.get_pos())): EASYMODE = True # make first flap sound and return values for", "[] # 추가된 부분 coins = [] else: if EASYMODE: DIFFICULTY = 4", "< -IMAGES['coin'].get_width(): coins.pop(0) # draw sprites SCREEN.blit(IMAGES['background'], (0,0)) for uPipe in upperPipes: SCREEN.blit(IMAGES['pipe'][0],", "upperPipes.pop(0) # 추가된 부분 if coins[0]['x'] < -IMAGES['coin'].get_width(): coins.pop(0) # draw sprites SCREEN.blit(IMAGES['background'],", "if bird collided with pipe lCollide = pixelCollision(playerRect, lPipeRect, pHitMask, lHitmask) if lCollide:", "the player down ans shows gameover image\"\"\" global FURYMODE, EASYMODE FURYMODE = False", "= pygame.image.load('assets/sprites/message.png').convert_alpha() # base (ground) sprite IMAGES['base'] = pygame.image.load('assets/sprites/base.png').convert_alpha() # the \"fury mode\"", "of the basic slide surface \"\"\" # static surf = self.surf.copy() # dynamic", "self.val = (pygame.mouse.get_pos()[0] - self.xpos - 10) / 80 * (self.maxi - self.mini)", "pygame.image.load('assets/sprites/4.png').convert_alpha(), pygame.image.load('assets/sprites/5.png').convert_alpha(), pygame.image.load('assets/sprites/6.png').convert_alpha(), pygame.image.load('assets/sprites/7.png').convert_alpha(), pygame.image.load('assets/sprites/8.png').convert_alpha(), pygame.image.load('assets/sprites/9.png').convert_alpha() ) # game over sprite IMAGES['gameover'] =", "* (aMax - aMin) + aMin particle['vx'] = math.cos(angle) * vel particle['vy'] =", "# 추가된 부분 HITMASKS['coin'] = ( getHitmask(IMAGES['coin']), ) movementInfo = showWelcomeAnimation() crashInfo =", "IMAGES['numbers'] = ( pygame.image.load('assets/sprites/0.png').convert_alpha(), 
pygame.image.load('assets/sprites/1.png').convert_alpha(), pygame.image.load('assets/sprites/2.png').convert_alpha(), pygame.image.load('assets/sprites/3.png').convert_alpha(), pygame.image.load('assets/sprites/4.png').convert_alpha(), pygame.image.load('assets/sprites/5.png').convert_alpha(), pygame.image.load('assets/sprites/6.png').convert_alpha(), pygame.image.load('assets/sprites/7.png').convert_alpha(), pygame.image.load('assets/sprites/8.png').convert_alpha(), pygame.image.load('assets/sprites/9.png').convert_alpha()", "== MOUSEBUTTONDOWN and event.button == 1): if playery > -2 * IMAGES['player'][0].get_height(): playerVelY", "player['w'] = IMAGES['player'][0].get_width() player['h'] = IMAGES['player'][0].get_height() playerRect = pygame.Rect(player['x'], player['y'], player['w'], player['h']) coinW", "collided with pipe uCollide = pixelCollision(playerRect, uPipeRect, pHitMask, uHitmask) if uCollide: # for", "in particles: SCREEN.blit(IMAGES['pipe-particle'][particle['i']], (particle['x'], particle['y'])) SCREEN.blit(IMAGES['base'], (basex, BASEY)) # print score so player", "movement if playerVelY < playerMaxVelY and not playerFlapped: playerVelY += playerAccY if playerFlapped:", "surf = self.surf.copy() # dynamic pos = (10+int((self.val-self.mini)/(self.maxi-self.mini)*80), 33) self.button_rect = self.button_surf.get_rect(center=pos) surf.blit(self.button_surf,", "for event in pygame.event.get(): if event.type == QUIT or (event.type == KEYDOWN and", "particles for fury mode # pipes are green if pipeindex == 0: IMAGES['pipe-particle']", "0, 0) RED = (255, 50, 50) YELLOW = (255, 255, 0) GREEN", "= pygame.Rect(player['x'], player['y'], player['w'], player['h']) pipeW = IMAGES['pipe'][0].get_width() pipeH = IMAGES['pipe'][0].get_height() for uPipe", "+ 200, 'y': newPipe1[1]['y']}, {'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y':", "/ 2) - 12, 204,24), 1) if len(message) != 0: screen.blit(fontobject1.render(\"HIGH SCORE!!!\", 1,", "10 return [ {'x': pipeX, 
'y': gapY - pipeHeight}, # upper pipe {'x':", "== 0: playerIndex = next(playerIndexGen) loopIter = (loopIter + 1) % 30 basex", "SCREEN.blit(IMAGES['background'], (0,0)) SLIDER.draw() if(SLIDER.val>0): SCREEN.blit(IMAGES['speaker'][0], (160,15)) else : SCREEN.blit(IMAGES['speaker'][1], (160,15)) SCREEN.blit(IMAGES['player'][playerIndex], (playerx, playery", "furymodePipeFrameCounter += 1 # the counter has the max value, we must spawn", "rect.width == 0 or rect.height == 0: return False x1, y1 = rect.x", "player shm for up-down motion on welcome screen playerShmVals = {'val': 0, 'dir':", "/ remove) pipes and particles if FURYMODE: furymodePipeFrameCounter += 1 # the counter", "in visible rotation) playerRot = 45 playerHeight = IMAGES['player'][playerIndex].get_height() playery += min(playerVelY, BASEY", "pipe is about to touch left of screen if 0 < upperPipes[0]['x'] <", "player['y'], player['w'], player['h']) coinW = IMAGES['coin'].get_width() coinH = IMAGES['coin'].get_height() for coin in coins:", "FURYMODE, FURYMODE_FRAMES_TO_SPAWN_PIPES, EASYMODE DIFFICULTY = 0 score = playerIndex = loopIter = 0", "= (1, 1, 1) TOPFIVE=[('kim',0),('kim',0),('kim',0),('kim',0),('kim',0)] FPS = 30 SCREENWIDTH = 288 SCREENHEIGHT =", "getRandomPipe(DIFFICULTY) newPipe2 = getRandomPipe(DIFFICULTY) # list of upper pipes upperPipes = [ {'x':", "= [ {'x': SCREENWIDTH + 200, 'y': newPipe1[0]['y']}, {'x': SCREENWIDTH + 200 +", "SCREEN.blit(IMAGES['pipe-particle'][particle['i']], (particle['x'], particle['y'])) SCREEN.blit(IMAGES['base'], (basex, BASEY)) # print score so player overlaps the", "player['h'] = IMAGES['player'][0].get_height() playerRect = pygame.Rect(player['x'], player['y'], player['w'], player['h']) coinW = IMAGES['coin'].get_width() coinH", "# player's velocity along Y, default same as playerFlapped playerMaxVelY = 10 #", "pos = (10+int((self.val-self.mini)/(self.maxi-self.mini)*80), 33) self.button_rect = self.button_surf.get_rect(center=pos) surf.blit(self.button_surf, 
self.button_rect) self.button_rect.move_ip(self.xpos, self.ypos) # move", "== K_1) or ((event.type == MOUSEBUTTONDOWN and event.button == 1) and IMAGES['furymode'].get_rect(center=(furymodex+54,furymodey+14)).collidepoint(pygame.mouse.get_pos())): #", "# just at right of the fury mode button (8 is right padding)", "{pygame.K_a: 'A', pygame.K_b: 'B', pygame.K_c: 'C', pygame.K_d: 'D', pygame.K_e: 'E', pygame.K_f: 'F', pygame.K_g:", "def getRandomCoin(): \"\"\" returns a randomly generated coin \"\"\" coinY = random.randrange(20, int(BASEY", "1} # initialize volume for sound in SOUNDS: SOUNDS[sound].set_volume(SLIDER.val) while True: for event", "/ 2) - 10, 200,20), 0) pygame.draw.rect(screen, (255,255,255), ((screen.get_width() / 2) - 102,", "- 10)) pygame.display.flip() def ask(screen, question): \"ask(screen, question) -> answer\" pygame.font.init() current_string =", ") # list of pipes PIPES_LIST = ( 'assets/sprites/pipe-green.png', 'assets/sprites/pipe-red.png', ) try: xrange", "= 0 # total width of all numbers to be printed for digit", "100, (screen.get_height() / 2) - 10, 200,20), 0) pygame.draw.rect(screen, (255,255,255), ((screen.get_width() / 2)", "button (8 is right padding) furymodeKeyX = furymodex + IMAGES['furymode'].get_width() + 8 furymodeKeyY", "random.randint(0, len(BACKGROUNDS_LIST) - 1) IMAGES['background'] = pygame.image.load(BACKGROUNDS_LIST[randBg]).convert() # select random player sprites randPlayer", "False score = crashInfo['score'] playerx = SCREENWIDTH * 0.2 playery = crashInfo['y'] playerHeight", "'T', pygame.K_u: 'U', pygame.K_v: 'V', pygame.K_w: 'W', pygame.K_x: 'X', pygame.K_y: 'Y', pygame.K_z: 'Z'}", "for crash here crashTest = checkCrash({'x': playerx, 'y': playery, 'index': playerIndex}, upperPipes, lowerPipes)", "K_1) or ((event.type == MOUSEBUTTONDOWN and event.button == 1) and IMAGES['furymode'].get_rect(center=(furymodex+54,furymodey+14)).collidepoint(pygame.mouse.get_pos())): # make", 
"pygame.image.load('assets/sprites/particles-red-7.png').convert_alpha(), ) # hismask for pipes HITMASKS['pipe'] = ( getHitmask(IMAGES['pipe'][0]), getHitmask(IMAGES['pipe'][1]), ) #", "upperPipes, 'lowerPipes': lowerPipes, 'score': score, 'playerVelY': playerVelY, 'playerRot': playerRot } # 추가된 부분", "3 positions of flap) PLAYERS_LIST = ( # red bird ( 'assets/sprites/redbird-upflap.png', 'assets/sprites/redbird-midflap.png',", "PIPEGAPSIZE}, # lower pipe ] # 추가된 부분 def getRandomCoin(): \"\"\" returns a", "= [] else: if EASYMODE: DIFFICULTY = 4 # get 2 new pipes", "FURYMODE_PARTICLES) - 1 # random angle for a minimum velocity vel = random.random()", "particles amount (for each pipe) FURYMODE_PARTICLES = 8 # max particles for each", "180), pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), ) # pipes' particles for fury mode # pipes are green", "hit and die sounds SOUNDS['hit'].play() if not crashInfo['groundCrash']: SOUNDS['die'].play() while True: for event", "= pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT)) pygame.display.set_caption('Flappy Bird') # numbers sprites for score display IMAGES['numbers'] =", "upper pipe {'x': pipeX, 'y': gapY + PIPEGAPSIZE}, # lower pipe ] #", "= crashInfo['playerVelY'] playerAccY = 2 playerRot = crashInfo['playerRot'] playerVelRot = 7 count=0 gameover", "the user plays the fury mode FURYMODE = False EASYMODE = False #", "pygame.display.update() gameover = False pygame.time.delay(1000) SCREEN.blit(IMAGES['background'], (0,0)) writeScore(score) count=count+1 pygame.display.update() elif(gameover == True):", "'y': playery, 'index': playerIndex}, upperPipes, lowerPipes) # 추가된 부분 coinTest = checkCoin({'x': playerx,", "upperPipes: # pipe rect uPipeRect = pygame.Rect(uPipe['x'], uPipe['y'], pipeW, pipeH) # player and", "for fury mode we want to break the pipe so we # must", "[False, False] def pixelCollision(rect1, rect2, hitmask1, hitmask2): \"\"\"Checks if two objects collide and", "= (loopIter + 1) % 30 basex = -((-basex + 
4) % baseShift)", "SOUNDS['hit'].play() if not crashInfo['groundCrash']: SOUNDS['die'].play() while True: for event in pygame.event.get(): if event.type", "pipeindex = random.randint(0, len(PIPES_LIST) - 1) IMAGES['pipe'] = ( pygame.transform.rotate( pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), 180), pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(),", "FURYMODE_PARTICLES_MAX = 48 # list of all possible players (tuple of 3 positions", "False] return [False, False] # 추가된 부분 def checkCoin(player, coins): pi = player['index']", "- 75, (screen.get_height() / 2) - 50)) screen.blit(fontobject.render(message, 1, (255,255,255)), ((screen.get_width() / 2)", "'assets/sprites/redbird-downflap.png', ), # blue bird ( # amount by which base can maximum", "> -2 * IMAGES['player'][0].get_height(): playerVelY = playerFlapAcc playerFlapped = True SOUNDS['wing'].play() # check", "return { 'playery': playery + playerShmVals['val'], 'basex': basex, 'playerIndexGen': playerIndexGen, } # (1)", "self.ypos)) def move(self): \"\"\" The dynamic part; reacts to movement of the slider", "= crashInfo['score'] playerx = SCREENWIDTH * 0.2 playery = crashInfo['y'] playerHeight = IMAGES['player'][0].get_height()", "= pygame.image.load('assets/sprites/gameover.png').convert_alpha() # message sprite for welcome screen IMAGES['message'] = pygame.image.load('assets/sprites/message.png').convert_alpha() # base", "and event.key == K_ESCAPE): pygame.quit() sys.exit() if event.type == pygame.MOUSEBUTTONDOWN: pos = pygame.mouse.get_pos()", "SCREEN.blit(fontobject.render(str(TOPFIVE[i][1]), 1,RED),((SCREEN.get_width() / 2) + 75, (SCREEN.get_height() / 2) -160 + (50*i))) FPSCLOCK.tick(FPS)", "to the particle list randomly generated with pipe's rectangle (hitbox) \"\"\" global FURYMODE_PARTICLES,", "playerShmVals['val'], 'basex': basex, 'playerIndexGen': playerIndexGen, } elif (event.type == KEYDOWN and (event.key ==", "collided with pipe lCollide = pixelCollision(playerRect, lPipeRect, 
pHitMask, lHitmask) if lCollide: # for", "SOUNDS['wing'].play() # check for crash here crashTest = checkCrash({'x': playerx, 'y': playery, 'index':", "DIFFICULTY = 0 score = playerIndex = loopIter = 0 playerIndexGen = movementInfo['playerIndexGen']", "on welcome screen playerShmVals = {'val': 0, 'dir': 1} # initialize volume for", "= (pygame.mouse.get_pos()[0] - self.xpos - 10) / 80 * (self.maxi - self.mini) +", "= ( pygame.image.load(PLAYERS_LIST[randPlayer][0]).convert_alpha(), pygame.image.load(PLAYERS_LIST[randPlayer][1]).convert_alpha(), pygame.image.load(PLAYERS_LIST[randPlayer][2]).convert_alpha(), ) # select random pipe sprites pipeindex =", "return event.key else: pass def display_box(screen, message): fontobject = pygame.font.Font(None,18) fontobject1 = pygame.font.Font(None,30)", "playerRot > -90: playerRot -= playerVelRot # draw sprites overx = int((SCREENWIDTH -", "particle['vx'] = math.cos(angle) * vel particle['vy'] = math.sin(angle) * vel particles.append(particle) # sound", "pipes.\"\"\" global FURYMODE pi = player['index'] player['w'] = IMAGES['player'][0].get_width() player['h'] = IMAGES['player'][0].get_height() #", "by which base can maximum shift to left PIPEGAPSIZE = 130 # gap", "pygame.image.load('assets/sprites/particles-red-1.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-2.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-3.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-4.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-5.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-6.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-7.png').convert_alpha(), ) # hismask for pipes HITMASKS['pipe']", "( # red bird ( 'assets/sprites/redbird-upflap.png', 'assets/sprites/redbird-midflap.png', 'assets/sprites/redbird-downflap.png', ), # blue bird (", "K_SPACE or event.key == K_UP)) or (event.type == MOUSEBUTTONDOWN and 
event.button == 1):", "mode FURYMODE = False EASYMODE = False # In fury mode, the pipe", "* 0.74) furymodex = int((SCREENWIDTH - IMAGES['furymode'].get_width()) / 2) furymodey = int(SCREENHEIGHT *", "= checkCoin({'x': playerx, 'y': playery, 'index': playerIndex}, coins) if crashTest[0]: # the player", "1, (255,255,255)), ((screen.get_width() / 2) - 100, (screen.get_height() / 2) - 10)) pygame.display.flip()", "= False # the hit attribute indicates slider movement due to mouse interaction", "(ground) sprite IMAGES['base'] = pygame.image.load('assets/sprites/base.png').convert_alpha() # the \"fury mode\" button for welcome screen", "player velocity, max velocity, downward accleration, accleration on flap playerVelY = -9 #", "\"\"\"oscillates the value of playerShm['val'] between 8 and -8\"\"\" if abs(playerShm['val']) == 8:", "= int((SCREENWIDTH - IMAGES['hardmode'].get_width())/2) hardmodey = int(SCREENHEIGHT * 0.74) furymodex = int((SCREENWIDTH -", "image's alpha.\"\"\" mask = [] for x in xrange(image.get_width()): mask.append([]) for y in", "1: playerShm['val'] += 1 else: playerShm['val'] -= 1 def getRandomPipe(DIFFICULTY): PIPEGAPSIZE = 100", "= ( getHitmask(IMAGES['coin']), ) movementInfo = showWelcomeAnimation() crashInfo = mainGame(movementInfo) showGameOverScreen(crashInfo) def showWelcomeAnimation():", "bird ( # amount by which base can maximum shift to left 'assets/sprites/bluebird-upflap.png',", "pygame.display.update() def playerShm(playerShm): \"\"\"oscillates the value of playerShm['val'] between 8 and -8\"\"\" if", "- 1) IMAGES['background'] = pygame.image.load(BACKGROUNDS_LIST[randBg]).convert() # select random player sprites randPlayer = random.randint(0,", "out of the screen if upperPipes[0]['x'] < -IMAGES['pipe'][0].get_width(): lowerPipes.pop(0) upperPipes.pop(0) # 추가된 부분", "the ground if particle['y'] >= BASEY: particles.remove(particle) else: # add new pipes when", "FURYMODE = False EASYMODE = False score = crashInfo['score'] playerx = 
SCREENWIDTH *", "uPipe, lPipe in zip(upperPipes, lowerPipes): SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y'])) SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y'])) SCREEN.blit(IMAGES['base'], (basex,", "= [] display_box(screen, question + \": \" + \"\".join(current_string)) while 1: inkey =", "(pygame.mouse.get_pos()[0] - self.xpos - 10) / 80 * (self.maxi - self.mini) + self.mini", "newCoin1 = getRandomCoin() newCoin2 = getRandomCoin() coins = [ {'x': SCREENWIDTH + 280,", "# In fury mode, the pipe sapwn system is different than in #", "counter has the max value, we must spawn new pipes if furymodePipeFrameCounter ==", "of static and dynamic graphics in a copy of the basic slide surface", "in lowerPipes: SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y'])) # 추가된 부분 for coin in coins: SCREEN.blit(IMAGES['coin'],", "pygame.image.load('assets/sprites/2.png').convert_alpha(), pygame.image.load('assets/sprites/3.png').convert_alpha(), pygame.image.load('assets/sprites/4.png').convert_alpha(), pygame.image.load('assets/sprites/5.png').convert_alpha(), pygame.image.load('assets/sprites/6.png').convert_alpha(), pygame.image.load('assets/sprites/7.png').convert_alpha(), pygame.image.load('assets/sprites/8.png').convert_alpha(), pygame.image.load('assets/sprites/9.png').convert_alpha() ) # game over sprite", "if the particle is under the ground if particle['y'] >= BASEY: particles.remove(particle) else:", "def showGameOverScreen(crashInfo): \"\"\"crashes the player down ans shows gameover image\"\"\" global FURYMODE, EASYMODE", "which pipe is colliding (lower or upper) if FURYMODE: return [True, False, True,", "pygame.K_m: 'M', pygame.K_n: 'N', pygame.K_o: 'O', pygame.K_p: 'P', pygame.K_q: 'Q', pygame.K_r: 'R', pygame.K_s:", "not crashInfo['groundCrash']: SOUNDS['die'].play() while True: for event in pygame.event.get(): if event.type == QUIT", "= IMAGES['coin'].get_width() coinH = IMAGES['coin'].get_height() for coin in coins: coinRect = pygame.Rect(coin['x'], 
coin['y'],", "RED = (255, 50, 50) YELLOW = (255, 255, 0) GREEN = (0,", "'score': score, 'playerVelY': playerVelY, 'playerRot': playerRot } # 추가된 부분 if coinTest[0]: score", "sprites SCREEN.blit(IMAGES['background'], (0,0)) for uPipe in upperPipes: SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y'])) for lPipe in", "pygame.K_c: 'C', pygame.K_d: 'D', pygame.K_e: 'E', pygame.K_f: 'F', pygame.K_g: 'G', pygame.K_h: 'H', pygame.K_i:", "crash if not crashInfo['groundCrash']: if playerRot > -90: playerRot -= playerVelRot # draw", "(lPipe['x'], lPipe['y'])) # 추가된 부분 for coin in coins: SCREEN.blit(IMAGES['coin'], (coin['x'], coin['y'])) #", "if its out of the screen if upperPipes[0]['x'] < -IMAGES['pipe'][0].get_width(): lowerPipes.pop(0) upperPipes.pop(0) #", "'basex': basex, 'playerIndexGen': playerIndexGen, } # adjust playery, playerIndex, basex if (loopIter +", "# select random pipe sprites pipeindex = random.randint(0, len(PIPES_LIST) - 1) IMAGES['pipe'] =", "uPipe['x'] += pipeVelX for lPipe in lowerPipes: lPipe['x'] += pipeVelX # 추가된 부분", "MOUSEBUTTONDOWN and event.button == 1): if playery + playerHeight >= BASEY - 1:", "rotation has a threshold visibleRot = playerRotThr if playerRot <= playerRotThr: visibleRot =", "+ 100 return [ {'x': coinX, 'y': coinY}, ] def showScore(score): \"\"\"displays score", "range(0,5) : SCREEN.blit(fontobject.render(TOPFIVE[i][0], 1, RED),((SCREEN.get_width() / 2) - 100, (SCREEN.get_height() / 2) -160", "uCollide = pixelCollision(playerRect, uPipeRect, pHitMask, uHitmask) if uCollide: # for fury mode we", "+ playerHeight < BASEY - 1: playery += min(playerVelY, BASEY - playery -", "{}, {} # True if the user plays the fury mode FURYMODE =", "and (event.key == K_SPACE or event.key == K_UP)) or (event.type == MOUSEBUTTONDOWN and", "player['w'], player['h']) pipeW = IMAGES['pipe'][0].get_width() pipeH = IMAGES['pipe'][0].get_height() for uPipe in upperPipes: #", "def pixelCollision(rect1, rect2, hitmask1, hitmask2): \"\"\"Checks if two 
objects collide and not just", "descend speed playerMinVelY = -8 # min vel along Y, max ascend speed", "255, 0) GREEN = (0, 255, 50) BLUE = (50, 50, 255) GREY", "- IMAGES['message'].get_width()) / 2) messagey = int(SCREENHEIGHT * 0.12) easymodex = int((SCREENWIDTH -", "pygame.draw.rect(self.surf, WHITE, [5, 30, 80, 5], 0) # dynamic graphics - button surface", "shift to left PIPEGAPSIZE = 130 # gap between upper and lower part", "playery + playerShmVals['val'])) SCREEN.blit(IMAGES['message'], (messagex, messagey)) SCREEN.blit(IMAGES['base'], (basex, BASEY)) SCREEN.blit(IMAGES['easymode'],(easymodex,easymodey)) SCREEN.blit(IMAGES['hardmode'],(hardmodex,hardmodey)) SCREEN.blit(IMAGES['furymode'], (furymodex,", "'Y', pygame.K_z: 'Z'} def main(): global SCREEN, FPSCLOCK, SLIDER pygame.init() FPSCLOCK = pygame.time.Clock()", "if playerVelY < playerMaxVelY and not playerFlapped: playerVelY += playerAccY if playerFlapped: playerFlapped", "0 # total width of all numbers to be printed for digit in", "(SCREENWIDTH / 2), 'y': newPipe2[1]['y']}, ] # 추가된 부분 newCoin1 = getRandomCoin() newCoin2", "K_BACKSPACE: current_string = current_string[0:-1] elif inkey == K_RETURN: break elif inkey == K_MINUS:", "# 추가된 부분 for coin in coins: coin['x'] += pipeVelX # update (add", "if hitmask1[x1+x][y1+y] and hitmask2[x2+x][y2+y]: return True return False def writeScore(score): TOPFIVE.append((ask(SCREEN,\"NAME: \"),score)) TOPFIVE.sort(key=itemgetter(1),reverse=", "+ 200 + (SCREENWIDTH / 2), 'y': newPipe2[0]['y']}, ] # list of lowerpipe", "lPipe in lowerPipes: if lPipe['x'] < -IMAGES['pipe'][0].get_width(): lowerPipes.remove(lPipe) # particles for particle in", "screen IMAGES['message'] = pygame.image.load('assets/sprites/message.png').convert_alpha() # base (ground) sprite IMAGES['base'] = pygame.image.load('assets/sprites/base.png').convert_alpha() # the", "= random.randint(pipe['x'], pipe['x'] + pipeW) particle['y'] = random.randint(pipe['y'], pipe['y'] + pipeH) particle['i'] =", 
"upperPipes.remove(crashTest[3]) score+=1 # it's a lower pipe else: lowerPipes.remove(crashTest[3]) else: return { 'y':", "10, 200,20), 0) pygame.draw.rect(screen, (255,255,255), ((screen.get_width() / 2) - 102, (screen.get_height() / 2)", "if playery > -2 * IMAGES['player'][0].get_height(): playerVelY = playerFlapAcc playerFlapped = True SOUNDS['wing'].play()", "!= 0: screen.blit(fontobject1.render(\"HIGH SCORE!!!\", 1, (255,255,255)), ((screen.get_width() / 2) - 75, (screen.get_height() /", "FURYMODE, EASYMODE # index of player to blit on screen playerIndex = 0", "soundExt) SOUNDS['swoosh'] = pygame.mixer.Sound('assets/audio/swoosh' + soundExt) SOUNDS['wing'] = pygame.mixer.Sound('assets/audio/wing' + soundExt) # volume", "move of button box to correct screen position # screen SCREEN.blit(IMAGES['background'], (0,0)) SCREEN.blit(surf,", "vel particles.append(particle) # sound effect SOUNDS['hit'].play() def checkCrash(player, upperPipes, lowerPipes): \"\"\"returns True if", "# normal mode, we add pipes with a \"timer\" (a frame counter) FURYMODE_FRAMES_TO_SPAWN_PIPES", "== pygame.MOUSEBUTTONDOWN: pos = pygame.mouse.get_pos() if SLIDER.button_rect.collidepoint(pos): SLIDER.hit = True elif event.type ==", "score = playerIndex = loopIter = 0 playerIndexGen = movementInfo['playerIndexGen'] playerx, playery =", "IMAGES['pipe'][0].get_height() for uPipe in upperPipes: # pipe rect uPipeRect = pygame.Rect(uPipe['x'], uPipe['y'], pipeW,", "playerVelRot = 3 # angular speed playerRotThr = 20 # rotation threshold playerFlapAcc", "slide surface \"\"\" # static surf = self.surf.copy() # dynamic pos = (10+int((self.val-self.mini)/(self.maxi-self.mini)*80),", "positions of flap) PLAYERS_LIST = ( # red bird ( 'assets/sprites/redbird-upflap.png', 'assets/sprites/redbird-midflap.png', 'assets/sprites/redbird-downflap.png',", "SCREEN.blit(IMAGES['numbers'][digit], (Xoffset, SCREENHEIGHT * 0.1)) Xoffset += IMAGES['numbers'][digit].get_width() def spawnParticles(particles, pipe): 
\"\"\" Add", "(event.type == KEYDOWN and event.key == K_ESCAPE): pygame.quit() sys.exit() if event.type == pygame.MOUSEBUTTONDOWN:", "< -IMAGES['pipe'][0].get_width(): lowerPipes.remove(lPipe) # particles for particle in particles: # speed particle['x'] +=", "# upper pipe {'x': pipeX, 'y': gapY + PIPEGAPSIZE}, # lower pipe ]", "totalWidth = 0 # total width of all numbers to be printed for", "box to correct screen position # screen SCREEN.blit(IMAGES['background'], (0,0)) SCREEN.blit(surf, (self.xpos, self.ypos)) def", "mainGame SOUNDS['wing'].play() return { 'playery': playery + playerShmVals['val'], 'basex': basex, 'playerIndexGen': playerIndexGen, }", "counter reset furymodePipeFrameCounter = 0 # pipe spawn pipes = getRandomPipe(DIFFICULTY) upperPipes.append(pipes[0]) lowerPipes.append(pipes[1])", "getHitmask(image): \"\"\"returns a hitmask using an image's alpha.\"\"\" mask = [] for x", "288 SCREENHEIGHT = 512 # amount by which base can maximum shift to", "pipe is colliding (lower or upper) if FURYMODE: return [True, False, False, lPipe]", "upper and lower part of pipe BASEY=SCREENHEIGHT * 0.79 # image, sound and", "baseShift) playerShm(playerShmVals) # draw sprites SCREEN.blit(IMAGES['background'], (0,0)) SLIDER.draw() if(SLIDER.val>0): SCREEN.blit(IMAGES['speaker'][0], (160,15)) else :", "False def writeScore(score): TOPFIVE.append((ask(SCREEN,\"NAME: \"),score)) TOPFIVE.sort(key=itemgetter(1),reverse= True) TOPFIVE.pop() def getHitmask(image): \"\"\"returns a hitmask", "* 0.12) easymodex = int((SCREENWIDTH - IMAGES['easymode'].get_width())/2) easymodey = int(SCREENHEIGHT * 0.68) hardmodex", "for welcome screen IMAGES['message'] = pygame.image.load('assets/sprites/message.png').convert_alpha() # base (ground) sprite IMAGES['base'] = pygame.image.load('assets/sprites/base.png').convert_alpha()", "return False def writeScore(score): TOPFIVE.append((ask(SCREEN,\"NAME: \"),score)) TOPFIVE.sort(key=itemgetter(1),reverse= True) TOPFIVE.pop() def getHitmask(image): 
\"\"\"returns a", "maximum shift to left PIPEGAPSIZE = 130 # gap between upper and lower", "while True: # select random background sprites randBg = random.randint(0, len(BACKGROUNDS_LIST) - 1)", "1: playery += min(playerVelY, BASEY - playery - playerHeight) # player velocity change", "pipeW, pipeH) # player and pipe hitmasks pHitMask = HITMASKS['player'][pi] uHitmask = HITMASKS['pipe'][0]", "uPipe] # normal mode return [True, False] for lPipe in lowerPipes: # pipe", "0: IMAGES['pipe-particle'] = ( pygame.image.load('assets/sprites/particles-green-0.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-1.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-2.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-3.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-4.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-5.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-6.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-7.png').convert_alpha(), )", "playery, 'index': playerIndex}, upperPipes, lowerPipes) # 추가된 부분 coinTest = checkCoin({'x': playerx, 'y':", "100 return [ {'x': coinX, 'y': coinY}, ] def showScore(score): \"\"\"displays score in", "or upper) if FURYMODE: return [True, False, False, lPipe] # normal mode return", "0, 'dir': 1} # initialize volume for sound in SOUNDS: SOUNDS[sound].set_volume(SLIDER.val) while True:", "SOUNDS, HITMASKS = {}, {}, {} # True if the user plays the", "loopIter = 0 playerIndexGen = movementInfo['playerIndexGen'] playerx, playery = int(SCREENWIDTH * 0.2), movementInfo['playery']", "False] def pixelCollision(rect1, rect2, hitmask1, hitmask2): \"\"\"Checks if two objects collide and not", "0) # dynamic graphics - button surface # self.button_surf = pygame.surface.Surface((15, 15)) self.button_surf.fill(TRANS)", "SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y': newPipe2[0]['y']}, ] # list of", 
"# total width of all numbers to be printed for digit in scoreDigits:", "aMin = -math.pi * .35 aMax = math.pi * .25 angle = random.random()", "# (1) key for fury mode if (event.type == KEYDOWN and event.key ==", "mask[x].append(bool(image.get_at((x,y))[3])) return mask def get_key(): while 1: event = pygame.event.poll() if event.type ==", "return [False, False] def pixelCollision(rect1, rect2, hitmask1, hitmask2): \"\"\"Checks if two objects collide", "messagex = int((SCREENWIDTH - IMAGES['message'].get_width()) / 2) messagey = int(SCREENHEIGHT * 0.12) easymodex", "writeScore(score) count=count+1 pygame.display.update() elif(gameover == True): SCREEN.blit(IMAGES['gameover'], (overx,overy)) pygame.display.update() gameover = False pygame.time.delay(1000)", "uCollide: # for fury mode we want to break the pipe so we", "or upper) if FURYMODE: return [True, False, True, uPipe] # normal mode return", "\"\"\"crashes the player down ans shows gameover image\"\"\" global FURYMODE, EASYMODE FURYMODE =", "new pipes to add to upperPipes lowerPipes list newPipe1 = getRandomPipe(DIFFICULTY) newPipe2 =", "/ 2) -220)) for i in range(0,5) : SCREEN.blit(fontobject.render(TOPFIVE[i][0], 1, RED),((SCREEN.get_width() / 2)", "(messagex, messagey)) SCREEN.blit(IMAGES['base'], (basex, BASEY)) SCREEN.blit(IMAGES['easymode'],(easymodex,easymodey)) SCREEN.blit(IMAGES['hardmode'],(hardmodex,hardmodey)) SCREEN.blit(IMAGES['furymode'], (furymodex, furymodey)) SCREEN.blit(IMAGES['furymode-key'], (furymodeKeyX, furymodeKeyY))", "# player velocity change if playerVelY < 15: playerVelY += playerAccY # rotate", "for sounds in SOUNDS: SOUNDS[sounds].set_volume(SLIDER.val) #(2) key for easymode if (event.type == KEYDOWN", "remove the pipe # it's an upper pipe if crashTest[2]: upperPipes.remove(crashTest[3]) score+=1 #", "# pipes particles amount (for each pipe) FURYMODE_PARTICLES = 8 # max particles", "lowerPipes.remove(lPipe) # particles for particle in particles: # speed particle['x'] += 
particle['vx'] particle['y']", "pygame.display.update() def showLeaderboard(): fontobject = pygame.font.Font(None,30) SCREEN.blit(IMAGES['background'],(0,0)) SCREEN.blit(pygame.font.Font(None,50).render(\"LEADERBOARD\", 1, RED),((SCREEN.get_width() / 2) -132,", "of backgrounds BACKGROUNDS_LIST = ( 'assets/sprites/background-day.png', 'assets/sprites/background-night.png', ) # list of pipes PIPES_LIST", "uPipe in upperPipes: uPipe['x'] += pipeVelX for lPipe in lowerPipes: lPipe['x'] += pipeVelX", "== 1) and IMAGES['furymode'].get_rect(center=(furymodex+54,furymodey+14)).collidepoint(pygame.mouse.get_pos())): # make first flap sound and return values for", "30, 80, 5], 0) # dynamic graphics - button surface # self.button_surf =", "or ((event.type == MOUSEBUTTONDOWN and event.button == 1) and IMAGES['hardmode'].get_rect(center=(hardmodex+54,hardmodey+14)).collidepoint(pygame.mouse.get_pos())): SOUNDS['wing'].play() return {", "a message in a box in the middle of the screen\" pygame.draw.rect(screen, (0,0,0),", "HITMASKS = {}, {}, {} # True if the user plays the fury", "playerRotThr = 20 # rotation threshold playerFlapAcc = -9 # players speed on", "200) ORANGE = (200, 100, 50) CYAN = (0, 255, 255) MAGENTA =", "-> answer\" pygame.font.init() current_string = [] display_box(screen, question + \": \" + \"\".join(current_string))", "- IMAGES['easymode'].get_width())/2) easymodey = int(SCREENHEIGHT * 0.68) hardmodex = int((SCREENWIDTH - IMAGES['hardmode'].get_width())/2) hardmodey", "= getRandomCoin() coins.append(newCoin[0]) # remove first pipe if its out of the screen", "showLeaderboard(): fontobject = pygame.font.Font(None,30) SCREEN.blit(IMAGES['background'],(0,0)) SCREEN.blit(pygame.font.Font(None,50).render(\"LEADERBOARD\", 1, RED),((SCREEN.get_width() / 2) -132, (SCREEN.get_height() /", "= pygame.mixer.Sound('assets/audio/hit' + soundExt) SOUNDS['point'] = pygame.mixer.Sound('assets/audio/point' + soundExt) SOUNDS['swoosh'] = pygame.mixer.Sound('assets/audio/swoosh' 
+", "is right padding) furymodeKeyX = furymodex + IMAGES['furymode'].get_width() + 8 furymodeKeyY = furymodey", "+ pipeH) particle['i'] = random.randint(1, FURYMODE_PARTICLES) - 1 # random angle for a", "pipe if crashTest[2]: upperPipes.remove(crashTest[3]) score+=1 # it's a lower pipe else: lowerPipes.remove(crashTest[3]) else:", "\"\"\"returns a hitmask using an image's alpha.\"\"\" mask = [] for x in", "of player to blit on screen playerIndex = 0 playerIndexGen = cycle([0, 1,", "= 512 # amount by which base can maximum shift to left PIPEGAPSIZE", "1, RED),((SCREEN.get_width() / 2) -132, (SCREEN.get_height() / 2) -220)) for i in range(0,5)", "# speaker sprite IMAGES['speaker'] = (pygame.image.load('assets/sprites/speaker_full.png').convert_alpha(), pygame.image.load('assets/sprites/speaker_mute.png').convert_alpha()) # 추가된 부분 IMAGES['coin'] = pygame.image.load('assets/sprites/coin.png').convert_alpha()", "pygame.image.load('assets/sprites/3.png').convert_alpha(), pygame.image.load('assets/sprites/4.png').convert_alpha(), pygame.image.load('assets/sprites/5.png').convert_alpha(), pygame.image.load('assets/sprites/6.png').convert_alpha(), pygame.image.load('assets/sprites/7.png').convert_alpha(), pygame.image.load('assets/sprites/8.png').convert_alpha(), pygame.image.load('assets/sprites/9.png').convert_alpha() ) # game over sprite IMAGES['gameover']", "in pygame.event.get(): if event.type == QUIT or (event.type == KEYDOWN and event.key ==", "the particle is under the ground if particle['y'] >= BASEY: particles.remove(particle) else: #", "of playerShm['val'] between 8 and -8\"\"\" if abs(playerShm['val']) == 8: playerShm['dir'] *= -1", "< pipeMidPos + 4: score += 1 SOUNDS['point'].play() # playerIndex basex change if", "bird collided with pipe lCollide = pixelCollision(playerRect, lPipeRect, pHitMask, lHitmask) if lCollide: #", "= -4 # player velocity, max velocity, downward accleration, accleration on flap playerVelY", "'assets/sprites/pipe-green.png', 
'assets/sprites/pipe-red.png', ) try: xrange except NameError: xrange = range class Keyboard(object): keys", "1): if playery + playerHeight >= BASEY - 1: return # player y", "pygame.image.load('assets/sprites/base.png').convert_alpha() # the \"fury mode\" button for welcome screen (with the key) IMAGES['furymode']", "# amount by which base can maximum shift to left PIPEGAPSIZE = 130", "-9 # players speed on flapping playerFlapped = False # True when player", "die sounds SOUNDS['hit'].play() if not crashInfo['groundCrash']: SOUNDS['die'].play() while True: for event in pygame.event.get():", "range(FURYMODE_PARTICLES_MAX): particle = {} particle['x'] = random.randint(pipe['x'], pipe['x'] + pipeW) particle['y'] = random.randint(pipe['y'],", "blue bird ( # amount by which base can maximum shift to left", "== 0 or rect.height == 0: return False x1, y1 = rect.x -", "# make first flap sound and return values for mainGame FURYMODE = True", "<= playerRotThr: visibleRot = playerRot playerSurface = pygame.transform.rotate(IMAGES['player'][playerIndex], visibleRot) SCREEN.blit(playerSurface, (playerx, playery)) pygame.display.update()", "first pipe is about to touch left of screen if 0 < upperPipes[0]['x']", "loopIter = (loopIter + 1) % 30 basex = -((-basex + 100) %", "200, 'y': newPipe1[1]['y']}, {'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y': newPipe2[1]['y']},", "(event.type == KEYDOWN and event.key == K_ESCAPE): pygame.quit() sys.exit() if (event.type == KEYDOWN", "def ask(screen, question): \"ask(screen, question) -> answer\" pygame.font.init() current_string = [] display_box(screen, question", "== 1) and IMAGES['hardmode'].get_rect(center=(hardmodex+54,hardmodey+14)).collidepoint(pygame.mouse.get_pos())): SOUNDS['wing'].play() return { 'playery': playery + playerShmVals['val'], 'basex': basex,", "pipes upperPipes = [ {'x': SCREENWIDTH + 200, 'y': newPipe1[0]['y']}, {'x': SCREENWIDTH +", "int((SCREENWIDTH - IMAGES['gameover'].get_width()) / 2) overy = int(SCREENHEIGHT * 
0.5) #SCREEN.blit(IMAGES['background'], (0,0)) for", "= 45 # player's rotation playerVelRot = 3 # angular speed playerRotThr =", "{'x': SCREENWIDTH + 200, 'y': newPipe1[0]['y']}, {'x': SCREENWIDTH + 200 + (SCREENWIDTH /", "\"\"\" # static surf = self.surf.copy() # dynamic pos = (10+int((self.val-self.mini)/(self.maxi-self.mini)*80), 33) self.button_rect", "WHITE = (255, 255, 255) BLACK = (0, 0, 0) RED = (255,", "playerIndex = loopIter = 0 playerIndexGen = movementInfo['playerIndexGen'] playerx, playery = int(SCREENWIDTH *", "display_box(screen, question + \": \" + \"\".join(current_string)) while 1: inkey = get_key() if", "screen.blit(fontobject.render(message, 1, (255,255,255)), ((screen.get_width() / 2) - 100, (screen.get_height() / 2) - 10))", "crashInfo['playerRot'] playerVelRot = 7 count=0 gameover = True basex = crashInfo['basex'] upperPipes, lowerPipes", "= HITMASKS['player'][pi] cHitMask = HITMASKS['coin'][0] cCollide = pixelCollision(playerRect, coinRect, pHitMask, cHitMask) if cCollide", "/ 2) - 50)) screen.blit(fontobject.render(message, 1, (255,255,255)), ((screen.get_width() / 2) - 100, (screen.get_height()", "event.button == 1) and IMAGES['hardmode'].get_rect(center=(hardmodex+54,hardmodey+14)).collidepoint(pygame.mouse.get_pos())): SOUNDS['wing'].play() return { 'playery': playery + playerShmVals['val'], 'basex':", "mode if (event.type == KEYDOWN and event.key == K_1) or ((event.type == MOUSEBUTTONDOWN", "or ((event.type == MOUSEBUTTONDOWN and event.button == 1) and IMAGES['easymode'].get_rect(center=(easymodex+54,easymodey+14)).collidepoint(pygame.mouse.get_pos())): EASYMODE = True", "new pipes furymodePipeFrameCounter = 0 while True: for event in pygame.event.get(): if event.type", "EASYMODE DIFFICULTY = 0 score = playerIndex = loopIter = 0 playerIndexGen =", "self.button_rect = self.button_surf.get_rect(center=pos) surf.blit(self.button_surf, self.button_rect) self.button_rect.move_ip(self.xpos, self.ypos) # move of button box to", "upperPipes, 
lowerPipes = crashInfo['upperPipes'], crashInfo['lowerPipes'] # play hit and die sounds SOUNDS['hit'].play() if", "SOUNDS: SOUNDS[sounds].set_volume(SLIDER.val) #(2) key for easymode if (event.type == KEYDOWN and event.key ==", "1) % 3 == 0: playerIndex = next(playerIndexGen) loopIter = (loopIter + 1)", "lowerPipes = [] # list of particles # a particle is an object", "cycle([0, 1, 2, 1]) # iterator used to change playerIndex after every 5th", "/ 2), 'y': newPipe2[0]['y']}, ] # list of lowerpipe lowerPipes = [ {'x':", "must return which pipe is colliding (lower or upper) if FURYMODE: return [True,", "playerIndex}, upperPipes, lowerPipes) # 추가된 부분 coinTest = checkCoin({'x': playerx, 'y': playery, 'index':", "import cycle from operator import itemgetter import random import sys import math import", "position-y, # 'vx': velocity-x, 'vy': velocity-y, # 'i': index in textures list} particles", "((screen.get_width() / 2) - 75, (screen.get_height() / 2) - 50)) screen.blit(fontobject.render(message, 1, (255,255,255)),", "# remove first pipe if its out of the screen if upperPipes[0]['x'] <", "sys.exit() if event.type == pygame.MOUSEBUTTONDOWN: pos = pygame.mouse.get_pos() if SLIDER.button_rect.collidepoint(pos): SLIDER.hit = True", "dynamic graphics in a copy of the basic slide surface \"\"\" # static", "* 0.1)) Xoffset += IMAGES['numbers'][digit].get_width() def spawnParticles(particles, pipe): \"\"\" Add paticles to the", "ground if player['y'] + player['h'] >= BASEY - 1: return [True, True] else:", "= random.randint(0, len(PIPES_LIST) - 1) IMAGES['pipe'] = ( pygame.transform.rotate( pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), 180), pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), )", "2) + 75, (SCREEN.get_height() / 2) -160 + (50*i))) FPSCLOCK.tick(FPS) pygame.display.update() def playerShm(playerShm):", "[True, False, False, lPipe] # normal mode return [True, False] return [False, False]", "\"\".join(current_string) class Slider(): def 
__init__(self, val, maxi, mini, xpos, ypos): self.val = val", "basex change if (loopIter + 1) % 3 == 0: playerIndex = next(playerIndexGen)", "= maxi # maximum at slider position right self.mini = mini # minimum", "playerIndex after every 5th iteration loopIter = 0 playerx = int(SCREENWIDTH * 0.2)", "coins.pop(0) # check for score playerMidPos = playerx + IMAGES['player'][0].get_width() / 2 for", "= 3 # angular speed playerRotThr = 20 # rotation threshold playerFlapAcc =", "추가된 부분 def getRandomCoin(): \"\"\" returns a randomly generated coin \"\"\" coinY =", "for lPipe in lowerPipes: SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y'])) # 추가된 부분 for coin in", "playerShmVals['val'], 'basex': basex, 'playerIndexGen': playerIndexGen, } # (1) key for fury mode if", "pygame.display.update() FPSCLOCK.tick(FPS) def showGameOverScreen(crashInfo): \"\"\"crashes the player down ans shows gameover image\"\"\" global", "display_box(screen, question + \": \" + \"\".join(current_string)) return \"\".join(current_string) class Slider(): def __init__(self,", ": SCREEN.blit(fontobject.render(TOPFIVE[i][0], 1, RED),((SCREEN.get_width() / 2) - 100, (SCREEN.get_height() / 2) -160 +", "KEYDOWN and event.key == K_2) or ((event.type == MOUSEBUTTONDOWN and event.button == 1)", "list of lowerpipe lowerPipes = [] # list of particles # a particle", "playerIndexGen, } # adjust playery, playerIndex, basex if (loopIter + 1) % 5", "event.key == K_UP)) or ((event.type == MOUSEBUTTONDOWN and event.button == 1) and IMAGES['hardmode'].get_rect(center=(hardmodex+54,hardmodey+14)).collidepoint(pygame.mouse.get_pos())):", "FURYMODE: # list of upper pipes upperPipes = [] # list of lowerpipe", "'upperPipes': upperPipes, 'lowerPipes': lowerPipes, 'score': score, 'playerVelY': playerVelY, 'playerRot': playerRot } # 추가된", "= pygame.image.load('assets/sprites/base.png').convert_alpha() # the \"fury mode\" button for welcome screen (with the key)", "self.val = self.mini if self.val > self.maxi: self.val = 
self.maxi if __name__ ==", "== 1) and IMAGES['easymode'].get_rect(center=(easymodex+54,easymodey+14)).collidepoint(pygame.mouse.get_pos())): EASYMODE = True # make first flap sound and", "(50*i))) SCREEN.blit(fontobject.render(str(TOPFIVE[i][1]), 1,RED),((SCREEN.get_width() / 2) + 75, (SCREEN.get_height() / 2) -160 + (50*i)))", "200 + (SCREENWIDTH / 2), 'y': newPipe2[1]['y']}, ] # 추가된 부분 newCoin1 =", "score += 1 SOUNDS['point'].play() # playerIndex basex change if (loopIter + 1) %", "count=0 gameover = True basex = crashInfo['basex'] upperPipes, lowerPipes = crashInfo['upperPipes'], crashInfo['lowerPipes'] #", "playery = int((SCREENHEIGHT - IMAGES['player'][0].get_height()) / 2) messagex = int((SCREENWIDTH - IMAGES['message'].get_width()) /", "to touch left of screen if 0 < upperPipes[0]['x'] < 5: newPipe =", "self.mini = mini # minimum at slider position left self.xpos = xpos #", "0.12) easymodex = int((SCREENWIDTH - IMAGES['easymode'].get_width())/2) easymodey = int(SCREENHEIGHT * 0.68) hardmodex =", "basex = -((-basex + 100) % baseShift) # rotate the player if playerRot", "추가된 부분 newCoin1 = getRandomCoin() newCoin2 = getRandomCoin() coins = [ {'x': SCREENWIDTH", "# volume slider(defaultValue, maximum, minimum, Xposition, Yposition) SLIDER = Slider(0.5, 1, 0, 190,", "45 playerHeight = IMAGES['player'][playerIndex].get_height() playery += min(playerVelY, BASEY - playery - playerHeight) #", "= pygame.image.load('assets/sprites/coin.png').convert_alpha() # sounds if 'win' in sys.platform: soundExt = '.wav' else: soundExt", "fury mode # pipes are green if pipeindex == 0: IMAGES['pipe-particle'] = (", "playerShm(playerShm): \"\"\"oscillates the value of playerShm['val'] between 8 and -8\"\"\" if abs(playerShm['val']) ==", "= SCREENWIDTH + 10 return [ {'x': pipeX, 'y': gapY - pipeHeight}, #", "velocity-x, 'vy': velocity-y, # 'i': index in textures list} particles = [] #", "True elif event.type == pygame.MOUSEBUTTONUP: SLIDER.hit = False # Move volume slider if", 
"SOUNDS['hit'].play() def checkCrash(player, upperPipes, lowerPipes): \"\"\"returns True if player collders with base or", "pipe is colliding (lower or upper) if FURYMODE: return [True, False, True, uPipe]", "for player HITMASKS['player'] = ( getHitmask(IMAGES['player'][0]), getHitmask(IMAGES['player'][1]), getHitmask(IMAGES['player'][2]), ) # 추가된 부분 HITMASKS['coin']", "message in a box in the middle of the screen\" pygame.draw.rect(screen, (0,0,0), ((screen.get_width()", "between upper and lower part of pipe BASEY=SCREENHEIGHT * 0.79 # image, sound", "= random.randint(0, len(BACKGROUNDS_LIST) - 1) IMAGES['background'] = pygame.image.load(BACKGROUNDS_LIST[randBg]).convert() # select random player sprites", "base or pipes.\"\"\" global FURYMODE pi = player['index'] player['w'] = IMAGES['player'][0].get_width() player['h'] =", "204,24), 1) if len(message) != 0: screen.blit(fontobject1.render(\"HIGH SCORE!!!\", 1, (255,255,255)), ((screen.get_width() / 2)", "particle list randomly generated with pipe's rectangle (hitbox) \"\"\" global FURYMODE_PARTICLES, FURYMODE_PARTICLES_MAX, SOUNDS", "playerx = int(SCREENWIDTH * 0.2) playery = int((SCREENHEIGHT - IMAGES['player'][0].get_height()) / 2) messagex", "IMAGES['numbers'][digit].get_width() def spawnParticles(particles, pipe): \"\"\" Add paticles to the particle list randomly generated", "(160,15)) else : SCREEN.blit(IMAGES['speaker'][1], (160,15)) SCREEN.blit(IMAGES['player'][playerIndex], (playerx, playery + playerShmVals['val'])) SCREEN.blit(IMAGES['message'], (messagex, messagey))", "# hismask for pipes HITMASKS['pipe'] = ( getHitmask(IMAGES['pipe'][0]), getHitmask(IMAGES['pipe'][1]), ) # hitmask for", "# angular speed playerRotThr = 20 # rotation threshold playerFlapAcc = -9 #", "# speed particle['x'] += particle['vx'] particle['y'] += particle['vy'] # gravity particle['vy'] += playerAccY", "as playerFlapped playerMaxVelY = 10 # max vel along Y, max descend speed", "(255,255,255)), ((screen.get_width() / 2) - 100, 
(screen.get_height() / 2) - 10)) pygame.display.flip() def", "* 10 \"\"\" returns a randomly generated pipe \"\"\" # y of gap", "2) furymodey = int(SCREENHEIGHT * 0.80) # just at right of the fury", "mode return [True, False] for lPipe in lowerPipes: # pipe rect lPipeRect =", "playerVelY, 'playerRot': playerRot } # 추가된 부분 if coinTest[0]: score += 1 SOUNDS['point'].play()", "False, lPipe] # normal mode return [True, False] return [False, False] # 추가된", "- playery - playerHeight) # player velocity change if playerVelY < 15: playerVelY", "BLUE = (50, 50, 255) GREY = (200, 200, 200) ORANGE = (200,", "PLAYERS_LIST = ( # red bird ( 'assets/sprites/redbird-upflap.png', 'assets/sprites/redbird-midflap.png', 'assets/sprites/redbird-downflap.png', ), # blue", "< self.mini: self.val = self.mini if self.val > self.maxi: self.val = self.maxi if", "+ 100) % baseShift) # rotate the player if playerRot > -90: playerRot", "SOUNDS['wing'].play() return { 'playery': playery + playerShmVals['val'], 'basex': basex, 'playerIndexGen': playerIndexGen, } #", "pos = pygame.mouse.get_pos() if SLIDER.button_rect.collidepoint(pos): SLIDER.hit = True elif event.type == pygame.MOUSEBUTTONUP: SLIDER.hit", "pipeMidPos + 4: score += 1 SOUNDS['point'].play() # playerIndex basex change if (loopIter", "slider position right self.mini = mini # minimum at slider position left self.xpos", "{'val': 0, 'dir': 1} # initialize volume for sound in SOUNDS: SOUNDS[sound].set_volume(SLIDER.val) while", "10 # max vel along Y, max descend speed playerMinVelY = -8 #", "SLIDER pygame.init() FPSCLOCK = pygame.time.Clock() SCREEN = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT)) pygame.display.set_caption('Flappy Bird') # numbers", "return { 'playery': playery + playerShmVals['val'], 'basex': basex, 'playerIndexGen': playerIndexGen, } # adjust", "pipe BASEY=SCREENHEIGHT * 0.79 # image, sound and hitmask dicts IMAGES, SOUNDS, HITMASKS", "0.6)) coinX = SCREENWIDTH + 100 return [ {'x': coinX, 'y': coinY}, ]", "for y 
in xrange(rect.height): if hitmask1[x1+x][y1+y] and hitmask2[x2+x][y2+y]: return True return False def", "playerShm['val'] between 8 and -8\"\"\" if abs(playerShm['val']) == 8: playerShm['dir'] *= -1 if", "SCREENWIDTH + 200, 'y': newPipe1[1]['y']}, {'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2),", "= ( pygame.image.load('assets/sprites/particles-green-0.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-1.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-2.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-3.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-4.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-5.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-6.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-7.png').convert_alpha(), ) else: IMAGES['pipe-particle']", "= HITMASKS['coin'][0] cCollide = pixelCollision(playerRect, coinRect, pHitMask, cHitMask) if cCollide : return [True,", "( pygame.image.load('assets/sprites/0.png').convert_alpha(), pygame.image.load('assets/sprites/1.png').convert_alpha(), pygame.image.load('assets/sprites/2.png').convert_alpha(), pygame.image.load('assets/sprites/3.png').convert_alpha(), pygame.image.load('assets/sprites/4.png').convert_alpha(), pygame.image.load('assets/sprites/5.png').convert_alpha(), pygame.image.load('assets/sprites/6.png').convert_alpha(), pygame.image.load('assets/sprites/7.png').convert_alpha(), pygame.image.load('assets/sprites/8.png').convert_alpha(), pygame.image.load('assets/sprites/9.png').convert_alpha() ) #", "= random.randint(0, len(PLAYERS_LIST) - 1) IMAGES['player'] = ( pygame.image.load(PLAYERS_LIST[randPlayer][0]).convert_alpha(), pygame.image.load(PLAYERS_LIST[randPlayer][1]).convert_alpha(), pygame.image.load(PLAYERS_LIST[randPlayer][2]).convert_alpha(), ) #", "and event.key == K_1) or ((event.type == MOUSEBUTTONDOWN and event.button == 1) 
and", "# y of gap between upper and lower pipe gapY = random.randrange(0, int(BASEY", "False x1, y1 = rect.x - rect1.x, rect.y - rect1.y x2, y2 =", "are green if pipeindex == 0: IMAGES['pipe-particle'] = ( pygame.image.load('assets/sprites/particles-green-0.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-1.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-2.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-3.png').convert_alpha(),", "SOUNDS['wing'] = pygame.mixer.Sound('assets/audio/wing' + soundExt) # volume slider(defaultValue, maximum, minimum, Xposition, Yposition) SLIDER", "pygame.image.load(PLAYERS_LIST[randPlayer][0]).convert_alpha(), pygame.image.load(PLAYERS_LIST[randPlayer][1]).convert_alpha(), pygame.image.load(PLAYERS_LIST[randPlayer][2]).convert_alpha(), ) # select random pipe sprites pipeindex = random.randint(0, len(PIPES_LIST)", "if the user plays the fury mode FURYMODE = False EASYMODE = False", "lHitmask = HITMASKS['pipe'][0] # if bird collided with pipe lCollide = pixelCollision(playerRect, lPipeRect,", "sprite IMAGES['speaker'] = (pygame.image.load('assets/sprites/speaker_full.png').convert_alpha(), pygame.image.load('assets/sprites/speaker_mute.png').convert_alpha()) # 추가된 부분 IMAGES['coin'] = pygame.image.load('assets/sprites/coin.png').convert_alpha() # sounds", "playerMinVelY = -8 # min vel along Y, max ascend speed playerAccY =", "and pipe hitmasks pHitMask = HITMASKS['player'][pi] lHitmask = HITMASKS['pipe'][0] # if bird collided", "upper pipes upperPipes = [ {'x': SCREENWIDTH + 200, 'y': newPipe1[0]['y']}, {'x': SCREENWIDTH", "and hitmask2[x2+x][y2+y]: return True return False def writeScore(score): TOPFIVE.append((ask(SCREEN,\"NAME: \"),score)) TOPFIVE.sort(key=itemgetter(1),reverse= True) TOPFIVE.pop()", "slider movement due to mouse interaction # Static graphics - slider background #", "# 추가된 부분 IMAGES['coin'] = 
pygame.image.load('assets/sprites/coin.png').convert_alpha() # sounds if 'win' in sys.platform: soundExt", "coin in coins: coinRect = pygame.Rect(coin['x'], coin['y'], coinW, coinH) pHitMask = HITMASKS['player'][pi] cHitMask", "no need to spawn pipes at start if FURYMODE: # list of upper", ") # select random pipe sprites pipeindex = random.randint(0, len(PIPES_LIST) - 1) IMAGES['pipe']", "newPipe1[1]['y']}, {'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y': newPipe2[1]['y']}, ] #", ": SCREEN.blit(IMAGES['speaker'][1], (160,15)) SCREEN.blit(IMAGES['player'][playerIndex], (playerx, playery + playerShmVals['val'])) SCREEN.blit(IMAGES['message'], (messagex, messagey)) SCREEN.blit(IMAGES['base'], (basex,", "pygame.display.flip() def ask(screen, question): \"ask(screen, question) -> answer\" pygame.font.init() current_string = [] display_box(screen,", "] # 추가된 부분 def getRandomCoin(): \"\"\" returns a randomly generated coin \"\"\"", "# the hit attribute indicates slider movement due to mouse interaction # Static", "# image, sound and hitmask dicts IMAGES, SOUNDS, HITMASKS = {}, {}, {}", "or (event.type == MOUSEBUTTONDOWN and event.button == 1): if playery > -2 *", "# player velocity, max velocity, downward accleration, accleration on flap playerVelY = -9", "# sounds if 'win' in sys.platform: soundExt = '.wav' else: soundExt = '.ogg'", "movement of the slider button. 
\"\"\" self.val = (pygame.mouse.get_pos()[0] - self.xpos - 10)", "sound and return values for mainGame SOUNDS['wing'].play() return { 'playery': playery + playerShmVals['val'],", "for coin in coins: coinRect = pygame.Rect(coin['x'], coin['y'], coinW, coinH) pHitMask = HITMASKS['player'][pi]", "FURYMODE_FRAMES_TO_SPAWN_PIPES, EASYMODE DIFFICULTY = 0 score = playerIndex = loopIter = 0 playerIndexGen", "SCREEN.blit(IMAGES['speaker'][1], (160,15)) SCREEN.blit(IMAGES['player'][playerIndex], (playerx, playery + playerShmVals['val'])) SCREEN.blit(IMAGES['message'], (messagex, messagey)) SCREEN.blit(IMAGES['base'], (basex, BASEY))", "the value of playerShm['val'] between 8 and -8\"\"\" if abs(playerShm['val']) == 8: playerShm['dir']", "player['w'], player['h']) coinW = IMAGES['coin'].get_width() coinH = IMAGES['coin'].get_height() for coin in coins: coinRect", "import sys import math import pygame from pygame.locals import * import time WHITE", "{'x': SCREENWIDTH + 280, 'y': newCoin1[0]['y']}, {'x': SCREENWIDTH + 280 + (SCREENWIDTH /", "elif(gameover == True): SCREEN.blit(IMAGES['gameover'], (overx,overy)) pygame.display.update() gameover = False pygame.time.delay(1000) showLeaderboard() FPSCLOCK.tick(FPS) pygame.display.update()", "button for welcome screen (with the key) IMAGES['furymode'] = pygame.image.load('assets/sprites/furymode.png').convert_alpha() IMAGES['furymode-key'] = pygame.image.load('assets/sprites/furymode-key.png').convert_alpha()", "getRandomCoin() coins = [ {'x': SCREENWIDTH + 280, 'y': newCoin1[0]['y']}, {'x': SCREENWIDTH +", "% 30 basex = -((-basex + 4) % baseShift) playerShm(playerShmVals) # draw sprites", "with attributes: # {'x': position-x, 'y': position-y, # 'vx': velocity-x, 'vy': velocity-y, #", "pygame.image.load('assets/sprites/particles-green-0.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-1.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-2.png').convert_alpha(), 
pygame.image.load('assets/sprites/particles-green-3.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-4.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-5.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-6.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-7.png').convert_alpha(), ) else: IMAGES['pipe-particle'] = (", "downward accleration, accleration on flap playerVelY = -9 # player's velocity along Y,", "particle['y'] = random.randint(pipe['y'], pipe['y'] + pipeH) particle['i'] = random.randint(1, FURYMODE_PARTICLES) - 1 #", "'W', pygame.K_x: 'X', pygame.K_y: 'Y', pygame.K_z: 'Z'} def main(): global SCREEN, FPSCLOCK, SLIDER", "lowerPipes.append(pipes[1]) # check if a pipe must be removed from the list for", "break the pipe so we # must return which pipe is colliding (lower", "effect SOUNDS['hit'].play() def checkCrash(player, upperPipes, lowerPipes): \"\"\"returns True if player collders with base", "if (loopIter + 1) % 5 == 0: playerIndex = next(playerIndexGen) loopIter =", "welcome screen playerShmVals = {'val': 0, 'dir': 1} # initialize volume for sound", "playerRot } # 추가된 부분 if coinTest[0]: score += 1 SOUNDS['point'].play() coins.pop(0) #", "'O', pygame.K_p: 'P', pygame.K_q: 'Q', pygame.K_r: 'R', pygame.K_s: 'S', pygame.K_t: 'T', pygame.K_u: 'U',", "(pygame.image.load('assets/sprites/speaker_full.png').convert_alpha(), pygame.image.load('assets/sprites/speaker_mute.png').convert_alpha()) # 추가된 부분 IMAGES['coin'] = pygame.image.load('assets/sprites/coin.png').convert_alpha() # sounds if 'win' in", "= ypos # y-location on screen self.surf = pygame.surface.Surface((95, 40)) self.hit = False", "uPipe in upperPipes: if uPipe['x'] < -IMAGES['pipe'][0].get_width(): upperPipes.remove(uPipe) for lPipe in lowerPipes: if", "coinRect, pHitMask, cHitMask) if cCollide : return [True, False] return [False, False] def", "FURYMODE_PARTICLES_MAX, SOUNDS pipeW = 
IMAGES['pipe'][0].get_width() pipeH = IMAGES['pipe'][0].get_height() for i in range(FURYMODE_PARTICLES_MAX): particle", "vel along Y, max descend speed playerMinVelY = -8 # min vel along", "and event.button == 1) and IMAGES['furymode'].get_rect(center=(furymodex+54,furymodey+14)).collidepoint(pygame.mouse.get_pos())): # make first flap sound and return", "<= 127: current_string.append(chr(inkey)) display_box(screen, question + \": \" + \"\".join(current_string)) return \"\".join(current_string) class", "0, 255) TRANS = (1, 1, 1) TOPFIVE=[('kim',0),('kim',0),('kim',0),('kim',0),('kim',0)] FPS = 30 SCREENWIDTH =", "player y shift if playery + playerHeight < BASEY - 1: playery +=", "== K_RETURN: break elif inkey == K_MINUS: current_string.append(\"_\") elif inkey <= 127: current_string.append(chr(inkey))", "# list of lowerpipe lowerPipes = [ {'x': SCREENWIDTH + 200, 'y': newPipe1[1]['y']},", "= 0 playerx = int(SCREENWIDTH * 0.2) playery = int((SCREENHEIGHT - IMAGES['player'][0].get_height()) /", "pygame.K_r: 'R', pygame.K_s: 'S', pygame.K_t: 'T', pygame.K_u: 'U', pygame.K_v: 'V', pygame.K_w: 'W', pygame.K_x:", "== KEYDOWN and (event.key == K_SPACE or event.key == K_UP)) or ((event.type ==", "1: return # player y shift if playery + playerHeight < BASEY -", "= False EASYMODE = False score = crashInfo['score'] playerx = SCREENWIDTH * 0.2", "rect = rect1.clip(rect2) if rect.width == 0 or rect.height == 0: return False", "normal mode return [True, False] for lPipe in lowerPipes: # pipe rect lPipeRect", "SCREEN.blit(IMAGES['gameover'], (overx,overy)) pygame.display.update() gameover = False pygame.time.delay(1000) showLeaderboard() FPSCLOCK.tick(FPS) pygame.display.update() def showLeaderboard(): fontobject", "+ playerShmVals['val'])) SCREEN.blit(IMAGES['message'], (messagex, messagey)) SCREEN.blit(IMAGES['base'], (basex, BASEY)) SCREEN.blit(IMAGES['easymode'],(easymodex,easymodey)) SCREEN.blit(IMAGES['hardmode'],(hardmodex,hardmodey)) SCREEN.blit(IMAGES['furymode'], 
(furymodex, furymodey))", "pipeH) particle['i'] = random.randint(1, FURYMODE_PARTICLES) - 1 # random angle for a minimum", "elif event.type == pygame.MOUSEBUTTONUP: SLIDER.hit = False # Move volume slider if SLIDER.hit:", "playerAccY = 2 playerRot = crashInfo['playerRot'] playerVelRot = 7 count=0 gameover = True", "'y': newPipe2[1]['y']}, ] # 추가된 부분 newCoin1 = getRandomCoin() newCoin2 = getRandomCoin() coins", "# 추가된 부분 if coins[0]['x'] < -IMAGES['coin'].get_width(): coins.pop(0) # draw sprites SCREEN.blit(IMAGES['background'], (0,0))", "추가된 부분 for coin in coins: SCREEN.blit(IMAGES['coin'], (coin['x'], coin['y'])) # pipes' particles if", "range class Keyboard(object): keys = {pygame.K_a: 'A', pygame.K_b: 'B', pygame.K_c: 'C', pygame.K_d: 'D',", "= math.pi * .25 angle = random.random() * (aMax - aMin) + aMin", "EASYMODE: DIFFICULTY = 4 # get 2 new pipes to add to upperPipes", "pygame.display.update() gameover = False pygame.time.delay(1000) showLeaderboard() FPSCLOCK.tick(FPS) pygame.display.update() def showLeaderboard(): fontobject = pygame.font.Font(None,30)", "pygame.K_v: 'V', pygame.K_w: 'W', pygame.K_x: 'X', pygame.K_y: 'Y', pygame.K_z: 'Z'} def main(): global", "not crashInfo['groundCrash']: if playerRot > -90: playerRot -= playerVelRot # draw sprites overx", "+ self.mini if self.val < self.mini: self.val = self.mini if self.val > self.maxi:", "and particles if FURYMODE: furymodePipeFrameCounter += 1 # the counter has the max", "playerVelY += playerAccY # rotate only when it's a pipe crash if not", "playerAccY if playerFlapped: playerFlapped = False # more rotation to cover the threshold", "fontobject = pygame.font.Font(None,30) SCREEN.blit(IMAGES['background'],(0,0)) SCREEN.blit(pygame.font.Font(None,50).render(\"LEADERBOARD\", 1, RED),((SCREEN.get_width() / 2) -132, (SCREEN.get_height() / 2)", "event.key == K_ESCAPE): pygame.quit() sys.exit() if event.type == pygame.MOUSEBUTTONDOWN: pos = pygame.mouse.get_pos() if", "pipe lCollide = 
pixelCollision(playerRect, lPipeRect, pHitMask, lHitmask) if lCollide: # for fury mode", "'index': playerIndex}, upperPipes, lowerPipes) # 추가된 부분 coinTest = checkCoin({'x': playerx, 'y': playery,", "if rect.width == 0 or rect.height == 0: return False x1, y1 =", "0) pygame.draw.rect(screen, (255,255,255), ((screen.get_width() / 2) - 102, (screen.get_height() / 2) - 12,", "of the slider button. \"\"\" self.val = (pygame.mouse.get_pos()[0] - self.xpos - 10) /", "# rotate the player if playerRot > -90: playerRot -= playerVelRot # player's", "# select random background sprites randBg = random.randint(0, len(BACKGROUNDS_LIST) - 1) IMAGES['background'] =", "lowerPipes): SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y'])) SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y'])) SCREEN.blit(IMAGES['base'], (basex, BASEY)) showScore(score) playerSurface =", "totalWidth += IMAGES['numbers'][digit].get_width() Xoffset = (SCREENWIDTH - totalWidth) / 2 for digit in", "0 or rect.height == 0: return False x1, y1 = rect.x - rect1.x,", "pygame.image.load('assets/sprites/easymode.png').convert_alpha() IMAGES['hardmode'] = pygame.image.load('assets/sprites/hardmode.png').convert_alpha() # speaker sprite IMAGES['speaker'] = (pygame.image.load('assets/sprites/speaker_full.png').convert_alpha(), pygame.image.load('assets/sprites/speaker_mute.png').convert_alpha()) # 추가된", "not playerFlapped: playerVelY += playerAccY if playerFlapped: playerFlapped = False # more rotation", "= -math.pi * .35 aMax = math.pi * .25 angle = random.random() *", "if (event.type == KEYDOWN and event.key == K_2) or ((event.type == MOUSEBUTTONDOWN and", "= (200, 200, 200) ORANGE = (200, 100, 50) CYAN = (0, 255,", "overy = int(SCREENHEIGHT * 0.5) #SCREEN.blit(IMAGES['background'], (0,0)) for uPipe, lPipe in zip(upperPipes, lowerPipes):", "self.button_surf.fill(TRANS) self.button_surf.set_colorkey(TRANS) pygame.draw.circle(self.button_surf, ORANGE, (6, 6), 6, 0) def draw(self): \"\"\" Combination of", 
"coinTest = checkCoin({'x': playerx, 'y': playery, 'index': playerIndex}, coins) if crashTest[0]: # the", "y-location on screen self.surf = pygame.surface.Surface((95, 40)) self.hit = False # the hit", "# rotation threshold playerFlapAcc = -9 # players speed on flapping playerFlapped =", "[5, 30, 80, 5], 0) # dynamic graphics - button surface # self.button_surf", "at right of the fury mode button (8 is right padding) furymodeKeyX =", "SLIDER.hit = True elif event.type == pygame.MOUSEBUTTONUP: SLIDER.hit = False # Move volume", "overlaps the score showScore(score) # Player rotation has a threshold visibleRot = playerRotThr", "random.random() * (aMax - aMin) + aMin particle['vx'] = math.cos(angle) * vel particle['vy']", "fury mode if FURYMODE and not crashTest[1]: spawnParticles(particles, crashTest[3]) # remove the pipe", "2) - 102, (screen.get_height() / 2) - 12, 204,24), 1) if len(message) !=", "a copy of the basic slide surface \"\"\" # static surf = self.surf.copy()", "SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y'])) for lPipe in lowerPipes: SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y'])) # 추가된 부분", "add to upperPipes lowerPipes list newPipe1 = getRandomPipe(DIFFICULTY) newPipe2 = getRandomPipe(DIFFICULTY) # list", "EASYMODE # index of player to blit on screen playerIndex = 0 playerIndexGen", "'playery': playery + playerShmVals['val'], 'basex': basex, 'playerIndexGen': playerIndexGen, } elif (event.type == KEYDOWN", "particle['x'] = random.randint(pipe['x'], pipe['x'] + pipeW) particle['y'] = random.randint(pipe['y'], pipe['y'] + pipeH) particle['i']", "= next(playerIndexGen) loopIter = (loopIter + 1) % 30 basex = -((-basex +", "is different than in # normal mode, we add pipes with a \"timer\"", "for score display IMAGES['numbers'] = ( pygame.image.load('assets/sprites/0.png').convert_alpha(), pygame.image.load('assets/sprites/1.png').convert_alpha(), pygame.image.load('assets/sprites/2.png').convert_alpha(), 
pygame.image.load('assets/sprites/3.png').convert_alpha(), pygame.image.load('assets/sprites/4.png').convert_alpha(), pygame.image.load('assets/sprites/5.png').convert_alpha(), pygame.image.load('assets/sprites/6.png').convert_alpha(),", "soundExt) # volume slider(defaultValue, maximum, minimum, Xposition, Yposition) SLIDER = Slider(0.5, 1, 0,", "SOUNDS['die'] = pygame.mixer.Sound('assets/audio/die' + soundExt) SOUNDS['hit'] = pygame.mixer.Sound('assets/audio/hit' + soundExt) SOUNDS['point'] = pygame.mixer.Sound('assets/audio/point'", "pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), ) # pipes' particles for fury mode # pipes are green if", "\"\"\" self.val = (pygame.mouse.get_pos()[0] - self.xpos - 10) / 80 * (self.maxi -", "'Z'} def main(): global SCREEN, FPSCLOCK, SLIDER pygame.init() FPSCLOCK = pygame.time.Clock() SCREEN =", "flap sound and return values for mainGame SOUNDS['wing'].play() return { 'playery': playery +", "coins = [ {'x': SCREENWIDTH + 280, 'y': newCoin1[0]['y']}, {'x': SCREENWIDTH + 280", "pipe \"\"\" # y of gap between upper and lower pipe gapY =", "get_key() if inkey == K_BACKSPACE: current_string = current_string[0:-1] elif inkey == K_RETURN: break", "lower pipe else: lowerPipes.remove(crashTest[3]) else: return { 'y': playery, 'groundCrash': crashTest[1], 'basex': basex,", "- rect2.y for x in xrange(rect.width): for y in xrange(rect.height): if hitmask1[x1+x][y1+y] and", "playery + playerShmVals['val'], 'basex': basex, 'playerIndexGen': playerIndexGen, } elif (event.type == KEYDOWN and", "pipeH) # player and pipe hitmasks pHitMask = HITMASKS['player'][pi] lHitmask = HITMASKS['pipe'][0] #", "base can maximum shift to left 'assets/sprites/bluebird-upflap.png', 'assets/sprites/bluebird-midflap.png', 'assets/sprites/bluebird-downflap.png', ), # yellow bird", "to blit on screen playerIndex = 0 playerIndexGen = cycle([0, 1, 2, 1])", "int(SCREENHEIGHT * 0.80) # just at right of the fury mode button (8", "+ 
IMAGES['furymode-key'].get_height() / 2 basex = 0 # amount by which base can", "- playerHeight) # move pipes to left for uPipe in upperPipes: uPipe['x'] +=", "pipeX = SCREENWIDTH + 10 return [ {'x': pipeX, 'y': gapY - pipeHeight},", "2), 'y': newCoin2[0]['y']}, ] pipeVelX = -4 # player velocity, max velocity, downward", "pygame.surface.Surface((15, 15)) self.button_surf.fill(TRANS) self.button_surf.set_colorkey(TRANS) pygame.draw.circle(self.button_surf, ORANGE, (6, 6), 6, 0) def draw(self): \"\"\"", "self.button_surf.set_colorkey(TRANS) pygame.draw.circle(self.button_surf, ORANGE, (6, 6), 6, 0) def draw(self): \"\"\" Combination of static", "xrange(rect.height): if hitmask1[x1+x][y1+y] and hitmask2[x2+x][y2+y]: return True return False def writeScore(score): TOPFIVE.append((ask(SCREEN,\"NAME: \"),score))", "textures list} particles = [] # 추가된 부분 coins = [] else: if", "= -((-basex + 100) % baseShift) # rotate the player if playerRot >", "is about to touch left of screen if 0 < upperPipes[0]['x'] < 5:", "# start value self.maxi = maxi # maximum at slider position right self.mini", "'V', pygame.K_w: 'W', pygame.K_x: 'X', pygame.K_y: 'Y', pygame.K_z: 'Z'} def main(): global SCREEN,", "xrange(image.get_width()): mask.append([]) for y in xrange(image.get_height()): mask[x].append(bool(image.get_at((x,y))[3])) return mask def get_key(): while 1:", "+ IMAGES['pipe'][0].get_width() / 2 if pipeMidPos <= playerMidPos < pipeMidPos + 4: score", "if (loopIter + 1) % 3 == 0: playerIndex = next(playerIndexGen) loopIter =", "basex = -((-basex + 4) % baseShift) playerShm(playerShmVals) # draw sprites SCREEN.blit(IMAGES['background'], (0,0))", "[False, False] # 추가된 부분 def checkCoin(player, coins): pi = player['index'] player['w'] =", "for coin in coins: SCREEN.blit(IMAGES['coin'], (coin['x'], coin['y'])) # pipes' particles if FURYMODE: for", "inkey == K_BACKSPACE: current_string = current_string[0:-1] elif inkey == K_RETURN: break elif inkey", "True) TOPFIVE.pop() def 
getHitmask(image): \"\"\"returns a hitmask using an image's alpha.\"\"\" mask =", "/ 2 basex = 0 # amount by which base can maximum shift", "playerRot > -90: playerRot -= playerVelRot # player's movement if playerVelY < playerMaxVelY", "of the screen\" pygame.draw.rect(screen, (0,0,0), ((screen.get_width() / 2) - 100, (screen.get_height() / 2)", "= movementInfo['playerIndexGen'] playerx, playery = int(SCREENWIDTH * 0.2), movementInfo['playery'] basex = movementInfo['basex'] baseShift", "user plays the fury mode FURYMODE = False EASYMODE = False # In", "= int(SCREENWIDTH * 0.2), movementInfo['playery'] basex = movementInfo['basex'] baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width()", "def draw(self): \"\"\" Combination of static and dynamic graphics in a copy of", "vel particle['vy'] = math.sin(angle) * vel particles.append(particle) # sound effect SOUNDS['hit'].play() def checkCrash(player,", "upperPipes, lowerPipes): \"\"\"returns True if player collders with base or pipes.\"\"\" global FURYMODE", "threshold (calculated in visible rotation) playerRot = 45 playerHeight = IMAGES['player'][playerIndex].get_height() playery +=", "IMAGES, SOUNDS, HITMASKS = {}, {}, {} # True if the user plays", "), # blue bird ( # amount by which base can maximum shift", "or (event.type == KEYDOWN and event.key == K_ESCAPE): pygame.quit() sys.exit() if (event.type ==", "else: # add new pipes when first pipe is about to touch left", "crashInfo['groundCrash']: SOUNDS['die'].play() while True: for event in pygame.event.get(): if event.type == QUIT or", "1, RED),((SCREEN.get_width() / 2) - 100, (SCREEN.get_height() / 2) -160 + (50*i))) SCREEN.blit(fontobject.render(str(TOPFIVE[i][1]),", "upper and lower pipe gapY = random.randrange(0, int(BASEY * 0.6 - PIPEGAPSIZE)) gapY", "= HITMASKS['player'][pi] lHitmask = HITMASKS['pipe'][0] # if bird collided with pipe lCollide =", "= False # more rotation to cover the threshold (calculated in visible rotation)", "basex = 
crashInfo['basex'] upperPipes, lowerPipes = crashInfo['upperPipes'], crashInfo['lowerPipes'] # play hit and die", "= int((SCREENHEIGHT - IMAGES['player'][0].get_height()) / 2) messagex = int((SCREENWIDTH - IMAGES['message'].get_width()) / 2)", "welcome screen IMAGES['message'] = pygame.image.load('assets/sprites/message.png').convert_alpha() # base (ground) sprite IMAGES['base'] = pygame.image.load('assets/sprites/base.png').convert_alpha() #", "coinH = IMAGES['coin'].get_height() for coin in coins: coinRect = pygame.Rect(coin['x'], coin['y'], coinW, coinH)", "player velocity change if playerVelY < 15: playerVelY += playerAccY # rotate only", "playerHeight = IMAGES['player'][0].get_height() playerVelY = crashInfo['playerVelY'] playerAccY = 2 playerRot = crashInfo['playerRot'] playerVelRot", "8: playerShm['dir'] *= -1 if playerShm['dir'] == 1: playerShm['val'] += 1 else: playerShm['val']", "2) -160 + (50*i))) FPSCLOCK.tick(FPS) pygame.display.update() def playerShm(playerShm): \"\"\"oscillates the value of playerShm['val']", "# move of button box to correct screen position # screen SCREEN.blit(IMAGES['background'], (0,0))", "{ 'playery': playery + playerShmVals['val'], 'basex': basex, 'playerIndexGen': playerIndexGen, } elif (event.type ==", "\": \" + \"\".join(current_string)) while 1: inkey = get_key() if inkey == K_BACKSPACE:", "at slider position right self.mini = mini # minimum at slider position left", "int(SCREENHEIGHT * 0.74) furymodex = int((SCREENWIDTH - IMAGES['furymode'].get_width()) / 2) furymodey = int(SCREENHEIGHT", "{}, {}, {} # True if the user plays the fury mode FURYMODE", "30 basex = -((-basex + 100) % baseShift) # rotate the player if", "pipes at start if FURYMODE: # list of upper pipes upperPipes = []", "the particle list randomly generated with pipe's rectangle (hitbox) \"\"\" global FURYMODE_PARTICLES, FURYMODE_PARTICLES_MAX,", "\"\"\" The dynamic part; reacts to movement of the slider button. 
\"\"\" self.val", "/ 2) - 75, (screen.get_height() / 2) - 50)) screen.blit(fontobject.render(message, 1, (255,255,255)), ((screen.get_width()", "pygame.font.Font(None,30) SCREEN.blit(IMAGES['background'],(0,0)) SCREEN.blit(pygame.font.Font(None,50).render(\"LEADERBOARD\", 1, RED),((SCREEN.get_width() / 2) -132, (SCREEN.get_height() / 2) -220)) for", "players speed on flapping playerFlapped = False # True when player flaps #", "] # 추가된 부분 newCoin1 = getRandomCoin() newCoin2 = getRandomCoin() coins = [", "= getRandomPipe(DIFFICULTY) upperPipes.append(newPipe[0]) lowerPipes.append(newPipe[1]) # 추가된 부분 newCoin = getRandomCoin() coins.append(newCoin[0]) # remove", "# the \"fury mode\" button for welcome screen (with the key) IMAGES['furymode'] =", "[ {'x': SCREENWIDTH + 200, 'y': newPipe1[1]['y']}, {'x': SCREENWIDTH + 200 + (SCREENWIDTH", "pygame.surface.Surface((95, 40)) self.hit = False # the hit attribute indicates slider movement due", "for particle in particles: SCREEN.blit(IMAGES['pipe-particle'][particle['i']], (particle['x'], particle['y'])) SCREEN.blit(IMAGES['base'], (basex, BASEY)) # print score", "crash here crashTest = checkCrash({'x': playerx, 'y': playery, 'index': playerIndex}, upperPipes, lowerPipes) #", "if coins[0]['x'] < -IMAGES['coin'].get_width(): coins.pop(0) # draw sprites SCREEN.blit(IMAGES['background'], (0,0)) for uPipe in", "* .25 angle = random.random() * (aMax - aMin) + aMin particle['vx'] =", "len(BACKGROUNDS_LIST) - 1) IMAGES['background'] = pygame.image.load(BACKGROUNDS_LIST[randBg]).convert() # select random player sprites randPlayer =", "pygame from pygame.locals import * import time WHITE = (255, 255, 255) BLACK", "event.type == pygame.MOUSEBUTTONUP: SLIDER.hit = False # Move volume slider if SLIDER.hit: SLIDER.move()", "SLIDER.move() for sounds in SOUNDS: SOUNDS[sounds].set_volume(SLIDER.val) #(2) key for easymode if (event.type ==", "return [True, False, True, uPipe] # normal mode return [True, False] for lPipe", "5th iteration 
loopIter = 0 playerx = int(SCREENWIDTH * 0.2) playery = int((SCREENHEIGHT", "return which pipe is colliding (lower or upper) if FURYMODE: return [True, False,", "getHitmask(IMAGES['player'][0]), getHitmask(IMAGES['player'][1]), getHitmask(IMAGES['player'][2]), ) # 추가된 부분 HITMASKS['coin'] = ( getHitmask(IMAGES['coin']), ) movementInfo", "uPipe['y'])) SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y'])) SCREEN.blit(IMAGES['base'], (basex, BASEY)) showScore(score) playerSurface = pygame.transform.rotate(IMAGES['player'][1], playerRot) SCREEN.blit(playerSurface,", "gameover = False pygame.time.delay(1000) SCREEN.blit(IMAGES['background'], (0,0)) writeScore(score) count=count+1 pygame.display.update() elif(gameover == True): SCREEN.blit(IMAGES['gameover'],", "for uPipe in upperPipes: # pipe rect uPipeRect = pygame.Rect(uPipe['x'], uPipe['y'], pipeW, pipeH)", "want to break the pipe so we # must return which pipe is", "import random import sys import math import pygame from pygame.locals import * import", "pygame.image.load('assets/sprites/particles-red-6.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-7.png').convert_alpha(), ) # hismask for pipes HITMASKS['pipe'] = ( getHitmask(IMAGES['pipe'][0]), getHitmask(IMAGES['pipe'][1]), )", "sapwn system is different than in # normal mode, we add pipes with", "+ IMAGES['furymode'].get_width() + 8 furymodeKeyY = furymodey + IMAGES['furymode-key'].get_height() / 2 basex =", "% baseShift) playerShm(playerShmVals) # draw sprites SCREEN.blit(IMAGES['background'], (0,0)) SLIDER.draw() if(SLIDER.val>0): SCREEN.blit(IMAGES['speaker'][0], (160,15)) else", "= IMAGES['base'].get_width() - IMAGES['background'].get_width() # player shm for up-down motion on welcome screen", "list of all possible players (tuple of 3 positions of flap) PLAYERS_LIST =", "(loopIter + 1) % 3 == 0: playerIndex = next(playerIndexGen) loopIter = (loopIter", "y2 = rect.x - rect2.x, rect.y - rect2.y for x in xrange(rect.width): for", "score 
showScore(score) # Player rotation has a threshold visibleRot = playerRotThr if playerRot", "# gravity particle['vy'] += playerAccY # remove if the particle is under the", "# 추가된 부분 coins = [] else: if EASYMODE: DIFFICULTY = 4 #", "else: playerRect = pygame.Rect(player['x'], player['y'], player['w'], player['h']) pipeW = IMAGES['pipe'][0].get_width() pipeH = IMAGES['pipe'][0].get_height()", "2 new pipes to add to upperPipes lowerPipes list newPipe1 = getRandomPipe(DIFFICULTY) newPipe2", "= IMAGES['pipe'][0].get_height() for uPipe in upperPipes: # pipe rect uPipeRect = pygame.Rect(uPipe['x'], uPipe['y'],", "/ 2), 'y': newCoin2[0]['y']}, ] pipeVelX = -4 # player velocity, max velocity,", "y in xrange(rect.height): if hitmask1[x1+x][y1+y] and hitmask2[x2+x][y2+y]: return True return False def writeScore(score):", "512 # amount by which base can maximum shift to left PIPEGAPSIZE =", "with a \"timer\" (a frame counter) FURYMODE_FRAMES_TO_SPAWN_PIPES = 35 # pipes particles amount", "K_UP)) or ((event.type == MOUSEBUTTONDOWN and event.button == 1) and IMAGES['hardmode'].get_rect(center=(hardmodex+54,hardmodey+14)).collidepoint(pygame.mouse.get_pos())): SOUNDS['wing'].play() return", "0 playerIndexGen = movementInfo['playerIndexGen'] playerx, playery = int(SCREENWIDTH * 0.2), movementInfo['playery'] basex =", "pygame.time.delay(1000) showLeaderboard() FPSCLOCK.tick(FPS) pygame.display.update() def showLeaderboard(): fontobject = pygame.font.Font(None,30) SCREEN.blit(IMAGES['background'],(0,0)) SCREEN.blit(pygame.font.Font(None,50).render(\"LEADERBOARD\", 1, RED),((SCREEN.get_width()", "has a threshold visibleRot = playerRotThr if playerRot <= playerRotThr: visibleRot = playerRot", "lPipe in lowerPipes: # pipe rect lPipeRect = pygame.Rect(lPipe['x'], lPipe['y'], pipeW, pipeH) #", "sound in SOUNDS: SOUNDS[sound].set_volume(SLIDER.val) while True: for event in pygame.event.get(): if event.type ==", "make first flap sound and return values for mainGame SOUNDS['wing'].play() 
return { 'playery':", "list} particles = [] # 추가된 부분 coins = [] else: if EASYMODE:", "= crashInfo['y'] playerHeight = IMAGES['player'][0].get_height() playerVelY = crashInfo['playerVelY'] playerAccY = 2 playerRot =", "of screen\"\"\" scoreDigits = [int(x) for x in list(str(score))] totalWidth = 0 #", "player collders with base or pipes.\"\"\" global FURYMODE pi = player['index'] player['w'] =", "showLeaderboard() FPSCLOCK.tick(FPS) pygame.display.update() def showLeaderboard(): fontobject = pygame.font.Font(None,30) SCREEN.blit(IMAGES['background'],(0,0)) SCREEN.blit(pygame.font.Font(None,50).render(\"LEADERBOARD\", 1, RED),((SCREEN.get_width() /", "pipes furymodePipeFrameCounter = 0 while True: for event in pygame.event.get(): if event.type ==", "playerFlapAcc = -9 # players speed on flapping playerFlapped = False # True", "player['y'] + player['h'] >= BASEY - 1: return [True, True] else: playerRect =", "max vel along Y, max descend speed playerMinVelY = -8 # min vel", "= crashInfo['playerRot'] playerVelRot = 7 count=0 gameover = True basex = crashInfo['basex'] upperPipes,", "baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width() # player shm for up-down motion on welcome", "pygame.K_d: 'D', pygame.K_e: 'E', pygame.K_f: 'F', pygame.K_g: 'G', pygame.K_h: 'H', pygame.K_i: 'I', pygame.K_j:", "BASEY: particles.remove(particle) else: # add new pipes when first pipe is about to", "EASYMODE = True # make first flap sound and return values for mainGame", "len(PIPES_LIST) - 1) IMAGES['pipe'] = ( pygame.transform.rotate( pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), 180), pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), ) # pipes'", "= HITMASKS['player'][pi] uHitmask = HITMASKS['pipe'][0] # if bird collided with pipe uCollide =", "hitmask2[x2+x][y2+y]: return True return False def writeScore(score): TOPFIVE.append((ask(SCREEN,\"NAME: \"),score)) TOPFIVE.sort(key=itemgetter(1),reverse= True) TOPFIVE.pop() def", "Combination of static 
and dynamic graphics in a copy of the basic slide", "the fury mode button (8 is right padding) furymodeKeyX = furymodex + IMAGES['furymode'].get_width()", "def playerShm(playerShm): \"\"\"oscillates the value of playerShm['val'] between 8 and -8\"\"\" if abs(playerShm['val'])", "'assets/sprites/redbird-midflap.png', 'assets/sprites/redbird-downflap.png', ), # blue bird ( # amount by which base can", "= int(SCREENHEIGHT * 0.12) easymodex = int((SCREENWIDTH - IMAGES['easymode'].get_width())/2) easymodey = int(SCREENHEIGHT *", "pipe gapY = random.randrange(0, int(BASEY * 0.6 - PIPEGAPSIZE)) gapY += int(BASEY *", "def checkCrash(player, upperPipes, lowerPipes): \"\"\"returns True if player collders with base or pipes.\"\"\"", "question) -> answer\" pygame.font.init() current_string = [] display_box(screen, question + \": \" +", "pipes are green if pipeindex == 0: IMAGES['pipe-particle'] = ( pygame.image.load('assets/sprites/particles-green-0.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-1.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-2.png').convert_alpha(),", "up-down motion on welcome screen playerShmVals = {'val': 0, 'dir': 1} # initialize", "= False pygame.time.delay(1000) SCREEN.blit(IMAGES['background'], (0,0)) writeScore(score) count=count+1 pygame.display.update() elif(gameover == True): SCREEN.blit(IMAGES['gameover'], (overx,overy))", "PIPES_LIST = ( 'assets/sprites/pipe-green.png', 'assets/sprites/pipe-red.png', ) try: xrange except NameError: xrange = range", "select random pipe sprites pipeindex = random.randint(0, len(PIPES_LIST) - 1) IMAGES['pipe'] = (", "# update (add / remove) pipes and particles if FURYMODE: furymodePipeFrameCounter += 1", "(event.type == KEYDOWN and event.key == K_1) or ((event.type == MOUSEBUTTONDOWN and event.button", "playerShm(playerShmVals) # draw sprites SCREEN.blit(IMAGES['background'], (0,0)) SLIDER.draw() if(SLIDER.val>0): SCREEN.blit(IMAGES['speaker'][0], (160,15)) else : 
SCREEN.blit(IMAGES['speaker'][1],", "# check for score playerMidPos = playerx + IMAGES['player'][0].get_width() / 2 for pipe", "pygame.mixer.Sound('assets/audio/wing' + soundExt) # volume slider(defaultValue, maximum, minimum, Xposition, Yposition) SLIDER = Slider(0.5,", "speaker sprite IMAGES['speaker'] = (pygame.image.load('assets/sprites/speaker_full.png').convert_alpha(), pygame.image.load('assets/sprites/speaker_mute.png').convert_alpha()) # 추가된 부분 IMAGES['coin'] = pygame.image.load('assets/sprites/coin.png').convert_alpha() #", "= [] # list of particles # a particle is an object with", "+ 4) % baseShift) playerShm(playerShmVals) # draw sprites SCREEN.blit(IMAGES['background'], (0,0)) SLIDER.draw() if(SLIDER.val>0): SCREEN.blit(IMAGES['speaker'][0],", "{ 'playery': playery + playerShmVals['val'], 'basex': basex, 'playerIndexGen': playerIndexGen, } # (1) key", "by which base can maximum shift to left 'assets/sprites/bluebird-upflap.png', 'assets/sprites/bluebird-midflap.png', 'assets/sprites/bluebird-downflap.png', ), #", "messagey)) SCREEN.blit(IMAGES['base'], (basex, BASEY)) SCREEN.blit(IMAGES['easymode'],(easymodex,easymodey)) SCREEN.blit(IMAGES['hardmode'],(hardmodex,hardmodey)) SCREEN.blit(IMAGES['furymode'], (furymodex, furymodey)) SCREEN.blit(IMAGES['furymode-key'], (furymodeKeyX, furymodeKeyY)) pygame.display.update()", "maximum shift to left baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width() # player shm for", "MOUSEBUTTONDOWN and event.button == 1) and IMAGES['easymode'].get_rect(center=(easymodex+54,easymodey+14)).collidepoint(pygame.mouse.get_pos())): EASYMODE = True # make first", "SLIDER = Slider(0.5, 1, 0, 190, 0) while True: # select random background", "= get_key() if inkey == K_BACKSPACE: current_string = current_string[0:-1] elif inkey == K_RETURN:", "can maximum shift to left 'assets/sprites/bluebird-upflap.png', 'assets/sprites/bluebird-midflap.png', 'assets/sprites/bluebird-downflap.png', ), # yellow bird (", "if 
particle['y'] >= BASEY: particles.remove(particle) else: # add new pipes when first pipe", "event.key else: pass def display_box(screen, message): fontobject = pygame.font.Font(None,18) fontobject1 = pygame.font.Font(None,30) \"Print", "self.xpos - 10) / 80 * (self.maxi - self.mini) + self.mini if self.val", "{'x': SCREENWIDTH + 200, 'y': newPipe1[1]['y']}, {'x': SCREENWIDTH + 200 + (SCREENWIDTH /", "playerIndex = 0 playerIndexGen = cycle([0, 1, 2, 1]) # iterator used to", "\"\"\"displays score in center of screen\"\"\" scoreDigits = [int(x) for x in list(str(score))]", "ORANGE, (6, 6), 6, 0) def draw(self): \"\"\" Combination of static and dynamic", "pygame.image.load('assets/sprites/particles-red-2.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-3.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-4.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-5.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-6.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-7.png').convert_alpha(), ) # hismask for pipes HITMASKS['pipe'] =", "(200, 100, 50) CYAN = (0, 255, 255) MAGENTA = (255, 0, 255)", "over sprite IMAGES['gameover'] = pygame.image.load('assets/sprites/gameover.png').convert_alpha() # message sprite for welcome screen IMAGES['message'] =", "# blue bird ( # amount by which base can maximum shift to", "playerSurface = pygame.transform.rotate(IMAGES['player'][1], playerRot) SCREEN.blit(playerSurface, (playerx,playery)) #showScore(score) if (score > TOPFIVE[4][1] and count==0)", "particle['vy'] = math.sin(angle) * vel particles.append(particle) # sound effect SOUNDS['hit'].play() def checkCrash(player, upperPipes,", "coin['y'], coinW, coinH) pHitMask = HITMASKS['player'][pi] cHitMask = HITMASKS['coin'][0] cCollide = pixelCollision(playerRect, coinRect,", "(255,255,255), ((screen.get_width() / 2) - 102, (screen.get_height() / 2) - 12, 204,24), 1)", 
"pygame.Rect(lPipe['x'], lPipe['y'], pipeW, pipeH) # player and pipe hitmasks pHitMask = HITMASKS['player'][pi] lHitmask", "1) % 30 basex = -((-basex + 4) % baseShift) playerShm(playerShmVals) # draw", "mode, we add pipes with a \"timer\" (a frame counter) FURYMODE_FRAMES_TO_SPAWN_PIPES = 35", "or ((event.type == MOUSEBUTTONDOWN and event.button == 1) and IMAGES['furymode'].get_rect(center=(furymodex+54,furymodey+14)).collidepoint(pygame.mouse.get_pos())): # make first", "and not crashTest[1]: spawnParticles(particles, crashTest[3]) # remove the pipe # it's an upper", "= int(SCREENWIDTH * 0.2) playery = int((SCREENHEIGHT - IMAGES['player'][0].get_height()) / 2) messagex =", "two objects collide and not just their rects\"\"\" rect = rect1.clip(rect2) if rect.width", "190, 0) while True: # select random background sprites randBg = random.randint(0, len(BACKGROUNDS_LIST)", "( pygame.image.load('assets/sprites/particles-red-0.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-1.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-2.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-3.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-4.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-5.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-6.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-7.png').convert_alpha(), ) # hismask for", "a randomly generated pipe \"\"\" # y of gap between upper and lower", "2) - 10, 200,20), 0) pygame.draw.rect(screen, (255,255,255), ((screen.get_width() / 2) - 102, (screen.get_height()", "int((SCREENWIDTH - IMAGES['furymode'].get_width()) / 2) furymodey = int(SCREENHEIGHT * 0.80) # just at", "pygame.quit() sys.exit() if (event.type == KEYDOWN and (event.key == K_SPACE or event.key ==", "pygame.K_i: 'I', pygame.K_j: 'J', pygame.K_k: 'K', pygame.K_l: 'L', pygame.K_m: 'M', pygame.K_n: 'N', pygame.K_o:", "# 
self.button_surf = pygame.surface.Surface((15, 15)) self.button_surf.fill(TRANS) self.button_surf.set_colorkey(TRANS) pygame.draw.circle(self.button_surf, ORANGE, (6, 6), 6, 0)", "-IMAGES['pipe'][0].get_width(): upperPipes.remove(uPipe) for lPipe in lowerPipes: if lPipe['x'] < -IMAGES['pipe'][0].get_width(): lowerPipes.remove(lPipe) # particles", "xrange = range class Keyboard(object): keys = {pygame.K_a: 'A', pygame.K_b: 'B', pygame.K_c: 'C',", "True): SCREEN.blit(IMAGES['gameover'], (overx,overy)) pygame.display.update() gameover = False pygame.time.delay(1000) showLeaderboard() FPSCLOCK.tick(FPS) pygame.display.update() def showLeaderboard():", "angle for a minimum velocity vel = random.random() * 10 + 5 aMin", "pygame.K_l: 'L', pygame.K_m: 'M', pygame.K_n: 'N', pygame.K_o: 'O', pygame.K_p: 'P', pygame.K_q: 'Q', pygame.K_r:", "pygame.image.load('assets/sprites/6.png').convert_alpha(), pygame.image.load('assets/sprites/7.png').convert_alpha(), pygame.image.load('assets/sprites/8.png').convert_alpha(), pygame.image.load('assets/sprites/9.png').convert_alpha() ) # game over sprite IMAGES['gameover'] = pygame.image.load('assets/sprites/gameover.png').convert_alpha() #", "pipe spawn pipes = getRandomPipe(DIFFICULTY) upperPipes.append(pipes[0]) lowerPipes.append(pipes[1]) # check if a pipe must", "and IMAGES['furymode'].get_rect(center=(furymodex+54,furymodey+14)).collidepoint(pygame.mouse.get_pos())): # make first flap sound and return values for mainGame FURYMODE", "vel = random.random() * 10 + 5 aMin = -math.pi * .35 aMax", "shm for up-down motion on welcome screen playerShmVals = {'val': 0, 'dir': 1}", "spawn pipes = getRandomPipe(DIFFICULTY) upperPipes.append(pipes[0]) lowerPipes.append(pipes[1]) # check if a pipe must be", "( getHitmask(IMAGES['coin']), ) movementInfo = showWelcomeAnimation() crashInfo = mainGame(movementInfo) showGameOverScreen(crashInfo) def showWelcomeAnimation(): \"\"\"Shows", "KEYDOWN and event.key == K_ESCAPE): pygame.quit() sys.exit() if 
event.type == pygame.MOUSEBUTTONDOWN: pos =", "newPipe2 = getRandomPipe(DIFFICULTY) # list of upper pipes upperPipes = [ {'x': SCREENWIDTH", "* 10 + 5 aMin = -math.pi * .35 aMax = math.pi *", "= getRandomPipe(DIFFICULTY) # list of upper pipes upperPipes = [ {'x': SCREENWIDTH +", "self.button_surf.get_rect(center=pos) surf.blit(self.button_surf, self.button_rect) self.button_rect.move_ip(self.xpos, self.ypos) # move of button box to correct screen", "easymodey = int(SCREENHEIGHT * 0.68) hardmodex = int((SCREENWIDTH - IMAGES['hardmode'].get_width())/2) hardmodey = int(SCREENHEIGHT", "min vel along Y, max ascend speed playerAccY = 1 # players downward", "part of pipe BASEY=SCREENHEIGHT * 0.79 # image, sound and hitmask dicts IMAGES,", "추가된 부분 newCoin = getRandomCoin() coins.append(newCoin[0]) # remove first pipe if its out", "SCREEN.blit(IMAGES['base'], (basex, BASEY)) showScore(score) playerSurface = pygame.transform.rotate(IMAGES['player'][1], playerRot) SCREEN.blit(playerSurface, (playerx,playery)) #showScore(score) if (score", "sys.platform: soundExt = '.wav' else: soundExt = '.ogg' SOUNDS['die'] = pygame.mixer.Sound('assets/audio/die' + soundExt)", "coinY = random.randrange(20, int(BASEY * 0.6)) coinX = SCREENWIDTH + 100 return [", "math import pygame from pygame.locals import * import time WHITE = (255, 255,", "HITMASKS['player'][pi] cHitMask = HITMASKS['coin'][0] cCollide = pixelCollision(playerRect, coinRect, pHitMask, cHitMask) if cCollide :", "+ \": \" + \"\".join(current_string)) return \"\".join(current_string) class Slider(): def __init__(self, val, maxi,", "showWelcomeAnimation(): \"\"\"Shows welcome screen animation of flappy bird\"\"\" global FURYMODE, EASYMODE # index", "(255, 50, 50) YELLOW = (255, 255, 0) GREEN = (0, 255, 50)", "particles = [] # 추가된 부분 coins = [] else: if EASYMODE: DIFFICULTY", "0.5) #SCREEN.blit(IMAGES['background'], (0,0)) for uPipe, lPipe in zip(upperPipes, lowerPipes): SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y'])) 
SCREEN.blit(IMAGES['pipe'][1],", "pygame.K_f: 'F', pygame.K_g: 'G', pygame.K_h: 'H', pygame.K_i: 'I', pygame.K_j: 'J', pygame.K_k: 'K', pygame.K_l:", "30 basex = -((-basex + 4) % baseShift) playerShm(playerShmVals) # draw sprites SCREEN.blit(IMAGES['background'],", "+ 280, 'y': newCoin1[0]['y']}, {'x': SCREENWIDTH + 280 + (SCREENWIDTH / 2), 'y':", "pipeVelX for lPipe in lowerPipes: lPipe['x'] += pipeVelX # 추가된 부분 for coin", "pygame.K_w: 'W', pygame.K_x: 'X', pygame.K_y: 'Y', pygame.K_z: 'Z'} def main(): global SCREEN, FPSCLOCK,", "= pygame.mixer.Sound('assets/audio/point' + soundExt) SOUNDS['swoosh'] = pygame.mixer.Sound('assets/audio/swoosh' + soundExt) SOUNDS['wing'] = pygame.mixer.Sound('assets/audio/wing' +", "player flaps # The counter to spawn new pipes furymodePipeFrameCounter = 0 while", "playerAccY # rotate only when it's a pipe crash if not crashInfo['groundCrash']: if", "returns a randomly generated pipe \"\"\" # y of gap between upper and", "upperPipes.remove(uPipe) for lPipe in lowerPipes: if lPipe['x'] < -IMAGES['pipe'][0].get_width(): lowerPipes.remove(lPipe) # particles for", "pygame.mixer.Sound('assets/audio/point' + soundExt) SOUNDS['swoosh'] = pygame.mixer.Sound('assets/audio/swoosh' + soundExt) SOUNDS['wing'] = pygame.mixer.Sound('assets/audio/wing' + soundExt)", "movement due to mouse interaction # Static graphics - slider background # self.surf.set_colorkey(BLACK)", "pygame.event.get(): if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):", "start value self.maxi = maxi # maximum at slider position right self.mini =", "int(BASEY * 0.2) pipeHeight = IMAGES['pipe'][0].get_height() pipeX = SCREENWIDTH + 10 return [", "getRandomPipe(DIFFICULTY) # list of upper pipes upperPipes = [ {'x': SCREENWIDTH + 200,", "pipes if furymodePipeFrameCounter == FURYMODE_FRAMES_TO_SPAWN_PIPES: # counter reset furymodePipeFrameCounter = 0 # pipe", "coinX, 'y': coinY}, ] def showScore(score): \"\"\"displays score in center of screen\"\"\" 
scoreDigits", "in sys.platform: soundExt = '.wav' else: soundExt = '.ogg' SOUNDS['die'] = pygame.mixer.Sound('assets/audio/die' +", "x in xrange(image.get_width()): mask.append([]) for y in xrange(image.get_height()): mask[x].append(bool(image.get_at((x,y))[3])) return mask def get_key():", ") # 추가된 부분 HITMASKS['coin'] = ( getHitmask(IMAGES['coin']), ) movementInfo = showWelcomeAnimation() crashInfo", "gapY = random.randrange(0, int(BASEY * 0.6 - PIPEGAPSIZE)) gapY += int(BASEY * 0.2)", "pygame.K_b: 'B', pygame.K_c: 'C', pygame.K_d: 'D', pygame.K_e: 'E', pygame.K_f: 'F', pygame.K_g: 'G', pygame.K_h:", "rotation threshold playerFlapAcc = -9 # players speed on flapping playerFlapped = False", "- IMAGES['gameover'].get_width()) / 2) overy = int(SCREENHEIGHT * 0.5) #SCREEN.blit(IMAGES['background'], (0,0)) for uPipe,", "return [False, False] # 추가된 부분 def checkCoin(player, coins): pi = player['index'] player['w']", "HITMASKS['coin'] = ( getHitmask(IMAGES['coin']), ) movementInfo = showWelcomeAnimation() crashInfo = mainGame(movementInfo) showGameOverScreen(crashInfo) def", "screen playerIndex = 0 playerIndexGen = cycle([0, 1, 2, 1]) # iterator used", "playerIndexGen, } elif (event.type == KEYDOWN and (event.key == K_SPACE or event.key ==", "# lower pipe ] # 추가된 부분 def getRandomCoin(): \"\"\" returns a randomly", "pygame.display.update() elif(gameover == True): SCREEN.blit(IMAGES['gameover'], (overx,overy)) pygame.display.update() gameover = False pygame.time.delay(1000) showLeaderboard() FPSCLOCK.tick(FPS)", "+= playerAccY if playerFlapped: playerFlapped = False # more rotation to cover the", "self.hit = False # the hit attribute indicates slider movement due to mouse", "return False x1, y1 = rect.x - rect1.x, rect.y - rect1.y x2, y2", "- playerHeight) # player velocity change if playerVelY < 15: playerVelY += playerAccY", "# draw sprites SCREEN.blit(IMAGES['background'], (0,0)) for uPipe in upperPipes: SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y'])) for", "2) 
- 100, (screen.get_height() / 2) - 10, 200,20), 0) pygame.draw.rect(screen, (255,255,255), ((screen.get_width()", "self.mini if self.val > self.maxi: self.val = self.maxi if __name__ == '__main__': main()", "in xrange(image.get_width()): mask.append([]) for y in xrange(image.get_height()): mask[x].append(bool(image.get_at((x,y))[3])) return mask def get_key(): while", "(lPipe['x'], lPipe['y'])) SCREEN.blit(IMAGES['base'], (basex, BASEY)) showScore(score) playerSurface = pygame.transform.rotate(IMAGES['player'][1], playerRot) SCREEN.blit(playerSurface, (playerx,playery)) #showScore(score)", "== 1): if playery > -2 * IMAGES['player'][0].get_height(): playerVelY = playerFlapAcc playerFlapped =", "pygame.font.Font(None,30) \"Print a message in a box in the middle of the screen\"", "new pipes if furymodePipeFrameCounter == FURYMODE_FRAMES_TO_SPAWN_PIPES: # counter reset furymodePipeFrameCounter = 0 #", "if FURYMODE: for particle in particles: SCREEN.blit(IMAGES['pipe-particle'][particle['i']], (particle['x'], particle['y'])) SCREEN.blit(IMAGES['base'], (basex, BASEY)) #", "minimum at slider position left self.xpos = xpos # x-location on screen self.ypos", "(0,0)) SCREEN.blit(surf, (self.xpos, self.ypos)) def move(self): \"\"\" The dynamic part; reacts to movement", "K_UP)) or (event.type == MOUSEBUTTONDOWN and event.button == 1): if playery + playerHeight", "'C', pygame.K_d: 'D', pygame.K_e: 'E', pygame.K_f: 'F', pygame.K_g: 'G', pygame.K_h: 'H', pygame.K_i: 'I',", "'y': coinY}, ] def showScore(score): \"\"\"displays score in center of screen\"\"\" scoreDigits =", "event.key == K_2) or ((event.type == MOUSEBUTTONDOWN and event.button == 1) and IMAGES['easymode'].get_rect(center=(easymodex+54,easymodey+14)).collidepoint(pygame.mouse.get_pos())):", "= 100 + DIFFICULTY * 10 \"\"\" returns a randomly generated pipe \"\"\"", "return [True, False] return [False, False] def pixelCollision(rect1, rect2, hitmask1, hitmask2): \"\"\"Checks if", "downward accleration playerRot = 45 
# player's rotation playerVelRot = 3 # angular", "we want to break the pipe so we # must return which pipe", "IMAGES['base'].get_width() - IMAGES['background'].get_width() # no need to spawn pipes at start if FURYMODE:", "basex, 'playerIndexGen': playerIndexGen, } # (1) key for fury mode if (event.type ==", "coinY}, ] def showScore(score): \"\"\"displays score in center of screen\"\"\" scoreDigits = [int(x)", "IMAGES['pipe'][0].get_height() for i in range(FURYMODE_PARTICLES_MAX): particle = {} particle['x'] = random.randint(pipe['x'], pipe['x'] +", "/ 2) - 100, (SCREEN.get_height() / 2) -160 + (50*i))) SCREEN.blit(fontobject.render(str(TOPFIVE[i][1]), 1,RED),((SCREEN.get_width() /", "x1, y1 = rect.x - rect1.x, rect.y - rect1.y x2, y2 = rect.x", "uPipe['x'] < -IMAGES['pipe'][0].get_width(): upperPipes.remove(uPipe) for lPipe in lowerPipes: if lPipe['x'] < -IMAGES['pipe'][0].get_width(): lowerPipes.remove(lPipe)", "break elif inkey == K_MINUS: current_string.append(\"_\") elif inkey <= 127: current_string.append(chr(inkey)) display_box(screen, question", "True SOUNDS['wing'].play() return { 'playery': playery + playerShmVals['val'], 'basex': basex, 'playerIndexGen': playerIndexGen, }", "True: # select random background sprites randBg = random.randint(0, len(BACKGROUNDS_LIST) - 1) IMAGES['background']", "player overlaps the score showScore(score) # Player rotation has a threshold visibleRot =", "+ soundExt) # volume slider(defaultValue, maximum, minimum, Xposition, Yposition) SLIDER = Slider(0.5, 1,", "the player hits a pipe in fury mode if FURYMODE and not crashTest[1]:", "threshold playerFlapAcc = -9 # players speed on flapping playerFlapped = False #", "1: event = pygame.event.poll() if event.type == KEYDOWN: return event.key else: pass def", "0.2) playery = int((SCREENHEIGHT - IMAGES['player'][0].get_height()) / 2) messagex = int((SCREENWIDTH - IMAGES['message'].get_width())", "2) -160 + (50*i))) SCREEN.blit(fontobject.render(str(TOPFIVE[i][1]), 
1,RED),((SCREEN.get_width() / 2) + 75, (SCREEN.get_height() / 2)", "of all numbers to be printed for digit in scoreDigits: totalWidth += IMAGES['numbers'][digit].get_width()", "pygame.Rect(player['x'], player['y'], player['w'], player['h']) coinW = IMAGES['coin'].get_width() coinH = IMAGES['coin'].get_height() for coin in", "playerSurface = pygame.transform.rotate(IMAGES['player'][playerIndex], visibleRot) SCREEN.blit(playerSurface, (playerx, playery)) pygame.display.update() FPSCLOCK.tick(FPS) def showGameOverScreen(crashInfo): \"\"\"crashes the", "= int(SCREENHEIGHT * 0.68) hardmodex = int((SCREENWIDTH - IMAGES['hardmode'].get_width())/2) hardmodey = int(SCREENHEIGHT *", "upperPipes lowerPipes list newPipe1 = getRandomPipe(DIFFICULTY) newPipe2 = getRandomPipe(DIFFICULTY) # list of upper", "loopIter = (loopIter + 1) % 30 basex = -((-basex + 4) %", "SOUNDS['wing'].play() return { 'playery': playery + playerShmVals['val'], 'basex': basex, 'playerIndexGen': playerIndexGen, } elif", "+ soundExt) SOUNDS['wing'] = pygame.mixer.Sound('assets/audio/wing' + soundExt) # volume slider(defaultValue, maximum, minimum, Xposition,", "at start if FURYMODE: # list of upper pipes upperPipes = [] #", "SCREEN.blit(IMAGES['easymode'],(easymodex,easymodey)) SCREEN.blit(IMAGES['hardmode'],(hardmodex,hardmodey)) SCREEN.blit(IMAGES['furymode'], (furymodex, furymodey)) SCREEN.blit(IMAGES['furymode-key'], (furymodeKeyX, furymodeKeyY)) pygame.display.update() FPSCLOCK.tick(FPS) def mainGame(movementInfo): global", "if not crashInfo['groundCrash']: SOUNDS['die'].play() while True: for event in pygame.event.get(): if event.type ==", "ans shows gameover image\"\"\" global FURYMODE, EASYMODE FURYMODE = False EASYMODE = False", "mini # minimum at slider position left self.xpos = xpos # x-location on", "[True, False] return [False, False] # 추가된 부분 def checkCoin(player, coins): pi =", "+ soundExt) SOUNDS['swoosh'] = pygame.mixer.Sound('assets/audio/swoosh' + soundExt) SOUNDS['wing'] = 
pygame.mixer.Sound('assets/audio/wing' + soundExt) #", "= 130 # gap between upper and lower part of pipe BASEY=SCREENHEIGHT *", "280, 'y': newCoin1[0]['y']}, {'x': SCREENWIDTH + 280 + (SCREENWIDTH / 2), 'y': newCoin2[0]['y']},", "pygame.K_j: 'J', pygame.K_k: 'K', pygame.K_l: 'L', pygame.K_m: 'M', pygame.K_n: 'N', pygame.K_o: 'O', pygame.K_p:", "return True return False def writeScore(score): TOPFIVE.append((ask(SCREEN,\"NAME: \"),score)) TOPFIVE.sort(key=itemgetter(1),reverse= True) TOPFIVE.pop() def getHitmask(image):", "# base (ground) sprite IMAGES['base'] = pygame.image.load('assets/sprites/base.png').convert_alpha() # the \"fury mode\" button for", "SOUNDS['point'].play() # playerIndex basex change if (loopIter + 1) % 3 == 0:", "def getRandomPipe(DIFFICULTY): PIPEGAPSIZE = 100 + DIFFICULTY * 10 \"\"\" returns a randomly", "position-x, 'y': position-y, # 'vx': velocity-x, 'vy': velocity-y, # 'i': index in textures", "pygame.image.load('assets/sprites/0.png').convert_alpha(), pygame.image.load('assets/sprites/1.png').convert_alpha(), pygame.image.load('assets/sprites/2.png').convert_alpha(), pygame.image.load('assets/sprites/3.png').convert_alpha(), pygame.image.load('assets/sprites/4.png').convert_alpha(), pygame.image.load('assets/sprites/5.png').convert_alpha(), pygame.image.load('assets/sprites/6.png').convert_alpha(), pygame.image.load('assets/sprites/7.png').convert_alpha(), pygame.image.load('assets/sprites/8.png').convert_alpha(), pygame.image.load('assets/sprites/9.png').convert_alpha() ) # game", "global FURYMODE_PARTICLES, FURYMODE_PARTICLES_MAX, SOUNDS pipeW = IMAGES['pipe'][0].get_width() pipeH = IMAGES['pipe'][0].get_height() for i in", "crashInfo['y'] playerHeight = IMAGES['player'][0].get_height() playerVelY = crashInfo['playerVelY'] playerAccY = 2 playerRot = crashInfo['playerRot']", "i in range(FURYMODE_PARTICLES_MAX): particle = {} particle['x'] = random.randint(pipe['x'], pipe['x'] + pipeW) particle['y']", "'assets/sprites/pipe-red.png', ) 
try: xrange except NameError: xrange = range class Keyboard(object): keys =", "추가된 부분 HITMASKS['coin'] = ( getHitmask(IMAGES['coin']), ) movementInfo = showWelcomeAnimation() crashInfo = mainGame(movementInfo)", "key) IMAGES['furymode'] = pygame.image.load('assets/sprites/furymode.png').convert_alpha() IMAGES['furymode-key'] = pygame.image.load('assets/sprites/furymode-key.png').convert_alpha() IMAGES['easymode'] = pygame.image.load('assets/sprites/easymode.png').convert_alpha() IMAGES['hardmode'] = pygame.image.load('assets/sprites/hardmode.png').convert_alpha()", "= IMAGES['player'][0].get_width() player['h'] = IMAGES['player'][0].get_height() # if player crashes into ground if player['y']", "= 288 SCREENHEIGHT = 512 # amount by which base can maximum shift", "SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y'])) # 추가된 부분 for coin in coins: SCREEN.blit(IMAGES['coin'], (coin['x'], coin['y']))", "200, 200) ORANGE = (200, 100, 50) CYAN = (0, 255, 255) MAGENTA", "particles: # speed particle['x'] += particle['vx'] particle['y'] += particle['vy'] # gravity particle['vy'] +=", "QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE): pygame.quit() sys.exit() if event.type", "get_key(): while 1: event = pygame.event.poll() if event.type == KEYDOWN: return event.key else:", "- 100, (screen.get_height() / 2) - 10)) pygame.display.flip() def ask(screen, question): \"ask(screen, question)", "True basex = crashInfo['basex'] upperPipes, lowerPipes = crashInfo['upperPipes'], crashInfo['lowerPipes'] # play hit and", "movementInfo['playery'] basex = movementInfo['basex'] baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width() # no need to", "question + \": \" + \"\".join(current_string)) return \"\".join(current_string) class Slider(): def __init__(self, val,", "same as playerFlapped playerMaxVelY = 10 # max vel along Y, max descend", "+ 200 + (SCREENWIDTH / 2), 'y': newPipe2[1]['y']}, ] # 추가된 부분 newCoin1", "len(PLAYERS_LIST) - 1) IMAGES['player'] = ( 
pygame.image.load(PLAYERS_LIST[randPlayer][0]).convert_alpha(), pygame.image.load(PLAYERS_LIST[randPlayer][1]).convert_alpha(), pygame.image.load(PLAYERS_LIST[randPlayer][2]).convert_alpha(), ) # select random", "x2, y2 = rect.x - rect2.x, rect.y - rect2.y for x in xrange(rect.width):", "pygame.image.load('assets/sprites/hardmode.png').convert_alpha() # speaker sprite IMAGES['speaker'] = (pygame.image.load('assets/sprites/speaker_full.png').convert_alpha(), pygame.image.load('assets/sprites/speaker_mute.png').convert_alpha()) # 추가된 부분 IMAGES['coin'] =", "'y': playery, 'groundCrash': crashTest[1], 'basex': basex, 'upperPipes': upperPipes, 'lowerPipes': lowerPipes, 'score': score, 'playerVelY':", "pipeH) # player and pipe hitmasks pHitMask = HITMASKS['player'][pi] uHitmask = HITMASKS['pipe'][0] #", "lHitmask) if lCollide: # for fury mode we want to break the pipe", "pipeindex == 0: IMAGES['pipe-particle'] = ( pygame.image.load('assets/sprites/particles-green-0.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-1.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-2.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-3.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-4.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-5.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-6.png').convert_alpha(),", "+ 8 furymodeKeyY = furymodey + IMAGES['furymode-key'].get_height() / 2 basex = 0 #", "= player['index'] player['w'] = IMAGES['player'][0].get_width() player['h'] = IMAGES['player'][0].get_height() playerRect = pygame.Rect(player['x'], player['y'], player['w'],", "inkey <= 127: current_string.append(chr(inkey)) display_box(screen, question + \": \" + \"\".join(current_string)) return \"\".join(current_string)", "-90: playerRot -= playerVelRot # draw sprites overx = int((SCREENWIDTH - IMAGES['gameover'].get_width()) /", "screen if 0 < 
upperPipes[0]['x'] < 5: newPipe = getRandomPipe(DIFFICULTY) upperPipes.append(newPipe[0]) lowerPipes.append(newPipe[1]) #", "of pipe BASEY=SCREENHEIGHT * 0.79 # image, sound and hitmask dicts IMAGES, SOUNDS,", "- 1: return [True, True] else: playerRect = pygame.Rect(player['x'], player['y'], player['w'], player['h']) pipeW", "int(BASEY * 0.6)) coinX = SCREENWIDTH + 100 return [ {'x': coinX, 'y':", "upper) if FURYMODE: return [True, False, False, lPipe] # normal mode return [True,", "SOUNDS[sounds].set_volume(SLIDER.val) #(2) key for easymode if (event.type == KEYDOWN and event.key == K_2)", "= pygame.image.load('assets/sprites/furymode-key.png').convert_alpha() IMAGES['easymode'] = pygame.image.load('assets/sprites/easymode.png').convert_alpha() IMAGES['hardmode'] = pygame.image.load('assets/sprites/hardmode.png').convert_alpha() # speaker sprite IMAGES['speaker'] =", "0 playerIndexGen = cycle([0, 1, 2, 1]) # iterator used to change playerIndex", "= int(SCREENHEIGHT * 0.80) # just at right of the fury mode button", "(1, 1, 1) TOPFIVE=[('kim',0),('kim',0),('kim',0),('kim',0),('kim',0)] FPS = 30 SCREENWIDTH = 288 SCREENHEIGHT = 512", "under the ground if particle['y'] >= BASEY: particles.remove(particle) else: # add new pipes", "player crashes into ground if player['y'] + player['h'] >= BASEY - 1: return", "int(SCREENHEIGHT * 0.68) hardmodex = int((SCREENWIDTH - IMAGES['hardmode'].get_width())/2) hardmodey = int(SCREENHEIGHT * 0.74)", "K_UP)) or (event.type == MOUSEBUTTONDOWN and event.button == 1): if playery > -2", "== KEYDOWN: return event.key else: pass def display_box(screen, message): fontobject = pygame.font.Font(None,18) fontobject1", "playerMaxVelY = 10 # max vel along Y, max descend speed playerMinVelY =", "IMAGES['player'][0].get_width() player['h'] = IMAGES['player'][0].get_height() playerRect = pygame.Rect(player['x'], player['y'], player['w'], player['h']) coinW = IMAGES['coin'].get_width()", "== 1: playerShm['val'] += 1 else: playerShm['val'] -= 1 def 
getRandomPipe(DIFFICULTY): PIPEGAPSIZE =", "'R', pygame.K_s: 'S', pygame.K_t: 'T', pygame.K_u: 'U', pygame.K_v: 'V', pygame.K_w: 'W', pygame.K_x: 'X',", "alpha.\"\"\" mask = [] for x in xrange(image.get_width()): mask.append([]) for y in xrange(image.get_height()):", "# draw sprites overx = int((SCREENWIDTH - IMAGES['gameover'].get_width()) / 2) overy = int(SCREENHEIGHT", "first flap sound and return values for mainGame SOUNDS['wing'].play() return { 'playery': playery", "IMAGES['easymode'].get_width())/2) easymodey = int(SCREENHEIGHT * 0.68) hardmodex = int((SCREENWIDTH - IMAGES['hardmode'].get_width())/2) hardmodey =", "-= 1 def getRandomPipe(DIFFICULTY): PIPEGAPSIZE = 100 + DIFFICULTY * 10 \"\"\" returns", "if FURYMODE: # list of upper pipes upperPipes = [] # list of", "rotation to cover the threshold (calculated in visible rotation) playerRot = 45 playerHeight", "'A', pygame.K_b: 'B', pygame.K_c: 'C', pygame.K_d: 'D', pygame.K_e: 'E', pygame.K_f: 'F', pygame.K_g: 'G',", "IMAGES['message'] = pygame.image.load('assets/sprites/message.png').convert_alpha() # base (ground) sprite IMAGES['base'] = pygame.image.load('assets/sprites/base.png').convert_alpha() # the \"fury", "1) % 30 basex = -((-basex + 100) % baseShift) # rotate the", "for up-down motion on welcome screen playerShmVals = {'val': 0, 'dir': 1} #", "be removed from the list for uPipe in upperPipes: if uPipe['x'] < -IMAGES['pipe'][0].get_width():", "if FURYMODE: return [True, False, True, uPipe] # normal mode return [True, False]", "# dynamic graphics - button surface # self.button_surf = pygame.surface.Surface((15, 15)) self.button_surf.fill(TRANS) self.button_surf.set_colorkey(TRANS)", "basex, 'upperPipes': upperPipes, 'lowerPipes': lowerPipes, 'score': score, 'playerVelY': playerVelY, 'playerRot': playerRot } #", "if (score > TOPFIVE[4][1] and count==0) : SCREEN.blit(IMAGES['gameover'], (overx,overy)) pygame.display.update() gameover = False", "particle['i'] = random.randint(1, FURYMODE_PARTICLES) - 1 
# random angle for a minimum velocity", "SCREEN.blit(IMAGES['base'], (basex, BASEY)) # print score so player overlaps the score showScore(score) #", "+ 4: score += 1 SOUNDS['point'].play() # playerIndex basex change if (loopIter +", "pHitMask = HITMASKS['player'][pi] cHitMask = HITMASKS['coin'][0] cCollide = pixelCollision(playerRect, coinRect, pHitMask, cHitMask) if", "pygame.image.load('assets/sprites/furymode-key.png').convert_alpha() IMAGES['easymode'] = pygame.image.load('assets/sprites/easymode.png').convert_alpha() IMAGES['hardmode'] = pygame.image.load('assets/sprites/hardmode.png').convert_alpha() # speaker sprite IMAGES['speaker'] = (pygame.image.load('assets/sprites/speaker_full.png').convert_alpha(),", "numbers to be printed for digit in scoreDigits: totalWidth += IMAGES['numbers'][digit].get_width() Xoffset =", "mode, the pipe sapwn system is different than in # normal mode, we", "playerRot = crashInfo['playerRot'] playerVelRot = 7 count=0 gameover = True basex = crashInfo['basex']", "rotate only when it's a pipe crash if not crashInfo['groundCrash']: if playerRot >", "= HITMASKS['pipe'][0] # if bird collided with pipe lCollide = pixelCollision(playerRect, lPipeRect, pHitMask,", "mask = [] for x in xrange(image.get_width()): mask.append([]) for y in xrange(image.get_height()): mask[x].append(bool(image.get_at((x,y))[3]))", "x-location on screen self.ypos = ypos # y-location on screen self.surf = pygame.surface.Surface((95,", "value self.maxi = maxi # maximum at slider position right self.mini = mini", "at slider position left self.xpos = xpos # x-location on screen self.ypos =", "= 0 score = playerIndex = loopIter = 0 playerIndexGen = movementInfo['playerIndexGen'] playerx,", "uPipe['y'])) for lPipe in lowerPipes: SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y'])) # 추가된 부분 for coin", "(loopIter + 1) % 30 basex = -((-basex + 4) % baseShift) playerShm(playerShmVals)", "lowerpipe lowerPipes = [] # list of particles # a particle is an", "# list of 
all possible players (tuple of 3 positions of flap) PLAYERS_LIST", "y in xrange(image.get_height()): mask[x].append(bool(image.get_at((x,y))[3])) return mask def get_key(): while 1: event = pygame.event.poll()", "playerFlapped: playerFlapped = False # more rotation to cover the threshold (calculated in", "1, (255,255,255)), ((screen.get_width() / 2) - 75, (screen.get_height() / 2) - 50)) screen.blit(fontobject.render(message,", "# player's rotation playerVelRot = 3 # angular speed playerRotThr = 20 #", "== K_BACKSPACE: current_string = current_string[0:-1] elif inkey == K_RETURN: break elif inkey ==", "PIPEGAPSIZE = 130 # gap between upper and lower part of pipe BASEY=SCREENHEIGHT", "gameover image\"\"\" global FURYMODE, EASYMODE FURYMODE = False EASYMODE = False score =", "shift to left 'assets/sprites/bluebird-upflap.png', 'assets/sprites/bluebird-midflap.png', 'assets/sprites/bluebird-downflap.png', ), # yellow bird ( 'assets/sprites/yellowbird-upflap.png', 'assets/sprites/yellowbird-midflap.png',", "i in range(0,5) : SCREEN.blit(fontobject.render(TOPFIVE[i][0], 1, RED),((SCREEN.get_width() / 2) - 100, (SCREEN.get_height() /", "4 # get 2 new pipes to add to upperPipes lowerPipes list newPipe1", "rect2.y for x in xrange(rect.width): for y in xrange(rect.height): if hitmask1[x1+x][y1+y] and hitmask2[x2+x][y2+y]:", "# if player crashes into ground if player['y'] + player['h'] >= BASEY -", "# self.surf.set_colorkey(BLACK) pygame.draw.rect(self.surf, WHITE, [5, 30, 80, 5], 0) # dynamic graphics -", "TRANS = (1, 1, 1) TOPFIVE=[('kim',0),('kim',0),('kim',0),('kim',0),('kim',0)] FPS = 30 SCREENWIDTH = 288 SCREENHEIGHT", "IMAGES['coin'].get_height() for coin in coins: coinRect = pygame.Rect(coin['x'], coin['y'], coinW, coinH) pHitMask =", "= False # Move volume slider if SLIDER.hit: SLIDER.move() for sounds in SOUNDS:", "SLIDER.hit: SLIDER.move() for sounds in SOUNDS: SOUNDS[sounds].set_volume(SLIDER.val) #(2) key for easymode if (event.type", "which base can maximum 
shift to left PIPEGAPSIZE = 130 # gap between", "+ 1) % 5 == 0: playerIndex = next(playerIndexGen) loopIter = (loopIter +", "box in the middle of the screen\" pygame.draw.rect(screen, (0,0,0), ((screen.get_width() / 2) -", "the pipe so we # must return which pipe is colliding (lower or", "2, 1]) # iterator used to change playerIndex after every 5th iteration loopIter", "True if player collders with base or pipes.\"\"\" global FURYMODE pi = player['index']", "< BASEY - 1: playery += min(playerVelY, BASEY - playery - playerHeight) #", "- rect1.x, rect.y - rect1.y x2, y2 = rect.x - rect2.x, rect.y -", "= IMAGES['player'][0].get_height() # if player crashes into ground if player['y'] + player['h'] >=", "102, (screen.get_height() / 2) - 12, 204,24), 1) if len(message) != 0: screen.blit(fontobject1.render(\"HIGH", "'assets/sprites/redbird-upflap.png', 'assets/sprites/redbird-midflap.png', 'assets/sprites/redbird-downflap.png', ), # blue bird ( # amount by which base", "checkCoin(player, coins): pi = player['index'] player['w'] = IMAGES['player'][0].get_width() player['h'] = IMAGES['player'][0].get_height() playerRect =", "- IMAGES['background'].get_width() # player shm for up-down motion on welcome screen playerShmVals =", "playery)) pygame.display.update() FPSCLOCK.tick(FPS) def showGameOverScreen(crashInfo): \"\"\"crashes the player down ans shows gameover image\"\"\"", "'assets/sprites/yellowbird-upflap.png', 'assets/sprites/yellowbird-midflap.png', 'assets/sprites/yellowbird-downflap.png', ), ) # list of backgrounds BACKGROUNDS_LIST = ( 'assets/sprites/background-day.png',", "= [] # 추가된 부분 coins = [] else: if EASYMODE: DIFFICULTY =", "-9 # player's velocity along Y, default same as playerFlapped playerMaxVelY = 10", "# particles for particle in particles: # speed particle['x'] += particle['vx'] particle['y'] +=", "= pygame.surface.Surface((15, 15)) self.button_surf.fill(TRANS) self.button_surf.set_colorkey(TRANS) pygame.draw.circle(self.button_surf, ORANGE, (6, 6), 
6, 0) def draw(self):", "] def showScore(score): \"\"\"displays score in center of screen\"\"\" scoreDigits = [int(x) for", "= IMAGES['pipe'][0].get_width() pipeH = IMAGES['pipe'][0].get_height() for uPipe in upperPipes: # pipe rect uPipeRect", "SCREEN.blit(pygame.font.Font(None,50).render(\"LEADERBOARD\", 1, RED),((SCREEN.get_width() / 2) -132, (SCREEN.get_height() / 2) -220)) for i in", "it's a pipe crash if not crashInfo['groundCrash']: if playerRot > -90: playerRot -=", "self.mini if self.val < self.mini: self.val = self.mini if self.val > self.maxi: self.val", "= getRandomPipe(DIFFICULTY) upperPipes.append(pipes[0]) lowerPipes.append(pipes[1]) # check if a pipe must be removed from", "\"\"\" returns a randomly generated pipe \"\"\" # y of gap between upper", "rect lPipeRect = pygame.Rect(lPipe['x'], lPipe['y'], pipeW, pipeH) # player and pipe hitmasks pHitMask", "= pygame.event.poll() if event.type == KEYDOWN: return event.key else: pass def display_box(screen, message):", "hit attribute indicates slider movement due to mouse interaction # Static graphics -", "def writeScore(score): TOPFIVE.append((ask(SCREEN,\"NAME: \"),score)) TOPFIVE.sort(key=itemgetter(1),reverse= True) TOPFIVE.pop() def getHitmask(image): \"\"\"returns a hitmask using", "SCREENWIDTH + 280, 'y': newCoin1[0]['y']}, {'x': SCREENWIDTH + 280 + (SCREENWIDTH / 2),", "추가된 부분 coins = [] else: if EASYMODE: DIFFICULTY = 4 # get", "pixelCollision(rect1, rect2, hitmask1, hitmask2): \"\"\"Checks if two objects collide and not just their", "collders with base or pipes.\"\"\" global FURYMODE pi = player['index'] player['w'] = IMAGES['player'][0].get_width()", "부분 newCoin = getRandomCoin() coins.append(newCoin[0]) # remove first pipe if its out of", "1, 1) TOPFIVE=[('kim',0),('kim',0),('kim',0),('kim',0),('kim',0)] FPS = 30 SCREENWIDTH = 288 SCREENHEIGHT = 512 #", "def display_box(screen, message): fontobject = pygame.font.Font(None,18) fontobject1 = pygame.font.Font(None,30) \"Print a message in", "= 20 
# rotation threshold playerFlapAcc = -9 # players speed on flapping", "def __init__(self, val, maxi, mini, xpos, ypos): self.val = val # start value", "from itertools import cycle from operator import itemgetter import random import sys import", "-90: playerRot -= playerVelRot # player's movement if playerVelY < playerMaxVelY and not", "slider button. \"\"\" self.val = (pygame.mouse.get_pos()[0] - self.xpos - 10) / 80 *", "playerShmVals = {'val': 0, 'dir': 1} # initialize volume for sound in SOUNDS:", "(uPipe['x'], uPipe['y'])) for lPipe in lowerPipes: SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y'])) # 추가된 부분 for", "fury mode button (8 is right padding) furymodeKeyX = furymodex + IMAGES['furymode'].get_width() +", "for particle in particles: # speed particle['x'] += particle['vx'] particle['y'] += particle['vy'] #", "left 'assets/sprites/bluebird-upflap.png', 'assets/sprites/bluebird-midflap.png', 'assets/sprites/bluebird-downflap.png', ), # yellow bird ( 'assets/sprites/yellowbird-upflap.png', 'assets/sprites/yellowbird-midflap.png', 'assets/sprites/yellowbird-downflap.png', ),", "playery += min(playerVelY, BASEY - playery - playerHeight) # move pipes to left", "scoreDigits = [int(x) for x in list(str(score))] totalWidth = 0 # total width", "uPipeRect = pygame.Rect(uPipe['x'], uPipe['y'], pipeW, pipeH) # player and pipe hitmasks pHitMask =", "a threshold visibleRot = playerRotThr if playerRot <= playerRotThr: visibleRot = playerRot playerSurface", "# red bird ( 'assets/sprites/redbird-upflap.png', 'assets/sprites/redbird-midflap.png', 'assets/sprites/redbird-downflap.png', ), # blue bird ( #", "each pipe hit FURYMODE_PARTICLES_MAX = 48 # list of all possible players (tuple", "< 15: playerVelY += playerAccY # rotate only when it's a pipe crash", "부분 coins = [] else: if EASYMODE: DIFFICULTY = 4 # get 2", "getRandomPipe(DIFFICULTY): PIPEGAPSIZE = 100 + DIFFICULTY * 10 \"\"\" returns a randomly generated", "zip(upperPipes, lowerPipes): 
SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y'])) SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y'])) SCREEN.blit(IMAGES['base'], (basex, BASEY)) showScore(score) playerSurface", "on flapping playerFlapped = False # True when player flaps # The counter", "basex, 'playerIndexGen': playerIndexGen, } # adjust playery, playerIndex, basex if (loopIter + 1)", "(255,255,255)), ((screen.get_width() / 2) - 75, (screen.get_height() / 2) - 50)) screen.blit(fontobject.render(message, 1,", "(playerx,playery)) #showScore(score) if (score > TOPFIVE[4][1] and count==0) : SCREEN.blit(IMAGES['gameover'], (overx,overy)) pygame.display.update() gameover", "playerHeight >= BASEY - 1: return # player y shift if playery +", "int(SCREENHEIGHT * 0.5) #SCREEN.blit(IMAGES['background'], (0,0)) for uPipe, lPipe in zip(upperPipes, lowerPipes): SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'],", "value, we must spawn new pipes if furymodePipeFrameCounter == FURYMODE_FRAMES_TO_SPAWN_PIPES: # counter reset", "bird collided with pipe uCollide = pixelCollision(playerRect, uPipeRect, pHitMask, uHitmask) if uCollide: #", "import itemgetter import random import sys import math import pygame from pygame.locals import", "playerMaxVelY and not playerFlapped: playerVelY += playerAccY if playerFlapped: playerFlapped = False #", "(SCREENWIDTH / 2), 'y': newCoin2[0]['y']}, ] pipeVelX = -4 # player velocity, max", "removed from the list for uPipe in upperPipes: if uPipe['x'] < -IMAGES['pipe'][0].get_width(): upperPipes.remove(uPipe)", "{'x': coinX, 'y': coinY}, ] def showScore(score): \"\"\"displays score in center of screen\"\"\"", "# players speed on flapping playerFlapped = False # True when player flaps", "lPipe['y'])) # 추가된 부분 for coin in coins: SCREEN.blit(IMAGES['coin'], (coin['x'], coin['y'])) # pipes'", "to left for uPipe in upperPipes: uPipe['x'] += pipeVelX for lPipe in lowerPipes:", "= False # In fury mode, the pipe sapwn system is different than", "\"\"\" # y of gap between upper and lower 
pipe gapY = random.randrange(0,", "position # screen SCREEN.blit(IMAGES['background'], (0,0)) SCREEN.blit(surf, (self.xpos, self.ypos)) def move(self): \"\"\" The dynamic", "screen SCREEN.blit(IMAGES['background'], (0,0)) SCREEN.blit(surf, (self.xpos, self.ypos)) def move(self): \"\"\" The dynamic part; reacts", "SCREEN.blit(IMAGES['base'], (basex, BASEY)) SCREEN.blit(IMAGES['easymode'],(easymodex,easymodey)) SCREEN.blit(IMAGES['hardmode'],(hardmodex,hardmodey)) SCREEN.blit(IMAGES['furymode'], (furymodex, furymodey)) SCREEN.blit(IMAGES['furymode-key'], (furymodeKeyX, furymodeKeyY)) pygame.display.update() FPSCLOCK.tick(FPS)", "gravity particle['vy'] += playerAccY # remove if the particle is under the ground", "1, 2, 1]) # iterator used to change playerIndex after every 5th iteration", "when player flaps # The counter to spawn new pipes furymodePipeFrameCounter = 0", "pygame.locals import * import time WHITE = (255, 255, 255) BLACK = (0,", "return mask def get_key(): while 1: event = pygame.event.poll() if event.type == KEYDOWN:", "mainGame FURYMODE = True SOUNDS['wing'].play() return { 'playery': playery + playerShmVals['val'], 'basex': basex,", "Xoffset = (SCREENWIDTH - totalWidth) / 2 for digit in scoreDigits: SCREEN.blit(IMAGES['numbers'][digit], (Xoffset,", "+ 75, (SCREEN.get_height() / 2) -160 + (50*i))) FPSCLOCK.tick(FPS) pygame.display.update() def playerShm(playerShm): \"\"\"oscillates", "random pipe sprites pipeindex = random.randint(0, len(PIPES_LIST) - 1) IMAGES['pipe'] = ( pygame.transform.rotate(", "print score so player overlaps the score showScore(score) # Player rotation has a", "(Xoffset, SCREENHEIGHT * 0.1)) Xoffset += IMAGES['numbers'][digit].get_width() def spawnParticles(particles, pipe): \"\"\" Add paticles", "pygame.draw.rect(screen, (0,0,0), ((screen.get_width() / 2) - 100, (screen.get_height() / 2) - 10, 200,20),", "with base or pipes.\"\"\" global FURYMODE pi = player['index'] player['w'] = IMAGES['player'][0].get_width() player['h']", "to 
spawn pipes at start if FURYMODE: # list of upper pipes upperPipes", "# 'vx': velocity-x, 'vy': velocity-y, # 'i': index in textures list} particles =", "= crashInfo['basex'] upperPipes, lowerPipes = crashInfo['upperPipes'], crashInfo['lowerPipes'] # play hit and die sounds", "pipeVelX # 추가된 부분 for coin in coins: coin['x'] += pipeVelX # update", "int(SCREENWIDTH * 0.2) playery = int((SCREENHEIGHT - IMAGES['player'][0].get_height()) / 2) messagex = int((SCREENWIDTH", "the pipe sapwn system is different than in # normal mode, we add", "(particle['x'], particle['y'])) SCREEN.blit(IMAGES['base'], (basex, BASEY)) # print score so player overlaps the score", "== KEYDOWN and event.key == K_2) or ((event.type == MOUSEBUTTONDOWN and event.button ==", "gapY + PIPEGAPSIZE}, # lower pipe ] # 추가된 부분 def getRandomCoin(): \"\"\"", "playerIndex = next(playerIndexGen) loopIter = (loopIter + 1) % 30 basex = -((-basex", "/ 2) - 100, (screen.get_height() / 2) - 10, 200,20), 0) pygame.draw.rect(screen, (255,255,255),", "for x in list(str(score))] totalWidth = 0 # total width of all numbers", "= (255, 255, 0) GREEN = (0, 255, 50) BLUE = (50, 50,", "list of backgrounds BACKGROUNDS_LIST = ( 'assets/sprites/background-day.png', 'assets/sprites/background-night.png', ) # list of pipes", "DIFFICULTY = 4 # get 2 new pipes to add to upperPipes lowerPipes", "SLIDER.hit = False # Move volume slider if SLIDER.hit: SLIDER.move() for sounds in", "= pygame.Rect(uPipe['x'], uPipe['y'], pipeW, pipeH) # player and pipe hitmasks pHitMask = HITMASKS['player'][pi]", "def get_key(): while 1: event = pygame.event.poll() if event.type == KEYDOWN: return event.key", "((screen.get_width() / 2) - 100, (screen.get_height() / 2) - 10)) pygame.display.flip() def ask(screen,", "can maximum shift to left baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width() # player shm", "'U', pygame.K_v: 'V', pygame.K_w: 'W', pygame.K_x: 'X', pygame.K_y: 'Y', pygame.K_z: 'Z'} def main():", "== 1): if playery + 
playerHeight >= BASEY - 1: return # player", "(screen.get_height() / 2) - 10)) pygame.display.flip() def ask(screen, question): \"ask(screen, question) -> answer\"", "[] for x in xrange(image.get_width()): mask.append([]) for y in xrange(image.get_height()): mask[x].append(bool(image.get_at((x,y))[3])) return mask", "K_MINUS: current_string.append(\"_\") elif inkey <= 127: current_string.append(chr(inkey)) display_box(screen, question + \": \" +", "pygame.K_n: 'N', pygame.K_o: 'O', pygame.K_p: 'P', pygame.K_q: 'Q', pygame.K_r: 'R', pygame.K_s: 'S', pygame.K_t:", "= IMAGES['player'][0].get_width() player['h'] = IMAGES['player'][0].get_height() playerRect = pygame.Rect(player['x'], player['y'], player['w'], player['h']) coinW =", "bird ( 'assets/sprites/redbird-upflap.png', 'assets/sprites/redbird-midflap.png', 'assets/sprites/redbird-downflap.png', ), # blue bird ( # amount by", "# max vel along Y, max descend speed playerMinVelY = -8 # min", "= 35 # pipes particles amount (for each pipe) FURYMODE_PARTICLES = 8 #", "(calculated in visible rotation) playerRot = 45 playerHeight = IMAGES['player'][playerIndex].get_height() playery += min(playerVelY,", "/ 2) - 10)) pygame.display.flip() def ask(screen, question): \"ask(screen, question) -> answer\" pygame.font.init()", "= mini # minimum at slider position left self.xpos = xpos # x-location", "touch left of screen if 0 < upperPipes[0]['x'] < 5: newPipe = getRandomPipe(DIFFICULTY)", "crashTest[1], 'basex': basex, 'upperPipes': upperPipes, 'lowerPipes': lowerPipes, 'score': score, 'playerVelY': playerVelY, 'playerRot': playerRot", "Slider(): def __init__(self, val, maxi, mini, xpos, ypos): self.val = val # start", "+ soundExt) SOUNDS['hit'] = pygame.mixer.Sound('assets/audio/hit' + soundExt) SOUNDS['point'] = pygame.mixer.Sound('assets/audio/point' + soundExt) SOUNDS['swoosh']", "부분 IMAGES['coin'] = pygame.image.load('assets/sprites/coin.png').convert_alpha() # sounds if 'win' in sys.platform: soundExt = '.wav'", "+= 1 
SOUNDS['point'].play() # playerIndex basex change if (loopIter + 1) % 3", "( pygame.transform.rotate( pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), 180), pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), ) # pipes' particles for fury mode #", "playery, playerIndex, basex if (loopIter + 1) % 5 == 0: playerIndex =", "if playery + playerHeight >= BASEY - 1: return # player y shift", "# Move volume slider if SLIDER.hit: SLIDER.move() for sounds in SOUNDS: SOUNDS[sounds].set_volume(SLIDER.val) #(2)", "mainGame(movementInfo) showGameOverScreen(crashInfo) def showWelcomeAnimation(): \"\"\"Shows welcome screen animation of flappy bird\"\"\" global FURYMODE,", "playery, 'index': playerIndex}, coins) if crashTest[0]: # the player hits a pipe in", "lPipe in lowerPipes: lPipe['x'] += pipeVelX # 추가된 부분 for coin in coins:", "= (loopIter + 1) % 30 basex = -((-basex + 100) % baseShift)", "coins: coin['x'] += pipeVelX # update (add / remove) pipes and particles if", "2), 'y': newPipe2[0]['y']}, ] # list of lowerpipe lowerPipes = [ {'x': SCREENWIDTH", "generated pipe \"\"\" # y of gap between upper and lower pipe gapY", "IMAGES['pipe'][0].get_width() pipeH = IMAGES['pipe'][0].get_height() for i in range(FURYMODE_PARTICLES_MAX): particle = {} particle['x'] =", "and dynamic graphics in a copy of the basic slide surface \"\"\" #", "pygame.image.load('assets/sprites/1.png').convert_alpha(), pygame.image.load('assets/sprites/2.png').convert_alpha(), pygame.image.load('assets/sprites/3.png').convert_alpha(), pygame.image.load('assets/sprites/4.png').convert_alpha(), pygame.image.load('assets/sprites/5.png').convert_alpha(), pygame.image.load('assets/sprites/6.png').convert_alpha(), pygame.image.load('assets/sprites/7.png').convert_alpha(), pygame.image.load('assets/sprites/8.png').convert_alpha(), pygame.image.load('assets/sprites/9.png').convert_alpha() ) # game over", "Player rotation has a threshold visibleRot = playerRotThr if playerRot <= playerRotThr: 
visibleRot", "newPipe2[0]['y']}, ] # list of lowerpipe lowerPipes = [ {'x': SCREENWIDTH + 200,", "while 1: event = pygame.event.poll() if event.type == KEYDOWN: return event.key else: pass", "hit FURYMODE_PARTICLES_MAX = 48 # list of all possible players (tuple of 3", "< 5: newPipe = getRandomPipe(DIFFICULTY) upperPipes.append(newPipe[0]) lowerPipes.append(newPipe[1]) # 추가된 부분 newCoin = getRandomCoin()", "[] display_box(screen, question + \": \" + \"\".join(current_string)) while 1: inkey = get_key()", "in range(0,5) : SCREEN.blit(fontobject.render(TOPFIVE[i][0], 1, RED),((SCREEN.get_width() / 2) - 100, (SCREEN.get_height() / 2)", "pi = player['index'] player['w'] = IMAGES['player'][0].get_width() player['h'] = IMAGES['player'][0].get_height() # if player crashes", "SCREENHEIGHT = 512 # amount by which base can maximum shift to left", "player sprites randPlayer = random.randint(0, len(PLAYERS_LIST) - 1) IMAGES['player'] = ( pygame.image.load(PLAYERS_LIST[randPlayer][0]).convert_alpha(), pygame.image.load(PLAYERS_LIST[randPlayer][1]).convert_alpha(),", "= True elif event.type == pygame.MOUSEBUTTONUP: SLIDER.hit = False # Move volume slider", "-220)) for i in range(0,5) : SCREEN.blit(fontobject.render(TOPFIVE[i][0], 1, RED),((SCREEN.get_width() / 2) - 100,", "fontobject = pygame.font.Font(None,18) fontobject1 = pygame.font.Font(None,30) \"Print a message in a box in", "SCREEN.blit(IMAGES['background'],(0,0)) SCREEN.blit(pygame.font.Font(None,50).render(\"LEADERBOARD\", 1, RED),((SCREEN.get_width() / 2) -132, (SCREEN.get_height() / 2) -220)) for i", "pygame.image.load(PLAYERS_LIST[randPlayer][1]).convert_alpha(), pygame.image.load(PLAYERS_LIST[randPlayer][2]).convert_alpha(), ) # select random pipe sprites pipeindex = random.randint(0, len(PIPES_LIST) -", "# x-location on screen self.ypos = ypos # y-location on screen self.surf =", "부분 if coinTest[0]: score += 1 SOUNDS['point'].play() coins.pop(0) # check for score playerMidPos", "soundExt) SOUNDS['point'] = 
pygame.mixer.Sound('assets/audio/point' + soundExt) SOUNDS['swoosh'] = pygame.mixer.Sound('assets/audio/swoosh' + soundExt) SOUNDS['wing'] =", "in upperPipes: if uPipe['x'] < -IMAGES['pipe'][0].get_width(): upperPipes.remove(uPipe) for lPipe in lowerPipes: if lPipe['x']", "return [ {'x': pipeX, 'y': gapY - pipeHeight}, # upper pipe {'x': pipeX,", "event = pygame.event.poll() if event.type == KEYDOWN: return event.key else: pass def display_box(screen,", "player['h'] = IMAGES['player'][0].get_height() # if player crashes into ground if player['y'] + player['h']", "1, 0, 190, 0) while True: # select random background sprites randBg =", "a hitmask using an image's alpha.\"\"\" mask = [] for x in xrange(image.get_width()):", "cover the threshold (calculated in visible rotation) playerRot = 45 playerHeight = IMAGES['player'][playerIndex].get_height()", "def spawnParticles(particles, pipe): \"\"\" Add paticles to the particle list randomly generated with", "pipeH = IMAGES['pipe'][0].get_height() for i in range(FURYMODE_PARTICLES_MAX): particle = {} particle['x'] = random.randint(pipe['x'],", "players (tuple of 3 positions of flap) PLAYERS_LIST = ( # red bird", "velocity, max velocity, downward accleration, accleration on flap playerVelY = -9 # player's", "pygame.K_p: 'P', pygame.K_q: 'Q', pygame.K_r: 'R', pygame.K_s: 'S', pygame.K_t: 'T', pygame.K_u: 'U', pygame.K_v:", "particle['x'] += particle['vx'] particle['y'] += particle['vy'] # gravity particle['vy'] += playerAccY # remove", "= int(SCREENHEIGHT * 0.74) furymodex = int((SCREENWIDTH - IMAGES['furymode'].get_width()) / 2) furymodey =", "IMAGES['pipe-particle'] = ( pygame.image.load('assets/sprites/particles-red-0.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-1.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-2.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-3.png').convert_alpha(), 
pygame.image.load('assets/sprites/particles-red-4.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-5.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-6.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-7.png').convert_alpha(), ) #", "if player crashes into ground if player['y'] + player['h'] >= BASEY - 1:", "rect.y - rect1.y x2, y2 = rect.x - rect2.x, rect.y - rect2.y for", "x in xrange(rect.width): for y in xrange(rect.height): if hitmask1[x1+x][y1+y] and hitmask2[x2+x][y2+y]: return True", "return \"\".join(current_string) class Slider(): def __init__(self, val, maxi, mini, xpos, ypos): self.val =", "fury mode FURYMODE = False EASYMODE = False # In fury mode, the", "(self.xpos, self.ypos)) def move(self): \"\"\" The dynamic part; reacts to movement of the", "pipes = getRandomPipe(DIFFICULTY) upperPipes.append(pipes[0]) lowerPipes.append(pipes[1]) # check if a pipe must be removed", "255) MAGENTA = (255, 0, 255) TRANS = (1, 1, 1) TOPFIVE=[('kim',0),('kim',0),('kim',0),('kim',0),('kim',0)] FPS", "PIPEGAPSIZE = 100 + DIFFICULTY * 10 \"\"\" returns a randomly generated pipe", "not crashTest[1]: spawnParticles(particles, crashTest[3]) # remove the pipe # it's an upper pipe", "= IMAGES['player'][0].get_height() playerRect = pygame.Rect(player['x'], player['y'], player['w'], player['h']) coinW = IMAGES['coin'].get_width() coinH =", "else: lowerPipes.remove(crashTest[3]) else: return { 'y': playery, 'groundCrash': crashTest[1], 'basex': basex, 'upperPipes': upperPipes,", "False # more rotation to cover the threshold (calculated in visible rotation) playerRot", "getHitmask(IMAGES['coin']), ) movementInfo = showWelcomeAnimation() crashInfo = mainGame(movementInfo) showGameOverScreen(crashInfo) def showWelcomeAnimation(): \"\"\"Shows welcome", "hardmodey = int(SCREENHEIGHT * 0.74) furymodex = int((SCREENWIDTH - IMAGES['furymode'].get_width()) / 2) furymodey", "} elif (event.type == KEYDOWN and (event.key == 
K_SPACE or event.key == K_UP))", "different than in # normal mode, we add pipes with a \"timer\" (a", "(for each pipe) FURYMODE_PARTICLES = 8 # max particles for each pipe hit", "green if pipeindex == 0: IMAGES['pipe-particle'] = ( pygame.image.load('assets/sprites/particles-green-0.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-1.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-2.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-3.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-4.png').convert_alpha(),", "animation of flappy bird\"\"\" global FURYMODE, EASYMODE # index of player to blit", "playerIndexGen = cycle([0, 1, 2, 1]) # iterator used to change playerIndex after", "QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE): pygame.quit() sys.exit() if (event.type", "pygame.MOUSEBUTTONDOWN: pos = pygame.mouse.get_pos() if SLIDER.button_rect.collidepoint(pos): SLIDER.hit = True elif event.type == pygame.MOUSEBUTTONUP:", "ypos # y-location on screen self.surf = pygame.surface.Surface((95, 40)) self.hit = False #", "(screen.get_height() / 2) - 50)) screen.blit(fontobject.render(message, 1, (255,255,255)), ((screen.get_width() / 2) - 100,", "pHitMask, uHitmask) if uCollide: # for fury mode we want to break the", "playerVelY = crashInfo['playerVelY'] playerAccY = 2 playerRot = crashInfo['playerRot'] playerVelRot = 7 count=0", "K_RETURN: break elif inkey == K_MINUS: current_string.append(\"_\") elif inkey <= 127: current_string.append(chr(inkey)) display_box(screen,", "# amount by which base can maximum shift to left baseShift = IMAGES['base'].get_width()", "# play hit and die sounds SOUNDS['hit'].play() if not crashInfo['groundCrash']: SOUNDS['die'].play() while True:", "event.key == K_1) or ((event.type == MOUSEBUTTONDOWN and event.button == 1) and IMAGES['furymode'].get_rect(center=(furymodex+54,furymodey+14)).collidepoint(pygame.mouse.get_pos())):", "( 
getHitmask(IMAGES['player'][0]), getHitmask(IMAGES['player'][1]), getHitmask(IMAGES['player'][2]), ) # 추가된 부분 HITMASKS['coin'] = ( getHitmask(IMAGES['coin']), )", "soundExt = '.ogg' SOUNDS['die'] = pygame.mixer.Sound('assets/audio/die' + soundExt) SOUNDS['hit'] = pygame.mixer.Sound('assets/audio/hit' + soundExt)", "== True): SCREEN.blit(IMAGES['gameover'], (overx,overy)) pygame.display.update() gameover = False pygame.time.delay(1000) showLeaderboard() FPSCLOCK.tick(FPS) pygame.display.update() def", "rect1.clip(rect2) if rect.width == 0 or rect.height == 0: return False x1, y1", "pixelCollision(playerRect, lPipeRect, pHitMask, lHitmask) if lCollide: # for fury mode we want to", "'J', pygame.K_k: 'K', pygame.K_l: 'L', pygame.K_m: 'M', pygame.K_n: 'N', pygame.K_o: 'O', pygame.K_p: 'P',", ">= BASEY - 1: return # player y shift if playery + playerHeight", "pipeW = IMAGES['pipe'][0].get_width() pipeH = IMAGES['pipe'][0].get_height() for uPipe in upperPipes: # pipe rect", "[True, False] for lPipe in lowerPipes: # pipe rect lPipeRect = pygame.Rect(lPipe['x'], lPipe['y'],", "pipes with a \"timer\" (a frame counter) FURYMODE_FRAMES_TO_SPAWN_PIPES = 35 # pipes particles", "in scoreDigits: SCREEN.blit(IMAGES['numbers'][digit], (Xoffset, SCREENHEIGHT * 0.1)) Xoffset += IMAGES['numbers'][digit].get_width() def spawnParticles(particles, pipe):", "100, 50) CYAN = (0, 255, 255) MAGENTA = (255, 0, 255) TRANS", "coinH) pHitMask = HITMASKS['player'][pi] cHitMask = HITMASKS['coin'][0] cCollide = pixelCollision(playerRect, coinRect, pHitMask, cHitMask)", "threshold visibleRot = playerRotThr if playerRot <= playerRotThr: visibleRot = playerRot playerSurface =", "IMAGES['furymode'].get_width() + 8 furymodeKeyY = furymodey + IMAGES['furymode-key'].get_height() / 2 basex = 0", "player['index'] player['w'] = IMAGES['player'][0].get_width() player['h'] = IMAGES['player'][0].get_height() playerRect = pygame.Rect(player['x'], player['y'], player['w'], player['h'])", "The counter to spawn new 
pipes furymodePipeFrameCounter = 0 while True: for event", "= 1 # players downward accleration playerRot = 45 # player's rotation playerVelRot", "showScore(score): \"\"\"displays score in center of screen\"\"\" scoreDigits = [int(x) for x in", "KEYDOWN and (event.key == K_SPACE or event.key == K_UP)) or ((event.type == MOUSEBUTTONDOWN", "0) while True: # select random background sprites randBg = random.randint(0, len(BACKGROUNDS_LIST) -", "sounds if 'win' in sys.platform: soundExt = '.wav' else: soundExt = '.ogg' SOUNDS['die']", "crashInfo['groundCrash']: if playerRot > -90: playerRot -= playerVelRot # draw sprites overx =", "of the screen if upperPipes[0]['x'] < -IMAGES['pipe'][0].get_width(): lowerPipes.pop(0) upperPipes.pop(0) # 추가된 부분 if", "(coin['x'], coin['y'])) # pipes' particles if FURYMODE: for particle in particles: SCREEN.blit(IMAGES['pipe-particle'][particle['i']], (particle['x'],", "pygame.draw.rect(screen, (255,255,255), ((screen.get_width() / 2) - 102, (screen.get_height() / 2) - 12, 204,24),", "2) -132, (SCREEN.get_height() / 2) -220)) for i in range(0,5) : SCREEN.blit(fontobject.render(TOPFIVE[i][0], 1,", "particle in particles: SCREEN.blit(IMAGES['pipe-particle'][particle['i']], (particle['x'], particle['y'])) SCREEN.blit(IMAGES['base'], (basex, BASEY)) # print score so", "is colliding (lower or upper) if FURYMODE: return [True, False, True, uPipe] #", "draw(self): \"\"\" Combination of static and dynamic graphics in a copy of the", "FURYMODE, EASYMODE FURYMODE = False EASYMODE = False score = crashInfo['score'] playerx =", "IMAGES['pipe'][0].get_width() pipeH = IMAGES['pipe'][0].get_height() for uPipe in upperPipes: # pipe rect uPipeRect =", "True] else: playerRect = pygame.Rect(player['x'], player['y'], player['w'], player['h']) pipeW = IMAGES['pipe'][0].get_width() pipeH =", "int(SCREENWIDTH * 0.2), movementInfo['playery'] basex = movementInfo['basex'] baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width() #", "sprites 
pipeindex = random.randint(0, len(PIPES_LIST) - 1) IMAGES['pipe'] = ( pygame.transform.rotate( pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), 180),", "= pygame.Rect(player['x'], player['y'], player['w'], player['h']) coinW = IMAGES['coin'].get_width() coinH = IMAGES['coin'].get_height() for coin", "pygame.image.load('assets/sprites/particles-green-7.png').convert_alpha(), ) else: IMAGES['pipe-particle'] = ( pygame.image.load('assets/sprites/particles-red-0.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-1.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-2.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-3.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-4.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-5.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-6.png').convert_alpha(),", "lPipe['y'])) SCREEN.blit(IMAGES['base'], (basex, BASEY)) showScore(score) playerSurface = pygame.transform.rotate(IMAGES['player'][1], playerRot) SCREEN.blit(playerSurface, (playerx,playery)) #showScore(score) if", "# no need to spawn pipes at start if FURYMODE: # list of", "= IMAGES['pipe'][0].get_height() pipeX = SCREENWIDTH + 10 return [ {'x': pipeX, 'y': gapY", "{ 'playery': playery + playerShmVals['val'], 'basex': basex, 'playerIndexGen': playerIndexGen, } # adjust playery,", "score so player overlaps the score showScore(score) # Player rotation has a threshold", "- totalWidth) / 2 for digit in scoreDigits: SCREEN.blit(IMAGES['numbers'][digit], (Xoffset, SCREENHEIGHT * 0.1))", "score display IMAGES['numbers'] = ( pygame.image.load('assets/sprites/0.png').convert_alpha(), pygame.image.load('assets/sprites/1.png').convert_alpha(), pygame.image.load('assets/sprites/2.png').convert_alpha(), pygame.image.load('assets/sprites/3.png').convert_alpha(), pygame.image.load('assets/sprites/4.png').convert_alpha(), 
pygame.image.load('assets/sprites/5.png').convert_alpha(), pygame.image.load('assets/sprites/6.png').convert_alpha(), pygame.image.load('assets/sprites/7.png').convert_alpha(),", "pHitMask, lHitmask) if lCollide: # for fury mode we want to break the", "SCREENWIDTH * 0.2 playery = crashInfo['y'] playerHeight = IMAGES['player'][0].get_height() playerVelY = crashInfo['playerVelY'] playerAccY", "rotate the player if playerRot > -90: playerRot -= playerVelRot # player's movement", "- slider background # self.surf.set_colorkey(BLACK) pygame.draw.rect(self.surf, WHITE, [5, 30, 80, 5], 0) #", "if crashTest[0]: # the player hits a pipe in fury mode if FURYMODE", "False # Move volume slider if SLIDER.hit: SLIDER.move() for sounds in SOUNDS: SOUNDS[sounds].set_volume(SLIDER.val)", "3 # angular speed playerRotThr = 20 # rotation threshold playerFlapAcc = -9", "particle['y'])) SCREEN.blit(IMAGES['base'], (basex, BASEY)) # print score so player overlaps the score showScore(score)", "remove if the particle is under the ground if particle['y'] >= BASEY: particles.remove(particle)", "and event.button == 1): if playery > -2 * IMAGES['player'][0].get_height(): playerVelY = playerFlapAcc", "TOPFIVE[4][1] and count==0) : SCREEN.blit(IMAGES['gameover'], (overx,overy)) pygame.display.update() gameover = False pygame.time.delay(1000) SCREEN.blit(IMAGES['background'], (0,0))", "button box to correct screen position # screen SCREEN.blit(IMAGES['background'], (0,0)) SCREEN.blit(surf, (self.xpos, self.ypos))", "playerVelRot # draw sprites overx = int((SCREENWIDTH - IMAGES['gameover'].get_width()) / 2) overy =", "event.button == 1): if playery + playerHeight >= BASEY - 1: return #", "HITMASKS['pipe'] = ( getHitmask(IMAGES['pipe'][0]), getHitmask(IMAGES['pipe'][1]), ) # hitmask for player HITMASKS['player'] = (", "baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width() # no need to spawn pipes at start", "pygame.transform.rotate( 
pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), 180), pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), ) # pipes' particles for fury mode # pipes", "= random.random() * 10 + 5 aMin = -math.pi * .35 aMax =", "# make first flap sound and return values for mainGame SOUNDS['wing'].play() return {", "add new pipes when first pipe is about to touch left of screen", "vel along Y, max ascend speed playerAccY = 1 # players downward accleration", "5 aMin = -math.pi * .35 aMax = math.pi * .25 angle =", "checkCoin({'x': playerx, 'y': playery, 'index': playerIndex}, coins) if crashTest[0]: # the player hits", "particles if FURYMODE: for particle in particles: SCREEN.blit(IMAGES['pipe-particle'][particle['i']], (particle['x'], particle['y'])) SCREEN.blit(IMAGES['base'], (basex, BASEY))", "{'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y': newPipe2[1]['y']}, ] # 추가된", "if playery + playerHeight < BASEY - 1: playery += min(playerVelY, BASEY -", "amount by which base can maximum shift to left 'assets/sprites/bluebird-upflap.png', 'assets/sprites/bluebird-midflap.png', 'assets/sprites/bluebird-downflap.png', ),", "-8 # min vel along Y, max ascend speed playerAccY = 1 #", "self.ypos) # move of button box to correct screen position # screen SCREEN.blit(IMAGES['background'],", "crashTest[2]: upperPipes.remove(crashTest[3]) score+=1 # it's a lower pipe else: lowerPipes.remove(crashTest[3]) else: return {", "IMAGES['player'][0].get_width() player['h'] = IMAGES['player'][0].get_height() # if player crashes into ground if player['y'] +", "Y, default same as playerFlapped playerMaxVelY = 10 # max vel along Y,", "(255, 255, 0) GREEN = (0, 255, 50) BLUE = (50, 50, 255)", "+ playerShmVals['val'], 'basex': basex, 'playerIndexGen': playerIndexGen, } # (1) key for fury mode", "KEYDOWN: return event.key else: pass def display_box(screen, message): fontobject = pygame.font.Font(None,18) fontobject1 =", "= pygame.font.Font(None,30) SCREEN.blit(IMAGES['background'],(0,0)) 
SCREEN.blit(pygame.font.Font(None,50).render(\"LEADERBOARD\", 1, RED),((SCREEN.get_width() / 2) -132, (SCREEN.get_height() / 2) -220))", "In fury mode, the pipe sapwn system is different than in # normal", "flap) PLAYERS_LIST = ( # red bird ( 'assets/sprites/redbird-upflap.png', 'assets/sprites/redbird-midflap.png', 'assets/sprites/redbird-downflap.png', ), #", "(event.key == K_SPACE or event.key == K_UP)) or (event.type == MOUSEBUTTONDOWN and event.button", "if SLIDER.button_rect.collidepoint(pos): SLIDER.hit = True elif event.type == pygame.MOUSEBUTTONUP: SLIDER.hit = False #", "(aMax - aMin) + aMin particle['vx'] = math.cos(angle) * vel particle['vy'] = math.sin(angle)", "def main(): global SCREEN, FPSCLOCK, SLIDER pygame.init() FPSCLOCK = pygame.time.Clock() SCREEN = pygame.display.set_mode((SCREENWIDTH,", "return values for mainGame SOUNDS['wing'].play() return { 'playery': playery + playerShmVals['val'], 'basex': basex,", "pygame.mixer.Sound('assets/audio/die' + soundExt) SOUNDS['hit'] = pygame.mixer.Sound('assets/audio/hit' + soundExt) SOUNDS['point'] = pygame.mixer.Sound('assets/audio/point' + soundExt)", "SOUNDS['swoosh'] = pygame.mixer.Sound('assets/audio/swoosh' + soundExt) SOUNDS['wing'] = pygame.mixer.Sound('assets/audio/wing' + soundExt) # volume slider(defaultValue,", "- 1) IMAGES['player'] = ( pygame.image.load(PLAYERS_LIST[randPlayer][0]).convert_alpha(), pygame.image.load(PLAYERS_LIST[randPlayer][1]).convert_alpha(), pygame.image.load(PLAYERS_LIST[randPlayer][2]).convert_alpha(), ) # select random pipe", "particle['vx'] particle['y'] += particle['vy'] # gravity particle['vy'] += playerAccY # remove if the", "maximum, minimum, Xposition, Yposition) SLIDER = Slider(0.5, 1, 0, 190, 0) while True:", "in textures list} particles = [] # 추가된 부분 coins = [] else:", "for lPipe in lowerPipes: lPipe['x'] += pipeVelX # 추가된 부분 for coin in", "FPSCLOCK.tick(FPS) pygame.display.update() def showLeaderboard(): fontobject = pygame.font.Font(None,30) 
SCREEN.blit(IMAGES['background'],(0,0)) SCREEN.blit(pygame.font.Font(None,50).render(\"LEADERBOARD\", 1, RED),((SCREEN.get_width() / 2)", "추가된 부분 for coin in coins: coin['x'] += pipeVelX # update (add /", "soundExt) SOUNDS['hit'] = pygame.mixer.Sound('assets/audio/hit' + soundExt) SOUNDS['point'] = pygame.mixer.Sound('assets/audio/point' + soundExt) SOUNDS['swoosh'] =", "next(playerIndexGen) loopIter = (loopIter + 1) % 30 basex = -((-basex + 4)", "1 # players downward accleration playerRot = 45 # player's rotation playerVelRot =", "False, False, lPipe] # normal mode return [True, False] return [False, False] #", "200,20), 0) pygame.draw.rect(screen, (255,255,255), ((screen.get_width() / 2) - 102, (screen.get_height() / 2) -", "False # In fury mode, the pipe sapwn system is different than in", "1 # random angle for a minimum velocity vel = random.random() * 10", "= player['index'] player['w'] = IMAGES['player'][0].get_width() player['h'] = IMAGES['player'][0].get_height() # if player crashes into", "flap playerVelY = -9 # player's velocity along Y, default same as playerFlapped", "for each pipe hit FURYMODE_PARTICLES_MAX = 48 # list of all possible players", "= '.ogg' SOUNDS['die'] = pygame.mixer.Sound('assets/audio/die' + soundExt) SOUNDS['hit'] = pygame.mixer.Sound('assets/audio/hit' + soundExt) SOUNDS['point']", "- 102, (screen.get_height() / 2) - 12, 204,24), 1) if len(message) != 0:", "GREEN = (0, 255, 50) BLUE = (50, 50, 255) GREY = (200,", "pygame.display.set_caption('Flappy Bird') # numbers sprites for score display IMAGES['numbers'] = ( pygame.image.load('assets/sprites/0.png').convert_alpha(), pygame.image.load('assets/sprites/1.png').convert_alpha(),", "the counter has the max value, we must spawn new pipes if furymodePipeFrameCounter", "def showLeaderboard(): fontobject = pygame.font.Font(None,30) SCREEN.blit(IMAGES['background'],(0,0)) SCREEN.blit(pygame.font.Font(None,50).render(\"LEADERBOARD\", 1, RED),((SCREEN.get_width() / 2) -132, 
(SCREEN.get_height()", "sys import math import pygame from pygame.locals import * import time WHITE =", "copy of the basic slide surface \"\"\" # static surf = self.surf.copy() #", "# message sprite for welcome screen IMAGES['message'] = pygame.image.load('assets/sprites/message.png').convert_alpha() # base (ground) sprite", "surf.blit(self.button_surf, self.button_rect) self.button_rect.move_ip(self.xpos, self.ypos) # move of button box to correct screen position", "move(self): \"\"\" The dynamic part; reacts to movement of the slider button. \"\"\"", "inkey == K_RETURN: break elif inkey == K_MINUS: current_string.append(\"_\") elif inkey <= 127:", "SCREEN.blit(IMAGES['furymode'], (furymodex, furymodey)) SCREEN.blit(IMAGES['furymode-key'], (furymodeKeyX, furymodeKeyY)) pygame.display.update() FPSCLOCK.tick(FPS) def mainGame(movementInfo): global FURYMODE, FURYMODE_FRAMES_TO_SPAWN_PIPES,", "FURYMODE_PARTICLES = 8 # max particles for each pipe hit FURYMODE_PARTICLES_MAX = 48", "colliding (lower or upper) if FURYMODE: return [True, False, True, uPipe] # normal", "(50*i))) FPSCLOCK.tick(FPS) pygame.display.update() def playerShm(playerShm): \"\"\"oscillates the value of playerShm['val'] between 8 and", "screen\"\"\" scoreDigits = [int(x) for x in list(str(score))] totalWidth = 0 # total", "possible players (tuple of 3 positions of flap) PLAYERS_LIST = ( # red", "aMin particle['vx'] = math.cos(angle) * vel particle['vy'] = math.sin(angle) * vel particles.append(particle) #", "/ 2) - 102, (screen.get_height() / 2) - 12, 204,24), 1) if len(message)", "SCREENWIDTH + 100 return [ {'x': coinX, 'y': coinY}, ] def showScore(score): \"\"\"displays", "+= pipeVelX # update (add / remove) pipes and particles if FURYMODE: furymodePipeFrameCounter", "pygame.image.load('assets/sprites/particles-red-5.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-6.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-7.png').convert_alpha(), ) # hismask for 
pipes HITMASKS['pipe'] = ( getHitmask(IMAGES['pipe'][0]), getHitmask(IMAGES['pipe'][1]),", "MOUSEBUTTONDOWN and event.button == 1) and IMAGES['hardmode'].get_rect(center=(hardmodex+54,hardmodey+14)).collidepoint(pygame.mouse.get_pos())): SOUNDS['wing'].play() return { 'playery': playery +", "= {'val': 0, 'dir': 1} # initialize volume for sound in SOUNDS: SOUNDS[sound].set_volume(SLIDER.val)", "KEYDOWN and event.key == K_ESCAPE): pygame.quit() sys.exit() if (event.type == KEYDOWN and (event.key", "digit in scoreDigits: SCREEN.blit(IMAGES['numbers'][digit], (Xoffset, SCREENHEIGHT * 0.1)) Xoffset += IMAGES['numbers'][digit].get_width() def spawnParticles(particles,", "'i': index in textures list} particles = [] # 추가된 부분 coins =", "to upperPipes lowerPipes list newPipe1 = getRandomPipe(DIFFICULTY) newPipe2 = getRandomPipe(DIFFICULTY) # list of", "SCREEN.blit(IMAGES['hardmode'],(hardmodex,hardmodey)) SCREEN.blit(IMAGES['furymode'], (furymodex, furymodey)) SCREEN.blit(IMAGES['furymode-key'], (furymodeKeyX, furymodeKeyY)) pygame.display.update() FPSCLOCK.tick(FPS) def mainGame(movementInfo): global FURYMODE,", "SCREEN.blit(playerSurface, (playerx,playery)) #showScore(score) if (score > TOPFIVE[4][1] and count==0) : SCREEN.blit(IMAGES['gameover'], (overx,overy)) pygame.display.update()", "left for uPipe in upperPipes: uPipe['x'] += pipeVelX for lPipe in lowerPipes: lPipe['x']", "+ IMAGES['player'][0].get_width() / 2 for pipe in upperPipes: pipeMidPos = pipe['x'] + IMAGES['pipe'][0].get_width()", "if uCollide: # for fury mode we want to break the pipe so", "부분 def checkCoin(player, coins): pi = player['index'] player['w'] = IMAGES['player'][0].get_width() player['h'] = IMAGES['player'][0].get_height()", "True, uPipe] # normal mode return [True, False] for lPipe in lowerPipes: #", "[] else: if EASYMODE: DIFFICULTY = 4 # get 2 new pipes to", "if inkey == K_BACKSPACE: current_string = current_string[0:-1] elif inkey == K_RETURN: break elif", "return [True, False] return 
[False, False] # 추가된 부분 def checkCoin(player, coins): pi", ": return [True, False] return [False, False] def pixelCollision(rect1, rect2, hitmask1, hitmask2): \"\"\"Checks", "if self.val < self.mini: self.val = self.mini if self.val > self.maxi: self.val =", "== MOUSEBUTTONDOWN and event.button == 1) and IMAGES['hardmode'].get_rect(center=(hardmodex+54,hardmodey+14)).collidepoint(pygame.mouse.get_pos())): SOUNDS['wing'].play() return { 'playery': playery", "pygame.K_z: 'Z'} def main(): global SCREEN, FPSCLOCK, SLIDER pygame.init() FPSCLOCK = pygame.time.Clock() SCREEN", "pipe if its out of the screen if upperPipes[0]['x'] < -IMAGES['pipe'][0].get_width(): lowerPipes.pop(0) upperPipes.pop(0)", "'y': newPipe1[1]['y']}, {'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y': newPipe2[1]['y']}, ]", "sound and hitmask dicts IMAGES, SOUNDS, HITMASKS = {}, {}, {} # True", "basex = movementInfo['basex'] baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width() # no need to spawn", "+ soundExt) SOUNDS['point'] = pygame.mixer.Sound('assets/audio/point' + soundExt) SOUNDS['swoosh'] = pygame.mixer.Sound('assets/audio/swoosh' + soundExt) SOUNDS['wing']", "shift to left baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width() # player shm for up-down", "visibleRot = playerRotThr if playerRot <= playerRotThr: visibleRot = playerRot playerSurface = pygame.transform.rotate(IMAGES['player'][playerIndex],", "for digit in scoreDigits: totalWidth += IMAGES['numbers'][digit].get_width() Xoffset = (SCREENWIDTH - totalWidth) /", "SCREEN.blit(IMAGES['message'], (messagex, messagey)) SCREEN.blit(IMAGES['base'], (basex, BASEY)) SCREEN.blit(IMAGES['easymode'],(easymodex,easymodey)) SCREEN.blit(IMAGES['hardmode'],(hardmodex,hardmodey)) SCREEN.blit(IMAGES['furymode'], (furymodex, furymodey)) SCREEN.blit(IMAGES['furymode-key'], (furymodeKeyX,", "33) self.button_rect = self.button_surf.get_rect(center=pos) surf.blit(self.button_surf, self.button_rect) 
self.button_rect.move_ip(self.xpos, self.ypos) # move of button box", "draw sprites overx = int((SCREENWIDTH - IMAGES['gameover'].get_width()) / 2) overy = int(SCREENHEIGHT *", "= furymodey + IMAGES['furymode-key'].get_height() / 2 basex = 0 # amount by which", "else: IMAGES['pipe-particle'] = ( pygame.image.load('assets/sprites/particles-red-0.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-1.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-2.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-3.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-4.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-5.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-6.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-7.png').convert_alpha(), )", "= movementInfo['basex'] baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width() # no need to spawn pipes", "self.surf.set_colorkey(BLACK) pygame.draw.rect(self.surf, WHITE, [5, 30, 80, 5], 0) # dynamic graphics - button", "SCREEN.blit(IMAGES['coin'], (coin['x'], coin['y'])) # pipes' particles if FURYMODE: for particle in particles: SCREEN.blit(IMAGES['pipe-particle'][particle['i']],", "# amount by which base can maximum shift to left 'assets/sprites/bluebird-upflap.png', 'assets/sprites/bluebird-midflap.png', 'assets/sprites/bluebird-downflap.png',", "change if playerVelY < 15: playerVelY += playerAccY # rotate only when it's", "key for easymode if (event.type == KEYDOWN and event.key == K_2) or ((event.type", "0: screen.blit(fontobject1.render(\"HIGH SCORE!!!\", 1, (255,255,255)), ((screen.get_width() / 2) - 75, (screen.get_height() / 2)", "rect uPipeRect = pygame.Rect(uPipe['x'], uPipe['y'], pipeW, pipeH) # player and pipe hitmasks pHitMask", "slider position left self.xpos = xpos # x-location on screen self.ypos = ypos", "+= IMAGES['numbers'][digit].get_width() def 
spawnParticles(particles, pipe): \"\"\" Add paticles to the particle list randomly", "the threshold (calculated in visible rotation) playerRot = 45 playerHeight = IMAGES['player'][playerIndex].get_height() playery", "( pygame.image.load('assets/sprites/particles-green-0.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-1.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-2.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-3.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-4.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-5.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-6.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-7.png').convert_alpha(), ) else: IMAGES['pipe-particle'] =", "((event.type == MOUSEBUTTONDOWN and event.button == 1) and IMAGES['hardmode'].get_rect(center=(hardmodex+54,hardmodey+14)).collidepoint(pygame.mouse.get_pos())): SOUNDS['wing'].play() return { 'playery':", "itertools import cycle from operator import itemgetter import random import sys import math", "3 == 0: playerIndex = next(playerIndexGen) loopIter = (loopIter + 1) % 30", "Move volume slider if SLIDER.hit: SLIDER.move() for sounds in SOUNDS: SOUNDS[sounds].set_volume(SLIDER.val) #(2) key", "randBg = random.randint(0, len(BACKGROUNDS_LIST) - 1) IMAGES['background'] = pygame.image.load(BACKGROUNDS_LIST[randBg]).convert() # select random player", "== K_UP)) or (event.type == MOUSEBUTTONDOWN and event.button == 1): if playery +", "5], 0) # dynamic graphics - button surface # self.button_surf = pygame.surface.Surface((15, 15))", "crashInfo['lowerPipes'] # play hit and die sounds SOUNDS['hit'].play() if not crashInfo['groundCrash']: SOUNDS['die'].play() while", "'F', pygame.K_g: 'G', pygame.K_h: 'H', pygame.K_i: 'I', pygame.K_j: 'J', pygame.K_k: 'K', pygame.K_l: 'L',", "particles if FURYMODE: furymodePipeFrameCounter += 1 # the 
counter has the max value,", "min(playerVelY, BASEY - playery - playerHeight) # player velocity change if playerVelY <", "self.button_rect) self.button_rect.move_ip(self.xpos, self.ypos) # move of button box to correct screen position #", "for uPipe in upperPipes: if uPipe['x'] < -IMAGES['pipe'][0].get_width(): upperPipes.remove(uPipe) for lPipe in lowerPipes:", "\"\".join(current_string)) return \"\".join(current_string) class Slider(): def __init__(self, val, maxi, mini, xpos, ypos): self.val", "the player if playerRot > -90: playerRot -= playerVelRot # player's movement if", "== QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE): pygame.quit() sys.exit() if", "(event.type == MOUSEBUTTONDOWN and event.button == 1): if playery > -2 * IMAGES['player'][0].get_height():", "when first pipe is about to touch left of screen if 0 <", "+= playerAccY # rotate only when it's a pipe crash if not crashInfo['groundCrash']:", "random import sys import math import pygame from pygame.locals import * import time", "and IMAGES['hardmode'].get_rect(center=(hardmodex+54,hardmodey+14)).collidepoint(pygame.mouse.get_pos())): SOUNDS['wing'].play() return { 'playery': playery + playerShmVals['val'], 'basex': basex, 'playerIndexGen': playerIndexGen,", "= math.sin(angle) * vel particles.append(particle) # sound effect SOUNDS['hit'].play() def checkCrash(player, upperPipes, lowerPipes):", "between 8 and -8\"\"\" if abs(playerShm['val']) == 8: playerShm['dir'] *= -1 if playerShm['dir']", "'G', pygame.K_h: 'H', pygame.K_i: 'I', pygame.K_j: 'J', pygame.K_k: 'K', pygame.K_l: 'L', pygame.K_m: 'M',", "{'x': SCREENWIDTH + 280 + (SCREENWIDTH / 2), 'y': newCoin2[0]['y']}, ] pipeVelX =", "generated coin \"\"\" coinY = random.randrange(20, int(BASEY * 0.6)) coinX = SCREENWIDTH +", "#showScore(score) if (score > TOPFIVE[4][1] and count==0) : SCREEN.blit(IMAGES['gameover'], (overx,overy)) pygame.display.update() gameover =", "crashes into ground if player['y'] + player['h'] >= BASEY - 1: return 
[True,", "(basex, BASEY)) # print score so player overlaps the score showScore(score) # Player", "update (add / remove) pipes and particles if FURYMODE: furymodePipeFrameCounter += 1 #", "current_string.append(chr(inkey)) display_box(screen, question + \": \" + \"\".join(current_string)) return \"\".join(current_string) class Slider(): def", "inkey == K_MINUS: current_string.append(\"_\") elif inkey <= 127: current_string.append(chr(inkey)) display_box(screen, question + \":", "int((SCREENWIDTH - IMAGES['easymode'].get_width())/2) easymodey = int(SCREENHEIGHT * 0.68) hardmodex = int((SCREENWIDTH - IMAGES['hardmode'].get_width())/2)", "pygame.init() FPSCLOCK = pygame.time.Clock() SCREEN = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT)) pygame.display.set_caption('Flappy Bird') # numbers sprites", "pygame.image.load('assets/sprites/particles-green-2.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-3.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-4.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-5.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-6.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-7.png').convert_alpha(), ) else: IMAGES['pipe-particle'] = ( pygame.image.load('assets/sprites/particles-red-0.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-1.png').convert_alpha(),", "we add pipes with a \"timer\" (a frame counter) FURYMODE_FRAMES_TO_SPAWN_PIPES = 35 #", ">= BASEY - 1: return [True, True] else: playerRect = pygame.Rect(player['x'], player['y'], player['w'],", "the slider button. 
\"\"\" self.val = (pygame.mouse.get_pos()[0] - self.xpos - 10) / 80", "* 0.6 - PIPEGAPSIZE)) gapY += int(BASEY * 0.2) pipeHeight = IMAGES['pipe'][0].get_height() pipeX", "pipes HITMASKS['pipe'] = ( getHitmask(IMAGES['pipe'][0]), getHitmask(IMAGES['pipe'][1]), ) # hitmask for player HITMASKS['player'] =", "playerShm['val'] += 1 else: playerShm['val'] -= 1 def getRandomPipe(DIFFICULTY): PIPEGAPSIZE = 100 +", "upperPipes, lowerPipes) # 추가된 부분 coinTest = checkCoin({'x': playerx, 'y': playery, 'index': playerIndex},", "aMin) + aMin particle['vx'] = math.cos(angle) * vel particle['vy'] = math.sin(angle) * vel", "SCREEN.blit(IMAGES['furymode-key'], (furymodeKeyX, furymodeKeyY)) pygame.display.update() FPSCLOCK.tick(FPS) def mainGame(movementInfo): global FURYMODE, FURYMODE_FRAMES_TO_SPAWN_PIPES, EASYMODE DIFFICULTY =", "= checkCrash({'x': playerx, 'y': playery, 'index': playerIndex}, upperPipes, lowerPipes) # 추가된 부분 coinTest", "sprites for score display IMAGES['numbers'] = ( pygame.image.load('assets/sprites/0.png').convert_alpha(), pygame.image.load('assets/sprites/1.png').convert_alpha(), pygame.image.load('assets/sprites/2.png').convert_alpha(), pygame.image.load('assets/sprites/3.png').convert_alpha(), pygame.image.load('assets/sprites/4.png').convert_alpha(), pygame.image.load('assets/sprites/5.png').convert_alpha(),", "pipeX, 'y': gapY - pipeHeight}, # upper pipe {'x': pipeX, 'y': gapY +", "left self.xpos = xpos # x-location on screen self.ypos = ypos # y-location", "- IMAGES['hardmode'].get_width())/2) hardmodey = int(SCREENHEIGHT * 0.74) furymodex = int((SCREENWIDTH - IMAGES['furymode'].get_width()) /", "2) messagey = int(SCREENHEIGHT * 0.12) easymodex = int((SCREENWIDTH - IMAGES['easymode'].get_width())/2) easymodey =", "dynamic graphics - button surface # self.button_surf = pygame.surface.Surface((15, 15)) self.button_surf.fill(TRANS) self.button_surf.set_colorkey(TRANS) pygame.draw.circle(self.button_surf,", "initialize volume for sound in SOUNDS: 
SOUNDS[sound].set_volume(SLIDER.val) while True: for event in pygame.event.get():", "if (event.type == KEYDOWN and event.key == K_1) or ((event.type == MOUSEBUTTONDOWN and", "{'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y': newPipe2[0]['y']}, ] # list", "+= 1 # the counter has the max value, we must spawn new", "global FURYMODE, EASYMODE FURYMODE = False EASYMODE = False score = crashInfo['score'] playerx", "== 8: playerShm['dir'] *= -1 if playerShm['dir'] == 1: playerShm['val'] += 1 else:", "# True if the user plays the fury mode FURYMODE = False EASYMODE", "= ( getHitmask(IMAGES['pipe'][0]), getHitmask(IMAGES['pipe'][1]), ) # hitmask for player HITMASKS['player'] = ( getHitmask(IMAGES['player'][0]),", "= ( # red bird ( 'assets/sprites/redbird-upflap.png', 'assets/sprites/redbird-midflap.png', 'assets/sprites/redbird-downflap.png', ), # blue bird", "if uPipe['x'] < -IMAGES['pipe'][0].get_width(): upperPipes.remove(uPipe) for lPipe in lowerPipes: if lPipe['x'] < -IMAGES['pipe'][0].get_width():", "# normal mode return [True, False] for lPipe in lowerPipes: # pipe rect", "'groundCrash': crashTest[1], 'basex': basex, 'upperPipes': upperPipes, 'lowerPipes': lowerPipes, 'score': score, 'playerVelY': playerVelY, 'playerRot':", "it's a lower pipe else: lowerPipes.remove(crashTest[3]) else: return { 'y': playery, 'groundCrash': crashTest[1],", "'.wav' else: soundExt = '.ogg' SOUNDS['die'] = pygame.mixer.Sound('assets/audio/die' + soundExt) SOUNDS['hit'] = pygame.mixer.Sound('assets/audio/hit'", "# game over sprite IMAGES['gameover'] = pygame.image.load('assets/sprites/gameover.png').convert_alpha() # message sprite for welcome screen", "pygame.image.load('assets/sprites/particles-green-3.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-4.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-5.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-6.png').convert_alpha(), 
pygame.image.load('assets/sprites/particles-green-7.png').convert_alpha(), ) else: IMAGES['pipe-particle'] = ( pygame.image.load('assets/sprites/particles-red-0.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-1.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-2.png').convert_alpha(),", "lowerPipes: # pipe rect lPipeRect = pygame.Rect(lPipe['x'], lPipe['y'], pipeW, pipeH) # player and", "select random player sprites randPlayer = random.randint(0, len(PLAYERS_LIST) - 1) IMAGES['player'] = (", "\"\"\" global FURYMODE_PARTICLES, FURYMODE_PARTICLES_MAX, SOUNDS pipeW = IMAGES['pipe'][0].get_width() pipeH = IMAGES['pipe'][0].get_height() for i", "= playerRot playerSurface = pygame.transform.rotate(IMAGES['player'][playerIndex], visibleRot) SCREEN.blit(playerSurface, (playerx, playery)) pygame.display.update() FPSCLOCK.tick(FPS) def showGameOverScreen(crashInfo):", "- 1 # random angle for a minimum velocity vel = random.random() *", "randomly generated with pipe's rectangle (hitbox) \"\"\" global FURYMODE_PARTICLES, FURYMODE_PARTICLES_MAX, SOUNDS pipeW =", "- rect2.x, rect.y - rect2.y for x in xrange(rect.width): for y in xrange(rect.height):", "= True SOUNDS['wing'].play() # check for crash here crashTest = checkCrash({'x': playerx, 'y':", "base (ground) sprite IMAGES['base'] = pygame.image.load('assets/sprites/base.png').convert_alpha() # the \"fury mode\" button for welcome", "self.surf = pygame.surface.Surface((95, 40)) self.hit = False # the hit attribute indicates slider", "score = crashInfo['score'] playerx = SCREENWIDTH * 0.2 playery = crashInfo['y'] playerHeight =", "upper pipes upperPipes = [] # list of lowerpipe lowerPipes = [] #", "if SLIDER.hit: SLIDER.move() for sounds in SOUNDS: SOUNDS[sounds].set_volume(SLIDER.val) #(2) key for easymode if", "minimum, Xposition, Yposition) SLIDER = Slider(0.5, 1, 0, 190, 0) while True: #", "playerRot -= playerVelRot # player's movement if playerVelY < playerMaxVelY and not 
playerFlapped:", "self.button_rect.move_ip(self.xpos, self.ypos) # move of button box to correct screen position # screen", "playerx = SCREENWIDTH * 0.2 playery = crashInfo['y'] playerHeight = IMAGES['player'][0].get_height() playerVelY =", "coins.append(newCoin[0]) # remove first pipe if its out of the screen if upperPipes[0]['x']", "255, 255) BLACK = (0, 0, 0) RED = (255, 50, 50) YELLOW", "and -8\"\"\" if abs(playerShm['val']) == 8: playerShm['dir'] *= -1 if playerShm['dir'] == 1:", "draw sprites SCREEN.blit(IMAGES['background'], (0,0)) for uPipe in upperPipes: SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y'])) for lPipe", "we must spawn new pipes if furymodePipeFrameCounter == FURYMODE_FRAMES_TO_SPAWN_PIPES: # counter reset furymodePipeFrameCounter", "pipeW) particle['y'] = random.randint(pipe['y'], pipe['y'] + pipeH) particle['i'] = random.randint(1, FURYMODE_PARTICLES) - 1", "playerHeight = IMAGES['player'][playerIndex].get_height() playery += min(playerVelY, BASEY - playery - playerHeight) # move", "= pygame.surface.Surface((95, 40)) self.hit = False # the hit attribute indicates slider movement", "pygame.image.load('assets/sprites/particles-red-3.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-4.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-5.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-6.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-7.png').convert_alpha(), ) # hismask for pipes HITMASKS['pipe'] = (", "+= 1 else: playerShm['val'] -= 1 def getRandomPipe(DIFFICULTY): PIPEGAPSIZE = 100 + DIFFICULTY", "1) and IMAGES['furymode'].get_rect(center=(furymodex+54,furymodey+14)).collidepoint(pygame.mouse.get_pos())): # make first flap sound and return values for mainGame", "random.randint(1, FURYMODE_PARTICLES) - 1 # random angle for a minimum velocity vel =", "attributes: # {'x': position-x, 'y': position-y, # 'vx': velocity-x, 'vy': velocity-y, # 'i':", "+ 
(50*i))) FPSCLOCK.tick(FPS) pygame.display.update() def playerShm(playerShm): \"\"\"oscillates the value of playerShm['val'] between 8", "playery + playerShmVals['val'], 'basex': basex, 'playerIndexGen': playerIndexGen, } # adjust playery, playerIndex, basex", "(tuple of 3 positions of flap) PLAYERS_LIST = ( # red bird (", "check for score playerMidPos = playerx + IMAGES['player'][0].get_width() / 2 for pipe in", "100 + DIFFICULTY * 10 \"\"\" returns a randomly generated pipe \"\"\" #", "screen if upperPipes[0]['x'] < -IMAGES['pipe'][0].get_width(): lowerPipes.pop(0) upperPipes.pop(0) # 추가된 부분 if coins[0]['x'] <", "mini, xpos, ypos): self.val = val # start value self.maxi = maxi #", "button surface # self.button_surf = pygame.surface.Surface((15, 15)) self.button_surf.fill(TRANS) self.button_surf.set_colorkey(TRANS) pygame.draw.circle(self.button_surf, ORANGE, (6, 6),", "rect.x - rect2.x, rect.y - rect2.y for x in xrange(rect.width): for y in", "pygame.K_k: 'K', pygame.K_l: 'L', pygame.K_m: 'M', pygame.K_n: 'N', pygame.K_o: 'O', pygame.K_p: 'P', pygame.K_q:", "SCREEN.blit(IMAGES['speaker'][0], (160,15)) else : SCREEN.blit(IMAGES['speaker'][1], (160,15)) SCREEN.blit(IMAGES['player'][playerIndex], (playerx, playery + playerShmVals['val'])) SCREEN.blit(IMAGES['message'], (messagex,", "(score > TOPFIVE[4][1] and count==0) : SCREEN.blit(IMAGES['gameover'], (overx,overy)) pygame.display.update() gameover = False pygame.time.delay(1000)", "background # self.surf.set_colorkey(BLACK) pygame.draw.rect(self.surf, WHITE, [5, 30, 80, 5], 0) # dynamic graphics", "dynamic pos = (10+int((self.val-self.mini)/(self.maxi-self.mini)*80), 33) self.button_rect = self.button_surf.get_rect(center=pos) surf.blit(self.button_surf, self.button_rect) self.button_rect.move_ip(self.xpos, self.ypos) #", "부분 for coin in coins: SCREEN.blit(IMAGES['coin'], (coin['x'], coin['y'])) # pipes' particles if FURYMODE:", "upperPipes: pipeMidPos = pipe['x'] + IMAGES['pipe'][0].get_width() / 2 if pipeMidPos 
<= playerMidPos <", "and not playerFlapped: playerVelY += playerAccY if playerFlapped: playerFlapped = False # more", "= getRandomCoin() coins = [ {'x': SCREENWIDTH + 280, 'y': newCoin1[0]['y']}, {'x': SCREENWIDTH", "left of screen if 0 < upperPipes[0]['x'] < 5: newPipe = getRandomPipe(DIFFICULTY) upperPipes.append(newPipe[0])", "\"\".join(current_string)) while 1: inkey = get_key() if inkey == K_BACKSPACE: current_string = current_string[0:-1]", "volume for sound in SOUNDS: SOUNDS[sound].set_volume(SLIDER.val) while True: for event in pygame.event.get(): if", "'assets/sprites/yellowbird-downflap.png', ), ) # list of backgrounds BACKGROUNDS_LIST = ( 'assets/sprites/background-day.png', 'assets/sprites/background-night.png', )", "'dir': 1} # initialize volume for sound in SOUNDS: SOUNDS[sound].set_volume(SLIDER.val) while True: for", "playerIndex basex change if (loopIter + 1) % 3 == 0: playerIndex =", "-= playerVelRot # player's movement if playerVelY < playerMaxVelY and not playerFlapped: playerVelY", "# pipe rect uPipeRect = pygame.Rect(uPipe['x'], uPipe['y'], pipeW, pipeH) # player and pipe", "(playerx, playery + playerShmVals['val'])) SCREEN.blit(IMAGES['message'], (messagex, messagey)) SCREEN.blit(IMAGES['base'], (basex, BASEY)) SCREEN.blit(IMAGES['easymode'],(easymodex,easymodey)) SCREEN.blit(IMAGES['hardmode'],(hardmodex,hardmodey)) SCREEN.blit(IMAGES['furymode'],", "in coins: coin['x'] += pipeVelX # update (add / remove) pipes and particles", "= mainGame(movementInfo) showGameOverScreen(crashInfo) def showWelcomeAnimation(): \"\"\"Shows welcome screen animation of flappy bird\"\"\" global", "coin in coins: SCREEN.blit(IMAGES['coin'], (coin['x'], coin['y'])) # pipes' particles if FURYMODE: for particle", "280 + (SCREENWIDTH / 2), 'y': newCoin2[0]['y']}, ] pipeVelX = -4 # player", "answer\" pygame.font.init() current_string = [] display_box(screen, question + \": \" + \"\".join(current_string)) while", "# 추가된 부분 def getRandomCoin(): \"\"\" returns a randomly 
generated coin \"\"\" coinY", "= True SOUNDS['wing'].play() return { 'playery': playery + playerShmVals['val'], 'basex': basex, 'playerIndexGen': playerIndexGen,", "BASEY)) showScore(score) playerSurface = pygame.transform.rotate(IMAGES['player'][1], playerRot) SCREEN.blit(playerSurface, (playerx,playery)) #showScore(score) if (score > TOPFIVE[4][1]", "middle of the screen\" pygame.draw.rect(screen, (0,0,0), ((screen.get_width() / 2) - 100, (screen.get_height() /", "4: score += 1 SOUNDS['point'].play() # playerIndex basex change if (loopIter + 1)", "particle = {} particle['x'] = random.randint(pipe['x'], pipe['x'] + pipeW) particle['y'] = random.randint(pipe['y'], pipe['y']", "lPipe in lowerPipes: SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y'])) # 추가된 부분 for coin in coins:", "/ 2) - 100, (screen.get_height() / 2) - 10)) pygame.display.flip() def ask(screen, question):", "particle['y'] >= BASEY: particles.remove(particle) else: # add new pipes when first pipe is", "the list for uPipe in upperPipes: if uPipe['x'] < -IMAGES['pipe'][0].get_width(): upperPipes.remove(uPipe) for lPipe", "+ DIFFICULTY * 10 \"\"\" returns a randomly generated pipe \"\"\" # y", "playerx, 'y': playery, 'index': playerIndex}, upperPipes, lowerPipes) # 추가된 부분 coinTest = checkCoin({'x':", "(screen.get_height() / 2) - 12, 204,24), 1) if len(message) != 0: screen.blit(fontobject1.render(\"HIGH SCORE!!!\",", "else: soundExt = '.ogg' SOUNDS['die'] = pygame.mixer.Sound('assets/audio/die' + soundExt) SOUNDS['hit'] = pygame.mixer.Sound('assets/audio/hit' +", "slider if SLIDER.hit: SLIDER.move() for sounds in SOUNDS: SOUNDS[sounds].set_volume(SLIDER.val) #(2) key for easymode", "showScore(score) # Player rotation has a threshold visibleRot = playerRotThr if playerRot <=", "* 0.6)) coinX = SCREENWIDTH + 100 return [ {'x': coinX, 'y': coinY},", "= int((SCREENWIDTH - IMAGES['furymode'].get_width()) / 2) furymodey = int(SCREENHEIGHT * 0.80) # just", "in lowerPipes: lPipe['x'] += pipeVelX # 추가된 부분 
for coin in coins: coin['x']", "sprites SCREEN.blit(IMAGES['background'], (0,0)) SLIDER.draw() if(SLIDER.val>0): SCREEN.blit(IMAGES['speaker'][0], (160,15)) else : SCREEN.blit(IMAGES['speaker'][1], (160,15)) SCREEN.blit(IMAGES['player'][playerIndex], (playerx,", "pygame.image.load('assets/sprites/8.png').convert_alpha(), pygame.image.load('assets/sprites/9.png').convert_alpha() ) # game over sprite IMAGES['gameover'] = pygame.image.load('assets/sprites/gameover.png').convert_alpha() # message sprite", "crashInfo = mainGame(movementInfo) showGameOverScreen(crashInfo) def showWelcomeAnimation(): \"\"\"Shows welcome screen animation of flappy bird\"\"\"", "+= playerAccY # remove if the particle is under the ground if particle['y']", "particle in particles: # speed particle['x'] += particle['vx'] particle['y'] += particle['vy'] # gravity", "__init__(self, val, maxi, mini, xpos, ypos): self.val = val # start value self.maxi", "lower part of pipe BASEY=SCREENHEIGHT * 0.79 # image, sound and hitmask dicts", "along Y, max descend speed playerMinVelY = -8 # min vel along Y,", "lPipe['x'] < -IMAGES['pipe'][0].get_width(): lowerPipes.remove(lPipe) # particles for particle in particles: # speed particle['x']", "and event.button == 1) and IMAGES['hardmode'].get_rect(center=(hardmodex+54,hardmodey+14)).collidepoint(pygame.mouse.get_pos())): SOUNDS['wing'].play() return { 'playery': playery + playerShmVals['val'],", "# list of lowerpipe lowerPipes = [] # list of particles # a", "BASEY - playery - playerHeight) # player velocity change if playerVelY < 15:", "50, 255) GREY = (200, 200, 200) ORANGE = (200, 100, 50) CYAN", "/ 2) overy = int(SCREENHEIGHT * 0.5) #SCREEN.blit(IMAGES['background'], (0,0)) for uPipe, lPipe in", "5 == 0: playerIndex = next(playerIndexGen) loopIter = (loopIter + 1) % 30", "lower pipe ] # 추가된 부분 def getRandomCoin(): \"\"\" returns a randomly generated", "if player['y'] + player['h'] >= BASEY - 1: return [True, True] else: playerRect", "= 
pygame.image.load('assets/sprites/easymode.png').convert_alpha() IMAGES['hardmode'] = pygame.image.load('assets/sprites/hardmode.png').convert_alpha() # speaker sprite IMAGES['speaker'] = (pygame.image.load('assets/sprites/speaker_full.png').convert_alpha(), pygame.image.load('assets/sprites/speaker_mute.png').convert_alpha()) #", "lowerPipes) # 추가된 부분 coinTest = checkCoin({'x': playerx, 'y': playery, 'index': playerIndex}, coins)", "if 'win' in sys.platform: soundExt = '.wav' else: soundExt = '.ogg' SOUNDS['die'] =", "(255, 0, 255) TRANS = (1, 1, 1) TOPFIVE=[('kim',0),('kim',0),('kim',0),('kim',0),('kim',0)] FPS = 30 SCREENWIDTH", "= loopIter = 0 playerIndexGen = movementInfo['playerIndexGen'] playerx, playery = int(SCREENWIDTH * 0.2),", "= False pygame.time.delay(1000) showLeaderboard() FPSCLOCK.tick(FPS) pygame.display.update() def showLeaderboard(): fontobject = pygame.font.Font(None,30) SCREEN.blit(IMAGES['background'],(0,0)) SCREEN.blit(pygame.font.Font(None,50).render(\"LEADERBOARD\",", "playerShm['val'] -= 1 def getRandomPipe(DIFFICULTY): PIPEGAPSIZE = 100 + DIFFICULTY * 10 \"\"\"", "HITMASKS['coin'][0] cCollide = pixelCollision(playerRect, coinRect, pHitMask, cHitMask) if cCollide : return [True, False]", "surface # self.button_surf = pygame.surface.Surface((15, 15)) self.button_surf.fill(TRANS) self.button_surf.set_colorkey(TRANS) pygame.draw.circle(self.button_surf, ORANGE, (6, 6), 6,", "system is different than in # normal mode, we add pipes with a", "if cCollide : return [True, False] return [False, False] def pixelCollision(rect1, rect2, hitmask1,", "of button box to correct screen position # screen SCREEN.blit(IMAGES['background'], (0,0)) SCREEN.blit(surf, (self.xpos,", "pipes upperPipes = [] # list of lowerpipe lowerPipes = [] # list", "global FURYMODE, EASYMODE # index of player to blit on screen playerIndex =", "-= playerVelRot # draw sprites overx = int((SCREENWIDTH - IMAGES['gameover'].get_width()) / 2) overy", "pipes to left for uPipe in 
upperPipes: uPipe['x'] += pipeVelX for lPipe in", "2 for pipe in upperPipes: pipeMidPos = pipe['x'] + IMAGES['pipe'][0].get_width() / 2 if", "Bird') # numbers sprites for score display IMAGES['numbers'] = ( pygame.image.load('assets/sprites/0.png').convert_alpha(), pygame.image.load('assets/sprites/1.png').convert_alpha(), pygame.image.load('assets/sprites/2.png').convert_alpha(),", "False] for lPipe in lowerPipes: # pipe rect lPipeRect = pygame.Rect(lPipe['x'], lPipe['y'], pipeW,", "messagey = int(SCREENHEIGHT * 0.12) easymodex = int((SCREENWIDTH - IMAGES['easymode'].get_width())/2) easymodey = int(SCREENHEIGHT", "= 0 # pipe spawn pipes = getRandomPipe(DIFFICULTY) upperPipes.append(pipes[0]) lowerPipes.append(pipes[1]) # check if", "mainGame(movementInfo): global FURYMODE, FURYMODE_FRAMES_TO_SPAWN_PIPES, EASYMODE DIFFICULTY = 0 score = playerIndex = loopIter", "pygame.image.load('assets/sprites/particles-green-5.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-6.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-7.png').convert_alpha(), ) else: IMAGES['pipe-particle'] = ( pygame.image.load('assets/sprites/particles-red-0.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-1.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-2.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-3.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-4.png').convert_alpha(),", "if not crashInfo['groundCrash']: if playerRot > -90: playerRot -= playerVelRot # draw sprites", "background sprites randBg = random.randint(0, len(BACKGROUNDS_LIST) - 1) IMAGES['background'] = pygame.image.load(BACKGROUNDS_LIST[randBg]).convert() # select", "just at right of the fury mode button (8 is right padding) furymodeKeyX", "= [ {'x': SCREENWIDTH + 200, 'y': newPipe1[1]['y']}, {'x': SCREENWIDTH + 200 +", "[int(x) for x in list(str(score))] totalWidth = 0 # total width of all", 
"import math import pygame from pygame.locals import * import time WHITE = (255,", "[] # list of lowerpipe lowerPipes = [] # list of particles #", "== KEYDOWN and (event.key == K_SPACE or event.key == K_UP)) or (event.type ==", "== KEYDOWN and event.key == K_ESCAPE): pygame.quit() sys.exit() if (event.type == KEYDOWN and", "/ 2), 'y': newPipe2[1]['y']}, ] # 추가된 부분 newCoin1 = getRandomCoin() newCoin2 =", "# player and pipe hitmasks pHitMask = HITMASKS['player'][pi] uHitmask = HITMASKS['pipe'][0] # if", "or (event.type == MOUSEBUTTONDOWN and event.button == 1): if playery + playerHeight >=", "= pygame.mixer.Sound('assets/audio/wing' + soundExt) # volume slider(defaultValue, maximum, minimum, Xposition, Yposition) SLIDER =", "# initialize volume for sound in SOUNDS: SOUNDS[sound].set_volume(SLIDER.val) while True: for event in", ".25 angle = random.random() * (aMax - aMin) + aMin particle['vx'] = math.cos(angle)", "xrange(rect.width): for y in xrange(rect.height): if hitmask1[x1+x][y1+y] and hitmask2[x2+x][y2+y]: return True return False", "[True, True] else: playerRect = pygame.Rect(player['x'], player['y'], player['w'], player['h']) pipeW = IMAGES['pipe'][0].get_width() pipeH", "(160,15)) SCREEN.blit(IMAGES['player'][playerIndex], (playerx, playery + playerShmVals['val'])) SCREEN.blit(IMAGES['message'], (messagex, messagey)) SCREEN.blit(IMAGES['base'], (basex, BASEY)) SCREEN.blit(IMAGES['easymode'],(easymodex,easymodey))", "player to blit on screen playerIndex = 0 playerIndexGen = cycle([0, 1, 2,", "[] # list of particles # a particle is an object with attributes:", "screen position # screen SCREEN.blit(IMAGES['background'], (0,0)) SCREEN.blit(surf, (self.xpos, self.ypos)) def move(self): \"\"\" The", "True SOUNDS['wing'].play() # check for crash here crashTest = checkCrash({'x': playerx, 'y': playery,", "# Static graphics - slider background # self.surf.set_colorkey(BLACK) pygame.draw.rect(self.surf, WHITE, [5, 30, 80,", "playerFlapped = False # more rotation to 
cover the threshold (calculated in visible", "furymodey)) SCREEN.blit(IMAGES['furymode-key'], (furymodeKeyX, furymodeKeyY)) pygame.display.update() FPSCLOCK.tick(FPS) def mainGame(movementInfo): global FURYMODE, FURYMODE_FRAMES_TO_SPAWN_PIPES, EASYMODE DIFFICULTY", "+ playerShmVals['val'], 'basex': basex, 'playerIndexGen': playerIndexGen, } elif (event.type == KEYDOWN and (event.key", "pygame.time.delay(1000) SCREEN.blit(IMAGES['background'], (0,0)) writeScore(score) count=count+1 pygame.display.update() elif(gameover == True): SCREEN.blit(IMAGES['gameover'], (overx,overy)) pygame.display.update() gameover", "(event.type == KEYDOWN and event.key == K_2) or ((event.type == MOUSEBUTTONDOWN and event.button", "fury mode if (event.type == KEYDOWN and event.key == K_1) or ((event.type ==", "has the max value, we must spawn new pipes if furymodePipeFrameCounter == FURYMODE_FRAMES_TO_SPAWN_PIPES:", "playerMidPos = playerx + IMAGES['player'][0].get_width() / 2 for pipe in upperPipes: pipeMidPos =", "flaps # The counter to spawn new pipes furymodePipeFrameCounter = 0 while True:", "pipe in upperPipes: pipeMidPos = pipe['x'] + IMAGES['pipe'][0].get_width() / 2 if pipeMidPos <=", "playerVelY += playerAccY if playerFlapped: playerFlapped = False # more rotation to cover", "fontobject1 = pygame.font.Font(None,30) \"Print a message in a box in the middle of", "{} # True if the user plays the fury mode FURYMODE = False", "부분 coinTest = checkCoin({'x': playerx, 'y': playery, 'index': playerIndex}, coins) if crashTest[0]: #", "= self.button_surf.get_rect(center=pos) surf.blit(self.button_surf, self.button_rect) self.button_rect.move_ip(self.xpos, self.ypos) # move of button box to correct", "FURYMODE_FRAMES_TO_SPAWN_PIPES = 35 # pipes particles amount (for each pipe) FURYMODE_PARTICLES = 8", "while 1: inkey = get_key() if inkey == K_BACKSPACE: current_string = current_string[0:-1] elif", "+= min(playerVelY, BASEY - playery - playerHeight) # move pipes to left for", "pipe {'x': 
pipeX, 'y': gapY + PIPEGAPSIZE}, # lower pipe ] # 추가된", "event.button == 1): if playery > -2 * IMAGES['player'][0].get_height(): playerVelY = playerFlapAcc playerFlapped", "== FURYMODE_FRAMES_TO_SPAWN_PIPES: # counter reset furymodePipeFrameCounter = 0 # pipe spawn pipes =", "an upper pipe if crashTest[2]: upperPipes.remove(crashTest[3]) score+=1 # it's a lower pipe else:", "SLIDER.draw() if(SLIDER.val>0): SCREEN.blit(IMAGES['speaker'][0], (160,15)) else : SCREEN.blit(IMAGES['speaker'][1], (160,15)) SCREEN.blit(IMAGES['player'][playerIndex], (playerx, playery + playerShmVals['val']))", "+= int(BASEY * 0.2) pipeHeight = IMAGES['pipe'][0].get_height() pipeX = SCREENWIDTH + 10 return", "== 0: IMAGES['pipe-particle'] = ( pygame.image.load('assets/sprites/particles-green-0.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-1.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-2.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-3.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-4.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-5.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-6.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-7.png').convert_alpha(),", "to change playerIndex after every 5th iteration loopIter = 0 playerx = int(SCREENWIDTH", "a \"timer\" (a frame counter) FURYMODE_FRAMES_TO_SPAWN_PIPES = 35 # pipes particles amount (for", "(loopIter + 1) % 30 basex = -((-basex + 100) % baseShift) #", "2) - 100, (SCREEN.get_height() / 2) -160 + (50*i))) SCREEN.blit(fontobject.render(str(TOPFIVE[i][1]), 1,RED),((SCREEN.get_width() / 2)", "(uPipe['x'], uPipe['y'])) SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y'])) SCREEN.blit(IMAGES['base'], (basex, BASEY)) showScore(score) playerSurface = pygame.transform.rotate(IMAGES['player'][1], playerRot)", "pygame.K_q: 'Q', pygame.K_r: 'R', pygame.K_s: 'S', 
pygame.K_t: 'T', pygame.K_u: 'U', pygame.K_v: 'V', pygame.K_w:", "coin['y'])) # pipes' particles if FURYMODE: for particle in particles: SCREEN.blit(IMAGES['pipe-particle'][particle['i']], (particle['x'], particle['y']))", "player['h']) pipeW = IMAGES['pipe'][0].get_width() pipeH = IMAGES['pipe'][0].get_height() for uPipe in upperPipes: # pipe", "SCREENWIDTH = 288 SCREENHEIGHT = 512 # amount by which base can maximum", "pipe rect uPipeRect = pygame.Rect(uPipe['x'], uPipe['y'], pipeW, pipeH) # player and pipe hitmasks", "welcome screen animation of flappy bird\"\"\" global FURYMODE, EASYMODE # index of player", "interaction # Static graphics - slider background # self.surf.set_colorkey(BLACK) pygame.draw.rect(self.surf, WHITE, [5, 30,", "False EASYMODE = False score = crashInfo['score'] playerx = SCREENWIDTH * 0.2 playery", "if a pipe must be removed from the list for uPipe in upperPipes:", "# the counter has the max value, we must spawn new pipes if", "pygame.image.load('assets/sprites/particles-green-4.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-5.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-6.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-7.png').convert_alpha(), ) else: IMAGES['pipe-particle'] = ( pygame.image.load('assets/sprites/particles-red-0.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-1.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-2.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-3.png').convert_alpha(),", "visibleRot) SCREEN.blit(playerSurface, (playerx, playery)) pygame.display.update() FPSCLOCK.tick(FPS) def showGameOverScreen(crashInfo): \"\"\"crashes the player down ans", "# sound effect SOUNDS['hit'].play() def checkCrash(player, upperPipes, lowerPipes): \"\"\"returns True if player collders", "score, 'playerVelY': playerVelY, 'playerRot': playerRot } # 추가된 부분 if coinTest[0]: score +=", "# index 
of player to blit on screen playerIndex = 0 playerIndexGen =", "+ 200, 'y': newPipe1[0]['y']}, {'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y':", "= pygame.transform.rotate(IMAGES['player'][playerIndex], visibleRot) SCREEN.blit(playerSurface, (playerx, playery)) pygame.display.update() FPSCLOCK.tick(FPS) def showGameOverScreen(crashInfo): \"\"\"crashes the player", "new pipes when first pipe is about to touch left of screen if", "screen self.ypos = ypos # y-location on screen self.surf = pygame.surface.Surface((95, 40)) self.hit", "the basic slide surface \"\"\" # static surf = self.surf.copy() # dynamic pos", "2) - 50)) screen.blit(fontobject.render(message, 1, (255,255,255)), ((screen.get_width() / 2) - 100, (screen.get_height() /", "((event.type == MOUSEBUTTONDOWN and event.button == 1) and IMAGES['furymode'].get_rect(center=(furymodex+54,furymodey+14)).collidepoint(pygame.mouse.get_pos())): # make first flap", "SCREEN.blit(IMAGES['background'], (0,0)) SCREEN.blit(surf, (self.xpos, self.ypos)) def move(self): \"\"\" The dynamic part; reacts to", "= (pygame.image.load('assets/sprites/speaker_full.png').convert_alpha(), pygame.image.load('assets/sprites/speaker_mute.png').convert_alpha()) # 추가된 부분 IMAGES['coin'] = pygame.image.load('assets/sprites/coin.png').convert_alpha() # sounds if 'win'", "lCollide: # for fury mode we want to break the pipe so we", "-((-basex + 4) % baseShift) playerShm(playerShmVals) # draw sprites SCREEN.blit(IMAGES['background'], (0,0)) SLIDER.draw() if(SLIDER.val>0):", "pipeVelX = -4 # player velocity, max velocity, downward accleration, accleration on flap", "accleration, accleration on flap playerVelY = -9 # player's velocity along Y, default", "angular speed playerRotThr = 20 # rotation threshold playerFlapAcc = -9 # players", "# player shm for up-down motion on welcome screen playerShmVals = {'val': 0,", "SCREEN, FPSCLOCK, SLIDER pygame.init() FPSCLOCK = pygame.time.Clock() SCREEN = pygame.display.set_mode((SCREENWIDTH, 
SCREENHEIGHT)) pygame.display.set_caption('Flappy Bird')", "event in pygame.event.get(): if event.type == QUIT or (event.type == KEYDOWN and event.key", "부분 def getRandomCoin(): \"\"\" returns a randomly generated coin \"\"\" coinY = random.randrange(20,", "{ 'y': playery, 'groundCrash': crashTest[1], 'basex': basex, 'upperPipes': upperPipes, 'lowerPipes': lowerPipes, 'score': score,", "'playerRot': playerRot } # 추가된 부분 if coinTest[0]: score += 1 SOUNDS['point'].play() coins.pop(0)", "HITMASKS['pipe'][0] # if bird collided with pipe uCollide = pixelCollision(playerRect, uPipeRect, pHitMask, uHitmask)", "movementInfo['basex'] baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width() # no need to spawn pipes at", "add pipes with a \"timer\" (a frame counter) FURYMODE_FRAMES_TO_SPAWN_PIPES = 35 # pipes", "getRandomPipe(DIFFICULTY) upperPipes.append(newPipe[0]) lowerPipes.append(newPipe[1]) # 추가된 부분 newCoin = getRandomCoin() coins.append(newCoin[0]) # remove first", "[True, False] return [False, False] def pixelCollision(rect1, rect2, hitmask1, hitmask2): \"\"\"Checks if two", "xrange(image.get_height()): mask[x].append(bool(image.get_at((x,y))[3])) return mask def get_key(): while 1: event = pygame.event.poll() if event.type", "= current_string[0:-1] elif inkey == K_RETURN: break elif inkey == K_MINUS: current_string.append(\"_\") elif", "and IMAGES['easymode'].get_rect(center=(easymodex+54,easymodey+14)).collidepoint(pygame.mouse.get_pos())): EASYMODE = True # make first flap sound and return values", "# dynamic pos = (10+int((self.val-self.mini)/(self.maxi-self.mini)*80), 33) self.button_rect = self.button_surf.get_rect(center=pos) surf.blit(self.button_surf, self.button_rect) self.button_rect.move_ip(self.xpos, self.ypos)", "# random angle for a minimum velocity vel = random.random() * 10 +", "\"\"\"returns True if player collders with base or pipes.\"\"\" global FURYMODE pi =", "list of pipes PIPES_LIST = ( 'assets/sprites/pipe-green.png', 
'assets/sprites/pipe-red.png', ) try: xrange except NameError:", "(lower or upper) if FURYMODE: return [True, False, True, uPipe] # normal mode", "pygame.mouse.get_pos() if SLIDER.button_rect.collidepoint(pos): SLIDER.hit = True elif event.type == pygame.MOUSEBUTTONUP: SLIDER.hit = False", "playerx, 'y': playery, 'index': playerIndex}, coins) if crashTest[0]: # the player hits a", "* vel particle['vy'] = math.sin(angle) * vel particles.append(particle) # sound effect SOUNDS['hit'].play() def", "coinRect = pygame.Rect(coin['x'], coin['y'], coinW, coinH) pHitMask = HITMASKS['player'][pi] cHitMask = HITMASKS['coin'][0] cCollide", "yellow bird ( 'assets/sprites/yellowbird-upflap.png', 'assets/sprites/yellowbird-midflap.png', 'assets/sprites/yellowbird-downflap.png', ), ) # list of backgrounds BACKGROUNDS_LIST", "furymodePipeFrameCounter == FURYMODE_FRAMES_TO_SPAWN_PIPES: # counter reset furymodePipeFrameCounter = 0 # pipe spawn pipes", "pygame.Rect(player['x'], player['y'], player['w'], player['h']) pipeW = IMAGES['pipe'][0].get_width() pipeH = IMAGES['pipe'][0].get_height() for uPipe in", "# y-location on screen self.surf = pygame.surface.Surface((95, 40)) self.hit = False # the", "of lowerpipe lowerPipes = [] # list of particles # a particle is", "getRandomCoin() newCoin2 = getRandomCoin() coins = [ {'x': SCREENWIDTH + 280, 'y': newCoin1[0]['y']},", "pygame.event.poll() if event.type == KEYDOWN: return event.key else: pass def display_box(screen, message): fontobject", "0 < upperPipes[0]['x'] < 5: newPipe = getRandomPipe(DIFFICULTY) upperPipes.append(newPipe[0]) lowerPipes.append(newPipe[1]) # 추가된 부분", "SCREEN.blit(fontobject.render(TOPFIVE[i][0], 1, RED),((SCREEN.get_width() / 2) - 100, (SCREEN.get_height() / 2) -160 + (50*i)))", "FPS = 30 SCREENWIDTH = 288 SCREENHEIGHT = 512 # amount by which", "for fury mode if (event.type == KEYDOWN and event.key == K_1) or ((event.type", "for lPipe in lowerPipes: if lPipe['x'] < -IMAGES['pipe'][0].get_width(): 
lowerPipes.remove(lPipe) # particles for particle", "IMAGES['player'][0].get_width() / 2 for pipe in upperPipes: pipeMidPos = pipe['x'] + IMAGES['pipe'][0].get_width() /", "pipe['y'] + pipeH) particle['i'] = random.randint(1, FURYMODE_PARTICLES) - 1 # random angle for", "2) - 100, (screen.get_height() / 2) - 10)) pygame.display.flip() def ask(screen, question): \"ask(screen,", "pipe) FURYMODE_PARTICLES = 8 # max particles for each pipe hit FURYMODE_PARTICLES_MAX =", "checkCrash(player, upperPipes, lowerPipes): \"\"\"returns True if player collders with base or pipes.\"\"\" global", "particle is an object with attributes: # {'x': position-x, 'y': position-y, # 'vx':", "display_box(screen, message): fontobject = pygame.font.Font(None,18) fontobject1 = pygame.font.Font(None,30) \"Print a message in a", "playerVelRot = 7 count=0 gameover = True basex = crashInfo['basex'] upperPipes, lowerPipes =", "15)) self.button_surf.fill(TRANS) self.button_surf.set_colorkey(TRANS) pygame.draw.circle(self.button_surf, ORANGE, (6, 6), 6, 0) def draw(self): \"\"\" Combination", "= SCREENWIDTH * 0.2 playery = crashInfo['y'] playerHeight = IMAGES['player'][0].get_height() playerVelY = crashInfo['playerVelY']", "with pipe uCollide = pixelCollision(playerRect, uPipeRect, pHitMask, uHitmask) if uCollide: # for fury", "more rotation to cover the threshold (calculated in visible rotation) playerRot = 45", "(0, 255, 255) MAGENTA = (255, 0, 255) TRANS = (1, 1, 1)", "self.button_surf = pygame.surface.Surface((15, 15)) self.button_surf.fill(TRANS) self.button_surf.set_colorkey(TRANS) pygame.draw.circle(self.button_surf, ORANGE, (6, 6), 6, 0) def", "if pipeMidPos <= playerMidPos < pipeMidPos + 4: score += 1 SOUNDS['point'].play() #", "pygame.K_e: 'E', pygame.K_f: 'F', pygame.K_g: 'G', pygame.K_h: 'H', pygame.K_i: 'I', pygame.K_j: 'J', pygame.K_k:", "screen (with the key) IMAGES['furymode'] = pygame.image.load('assets/sprites/furymode.png').convert_alpha() IMAGES['furymode-key'] = 
pygame.image.load('assets/sprites/furymode-key.png').convert_alpha() IMAGES['easymode'] = pygame.image.load('assets/sprites/easymode.png').convert_alpha()", "= [int(x) for x in list(str(score))] totalWidth = 0 # total width of", "self.mini: self.val = self.mini if self.val > self.maxi: self.val = self.maxi if __name__", "= (0, 255, 255) MAGENTA = (255, 0, 255) TRANS = (1, 1,", "< upperPipes[0]['x'] < 5: newPipe = getRandomPipe(DIFFICULTY) upperPipes.append(newPipe[0]) lowerPipes.append(newPipe[1]) # 추가된 부분 newCoin", "def getHitmask(image): \"\"\"returns a hitmask using an image's alpha.\"\"\" mask = [] for", "'M', pygame.K_n: 'N', pygame.K_o: 'O', pygame.K_p: 'P', pygame.K_q: 'Q', pygame.K_r: 'R', pygame.K_s: 'S',", "slider background # self.surf.set_colorkey(BLACK) pygame.draw.rect(self.surf, WHITE, [5, 30, 80, 5], 0) # dynamic", "(screen.get_height() / 2) - 10, 200,20), 0) pygame.draw.rect(screen, (255,255,255), ((screen.get_width() / 2) -", "game over sprite IMAGES['gameover'] = pygame.image.load('assets/sprites/gameover.png').convert_alpha() # message sprite for welcome screen IMAGES['message']", "pipe hitmasks pHitMask = HITMASKS['player'][pi] uHitmask = HITMASKS['pipe'][0] # if bird collided with", "and pipe hitmasks pHitMask = HITMASKS['player'][pi] uHitmask = HITMASKS['pipe'][0] # if bird collided", "if playerShm['dir'] == 1: playerShm['val'] += 1 else: playerShm['val'] -= 1 def getRandomPipe(DIFFICULTY):", "or pipes.\"\"\" global FURYMODE pi = player['index'] player['w'] = IMAGES['player'][0].get_width() player['h'] = IMAGES['player'][0].get_height()", "False EASYMODE = False # In fury mode, the pipe sapwn system is", "'K', pygame.K_l: 'L', pygame.K_m: 'M', pygame.K_n: 'N', pygame.K_o: 'O', pygame.K_p: 'P', pygame.K_q: 'Q',", "and return values for mainGame SOUNDS['wing'].play() return { 'playery': playery + playerShmVals['val'], 'basex':", "try: xrange except NameError: xrange = range class Keyboard(object): keys = {pygame.K_a: 'A',", "coinW = 
IMAGES['coin'].get_width() coinH = IMAGES['coin'].get_height() for coin in coins: coinRect = pygame.Rect(coin['x'],", "# max particles for each pipe hit FURYMODE_PARTICLES_MAX = 48 # list of", "need to spawn pipes at start if FURYMODE: # list of upper pipes", "speed on flapping playerFlapped = False # True when player flaps # The", "import * import time WHITE = (255, 255, 255) BLACK = (0, 0,", "# playerIndex basex change if (loopIter + 1) % 3 == 0: playerIndex", "newCoin2 = getRandomCoin() coins = [ {'x': SCREENWIDTH + 280, 'y': newCoin1[0]['y']}, {'x':", "SCREEN = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT)) pygame.display.set_caption('Flappy Bird') # numbers sprites for score display IMAGES['numbers']", "each pipe) FURYMODE_PARTICLES = 8 # max particles for each pipe hit FURYMODE_PARTICLES_MAX", "which base can maximum shift to left baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width() #", "upperPipes[0]['x'] < 5: newPipe = getRandomPipe(DIFFICULTY) upperPipes.append(newPipe[0]) lowerPipes.append(newPipe[1]) # 추가된 부분 newCoin =", "maximum at slider position right self.mini = mini # minimum at slider position", "1: return [True, True] else: playerRect = pygame.Rect(player['x'], player['y'], player['w'], player['h']) pipeW =", "start if FURYMODE: # list of upper pipes upperPipes = [] # list", "'Q', pygame.K_r: 'R', pygame.K_s: 'S', pygame.K_t: 'T', pygame.K_u: 'U', pygame.K_v: 'V', pygame.K_w: 'W',", "cHitMask = HITMASKS['coin'][0] cCollide = pixelCollision(playerRect, coinRect, pHitMask, cHitMask) if cCollide : return", "lowerPipes.pop(0) upperPipes.pop(0) # 추가된 부분 if coins[0]['x'] < -IMAGES['coin'].get_width(): coins.pop(0) # draw sprites", "# it's an upper pipe if crashTest[2]: upperPipes.remove(crashTest[3]) score+=1 # it's a lower", "from the list for uPipe in upperPipes: if uPipe['x'] < -IMAGES['pipe'][0].get_width(): upperPipes.remove(uPipe) for", "= 2 playerRot = crashInfo['playerRot'] playerVelRot = 7 count=0 gameover = True 
basex", "the max value, we must spawn new pipes if furymodePipeFrameCounter == FURYMODE_FRAMES_TO_SPAWN_PIPES: #", "event.key == K_ESCAPE): pygame.quit() sys.exit() if (event.type == KEYDOWN and (event.key == K_SPACE", "angle = random.random() * (aMax - aMin) + aMin particle['vx'] = math.cos(angle) *", "-160 + (50*i))) SCREEN.blit(fontobject.render(str(TOPFIVE[i][1]), 1,RED),((SCREEN.get_width() / 2) + 75, (SCREEN.get_height() / 2) -160", "추가된 부분 def checkCoin(player, coins): pi = player['index'] player['w'] = IMAGES['player'][0].get_width() player['h'] =", "their rects\"\"\" rect = rect1.clip(rect2) if rect.width == 0 or rect.height == 0:", "# remove the pipe # it's an upper pipe if crashTest[2]: upperPipes.remove(crashTest[3]) score+=1", "pygame.image.load('assets/sprites/furymode.png').convert_alpha() IMAGES['furymode-key'] = pygame.image.load('assets/sprites/furymode-key.png').convert_alpha() IMAGES['easymode'] = pygame.image.load('assets/sprites/easymode.png').convert_alpha() IMAGES['hardmode'] = pygame.image.load('assets/sprites/hardmode.png').convert_alpha() # speaker sprite", "Xposition, Yposition) SLIDER = Slider(0.5, 1, 0, 190, 0) while True: # select", "in the middle of the screen\" pygame.draw.rect(screen, (0,0,0), ((screen.get_width() / 2) - 100,", "((screen.get_width() / 2) - 102, (screen.get_height() / 2) - 12, 204,24), 1) if", "list newPipe1 = getRandomPipe(DIFFICULTY) newPipe2 = getRandomPipe(DIFFICULTY) # list of upper pipes upperPipes", "+ (SCREENWIDTH / 2), 'y': newCoin2[0]['y']}, ] pipeVelX = -4 # player velocity,", "255, 255) MAGENTA = (255, 0, 255) TRANS = (1, 1, 1) TOPFIVE=[('kim',0),('kim',0),('kim',0),('kim',0),('kim',0)]", "2) overy = int(SCREENHEIGHT * 0.5) #SCREEN.blit(IMAGES['background'], (0,0)) for uPipe, lPipe in zip(upperPipes,", "on screen playerIndex = 0 playerIndexGen = cycle([0, 1, 2, 1]) # iterator", "'y': newPipe2[0]['y']}, ] # list of lowerpipe lowerPipes = [ {'x': SCREENWIDTH +", "= rect1.clip(rect2) if rect.width == 0 or 
rect.height == 0: return False x1,", "# pipes are green if pipeindex == 0: IMAGES['pipe-particle'] = ( pygame.image.load('assets/sprites/particles-green-0.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-1.png').convert_alpha(),", "- button surface # self.button_surf = pygame.surface.Surface((15, 15)) self.button_surf.fill(TRANS) self.button_surf.set_colorkey(TRANS) pygame.draw.circle(self.button_surf, ORANGE, (6,", "random.randrange(0, int(BASEY * 0.6 - PIPEGAPSIZE)) gapY += int(BASEY * 0.2) pipeHeight =", "= pygame.font.Font(None,18) fontobject1 = pygame.font.Font(None,30) \"Print a message in a box in the", "counter) FURYMODE_FRAMES_TO_SPAWN_PIPES = 35 # pipes particles amount (for each pipe) FURYMODE_PARTICLES =", "else: playerShm['val'] -= 1 def getRandomPipe(DIFFICULTY): PIPEGAPSIZE = 100 + DIFFICULTY * 10", "max ascend speed playerAccY = 1 # players downward accleration playerRot = 45", "* 0.2), movementInfo['playery'] basex = movementInfo['basex'] baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width() # no", "'L', pygame.K_m: 'M', pygame.K_n: 'N', pygame.K_o: 'O', pygame.K_p: 'P', pygame.K_q: 'Q', pygame.K_r: 'R',", "upperPipes = [ {'x': SCREENWIDTH + 200, 'y': newPipe1[0]['y']}, {'x': SCREENWIDTH + 200", "randomly generated coin \"\"\" coinY = random.randrange(20, int(BASEY * 0.6)) coinX = SCREENWIDTH", "(event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP)) or ((event.type", "sprite IMAGES['base'] = pygame.image.load('assets/sprites/base.png').convert_alpha() # the \"fury mode\" button for welcome screen (with", "pipe crash if not crashInfo['groundCrash']: if playerRot > -90: playerRot -= playerVelRot #", "writeScore(score): TOPFIVE.append((ask(SCREEN,\"NAME: \"),score)) TOPFIVE.sort(key=itemgetter(1),reverse= True) TOPFIVE.pop() def getHitmask(image): \"\"\"returns a hitmask using an", "= (50, 50, 255) GREY = (200, 200, 200) ORANGE = (200, 100,", "[True, False, True, uPipe] # normal mode return 
[True, False] for lPipe in", "and hitmask dicts IMAGES, SOUNDS, HITMASKS = {}, {}, {} # True if", "# iterator used to change playerIndex after every 5th iteration loopIter = 0", "furymodeKeyY = furymodey + IMAGES['furymode-key'].get_height() / 2 basex = 0 # amount by", "if(SLIDER.val>0): SCREEN.blit(IMAGES['speaker'][0], (160,15)) else : SCREEN.blit(IMAGES['speaker'][1], (160,15)) SCREEN.blit(IMAGES['player'][playerIndex], (playerx, playery + playerShmVals['val'])) SCREEN.blit(IMAGES['message'],", "crashInfo['upperPipes'], crashInfo['lowerPipes'] # play hit and die sounds SOUNDS['hit'].play() if not crashInfo['groundCrash']: SOUNDS['die'].play()", "> -90: playerRot -= playerVelRot # draw sprites overx = int((SCREENWIDTH - IMAGES['gameover'].get_width())", "- rect1.y x2, y2 = rect.x - rect2.x, rect.y - rect2.y for x", "7 count=0 gameover = True basex = crashInfo['basex'] upperPipes, lowerPipes = crashInfo['upperPipes'], crashInfo['lowerPipes']", "\"ask(screen, question) -> answer\" pygame.font.init() current_string = [] display_box(screen, question + \": \"", "the hit attribute indicates slider movement due to mouse interaction # Static graphics", "= 4 # get 2 new pipes to add to upperPipes lowerPipes list", "soundExt = '.wav' else: soundExt = '.ogg' SOUNDS['die'] = pygame.mixer.Sound('assets/audio/die' + soundExt) SOUNDS['hit']", "IMAGES['player'][0].get_height() playerRect = pygame.Rect(player['x'], player['y'], player['w'], player['h']) coinW = IMAGES['coin'].get_width() coinH = IMAGES['coin'].get_height()", "pHitMask = HITMASKS['player'][pi] lHitmask = HITMASKS['pipe'][0] # if bird collided with pipe lCollide", "dicts IMAGES, SOUNDS, HITMASKS = {}, {}, {} # True if the user", "(event.key == K_SPACE or event.key == K_UP)) or ((event.type == MOUSEBUTTONDOWN and event.button", "the fury mode FURYMODE = False EASYMODE = False # In fury mode,", "so we # must return which pipe is colliding (lower or upper) if", "/ 2) -160 + (50*i))) 
SCREEN.blit(fontobject.render(str(TOPFIVE[i][1]), 1,RED),((SCREEN.get_width() / 2) + 75, (SCREEN.get_height() /", "for easymode if (event.type == KEYDOWN and event.key == K_2) or ((event.type ==", "* IMAGES['player'][0].get_height(): playerVelY = playerFlapAcc playerFlapped = True SOUNDS['wing'].play() # check for crash", "particles for particle in particles: # speed particle['x'] += particle['vx'] particle['y'] += particle['vy']", "* (self.maxi - self.mini) + self.mini if self.val < self.mini: self.val = self.mini", "along Y, default same as playerFlapped playerMaxVelY = 10 # max vel along", "its out of the screen if upperPipes[0]['x'] < -IMAGES['pipe'][0].get_width(): lowerPipes.pop(0) upperPipes.pop(0) # 추가된", "coinX = SCREENWIDTH + 100 return [ {'x': coinX, 'y': coinY}, ] def", "= True # make first flap sound and return values for mainGame SOUNDS['wing'].play()", "mouse interaction # Static graphics - slider background # self.surf.set_colorkey(BLACK) pygame.draw.rect(self.surf, WHITE, [5,", "= pygame.image.load('assets/sprites/hardmode.png').convert_alpha() # speaker sprite IMAGES['speaker'] = (pygame.image.load('assets/sprites/speaker_full.png').convert_alpha(), pygame.image.load('assets/sprites/speaker_mute.png').convert_alpha()) # 추가된 부분 IMAGES['coin']", "(SCREEN.get_height() / 2) -160 + (50*i))) FPSCLOCK.tick(FPS) pygame.display.update() def playerShm(playerShm): \"\"\"oscillates the value", "= int((SCREENWIDTH - IMAGES['gameover'].get_width()) / 2) overy = int(SCREENHEIGHT * 0.5) #SCREEN.blit(IMAGES['background'], (0,0))", "graphics - slider background # self.surf.set_colorkey(BLACK) pygame.draw.rect(self.surf, WHITE, [5, 30, 80, 5], 0)", "welcome screen (with the key) IMAGES['furymode'] = pygame.image.load('assets/sprites/furymode.png').convert_alpha() IMAGES['furymode-key'] = pygame.image.load('assets/sprites/furymode-key.png').convert_alpha() IMAGES['easymode'] =", "showGameOverScreen(crashInfo): \"\"\"crashes the player down ans shows gameover 
image\"\"\" global FURYMODE, EASYMODE FURYMODE", "surface \"\"\" # static surf = self.surf.copy() # dynamic pos = (10+int((self.val-self.mini)/(self.maxi-self.mini)*80), 33)", "playery > -2 * IMAGES['player'][0].get_height(): playerVelY = playerFlapAcc playerFlapped = True SOUNDS['wing'].play() #", "IMAGES['player'] = ( pygame.image.load(PLAYERS_LIST[randPlayer][0]).convert_alpha(), pygame.image.load(PLAYERS_LIST[randPlayer][1]).convert_alpha(), pygame.image.load(PLAYERS_LIST[randPlayer][2]).convert_alpha(), ) # select random pipe sprites pipeindex", "current_string = [] display_box(screen, question + \": \" + \"\".join(current_string)) while 1: inkey", "'assets/sprites/yellowbird-midflap.png', 'assets/sprites/yellowbird-downflap.png', ), ) # list of backgrounds BACKGROUNDS_LIST = ( 'assets/sprites/background-day.png', 'assets/sprites/background-night.png',", "uHitmask) if uCollide: # for fury mode we want to break the pipe", "in a copy of the basic slide surface \"\"\" # static surf =", "12, 204,24), 1) if len(message) != 0: screen.blit(fontobject1.render(\"HIGH SCORE!!!\", 1, (255,255,255)), ((screen.get_width() /", "on screen self.ypos = ypos # y-location on screen self.surf = pygame.surface.Surface((95, 40))", "random.randint(pipe['x'], pipe['x'] + pipeW) particle['y'] = random.randint(pipe['y'], pipe['y'] + pipeH) particle['i'] = random.randint(1,", "-IMAGES['pipe'][0].get_width(): lowerPipes.remove(lPipe) # particles for particle in particles: # speed particle['x'] += particle['vx']", "to add to upperPipes lowerPipes list newPipe1 = getRandomPipe(DIFFICULTY) newPipe2 = getRandomPipe(DIFFICULTY) #", "coin \"\"\" coinY = random.randrange(20, int(BASEY * 0.6)) coinX = SCREENWIDTH + 100", "# list of upper pipes upperPipes = [ {'x': SCREENWIDTH + 200, 'y':", "getRandomPipe(DIFFICULTY) upperPipes.append(pipes[0]) lowerPipes.append(pipes[1]) # check if a pipe must be removed from the", "= 48 # list of all possible players (tuple of 3 positions of", "hitmask dicts 
IMAGES, SOUNDS, HITMASKS = {}, {}, {} # True if the", "= random.randint(1, FURYMODE_PARTICLES) - 1 # random angle for a minimum velocity vel", "TOPFIVE.pop() def getHitmask(image): \"\"\"returns a hitmask using an image's alpha.\"\"\" mask = []", "'I', pygame.K_j: 'J', pygame.K_k: 'K', pygame.K_l: 'L', pygame.K_m: 'M', pygame.K_n: 'N', pygame.K_o: 'O',", "+ 10 return [ {'x': pipeX, 'y': gapY - pipeHeight}, # upper pipe", "pygame.K_u: 'U', pygame.K_v: 'V', pygame.K_w: 'W', pygame.K_x: 'X', pygame.K_y: 'Y', pygame.K_z: 'Z'} def", "indicates slider movement due to mouse interaction # Static graphics - slider background", "player HITMASKS['player'] = ( getHitmask(IMAGES['player'][0]), getHitmask(IMAGES['player'][1]), getHitmask(IMAGES['player'][2]), ) # 추가된 부분 HITMASKS['coin'] =", "playery + playerHeight >= BASEY - 1: return # player y shift if", "+= IMAGES['numbers'][digit].get_width() Xoffset = (SCREENWIDTH - totalWidth) / 2 for digit in scoreDigits:", "# screen SCREEN.blit(IMAGES['background'], (0,0)) SCREEN.blit(surf, (self.xpos, self.ypos)) def move(self): \"\"\" The dynamic part;", "== MOUSEBUTTONDOWN and event.button == 1): if playery + playerHeight >= BASEY -", "15: playerVelY += playerAccY # rotate only when it's a pipe crash if", "event.button == 1) and IMAGES['furymode'].get_rect(center=(furymodex+54,furymodey+14)).collidepoint(pygame.mouse.get_pos())): # make first flap sound and return values", "velocity-y, # 'i': index in textures list} particles = [] # 추가된 부분", "= pygame.image.load('assets/sprites/furymode.png').convert_alpha() IMAGES['furymode-key'] = pygame.image.load('assets/sprites/furymode-key.png').convert_alpha() IMAGES['easymode'] = pygame.image.load('assets/sprites/easymode.png').convert_alpha() IMAGES['hardmode'] = pygame.image.load('assets/sprites/hardmode.png').convert_alpha() # speaker", "'y': newCoin1[0]['y']}, {'x': SCREENWIDTH + 280 + (SCREENWIDTH / 2), 'y': newCoin2[0]['y']}, ]", "= -((-basex + 4) % baseShift) playerShm(playerShmVals) # 
draw sprites SCREEN.blit(IMAGES['background'], (0,0)) SLIDER.draw()", "# must return which pipe is colliding (lower or upper) if FURYMODE: return", "+ 1) % 3 == 0: playerIndex = next(playerIndexGen) loopIter = (loopIter +", "= pixelCollision(playerRect, uPipeRect, pHitMask, uHitmask) if uCollide: # for fury mode we want", "next(playerIndexGen) loopIter = (loopIter + 1) % 30 basex = -((-basex + 100)", "crashInfo['playerVelY'] playerAccY = 2 playerRot = crashInfo['playerRot'] playerVelRot = 7 count=0 gameover =", "(playerx, playery)) pygame.display.update() FPSCLOCK.tick(FPS) def showGameOverScreen(crashInfo): \"\"\"crashes the player down ans shows gameover", "elif (event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP)) or", "play hit and die sounds SOUNDS['hit'].play() if not crashInfo['groundCrash']: SOUNDS['die'].play() while True: for", "(SCREENWIDTH - totalWidth) / 2 for digit in scoreDigits: SCREEN.blit(IMAGES['numbers'][digit], (Xoffset, SCREENHEIGHT *", "/ 2 for digit in scoreDigits: SCREEN.blit(IMAGES['numbers'][digit], (Xoffset, SCREENHEIGHT * 0.1)) Xoffset +=", "SOUNDS pipeW = IMAGES['pipe'][0].get_width() pipeH = IMAGES['pipe'][0].get_height() for i in range(FURYMODE_PARTICLES_MAX): particle =", "\"Print a message in a box in the middle of the screen\" pygame.draw.rect(screen,", "if playerFlapped: playerFlapped = False # more rotation to cover the threshold (calculated", "\"timer\" (a frame counter) FURYMODE_FRAMES_TO_SPAWN_PIPES = 35 # pipes particles amount (for each", "IMAGES['easymode'] = pygame.image.load('assets/sprites/easymode.png').convert_alpha() IMAGES['hardmode'] = pygame.image.load('assets/sprites/hardmode.png').convert_alpha() # speaker sprite IMAGES['speaker'] = (pygame.image.load('assets/sprites/speaker_full.png').convert_alpha(), pygame.image.load('assets/sprites/speaker_mute.png').convert_alpha())", "random background sprites randBg = random.randint(0, len(BACKGROUNDS_LIST) - 1) IMAGES['background'] = 
pygame.image.load(BACKGROUNDS_LIST[randBg]).convert() #", "= crashInfo['upperPipes'], crashInfo['lowerPipes'] # play hit and die sounds SOUNDS['hit'].play() if not crashInfo['groundCrash']:", "values for mainGame FURYMODE = True SOUNDS['wing'].play() return { 'playery': playery + playerShmVals['val'],", "# player and pipe hitmasks pHitMask = HITMASKS['player'][pi] lHitmask = HITMASKS['pipe'][0] # if", "sprites randBg = random.randint(0, len(BACKGROUNDS_LIST) - 1) IMAGES['background'] = pygame.image.load(BACKGROUNDS_LIST[randBg]).convert() # select random", "of gap between upper and lower pipe gapY = random.randrange(0, int(BASEY * 0.6", "int((SCREENHEIGHT - IMAGES['player'][0].get_height()) / 2) messagex = int((SCREENWIDTH - IMAGES['message'].get_width()) / 2) messagey", "aMax = math.pi * .25 angle = random.random() * (aMax - aMin) +", "current_string = current_string[0:-1] elif inkey == K_RETURN: break elif inkey == K_MINUS: current_string.append(\"_\")", "randPlayer = random.randint(0, len(PLAYERS_LIST) - 1) IMAGES['player'] = ( pygame.image.load(PLAYERS_LIST[randPlayer][0]).convert_alpha(), pygame.image.load(PLAYERS_LIST[randPlayer][1]).convert_alpha(), pygame.image.load(PLAYERS_LIST[randPlayer][2]).convert_alpha(), )", "crashTest[0]: # the player hits a pipe in fury mode if FURYMODE and", "lowerPipes.append(newPipe[1]) # 추가된 부분 newCoin = getRandomCoin() coins.append(newCoin[0]) # remove first pipe if", "playery += min(playerVelY, BASEY - playery - playerHeight) # player velocity change if", "False pygame.time.delay(1000) showLeaderboard() FPSCLOCK.tick(FPS) pygame.display.update() def showLeaderboard(): fontobject = pygame.font.Font(None,30) SCREEN.blit(IMAGES['background'],(0,0)) SCREEN.blit(pygame.font.Font(None,50).render(\"LEADERBOARD\", 1,", "(0,0)) for uPipe in upperPipes: SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y'])) for lPipe in lowerPipes: SCREEN.blit(IMAGES['pipe'][1],", "Y, max ascend speed playerAccY = 1 # players downward accleration 
playerRot =", "100, (screen.get_height() / 2) - 10)) pygame.display.flip() def ask(screen, question): \"ask(screen, question) ->", "== K_ESCAPE): pygame.quit() sys.exit() if event.type == pygame.MOUSEBUTTONDOWN: pos = pygame.mouse.get_pos() if SLIDER.button_rect.collidepoint(pos):", "adjust playery, playerIndex, basex if (loopIter + 1) % 5 == 0: playerIndex", "particles.remove(particle) else: # add new pipes when first pipe is about to touch", "pipe): \"\"\" Add paticles to the particle list randomly generated with pipe's rectangle", "추가된 부분 coinTest = checkCoin({'x': playerx, 'y': playery, 'index': playerIndex}, coins) if crashTest[0]:", "\"\"\" returns a randomly generated coin \"\"\" coinY = random.randrange(20, int(BASEY * 0.6))", "75, (screen.get_height() / 2) - 50)) screen.blit(fontobject.render(message, 1, (255,255,255)), ((screen.get_width() / 2) -", "{'x': pipeX, 'y': gapY - pipeHeight}, # upper pipe {'x': pipeX, 'y': gapY", "# adjust playery, playerIndex, basex if (loopIter + 1) % 5 == 0:", "48 # list of all possible players (tuple of 3 positions of flap)", "playerVelY < 15: playerVelY += playerAccY # rotate only when it's a pipe", "((screen.get_width() / 2) - 100, (screen.get_height() / 2) - 10, 200,20), 0) pygame.draw.rect(screen,", "= HITMASKS['pipe'][0] # if bird collided with pipe uCollide = pixelCollision(playerRect, uPipeRect, pHitMask,", "% baseShift) # rotate the player if playerRot > -90: playerRot -= playerVelRot", "pygame.image.load('assets/sprites/gameover.png').convert_alpha() # message sprite for welcome screen IMAGES['message'] = pygame.image.load('assets/sprites/message.png').convert_alpha() # base (ground)", "= int(SCREENHEIGHT * 0.5) #SCREEN.blit(IMAGES['background'], (0,0)) for uPipe, lPipe in zip(upperPipes, lowerPipes): SCREEN.blit(IMAGES['pipe'][0],", "(0, 255, 50) BLUE = (50, 50, 255) GREY = (200, 200, 200)", "which pipe is colliding (lower or upper) if FURYMODE: return [True, False, False,", "playery - playerHeight) # move pipes 
to left for uPipe in upperPipes: uPipe['x']", "newPipe2[1]['y']}, ] # 추가된 부분 newCoin1 = getRandomCoin() newCoin2 = getRandomCoin() coins =", "IMAGES['furymode'].get_rect(center=(furymodex+54,furymodey+14)).collidepoint(pygame.mouse.get_pos())): # make first flap sound and return values for mainGame FURYMODE =", "( 'assets/sprites/yellowbird-upflap.png', 'assets/sprites/yellowbird-midflap.png', 'assets/sprites/yellowbird-downflap.png', ), ) # list of backgrounds BACKGROUNDS_LIST = (", "IMAGES['player'][0].get_height() # if player crashes into ground if player['y'] + player['h'] >= BASEY", "getRandomCoin(): \"\"\" returns a randomly generated coin \"\"\" coinY = random.randrange(20, int(BASEY *", "'B', pygame.K_c: 'C', pygame.K_d: 'D', pygame.K_e: 'E', pygame.K_f: 'F', pygame.K_g: 'G', pygame.K_h: 'H',", "\"\"\" Combination of static and dynamic graphics in a copy of the basic", "# {'x': position-x, 'y': position-y, # 'vx': velocity-x, 'vy': velocity-y, # 'i': index", "/ 2 for pipe in upperPipes: pipeMidPos = pipe['x'] + IMAGES['pipe'][0].get_width() / 2", "if player collders with base or pipes.\"\"\" global FURYMODE pi = player['index'] player['w']", ") # pipes' particles for fury mode # pipes are green if pipeindex", "= 0 # amount by which base can maximum shift to left baseShift", "numbers sprites for score display IMAGES['numbers'] = ( pygame.image.load('assets/sprites/0.png').convert_alpha(), pygame.image.load('assets/sprites/1.png').convert_alpha(), pygame.image.load('assets/sprites/2.png').convert_alpha(), pygame.image.load('assets/sprites/3.png').convert_alpha(), pygame.image.load('assets/sprites/4.png').convert_alpha(),", "key for fury mode if (event.type == KEYDOWN and event.key == K_1) or", "upperPipes.append(newPipe[0]) lowerPipes.append(newPipe[1]) # 추가된 부분 newCoin = getRandomCoin() coins.append(newCoin[0]) # remove first pipe", "basex = 0 # amount by which base can maximum shift to left", "1 SOUNDS['point'].play() coins.pop(0) # check for score 
playerMidPos = playerx + IMAGES['player'][0].get_width() /", "* 0.68) hardmodex = int((SCREENWIDTH - IMAGES['hardmode'].get_width())/2) hardmodey = int(SCREENHEIGHT * 0.74) furymodex", "attribute indicates slider movement due to mouse interaction # Static graphics - slider", "pipe['x'] + pipeW) particle['y'] = random.randint(pipe['y'], pipe['y'] + pipeH) particle['i'] = random.randint(1, FURYMODE_PARTICLES)", "playerAccY # remove if the particle is under the ground if particle['y'] >=", "FURYMODE: for particle in particles: SCREEN.blit(IMAGES['pipe-particle'][particle['i']], (particle['x'], particle['y'])) SCREEN.blit(IMAGES['base'], (basex, BASEY)) # print", "2), 'y': newPipe2[1]['y']}, ] # 추가된 부분 newCoin1 = getRandomCoin() newCoin2 = getRandomCoin()", "# add new pipes when first pipe is about to touch left of", "randomly generated pipe \"\"\" # y of gap between upper and lower pipe", "only when it's a pipe crash if not crashInfo['groundCrash']: if playerRot > -90:", "lowerPipes = [ {'x': SCREENWIDTH + 200, 'y': newPipe1[1]['y']}, {'x': SCREENWIDTH + 200", "= (SCREENWIDTH - totalWidth) / 2 for digit in scoreDigits: SCREEN.blit(IMAGES['numbers'][digit], (Xoffset, SCREENHEIGHT", "sprites overx = int((SCREENWIDTH - IMAGES['gameover'].get_width()) / 2) overy = int(SCREENHEIGHT * 0.5)", "cHitMask) if cCollide : return [True, False] return [False, False] def pixelCollision(rect1, rect2,", "FURYMODE: furymodePipeFrameCounter += 1 # the counter has the max value, we must", "if crashTest[2]: upperPipes.remove(crashTest[3]) score+=1 # it's a lower pipe else: lowerPipes.remove(crashTest[3]) else: return", "display IMAGES['numbers'] = ( pygame.image.load('assets/sprites/0.png').convert_alpha(), pygame.image.load('assets/sprites/1.png').convert_alpha(), pygame.image.load('assets/sprites/2.png').convert_alpha(), pygame.image.load('assets/sprites/3.png').convert_alpha(), pygame.image.load('assets/sprites/4.png').convert_alpha(), 
pygame.image.load('assets/sprites/5.png').convert_alpha(), pygame.image.load('assets/sprites/6.png').convert_alpha(), pygame.image.load('assets/sprites/7.png').convert_alpha(), pygame.image.load('assets/sprites/8.png').convert_alpha(),", "inkey = get_key() if inkey == K_BACKSPACE: current_string = current_string[0:-1] elif inkey ==", "right padding) furymodeKeyX = furymodex + IMAGES['furymode'].get_width() + 8 furymodeKeyY = furymodey +", "(overx,overy)) pygame.display.update() gameover = False pygame.time.delay(1000) showLeaderboard() FPSCLOCK.tick(FPS) pygame.display.update() def showLeaderboard(): fontobject =", "and (event.key == K_SPACE or event.key == K_UP)) or ((event.type == MOUSEBUTTONDOWN and", "- 100, (SCREEN.get_height() / 2) -160 + (50*i))) SCREEN.blit(fontobject.render(str(TOPFIVE[i][1]), 1,RED),((SCREEN.get_width() / 2) +", "mode if FURYMODE and not crashTest[1]: spawnParticles(particles, crashTest[3]) # remove the pipe #", "player down ans shows gameover image\"\"\" global FURYMODE, EASYMODE FURYMODE = False EASYMODE", "and return values for mainGame FURYMODE = True SOUNDS['wing'].play() return { 'playery': playery", "FURYMODE pi = player['index'] player['w'] = IMAGES['player'][0].get_width() player['h'] = IMAGES['player'][0].get_height() # if player", "2) - 75, (screen.get_height() / 2) - 50)) screen.blit(fontobject.render(message, 1, (255,255,255)), ((screen.get_width() /", "crashInfo['basex'] upperPipes, lowerPipes = crashInfo['upperPipes'], crashInfo['lowerPipes'] # play hit and die sounds SOUNDS['hit'].play()", "(furymodeKeyX, furymodeKeyY)) pygame.display.update() FPSCLOCK.tick(FPS) def mainGame(movementInfo): global FURYMODE, FURYMODE_FRAMES_TO_SPAWN_PIPES, EASYMODE DIFFICULTY = 0", "= playerx + IMAGES['player'][0].get_width() / 2 for pipe in upperPipes: pipeMidPos = pipe['x']", "# more rotation to cover the threshold (calculated in visible rotation) playerRot =", "= range class Keyboard(object): keys = {pygame.K_a: 'A', pygame.K_b: 'B', 
pygame.K_c: 'C', pygame.K_d:", "10 \"\"\" returns a randomly generated pipe \"\"\" # y of gap between", "# static surf = self.surf.copy() # dynamic pos = (10+int((self.val-self.mini)/(self.maxi-self.mini)*80), 33) self.button_rect =", "0 playerx = int(SCREENWIDTH * 0.2) playery = int((SCREENHEIGHT - IMAGES['player'][0].get_height()) / 2)", "particles.append(particle) # sound effect SOUNDS['hit'].play() def checkCrash(player, upperPipes, lowerPipes): \"\"\"returns True if player", "# counter reset furymodePipeFrameCounter = 0 # pipe spawn pipes = getRandomPipe(DIFFICULTY) upperPipes.append(pipes[0])", "0 # amount by which base can maximum shift to left baseShift =", "lPipe in zip(upperPipes, lowerPipes): SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y'])) SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y'])) SCREEN.blit(IMAGES['base'], (basex, BASEY))", "value of playerShm['val'] between 8 and -8\"\"\" if abs(playerShm['val']) == 8: playerShm['dir'] *=", "= False score = crashInfo['score'] playerx = SCREENWIDTH * 0.2 playery = crashInfo['y']", "if len(message) != 0: screen.blit(fontobject1.render(\"HIGH SCORE!!!\", 1, (255,255,255)), ((screen.get_width() / 2) - 75,", "rect2.x, rect.y - rect2.y for x in xrange(rect.width): for y in xrange(rect.height): if", "and event.button == 1) and IMAGES['easymode'].get_rect(center=(easymodex+54,easymodey+14)).collidepoint(pygame.mouse.get_pos())): EASYMODE = True # make first flap", "uPipe in upperPipes: # pipe rect uPipeRect = pygame.Rect(uPipe['x'], uPipe['y'], pipeW, pipeH) #", "cCollide = pixelCollision(playerRect, coinRect, pHitMask, cHitMask) if cCollide : return [True, False] return", "hitmask2): \"\"\"Checks if two objects collide and not just their rects\"\"\" rect =", "static and dynamic graphics in a copy of the basic slide surface \"\"\"", "lPipe['y'], pipeW, pipeH) # player and pipe hitmasks pHitMask = HITMASKS['player'][pi] lHitmask =", "pipeVelX # update (add / remove) pipes and particles if FURYMODE: 
furymodePipeFrameCounter +=", "= pygame.transform.rotate(IMAGES['player'][1], playerRot) SCREEN.blit(playerSurface, (playerx,playery)) #showScore(score) if (score > TOPFIVE[4][1] and count==0) :", "0.74) furymodex = int((SCREENWIDTH - IMAGES['furymode'].get_width()) / 2) furymodey = int(SCREENHEIGHT * 0.80)", "newCoin2[0]['y']}, ] pipeVelX = -4 # player velocity, max velocity, downward accleration, accleration", "pygame.image.load('assets/sprites/particles-red-0.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-1.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-2.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-3.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-4.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-5.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-6.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-7.png').convert_alpha(), ) # hismask for pipes", "minimum velocity vel = random.random() * 10 + 5 aMin = -math.pi *", "return values for mainGame FURYMODE = True SOUNDS['wing'].play() return { 'playery': playery +", "= {} particle['x'] = random.randint(pipe['x'], pipe['x'] + pipeW) particle['y'] = random.randint(pipe['y'], pipe['y'] +", "\"\"\" Add paticles to the particle list randomly generated with pipe's rectangle (hitbox)", "rect.height == 0: return False x1, y1 = rect.x - rect1.x, rect.y -", "objects collide and not just their rects\"\"\" rect = rect1.clip(rect2) if rect.width ==", "'y': position-y, # 'vx': velocity-x, 'vy': velocity-y, # 'i': index in textures list}", "playerFlapped: playerVelY += playerAccY if playerFlapped: playerFlapped = False # more rotation to", "HITMASKS['pipe'][0] # if bird collided with pipe lCollide = pixelCollision(playerRect, lPipeRect, pHitMask, lHitmask)", "a minimum velocity vel = random.random() * 10 + 5 aMin = -math.pi", "# if bird collided with pipe uCollide = 
pixelCollision(playerRect, uPipeRect, pHitMask, uHitmask) if", "used to change playerIndex after every 5th iteration loopIter = 0 playerx =", "= {pygame.K_a: 'A', pygame.K_b: 'B', pygame.K_c: 'C', pygame.K_d: 'D', pygame.K_e: 'E', pygame.K_f: 'F',", "> TOPFIVE[4][1] and count==0) : SCREEN.blit(IMAGES['gameover'], (overx,overy)) pygame.display.update() gameover = False pygame.time.delay(1000) SCREEN.blit(IMAGES['background'],", "must be removed from the list for uPipe in upperPipes: if uPipe['x'] <", "IMAGES['gameover'] = pygame.image.load('assets/sprites/gameover.png').convert_alpha() # message sprite for welcome screen IMAGES['message'] = pygame.image.load('assets/sprites/message.png').convert_alpha() #", "== pygame.MOUSEBUTTONUP: SLIDER.hit = False # Move volume slider if SLIDER.hit: SLIDER.move() for", "down ans shows gameover image\"\"\" global FURYMODE, EASYMODE FURYMODE = False EASYMODE =", "부분 HITMASKS['coin'] = ( getHitmask(IMAGES['coin']), ) movementInfo = showWelcomeAnimation() crashInfo = mainGame(movementInfo) showGameOverScreen(crashInfo)", "return # player y shift if playery + playerHeight < BASEY - 1:", "(add / remove) pipes and particles if FURYMODE: furymodePipeFrameCounter += 1 # the", "playerHeight) # player velocity change if playerVelY < 15: playerVelY += playerAccY #", "pygame.transform.rotate(IMAGES['player'][1], playerRot) SCREEN.blit(playerSurface, (playerx,playery)) #showScore(score) if (score > TOPFIVE[4][1] and count==0) : SCREEN.blit(IMAGES['gameover'],", "gap between upper and lower part of pipe BASEY=SCREENHEIGHT * 0.79 # image,", "get 2 new pipes to add to upperPipes lowerPipes list newPipe1 = getRandomPipe(DIFFICULTY)", "SCREENWIDTH + 200, 'y': newPipe1[0]['y']}, {'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2),", "make first flap sound and return values for mainGame FURYMODE = True SOUNDS['wing'].play()", "coins.pop(0) # draw sprites SCREEN.blit(IMAGES['background'], (0,0)) for uPipe in upperPipes: SCREEN.blit(IMAGES['pipe'][0], 
(uPipe['x'], uPipe['y']))", "pipeMidPos <= playerMidPos < pipeMidPos + 4: score += 1 SOUNDS['point'].play() # playerIndex", "newCoin1[0]['y']}, {'x': SCREENWIDTH + 280 + (SCREENWIDTH / 2), 'y': newCoin2[0]['y']}, ] pipeVelX", "playerRect = pygame.Rect(player['x'], player['y'], player['w'], player['h']) pipeW = IMAGES['pipe'][0].get_width() pipeH = IMAGES['pipe'][0].get_height() for", "hardmodex = int((SCREENWIDTH - IMAGES['hardmode'].get_width())/2) hardmodey = int(SCREENHEIGHT * 0.74) furymodex = int((SCREENWIDTH", "% 3 == 0: playerIndex = next(playerIndexGen) loopIter = (loopIter + 1) %", "pygame.image.load('assets/sprites/9.png').convert_alpha() ) # game over sprite IMAGES['gameover'] = pygame.image.load('assets/sprites/gameover.png').convert_alpha() # message sprite for", "furymodePipeFrameCounter = 0 while True: for event in pygame.event.get(): if event.type == QUIT", "PIPEGAPSIZE)) gapY += int(BASEY * 0.2) pipeHeight = IMAGES['pipe'][0].get_height() pipeX = SCREENWIDTH +", "for fury mode # pipes are green if pipeindex == 0: IMAGES['pipe-particle'] =", "50) YELLOW = (255, 255, 0) GREEN = (0, 255, 50) BLUE =", "# players downward accleration playerRot = 45 # player's rotation playerVelRot = 3", "not just their rects\"\"\" rect = rect1.clip(rect2) if rect.width == 0 or rect.height", "to cover the threshold (calculated in visible rotation) playerRot = 45 playerHeight =", "'y': newPipe1[0]['y']}, {'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y': newPipe2[0]['y']}, ]", "after every 5th iteration loopIter = 0 playerx = int(SCREENWIDTH * 0.2) playery", "부분 if coins[0]['x'] < -IMAGES['coin'].get_width(): coins.pop(0) # draw sprites SCREEN.blit(IMAGES['background'], (0,0)) for uPipe", "in list(str(score))] totalWidth = 0 # total width of all numbers to be", "index in textures list} particles = [] # 추가된 부분 coins = []", "list of lowerpipe lowerPipes = [ {'x': SCREENWIDTH + 200, 'y': newPipe1[1]['y']}, {'x':", "for x in xrange(image.get_width()): mask.append([]) for y in 
xrange(image.get_height()): mask[x].append(bool(image.get_at((x,y))[3])) return mask def", "coins) if crashTest[0]: # the player hits a pipe in fury mode if", "(200, 200, 200) ORANGE = (200, 100, 50) CYAN = (0, 255, 255)", "-132, (SCREEN.get_height() / 2) -220)) for i in range(0,5) : SCREEN.blit(fontobject.render(TOPFIVE[i][0], 1, RED),((SCREEN.get_width()", "if 0 < upperPipes[0]['x'] < 5: newPipe = getRandomPipe(DIFFICULTY) upperPipes.append(newPipe[0]) lowerPipes.append(newPipe[1]) # 추가된", "= pygame.font.Font(None,30) \"Print a message in a box in the middle of the", "(with the key) IMAGES['furymode'] = pygame.image.load('assets/sprites/furymode.png').convert_alpha() IMAGES['furymode-key'] = pygame.image.load('assets/sprites/furymode-key.png').convert_alpha() IMAGES['easymode'] = pygame.image.load('assets/sprites/easymode.png').convert_alpha() IMAGES['hardmode']", "newCoin = getRandomCoin() coins.append(newCoin[0]) # remove first pipe if its out of the", "130 # gap between upper and lower part of pipe BASEY=SCREENHEIGHT * 0.79", "( # amount by which base can maximum shift to left 'assets/sprites/bluebird-upflap.png', 'assets/sprites/bluebird-midflap.png',", "if playerVelY < 15: playerVelY += playerAccY # rotate only when it's a", "1) IMAGES['player'] = ( pygame.image.load(PLAYERS_LIST[randPlayer][0]).convert_alpha(), pygame.image.load(PLAYERS_LIST[randPlayer][1]).convert_alpha(), pygame.image.load(PLAYERS_LIST[randPlayer][2]).convert_alpha(), ) # select random pipe sprites", "for sound in SOUNDS: SOUNDS[sound].set_volume(SLIDER.val) while True: for event in pygame.event.get(): if event.type", "SCREEN.blit(playerSurface, (playerx, playery)) pygame.display.update() FPSCLOCK.tick(FPS) def showGameOverScreen(crashInfo): \"\"\"crashes the player down ans shows", "coins: coinRect = pygame.Rect(coin['x'], coin['y'], coinW, coinH) pHitMask = HITMASKS['player'][pi] cHitMask = HITMASKS['coin'][0]", "or rect.height == 0: return False x1, y1 = rect.x - rect1.x, rect.y", 
"iterator used to change playerIndex after every 5th iteration loopIter = 0 playerx", "+ 1) % 30 basex = -((-basex + 100) % baseShift) # rotate", "FURYMODE and not crashTest[1]: spawnParticles(particles, crashTest[3]) # remove the pipe # it's an", "5: newPipe = getRandomPipe(DIFFICULTY) upperPipes.append(newPipe[0]) lowerPipes.append(newPipe[1]) # 추가된 부분 newCoin = getRandomCoin() coins.append(newCoin[0])", "else : SCREEN.blit(IMAGES['speaker'][1], (160,15)) SCREEN.blit(IMAGES['player'][playerIndex], (playerx, playery + playerShmVals['val'])) SCREEN.blit(IMAGES['message'], (messagex, messagey)) SCREEN.blit(IMAGES['base'],", "'y': gapY + PIPEGAPSIZE}, # lower pipe ] # 추가된 부분 def getRandomCoin():", "1) if len(message) != 0: screen.blit(fontobject1.render(\"HIGH SCORE!!!\", 1, (255,255,255)), ((screen.get_width() / 2) -", "0 score = playerIndex = loopIter = 0 playerIndexGen = movementInfo['playerIndexGen'] playerx, playery", "pygame.image.load('assets/sprites/7.png').convert_alpha(), pygame.image.load('assets/sprites/8.png').convert_alpha(), pygame.image.load('assets/sprites/9.png').convert_alpha() ) # game over sprite IMAGES['gameover'] = pygame.image.load('assets/sprites/gameover.png').convert_alpha() # message", "for uPipe in upperPipes: uPipe['x'] += pipeVelX for lPipe in lowerPipes: lPipe['x'] +=", "BASEY - 1: playery += min(playerVelY, BASEY - playery - playerHeight) # player", "the \"fury mode\" button for welcome screen (with the key) IMAGES['furymode'] = pygame.image.load('assets/sprites/furymode.png').convert_alpha()", "FURYMODE: return [True, False, False, lPipe] # normal mode return [True, False] return", "= (255, 0, 255) TRANS = (1, 1, 1) TOPFIVE=[('kim',0),('kim',0),('kim',0),('kim',0),('kim',0)] FPS = 30", "= pygame.mouse.get_pos() if SLIDER.button_rect.collidepoint(pos): SLIDER.hit = True elif event.type == pygame.MOUSEBUTTONUP: SLIDER.hit =", "crashInfo['score'] playerx = SCREENWIDTH * 0.2 playery = crashInfo['y'] playerHeight = 
IMAGES['player'][0].get_height() playerVelY", "speed playerMinVelY = -8 # min vel along Y, max ascend speed playerAccY", "dynamic part; reacts to movement of the slider button. \"\"\" self.val = (pygame.mouse.get_pos()[0]", "getHitmask(IMAGES['pipe'][1]), ) # hitmask for player HITMASKS['player'] = ( getHitmask(IMAGES['player'][0]), getHitmask(IMAGES['player'][1]), getHitmask(IMAGES['player'][2]), )", "/ 2) messagex = int((SCREENWIDTH - IMAGES['message'].get_width()) / 2) messagey = int(SCREENHEIGHT *", "move pipes to left for uPipe in upperPipes: uPipe['x'] += pipeVelX for lPipe", "a box in the middle of the screen\" pygame.draw.rect(screen, (0,0,0), ((screen.get_width() / 2)", "2 for digit in scoreDigits: SCREEN.blit(IMAGES['numbers'][digit], (Xoffset, SCREENHEIGHT * 0.1)) Xoffset += IMAGES['numbers'][digit].get_width()", "right of the fury mode button (8 is right padding) furymodeKeyX = furymodex", "player and pipe hitmasks pHitMask = HITMASKS['player'][pi] lHitmask = HITMASKS['pipe'][0] # if bird", "# remove if the particle is under the ground if particle['y'] >= BASEY:", "= self.surf.copy() # dynamic pos = (10+int((self.val-self.mini)/(self.maxi-self.mini)*80), 33) self.button_rect = self.button_surf.get_rect(center=pos) surf.blit(self.button_surf, self.button_rect)", "= False # True when player flaps # The counter to spawn new", "pygame.MOUSEBUTTONUP: SLIDER.hit = False # Move volume slider if SLIDER.hit: SLIDER.move() for sounds", "if lPipe['x'] < -IMAGES['pipe'][0].get_width(): lowerPipes.remove(lPipe) # particles for particle in particles: # speed", "for i in range(FURYMODE_PARTICLES_MAX): particle = {} particle['x'] = random.randint(pipe['x'], pipe['x'] + pipeW)", "correct screen position # screen SCREEN.blit(IMAGES['background'], (0,0)) SCREEN.blit(surf, (self.xpos, self.ypos)) def move(self): \"\"\"", "(1) key for fury mode if (event.type == KEYDOWN and event.key == K_1)", "pygame.draw.circle(self.button_surf, ORANGE, (6, 6), 6, 0) def draw(self): 
\"\"\" Combination of static and", "graphics in a copy of the basic slide surface \"\"\" # static surf", "lPipe['x'] += pipeVelX # 추가된 부분 for coin in coins: coin['x'] += pipeVelX", "(loopIter + 1) % 5 == 0: playerIndex = next(playerIndexGen) loopIter = (loopIter", "IMAGES['pipe-particle'] = ( pygame.image.load('assets/sprites/particles-green-0.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-1.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-2.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-3.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-4.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-5.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-6.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-7.png').convert_alpha(), ) else:", "SOUNDS['die'].play() while True: for event in pygame.event.get(): if event.type == QUIT or (event.type", "for lPipe in lowerPipes: # pipe rect lPipeRect = pygame.Rect(lPipe['x'], lPipe['y'], pipeW, pipeH)", "lowerPipes list newPipe1 = getRandomPipe(DIFFICULTY) newPipe2 = getRandomPipe(DIFFICULTY) # list of upper pipes", "of lowerpipe lowerPipes = [ {'x': SCREENWIDTH + 200, 'y': newPipe1[1]['y']}, {'x': SCREENWIDTH", "newPipe1[0]['y']}, {'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y': newPipe2[0]['y']}, ] #", "mode return [True, False] return [False, False] # 추가된 부분 def checkCoin(player, coins):", "SCORE!!!\", 1, (255,255,255)), ((screen.get_width() / 2) - 75, (screen.get_height() / 2) - 50))", "to left baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width() # player shm for up-down motion", "soundExt) SOUNDS['wing'] = pygame.mixer.Sound('assets/audio/wing' + soundExt) # volume slider(defaultValue, maximum, minimum, Xposition, Yposition)", "\"),score)) TOPFIVE.sort(key=itemgetter(1),reverse= True) TOPFIVE.pop() def getHitmask(image): \"\"\"returns a hitmask 
using an image's alpha.\"\"\"", "int(BASEY * 0.6 - PIPEGAPSIZE)) gapY += int(BASEY * 0.2) pipeHeight = IMAGES['pipe'][0].get_height()", "= IMAGES['player'][0].get_height() playerVelY = crashInfo['playerVelY'] playerAccY = 2 playerRot = crashInfo['playerRot'] playerVelRot =", "pygame.image.load('assets/sprites/5.png').convert_alpha(), pygame.image.load('assets/sprites/6.png').convert_alpha(), pygame.image.load('assets/sprites/7.png').convert_alpha(), pygame.image.load('assets/sprites/8.png').convert_alpha(), pygame.image.load('assets/sprites/9.png').convert_alpha() ) # game over sprite IMAGES['gameover'] = pygame.image.load('assets/sprites/gameover.png').convert_alpha()", "SCREENHEIGHT)) pygame.display.set_caption('Flappy Bird') # numbers sprites for score display IMAGES['numbers'] = ( pygame.image.load('assets/sprites/0.png').convert_alpha(),", "/ 2) -132, (SCREEN.get_height() / 2) -220)) for i in range(0,5) : SCREEN.blit(fontobject.render(TOPFIVE[i][0],", "furymodeKeyX = furymodex + IMAGES['furymode'].get_width() + 8 furymodeKeyY = furymodey + IMAGES['furymode-key'].get_height() /", "TOPFIVE.append((ask(SCREEN,\"NAME: \"),score)) TOPFIVE.sort(key=itemgetter(1),reverse= True) TOPFIVE.pop() def getHitmask(image): \"\"\"returns a hitmask using an image's", "int((SCREENWIDTH - IMAGES['hardmode'].get_width())/2) hardmodey = int(SCREENHEIGHT * 0.74) furymodex = int((SCREENWIDTH - IMAGES['furymode'].get_width())", "flapping playerFlapped = False # True when player flaps # The counter to", "+= pipeVelX # 추가된 부분 for coin in coins: coin['x'] += pipeVelX #", "playerRot <= playerRotThr: visibleRot = playerRot playerSurface = pygame.transform.rotate(IMAGES['player'][playerIndex], visibleRot) SCREEN.blit(playerSurface, (playerx, playery))", "부분 for coin in coins: coin['x'] += pipeVelX # update (add / remove)", "1 SOUNDS['point'].play() # playerIndex basex change if (loopIter + 1) % 3 ==", "playerMidPos < pipeMidPos + 4: score += 1 SOUNDS['point'].play() # playerIndex basex 
change", "normal mode return [True, False] return [False, False] # 추가된 부분 def checkCoin(player,", "be printed for digit in scoreDigits: totalWidth += IMAGES['numbers'][digit].get_width() Xoffset = (SCREENWIDTH -", "and lower part of pipe BASEY=SCREENHEIGHT * 0.79 # image, sound and hitmask", "FPSCLOCK = pygame.time.Clock() SCREEN = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT)) pygame.display.set_caption('Flappy Bird') # numbers sprites for", "getHitmask(IMAGES['player'][2]), ) # 추가된 부분 HITMASKS['coin'] = ( getHitmask(IMAGES['coin']), ) movementInfo = showWelcomeAnimation()", "int((SCREENWIDTH - IMAGES['message'].get_width()) / 2) messagey = int(SCREENHEIGHT * 0.12) easymodex = int((SCREENWIDTH", "list of upper pipes upperPipes = [] # list of lowerpipe lowerPipes =", "visibleRot = playerRot playerSurface = pygame.transform.rotate(IMAGES['player'][playerIndex], visibleRot) SCREEN.blit(playerSurface, (playerx, playery)) pygame.display.update() FPSCLOCK.tick(FPS) def", "velocity change if playerVelY < 15: playerVelY += playerAccY # rotate only when", "width of all numbers to be printed for digit in scoreDigits: totalWidth +=", "playery + playerShmVals['val'], 'basex': basex, 'playerIndexGen': playerIndexGen, } # (1) key for fury", "counter to spawn new pipes furymodePipeFrameCounter = 0 while True: for event in", "= 45 playerHeight = IMAGES['player'][playerIndex].get_height() playery += min(playerVelY, BASEY - playery - playerHeight)", "pygame.image.load('assets/sprites/particles-green-1.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-2.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-3.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-4.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-5.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-6.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-7.png').convert_alpha(), ) else: 
IMAGES['pipe-particle'] = ( pygame.image.load('assets/sprites/particles-red-0.png').convert_alpha(),", "True return False def writeScore(score): TOPFIVE.append((ask(SCREEN,\"NAME: \"),score)) TOPFIVE.sort(key=itemgetter(1),reverse= True) TOPFIVE.pop() def getHitmask(image): \"\"\"returns", "= {}, {}, {} # True if the user plays the fury mode", "1]) # iterator used to change playerIndex after every 5th iteration loopIter =", "(0,0,0), ((screen.get_width() / 2) - 100, (screen.get_height() / 2) - 10, 200,20), 0)", "), # yellow bird ( 'assets/sprites/yellowbird-upflap.png', 'assets/sprites/yellowbird-midflap.png', 'assets/sprites/yellowbird-downflap.png', ), ) # list of", "lowerPipes: lPipe['x'] += pipeVelX # 추가된 부분 for coin in coins: coin['x'] +=", "elif inkey == K_RETURN: break elif inkey == K_MINUS: current_string.append(\"_\") elif inkey <=", "to be printed for digit in scoreDigits: totalWidth += IMAGES['numbers'][digit].get_width() Xoffset = (SCREENWIDTH", "= Slider(0.5, 1, 0, 190, 0) while True: # select random background sprites", "SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y'])) SCREEN.blit(IMAGES['base'], (basex, BASEY)) showScore(score) playerSurface = pygame.transform.rotate(IMAGES['player'][1], playerRot) SCREEN.blit(playerSurface, (playerx,playery))", "IMAGES['hardmode'].get_width())/2) hardmodey = int(SCREENHEIGHT * 0.74) furymodex = int((SCREENWIDTH - IMAGES['furymode'].get_width()) / 2)", "+= 1 SOUNDS['point'].play() coins.pop(0) # check for score playerMidPos = playerx + IMAGES['player'][0].get_width()", "4) % baseShift) playerShm(playerShmVals) # draw sprites SCREEN.blit(IMAGES['background'], (0,0)) SLIDER.draw() if(SLIDER.val>0): SCREEN.blit(IMAGES['speaker'][0], (160,15))", "SCREEN.blit(IMAGES['background'], (0,0)) for uPipe in upperPipes: SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y'])) for lPipe in lowerPipes:", "coin in coins: coin['x'] += pipeVelX # update (add / remove) pipes and", "maximum shift to left 
'assets/sprites/bluebird-upflap.png', 'assets/sprites/bluebird-midflap.png', 'assets/sprites/bluebird-downflap.png', ), # yellow bird ( 'assets/sprites/yellowbird-upflap.png',", "'playerIndexGen': playerIndexGen, } # (1) key for fury mode if (event.type == KEYDOWN", "\"\"\"Checks if two objects collide and not just their rects\"\"\" rect = rect1.clip(rect2)", "playerx + IMAGES['player'][0].get_width() / 2 for pipe in upperPipes: pipeMidPos = pipe['x'] +", "total width of all numbers to be printed for digit in scoreDigits: totalWidth", "first pipe if its out of the screen if upperPipes[0]['x'] < -IMAGES['pipe'][0].get_width(): lowerPipes.pop(0)", "getHitmask(IMAGES['player'][1]), getHitmask(IMAGES['player'][2]), ) # 추가된 부분 HITMASKS['coin'] = ( getHitmask(IMAGES['coin']), ) movementInfo =", "추가된 부분 if coinTest[0]: score += 1 SOUNDS['point'].play() coins.pop(0) # check for score", "IMAGES['furymode'].get_width()) / 2) furymodey = int(SCREENHEIGHT * 0.80) # just at right of", "IMAGES['coin'] = pygame.image.load('assets/sprites/coin.png').convert_alpha() # sounds if 'win' in sys.platform: soundExt = '.wav' else:", "+= particle['vx'] particle['y'] += particle['vy'] # gravity particle['vy'] += playerAccY # remove if", "pipeHeight}, # upper pipe {'x': pipeX, 'y': gapY + PIPEGAPSIZE}, # lower pipe", "IMAGES['hardmode'] = pygame.image.load('assets/sprites/hardmode.png').convert_alpha() # speaker sprite IMAGES['speaker'] = (pygame.image.load('assets/sprites/speaker_full.png').convert_alpha(), pygame.image.load('assets/sprites/speaker_mute.png').convert_alpha()) # 추가된 부분", "keys = {pygame.K_a: 'A', pygame.K_b: 'B', pygame.K_c: 'C', pygame.K_d: 'D', pygame.K_e: 'E', pygame.K_f:", "pygame.mixer.Sound('assets/audio/hit' + soundExt) SOUNDS['point'] = pygame.mixer.Sound('assets/audio/point' + soundExt) SOUNDS['swoosh'] = pygame.mixer.Sound('assets/audio/swoosh' + soundExt)", ".35 aMax = math.pi * .25 angle = random.random() * (aMax - aMin)", "frame counter) 
FURYMODE_FRAMES_TO_SPAWN_PIPES = 35 # pipes particles amount (for each pipe) FURYMODE_PARTICLES", "for coin in coins: coin['x'] += pipeVelX # update (add / remove) pipes", "== K_SPACE or event.key == K_UP)) or ((event.type == MOUSEBUTTONDOWN and event.button ==", "2 playerRot = crashInfo['playerRot'] playerVelRot = 7 count=0 gameover = True basex =", "pygame.K_x: 'X', pygame.K_y: 'Y', pygame.K_z: 'Z'} def main(): global SCREEN, FPSCLOCK, SLIDER pygame.init()", "in SOUNDS: SOUNDS[sound].set_volume(SLIDER.val) while True: for event in pygame.event.get(): if event.type == QUIT", "# player y shift if playery + playerHeight < BASEY - 1: playery", "gameover = True basex = crashInfo['basex'] upperPipes, lowerPipes = crashInfo['upperPipes'], crashInfo['lowerPipes'] # play", "playerRotThr: visibleRot = playerRot playerSurface = pygame.transform.rotate(IMAGES['player'][playerIndex], visibleRot) SCREEN.blit(playerSurface, (playerx, playery)) pygame.display.update() FPSCLOCK.tick(FPS)", "+ PIPEGAPSIZE}, # lower pipe ] # 추가된 부분 def getRandomCoin(): \"\"\" returns", "= IMAGES['coin'].get_height() for coin in coins: coinRect = pygame.Rect(coin['x'], coin['y'], coinW, coinH) pHitMask", "# list of upper pipes upperPipes = [] # list of lowerpipe lowerPipes", "val, maxi, mini, xpos, ypos): self.val = val # start value self.maxi =", "def showWelcomeAnimation(): \"\"\"Shows welcome screen animation of flappy bird\"\"\" global FURYMODE, EASYMODE #", "FURYMODE_FRAMES_TO_SPAWN_PIPES: # counter reset furymodePipeFrameCounter = 0 # pipe spawn pipes = getRandomPipe(DIFFICULTY)", "Keyboard(object): keys = {pygame.K_a: 'A', pygame.K_b: 'B', pygame.K_c: 'C', pygame.K_d: 'D', pygame.K_e: 'E',", "+ 1) % 30 basex = -((-basex + 4) % baseShift) playerShm(playerShmVals) #", "newPipe = getRandomPipe(DIFFICULTY) upperPipes.append(newPipe[0]) lowerPipes.append(newPipe[1]) # 추가된 부분 newCoin = getRandomCoin() coins.append(newCoin[0]) #", "scoreDigits: totalWidth += IMAGES['numbers'][digit].get_width() 
Xoffset = (SCREENWIDTH - totalWidth) / 2 for digit", "\"\"\"Shows welcome screen animation of flappy bird\"\"\" global FURYMODE, EASYMODE # index of", "== K_SPACE or event.key == K_UP)) or (event.type == MOUSEBUTTONDOWN and event.button ==", "TOPFIVE=[('kim',0),('kim',0),('kim',0),('kim',0),('kim',0)] FPS = 30 SCREENWIDTH = 288 SCREENHEIGHT = 512 # amount by", "= rect.x - rect2.x, rect.y - rect2.y for x in xrange(rect.width): for y", "# get 2 new pipes to add to upperPipes lowerPipes list newPipe1 =", "1) IMAGES['pipe'] = ( pygame.transform.rotate( pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), 180), pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), ) # pipes' particles for", "-8\"\"\" if abs(playerShm['val']) == 8: playerShm['dir'] *= -1 if playerShm['dir'] == 1: playerShm['val']", "(overx,overy)) pygame.display.update() gameover = False pygame.time.delay(1000) SCREEN.blit(IMAGES['background'], (0,0)) writeScore(score) count=count+1 pygame.display.update() elif(gameover ==", "(hitbox) \"\"\" global FURYMODE_PARTICLES, FURYMODE_PARTICLES_MAX, SOUNDS pipeW = IMAGES['pipe'][0].get_width() pipeH = IMAGES['pipe'][0].get_height() for", "BASEY=SCREENHEIGHT * 0.79 # image, sound and hitmask dicts IMAGES, SOUNDS, HITMASKS =", "blit on screen playerIndex = 0 playerIndexGen = cycle([0, 1, 2, 1]) #", "BASEY - 1: return # player y shift if playery + playerHeight <", "base can maximum shift to left baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width() # player", "if coinTest[0]: score += 1 SOUNDS['point'].play() coins.pop(0) # check for score playerMidPos =", "pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), 180), pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), ) # pipes' particles for fury mode # pipes are", "playery = int(SCREENWIDTH * 0.2), movementInfo['playery'] basex = movementInfo['basex'] baseShift = IMAGES['base'].get_width() -", "hitmask1, hitmask2): \"\"\"Checks if two objects collide and not just their 
rects\"\"\" rect", "100, (SCREEN.get_height() / 2) -160 + (50*i))) SCREEN.blit(fontobject.render(str(TOPFIVE[i][1]), 1,RED),((SCREEN.get_width() / 2) + 75,", "'X', pygame.K_y: 'Y', pygame.K_z: 'Z'} def main(): global SCREEN, FPSCLOCK, SLIDER pygame.init() FPSCLOCK", "NameError: xrange = range class Keyboard(object): keys = {pygame.K_a: 'A', pygame.K_b: 'B', pygame.K_c:", "if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE): pygame.quit()", "SOUNDS['point'].play() coins.pop(0) # check for score playerMidPos = playerx + IMAGES['player'][0].get_width() / 2", "coin['x'] += pipeVelX # update (add / remove) pipes and particles if FURYMODE:", ") else: IMAGES['pipe-particle'] = ( pygame.image.load('assets/sprites/particles-red-0.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-1.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-2.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-3.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-4.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-5.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-6.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-7.png').convert_alpha(),", "0) RED = (255, 50, 50) YELLOW = (255, 255, 0) GREEN =", "6, 0) def draw(self): \"\"\" Combination of static and dynamic graphics in a", "plays the fury mode FURYMODE = False EASYMODE = False # In fury", "right self.mini = mini # minimum at slider position left self.xpos = xpos", "255, 50) BLUE = (50, 50, 255) GREY = (200, 200, 200) ORANGE", "= 8 # max particles for each pipe hit FURYMODE_PARTICLES_MAX = 48 #", "if EASYMODE: DIFFICULTY = 4 # get 2 new pipes to add to", "sprite for welcome screen IMAGES['message'] = pygame.image.load('assets/sprites/message.png').convert_alpha() # base (ground) sprite IMAGES['base'] =", "a pipe crash if not crashInfo['groundCrash']: if playerRot > -90: playerRot -= 
playerVelRot", "HITMASKS['player'][pi] uHitmask = HITMASKS['pipe'][0] # if bird collided with pipe uCollide = pixelCollision(playerRect,", "# 추가된 부분 for coin in coins: SCREEN.blit(IMAGES['coin'], (coin['x'], coin['y'])) # pipes' particles", "= IMAGES['pipe'][0].get_height() for i in range(FURYMODE_PARTICLES_MAX): particle = {} particle['x'] = random.randint(pipe['x'], pipe['x']", "= -8 # min vel along Y, max ascend speed playerAccY = 1", "0: return False x1, y1 = rect.x - rect1.x, rect.y - rect1.y x2,", "class Slider(): def __init__(self, val, maxi, mini, xpos, ypos): self.val = val #", "player['w'] = IMAGES['player'][0].get_width() player['h'] = IMAGES['player'][0].get_height() # if player crashes into ground if", "1) and IMAGES['hardmode'].get_rect(center=(hardmodex+54,hardmodey+14)).collidepoint(pygame.mouse.get_pos())): SOUNDS['wing'].play() return { 'playery': playery + playerShmVals['val'], 'basex': basex, 'playerIndexGen':", "for pipes HITMASKS['pipe'] = ( getHitmask(IMAGES['pipe'][0]), getHitmask(IMAGES['pipe'][1]), ) # hitmask for player HITMASKS['player']", "hitmasks pHitMask = HITMASKS['player'][pi] lHitmask = HITMASKS['pipe'][0] # if bird collided with pipe", "IMAGES['furymode'] = pygame.image.load('assets/sprites/furymode.png').convert_alpha() IMAGES['furymode-key'] = pygame.image.load('assets/sprites/furymode-key.png').convert_alpha() IMAGES['easymode'] = pygame.image.load('assets/sprites/easymode.png').convert_alpha() IMAGES['hardmode'] = pygame.image.load('assets/sprites/hardmode.png').convert_alpha() #", "self.maxi = maxi # maximum at slider position right self.mini = mini #", "in fury mode if FURYMODE and not crashTest[1]: spawnParticles(particles, crashTest[3]) # remove the", "maxi, mini, xpos, ypos): self.val = val # start value self.maxi = maxi", "Slider(0.5, 1, 0, 190, 0) while True: # select random background sprites randBg", ") movementInfo = showWelcomeAnimation() crashInfo = mainGame(movementInfo) showGameOverScreen(crashInfo) def 
showWelcomeAnimation(): \"\"\"Shows welcome screen", "# draw sprites SCREEN.blit(IMAGES['background'], (0,0)) SLIDER.draw() if(SLIDER.val>0): SCREEN.blit(IMAGES['speaker'][0], (160,15)) else : SCREEN.blit(IMAGES['speaker'][1], (160,15))", "(basex, BASEY)) showScore(score) playerSurface = pygame.transform.rotate(IMAGES['player'][1], playerRot) SCREEN.blit(playerSurface, (playerx,playery)) #showScore(score) if (score >", "the key) IMAGES['furymode'] = pygame.image.load('assets/sprites/furymode.png').convert_alpha() IMAGES['furymode-key'] = pygame.image.load('assets/sprites/furymode-key.png').convert_alpha() IMAGES['easymode'] = pygame.image.load('assets/sprites/easymode.png').convert_alpha() IMAGES['hardmode'] =", "+ 5 aMin = -math.pi * .35 aMax = math.pi * .25 angle", "colliding (lower or upper) if FURYMODE: return [True, False, False, lPipe] # normal", "2 if pipeMidPos <= playerMidPos < pipeMidPos + 4: score += 1 SOUNDS['point'].play()", "BASEY - 1: return [True, True] else: playerRect = pygame.Rect(player['x'], player['y'], player['w'], player['h'])", "lCollide = pixelCollision(playerRect, lPipeRect, pHitMask, lHitmask) if lCollide: # for fury mode we", "IMAGES['speaker'] = (pygame.image.load('assets/sprites/speaker_full.png').convert_alpha(), pygame.image.load('assets/sprites/speaker_mute.png').convert_alpha()) # 추가된 부분 IMAGES['coin'] = pygame.image.load('assets/sprites/coin.png').convert_alpha() # sounds if", "player if playerRot > -90: playerRot -= playerVelRot # player's movement if playerVelY", "count=count+1 pygame.display.update() elif(gameover == True): SCREEN.blit(IMAGES['gameover'], (overx,overy)) pygame.display.update() gameover = False pygame.time.delay(1000) showLeaderboard()", "= 7 count=0 gameover = True basex = crashInfo['basex'] upperPipes, lowerPipes = crashInfo['upperPipes'],", "# 'i': index in textures list} particles = [] # 추가된 부분 coins", "MOUSEBUTTONDOWN and event.button == 1) and 
IMAGES['furymode'].get_rect(center=(furymodex+54,furymodey+14)).collidepoint(pygame.mouse.get_pos())): # make first flap sound and", "bird ( 'assets/sprites/yellowbird-upflap.png', 'assets/sprites/yellowbird-midflap.png', 'assets/sprites/yellowbird-downflap.png', ), ) # list of backgrounds BACKGROUNDS_LIST =", "IMAGES['pipe'] = ( pygame.transform.rotate( pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), 180), pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), ) # pipes' particles for fury", "BLACK = (0, 0, 0) RED = (255, 50, 50) YELLOW = (255,", "playerFlapped playerMaxVelY = 10 # max vel along Y, max descend speed playerMinVelY", "+= pipeVelX for lPipe in lowerPipes: lPipe['x'] += pipeVelX # 추가된 부분 for", "xrange except NameError: xrange = range class Keyboard(object): keys = {pygame.K_a: 'A', pygame.K_b:", "hitmask using an image's alpha.\"\"\" mask = [] for x in xrange(image.get_width()): mask.append([])", "\" + \"\".join(current_string)) return \"\".join(current_string) class Slider(): def __init__(self, val, maxi, mini, xpos,", "/ 2) -160 + (50*i))) FPSCLOCK.tick(FPS) pygame.display.update() def playerShm(playerShm): \"\"\"oscillates the value of", "True if the user plays the fury mode FURYMODE = False EASYMODE =", "ORANGE = (200, 100, 50) CYAN = (0, 255, 255) MAGENTA = (255,", "slider(defaultValue, maximum, minimum, Xposition, Yposition) SLIDER = Slider(0.5, 1, 0, 190, 0) while", "True: for event in pygame.event.get(): if event.type == QUIT or (event.type == KEYDOWN", "127: current_string.append(chr(inkey)) display_box(screen, question + \": \" + \"\".join(current_string)) return \"\".join(current_string) class Slider():", "80, 5], 0) # dynamic graphics - button surface # self.button_surf = pygame.surface.Surface((15,", "pipeHeight = IMAGES['pipe'][0].get_height() pipeX = SCREENWIDTH + 10 return [ {'x': pipeX, 'y':", "accleration on flap playerVelY = -9 # player's velocity along Y, default same", "basic slide surface \"\"\" # static surf = 
self.surf.copy() # dynamic pos =", "'y': newCoin2[0]['y']}, ] pipeVelX = -4 # player velocity, max velocity, downward accleration,", "message): fontobject = pygame.font.Font(None,18) fontobject1 = pygame.font.Font(None,30) \"Print a message in a box", "check for crash here crashTest = checkCrash({'x': playerx, 'y': playery, 'index': playerIndex}, upperPipes,", ") # hismask for pipes HITMASKS['pipe'] = ( getHitmask(IMAGES['pipe'][0]), getHitmask(IMAGES['pipe'][1]), ) # hitmask", "for a minimum velocity vel = random.random() * 10 + 5 aMin =", "y1 = rect.x - rect1.x, rect.y - rect1.y x2, y2 = rect.x -", "IMAGES['pipe'][0].get_width() / 2 if pipeMidPos <= playerMidPos < pipeMidPos + 4: score +=", "== MOUSEBUTTONDOWN and event.button == 1) and IMAGES['furymode'].get_rect(center=(furymodex+54,furymodey+14)).collidepoint(pygame.mouse.get_pos())): # make first flap sound", "= (255, 50, 50) YELLOW = (255, 255, 0) GREEN = (0, 255,", "select random background sprites randBg = random.randint(0, len(BACKGROUNDS_LIST) - 1) IMAGES['background'] = pygame.image.load(BACKGROUNDS_LIST[randBg]).convert()", "IMAGES['hardmode'].get_rect(center=(hardmodex+54,hardmodey+14)).collidepoint(pygame.mouse.get_pos())): SOUNDS['wing'].play() return { 'playery': playery + playerShmVals['val'], 'basex': basex, 'playerIndexGen': playerIndexGen, }", "furymodeKeyY)) pygame.display.update() FPSCLOCK.tick(FPS) def mainGame(movementInfo): global FURYMODE, FURYMODE_FRAMES_TO_SPAWN_PIPES, EASYMODE DIFFICULTY = 0 score", "-2 * IMAGES['player'][0].get_height(): playerVelY = playerFlapAcc playerFlapped = True SOUNDS['wing'].play() # check for", "pipe so we # must return which pipe is colliding (lower or upper)", "lowerPipes.remove(crashTest[3]) else: return { 'y': playery, 'groundCrash': crashTest[1], 'basex': basex, 'upperPipes': upperPipes, 'lowerPipes':", "pipe's rectangle (hitbox) \"\"\" global FURYMODE_PARTICLES, FURYMODE_PARTICLES_MAX, SOUNDS pipeW = IMAGES['pipe'][0].get_width() pipeH =", "and 
event.key == K_2) or ((event.type == MOUSEBUTTONDOWN and event.button == 1) and", "), ) # list of backgrounds BACKGROUNDS_LIST = ( 'assets/sprites/background-day.png', 'assets/sprites/background-night.png', ) #", "min(playerVelY, BASEY - playery - playerHeight) # move pipes to left for uPipe", "returns a randomly generated coin \"\"\" coinY = random.randrange(20, int(BASEY * 0.6)) coinX", "IMAGES['background'] = pygame.image.load(BACKGROUNDS_LIST[randBg]).convert() # select random player sprites randPlayer = random.randint(0, len(PLAYERS_LIST) -", "pygame.time.Clock() SCREEN = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT)) pygame.display.set_caption('Flappy Bird') # numbers sprites for score display", "= random.randint(pipe['y'], pipe['y'] + pipeH) particle['i'] = random.randint(1, FURYMODE_PARTICLES) - 1 # random", "+ (SCREENWIDTH / 2), 'y': newPipe2[0]['y']}, ] # list of lowerpipe lowerPipes =", "playerRot playerSurface = pygame.transform.rotate(IMAGES['player'][playerIndex], visibleRot) SCREEN.blit(playerSurface, (playerx, playery)) pygame.display.update() FPSCLOCK.tick(FPS) def showGameOverScreen(crashInfo): \"\"\"crashes", "y of gap between upper and lower pipe gapY = random.randrange(0, int(BASEY *", "TOPFIVE.sort(key=itemgetter(1),reverse= True) TOPFIVE.pop() def getHitmask(image): \"\"\"returns a hitmask using an image's alpha.\"\"\" mask", "40)) self.hit = False # the hit attribute indicates slider movement due to", "(furymodex, furymodey)) SCREEN.blit(IMAGES['furymode-key'], (furymodeKeyX, furymodeKeyY)) pygame.display.update() FPSCLOCK.tick(FPS) def mainGame(movementInfo): global FURYMODE, FURYMODE_FRAMES_TO_SPAWN_PIPES, EASYMODE", "0.2 playery = crashInfo['y'] playerHeight = IMAGES['player'][0].get_height() playerVelY = crashInfo['playerVelY'] playerAccY = 2", "getRandomCoin() coins.append(newCoin[0]) # remove first pipe if its out of the screen if", "/ 2) + 75, (SCREEN.get_height() / 2) -160 + (50*i))) FPSCLOCK.tick(FPS) pygame.display.update() 
def", "self.val < self.mini: self.val = self.mini if self.val > self.maxi: self.val = self.maxi", "* 0.80) # just at right of the fury mode button (8 is", "True # make first flap sound and return values for mainGame SOUNDS['wing'].play() return", "rect1.x, rect.y - rect1.y x2, y2 = rect.x - rect2.x, rect.y - rect2.y", "upperPipes[0]['x'] < -IMAGES['pipe'][0].get_width(): lowerPipes.pop(0) upperPipes.pop(0) # 추가된 부분 if coins[0]['x'] < -IMAGES['coin'].get_width(): coins.pop(0)", "math.sin(angle) * vel particles.append(particle) # sound effect SOUNDS['hit'].play() def checkCrash(player, upperPipes, lowerPipes): \"\"\"returns", "else: if EASYMODE: DIFFICULTY = 4 # get 2 new pipes to add", "playerAccY = 1 # players downward accleration playerRot = 45 # player's rotation", "'H', pygame.K_i: 'I', pygame.K_j: 'J', pygame.K_k: 'K', pygame.K_l: 'L', pygame.K_m: 'M', pygame.K_n: 'N',", "flappy bird\"\"\" global FURYMODE, EASYMODE # index of player to blit on screen", "easymodex = int((SCREENWIDTH - IMAGES['easymode'].get_width())/2) easymodey = int(SCREENHEIGHT * 0.68) hardmodex = int((SCREENWIDTH", "# 추가된 부분 coinTest = checkCoin({'x': playerx, 'y': playery, 'index': playerIndex}, coins) if", "pipe must be removed from the list for uPipe in upperPipes: if uPipe['x']", "spawn new pipes if furymodePipeFrameCounter == FURYMODE_FRAMES_TO_SPAWN_PIPES: # counter reset furymodePipeFrameCounter = 0", "# minimum at slider position left self.xpos = xpos # x-location on screen", "= ( getHitmask(IMAGES['player'][0]), getHitmask(IMAGES['player'][1]), getHitmask(IMAGES['player'][2]), ) # 추가된 부분 HITMASKS['coin'] = ( getHitmask(IMAGES['coin']),", "list of particles # a particle is an object with attributes: # {'x':", ") # hitmask for player HITMASKS['player'] = ( getHitmask(IMAGES['player'][0]), getHitmask(IMAGES['player'][1]), getHitmask(IMAGES['player'][2]), ) #", "hitmask for player HITMASKS['player'] = ( getHitmask(IMAGES['player'][0]), getHitmask(IMAGES['player'][1]), 
getHitmask(IMAGES['player'][2]), ) # 추가된 부분", "question): \"ask(screen, question) -> answer\" pygame.font.init() current_string = [] display_box(screen, question + \":", "RED),((SCREEN.get_width() / 2) -132, (SCREEN.get_height() / 2) -220)) for i in range(0,5) :", "playerShmVals['val'], 'basex': basex, 'playerIndexGen': playerIndexGen, } # adjust playery, playerIndex, basex if (loopIter", "200 + (SCREENWIDTH / 2), 'y': newPipe2[0]['y']}, ] # list of lowerpipe lowerPipes", "max particles for each pipe hit FURYMODE_PARTICLES_MAX = 48 # list of all", "IMAGES['background'].get_width() # no need to spawn pipes at start if FURYMODE: # list", "player['y'], player['w'], player['h']) pipeW = IMAGES['pipe'][0].get_width() pipeH = IMAGES['pipe'][0].get_height() for uPipe in upperPipes:", "a randomly generated coin \"\"\" coinY = random.randrange(20, int(BASEY * 0.6)) coinX =", "80 * (self.maxi - self.mini) + self.mini if self.val < self.mini: self.val =", "== KEYDOWN and event.key == K_1) or ((event.type == MOUSEBUTTONDOWN and event.button ==", "YELLOW = (255, 255, 0) GREEN = (0, 255, 50) BLUE = (50,", "= cycle([0, 1, 2, 1]) # iterator used to change playerIndex after every", "random.randint(0, len(PLAYERS_LIST) - 1) IMAGES['player'] = ( pygame.image.load(PLAYERS_LIST[randPlayer][0]).convert_alpha(), pygame.image.load(PLAYERS_LIST[randPlayer][1]).convert_alpha(), pygame.image.load(PLAYERS_LIST[randPlayer][2]).convert_alpha(), ) # select", "flap sound and return values for mainGame FURYMODE = True SOUNDS['wing'].play() return {", "pipe ] # 추가된 부분 def getRandomCoin(): \"\"\" returns a randomly generated coin", "0.80) # just at right of the fury mode button (8 is right", "((event.type == MOUSEBUTTONDOWN and event.button == 1) and IMAGES['easymode'].get_rect(center=(easymodex+54,easymodey+14)).collidepoint(pygame.mouse.get_pos())): EASYMODE = True #", "- pipeHeight}, # upper pipe {'x': pipeX, 'y': gapY + PIPEGAPSIZE}, # lower", "rect1.y x2, y2 = rect.x - rect2.x, rect.y - rect2.y 
for x in", "= [] for x in xrange(image.get_width()): mask.append([]) for y in xrange(image.get_height()): mask[x].append(bool(image.get_at((x,y))[3])) return", "default same as playerFlapped playerMaxVelY = 10 # max vel along Y, max", "global FURYMODE pi = player['index'] player['w'] = IMAGES['player'][0].get_width() player['h'] = IMAGES['player'][0].get_height() # if", "(8 is right padding) furymodeKeyX = furymodex + IMAGES['furymode'].get_width() + 8 furymodeKeyY =", "in coins: coinRect = pygame.Rect(coin['x'], coin['y'], coinW, coinH) pHitMask = HITMASKS['player'][pi] cHitMask =", "len(message) != 0: screen.blit(fontobject1.render(\"HIGH SCORE!!!\", 1, (255,255,255)), ((screen.get_width() / 2) - 75, (screen.get_height()", "IMAGES['pipe'][0].get_height() pipeX = SCREENWIDTH + 10 return [ {'x': pipeX, 'y': gapY -", "scoreDigits: SCREEN.blit(IMAGES['numbers'][digit], (Xoffset, SCREENHEIGHT * 0.1)) Xoffset += IMAGES['numbers'][digit].get_width() def spawnParticles(particles, pipe): \"\"\"", "(50, 50, 255) GREY = (200, 200, 200) ORANGE = (200, 100, 50)", "self.xpos = xpos # x-location on screen self.ypos = ypos # y-location on", "# 추가된 부분 newCoin = getRandomCoin() coins.append(newCoin[0]) # remove first pipe if its", "] # list of lowerpipe lowerPipes = [ {'x': SCREENWIDTH + 200, 'y':", "self.surf.copy() # dynamic pos = (10+int((self.val-self.mini)/(self.maxi-self.mini)*80), 33) self.button_rect = self.button_surf.get_rect(center=pos) surf.blit(self.button_surf, self.button_rect) self.button_rect.move_ip(self.xpos,", "message sprite for welcome screen IMAGES['message'] = pygame.image.load('assets/sprites/message.png').convert_alpha() # base (ground) sprite IMAGES['base']", "-160 + (50*i))) FPSCLOCK.tick(FPS) pygame.display.update() def playerShm(playerShm): \"\"\"oscillates the value of playerShm['val'] between", "spawnParticles(particles, pipe): \"\"\" Add paticles to the particle list randomly generated with pipe's", "pHitMask = HITMASKS['player'][pi] uHitmask = 
HITMASKS['pipe'][0] # if bird collided with pipe uCollide", "left PIPEGAPSIZE = 130 # gap between upper and lower part of pipe", "from operator import itemgetter import random import sys import math import pygame from", "center of screen\"\"\" scoreDigits = [int(x) for x in list(str(score))] totalWidth = 0", "sound effect SOUNDS['hit'].play() def checkCrash(player, upperPipes, lowerPipes): \"\"\"returns True if player collders with", "( 'assets/sprites/redbird-upflap.png', 'assets/sprites/redbird-midflap.png', 'assets/sprites/redbird-downflap.png', ), # blue bird ( # amount by which", "= IMAGES['player'][playerIndex].get_height() playery += min(playerVelY, BASEY - playery - playerHeight) # move pipes", "= 0 while True: for event in pygame.event.get(): if event.type == QUIT or", "to left PIPEGAPSIZE = 130 # gap between upper and lower part of", "SCREENWIDTH + 280 + (SCREENWIDTH / 2), 'y': newCoin2[0]['y']}, ] pipeVelX = -4", "# list of pipes PIPES_LIST = ( 'assets/sprites/pipe-green.png', 'assets/sprites/pipe-red.png', ) try: xrange except", "= pygame.mixer.Sound('assets/audio/die' + soundExt) SOUNDS['hit'] = pygame.mixer.Sound('assets/audio/hit' + soundExt) SOUNDS['point'] = pygame.mixer.Sound('assets/audio/point' +", "rects\"\"\" rect = rect1.clip(rect2) if rect.width == 0 or rect.height == 0: return", "\"\"\" coinY = random.randrange(20, int(BASEY * 0.6)) coinX = SCREENWIDTH + 100 return", "playerIndexGen, } # (1) key for fury mode if (event.type == KEYDOWN and", "1): if playery > -2 * IMAGES['player'][0].get_height(): playerVelY = playerFlapAcc playerFlapped = True", "shift if playery + playerHeight < BASEY - 1: playery += min(playerVelY, BASEY", "volume slider(defaultValue, maximum, minimum, Xposition, Yposition) SLIDER = Slider(0.5, 1, 0, 190, 0)", "digit in scoreDigits: totalWidth += IMAGES['numbers'][digit].get_width() Xoffset = (SCREENWIDTH - totalWidth) / 2", "- playery - playerHeight) # move pipes to left for uPipe in upperPipes:", "= rect.x - rect1.x, 
rect.y - rect1.y x2, y2 = rect.x - rect2.x,", "= self.mini if self.val > self.maxi: self.val = self.maxi if __name__ == '__main__':", "'.ogg' SOUNDS['die'] = pygame.mixer.Sound('assets/audio/die' + soundExt) SOUNDS['hit'] = pygame.mixer.Sound('assets/audio/hit' + soundExt) SOUNDS['point'] =", "= 0 playerIndexGen = movementInfo['playerIndexGen'] playerx, playery = int(SCREENWIDTH * 0.2), movementInfo['playery'] basex", "# pipe rect lPipeRect = pygame.Rect(lPipe['x'], lPipe['y'], pipeW, pipeH) # player and pipe", "playerRot = 45 # player's rotation playerVelRot = 3 # angular speed playerRotThr", "the screen\" pygame.draw.rect(screen, (0,0,0), ((screen.get_width() / 2) - 100, (screen.get_height() / 2) -", "static surf = self.surf.copy() # dynamic pos = (10+int((self.val-self.mini)/(self.maxi-self.mini)*80), 33) self.button_rect = self.button_surf.get_rect(center=pos)", "gap between upper and lower pipe gapY = random.randrange(0, int(BASEY * 0.6 -", "max value, we must spawn new pipes if furymodePipeFrameCounter == FURYMODE_FRAMES_TO_SPAWN_PIPES: # counter", "getHitmask(IMAGES['pipe'][0]), getHitmask(IMAGES['pipe'][1]), ) # hitmask for player HITMASKS['player'] = ( getHitmask(IMAGES['player'][0]), getHitmask(IMAGES['player'][1]), getHitmask(IMAGES['player'][2]),", "else: return { 'y': playery, 'groundCrash': crashTest[1], 'basex': basex, 'upperPipes': upperPipes, 'lowerPipes': lowerPipes,", "gameover = False pygame.time.delay(1000) showLeaderboard() FPSCLOCK.tick(FPS) pygame.display.update() def showLeaderboard(): fontobject = pygame.font.Font(None,30) SCREEN.blit(IMAGES['background'],(0,0))", "IMAGES['player'][0].get_height(): playerVelY = playerFlapAcc playerFlapped = True SOUNDS['wing'].play() # check for crash here", "import time WHITE = (255, 255, 255) BLACK = (0, 0, 0) RED", "reacts to movement of the slider button. 
\"\"\" self.val = (pygame.mouse.get_pos()[0] - self.xpos", "] pipeVelX = -4 # player velocity, max velocity, downward accleration, accleration on", "0.68) hardmodex = int((SCREENWIDTH - IMAGES['hardmode'].get_width())/2) hardmodey = int(SCREENHEIGHT * 0.74) furymodex =", "- IMAGES['player'][0].get_height()) / 2) messagex = int((SCREENWIDTH - IMAGES['message'].get_width()) / 2) messagey =", "The dynamic part; reacts to movement of the slider button. \"\"\" self.val =", "= pygame.time.Clock() SCREEN = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT)) pygame.display.set_caption('Flappy Bird') # numbers sprites for score", "spawnParticles(particles, crashTest[3]) # remove the pipe # it's an upper pipe if crashTest[2]:", "math.pi * .25 angle = random.random() * (aMax - aMin) + aMin particle['vx']", "velocity vel = random.random() * 10 + 5 aMin = -math.pi * .35", "return { 'y': playery, 'groundCrash': crashTest[1], 'basex': basex, 'upperPipes': upperPipes, 'lowerPipes': lowerPipes, 'score':", "= (10+int((self.val-self.mini)/(self.maxi-self.mini)*80), 33) self.button_rect = self.button_surf.get_rect(center=pos) surf.blit(self.button_surf, self.button_rect) self.button_rect.move_ip(self.xpos, self.ypos) # move of", "EASYMODE FURYMODE = False EASYMODE = False score = crashInfo['score'] playerx = SCREENWIDTH", "if abs(playerShm['val']) == 8: playerShm['dir'] *= -1 if playerShm['dir'] == 1: playerShm['val'] +=", "in range(FURYMODE_PARTICLES_MAX): particle = {} particle['x'] = random.randint(pipe['x'], pipe['x'] + pipeW) particle['y'] =", "for uPipe in upperPipes: SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y'])) for lPipe in lowerPipes: SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'],", "FPSCLOCK.tick(FPS) def mainGame(movementInfo): global FURYMODE, FURYMODE_FRAMES_TO_SPAWN_PIPES, EASYMODE DIFFICULTY = 0 score = playerIndex", "pygame.font.Font(None,18) fontobject1 = pygame.font.Font(None,30) \"Print a message in a box in the middle", "and event.key == K_ESCAPE): 
pygame.quit() sys.exit() if (event.type == KEYDOWN and (event.key ==", "if lCollide: # for fury mode we want to break the pipe so", "= xpos # x-location on screen self.ypos = ypos # y-location on screen", "= pipe['x'] + IMAGES['pipe'][0].get_width() / 2 if pipeMidPos <= playerMidPos < pipeMidPos +", "- 50)) screen.blit(fontobject.render(message, 1, (255,255,255)), ((screen.get_width() / 2) - 100, (screen.get_height() / 2)", "50, 50) YELLOW = (255, 255, 0) GREEN = (0, 255, 50) BLUE", "pygame.display.update() FPSCLOCK.tick(FPS) def mainGame(movementInfo): global FURYMODE, FURYMODE_FRAMES_TO_SPAWN_PIPES, EASYMODE DIFFICULTY = 0 score =", "playerIndex}, coins) if crashTest[0]: # the player hits a pipe in fury mode", "* import time WHITE = (255, 255, 255) BLACK = (0, 0, 0)", "pipes to add to upperPipes lowerPipes list newPipe1 = getRandomPipe(DIFFICULTY) newPipe2 = getRandomPipe(DIFFICULTY)", "cCollide : return [True, False] return [False, False] def pixelCollision(rect1, rect2, hitmask1, hitmask2):", "'D', pygame.K_e: 'E', pygame.K_f: 'F', pygame.K_g: 'G', pygame.K_h: 'H', pygame.K_i: 'I', pygame.K_j: 'J',", "of screen if 0 < upperPipes[0]['x'] < 5: newPipe = getRandomPipe(DIFFICULTY) upperPipes.append(newPipe[0]) lowerPipes.append(newPipe[1])", "def move(self): \"\"\" The dynamic part; reacts to movement of the slider button.", "[ {'x': SCREENWIDTH + 280, 'y': newCoin1[0]['y']}, {'x': SCREENWIDTH + 280 + (SCREENWIDTH", "in upperPipes: SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y'])) for lPipe in lowerPipes: SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y'])) #", "FURYMODE = False EASYMODE = False # In fury mode, the pipe sapwn", "is under the ground if particle['y'] >= BASEY: particles.remove(particle) else: # add new", "fury mode we want to break the pipe so we # must return", "else: pass def display_box(screen, message): fontobject = pygame.font.Font(None,18) fontobject1 = pygame.font.Font(None,30) \"Print a", "fury mode, the pipe sapwn system is 
different than in # normal mode,", "(SCREEN.get_height() / 2) -160 + (50*i))) SCREEN.blit(fontobject.render(str(TOPFIVE[i][1]), 1,RED),((SCREEN.get_width() / 2) + 75, (SCREEN.get_height()", "50)) screen.blit(fontobject.render(message, 1, (255,255,255)), ((screen.get_width() / 2) - 100, (screen.get_height() / 2) -", "-math.pi * .35 aMax = math.pi * .25 angle = random.random() * (aMax", "% 5 == 0: playerIndex = next(playerIndexGen) loopIter = (loopIter + 1) %", "player hits a pipe in fury mode if FURYMODE and not crashTest[1]: spawnParticles(particles,", "baseShift) # rotate the player if playerRot > -90: playerRot -= playerVelRot #", "sprite IMAGES['gameover'] = pygame.image.load('assets/sprites/gameover.png').convert_alpha() # message sprite for welcome screen IMAGES['message'] = pygame.image.load('assets/sprites/message.png').convert_alpha()", "ascend speed playerAccY = 1 # players downward accleration playerRot = 45 #", "in particles: # speed particle['x'] += particle['vx'] particle['y'] += particle['vy'] # gravity particle['vy']", "MAGENTA = (255, 0, 255) TRANS = (1, 1, 1) TOPFIVE=[('kim',0),('kim',0),('kim',0),('kim',0),('kim',0)] FPS =", "10) / 80 * (self.maxi - self.mini) + self.mini if self.val < self.mini:", "{} particle['x'] = random.randint(pipe['x'], pipe['x'] + pipeW) particle['y'] = random.randint(pipe['y'], pipe['y'] + pipeH)", "and count==0) : SCREEN.blit(IMAGES['gameover'], (overx,overy)) pygame.display.update() gameover = False pygame.time.delay(1000) SCREEN.blit(IMAGES['background'], (0,0)) writeScore(score)", "ground if particle['y'] >= BASEY: particles.remove(particle) else: # add new pipes when first", "* 0.2) playery = int((SCREENHEIGHT - IMAGES['player'][0].get_height()) / 2) messagex = int((SCREENWIDTH -", "object with attributes: # {'x': position-x, 'y': position-y, # 'vx': velocity-x, 'vy': velocity-y,", "(10+int((self.val-self.mini)/(self.maxi-self.mini)*80), 33) self.button_rect = self.button_surf.get_rect(center=pos) 
surf.blit(self.button_surf, self.button_rect) self.button_rect.move_ip(self.xpos, self.ypos) # move of button", "IMAGES['furymode-key'] = pygame.image.load('assets/sprites/furymode-key.png').convert_alpha() IMAGES['easymode'] = pygame.image.load('assets/sprites/easymode.png').convert_alpha() IMAGES['hardmode'] = pygame.image.load('assets/sprites/hardmode.png').convert_alpha() # speaker sprite IMAGES['speaker']", "particles: SCREEN.blit(IMAGES['pipe-particle'][particle['i']], (particle['x'], particle['y'])) SCREEN.blit(IMAGES['base'], (basex, BASEY)) # print score so player overlaps", "from pygame.locals import * import time WHITE = (255, 255, 255) BLACK =", "== K_ESCAPE): pygame.quit() sys.exit() if (event.type == KEYDOWN and (event.key == K_SPACE or", "False pygame.time.delay(1000) SCREEN.blit(IMAGES['background'], (0,0)) writeScore(score) count=count+1 pygame.display.update() elif(gameover == True): SCREEN.blit(IMAGES['gameover'], (overx,overy)) pygame.display.update()", "position right self.mini = mini # minimum at slider position left self.xpos =", "gapY += int(BASEY * 0.2) pipeHeight = IMAGES['pipe'][0].get_height() pipeX = SCREENWIDTH + 10", "playerVelY = -9 # player's velocity along Y, default same as playerFlapped playerMaxVelY", "or event.key == K_UP)) or (event.type == MOUSEBUTTONDOWN and event.button == 1): if", "coinW, coinH) pHitMask = HITMASKS['player'][pi] cHitMask = HITMASKS['coin'][0] cCollide = pixelCollision(playerRect, coinRect, pHitMask,", "# normal mode return [True, False] return [False, False] # 추가된 부분 def", "30 SCREENWIDTH = 288 SCREENHEIGHT = 512 # amount by which base can", "pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT)) pygame.display.set_caption('Flappy Bird') # numbers sprites for score display IMAGES['numbers'] = (", "if upperPipes[0]['x'] < -IMAGES['pipe'][0].get_width(): lowerPipes.pop(0) upperPipes.pop(0) # 추가된 부분 if coins[0]['x'] < -IMAGES['coin'].get_width():", "FURYMODE_PARTICLES, FURYMODE_PARTICLES_MAX, SOUNDS pipeW = 
IMAGES['pipe'][0].get_width() pipeH = IMAGES['pipe'][0].get_height() for i in range(FURYMODE_PARTICLES_MAX):", "to left 'assets/sprites/bluebird-upflap.png', 'assets/sprites/bluebird-midflap.png', 'assets/sprites/bluebird-downflap.png', ), # yellow bird ( 'assets/sprites/yellowbird-upflap.png', 'assets/sprites/yellowbird-midflap.png', 'assets/sprites/yellowbird-downflap.png',", "pygame.image.load('assets/sprites/particles-red-4.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-5.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-6.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-7.png').convert_alpha(), ) # hismask for pipes HITMASKS['pipe'] = ( getHitmask(IMAGES['pipe'][0]),", "or event.key == K_UP)) or ((event.type == MOUSEBUTTONDOWN and event.button == 1) and", "or (event.type == KEYDOWN and event.key == K_ESCAPE): pygame.quit() sys.exit() if event.type ==", "+ aMin particle['vx'] = math.cos(angle) * vel particle['vy'] = math.sin(angle) * vel particles.append(particle)", "False] # 추가된 부분 def checkCoin(player, coins): pi = player['index'] player['w'] = IMAGES['player'][0].get_width()", "= [ {'x': SCREENWIDTH + 280, 'y': newCoin1[0]['y']}, {'x': SCREENWIDTH + 280 +", "BASEY)) SCREEN.blit(IMAGES['easymode'],(easymodex,easymodey)) SCREEN.blit(IMAGES['hardmode'],(hardmodex,hardmodey)) SCREEN.blit(IMAGES['furymode'], (furymodex, furymodey)) SCREEN.blit(IMAGES['furymode-key'], (furymodeKeyX, furymodeKeyY)) pygame.display.update() FPSCLOCK.tick(FPS) def mainGame(movementInfo):", "event.type == pygame.MOUSEBUTTONDOWN: pos = pygame.mouse.get_pos() if SLIDER.button_rect.collidepoint(pos): SLIDER.hit = True elif event.type", "playerHeight < BASEY - 1: playery += min(playerVelY, BASEY - playery - playerHeight)", "[ {'x': pipeX, 'y': gapY - pipeHeight}, # upper pipe {'x': pipeX, 'y':", "a particle is an object with attributes: # {'x': position-x, 'y': position-y, #", "in a box in the middle of the screen\" 
pygame.draw.rect(screen, (0,0,0), ((screen.get_width() /", "particle['vy'] # gravity particle['vy'] += playerAccY # remove if the particle is under", "playery + playerHeight < BASEY - 1: playery += min(playerVelY, BASEY - playery", "> -90: playerRot -= playerVelRot # player's movement if playerVelY < playerMaxVelY and", "\": \" + \"\".join(current_string)) return \"\".join(current_string) class Slider(): def __init__(self, val, maxi, mini,", "# select random player sprites randPlayer = random.randint(0, len(PLAYERS_LIST) - 1) IMAGES['player'] =", "if FURYMODE: furymodePipeFrameCounter += 1 # the counter has the max value, we", "of upper pipes upperPipes = [ {'x': SCREENWIDTH + 200, 'y': newPipe1[0]['y']}, {'x':", "+ (50*i))) SCREEN.blit(fontobject.render(str(TOPFIVE[i][1]), 1,RED),((SCREEN.get_width() / 2) + 75, (SCREEN.get_height() / 2) -160 +", "list randomly generated with pipe's rectangle (hitbox) \"\"\" global FURYMODE_PARTICLES, FURYMODE_PARTICLES_MAX, SOUNDS pipeW", "pHitMask, cHitMask) if cCollide : return [True, False] return [False, False] def pixelCollision(rect1,", "pixelCollision(playerRect, coinRect, pHitMask, cHitMask) if cCollide : return [True, False] return [False, False]", "in coins: SCREEN.blit(IMAGES['coin'], (coin['x'], coin['y'])) # pipes' particles if FURYMODE: for particle in", "lowerpipe lowerPipes = [ {'x': SCREENWIDTH + 200, 'y': newPipe1[1]['y']}, {'x': SCREENWIDTH +", "1 # the counter has the max value, we must spawn new pipes", "speed particle['x'] += particle['vx'] particle['y'] += particle['vy'] # gravity particle['vy'] += playerAccY #", "speed playerRotThr = 20 # rotation threshold playerFlapAcc = -9 # players speed", "pipe hitmasks pHitMask = HITMASKS['player'][pi] lHitmask = HITMASKS['pipe'][0] # if bird collided with", "+ (SCREENWIDTH / 2), 'y': newPipe2[1]['y']}, ] # 추가된 부분 newCoin1 = getRandomCoin()", "to movement of the slider button. 
\"\"\" self.val = (pygame.mouse.get_pos()[0] - self.xpos -", "self.mini) + self.mini if self.val < self.mini: self.val = self.mini if self.val >", "event.button == 1) and IMAGES['easymode'].get_rect(center=(easymodex+54,easymodey+14)).collidepoint(pygame.mouse.get_pos())): EASYMODE = True # make first flap sound", "= playerRotThr if playerRot <= playerRotThr: visibleRot = playerRot playerSurface = pygame.transform.rotate(IMAGES['player'][playerIndex], visibleRot)", "K_ESCAPE): pygame.quit() sys.exit() if (event.type == KEYDOWN and (event.key == K_SPACE or event.key", "hismask for pipes HITMASKS['pipe'] = ( getHitmask(IMAGES['pipe'][0]), getHitmask(IMAGES['pipe'][1]), ) # hitmask for player", "for mainGame SOUNDS['wing'].play() return { 'playery': playery + playerShmVals['val'], 'basex': basex, 'playerIndexGen': playerIndexGen,", "the score showScore(score) # Player rotation has a threshold visibleRot = playerRotThr if", "sounds in SOUNDS: SOUNDS[sounds].set_volume(SLIDER.val) #(2) key for easymode if (event.type == KEYDOWN and", "pipe else: lowerPipes.remove(crashTest[3]) else: return { 'y': playery, 'groundCrash': crashTest[1], 'basex': basex, 'upperPipes':", "SCREENWIDTH + 10 return [ {'x': pipeX, 'y': gapY - pipeHeight}, # upper", "- 100, (screen.get_height() / 2) - 10, 200,20), 0) pygame.draw.rect(screen, (255,255,255), ((screen.get_width() /", "'win' in sys.platform: soundExt = '.wav' else: soundExt = '.ogg' SOUNDS['die'] = pygame.mixer.Sound('assets/audio/die'", "is colliding (lower or upper) if FURYMODE: return [True, False, False, lPipe] #", "+ playerHeight >= BASEY - 1: return # player y shift if playery", "'playerVelY': playerVelY, 'playerRot': playerRot } # 추가된 부분 if coinTest[0]: score += 1", "if bird collided with pipe uCollide = pixelCollision(playerRect, uPipeRect, pHitMask, uHitmask) if uCollide:", "FPSCLOCK.tick(FPS) def showGameOverScreen(crashInfo): \"\"\"crashes the player down ans shows gameover image\"\"\" global FURYMODE,", "SCREENWIDTH + 200 
+ (SCREENWIDTH / 2), 'y': newPipe2[1]['y']}, ] # 추가된 부분", "* vel particles.append(particle) # sound effect SOUNDS['hit'].play() def checkCrash(player, upperPipes, lowerPipes): \"\"\"returns True", "SOUNDS['point'] = pygame.mixer.Sound('assets/audio/point' + soundExt) SOUNDS['swoosh'] = pygame.mixer.Sound('assets/audio/swoosh' + soundExt) SOUNDS['wing'] = pygame.mixer.Sound('assets/audio/wing'", "in center of screen\"\"\" scoreDigits = [int(x) for x in list(str(score))] totalWidth =", "def checkCoin(player, coins): pi = player['index'] player['w'] = IMAGES['player'][0].get_width() player['h'] = IMAGES['player'][0].get_height() playerRect", "== MOUSEBUTTONDOWN and event.button == 1) and IMAGES['easymode'].get_rect(center=(easymodex+54,easymodey+14)).collidepoint(pygame.mouse.get_pos())): EASYMODE = True # make", "1 else: playerShm['val'] -= 1 def getRandomPipe(DIFFICULTY): PIPEGAPSIZE = 100 + DIFFICULTY *", "= val # start value self.maxi = maxi # maximum at slider position", "in xrange(rect.width): for y in xrange(rect.height): if hitmask1[x1+x][y1+y] and hitmask2[x2+x][y2+y]: return True return", "1) TOPFIVE=[('kim',0),('kim',0),('kim',0),('kim',0),('kim',0)] FPS = 30 SCREENWIDTH = 288 SCREENHEIGHT = 512 # amount", "SCREEN.blit(IMAGES['background'], (0,0)) writeScore(score) count=count+1 pygame.display.update() elif(gameover == True): SCREEN.blit(IMAGES['gameover'], (overx,overy)) pygame.display.update() gameover =", "= pixelCollision(playerRect, coinRect, pHitMask, cHitMask) if cCollide : return [True, False] return [False,", "in xrange(image.get_height()): mask[x].append(bool(image.get_at((x,y))[3])) return mask def get_key(): while 1: event = pygame.event.poll() if", "and die sounds SOUNDS['hit'].play() if not crashInfo['groundCrash']: SOUNDS['die'].play() while True: for event in", "coins: SCREEN.blit(IMAGES['coin'], (coin['x'], coin['y'])) # pipes' particles if FURYMODE: for particle in particles:", "sprites randPlayer = random.randint(0, len(PLAYERS_LIST) - 1) 
IMAGES['player'] = ( pygame.image.load(PLAYERS_LIST[randPlayer][0]).convert_alpha(), pygame.image.load(PLAYERS_LIST[randPlayer][1]).convert_alpha(), pygame.image.load(PLAYERS_LIST[randPlayer][2]).convert_alpha(),", "<= playerMidPos < pipeMidPos + 4: score += 1 SOUNDS['point'].play() # playerIndex basex", "pygame.font.init() current_string = [] display_box(screen, question + \": \" + \"\".join(current_string)) while 1:", "2) - 10)) pygame.display.flip() def ask(screen, question): \"ask(screen, question) -> answer\" pygame.font.init() current_string", "False # the hit attribute indicates slider movement due to mouse interaction #", "upperPipes: uPipe['x'] += pipeVelX for lPipe in lowerPipes: lPipe['x'] += pipeVelX # 추가된", "(SCREENWIDTH / 2), 'y': newPipe2[0]['y']}, ] # list of lowerpipe lowerPipes = [", "# player's movement if playerVelY < playerMaxVelY and not playerFlapped: playerVelY += playerAccY", "upperPipes.append(pipes[0]) lowerPipes.append(pipes[1]) # check if a pipe must be removed from the list", "추가된 부분 IMAGES['coin'] = pygame.image.load('assets/sprites/coin.png').convert_alpha() # sounds if 'win' in sys.platform: soundExt =", "visible rotation) playerRot = 45 playerHeight = IMAGES['player'][playerIndex].get_height() playery += min(playerVelY, BASEY -", "= int((SCREENWIDTH - IMAGES['message'].get_width()) / 2) messagey = int(SCREENHEIGHT * 0.12) easymodex =", "furymodey + IMAGES['furymode-key'].get_height() / 2 basex = 0 # amount by which base", "basex, 'playerIndexGen': playerIndexGen, } elif (event.type == KEYDOWN and (event.key == K_SPACE or", "operator import itemgetter import random import sys import math import pygame from pygame.locals", "can maximum shift to left PIPEGAPSIZE = 130 # gap between upper and", "-1 if playerShm['dir'] == 1: playerShm['val'] += 1 else: playerShm['val'] -= 1 def", "totalWidth) / 2 for digit in scoreDigits: SCREEN.blit(IMAGES['numbers'][digit], (Xoffset, SCREENHEIGHT * 0.1)) Xoffset", "0.1)) Xoffset += 
IMAGES['numbers'][digit].get_width() def spawnParticles(particles, pipe): \"\"\" Add paticles to the particle", "2) - 12, 204,24), 1) if len(message) != 0: screen.blit(fontobject1.render(\"HIGH SCORE!!!\", 1, (255,255,255)),", "+ \"\".join(current_string)) while 1: inkey = get_key() if inkey == K_BACKSPACE: current_string =", "upper pipe if crashTest[2]: upperPipes.remove(crashTest[3]) score+=1 # it's a lower pipe else: lowerPipes.remove(crashTest[3])", "due to mouse interaction # Static graphics - slider background # self.surf.set_colorkey(BLACK) pygame.draw.rect(self.surf,", "every 5th iteration loopIter = 0 playerx = int(SCREENWIDTH * 0.2) playery =", "to mouse interaction # Static graphics - slider background # self.surf.set_colorkey(BLACK) pygame.draw.rect(self.surf, WHITE,", "\" + \"\".join(current_string)) while 1: inkey = get_key() if inkey == K_BACKSPACE: current_string", "0.6 - PIPEGAPSIZE)) gapY += int(BASEY * 0.2) pipeHeight = IMAGES['pipe'][0].get_height() pipeX =", "#(2) key for easymode if (event.type == KEYDOWN and event.key == K_2) or", "8 and -8\"\"\" if abs(playerShm['val']) == 8: playerShm['dir'] *= -1 if playerShm['dir'] ==", "playery = crashInfo['y'] playerHeight = IMAGES['player'][0].get_height() playerVelY = crashInfo['playerVelY'] playerAccY = 2 playerRot", "when it's a pipe crash if not crashInfo['groundCrash']: if playerRot > -90: playerRot", "= getRandomCoin() newCoin2 = getRandomCoin() coins = [ {'x': SCREENWIDTH + 280, 'y':", "# pipe spawn pipes = getRandomPipe(DIFFICULTY) upperPipes.append(pipes[0]) lowerPipes.append(pipes[1]) # check if a pipe", "+ pipeW) particle['y'] = random.randint(pipe['y'], pipe['y'] + pipeH) particle['i'] = random.randint(1, FURYMODE_PARTICLES) -", "= pixelCollision(playerRect, lPipeRect, pHitMask, lHitmask) if lCollide: # for fury mode we want", "def mainGame(movementInfo): global FURYMODE, FURYMODE_FRAMES_TO_SPAWN_PIPES, EASYMODE DIFFICULTY = 0 score = playerIndex =", "iteration loopIter = 0 playerx = 
int(SCREENWIDTH * 0.2) playery = int((SCREENHEIGHT -", "return [True, False, False, lPipe] # normal mode return [True, False] return [False,", "a pipe in fury mode if FURYMODE and not crashTest[1]: spawnParticles(particles, crashTest[3]) #", "if pipeindex == 0: IMAGES['pipe-particle'] = ( pygame.image.load('assets/sprites/particles-green-0.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-1.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-2.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-3.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-4.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-5.png').convert_alpha(),", "pipeMidPos = pipe['x'] + IMAGES['pipe'][0].get_width() / 2 if pipeMidPos <= playerMidPos < pipeMidPos", "{'x': position-x, 'y': position-y, # 'vx': velocity-x, 'vy': velocity-y, # 'i': index in", "velocity along Y, default same as playerFlapped playerMaxVelY = 10 # max vel", "0 while True: for event in pygame.event.get(): if event.type == QUIT or (event.type", "= ( 'assets/sprites/pipe-green.png', 'assets/sprites/pipe-red.png', ) try: xrange except NameError: xrange = range class", "for digit in scoreDigits: SCREEN.blit(IMAGES['numbers'][digit], (Xoffset, SCREENHEIGHT * 0.1)) Xoffset += IMAGES['numbers'][digit].get_width() def", "particle['vy'] += playerAccY # remove if the particle is under the ground if", "* .35 aMax = math.pi * .25 angle = random.random() * (aMax -", "# if bird collided with pipe lCollide = pixelCollision(playerRect, lPipeRect, pHitMask, lHitmask) if", "= random.randrange(20, int(BASEY * 0.6)) coinX = SCREENWIDTH + 100 return [ {'x':", "KEYDOWN and event.key == K_1) or ((event.type == MOUSEBUTTONDOWN and event.button == 1)", "a pipe must be removed from the list for uPipe in upperPipes: if", "playerRect = pygame.Rect(player['x'], player['y'], player['w'], player['h']) coinW = IMAGES['coin'].get_width() coinH = 
IMAGES['coin'].get_height() for", "part; reacts to movement of the slider button. \"\"\" self.val = (pygame.mouse.get_pos()[0] -", "and event.button == 1): if playery + playerHeight >= BASEY - 1: return", "(255, 255, 255) BLACK = (0, 0, 0) RED = (255, 50, 50)", "of the fury mode button (8 is right padding) furymodeKeyX = furymodex +", "event.key == K_UP)) or (event.type == MOUSEBUTTONDOWN and event.button == 1): if playery", "movementInfo = showWelcomeAnimation() crashInfo = mainGame(movementInfo) showGameOverScreen(crashInfo) def showWelcomeAnimation(): \"\"\"Shows welcome screen animation", "playerFlapped = True SOUNDS['wing'].play() # check for crash here crashTest = checkCrash({'x': playerx,", "'assets/sprites/bluebird-downflap.png', ), # yellow bird ( 'assets/sprites/yellowbird-upflap.png', 'assets/sprites/yellowbird-midflap.png', 'assets/sprites/yellowbird-downflap.png', ), ) # list", "= 30 SCREENWIDTH = 288 SCREENHEIGHT = 512 # amount by which base", "player and pipe hitmasks pHitMask = HITMASKS['player'][pi] uHitmask = HITMASKS['pipe'][0] # if bird", "10)) pygame.display.flip() def ask(screen, question): \"ask(screen, question) -> answer\" pygame.font.init() current_string = []", "index of player to blit on screen playerIndex = 0 playerIndexGen = cycle([0,", "furymodex + IMAGES['furymode'].get_width() + 8 furymodeKeyY = furymodey + IMAGES['furymode-key'].get_height() / 2 basex", "event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE): pygame.quit() sys.exit()", "screen.blit(fontobject1.render(\"HIGH SCORE!!!\", 1, (255,255,255)), ((screen.get_width() / 2) - 75, (screen.get_height() / 2) -", "SOUNDS[sound].set_volume(SLIDER.val) while True: for event in pygame.event.get(): if event.type == QUIT or (event.type", "# rotate only when it's a pipe crash if not crashInfo['groundCrash']: if playerRot", "= '.wav' else: soundExt = '.ogg' SOUNDS['die'] = pygame.mixer.Sound('assets/audio/die' + soundExt) SOUNDS['hit'] =", "uPipe in upperPipes: 
SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y'])) for lPipe in lowerPipes: SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y']))", "'playery': playery + playerShmVals['val'], 'basex': basex, 'playerIndexGen': playerIndexGen, } # (1) key for", "player['index'] player['w'] = IMAGES['player'][0].get_width() player['h'] = IMAGES['player'][0].get_height() # if player crashes into ground", "SCREENHEIGHT * 0.1)) Xoffset += IMAGES['numbers'][digit].get_width() def spawnParticles(particles, pipe): \"\"\" Add paticles to", "rotation playerVelRot = 3 # angular speed playerRotThr = 20 # rotation threshold", "pass def display_box(screen, message): fontobject = pygame.font.Font(None,18) fontobject1 = pygame.font.Font(None,30) \"Print a message", "self.ypos = ypos # y-location on screen self.surf = pygame.surface.Surface((95, 40)) self.hit =", "6), 6, 0) def draw(self): \"\"\" Combination of static and dynamic graphics in", "pipe in fury mode if FURYMODE and not crashTest[1]: spawnParticles(particles, crashTest[3]) # remove", "amount (for each pipe) FURYMODE_PARTICLES = 8 # max particles for each pipe", "- aMin) + aMin particle['vx'] = math.cos(angle) * vel particle['vy'] = math.sin(angle) *", "player['h']) coinW = IMAGES['coin'].get_width() coinH = IMAGES['coin'].get_height() for coin in coins: coinRect =", "the middle of the screen\" pygame.draw.rect(screen, (0,0,0), ((screen.get_width() / 2) - 100, (screen.get_height()", "playerRot -= playerVelRot # draw sprites overx = int((SCREENWIDTH - IMAGES['gameover'].get_width()) / 2)", "= 0 playerIndexGen = cycle([0, 1, 2, 1]) # iterator used to change", "'basex': basex, 'upperPipes': upperPipes, 'lowerPipes': lowerPipes, 'score': score, 'playerVelY': playerVelY, 'playerRot': playerRot }", "% 30 basex = -((-basex + 100) % baseShift) # rotate the player", "global SCREEN, FPSCLOCK, SLIDER pygame.init() FPSCLOCK = pygame.time.Clock() SCREEN = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT)) 
pygame.display.set_caption('Flappy", "random.randrange(20, int(BASEY * 0.6)) coinX = SCREENWIDTH + 100 return [ {'x': coinX,", "== K_UP)) or ((event.type == MOUSEBUTTONDOWN and event.button == 1) and IMAGES['hardmode'].get_rect(center=(hardmodex+54,hardmodey+14)).collidepoint(pygame.mouse.get_pos())): SOUNDS['wing'].play()", "HITMASKS['player'] = ( getHitmask(IMAGES['player'][0]), getHitmask(IMAGES['player'][1]), getHitmask(IMAGES['player'][2]), ) # 추가된 부분 HITMASKS['coin'] = (", "on flap playerVelY = -9 # player's velocity along Y, default same as", "IMAGES['player'][playerIndex].get_height() playery += min(playerVelY, BASEY - playery - playerHeight) # move pipes to", "change playerIndex after every 5th iteration loopIter = 0 playerx = int(SCREENWIDTH *", "0.2), movementInfo['playery'] basex = movementInfo['basex'] baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width() # no need", "def showScore(score): \"\"\"displays score in center of screen\"\"\" scoreDigits = [int(x) for x", "== K_2) or ((event.type == MOUSEBUTTONDOWN and event.button == 1) and IMAGES['easymode'].get_rect(center=(easymodex+54,easymodey+14)).collidepoint(pygame.mouse.get_pos())): EASYMODE", "= False EASYMODE = False # In fury mode, the pipe sapwn system", "the pipe # it's an upper pipe if crashTest[2]: upperPipes.remove(crashTest[3]) score+=1 # it's", "= IMAGES['base'].get_width() - IMAGES['background'].get_width() # no need to spawn pipes at start if", "with pipe's rectangle (hitbox) \"\"\" global FURYMODE_PARTICLES, FURYMODE_PARTICLES_MAX, SOUNDS pipeW = IMAGES['pipe'][0].get_width() pipeH", "screen playerShmVals = {'val': 0, 'dir': 1} # initialize volume for sound in", "pipeW = IMAGES['pipe'][0].get_width() pipeH = IMAGES['pipe'][0].get_height() for i in range(FURYMODE_PARTICLES_MAX): particle = {}", "elif inkey <= 127: current_string.append(chr(inkey)) display_box(screen, question + \": \" + \"\".join(current_string)) return", "playerFlapAcc playerFlapped = True 
SOUNDS['wing'].play() # check for crash here crashTest = checkCrash({'x':", "an image's alpha.\"\"\" mask = [] for x in xrange(image.get_width()): mask.append([]) for y", "movementInfo['playerIndexGen'] playerx, playery = int(SCREENWIDTH * 0.2), movementInfo['playery'] basex = movementInfo['basex'] baseShift =", "'P', pygame.K_q: 'Q', pygame.K_r: 'R', pygame.K_s: 'S', pygame.K_t: 'T', pygame.K_u: 'U', pygame.K_v: 'V',", "the screen if upperPipes[0]['x'] < -IMAGES['pipe'][0].get_width(): lowerPipes.pop(0) upperPipes.pop(0) # 추가된 부분 if coins[0]['x']", "just their rects\"\"\" rect = rect1.clip(rect2) if rect.width == 0 or rect.height ==", "= True basex = crashInfo['basex'] upperPipes, lowerPipes = crashInfo['upperPipes'], crashInfo['lowerPipes'] # play hit", "'vy': velocity-y, # 'i': index in textures list} particles = [] # 추가된", "# True when player flaps # The counter to spawn new pipes furymodePipeFrameCounter", "= showWelcomeAnimation() crashInfo = mainGame(movementInfo) showGameOverScreen(crashInfo) def showWelcomeAnimation(): \"\"\"Shows welcome screen animation of", "= int((SCREENWIDTH - IMAGES['easymode'].get_width())/2) easymodey = int(SCREENHEIGHT * 0.68) hardmodex = int((SCREENWIDTH -", "crashTest[3]) # remove the pipe # it's an upper pipe if crashTest[2]: upperPipes.remove(crashTest[3])", "screen self.surf = pygame.surface.Surface((95, 40)) self.hit = False # the hit attribute indicates", "} # (1) key for fury mode if (event.type == KEYDOWN and event.key", "= pygame.Rect(coin['x'], coin['y'], coinW, coinH) pHitMask = HITMASKS['player'][pi] cHitMask = HITMASKS['coin'][0] cCollide =", "in zip(upperPipes, lowerPipes): SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y'])) SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y'])) SCREEN.blit(IMAGES['base'], (basex, BASEY)) showScore(score)", "= random.randrange(0, int(BASEY * 0.6 - PIPEGAPSIZE)) gapY += int(BASEY * 0.2) pipeHeight", "False, True, uPipe] # normal mode return [True, False] for lPipe in 
lowerPipes:", "pipes' particles for fury mode # pipes are green if pipeindex == 0:", "lPipeRect = pygame.Rect(lPipe['x'], lPipe['y'], pipeW, pipeH) # player and pipe hitmasks pHitMask =", "= getRandomPipe(DIFFICULTY) newPipe2 = getRandomPipe(DIFFICULTY) # list of upper pipes upperPipes = [", "200, 'y': newPipe1[0]['y']}, {'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y': newPipe2[0]['y']},", "pixelCollision(playerRect, uPipeRect, pHitMask, uHitmask) if uCollide: # for fury mode we want to", "MOUSEBUTTONDOWN and event.button == 1): if playery > -2 * IMAGES['player'][0].get_height(): playerVelY =", "while True: for event in pygame.event.get(): if event.type == QUIT or (event.type ==", "into ground if player['y'] + player['h'] >= BASEY - 1: return [True, True]", "= math.cos(angle) * vel particle['vy'] = math.sin(angle) * vel particles.append(particle) # sound effect", "0) def draw(self): \"\"\" Combination of static and dynamic graphics in a copy", "-IMAGES['coin'].get_width(): coins.pop(0) # draw sprites SCREEN.blit(IMAGES['background'], (0,0)) for uPipe in upperPipes: SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'],", "generated with pipe's rectangle (hitbox) \"\"\" global FURYMODE_PARTICLES, FURYMODE_PARTICLES_MAX, SOUNDS pipeW = IMAGES['pipe'][0].get_width()", "255) TRANS = (1, 1, 1) TOPFIVE=[('kim',0),('kim',0),('kim',0),('kim',0),('kim',0)] FPS = 30 SCREENWIDTH = 288", "== K_MINUS: current_string.append(\"_\") elif inkey <= 127: current_string.append(chr(inkey)) display_box(screen, question + \": \"", "- self.mini) + self.mini if self.val < self.mini: self.val = self.mini if self.val", "rect.x - rect1.x, rect.y - rect1.y x2, y2 = rect.x - rect2.x, rect.y", "pygame.image.load('assets/sprites/speaker_mute.png').convert_alpha()) # 추가된 부분 IMAGES['coin'] = pygame.image.load('assets/sprites/coin.png').convert_alpha() # sounds if 'win' in sys.platform:", "} # 추가된 부분 if coinTest[0]: score += 1 SOUNDS['point'].play() coins.pop(0) # check", "if playerRot <= playerRotThr: 
visibleRot = playerRot playerSurface = pygame.transform.rotate(IMAGES['player'][playerIndex], visibleRot) SCREEN.blit(playerSurface, (playerx,", "(0,0)) for uPipe, lPipe in zip(upperPipes, lowerPipes): SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y'])) SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y']))", "50) CYAN = (0, 255, 255) MAGENTA = (255, 0, 255) TRANS =", "for pipe in upperPipes: pipeMidPos = pipe['x'] + IMAGES['pipe'][0].get_width() / 2 if pipeMidPos", "playerFlapped = False # True when player flaps # The counter to spawn", "base can maximum shift to left PIPEGAPSIZE = 130 # gap between upper", "pygame.image.load('assets/sprites/message.png').convert_alpha() # base (ground) sprite IMAGES['base'] = pygame.image.load('assets/sprites/base.png').convert_alpha() # the \"fury mode\" button", "'playerIndexGen': playerIndexGen, } # adjust playery, playerIndex, basex if (loopIter + 1) %", "45 # player's rotation playerVelRot = 3 # angular speed playerRotThr = 20", "amount by which base can maximum shift to left baseShift = IMAGES['base'].get_width() -", "0.2) pipeHeight = IMAGES['pipe'][0].get_height() pipeX = SCREENWIDTH + 10 return [ {'x': pipeX,", "in scoreDigits: totalWidth += IMAGES['numbers'][digit].get_width() Xoffset = (SCREENWIDTH - totalWidth) / 2 for", "1) IMAGES['background'] = pygame.image.load(BACKGROUNDS_LIST[randBg]).convert() # select random player sprites randPlayer = random.randint(0, len(PLAYERS_LIST)", "= 10 # max vel along Y, max descend speed playerMinVelY = -8", "by which base can maximum shift to left baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width()", "BASEY)) # print score so player overlaps the score showScore(score) # Player rotation", "'assets/sprites/bluebird-midflap.png', 'assets/sprites/bluebird-downflap.png', ), # yellow bird ( 'assets/sprites/yellowbird-upflap.png', 'assets/sprites/yellowbird-midflap.png', 'assets/sprites/yellowbird-downflap.png', ), ) #", "IMAGES['player'][0].get_height()) / 2) 
messagex = int((SCREENWIDTH - IMAGES['message'].get_width()) / 2) messagey = int(SCREENHEIGHT", "list of upper pipes upperPipes = [ {'x': SCREENWIDTH + 200, 'y': newPipe1[0]['y']},", "* 0.2) pipeHeight = IMAGES['pipe'][0].get_height() pipeX = SCREENWIDTH + 10 return [ {'x':", "bird\"\"\" global FURYMODE, EASYMODE # index of player to blit on screen playerIndex", "volume slider if SLIDER.hit: SLIDER.move() for sounds in SOUNDS: SOUNDS[sounds].set_volume(SLIDER.val) #(2) key for", "75, (SCREEN.get_height() / 2) -160 + (50*i))) FPSCLOCK.tick(FPS) pygame.display.update() def playerShm(playerShm): \"\"\"oscillates the", "'y': gapY - pipeHeight}, # upper pipe {'x': pipeX, 'y': gapY + PIPEGAPSIZE},", "pygame.image.load('assets/sprites/particles-green-6.png').convert_alpha(), pygame.image.load('assets/sprites/particles-green-7.png').convert_alpha(), ) else: IMAGES['pipe-particle'] = ( pygame.image.load('assets/sprites/particles-red-0.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-1.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-2.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-3.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-4.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-5.png').convert_alpha(),", "playerShm['dir'] *= -1 if playerShm['dir'] == 1: playerShm['val'] += 1 else: playerShm['val'] -=", "pi = player['index'] player['w'] = IMAGES['player'][0].get_width() player['h'] = IMAGES['player'][0].get_height() playerRect = pygame.Rect(player['x'], player['y'],", "maxi # maximum at slider position right self.mini = mini # minimum at", "button. 
\"\"\" self.val = (pygame.mouse.get_pos()[0] - self.xpos - 10) / 80 * (self.maxi", "-4 # player velocity, max velocity, downward accleration, accleration on flap playerVelY =", "uPipe['y'], pipeW, pipeH) # player and pipe hitmasks pHitMask = HITMASKS['player'][pi] uHitmask =", "CYAN = (0, 255, 255) MAGENTA = (255, 0, 255) TRANS = (1,", "1,RED),((SCREEN.get_width() / 2) + 75, (SCREEN.get_height() / 2) -160 + (50*i))) FPSCLOCK.tick(FPS) pygame.display.update()", "SCREEN.blit(surf, (self.xpos, self.ypos)) def move(self): \"\"\" The dynamic part; reacts to movement of", "# 추가된 부분 newCoin1 = getRandomCoin() newCoin2 = getRandomCoin() coins = [ {'x':", "= playerFlapAcc playerFlapped = True SOUNDS['wing'].play() # check for crash here crashTest =", "playerVelY = playerFlapAcc playerFlapped = True SOUNDS['wing'].play() # check for crash here crashTest", "WHITE, [5, 30, 80, 5], 0) # dynamic graphics - button surface #", "furymodex = int((SCREENWIDTH - IMAGES['furymode'].get_width()) / 2) furymodey = int(SCREENHEIGHT * 0.80) #", "-IMAGES['pipe'][0].get_width(): lowerPipes.pop(0) upperPipes.pop(0) # 추가된 부분 if coins[0]['x'] < -IMAGES['coin'].get_width(): coins.pop(0) # draw", "y shift if playery + playerHeight < BASEY - 1: playery += min(playerVelY,", "'assets/sprites/background-day.png', 'assets/sprites/background-night.png', ) # list of pipes PIPES_LIST = ( 'assets/sprites/pipe-green.png', 'assets/sprites/pipe-red.png', )", "for score playerMidPos = playerx + IMAGES['player'][0].get_width() / 2 for pipe in upperPipes:", "about to touch left of screen if 0 < upperPipes[0]['x'] < 5: newPipe", "random.random() * 10 + 5 aMin = -math.pi * .35 aMax = math.pi", "playery - playerHeight) # player velocity change if playerVelY < 15: playerVelY +=", "all possible players (tuple of 3 positions of flap) PLAYERS_LIST = ( #", "(SCREEN.get_height() / 2) -220)) for i in range(0,5) : SCREEN.blit(fontobject.render(TOPFIVE[i][0], 1, RED),((SCREEN.get_width() /", "lPipeRect, pHitMask, lHitmask) 
if lCollide: # for fury mode we want to break", "pipe hit FURYMODE_PARTICLES_MAX = 48 # list of all possible players (tuple of", "- IMAGES['background'].get_width() # no need to spawn pipes at start if FURYMODE: #", "coins): pi = player['index'] player['w'] = IMAGES['player'][0].get_width() player['h'] = IMAGES['player'][0].get_height() playerRect = pygame.Rect(player['x'],", "1 def getRandomPipe(DIFFICULTY): PIPEGAPSIZE = 100 + DIFFICULTY * 10 \"\"\" returns a", "< -IMAGES['pipe'][0].get_width(): upperPipes.remove(uPipe) for lPipe in lowerPipes: if lPipe['x'] < -IMAGES['pipe'][0].get_width(): lowerPipes.remove(lPipe) #", "global FURYMODE, FURYMODE_FRAMES_TO_SPAWN_PIPES, EASYMODE DIFFICULTY = 0 score = playerIndex = loopIter =", "= -9 # players speed on flapping playerFlapped = False # True when", "x in list(str(score))] totalWidth = 0 # total width of all numbers to", "of all possible players (tuple of 3 positions of flap) PLAYERS_LIST = (", "player's movement if playerVelY < playerMaxVelY and not playerFlapped: playerVelY += playerAccY if", "pygame.K_t: 'T', pygame.K_u: 'U', pygame.K_v: 'V', pygame.K_w: 'W', pygame.K_x: 'X', pygame.K_y: 'Y', pygame.K_z:", "return [True, True] else: playerRect = pygame.Rect(player['x'], player['y'], player['w'], player['h']) pipeW = IMAGES['pipe'][0].get_width()", "particles # a particle is an object with attributes: # {'x': position-x, 'y':", "using an image's alpha.\"\"\" mask = [] for x in xrange(image.get_width()): mask.append([]) for", "particles for each pipe hit FURYMODE_PARTICLES_MAX = 48 # list of all possible", "left baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width() # player shm for up-down motion on", "K_ESCAPE): pygame.quit() sys.exit() if event.type == pygame.MOUSEBUTTONDOWN: pos = pygame.mouse.get_pos() if SLIDER.button_rect.collidepoint(pos): SLIDER.hit", "= ( pygame.image.load('assets/sprites/particles-red-0.png').convert_alpha(), 
pygame.image.load('assets/sprites/particles-red-1.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-2.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-3.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-4.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-5.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-6.png').convert_alpha(), pygame.image.load('assets/sprites/particles-red-7.png').convert_alpha(), ) # hismask", "random.randint(0, len(PIPES_LIST) - 1) IMAGES['pipe'] = ( pygame.transform.rotate( pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), 180), pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), ) #", "= furymodex + IMAGES['furymode'].get_width() + 8 furymodeKeyY = furymodey + IMAGES['furymode-key'].get_height() / 2", "in upperPipes: uPipe['x'] += pipeVelX for lPipe in lowerPipes: lPipe['x'] += pipeVelX #", "overx = int((SCREENWIDTH - IMAGES['gameover'].get_width()) / 2) overy = int(SCREENHEIGHT * 0.5) #SCREEN.blit(IMAGES['background'],", "pipeH = IMAGES['pipe'][0].get_height() for uPipe in upperPipes: # pipe rect uPipeRect = pygame.Rect(uPipe['x'],", "* 0.2 playery = crashInfo['y'] playerHeight = IMAGES['player'][0].get_height() playerVelY = crashInfo['playerVelY'] playerAccY =", "current_string.append(\"_\") elif inkey <= 127: current_string.append(chr(inkey)) display_box(screen, question + \": \" + \"\".join(current_string))", "of flap) PLAYERS_LIST = ( # red bird ( 'assets/sprites/redbird-upflap.png', 'assets/sprites/redbird-midflap.png', 'assets/sprites/redbird-downflap.png', ),", "Y, max descend speed playerMinVelY = -8 # min vel along Y, max", "-((-basex + 100) % baseShift) # rotate the player if playerRot > -90:", "pygame.Rect(coin['x'], coin['y'], coinW, coinH) pHitMask = HITMASKS['player'][pi] cHitMask = HITMASKS['coin'][0] cCollide = pixelCollision(playerRect,", "50) BLUE = (50, 50, 255) GREY = (200, 200, 200) ORANGE =", "< 
-IMAGES['pipe'][0].get_width(): lowerPipes.pop(0) upperPipes.pop(0) # 추가된 부분 if coins[0]['x'] < -IMAGES['coin'].get_width(): coins.pop(0) #", "elif inkey == K_MINUS: current_string.append(\"_\") elif inkey <= 127: current_string.append(chr(inkey)) display_box(screen, question +", "playerHeight) # move pipes to left for uPipe in upperPipes: uPipe['x'] += pipeVelX", "pygame.Rect(uPipe['x'], uPipe['y'], pipeW, pipeH) # player and pipe hitmasks pHitMask = HITMASKS['player'][pi] uHitmask", "'assets/sprites/background-night.png', ) # list of pipes PIPES_LIST = ( 'assets/sprites/pipe-green.png', 'assets/sprites/pipe-red.png', ) try:", "speed playerAccY = 1 # players downward accleration playerRot = 45 # player's", "= ( pygame.transform.rotate( pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), 180), pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), ) # pipes' particles for fury mode", "playerShm['dir'] == 1: playerShm['val'] += 1 else: playerShm['val'] -= 1 def getRandomPipe(DIFFICULTY): PIPEGAPSIZE", "DIFFICULTY * 10 \"\"\" returns a randomly generated pipe \"\"\" # y of", "= -9 # player's velocity along Y, default same as playerFlapped playerMaxVelY =", "in lowerPipes: # pipe rect lPipeRect = pygame.Rect(lPipe['x'], lPipe['y'], pipeW, pipeH) # player", "pipes PIPES_LIST = ( 'assets/sprites/pipe-green.png', 'assets/sprites/pipe-red.png', ) try: xrange except NameError: xrange =", "# for fury mode we want to break the pipe so we #", "pipe rect lPipeRect = pygame.Rect(lPipe['x'], lPipe['y'], pipeW, pipeH) # player and pipe hitmasks", "and not just their rects\"\"\" rect = rect1.clip(rect2) if rect.width == 0 or", "= IMAGES['pipe'][0].get_width() pipeH = IMAGES['pipe'][0].get_height() for i in range(FURYMODE_PARTICLES_MAX): particle = {} particle['x']", "IMAGES['gameover'].get_width()) / 2) overy = int(SCREENHEIGHT * 0.5) #SCREEN.blit(IMAGES['background'], (0,0)) for uPipe, lPipe", "( getHitmask(IMAGES['pipe'][0]), getHitmask(IMAGES['pipe'][1]), ) # hitmask 
for player HITMASKS['player'] = ( getHitmask(IMAGES['player'][0]), getHitmask(IMAGES['player'][1]),", "pipe sapwn system is different than in # normal mode, we add pipes", "except NameError: xrange = range class Keyboard(object): keys = {pygame.K_a: 'A', pygame.K_b: 'B',", "* 0.79 # image, sound and hitmask dicts IMAGES, SOUNDS, HITMASKS = {},", "pygame.K_y: 'Y', pygame.K_z: 'Z'} def main(): global SCREEN, FPSCLOCK, SLIDER pygame.init() FPSCLOCK =", "player['h'] >= BASEY - 1: return [True, True] else: playerRect = pygame.Rect(player['x'], player['y'],", "# The counter to spawn new pipes furymodePipeFrameCounter = 0 while True: for", "rotation) playerRot = 45 playerHeight = IMAGES['player'][playerIndex].get_height() playery += min(playerVelY, BASEY - playery", "to correct screen position # screen SCREEN.blit(IMAGES['background'], (0,0)) SCREEN.blit(surf, (self.xpos, self.ypos)) def move(self):", "it's an upper pipe if crashTest[2]: upperPipes.remove(crashTest[3]) score+=1 # it's a lower pipe", "GREY = (200, 200, 200) ORANGE = (200, 100, 50) CYAN = (0,", "= (255, 255, 255) BLACK = (0, 0, 0) RED = (255, 50," ]
[ "django.utils.translation import ugettext_lazy as _ from touchtechnology.news.models import Article, Category register = Library()", "@register.inclusion_tag('touchtechnology/news/_latest_articles.html', takes_context=True) def latest_articles(context, count=5, title=_(\"Latest News\")): articles = Article.objects.live() paginator = Paginator(articles,", "context @register.inclusion_tag('touchtechnology/news/_latest_articles.html', takes_context=True) def latest_articles(context, count=5, title=_(\"Latest News\")): articles = Article.objects.live() paginator =", "import ugettext_lazy as _ from touchtechnology.news.models import Article, Category register = Library() @register.filter(\"category\")", "from django.core.paginator import Paginator from django.template import Library from django.utils.translation import ugettext_lazy as", "import Paginator from django.template import Library from django.utils.translation import ugettext_lazy as _ from", "to it. Otherwise we select all article categories. \"\"\" if article is None:", "custom templates context['slice'] = ':' context['article_list'] = articles return context @register.inclusion_tag('touchtechnology/news/_related_list.html', takes_context=True) def", "django.template import Library from django.utils.translation import ugettext_lazy as _ from touchtechnology.news.models import Article,", "not None: articles = articles[:int(limit)] # FIXME backwards compatibility for custom templates context['slice']", "provided, then we select categories relating to it. 
Otherwise we select all article", "= ':' context['article_list'] = articles return context @register.inclusion_tag('touchtechnology/news/_related_list.html', takes_context=True) def related_categories(context, article=None, limit=None):", "= article.categories.live() articles = Article.objects.live() \\ .exclude(pk=article.pk) \\ .filter(categories__in=categories) \\ .distinct() if order_by", "article.categories.live() articles = Article.objects.live() \\ .exclude(pk=article.pk) \\ .filter(categories__in=categories) \\ .distinct() if order_by is", "':' context['article_list'] = articles return context @register.inclusion_tag('touchtechnology/news/_related_list.html', takes_context=True) def related_categories(context, article=None, limit=None): \"\"\"", "takes_context=True) def latest_articles(context, count=5, title=_(\"Latest News\")): articles = Article.objects.live() paginator = Paginator(articles, count)", "def related_articles(context, article, limit=None, order_by=None): categories = article.categories.live() articles = Article.objects.live() \\ .exclude(pk=article.pk)", "article, limit=None, order_by=None): categories = article.categories.live() articles = Article.objects.live() \\ .exclude(pk=article.pk) \\ .filter(categories__in=categories)", "ugettext_lazy as _ from touchtechnology.news.models import Article, Category register = Library() @register.filter(\"category\") def", "limit=None): \"\"\" If an article is provided, then we select categories relating to", "Paginator from django.template import Library from django.utils.translation import ugettext_lazy as _ from touchtechnology.news.models", "return context @register.inclusion_tag('touchtechnology/news/_latest_articles.html', takes_context=True) def latest_articles(context, count=5, title=_(\"Latest News\")): articles = Article.objects.live() paginator", "if order_by is not None: articles = articles.order_by(*order_by.split(',')) if limit is not None:", "= articles[:int(limit)] # FIXME backwards 
compatibility for custom templates context['slice'] = ':' context['article_list']", "compatibility for custom templates context['slice'] = ':' context['article_list'] = articles return context @register.inclusion_tag('touchtechnology/news/_related_list.html',", "= Article.objects.live() \\ .exclude(pk=article.pk) \\ .filter(categories__in=categories) \\ .distinct() if order_by is not None:", "order_by is not None: articles = articles.order_by(*order_by.split(',')) if limit is not None: articles", "def get_category(slug): return Category.objects.get(slug=slug) @register.inclusion_tag('touchtechnology/news/_related_list.html', takes_context=True) def related_articles(context, article, limit=None, order_by=None): categories =", "None: articles = articles.order_by(*order_by.split(',')) if limit is not None: articles = articles[:int(limit)] #", "we select categories relating to it. Otherwise we select all article categories. \"\"\"", "categories. \"\"\" if article is None: categories = Category.objects.all() else: categories = article.categories.all()", "takes_context=True) def related_articles(context, article, limit=None, order_by=None): categories = article.categories.live() articles = Article.objects.live() \\", "categories = article.categories.all() context['category_list'] = categories return context @register.inclusion_tag('touchtechnology/news/_latest_articles.html', takes_context=True) def latest_articles(context, count=5,", "= Paginator(articles, count) page = paginator.page(1) context['paginator'] = paginator context['page'] = page context['article_list']", "article is None: categories = Category.objects.all() else: categories = article.categories.all() context['category_list'] = categories", "page = paginator.page(1) context['paginator'] = paginator context['page'] = page context['article_list'] = page.object_list context['title']", "@register.inclusion_tag('touchtechnology/news/_related_list.html', takes_context=True) def related_categories(context, 
article=None, limit=None): \"\"\" If an article is provided, then", "get_category(slug): return Category.objects.get(slug=slug) @register.inclusion_tag('touchtechnology/news/_related_list.html', takes_context=True) def related_articles(context, article, limit=None, order_by=None): categories = article.categories.live()", "Otherwise we select all article categories. \"\"\" if article is None: categories =", "we select all article categories. \"\"\" if article is None: categories = Category.objects.all()", "not None: articles = articles.order_by(*order_by.split(',')) if limit is not None: articles = articles[:int(limit)]", "@register.filter(\"category\") def get_category(slug): return Category.objects.get(slug=slug) @register.inclusion_tag('touchtechnology/news/_related_list.html', takes_context=True) def related_articles(context, article, limit=None, order_by=None): categories", "count=5, title=_(\"Latest News\")): articles = Article.objects.live() paginator = Paginator(articles, count) page = paginator.page(1)", "def related_categories(context, article=None, limit=None): \"\"\" If an article is provided, then we select", "paginator = Paginator(articles, count) page = paginator.page(1) context['paginator'] = paginator context['page'] = page", "FIXME backwards compatibility for custom templates context['slice'] = ':' context['article_list'] = articles return", "articles[:int(limit)] # FIXME backwards compatibility for custom templates context['slice'] = ':' context['article_list'] =", "select all article categories. 
\"\"\" if article is None: categories = Category.objects.all() else:", ".distinct() if order_by is not None: articles = articles.order_by(*order_by.split(',')) if limit is not", "= Library() @register.filter(\"category\") def get_category(slug): return Category.objects.get(slug=slug) @register.inclusion_tag('touchtechnology/news/_related_list.html', takes_context=True) def related_articles(context, article, limit=None,", "= article.categories.all() context['category_list'] = categories return context @register.inclusion_tag('touchtechnology/news/_latest_articles.html', takes_context=True) def latest_articles(context, count=5, title=_(\"Latest", "\\ .filter(categories__in=categories) \\ .distinct() if order_by is not None: articles = articles.order_by(*order_by.split(',')) if", "= Article.objects.live() paginator = Paginator(articles, count) page = paginator.page(1) context['paginator'] = paginator context['page']", "import Article, Category register = Library() @register.filter(\"category\") def get_category(slug): return Category.objects.get(slug=slug) @register.inclusion_tag('touchtechnology/news/_related_list.html', takes_context=True)", "then we select categories relating to it. Otherwise we select all article categories.", "title=_(\"Latest News\")): articles = Article.objects.live() paginator = Paginator(articles, count) page = paginator.page(1) context['paginator']", "articles = articles.order_by(*order_by.split(',')) if limit is not None: articles = articles[:int(limit)] # FIXME", "article.categories.all() context['category_list'] = categories return context @register.inclusion_tag('touchtechnology/news/_latest_articles.html', takes_context=True) def latest_articles(context, count=5, title=_(\"Latest News\")):", "it. Otherwise we select all article categories. 
\"\"\" if article is None: categories", "categories = article.categories.live() articles = Article.objects.live() \\ .exclude(pk=article.pk) \\ .filter(categories__in=categories) \\ .distinct() if", "context @register.inclusion_tag('touchtechnology/news/_related_list.html', takes_context=True) def related_categories(context, article=None, limit=None): \"\"\" If an article is provided,", "context['paginator'] = paginator context['page'] = page context['article_list'] = page.object_list context['title'] = title return", "latest_articles(context, count=5, title=_(\"Latest News\")): articles = Article.objects.live() paginator = Paginator(articles, count) page =", "related_articles(context, article, limit=None, order_by=None): categories = article.categories.live() articles = Article.objects.live() \\ .exclude(pk=article.pk) \\", "categories return context @register.inclusion_tag('touchtechnology/news/_latest_articles.html', takes_context=True) def latest_articles(context, count=5, title=_(\"Latest News\")): articles = Article.objects.live()", "categories = Category.objects.all() else: categories = article.categories.all() context['category_list'] = categories return context @register.inclusion_tag('touchtechnology/news/_latest_articles.html',", "if limit is not None: articles = articles[:int(limit)] # FIXME backwards compatibility for", "None: categories = Category.objects.all() else: categories = article.categories.all() context['category_list'] = categories return context", "as _ from touchtechnology.news.models import Article, Category register = Library() @register.filter(\"category\") def get_category(slug):", "else: categories = article.categories.all() context['category_list'] = categories return context @register.inclusion_tag('touchtechnology/news/_latest_articles.html', takes_context=True) def latest_articles(context,", "None: articles = articles[:int(limit)] # FIXME backwards compatibility for custom templates context['slice'] =", "relating to it. 
Otherwise we select all article categories. \"\"\" if article is", "_ from touchtechnology.news.models import Article, Category register = Library() @register.filter(\"category\") def get_category(slug): return", "order_by=None): categories = article.categories.live() articles = Article.objects.live() \\ .exclude(pk=article.pk) \\ .filter(categories__in=categories) \\ .distinct()", "Article.objects.live() \\ .exclude(pk=article.pk) \\ .filter(categories__in=categories) \\ .distinct() if order_by is not None: articles", "\"\"\" if article is None: categories = Category.objects.all() else: categories = article.categories.all() context['category_list']", "limit is not None: articles = articles[:int(limit)] # FIXME backwards compatibility for custom", "articles.order_by(*order_by.split(',')) if limit is not None: articles = articles[:int(limit)] # FIXME backwards compatibility", "= articles return context @register.inclusion_tag('touchtechnology/news/_related_list.html', takes_context=True) def related_categories(context, article=None, limit=None): \"\"\" If an", ".exclude(pk=article.pk) \\ .filter(categories__in=categories) \\ .distinct() if order_by is not None: articles = articles.order_by(*order_by.split(','))", "if article is None: categories = Category.objects.all() else: categories = article.categories.all() context['category_list'] =", "Article, Category register = Library() @register.filter(\"category\") def get_category(slug): return Category.objects.get(slug=slug) @register.inclusion_tag('touchtechnology/news/_related_list.html', takes_context=True) def", "\"\"\" If an article is provided, then we select categories relating to it.", "is provided, then we select categories relating to it. 
Otherwise we select all", "@register.inclusion_tag('touchtechnology/news/_related_list.html', takes_context=True) def related_articles(context, article, limit=None, order_by=None): categories = article.categories.live() articles = Article.objects.live()", "article=None, limit=None): \"\"\" If an article is provided, then we select categories relating", "is not None: articles = articles.order_by(*order_by.split(',')) if limit is not None: articles =", "= categories return context @register.inclusion_tag('touchtechnology/news/_latest_articles.html', takes_context=True) def latest_articles(context, count=5, title=_(\"Latest News\")): articles =", "= paginator context['page'] = page context['article_list'] = page.object_list context['title'] = title return context", ".filter(categories__in=categories) \\ .distinct() if order_by is not None: articles = articles.order_by(*order_by.split(',')) if limit", "context['article_list'] = articles return context @register.inclusion_tag('touchtechnology/news/_related_list.html', takes_context=True) def related_categories(context, article=None, limit=None): \"\"\" If", "Category register = Library() @register.filter(\"category\") def get_category(slug): return Category.objects.get(slug=slug) @register.inclusion_tag('touchtechnology/news/_related_list.html', takes_context=True) def related_articles(context,", "categories relating to it. Otherwise we select all article categories. 
\"\"\" if article", "Category.objects.all() else: categories = article.categories.all() context['category_list'] = categories return context @register.inclusion_tag('touchtechnology/news/_latest_articles.html', takes_context=True) def", "is not None: articles = articles[:int(limit)] # FIXME backwards compatibility for custom templates", "register = Library() @register.filter(\"category\") def get_category(slug): return Category.objects.get(slug=slug) @register.inclusion_tag('touchtechnology/news/_related_list.html', takes_context=True) def related_articles(context, article,", "= Category.objects.all() else: categories = article.categories.all() context['category_list'] = categories return context @register.inclusion_tag('touchtechnology/news/_latest_articles.html', takes_context=True)", "context['slice'] = ':' context['article_list'] = articles return context @register.inclusion_tag('touchtechnology/news/_related_list.html', takes_context=True) def related_categories(context, article=None,", "django.core.paginator import Paginator from django.template import Library from django.utils.translation import ugettext_lazy as _", "articles = Article.objects.live() paginator = Paginator(articles, count) page = paginator.page(1) context['paginator'] = paginator", "If an article is provided, then we select categories relating to it. Otherwise", "select categories relating to it. Otherwise we select all article categories. \"\"\" if", "context['category_list'] = categories return context @register.inclusion_tag('touchtechnology/news/_latest_articles.html', takes_context=True) def latest_articles(context, count=5, title=_(\"Latest News\")): articles", "all article categories. 
\"\"\" if article is None: categories = Category.objects.all() else: categories", "Library from django.utils.translation import ugettext_lazy as _ from touchtechnology.news.models import Article, Category register", "Library() @register.filter(\"category\") def get_category(slug): return Category.objects.get(slug=slug) @register.inclusion_tag('touchtechnology/news/_related_list.html', takes_context=True) def related_articles(context, article, limit=None, order_by=None):", "def latest_articles(context, count=5, title=_(\"Latest News\")): articles = Article.objects.live() paginator = Paginator(articles, count) page", "articles return context @register.inclusion_tag('touchtechnology/news/_related_list.html', takes_context=True) def related_categories(context, article=None, limit=None): \"\"\" If an article", "templates context['slice'] = ':' context['article_list'] = articles return context @register.inclusion_tag('touchtechnology/news/_related_list.html', takes_context=True) def related_categories(context,", "article is provided, then we select categories relating to it. 
Otherwise we select", "is None: categories = Category.objects.all() else: categories = article.categories.all() context['category_list'] = categories return", "import Library from django.utils.translation import ugettext_lazy as _ from touchtechnology.news.models import Article, Category", "Paginator(articles, count) page = paginator.page(1) context['paginator'] = paginator context['page'] = page context['article_list'] =", "articles = Article.objects.live() \\ .exclude(pk=article.pk) \\ .filter(categories__in=categories) \\ .distinct() if order_by is not", "= articles.order_by(*order_by.split(',')) if limit is not None: articles = articles[:int(limit)] # FIXME backwards", "\\ .distinct() if order_by is not None: articles = articles.order_by(*order_by.split(',')) if limit is", "for custom templates context['slice'] = ':' context['article_list'] = articles return context @register.inclusion_tag('touchtechnology/news/_related_list.html', takes_context=True)", "from django.template import Library from django.utils.translation import ugettext_lazy as _ from touchtechnology.news.models import", "Category.objects.get(slug=slug) @register.inclusion_tag('touchtechnology/news/_related_list.html', takes_context=True) def related_articles(context, article, limit=None, order_by=None): categories = article.categories.live() articles =", "backwards compatibility for custom templates context['slice'] = ':' context['article_list'] = articles return context", "article categories. \"\"\" if article is None: categories = Category.objects.all() else: categories =", "# FIXME backwards compatibility for custom templates context['slice'] = ':' context['article_list'] = articles", "an article is provided, then we select categories relating to it. 
Otherwise we", "\\ .exclude(pk=article.pk) \\ .filter(categories__in=categories) \\ .distinct() if order_by is not None: articles =", "Article.objects.live() paginator = Paginator(articles, count) page = paginator.page(1) context['paginator'] = paginator context['page'] =", "return Category.objects.get(slug=slug) @register.inclusion_tag('touchtechnology/news/_related_list.html', takes_context=True) def related_articles(context, article, limit=None, order_by=None): categories = article.categories.live() articles", "return context @register.inclusion_tag('touchtechnology/news/_related_list.html', takes_context=True) def related_categories(context, article=None, limit=None): \"\"\" If an article is", "takes_context=True) def related_categories(context, article=None, limit=None): \"\"\" If an article is provided, then we", "related_categories(context, article=None, limit=None): \"\"\" If an article is provided, then we select categories", "limit=None, order_by=None): categories = article.categories.live() articles = Article.objects.live() \\ .exclude(pk=article.pk) \\ .filter(categories__in=categories) \\", "paginator.page(1) context['paginator'] = paginator context['page'] = page context['article_list'] = page.object_list context['title'] = title", "from touchtechnology.news.models import Article, Category register = Library() @register.filter(\"category\") def get_category(slug): return Category.objects.get(slug=slug)", "count) page = paginator.page(1) context['paginator'] = paginator context['page'] = page context['article_list'] = page.object_list", "from django.utils.translation import ugettext_lazy as _ from touchtechnology.news.models import Article, Category register =", "touchtechnology.news.models import Article, Category register = Library() @register.filter(\"category\") def get_category(slug): return Category.objects.get(slug=slug) @register.inclusion_tag('touchtechnology/news/_related_list.html',", "articles = articles[:int(limit)] # FIXME backwards 
compatibility for custom templates context['slice'] = ':'", "News\")): articles = Article.objects.live() paginator = Paginator(articles, count) page = paginator.page(1) context['paginator'] =", "= paginator.page(1) context['paginator'] = paginator context['page'] = page context['article_list'] = page.object_list context['title'] =" ]
[ "= \"example\" class __Dimens: def __init__(self): self.test_int = 42 self.test_int_2 = 17 R", "Resources: def __init__(self): self.string = self.__Strings() self.id = self.__Ids() self.dimen = self.__Dimens() class", "\"O campo deve possuir no máximo %(max_length)d caracteres.\" self.field_length_range = \"O campo deve", "cadastrado.\" self.unique_field = \"Valor já registrado.\" self.field_min_length_singular = \"O campo deve possuir no", "lançada para esse campo\" class __Ids: def __init__(self): self.example = \"example\" class __Dimens:", "erro sempre será lançada para esse campo\" class __Ids: def __init__(self): self.example =", "[<NAME> - <EMAIL>] # ====================================================================================================================== class Resources: def __init__(self): self.string = self.__Strings() self.id", "possuir no máximo %(max_length)d caracteres.\" self.field_length_range = \"O campo deve possuir entre %(min_length)d", "= \"static\" self.toast = \"toast\" self.category_separator = \"-\" class __Validators: def __init__(self): self.required_field", "ser igual ao campo %(other_name)s.\" self.always_error = \"Essa mensagem de erro sempre será", "python # -*- coding: utf-8 -*- # ====================================================================================================================== # The MIT License (MIT)", "= \"Mensagem de teste 2\" self.static = \"static\" self.toast = \"toast\" self.category_separator =", "self.field_length_range = \"O campo deve possuir entre %(min_length)d e %(max_length)d caracteres.\" self.invalid_field_name =", "%(min_length)d caracteres.\" self.field_max_length_singular = \"O campo deve possuir no máximo %(max_length)d caracter.\" self.field_max_length_plural", "%(other_name)s.\" self.always_error = \"Essa mensagem de erro sempre será lançada para esse campo\"", "mínimo %(min_length)d caracteres.\" self.field_max_length_singular = \"O campo deve possuir no máximo %(max_length)d 
caracter.\"", "campo deve possuir no máximo %(max_length)d caracter.\" self.field_max_length_plural = \"O campo deve possuir", "self.category_separator = \"-\" class __Validators: def __init__(self): self.required_field = \"Campo obrigatório.\" self.invalid_email_format =", "= \"O campo deve possuir no mínimo %(min_length)d caracteres.\" self.field_max_length_singular = \"O campo", "caracteres.\" self.invalid_field_name = \"Invalid field name '%(field_name)s'.\" self.field_must_be_equal_to = \"Este campo precisa ser", "%(min_length)d e %(max_length)d caracteres.\" self.invalid_field_name = \"Invalid field name '%(field_name)s'.\" self.field_must_be_equal_to = \"Este", "ao campo %(other_name)s.\" self.always_error = \"Essa mensagem de erro sempre será lançada para", "self.invalid_field_name = \"Invalid field name '%(field_name)s'.\" self.field_must_be_equal_to = \"Este campo precisa ser igual", "-*- coding: utf-8 -*- # ====================================================================================================================== # The MIT License (MIT) # ======================================================================================================================", "__Strings: def __init__(self): self.validators = self.__Validators() self.test_message = \"Mensagem de teste\" self.test_message_2 =", "= self.__Validators() self.test_message = \"Mensagem de teste\" self.test_message_2 = \"Mensagem de teste 2\"", "field name '%(field_name)s'.\" self.field_must_be_equal_to = \"Este campo precisa ser igual ao campo %(other_name)s.\"", "mensagem de erro sempre será lançada para esse campo\" class __Ids: def __init__(self):", "= \"Email já cadastrado.\" self.unique_field = \"Valor já registrado.\" self.field_min_length_singular = \"O campo", "self.always_error = \"Essa mensagem de erro sempre será lançada para esse campo\" class", "utf-8 -*- # 
====================================================================================================================== # The MIT License (MIT) # ====================================================================================================================== # Copyright", "%(max_length)d caracter.\" self.field_max_length_plural = \"O campo deve possuir no máximo %(max_length)d caracteres.\" self.field_length_range", "self.field_max_length_singular = \"O campo deve possuir no máximo %(max_length)d caracter.\" self.field_max_length_plural = \"O", "no máximo %(max_length)d caracter.\" self.field_max_length_plural = \"O campo deve possuir no máximo %(max_length)d", "no máximo %(max_length)d caracteres.\" self.field_length_range = \"O campo deve possuir entre %(min_length)d e", "= \"O campo deve possuir no máximo %(max_length)d caracteres.\" self.field_length_range = \"O campo", "\"O campo deve possuir no máximo %(max_length)d caracter.\" self.field_max_length_plural = \"O campo deve", "possuir no máximo %(max_length)d caracter.\" self.field_max_length_plural = \"O campo deve possuir no máximo", "possuir no mínimo %(min_length)d caracteres.\" self.field_max_length_singular = \"O campo deve possuir no máximo", "já cadastrado.\" self.unique_field = \"Valor já registrado.\" self.field_min_length_singular = \"O campo deve possuir", "deve possuir no máximo %(max_length)d caracteres.\" self.field_length_range = \"O campo deve possuir entre", "Copyright (c) 2016 [<NAME> - <EMAIL>] # ====================================================================================================================== class Resources: def __init__(self): self.string", "class __Ids: def __init__(self): self.example = \"example\" class __Dimens: def __init__(self): self.test_int =", "já registrado.\" self.field_min_length_singular = \"O campo deve possuir no mínimo %(min_length)d caracter.\" self.field_min_length_plural", "= \"Formato de email inválido.\" self.email_already_registered = 
\"Email já cadastrado.\" self.unique_field = \"Valor", "= \"Invalid field name '%(field_name)s'.\" self.field_must_be_equal_to = \"Este campo precisa ser igual ao", "\"-\" class __Validators: def __init__(self): self.required_field = \"Campo obrigatório.\" self.invalid_email_format = \"Formato de", "__Validators: def __init__(self): self.required_field = \"Campo obrigatório.\" self.invalid_email_format = \"Formato de email inválido.\"", "\"O campo deve possuir no mínimo %(min_length)d caracter.\" self.field_min_length_plural = \"O campo deve", "self.field_must_be_equal_to = \"Este campo precisa ser igual ao campo %(other_name)s.\" self.always_error = \"Essa", "MIT License (MIT) # ====================================================================================================================== # Copyright (c) 2016 [<NAME> - <EMAIL>] #", "deve possuir no mínimo %(min_length)d caracteres.\" self.field_max_length_singular = \"O campo deve possuir no", "mínimo %(min_length)d caracter.\" self.field_min_length_plural = \"O campo deve possuir no mínimo %(min_length)d caracteres.\"", "====================================================================================================================== class Resources: def __init__(self): self.string = self.__Strings() self.id = self.__Ids() self.dimen =", "class __Validators: def __init__(self): self.required_field = \"Campo obrigatório.\" self.invalid_email_format = \"Formato de email", "\"Valor já registrado.\" self.field_min_length_singular = \"O campo deve possuir no mínimo %(min_length)d caracter.\"", "= self.__Ids() self.dimen = self.__Dimens() class __Strings: def __init__(self): self.validators = self.__Validators() self.test_message", "deve possuir no máximo %(max_length)d caracter.\" self.field_max_length_plural = \"O campo deve possuir no", "máximo %(max_length)d caracter.\" self.field_max_length_plural = \"O campo deve possuir no máximo %(max_length)d caracteres.\"", "2016 [<NAME> - <EMAIL>] # 
====================================================================================================================== class Resources: def __init__(self): self.string = self.__Strings()", "!/usr/bin/env python # -*- coding: utf-8 -*- # ====================================================================================================================== # The MIT License", "self.validators = self.__Validators() self.test_message = \"Mensagem de teste\" self.test_message_2 = \"Mensagem de teste", "def __init__(self): self.validators = self.__Validators() self.test_message = \"Mensagem de teste\" self.test_message_2 = \"Mensagem", "self.__Ids() self.dimen = self.__Dimens() class __Strings: def __init__(self): self.validators = self.__Validators() self.test_message =", "de email inválido.\" self.email_already_registered = \"Email já cadastrado.\" self.unique_field = \"Valor já registrado.\"", "self.string = self.__Strings() self.id = self.__Ids() self.dimen = self.__Dimens() class __Strings: def __init__(self):", "= \"Valor já registrado.\" self.field_min_length_singular = \"O campo deve possuir no mínimo %(min_length)d", "caracter.\" self.field_max_length_plural = \"O campo deve possuir no máximo %(max_length)d caracteres.\" self.field_length_range =", "====================================================================================================================== # Copyright (c) 2016 [<NAME> - <EMAIL>] # ====================================================================================================================== class Resources: def", "teste\" self.test_message_2 = \"Mensagem de teste 2\" self.static = \"static\" self.toast = \"toast\"", "de teste 2\" self.static = \"static\" self.toast = \"toast\" self.category_separator = \"-\" class", "# Copyright (c) 2016 [<NAME> - <EMAIL>] # ====================================================================================================================== class Resources: def __init__(self):", "no mínimo 
%(min_length)d caracter.\" self.field_min_length_plural = \"O campo deve possuir no mínimo %(min_length)d", "# -*- coding: utf-8 -*- # ====================================================================================================================== # The MIT License (MIT) #", "(MIT) # ====================================================================================================================== # Copyright (c) 2016 [<NAME> - <EMAIL>] # ====================================================================================================================== class", "caracteres.\" self.field_max_length_singular = \"O campo deve possuir no máximo %(max_length)d caracter.\" self.field_max_length_plural =", "self.required_field = \"Campo obrigatório.\" self.invalid_email_format = \"Formato de email inválido.\" self.email_already_registered = \"Email", "sempre será lançada para esse campo\" class __Ids: def __init__(self): self.example = \"example\"", "teste 2\" self.static = \"static\" self.toast = \"toast\" self.category_separator = \"-\" class __Validators:", "__init__(self): self.required_field = \"Campo obrigatório.\" self.invalid_email_format = \"Formato de email inválido.\" self.email_already_registered =", "# ====================================================================================================================== # Copyright (c) 2016 [<NAME> - <EMAIL>] # ====================================================================================================================== class Resources:", "máximo %(max_length)d caracteres.\" self.field_length_range = \"O campo deve possuir entre %(min_length)d e %(max_length)d", "self.__Validators() self.test_message = \"Mensagem de teste\" self.test_message_2 = \"Mensagem de teste 2\" self.static", "campo deve possuir entre %(min_length)d e %(max_length)d caracteres.\" self.invalid_field_name = \"Invalid field name", "(c) 2016 [<NAME> - <EMAIL>] # 
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# ======================================================================================================================
# The MIT License (MIT)
# ======================================================================================================================
# Copyright (c) 2016 [<NAME> - <EMAIL>]
# ======================================================================================================================


class Resources:
    """Application resources grouped by kind, Android-``R``-style.

    ``R.string`` exposes user-facing text (Portuguese), ``R.id`` exposes
    identifier values and ``R.dimen`` exposes numeric dimensions.
    """

    def __init__(self):
        self.dimen = self.__Dimens()
        self.id = self.__Ids()
        self.string = self.__Strings()

    class __Strings:
        """User-facing text resources."""

        def __init__(self):
            self.category_separator = "-"
            self.static = "static"
            self.toast = "toast"
            self.test_message = "Mensagem de teste"
            self.test_message_2 = "Mensagem de teste 2"
            self.validators = self.__Validators()

        class __Validators:
            """Validation error messages.

            ``%(...)d`` / ``%(...)s`` placeholders are filled in with the
            ``%`` operator at the point of use.
            """

            def __init__(self):
                self.required_field = "Campo obrigatório."
                self.invalid_email_format = "Formato de email inválido."
                self.email_already_registered = "Email já cadastrado."
                self.unique_field = "Valor já registrado."
                self.field_min_length_singular = "O campo deve possuir no mínimo %(min_length)d caracter."
                self.field_min_length_plural = "O campo deve possuir no mínimo %(min_length)d caracteres."
                self.field_max_length_singular = "O campo deve possuir no máximo %(max_length)d caracter."
                self.field_max_length_plural = "O campo deve possuir no máximo %(max_length)d caracteres."
                self.field_length_range = "O campo deve possuir entre %(min_length)d e %(max_length)d caracteres."
                self.invalid_field_name = "Invalid field name '%(field_name)s'."
                self.field_must_be_equal_to = "Este campo precisa ser igual ao campo %(other_name)s."
                self.always_error = "Essa mensagem de erro sempre será lançada para esse campo"

    class __Ids:
        """Identifier resources."""

        def __init__(self):
            self.example = "example"

    class __Dimens:
        """Numeric dimension resources."""

        def __init__(self):
            self.test_int = 42
            self.test_int_2 = 17


# Module-level singleton through which resources are normally accessed.
R = Resources()
[ "-*- coding: utf-8 -*- __author__ = \"\"\"<NAME>\"\"\" __email__ = '<EMAIL>' __version__ = '0.1.0'", "# -*- coding: utf-8 -*- __author__ = \"\"\"<NAME>\"\"\" __email__ = '<EMAIL>' __version__ =" ]
[ "Revision ID: <KEY> Revises: 8f176326a337 Create Date: 2020-08-09 23:00:56.671372 \"\"\" from alembic import", "= '<KEY>' down_revision = '8f176326a337' branch_labels = None depends_on = None ROLES =", "Create Date: 2020-08-09 23:00:56.671372 \"\"\" from alembic import op import sqlalchemy as sa", "used by Alembic. revision = '<KEY>' down_revision = '8f176326a337' branch_labels = None depends_on", "= ['admin', 'user'] def upgrade(): op.add_column('users', sa.Column('role', sa.Enum(*ROLES), default='user')) def downgrade(): op.drop_column('users', 'role')", "= '8f176326a337' branch_labels = None depends_on = None ROLES = ['admin', 'user'] def", "sa # revision identifiers, used by Alembic. revision = '<KEY>' down_revision = '8f176326a337'", "None ROLES = ['admin', 'user'] def upgrade(): op.add_column('users', sa.Column('role', sa.Enum(*ROLES), default='user')) def downgrade():", "as sa # revision identifiers, used by Alembic. revision = '<KEY>' down_revision =", "'<KEY>' down_revision = '8f176326a337' branch_labels = None depends_on = None ROLES = ['admin',", "8f176326a337 Create Date: 2020-08-09 23:00:56.671372 \"\"\" from alembic import op import sqlalchemy as", "None depends_on = None ROLES = ['admin', 'user'] def upgrade(): op.add_column('users', sa.Column('role', sa.Enum(*ROLES),", "import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision", "identifiers, used by Alembic. revision = '<KEY>' down_revision = '8f176326a337' branch_labels = None", "2020-08-09 23:00:56.671372 \"\"\" from alembic import op import sqlalchemy as sa # revision", "down_revision = '8f176326a337' branch_labels = None depends_on = None ROLES = ['admin', 'user']", "<gh_stars>0 \"\"\"add_user_roles Revision ID: <KEY> Revises: 8f176326a337 Create Date: 2020-08-09 23:00:56.671372 \"\"\" from", "alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic.", "Alembic. 
revision = '<KEY>' down_revision = '8f176326a337' branch_labels = None depends_on = None", "= None depends_on = None ROLES = ['admin', 'user'] def upgrade(): op.add_column('users', sa.Column('role',", "ID: <KEY> Revises: 8f176326a337 Create Date: 2020-08-09 23:00:56.671372 \"\"\" from alembic import op", "import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '<KEY>'", "branch_labels = None depends_on = None ROLES = ['admin', 'user'] def upgrade(): op.add_column('users',", "from alembic import op import sqlalchemy as sa # revision identifiers, used by", "by Alembic. revision = '<KEY>' down_revision = '8f176326a337' branch_labels = None depends_on =", "# revision identifiers, used by Alembic. revision = '<KEY>' down_revision = '8f176326a337' branch_labels", "revision = '<KEY>' down_revision = '8f176326a337' branch_labels = None depends_on = None ROLES", "Date: 2020-08-09 23:00:56.671372 \"\"\" from alembic import op import sqlalchemy as sa #", "revision identifiers, used by Alembic. revision = '<KEY>' down_revision = '8f176326a337' branch_labels =", "= None ROLES = ['admin', 'user'] def upgrade(): op.add_column('users', sa.Column('role', sa.Enum(*ROLES), default='user')) def", "\"\"\" from alembic import op import sqlalchemy as sa # revision identifiers, used", "op import sqlalchemy as sa # revision identifiers, used by Alembic. revision =", "ROLES = ['admin', 'user'] def upgrade(): op.add_column('users', sa.Column('role', sa.Enum(*ROLES), default='user')) def downgrade(): op.drop_column('users',", "sqlalchemy as sa # revision identifiers, used by Alembic. 
revision = '<KEY>' down_revision", "23:00:56.671372 \"\"\" from alembic import op import sqlalchemy as sa # revision identifiers,", "Revises: 8f176326a337 Create Date: 2020-08-09 23:00:56.671372 \"\"\" from alembic import op import sqlalchemy", "depends_on = None ROLES = ['admin', 'user'] def upgrade(): op.add_column('users', sa.Column('role', sa.Enum(*ROLES), default='user'))", "'8f176326a337' branch_labels = None depends_on = None ROLES = ['admin', 'user'] def upgrade():", "<KEY> Revises: 8f176326a337 Create Date: 2020-08-09 23:00:56.671372 \"\"\" from alembic import op import", "\"\"\"add_user_roles Revision ID: <KEY> Revises: 8f176326a337 Create Date: 2020-08-09 23:00:56.671372 \"\"\" from alembic" ]
[ "return fname + extension def write(self, fname, data, format=None): fname = self._sanitize_fname(fname) with", "fname = fname.replace(\"/\", \"-\") return fname + extension def write(self, fname, data, format=None):", "self._sanitize_fname(fname) output_data = self.normalize_data(data, format) with open(os.path.join(self.path, fname), \"r\") as infile: compdata =", "import lod_api import difflib import json from collections import OrderedDict class MockDataHandler(): def", "= json.loads(data, object_pairs_hook=orderedDict) # data_out = json.dumps(data_json) # elif format == \"jsonl\": #", "sorted(data.split(\"\\n\")) data_out = \"\\n\".join(lines) else: data_out = data # if format == \"json\":", "os import lod_api import difflib import json from collections import OrderedDict class MockDataHandler():", "data_out = json.dumps(data_json) # elif format == \"jsonl\": # lines = [] #", "_sanitize_fname(self, fname, extension=\".dat\"): fname = fname.replace(\"/\", \"-\") return fname + extension def write(self,", "== \"nq\": print(\"sort nq file\") lines = sorted(data.split(\"\\n\")) data_out = \"\\n\".join(lines) else: data_out", "# data_out = json.dumps(data_json) # elif format == \"jsonl\": # lines = []", "= \"\\n\".join(lines) else: data_out = data # if format == \"json\": # data_json", "import difflib import json from collections import OrderedDict class MockDataHandler(): def __init__(self): #", "= json.loads(js, object_pairs_hook=orderedDict) # lines.append(json.dumps(data_json)) # data_out = \"\\n\".join(lines) return data_out def _sanitize_fname(self,", "dumps. 
\"\"\" if format == \"nq\": print(\"sort nq file\") lines = sorted(data.split(\"\\n\")) data_out", "= self._sanitize_fname(fname) with open(os.path.join(self.path, fname), \"w+\") as outfile: outfile.write(self.normalize_data(data, format)) def compare(self, fname,", "def compare(self, fname, data, format=None): fname = self._sanitize_fname(fname) output_data = self.normalize_data(data, format) with", "format) with open(os.path.join(self.path, fname), \"r\") as infile: compdata = infile.read() diff = difflib.unified_diff(output_data,", "json.loads(data, object_pairs_hook=orderedDict) # data_out = json.dumps(data_json) # elif format == \"jsonl\": # lines", "format): \"\"\" normalize given data with a given format in order to compare", "else: data_out = data # if format == \"json\": # data_json = json.loads(data,", "fname = self._sanitize_fname(fname) output_data = self.normalize_data(data, format) with open(os.path.join(self.path, fname), \"r\") as infile:", "import OrderedDict class MockDataHandler(): def __init__(self): # define path for test data self.path", "fname + extension def write(self, fname, data, format=None): fname = self._sanitize_fname(fname) with open(os.path.join(self.path,", "to compare them with data dumps. 
\"\"\" if format == \"nq\": print(\"sort nq", "= fname.replace(\"/\", \"-\") return fname + extension def write(self, fname, data, format=None): fname", "js in data.split(\"\\n\"): # if js: # print(js) # data_json = json.loads(js, object_pairs_hook=orderedDict)", "lines = [] # for js in data.split(\"\\n\"): # if js: # print(js)", "\"\\n\".join(lines) else: data_out = data # if format == \"json\": # data_json =", "# for js in data.split(\"\\n\"): # if js: # print(js) # data_json =", "def write(self, fname, data, format=None): fname = self._sanitize_fname(fname) with open(os.path.join(self.path, fname), \"w+\") as", "open(os.path.join(self.path, fname), \"w+\") as outfile: outfile.write(self.normalize_data(data, format)) def compare(self, fname, data, format=None): fname", "<filename>tests/integrationtests/mock_data.py import os import lod_api import difflib import json from collections import OrderedDict", "js: # print(js) # data_json = json.loads(js, object_pairs_hook=orderedDict) # lines.append(json.dumps(data_json)) # data_out =", "\"json\": # data_json = json.loads(data, object_pairs_hook=orderedDict) # data_out = json.dumps(data_json) # elif format", "compdata = infile.read() diff = difflib.unified_diff(output_data, compdata, fromfile=\"API-data\", tofile=fname) print(\"\".join(diff)) assert(output_data == compdata)", "for js in data.split(\"\\n\"): # if js: # print(js) # data_json = json.loads(js,", "format == \"nq\": print(\"sort nq file\") lines = sorted(data.split(\"\\n\")) data_out = \"\\n\".join(lines) else:", "with data dumps. 
\"\"\" if format == \"nq\": print(\"sort nq file\") lines =", "write(self, fname, data, format=None): fname = self._sanitize_fname(fname) with open(os.path.join(self.path, fname), \"w+\") as outfile:", "fname), \"r\") as infile: compdata = infile.read() diff = difflib.unified_diff(output_data, compdata, fromfile=\"API-data\", tofile=fname)", "\"\\n\".join(lines) return data_out def _sanitize_fname(self, fname, extension=\".dat\"): fname = fname.replace(\"/\", \"-\") return fname", "output_data = self.normalize_data(data, format) with open(os.path.join(self.path, fname), \"r\") as infile: compdata = infile.read()", "file\") lines = sorted(data.split(\"\\n\")) data_out = \"\\n\".join(lines) else: data_out = data # if", "with a given format in order to compare them with data dumps. \"\"\"", "format == \"json\": # data_json = json.loads(data, object_pairs_hook=orderedDict) # data_out = json.dumps(data_json) #", "# data_out = \"\\n\".join(lines) return data_out def _sanitize_fname(self, fname, extension=\".dat\"): fname = fname.replace(\"/\",", "# data_json = json.loads(js, object_pairs_hook=orderedDict) # lines.append(json.dumps(data_json)) # data_out = \"\\n\".join(lines) return data_out", "# if format == \"json\": # data_json = json.loads(data, object_pairs_hook=orderedDict) # data_out =", "__init__(self): # define path for test data self.path = os.path.join(lod_api.__path__[0], \"../../tests/data/mockout/\") print(self.path) def", "# if js: # print(js) # data_json = json.loads(js, object_pairs_hook=orderedDict) # lines.append(json.dumps(data_json)) #", "== \"jsonl\": # lines = [] # for js in data.split(\"\\n\"): # if", "[] # for js in data.split(\"\\n\"): # if js: # print(js) # data_json", "order to compare them with data dumps. 
\"\"\" if format == \"nq\": print(\"sort", "= [] # for js in data.split(\"\\n\"): # if js: # print(js) #", "lines.append(json.dumps(data_json)) # data_out = \"\\n\".join(lines) return data_out def _sanitize_fname(self, fname, extension=\".dat\"): fname =", "lod_api import difflib import json from collections import OrderedDict class MockDataHandler(): def __init__(self):", "fname, extension=\".dat\"): fname = fname.replace(\"/\", \"-\") return fname + extension def write(self, fname,", "return data_out def _sanitize_fname(self, fname, extension=\".dat\"): fname = fname.replace(\"/\", \"-\") return fname +", "json from collections import OrderedDict class MockDataHandler(): def __init__(self): # define path for", "format in order to compare them with data dumps. \"\"\" if format ==", "\"\"\" if format == \"nq\": print(\"sort nq file\") lines = sorted(data.split(\"\\n\")) data_out =", "a given format in order to compare them with data dumps. \"\"\" if", "if js: # print(js) # data_json = json.loads(js, object_pairs_hook=orderedDict) # lines.append(json.dumps(data_json)) # data_out", "if format == \"nq\": print(\"sort nq file\") lines = sorted(data.split(\"\\n\")) data_out = \"\\n\".join(lines)", "# lines = [] # for js in data.split(\"\\n\"): # if js: #", "= os.path.join(lod_api.__path__[0], \"../../tests/data/mockout/\") print(self.path) def normalize_data(self, data, format): \"\"\" normalize given data with", "open(os.path.join(self.path, fname), \"r\") as infile: compdata = infile.read() diff = difflib.unified_diff(output_data, compdata, fromfile=\"API-data\",", "# elif format == \"jsonl\": # lines = [] # for js in", "print(self.path) def normalize_data(self, data, format): \"\"\" normalize given data with a given format", "data_out def _sanitize_fname(self, fname, extension=\".dat\"): fname = fname.replace(\"/\", \"-\") return fname + extension", "lines = sorted(data.split(\"\\n\")) data_out = \"\\n\".join(lines) else: data_out = data # if format", "infile: 
compdata = infile.read() diff = difflib.unified_diff(output_data, compdata, fromfile=\"API-data\", tofile=fname) print(\"\".join(diff)) assert(output_data ==", "given format in order to compare them with data dumps. \"\"\" if format", "self.normalize_data(data, format) with open(os.path.join(self.path, fname), \"r\") as infile: compdata = infile.read() diff =", "MockDataHandler(): def __init__(self): # define path for test data self.path = os.path.join(lod_api.__path__[0], \"../../tests/data/mockout/\")", "= json.dumps(data_json) # elif format == \"jsonl\": # lines = [] # for", "given data with a given format in order to compare them with data", "data dumps. \"\"\" if format == \"nq\": print(\"sort nq file\") lines = sorted(data.split(\"\\n\"))", "data_out = \"\\n\".join(lines) return data_out def _sanitize_fname(self, fname, extension=\".dat\"): fname = fname.replace(\"/\", \"-\")", "as outfile: outfile.write(self.normalize_data(data, format)) def compare(self, fname, data, format=None): fname = self._sanitize_fname(fname) output_data", "test data self.path = os.path.join(lod_api.__path__[0], \"../../tests/data/mockout/\") print(self.path) def normalize_data(self, data, format): \"\"\" normalize", "extension=\".dat\"): fname = fname.replace(\"/\", \"-\") return fname + extension def write(self, fname, data,", "import os import lod_api import difflib import json from collections import OrderedDict class", "\"../../tests/data/mockout/\") print(self.path) def normalize_data(self, data, format): \"\"\" normalize given data with a given", "print(\"sort nq file\") lines = sorted(data.split(\"\\n\")) data_out = \"\\n\".join(lines) else: data_out = data", "object_pairs_hook=orderedDict) # lines.append(json.dumps(data_json)) # data_out = \"\\n\".join(lines) return data_out def _sanitize_fname(self, fname, extension=\".dat\"):", "def _sanitize_fname(self, fname, extension=\".dat\"): fname = fname.replace(\"/\", \"-\") return fname + extension def", "data with a given format 
in order to compare them with data dumps.", "data_json = json.loads(data, object_pairs_hook=orderedDict) # data_out = json.dumps(data_json) # elif format == \"jsonl\":", "format=None): fname = self._sanitize_fname(fname) with open(os.path.join(self.path, fname), \"w+\") as outfile: outfile.write(self.normalize_data(data, format)) def", "normalize_data(self, data, format): \"\"\" normalize given data with a given format in order", "== \"json\": # data_json = json.loads(data, object_pairs_hook=orderedDict) # data_out = json.dumps(data_json) # elif", "= sorted(data.split(\"\\n\")) data_out = \"\\n\".join(lines) else: data_out = data # if format ==", "# print(js) # data_json = json.loads(js, object_pairs_hook=orderedDict) # lines.append(json.dumps(data_json)) # data_out = \"\\n\".join(lines)", "# lines.append(json.dumps(data_json)) # data_out = \"\\n\".join(lines) return data_out def _sanitize_fname(self, fname, extension=\".dat\"): fname", "self._sanitize_fname(fname) with open(os.path.join(self.path, fname), \"w+\") as outfile: outfile.write(self.normalize_data(data, format)) def compare(self, fname, data,", "data, format): \"\"\" normalize given data with a given format in order to", "as infile: compdata = infile.read() diff = difflib.unified_diff(output_data, compdata, fromfile=\"API-data\", tofile=fname) print(\"\".join(diff)) assert(output_data", "data.split(\"\\n\"): # if js: # print(js) # data_json = json.loads(js, object_pairs_hook=orderedDict) # lines.append(json.dumps(data_json))", "\"w+\") as outfile: outfile.write(self.normalize_data(data, format)) def compare(self, fname, data, format=None): fname = self._sanitize_fname(fname)", "def normalize_data(self, data, format): \"\"\" normalize given data with a given format in", "fname, data, format=None): fname = self._sanitize_fname(fname) output_data = self.normalize_data(data, format) with open(os.path.join(self.path, fname),", "data self.path = os.path.join(lod_api.__path__[0], \"../../tests/data/mockout/\") 
print(self.path) def normalize_data(self, data, format): \"\"\" normalize given", "os.path.join(lod_api.__path__[0], \"../../tests/data/mockout/\") print(self.path) def normalize_data(self, data, format): \"\"\" normalize given data with a", "with open(os.path.join(self.path, fname), \"w+\") as outfile: outfile.write(self.normalize_data(data, format)) def compare(self, fname, data, format=None):", "self.path = os.path.join(lod_api.__path__[0], \"../../tests/data/mockout/\") print(self.path) def normalize_data(self, data, format): \"\"\" normalize given data", "def __init__(self): # define path for test data self.path = os.path.join(lod_api.__path__[0], \"../../tests/data/mockout/\") print(self.path)", "class MockDataHandler(): def __init__(self): # define path for test data self.path = os.path.join(lod_api.__path__[0],", "import json from collections import OrderedDict class MockDataHandler(): def __init__(self): # define path", "data_json = json.loads(js, object_pairs_hook=orderedDict) # lines.append(json.dumps(data_json)) # data_out = \"\\n\".join(lines) return data_out def", "\"-\") return fname + extension def write(self, fname, data, format=None): fname = self._sanitize_fname(fname)", "difflib import json from collections import OrderedDict class MockDataHandler(): def __init__(self): # define", "+ extension def write(self, fname, data, format=None): fname = self._sanitize_fname(fname) with open(os.path.join(self.path, fname),", "extension def write(self, fname, data, format=None): fname = self._sanitize_fname(fname) with open(os.path.join(self.path, fname), \"w+\")", "\"nq\": print(\"sort nq file\") lines = sorted(data.split(\"\\n\")) data_out = \"\\n\".join(lines) else: data_out =", "format == \"jsonl\": # lines = [] # for js in data.split(\"\\n\"): #", "json.loads(js, object_pairs_hook=orderedDict) # lines.append(json.dumps(data_json)) # data_out = \"\\n\".join(lines) return data_out def _sanitize_fname(self, fname,", "in data.split(\"\\n\"): # if js: # 
print(js) # data_json = json.loads(js, object_pairs_hook=orderedDict) #", "compare(self, fname, data, format=None): fname = self._sanitize_fname(fname) output_data = self.normalize_data(data, format) with open(os.path.join(self.path,", "if format == \"json\": # data_json = json.loads(data, object_pairs_hook=orderedDict) # data_out = json.dumps(data_json)", "data, format=None): fname = self._sanitize_fname(fname) with open(os.path.join(self.path, fname), \"w+\") as outfile: outfile.write(self.normalize_data(data, format))", "for test data self.path = os.path.join(lod_api.__path__[0], \"../../tests/data/mockout/\") print(self.path) def normalize_data(self, data, format): \"\"\"", "format)) def compare(self, fname, data, format=None): fname = self._sanitize_fname(fname) output_data = self.normalize_data(data, format)", "define path for test data self.path = os.path.join(lod_api.__path__[0], \"../../tests/data/mockout/\") print(self.path) def normalize_data(self, data,", "data_out = \"\\n\".join(lines) else: data_out = data # if format == \"json\": #", "\"jsonl\": # lines = [] # for js in data.split(\"\\n\"): # if js:", "with open(os.path.join(self.path, fname), \"r\") as infile: compdata = infile.read() diff = difflib.unified_diff(output_data, compdata,", "\"r\") as infile: compdata = infile.read() diff = difflib.unified_diff(output_data, compdata, fromfile=\"API-data\", tofile=fname) print(\"\".join(diff))", "data_out = data # if format == \"json\": # data_json = json.loads(data, object_pairs_hook=orderedDict)", "data, format=None): fname = self._sanitize_fname(fname) output_data = self.normalize_data(data, format) with open(os.path.join(self.path, fname), \"r\")", "\"\"\" normalize given data with a given format in order to compare them", "fname.replace(\"/\", \"-\") return fname + extension def write(self, fname, data, format=None): fname =", "fname = self._sanitize_fname(fname) with open(os.path.join(self.path, fname), \"w+\") as outfile: 
outfile.write(self.normalize_data(data, format)) def compare(self,", "normalize given data with a given format in order to compare them with", "= self._sanitize_fname(fname) output_data = self.normalize_data(data, format) with open(os.path.join(self.path, fname), \"r\") as infile: compdata", "them with data dumps. \"\"\" if format == \"nq\": print(\"sort nq file\") lines", "outfile: outfile.write(self.normalize_data(data, format)) def compare(self, fname, data, format=None): fname = self._sanitize_fname(fname) output_data =", "OrderedDict class MockDataHandler(): def __init__(self): # define path for test data self.path =", "path for test data self.path = os.path.join(lod_api.__path__[0], \"../../tests/data/mockout/\") print(self.path) def normalize_data(self, data, format):", "json.dumps(data_json) # elif format == \"jsonl\": # lines = [] # for js", "in order to compare them with data dumps. \"\"\" if format == \"nq\":", "compare them with data dumps. \"\"\" if format == \"nq\": print(\"sort nq file\")", "collections import OrderedDict class MockDataHandler(): def __init__(self): # define path for test data", "# define path for test data self.path = os.path.join(lod_api.__path__[0], \"../../tests/data/mockout/\") print(self.path) def normalize_data(self,", "fname), \"w+\") as outfile: outfile.write(self.normalize_data(data, format)) def compare(self, fname, data, format=None): fname =", "outfile.write(self.normalize_data(data, format)) def compare(self, fname, data, format=None): fname = self._sanitize_fname(fname) output_data = self.normalize_data(data,", "= data # if format == \"json\": # data_json = json.loads(data, object_pairs_hook=orderedDict) #", "= self.normalize_data(data, format) with open(os.path.join(self.path, fname), \"r\") as infile: compdata = infile.read() diff", "from collections import OrderedDict class MockDataHandler(): def __init__(self): # define path for test", "fname, data, format=None): fname = self._sanitize_fname(fname) with 
open(os.path.join(self.path, fname), \"w+\") as outfile: outfile.write(self.normalize_data(data,", "object_pairs_hook=orderedDict) # data_out = json.dumps(data_json) # elif format == \"jsonl\": # lines =", "# data_json = json.loads(data, object_pairs_hook=orderedDict) # data_out = json.dumps(data_json) # elif format ==", "data # if format == \"json\": # data_json = json.loads(data, object_pairs_hook=orderedDict) # data_out", "format=None): fname = self._sanitize_fname(fname) output_data = self.normalize_data(data, format) with open(os.path.join(self.path, fname), \"r\") as", "= \"\\n\".join(lines) return data_out def _sanitize_fname(self, fname, extension=\".dat\"): fname = fname.replace(\"/\", \"-\") return", "print(js) # data_json = json.loads(js, object_pairs_hook=orderedDict) # lines.append(json.dumps(data_json)) # data_out = \"\\n\".join(lines) return", "elif format == \"jsonl\": # lines = [] # for js in data.split(\"\\n\"):", "nq file\") lines = sorted(data.split(\"\\n\")) data_out = \"\\n\".join(lines) else: data_out = data #" ]
[ "class BaseGroup: def __init__(self, log_type): self.log_type = log_type def __call__(self, data): return data" ]
[ "= Tokenizer.uuid_re.sub(\"RNGU\", strip) # Remove heat short uuid but keep spacing # ObjectName-2kbhkd45kcs3-ServiceName", "random test r\"|zuul.*echo BECOME-SUCCESS-\" r\"|^[^ ]{64}$\" # useless debug statement r\"|ovs-ofctl .* (dump-ports|dump-flows|show)\\b\"", "= re.compile(r\"\\b\\w{7}\\.\\.\\w{7}\\b\") hash_re = re.compile(r\"SHA256:[\\w+/]{43}\\b\") @staticmethod def process(line: str) -> str: # Ignore", "= \" \".join(filter(lambda x: len(x) > 3, strip.split())) # Weight failure token for", "strip = Tokenizer.ip_re.sub(\"RNGI\", strip) # Remove numbers strip = Tokenizer.digits_re.sub(\"\", strip) # Only", "if Tokenizer.rawline_re.search(line): return \"\" strip = line # Break URI percent encoding strip", "= re.compile(r\"git\\w+\") digits_re = re.compile(r\"0x[0-9a-fA-F]{2,}|[0-9]+(?:\\.\\d+)?\") randpath_re = re.compile( r\"(?:/tmp/ansible\\.\\w{8}\" r\"|/tmp/tmp\\w{6}\" r\"|/tmp/tmp\\.\\w{10})\\b\" ) gitsha_re", "strip) # Remove tiny words strip = \" \".join(filter(lambda x: len(x) > 3,", "password for\" r\"|sshd.*- POSSIBLE BREAK-IN ATTEMPT\" # zuul random test r\"|zuul.*echo BECOME-SUCCESS-\" r\"|^[^", "( \"january|february|march|april|may|june|july|august|september|\" \"october|november|december\" ) SHORT_MONTHS = \"jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dev\" SHORT_DAYS = \"mon|tue|wed|thu|fri|sat|sun\" UUID_RE = r\"[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-\"", "os DAYS = \"sunday|monday|tuesday|wednesday|thursday|friday|saturday\" MONTHS = ( \"january|february|march|april|may|june|july|august|september|\" \"october|november|december\" ) SHORT_MONTHS = \"jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dev\"", "r\"|^[^ ]{64}$\" # useless debug statement r\"|ovs-ofctl .* (dump-ports|dump-flows|show)\\b\" r\"|(ip|eb)tables .* -L\\b\" )", "logreduce.tokenizer import re import os DAYS = \"sunday|monday|tuesday|wednesday|thursday|friday|saturday\" MONTHS = ( \"january|february|march|april|may|june|july|august|september|\" 
\"october|november|december\"", "available authentication methods\" r\"|unix_chkpwd.*: password check failed for user\" r\"|sshd.*: authentication failure\" r\"|sshd.*:", "r\"|ovs-ofctl .* (dump-ports|dump-flows|show)\\b\" r\"|(ip|eb)tables .* -L\\b\" ) # See https://en.wikipedia.org/wiki/Percent-encoding uri_percent_re = re.compile(r\"%[2345][0-9A-F]\")", "raw pattern first if Tokenizer.rawline_re.search(line): return \"\" strip = line # Break URI", "-> ObjectName-HEATID-ServiceName strip = Tokenizer.heat_re.sub(\" HEATID \", strip) # Remove git sha strip", "A copy of logreduce.tokenizer import re import os DAYS = \"sunday|monday|tuesday|wednesday|thursday|friday|saturday\" MONTHS =", "# re.sub(r'(?=\\b|_)_?test(?=\\b|_)', 'RNG', 'AUTH_test_') -> works power2_re = re.compile( r\"(?=\\b|_)_?(?:[\\w+/]{128}|[\\w+/]{64}|\" r\"[0-9a-fA-F]{40}|[0-9a-fA-F]{32})(?=\\b|_)\" ) uuid_re", "class Tokenizer: rawline_re = re.compile( # useless http GET r'\"GET / HTTP/1.1\"' r'|\"OPTIONS", "= Tokenizer.uri_percent_re.sub(\" \", strip) # Remove words that are exactly 32, 64 or", "ETA \" # yum mirrors information r\"|\\* [a-zA-Z]+: [a-zA-Z0-9\\.-]*$|Trying other mirror.\" # ssh", "strip) # Remove uuid strip = Tokenizer.uuid_re.sub(\"RNGU\", strip) # Remove heat short uuid", "= Tokenizer.ip_re.sub(\"RNGI\", strip) # Remove numbers strip = Tokenizer.digits_re.sub(\"\", strip) # Only keep", "\".join(filter(lambda x: len(x) > 3, strip.split())) # Weight failure token for token in", "# useless debug statement r\"|ovs-ofctl .* (dump-ports|dump-flows|show)\\b\" r\"|(ip|eb)tables .* -L\\b\" ) # See", "hashes strip = Tokenizer.hash_re.sub(\"RNGH\", strip) # Remove random path strip = Tokenizer.randpath_re.sub(\"RNGP\", strip)", "HEATID \", strip) # Remove git sha strip = Tokenizer.gitsha_re.sub(\"RNGG\", strip) # Remove", "strip.split())) # Weight failure token for token in (\"error\", \"fail\", \"warn\"): if token", "uuid but keep spacing # ObjectName-2kbhkd45kcs3-ServiceName -> 
ObjectName-HEATID-ServiceName strip = Tokenizer.heat_re.sub(\" HEATID \",", "heat short uuid but keep spacing # ObjectName-2kbhkd45kcs3-ServiceName -> ObjectName-HEATID-ServiceName strip = Tokenizer.heat_re.sub(\"", "re.sub(r'(?=\\b|_)test(?=\\b|_)', 'RNG', 'AUTH_test_') -> doesn't work # re.sub(r'(?=\\b|_)_?test(?=\\b|_)', 'RNG', 'AUTH_test_') -> works power2_re", "Tokenizer.rawline_re.search(line): return \"\" strip = line # Break URI percent encoding strip =", "git status r\"|HEAD is now at|Change-Id: \" # Download statement r\"| ETA \"", "re.compile( # useless http GET r'\"GET / HTTP/1.1\"' r'|\"OPTIONS * HTTP/1.0\" 200' #", "[a-zA-Z]+: [a-zA-Z0-9\\.-]*$|Trying other mirror.\" # ssh scan attempts r'|audit.*exe=\"/usr/sbin/sshd\"|sshd.*[iI]nvalid user' r\"|sshd.*Unable to connect", "= re.compile(r\"-\\w{12}[- \\\"$]\") comments = re.compile(r'(?:[\\s]*# |^%% |^#|^[\\s]*id = \").*') alpha_re = re.compile(r\"[^a-zA-Z_\\/\\s]+\")", "strip) # Only keep characters strip = Tokenizer.alpha_re.sub(\" \", strip) # Remove tiny", "# re.sub(r'(?=\\b|_)test(?=\\b|_)', 'RNG', 'AUTH_test_') -> doesn't work # re.sub(r'(?=\\b|_)_?test(?=\\b|_)', 'RNG', 'AUTH_test_') -> works", "\\\"$]\") comments = re.compile(r'(?:[\\s]*# |^%% |^#|^[\\s]*id = \").*') alpha_re = re.compile(r\"[^a-zA-Z_\\/\\s]+\") gitver_re =", "import os DAYS = \"sunday|monday|tuesday|wednesday|thursday|friday|saturday\" MONTHS = ( \"january|february|march|april|may|june|july|august|september|\" \"october|november|december\" ) SHORT_MONTHS =", "in (\"error\", \"fail\", \"warn\"): if token in strip.lower(): strip += \" %sA %sB", "strip = Tokenizer.power2_re.sub(\"RNGN\", strip) # Remove uuid strip = Tokenizer.uuid_re.sub(\"RNGU\", strip) # Remove", "/ HTTP/1.1\"' r'|\"OPTIONS * HTTP/1.0\" 200' # ssh keys r\"|AAAA[A-Z][0-9]\" # hashed password", "\").*') alpha_re = re.compile(r\"[^a-zA-Z_\\/\\s]+\") gitver_re = re.compile(r\"git\\w+\") digits_re = re.compile(r\"0x[0-9a-fA-F]{2,}|[0-9]+(?:\\.\\d+)?\") randpath_re = 
re.compile(", "= re.compile(r'(?:[\\s]*# |^%% |^#|^[\\s]*id = \").*') alpha_re = re.compile(r\"[^a-zA-Z_\\/\\s]+\") gitver_re = re.compile(r\"git\\w+\") digits_re", "( r\"(([01]?[0-9]?[0-9]|2[0-4][0-9]|2[5][0-5])\\.){3}\" r\"([01]?[0-9]?[0-9]|2[0-4][0-9]|2[5][0-5])\" ) IPV6_RE = r\"([0-9A-Fa-f]{0,4}:){2,6}(\\d{1,3}\\.){0,3}[0-9A-Fa-f]{1,3}\" MAC_RE = r\"([0-9a-fA-F]{2}[:-]){5}([0-9a-fA-F]{2})\" class Tokenizer: rawline_re", "= re.compile( r\"\\b(?:%s|%s|%s|%s)\\b\" % (DAYS, SHORT_DAYS, SHORT_MONTHS, MONTHS), re.I ) heat_re = re.compile(r\"-\\w{12}[-", "hash_re = re.compile(r\"SHA256:[\\w+/]{43}\\b\") @staticmethod def process(line: str) -> str: # Ignore some raw", "= \"mon|tue|wed|thu|fri|sat|sun\" UUID_RE = r\"[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-\" \"?[0-9a-f]{12}\" IPV4_RE = ( r\"(([01]?[0-9]?[0-9]|2[0-4][0-9]|2[5][0-5])\\.){3}\" r\"([01]?[0-9]?[0-9]|2[0-4][0-9]|2[5][0-5])\" ) IPV6_RE", "pattern first if Tokenizer.rawline_re.search(line): return \"\" strip = line # Break URI percent", "mirror.\" # ssh scan attempts r'|audit.*exe=\"/usr/sbin/sshd\"|sshd.*[iI]nvalid user' r\"|sshd.*Unable to connect using the available", "Tokenizer.gitsha_re.sub(\"RNGG\", strip) # Remove hashes strip = Tokenizer.hash_re.sub(\"RNGH\", strip) # Remove random path", "r\"[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-\" \"?[0-9a-f]{12}\" IPV4_RE = ( r\"(([01]?[0-9]?[0-9]|2[0-4][0-9]|2[5][0-5])\\.){3}\" r\"([01]?[0-9]?[0-9]|2[0-4][0-9]|2[5][0-5])\" ) IPV6_RE = r\"([0-9A-Fa-f]{0,4}:){2,6}(\\d{1,3}\\.){0,3}[0-9A-Fa-f]{1,3}\" MAC_RE =", "# Remove git sha strip = Tokenizer.gitsha_re.sub(\"RNGG\", strip) # Remove hashes strip =", "\", strip) # Remove words that are exactly 32, 64 or 128 character", "r\"|sshd.*Unable to connect using the available authentication methods\" r\"|unix_chkpwd.*: password check failed for", "SHORT_MONTHS = \"jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dev\" SHORT_DAYS = \"mon|tue|wed|thu|fri|sat|sun\" UUID_RE = 
r\"[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-\" \"?[0-9a-f]{12}\" IPV4_RE = (", "= re.compile( r\"(?=\\b|_)_?(?:[\\w+/]{128}|[\\w+/]{64}|\" r\"[0-9a-fA-F]{40}|[0-9a-fA-F]{32})(?=\\b|_)\" ) uuid_re = re.compile(r\"(?=\\b|_)_?(?:%s|tx[^ ]{32})(?=\\b|_)\" % UUID_RE, re.I) date_re", "'AUTH_test_') -> doesn't work # re.sub(r'(?=\\b|_)_?test(?=\\b|_)', 'RNG', 'AUTH_test_') -> works power2_re = re.compile(", "= Tokenizer.alpha_re.sub(\" \", strip) # Remove tiny words strip = \" \".join(filter(lambda x:", "to connect using the available authentication methods\" r\"|unix_chkpwd.*: password check failed for user\"", "token in (\"error\", \"fail\", \"warn\"): if token in strip.lower(): strip += \" %sA", "of logreduce.tokenizer import re import os DAYS = \"sunday|monday|tuesday|wednesday|thursday|friday|saturday\" MONTHS = ( \"january|february|march|april|may|june|july|august|september|\"", "status r\"|HEAD is now at|Change-Id: \" # Download statement r\"| ETA \" #", "# For some unknown reason, '_' in (?=) doesn't work in prefix match", "statement r\"| ETA \" # yum mirrors information r\"|\\* [a-zA-Z]+: [a-zA-Z0-9\\.-]*$|Trying other mirror.\"", "Remove uuid strip = Tokenizer.uuid_re.sub(\"RNGU\", strip) # Remove heat short uuid but keep", "password r\"|\\$[0-9]\\$\" # Certificates r\"|-----BEGIN\" # git status r\"|HEAD is now at|Change-Id: \"", "sha strip = Tokenizer.gitsha_re.sub(\"RNGG\", strip) # Remove hashes strip = Tokenizer.hash_re.sub(\"RNGH\", strip) #", "Remove date strip = Tokenizer.date_re.sub(\"DATE\", strip) # Remove ip/addr strip = Tokenizer.ip_re.sub(\"RNGI\", strip)", "Break URI percent encoding strip = Tokenizer.uri_percent_re.sub(\" \", strip) # Remove words that", "or 128 character longs strip = Tokenizer.power2_re.sub(\"RNGN\", strip) # Remove uuid strip =", "random path strip = Tokenizer.randpath_re.sub(\"RNGP\", strip) # Remove date strip = Tokenizer.date_re.sub(\"DATE\", strip)", "uuid_re = re.compile(r\"(?=\\b|_)_?(?:%s|tx[^ ]{32})(?=\\b|_)\" 
% UUID_RE, re.I) date_re = re.compile( r\"\\b(?:%s|%s|%s|%s)\\b\" % (DAYS,", "r'\"GET / HTTP/1.1\"' r'|\"OPTIONS * HTTP/1.0\" 200' # ssh keys r\"|AAAA[A-Z][0-9]\" # hashed", "strip) # Remove heat short uuid but keep spacing # ObjectName-2kbhkd45kcs3-ServiceName -> ObjectName-HEATID-ServiceName", "re.compile(r\"\\b\\w{7}\\.\\.\\w{7}\\b\") hash_re = re.compile(r\"SHA256:[\\w+/]{43}\\b\") @staticmethod def process(line: str) -> str: # Ignore some", "Tokenizer.power2_re.sub(\"RNGN\", strip) # Remove uuid strip = Tokenizer.uuid_re.sub(\"RNGU\", strip) # Remove heat short", "longs strip = Tokenizer.power2_re.sub(\"RNGN\", strip) # Remove uuid strip = Tokenizer.uuid_re.sub(\"RNGU\", strip) #", "See https://en.wikipedia.org/wiki/Percent-encoding uri_percent_re = re.compile(r\"%[2345][0-9A-F]\") ip_re = re.compile(r\"%s|%s|%s\" % (IPV4_RE, IPV6_RE, MAC_RE)) #", "strip = line # Break URI percent encoding strip = Tokenizer.uri_percent_re.sub(\" \", strip)", "digits_re = re.compile(r\"0x[0-9a-fA-F]{2,}|[0-9]+(?:\\.\\d+)?\") randpath_re = re.compile( r\"(?:/tmp/ansible\\.\\w{8}\" r\"|/tmp/tmp\\w{6}\" r\"|/tmp/tmp\\.\\w{10})\\b\" ) gitsha_re = re.compile(r\"\\b\\w{7}\\.\\.\\w{7}\\b\")", "IPV6_RE = r\"([0-9A-Fa-f]{0,4}:){2,6}(\\d{1,3}\\.){0,3}[0-9A-Fa-f]{1,3}\" MAC_RE = r\"([0-9a-fA-F]{2}[:-]){5}([0-9a-fA-F]{2})\" class Tokenizer: rawline_re = re.compile( # useless", "# useless http GET r'\"GET / HTTP/1.1\"' r'|\"OPTIONS * HTTP/1.0\" 200' # ssh", "# Only keep characters strip = Tokenizer.alpha_re.sub(\" \", strip) # Remove tiny words", "Failed password for\" r\"|sshd.*- POSSIBLE BREAK-IN ATTEMPT\" # zuul random test r\"|zuul.*echo BECOME-SUCCESS-\"", "# ssh keys r\"|AAAA[A-Z][0-9]\" # hashed password r\"|\\$[0-9]\\$\" # Certificates r\"|-----BEGIN\" # git", "strip += \" %sA %sB %sC %sD\" % (token, token, token, token) return", "strip = Tokenizer.heat_re.sub(\" HEATID \", strip) # Remove git sha strip = Tokenizer.gitsha_re.sub(\"RNGG\",", "# Remove numbers strip = 
Tokenizer.digits_re.sub(\"\", strip) # Only keep characters strip =", "MAC_RE = r\"([0-9a-fA-F]{2}[:-]){5}([0-9a-fA-F]{2})\" class Tokenizer: rawline_re = re.compile( # useless http GET r'\"GET", "strip) # Remove numbers strip = Tokenizer.digits_re.sub(\"\", strip) # Only keep characters strip", "numbers strip = Tokenizer.digits_re.sub(\"\", strip) # Only keep characters strip = Tokenizer.alpha_re.sub(\" \",", "in strip.lower(): strip += \" %sA %sB %sC %sD\" % (token, token, token,", "Tokenizer.uri_percent_re.sub(\" \", strip) # Remove words that are exactly 32, 64 or 128", "UUID_RE = r\"[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-\" \"?[0-9a-f]{12}\" IPV4_RE = ( r\"(([01]?[0-9]?[0-9]|2[0-4][0-9]|2[5][0-5])\\.){3}\" r\"([01]?[0-9]?[0-9]|2[0-4][0-9]|2[5][0-5])\" ) IPV6_RE = r\"([0-9A-Fa-f]{0,4}:){2,6}(\\d{1,3}\\.){0,3}[0-9A-Fa-f]{1,3}\"", "= ( \"january|february|march|april|may|june|july|august|september|\" \"october|november|december\" ) SHORT_MONTHS = \"jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dev\" SHORT_DAYS = \"mon|tue|wed|thu|fri|sat|sun\" UUID_RE =", "user' r\"|sshd.*Unable to connect using the available authentication methods\" r\"|unix_chkpwd.*: password check failed", "r\"|-----BEGIN\" # git status r\"|HEAD is now at|Change-Id: \" # Download statement r\"|", "= Tokenizer.gitsha_re.sub(\"RNGG\", strip) # Remove hashes strip = Tokenizer.hash_re.sub(\"RNGH\", strip) # Remove random", "# Remove uuid strip = Tokenizer.uuid_re.sub(\"RNGU\", strip) # Remove heat short uuid but", "= re.compile(r\"SHA256:[\\w+/]{43}\\b\") @staticmethod def process(line: str) -> str: # Ignore some raw pattern", "# Weight failure token for token in (\"error\", \"fail\", \"warn\"): if token in", "r\"|sshd.*: Failed password for\" r\"|sshd.*- POSSIBLE BREAK-IN ATTEMPT\" # zuul random test r\"|zuul.*echo", "Ignore some raw pattern first if Tokenizer.rawline_re.search(line): return \"\" strip = line #", "ObjectName-2kbhkd45kcs3-ServiceName -> ObjectName-HEATID-ServiceName strip 
= Tokenizer.heat_re.sub(\" HEATID \", strip) # Remove git sha", "copy of logreduce.tokenizer import re import os DAYS = \"sunday|monday|tuesday|wednesday|thursday|friday|saturday\" MONTHS = (", "strip) # Remove date strip = Tokenizer.date_re.sub(\"DATE\", strip) # Remove ip/addr strip =", "other mirror.\" # ssh scan attempts r'|audit.*exe=\"/usr/sbin/sshd\"|sshd.*[iI]nvalid user' r\"|sshd.*Unable to connect using the", "# Break URI percent encoding strip = Tokenizer.uri_percent_re.sub(\" \", strip) # Remove words", "\"\" strip = line # Break URI percent encoding strip = Tokenizer.uri_percent_re.sub(\" \",", "# yum mirrors information r\"|\\* [a-zA-Z]+: [a-zA-Z0-9\\.-]*$|Trying other mirror.\" # ssh scan attempts", "POSSIBLE BREAK-IN ATTEMPT\" # zuul random test r\"|zuul.*echo BECOME-SUCCESS-\" r\"|^[^ ]{64}$\" # useless", ") heat_re = re.compile(r\"-\\w{12}[- \\\"$]\") comments = re.compile(r'(?:[\\s]*# |^%% |^#|^[\\s]*id = \").*') alpha_re", "strip) # Remove ip/addr strip = Tokenizer.ip_re.sub(\"RNGI\", strip) # Remove numbers strip =", "re.compile( r\"\\b(?:%s|%s|%s|%s)\\b\" % (DAYS, SHORT_DAYS, SHORT_MONTHS, MONTHS), re.I ) heat_re = re.compile(r\"-\\w{12}[- \\\"$]\")", "re.compile(r\"%s|%s|%s\" % (IPV4_RE, IPV6_RE, MAC_RE)) # For some unknown reason, '_' in (?=)", "using the available authentication methods\" r\"|unix_chkpwd.*: password check failed for user\" r\"|sshd.*: authentication", "= \"sunday|monday|tuesday|wednesday|thursday|friday|saturday\" MONTHS = ( \"january|february|march|april|may|june|july|august|september|\" \"october|november|december\" ) SHORT_MONTHS = \"jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dev\" SHORT_DAYS =", "strip = \" \".join(filter(lambda x: len(x) > 3, strip.split())) # Weight failure token", "strip) # Remove git sha strip = Tokenizer.gitsha_re.sub(\"RNGG\", strip) # Remove hashes strip", "# Copyright (C) 2022 Red Hat # SPDX-License-Identifier: Apache-2.0 # A copy of", "re.compile(r\"%[2345][0-9A-F]\") ip_re = 
re.compile(r\"%s|%s|%s\" % (IPV4_RE, IPV6_RE, MAC_RE)) # For some unknown reason,", "rawline_re = re.compile( # useless http GET r'\"GET / HTTP/1.1\"' r'|\"OPTIONS * HTTP/1.0\"", "\" \".join(filter(lambda x: len(x) > 3, strip.split())) # Weight failure token for token", "SHORT_DAYS = \"mon|tue|wed|thu|fri|sat|sun\" UUID_RE = r\"[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-\" \"?[0-9a-f]{12}\" IPV4_RE = ( r\"(([01]?[0-9]?[0-9]|2[0-4][0-9]|2[5][0-5])\\.){3}\" r\"([01]?[0-9]?[0-9]|2[0-4][0-9]|2[5][0-5])\" )", "= Tokenizer.heat_re.sub(\" HEATID \", strip) # Remove git sha strip = Tokenizer.gitsha_re.sub(\"RNGG\", strip)", ") # See https://en.wikipedia.org/wiki/Percent-encoding uri_percent_re = re.compile(r\"%[2345][0-9A-F]\") ip_re = re.compile(r\"%s|%s|%s\" % (IPV4_RE, IPV6_RE,", "doesn't work in prefix match # re.sub(r'(?=\\b|_)test(?=\\b|_)', 'RNG', 'AUTH_test_') -> doesn't work #", "ObjectName-HEATID-ServiceName strip = Tokenizer.heat_re.sub(\" HEATID \", strip) # Remove git sha strip =", "zuul random test r\"|zuul.*echo BECOME-SUCCESS-\" r\"|^[^ ]{64}$\" # useless debug statement r\"|ovs-ofctl .*", "authentication methods\" r\"|unix_chkpwd.*: password check failed for user\" r\"|sshd.*: authentication failure\" r\"|sshd.*: Failed", "re.compile( r\"(?:/tmp/ansible\\.\\w{8}\" r\"|/tmp/tmp\\w{6}\" r\"|/tmp/tmp\\.\\w{10})\\b\" ) gitsha_re = re.compile(r\"\\b\\w{7}\\.\\.\\w{7}\\b\") hash_re = re.compile(r\"SHA256:[\\w+/]{43}\\b\") @staticmethod def", "scan attempts r'|audit.*exe=\"/usr/sbin/sshd\"|sshd.*[iI]nvalid user' r\"|sshd.*Unable to connect using the available authentication methods\" r\"|unix_chkpwd.*:", "works power2_re = re.compile( r\"(?=\\b|_)_?(?:[\\w+/]{128}|[\\w+/]{64}|\" r\"[0-9a-fA-F]{40}|[0-9a-fA-F]{32})(?=\\b|_)\" ) uuid_re = re.compile(r\"(?=\\b|_)_?(?:%s|tx[^ ]{32})(?=\\b|_)\" % UUID_RE,", "are exactly 32, 64 or 128 character longs strip = Tokenizer.power2_re.sub(\"RNGN\", strip) #", "= r\"([0-9a-fA-F]{2}[:-]){5}([0-9a-fA-F]{2})\" class 
Tokenizer: rawline_re = re.compile( # useless http GET r'\"GET /", "mirrors information r\"|\\* [a-zA-Z]+: [a-zA-Z0-9\\.-]*$|Trying other mirror.\" # ssh scan attempts r'|audit.*exe=\"/usr/sbin/sshd\"|sshd.*[iI]nvalid user'", "# Remove heat short uuid but keep spacing # ObjectName-2kbhkd45kcs3-ServiceName -> ObjectName-HEATID-ServiceName strip", "IPV4_RE = ( r\"(([01]?[0-9]?[0-9]|2[0-4][0-9]|2[5][0-5])\\.){3}\" r\"([01]?[0-9]?[0-9]|2[0-4][0-9]|2[5][0-5])\" ) IPV6_RE = r\"([0-9A-Fa-f]{0,4}:){2,6}(\\d{1,3}\\.){0,3}[0-9A-Fa-f]{1,3}\" MAC_RE = r\"([0-9a-fA-F]{2}[:-]){5}([0-9a-fA-F]{2})\" class", "information r\"|\\* [a-zA-Z]+: [a-zA-Z0-9\\.-]*$|Trying other mirror.\" # ssh scan attempts r'|audit.*exe=\"/usr/sbin/sshd\"|sshd.*[iI]nvalid user' r\"|sshd.*Unable", "failure token for token in (\"error\", \"fail\", \"warn\"): if token in strip.lower(): strip", "uri_percent_re = re.compile(r\"%[2345][0-9A-F]\") ip_re = re.compile(r\"%s|%s|%s\" % (IPV4_RE, IPV6_RE, MAC_RE)) # For some", "Tokenizer.ip_re.sub(\"RNGI\", strip) # Remove numbers strip = Tokenizer.digits_re.sub(\"\", strip) # Only keep characters", "attempts r'|audit.*exe=\"/usr/sbin/sshd\"|sshd.*[iI]nvalid user' r\"|sshd.*Unable to connect using the available authentication methods\" r\"|unix_chkpwd.*: password", "r'|audit.*exe=\"/usr/sbin/sshd\"|sshd.*[iI]nvalid user' r\"|sshd.*Unable to connect using the available authentication methods\" r\"|unix_chkpwd.*: password check", "\"mon|tue|wed|thu|fri|sat|sun\" UUID_RE = r\"[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-\" \"?[0-9a-f]{12}\" IPV4_RE = ( r\"(([01]?[0-9]?[0-9]|2[0-4][0-9]|2[5][0-5])\\.){3}\" r\"([01]?[0-9]?[0-9]|2[0-4][0-9]|2[5][0-5])\" ) IPV6_RE =", "characters strip = Tokenizer.alpha_re.sub(\" \", strip) # Remove tiny words strip = \"", "useless http GET r'\"GET / HTTP/1.1\"' r'|\"OPTIONS * HTTP/1.0\" 200' # ssh keys", "]{32})(?=\\b|_)\" % UUID_RE, re.I) date_re = re.compile( r\"\\b(?:%s|%s|%s|%s)\\b\" % (DAYS, SHORT_DAYS, SHORT_MONTHS, MONTHS),", 
"(DAYS, SHORT_DAYS, SHORT_MONTHS, MONTHS), re.I ) heat_re = re.compile(r\"-\\w{12}[- \\\"$]\") comments = re.compile(r'(?:[\\s]*#", "# zuul random test r\"|zuul.*echo BECOME-SUCCESS-\" r\"|^[^ ]{64}$\" # useless debug statement r\"|ovs-ofctl", ") IPV6_RE = r\"([0-9A-Fa-f]{0,4}:){2,6}(\\d{1,3}\\.){0,3}[0-9A-Fa-f]{1,3}\" MAC_RE = r\"([0-9a-fA-F]{2}[:-]){5}([0-9a-fA-F]{2})\" class Tokenizer: rawline_re = re.compile( #", "Download statement r\"| ETA \" # yum mirrors information r\"|\\* [a-zA-Z]+: [a-zA-Z0-9\\.-]*$|Trying other", "\"?[0-9a-f]{12}\" IPV4_RE = ( r\"(([01]?[0-9]?[0-9]|2[0-4][0-9]|2[5][0-5])\\.){3}\" r\"([01]?[0-9]?[0-9]|2[0-4][0-9]|2[5][0-5])\" ) IPV6_RE = r\"([0-9A-Fa-f]{0,4}:){2,6}(\\d{1,3}\\.){0,3}[0-9A-Fa-f]{1,3}\" MAC_RE = r\"([0-9a-fA-F]{2}[:-]){5}([0-9a-fA-F]{2})\"", "strip = Tokenizer.hash_re.sub(\"RNGH\", strip) # Remove random path strip = Tokenizer.randpath_re.sub(\"RNGP\", strip) #", "|^%% |^#|^[\\s]*id = \").*') alpha_re = re.compile(r\"[^a-zA-Z_\\/\\s]+\") gitver_re = re.compile(r\"git\\w+\") digits_re = re.compile(r\"0x[0-9a-fA-F]{2,}|[0-9]+(?:\\.\\d+)?\")", "Tokenizer: rawline_re = re.compile( # useless http GET r'\"GET / HTTP/1.1\"' r'|\"OPTIONS *", "work # re.sub(r'(?=\\b|_)_?test(?=\\b|_)', 'RNG', 'AUTH_test_') -> works power2_re = re.compile( r\"(?=\\b|_)_?(?:[\\w+/]{128}|[\\w+/]{64}|\" r\"[0-9a-fA-F]{40}|[0-9a-fA-F]{32})(?=\\b|_)\" )", "Tokenizer.date_re.sub(\"DATE\", strip) # Remove ip/addr strip = Tokenizer.ip_re.sub(\"RNGI\", strip) # Remove numbers strip", "that are exactly 32, 64 or 128 character longs strip = Tokenizer.power2_re.sub(\"RNGN\", strip)", "= \").*') alpha_re = re.compile(r\"[^a-zA-Z_\\/\\s]+\") gitver_re = re.compile(r\"git\\w+\") digits_re = re.compile(r\"0x[0-9a-fA-F]{2,}|[0-9]+(?:\\.\\d+)?\") randpath_re =", "# Remove tiny words strip = \" \".join(filter(lambda x: len(x) > 3, strip.split()))", "ATTEMPT\" # zuul random test r\"|zuul.*echo BECOME-SUCCESS-\" r\"|^[^ ]{64}$\" # useless debug statement", "MONTHS = ( 
\"january|february|march|april|may|june|july|august|september|\" \"october|november|december\" ) SHORT_MONTHS = \"jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dev\" SHORT_DAYS = \"mon|tue|wed|thu|fri|sat|sun\" UUID_RE", "3, strip.split())) # Weight failure token for token in (\"error\", \"fail\", \"warn\"): if", "Remove hashes strip = Tokenizer.hash_re.sub(\"RNGH\", strip) # Remove random path strip = Tokenizer.randpath_re.sub(\"RNGP\",", "r\"|unix_chkpwd.*: password check failed for user\" r\"|sshd.*: authentication failure\" r\"|sshd.*: Failed password for\"", "for token in (\"error\", \"fail\", \"warn\"): if token in strip.lower(): strip += \"", "Certificates r\"|-----BEGIN\" # git status r\"|HEAD is now at|Change-Id: \" # Download statement", "the available authentication methods\" r\"|unix_chkpwd.*: password check failed for user\" r\"|sshd.*: authentication failure\"", "-L\\b\" ) # See https://en.wikipedia.org/wiki/Percent-encoding uri_percent_re = re.compile(r\"%[2345][0-9A-F]\") ip_re = re.compile(r\"%s|%s|%s\" % (IPV4_RE,", "strip = Tokenizer.digits_re.sub(\"\", strip) # Only keep characters strip = Tokenizer.alpha_re.sub(\" \", strip)", "hashed password r\"|\\$[0-9]\\$\" # Certificates r\"|-----BEGIN\" # git status r\"|HEAD is now at|Change-Id:", "yum mirrors information r\"|\\* [a-zA-Z]+: [a-zA-Z0-9\\.-]*$|Trying other mirror.\" # ssh scan attempts r'|audit.*exe=\"/usr/sbin/sshd\"|sshd.*[iI]nvalid", "= Tokenizer.power2_re.sub(\"RNGN\", strip) # Remove uuid strip = Tokenizer.uuid_re.sub(\"RNGU\", strip) # Remove heat", "heat_re = re.compile(r\"-\\w{12}[- \\\"$]\") comments = re.compile(r'(?:[\\s]*# |^%% |^#|^[\\s]*id = \").*') alpha_re =", "re.compile(r\"-\\w{12}[- \\\"$]\") comments = re.compile(r'(?:[\\s]*# |^%% |^#|^[\\s]*id = \").*') alpha_re = re.compile(r\"[^a-zA-Z_\\/\\s]+\") gitver_re", "'AUTH_test_') -> works power2_re = re.compile( r\"(?=\\b|_)_?(?:[\\w+/]{128}|[\\w+/]{64}|\" r\"[0-9a-fA-F]{40}|[0-9a-fA-F]{32})(?=\\b|_)\" ) uuid_re = 
re.compile(r\"(?=\\b|_)_?(?:%s|tx[^ ]{32})(?=\\b|_)\"", "= re.compile(r\"[^a-zA-Z_\\/\\s]+\") gitver_re = re.compile(r\"git\\w+\") digits_re = re.compile(r\"0x[0-9a-fA-F]{2,}|[0-9]+(?:\\.\\d+)?\") randpath_re = re.compile( r\"(?:/tmp/ansible\\.\\w{8}\" r\"|/tmp/tmp\\w{6}\"", "\", strip) # Remove git sha strip = Tokenizer.gitsha_re.sub(\"RNGG\", strip) # Remove hashes", "\"october|november|december\" ) SHORT_MONTHS = \"jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dev\" SHORT_DAYS = \"mon|tue|wed|thu|fri|sat|sun\" UUID_RE = r\"[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-\" \"?[0-9a-f]{12}\" IPV4_RE", "200' # ssh keys r\"|AAAA[A-Z][0-9]\" # hashed password r\"|\\$[0-9]\\$\" # Certificates r\"|-----BEGIN\" #", "Tokenizer.uuid_re.sub(\"RNGU\", strip) # Remove heat short uuid but keep spacing # ObjectName-2kbhkd45kcs3-ServiceName ->", "+= \" %sA %sB %sC %sD\" % (token, token, token, token) return strip", "https://en.wikipedia.org/wiki/Percent-encoding uri_percent_re = re.compile(r\"%[2345][0-9A-F]\") ip_re = re.compile(r\"%s|%s|%s\" % (IPV4_RE, IPV6_RE, MAC_RE)) # For", "(\"error\", \"fail\", \"warn\"): if token in strip.lower(): strip += \" %sA %sB %sC", "= line # Break URI percent encoding strip = Tokenizer.uri_percent_re.sub(\" \", strip) #", "For some unknown reason, '_' in (?=) doesn't work in prefix match #", "gitver_re = re.compile(r\"git\\w+\") digits_re = re.compile(r\"0x[0-9a-fA-F]{2,}|[0-9]+(?:\\.\\d+)?\") randpath_re = re.compile( r\"(?:/tmp/ansible\\.\\w{8}\" r\"|/tmp/tmp\\w{6}\" r\"|/tmp/tmp\\.\\w{10})\\b\" )", "strip) # Remove words that are exactly 32, 64 or 128 character longs", "= Tokenizer.digits_re.sub(\"\", strip) # Only keep characters strip = Tokenizer.alpha_re.sub(\" \", strip) #", "# Download statement r\"| ETA \" # yum mirrors information r\"|\\* [a-zA-Z]+: [a-zA-Z0-9\\.-]*$|Trying", "re.I) date_re = re.compile( r\"\\b(?:%s|%s|%s|%s)\\b\" % (DAYS, SHORT_DAYS, SHORT_MONTHS, MONTHS), re.I ) heat_re", "strip = 
Tokenizer.gitsha_re.sub(\"RNGG\", strip) # Remove hashes strip = Tokenizer.hash_re.sub(\"RNGH\", strip) # Remove", "# SPDX-License-Identifier: Apache-2.0 # A copy of logreduce.tokenizer import re import os DAYS", "methods\" r\"|unix_chkpwd.*: password check failed for user\" r\"|sshd.*: authentication failure\" r\"|sshd.*: Failed password", "# Ignore some raw pattern first if Tokenizer.rawline_re.search(line): return \"\" strip = line", "% (DAYS, SHORT_DAYS, SHORT_MONTHS, MONTHS), re.I ) heat_re = re.compile(r\"-\\w{12}[- \\\"$]\") comments =", "SHORT_DAYS, SHORT_MONTHS, MONTHS), re.I ) heat_re = re.compile(r\"-\\w{12}[- \\\"$]\") comments = re.compile(r'(?:[\\s]*# |^%%", "= r\"([0-9A-Fa-f]{0,4}:){2,6}(\\d{1,3}\\.){0,3}[0-9A-Fa-f]{1,3}\" MAC_RE = r\"([0-9a-fA-F]{2}[:-]){5}([0-9a-fA-F]{2})\" class Tokenizer: rawline_re = re.compile( # useless http", "\"jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dev\" SHORT_DAYS = \"mon|tue|wed|thu|fri|sat|sun\" UUID_RE = r\"[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-\" \"?[0-9a-f]{12}\" IPV4_RE = ( r\"(([01]?[0-9]?[0-9]|2[0-4][0-9]|2[5][0-5])\\.){3}\" r\"([01]?[0-9]?[0-9]|2[0-4][0-9]|2[5][0-5])\"", "re.compile(r'(?:[\\s]*# |^%% |^#|^[\\s]*id = \").*') alpha_re = re.compile(r\"[^a-zA-Z_\\/\\s]+\") gitver_re = re.compile(r\"git\\w+\") digits_re =", "\"january|february|march|april|may|june|july|august|september|\" \"october|november|december\" ) SHORT_MONTHS = \"jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dev\" SHORT_DAYS = \"mon|tue|wed|thu|fri|sat|sun\" UUID_RE = r\"[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-\" \"?[0-9a-f]{12}\"", "Red Hat # SPDX-License-Identifier: Apache-2.0 # A copy of logreduce.tokenizer import re import", "32, 64 or 128 character longs strip = Tokenizer.power2_re.sub(\"RNGN\", strip) # Remove uuid", "= r\"[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-\" \"?[0-9a-f]{12}\" IPV4_RE = ( r\"(([01]?[0-9]?[0-9]|2[0-4][0-9]|2[5][0-5])\\.){3}\" r\"([01]?[0-9]?[0-9]|2[0-4][0-9]|2[5][0-5])\" ) IPV6_RE 
= r\"([0-9A-Fa-f]{0,4}:){2,6}(\\d{1,3}\\.){0,3}[0-9A-Fa-f]{1,3}\" MAC_RE", "# Certificates r\"|-----BEGIN\" # git status r\"|HEAD is now at|Change-Id: \" # Download", "BECOME-SUCCESS-\" r\"|^[^ ]{64}$\" # useless debug statement r\"|ovs-ofctl .* (dump-ports|dump-flows|show)\\b\" r\"|(ip|eb)tables .* -L\\b\"", "re.compile( r\"(?=\\b|_)_?(?:[\\w+/]{128}|[\\w+/]{64}|\" r\"[0-9a-fA-F]{40}|[0-9a-fA-F]{32})(?=\\b|_)\" ) uuid_re = re.compile(r\"(?=\\b|_)_?(?:%s|tx[^ ]{32})(?=\\b|_)\" % UUID_RE, re.I) date_re =", "% (IPV4_RE, IPV6_RE, MAC_RE)) # For some unknown reason, '_' in (?=) doesn't", "Only keep characters strip = Tokenizer.alpha_re.sub(\" \", strip) # Remove tiny words strip", "GET r'\"GET / HTTP/1.1\"' r'|\"OPTIONS * HTTP/1.0\" 200' # ssh keys r\"|AAAA[A-Z][0-9]\" #", ".* -L\\b\" ) # See https://en.wikipedia.org/wiki/Percent-encoding uri_percent_re = re.compile(r\"%[2345][0-9A-F]\") ip_re = re.compile(r\"%s|%s|%s\" %", "password check failed for user\" r\"|sshd.*: authentication failure\" r\"|sshd.*: Failed password for\" r\"|sshd.*-", "keys r\"|AAAA[A-Z][0-9]\" # hashed password r\"|\\$[0-9]\\$\" # Certificates r\"|-----BEGIN\" # git status r\"|HEAD", "= re.compile(r\"0x[0-9a-fA-F]{2,}|[0-9]+(?:\\.\\d+)?\") randpath_re = re.compile( r\"(?:/tmp/ansible\\.\\w{8}\" r\"|/tmp/tmp\\w{6}\" r\"|/tmp/tmp\\.\\w{10})\\b\" ) gitsha_re = re.compile(r\"\\b\\w{7}\\.\\.\\w{7}\\b\") hash_re", "= re.compile( r\"(?:/tmp/ansible\\.\\w{8}\" r\"|/tmp/tmp\\w{6}\" r\"|/tmp/tmp\\.\\w{10})\\b\" ) gitsha_re = re.compile(r\"\\b\\w{7}\\.\\.\\w{7}\\b\") hash_re = re.compile(r\"SHA256:[\\w+/]{43}\\b\") @staticmethod", "= re.compile( # useless http GET r'\"GET / HTTP/1.1\"' r'|\"OPTIONS * HTTP/1.0\" 200'", "r\"|/tmp/tmp\\.\\w{10})\\b\" ) gitsha_re = re.compile(r\"\\b\\w{7}\\.\\.\\w{7}\\b\") hash_re = re.compile(r\"SHA256:[\\w+/]{43}\\b\") @staticmethod def process(line: str) ->", "r\"|HEAD is now at|Change-Id: \" # Download statement r\"| ETA \" # yum", "'RNG', 'AUTH_test_') -> works power2_re 
= re.compile( r\"(?=\\b|_)_?(?:[\\w+/]{128}|[\\w+/]{64}|\" r\"[0-9a-fA-F]{40}|[0-9a-fA-F]{32})(?=\\b|_)\" ) uuid_re = re.compile(r\"(?=\\b|_)_?(?:%s|tx[^", "r\"(?:/tmp/ansible\\.\\w{8}\" r\"|/tmp/tmp\\w{6}\" r\"|/tmp/tmp\\.\\w{10})\\b\" ) gitsha_re = re.compile(r\"\\b\\w{7}\\.\\.\\w{7}\\b\") hash_re = re.compile(r\"SHA256:[\\w+/]{43}\\b\") @staticmethod def process(line:", "strip) # Remove random path strip = Tokenizer.randpath_re.sub(\"RNGP\", strip) # Remove date strip", "randpath_re = re.compile( r\"(?:/tmp/ansible\\.\\w{8}\" r\"|/tmp/tmp\\w{6}\" r\"|/tmp/tmp\\.\\w{10})\\b\" ) gitsha_re = re.compile(r\"\\b\\w{7}\\.\\.\\w{7}\\b\") hash_re = re.compile(r\"SHA256:[\\w+/]{43}\\b\")", "Tokenizer.heat_re.sub(\" HEATID \", strip) # Remove git sha strip = Tokenizer.gitsha_re.sub(\"RNGG\", strip) #", "* HTTP/1.0\" 200' # ssh keys r\"|AAAA[A-Z][0-9]\" # hashed password r\"|\\$[0-9]\\$\" # Certificates", "\" # yum mirrors information r\"|\\* [a-zA-Z]+: [a-zA-Z0-9\\.-]*$|Trying other mirror.\" # ssh scan", "statement r\"|ovs-ofctl .* (dump-ports|dump-flows|show)\\b\" r\"|(ip|eb)tables .* -L\\b\" ) # See https://en.wikipedia.org/wiki/Percent-encoding uri_percent_re =", "ssh scan attempts r'|audit.*exe=\"/usr/sbin/sshd\"|sshd.*[iI]nvalid user' r\"|sshd.*Unable to connect using the available authentication methods\"", "SHORT_MONTHS, MONTHS), re.I ) heat_re = re.compile(r\"-\\w{12}[- \\\"$]\") comments = re.compile(r'(?:[\\s]*# |^%% |^#|^[\\s]*id", "r\"|sshd.*- POSSIBLE BREAK-IN ATTEMPT\" # zuul random test r\"|zuul.*echo BECOME-SUCCESS-\" r\"|^[^ ]{64}$\" #", "str) -> str: # Ignore some raw pattern first if Tokenizer.rawline_re.search(line): return \"\"", "Hat # SPDX-License-Identifier: Apache-2.0 # A copy of logreduce.tokenizer import re import os", "@staticmethod def process(line: str) -> str: # Ignore some raw pattern first if", "BREAK-IN ATTEMPT\" # zuul random test r\"|zuul.*echo BECOME-SUCCESS-\" r\"|^[^ ]{64}$\" # useless debug", "is now at|Change-Id: \" # Download 
statement r\"| ETA \" # yum mirrors", "connect using the available authentication methods\" r\"|unix_chkpwd.*: password check failed for user\" r\"|sshd.*:", "prefix match # re.sub(r'(?=\\b|_)test(?=\\b|_)', 'RNG', 'AUTH_test_') -> doesn't work # re.sub(r'(?=\\b|_)_?test(?=\\b|_)', 'RNG', 'AUTH_test_')", ".* (dump-ports|dump-flows|show)\\b\" r\"|(ip|eb)tables .* -L\\b\" ) # See https://en.wikipedia.org/wiki/Percent-encoding uri_percent_re = re.compile(r\"%[2345][0-9A-F]\") ip_re", "UUID_RE, re.I) date_re = re.compile( r\"\\b(?:%s|%s|%s|%s)\\b\" % (DAYS, SHORT_DAYS, SHORT_MONTHS, MONTHS), re.I )", "Weight failure token for token in (\"error\", \"fail\", \"warn\"): if token in strip.lower():", "r\"([01]?[0-9]?[0-9]|2[0-4][0-9]|2[5][0-5])\" ) IPV6_RE = r\"([0-9A-Fa-f]{0,4}:){2,6}(\\d{1,3}\\.){0,3}[0-9A-Fa-f]{1,3}\" MAC_RE = r\"([0-9a-fA-F]{2}[:-]){5}([0-9a-fA-F]{2})\" class Tokenizer: rawline_re = re.compile(", "Remove tiny words strip = \" \".join(filter(lambda x: len(x) > 3, strip.split())) #", "IPV6_RE, MAC_RE)) # For some unknown reason, '_' in (?=) doesn't work in", "re.compile(r\"SHA256:[\\w+/]{43}\\b\") @staticmethod def process(line: str) -> str: # Ignore some raw pattern first", "Apache-2.0 # A copy of logreduce.tokenizer import re import os DAYS = \"sunday|monday|tuesday|wednesday|thursday|friday|saturday\"", "(IPV4_RE, IPV6_RE, MAC_RE)) # For some unknown reason, '_' in (?=) doesn't work", ") SHORT_MONTHS = \"jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dev\" SHORT_DAYS = \"mon|tue|wed|thu|fri|sat|sun\" UUID_RE = r\"[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-\" \"?[0-9a-f]{12}\" IPV4_RE =", "-> works power2_re = re.compile( r\"(?=\\b|_)_?(?:[\\w+/]{128}|[\\w+/]{64}|\" r\"[0-9a-fA-F]{40}|[0-9a-fA-F]{32})(?=\\b|_)\" ) uuid_re = re.compile(r\"(?=\\b|_)_?(?:%s|tx[^ ]{32})(?=\\b|_)\" %", "exactly 32, 64 or 128 character longs strip = Tokenizer.power2_re.sub(\"RNGN\", strip) # Remove", "if token in strip.lower(): strip += \" %sA %sB %sC %sD\" % (token,", 
"re.sub(r'(?=\\b|_)_?test(?=\\b|_)', 'RNG', 'AUTH_test_') -> works power2_re = re.compile( r\"(?=\\b|_)_?(?:[\\w+/]{128}|[\\w+/]{64}|\" r\"[0-9a-fA-F]{40}|[0-9a-fA-F]{32})(?=\\b|_)\" ) uuid_re =", "uuid strip = Tokenizer.uuid_re.sub(\"RNGU\", strip) # Remove heat short uuid but keep spacing", "strip = Tokenizer.uuid_re.sub(\"RNGU\", strip) # Remove heat short uuid but keep spacing #", "now at|Change-Id: \" # Download statement r\"| ETA \" # yum mirrors information", "spacing # ObjectName-2kbhkd45kcs3-ServiceName -> ObjectName-HEATID-ServiceName strip = Tokenizer.heat_re.sub(\" HEATID \", strip) # Remove", "= \"jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dev\" SHORT_DAYS = \"mon|tue|wed|thu|fri|sat|sun\" UUID_RE = r\"[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-\" \"?[0-9a-f]{12}\" IPV4_RE = ( r\"(([01]?[0-9]?[0-9]|2[0-4][0-9]|2[5][0-5])\\.){3}\"", "(?=) doesn't work in prefix match # re.sub(r'(?=\\b|_)test(?=\\b|_)', 'RNG', 'AUTH_test_') -> doesn't work", "= re.compile(r\"%s|%s|%s\" % (IPV4_RE, IPV6_RE, MAC_RE)) # For some unknown reason, '_' in", "re import os DAYS = \"sunday|monday|tuesday|wednesday|thursday|friday|saturday\" MONTHS = ( \"january|february|march|april|may|june|july|august|september|\" \"october|november|december\" ) SHORT_MONTHS", "some unknown reason, '_' in (?=) doesn't work in prefix match # re.sub(r'(?=\\b|_)test(?=\\b|_)',", "re.I ) heat_re = re.compile(r\"-\\w{12}[- \\\"$]\") comments = re.compile(r'(?:[\\s]*# |^%% |^#|^[\\s]*id = \").*')", "for\" r\"|sshd.*- POSSIBLE BREAK-IN ATTEMPT\" # zuul random test r\"|zuul.*echo BECOME-SUCCESS-\" r\"|^[^ ]{64}$\"", "failed for user\" r\"|sshd.*: authentication failure\" r\"|sshd.*: Failed password for\" r\"|sshd.*- POSSIBLE BREAK-IN", "= Tokenizer.randpath_re.sub(\"RNGP\", strip) # Remove date strip = Tokenizer.date_re.sub(\"DATE\", strip) # Remove ip/addr", "% UUID_RE, re.I) date_re = re.compile( r\"\\b(?:%s|%s|%s|%s)\\b\" % (DAYS, SHORT_DAYS, SHORT_MONTHS, MONTHS), re.I", "r\"|\\$[0-9]\\$\" # 
Certificates r\"|-----BEGIN\" # git status r\"|HEAD is now at|Change-Id: \" #", "check failed for user\" r\"|sshd.*: authentication failure\" r\"|sshd.*: Failed password for\" r\"|sshd.*- POSSIBLE", "\"fail\", \"warn\"): if token in strip.lower(): strip += \" %sA %sB %sC %sD\"", "gitsha_re = re.compile(r\"\\b\\w{7}\\.\\.\\w{7}\\b\") hash_re = re.compile(r\"SHA256:[\\w+/]{43}\\b\") @staticmethod def process(line: str) -> str: #", "in (?=) doesn't work in prefix match # re.sub(r'(?=\\b|_)test(?=\\b|_)', 'RNG', 'AUTH_test_') -> doesn't", "but keep spacing # ObjectName-2kbhkd45kcs3-ServiceName -> ObjectName-HEATID-ServiceName strip = Tokenizer.heat_re.sub(\" HEATID \", strip)", "MONTHS), re.I ) heat_re = re.compile(r\"-\\w{12}[- \\\"$]\") comments = re.compile(r'(?:[\\s]*# |^%% |^#|^[\\s]*id =", "doesn't work # re.sub(r'(?=\\b|_)_?test(?=\\b|_)', 'RNG', 'AUTH_test_') -> works power2_re = re.compile( r\"(?=\\b|_)_?(?:[\\w+/]{128}|[\\w+/]{64}|\" r\"[0-9a-fA-F]{40}|[0-9a-fA-F]{32})(?=\\b|_)\"", "URI percent encoding strip = Tokenizer.uri_percent_re.sub(\" \", strip) # Remove words that are", "r\"|zuul.*echo BECOME-SUCCESS-\" r\"|^[^ ]{64}$\" # useless debug statement r\"|ovs-ofctl .* (dump-ports|dump-flows|show)\\b\" r\"|(ip|eb)tables .*", "\" # Download statement r\"| ETA \" # yum mirrors information r\"|\\* [a-zA-Z]+:", "strip.lower(): strip += \" %sA %sB %sC %sD\" % (token, token, token, token)", "some raw pattern first if Tokenizer.rawline_re.search(line): return \"\" strip = line # Break", "user\" r\"|sshd.*: authentication failure\" r\"|sshd.*: Failed password for\" r\"|sshd.*- POSSIBLE BREAK-IN ATTEMPT\" #", "ssh keys r\"|AAAA[A-Z][0-9]\" # hashed password r\"|\\$[0-9]\\$\" # Certificates r\"|-----BEGIN\" # git status", "# git status r\"|HEAD is now at|Change-Id: \" # Download statement r\"| ETA", "r\"|\\* [a-zA-Z]+: [a-zA-Z0-9\\.-]*$|Trying other mirror.\" # ssh scan attempts r'|audit.*exe=\"/usr/sbin/sshd\"|sshd.*[iI]nvalid user' r\"|sshd.*Unable to", 
"Tokenizer.randpath_re.sub(\"RNGP\", strip) # Remove date strip = Tokenizer.date_re.sub(\"DATE\", strip) # Remove ip/addr strip", "authentication failure\" r\"|sshd.*: Failed password for\" r\"|sshd.*- POSSIBLE BREAK-IN ATTEMPT\" # zuul random", "alpha_re = re.compile(r\"[^a-zA-Z_\\/\\s]+\") gitver_re = re.compile(r\"git\\w+\") digits_re = re.compile(r\"0x[0-9a-fA-F]{2,}|[0-9]+(?:\\.\\d+)?\") randpath_re = re.compile( r\"(?:/tmp/ansible\\.\\w{8}\"", "tiny words strip = \" \".join(filter(lambda x: len(x) > 3, strip.split())) # Weight", "SPDX-License-Identifier: Apache-2.0 # A copy of logreduce.tokenizer import re import os DAYS =", "MAC_RE)) # For some unknown reason, '_' in (?=) doesn't work in prefix", "'RNG', 'AUTH_test_') -> doesn't work # re.sub(r'(?=\\b|_)_?test(?=\\b|_)', 'RNG', 'AUTH_test_') -> works power2_re =", "path strip = Tokenizer.randpath_re.sub(\"RNGP\", strip) # Remove date strip = Tokenizer.date_re.sub(\"DATE\", strip) #", "first if Tokenizer.rawline_re.search(line): return \"\" strip = line # Break URI percent encoding", "words strip = \" \".join(filter(lambda x: len(x) > 3, strip.split())) # Weight failure", "= Tokenizer.hash_re.sub(\"RNGH\", strip) # Remove random path strip = Tokenizer.randpath_re.sub(\"RNGP\", strip) # Remove", "= re.compile(r\"(?=\\b|_)_?(?:%s|tx[^ ]{32})(?=\\b|_)\" % UUID_RE, re.I) date_re = re.compile( r\"\\b(?:%s|%s|%s|%s)\\b\" % (DAYS, SHORT_DAYS,", "Remove ip/addr strip = Tokenizer.ip_re.sub(\"RNGI\", strip) # Remove numbers strip = Tokenizer.digits_re.sub(\"\", strip)", "token in strip.lower(): strip += \" %sA %sB %sC %sD\" % (token, token,", "# A copy of logreduce.tokenizer import re import os DAYS = \"sunday|monday|tuesday|wednesday|thursday|friday|saturday\" MONTHS", "|^#|^[\\s]*id = \").*') alpha_re = re.compile(r\"[^a-zA-Z_\\/\\s]+\") gitver_re = re.compile(r\"git\\w+\") digits_re = re.compile(r\"0x[0-9a-fA-F]{2,}|[0-9]+(?:\\.\\d+)?\") randpath_re", "2022 Red Hat # SPDX-License-Identifier: Apache-2.0 # A copy of 
logreduce.tokenizer import re", "def process(line: str) -> str: # Ignore some raw pattern first if Tokenizer.rawline_re.search(line):", "128 character longs strip = Tokenizer.power2_re.sub(\"RNGN\", strip) # Remove uuid strip = Tokenizer.uuid_re.sub(\"RNGU\",", "useless debug statement r\"|ovs-ofctl .* (dump-ports|dump-flows|show)\\b\" r\"|(ip|eb)tables .* -L\\b\" ) # See https://en.wikipedia.org/wiki/Percent-encoding", "r\"|/tmp/tmp\\w{6}\" r\"|/tmp/tmp\\.\\w{10})\\b\" ) gitsha_re = re.compile(r\"\\b\\w{7}\\.\\.\\w{7}\\b\") hash_re = re.compile(r\"SHA256:[\\w+/]{43}\\b\") @staticmethod def process(line: str)", "line # Break URI percent encoding strip = Tokenizer.uri_percent_re.sub(\" \", strip) # Remove", "strip = Tokenizer.uri_percent_re.sub(\" \", strip) # Remove words that are exactly 32, 64", "HTTP/1.1\"' r'|\"OPTIONS * HTTP/1.0\" 200' # ssh keys r\"|AAAA[A-Z][0-9]\" # hashed password r\"|\\$[0-9]\\$\"", "(dump-ports|dump-flows|show)\\b\" r\"|(ip|eb)tables .* -L\\b\" ) # See https://en.wikipedia.org/wiki/Percent-encoding uri_percent_re = re.compile(r\"%[2345][0-9A-F]\") ip_re =", "]{64}$\" # useless debug statement r\"|ovs-ofctl .* (dump-ports|dump-flows|show)\\b\" r\"|(ip|eb)tables .* -L\\b\" ) #", "# See https://en.wikipedia.org/wiki/Percent-encoding uri_percent_re = re.compile(r\"%[2345][0-9A-F]\") ip_re = re.compile(r\"%s|%s|%s\" % (IPV4_RE, IPV6_RE, MAC_RE))", "r\"| ETA \" # yum mirrors information r\"|\\* [a-zA-Z]+: [a-zA-Z0-9\\.-]*$|Trying other mirror.\" #", "debug statement r\"|ovs-ofctl .* (dump-ports|dump-flows|show)\\b\" r\"|(ip|eb)tables .* -L\\b\" ) # See https://en.wikipedia.org/wiki/Percent-encoding uri_percent_re", "= re.compile(r\"%[2345][0-9A-F]\") ip_re = re.compile(r\"%s|%s|%s\" % (IPV4_RE, IPV6_RE, MAC_RE)) # For some unknown", "'_' in (?=) doesn't work in prefix match # re.sub(r'(?=\\b|_)test(?=\\b|_)', 'RNG', 'AUTH_test_') ->", "ip/addr strip = Tokenizer.ip_re.sub(\"RNGI\", strip) # Remove numbers strip = Tokenizer.digits_re.sub(\"\", 
strip) #", "r'|\"OPTIONS * HTTP/1.0\" 200' # ssh keys r\"|AAAA[A-Z][0-9]\" # hashed password r\"|\\$[0-9]\\$\" #", "r\"|(ip|eb)tables .* -L\\b\" ) # See https://en.wikipedia.org/wiki/Percent-encoding uri_percent_re = re.compile(r\"%[2345][0-9A-F]\") ip_re = re.compile(r\"%s|%s|%s\"", ") uuid_re = re.compile(r\"(?=\\b|_)_?(?:%s|tx[^ ]{32})(?=\\b|_)\" % UUID_RE, re.I) date_re = re.compile( r\"\\b(?:%s|%s|%s|%s)\\b\" %", "http GET r'\"GET / HTTP/1.1\"' r'|\"OPTIONS * HTTP/1.0\" 200' # ssh keys r\"|AAAA[A-Z][0-9]\"", "r\"|sshd.*: authentication failure\" r\"|sshd.*: Failed password for\" r\"|sshd.*- POSSIBLE BREAK-IN ATTEMPT\" # zuul", "= Tokenizer.date_re.sub(\"DATE\", strip) # Remove ip/addr strip = Tokenizer.ip_re.sub(\"RNGI\", strip) # Remove numbers", "at|Change-Id: \" # Download statement r\"| ETA \" # yum mirrors information r\"|\\*", "token for token in (\"error\", \"fail\", \"warn\"): if token in strip.lower(): strip +=", "encoding strip = Tokenizer.uri_percent_re.sub(\" \", strip) # Remove words that are exactly 32,", "unknown reason, '_' in (?=) doesn't work in prefix match # re.sub(r'(?=\\b|_)test(?=\\b|_)', 'RNG',", "# Remove words that are exactly 32, 64 or 128 character longs strip", "r\"(([01]?[0-9]?[0-9]|2[0-4][0-9]|2[5][0-5])\\.){3}\" r\"([01]?[0-9]?[0-9]|2[0-4][0-9]|2[5][0-5])\" ) IPV6_RE = r\"([0-9A-Fa-f]{0,4}:){2,6}(\\d{1,3}\\.){0,3}[0-9A-Fa-f]{1,3}\" MAC_RE = r\"([0-9a-fA-F]{2}[:-]){5}([0-9a-fA-F]{2})\" class Tokenizer: rawline_re =", "date strip = Tokenizer.date_re.sub(\"DATE\", strip) # Remove ip/addr strip = Tokenizer.ip_re.sub(\"RNGI\", strip) #", "[a-zA-Z0-9\\.-]*$|Trying other mirror.\" # ssh scan attempts r'|audit.*exe=\"/usr/sbin/sshd\"|sshd.*[iI]nvalid user' r\"|sshd.*Unable to connect using", ") gitsha_re = re.compile(r\"\\b\\w{7}\\.\\.\\w{7}\\b\") hash_re = re.compile(r\"SHA256:[\\w+/]{43}\\b\") @staticmethod def process(line: str) -> str:", "re.compile(r\"0x[0-9a-fA-F]{2,}|[0-9]+(?:\\.\\d+)?\") randpath_re = re.compile( 
r\"(?:/tmp/ansible\\.\\w{8}\" r\"|/tmp/tmp\\w{6}\" r\"|/tmp/tmp\\.\\w{10})\\b\" ) gitsha_re = re.compile(r\"\\b\\w{7}\\.\\.\\w{7}\\b\") hash_re =", "str: # Ignore some raw pattern first if Tokenizer.rawline_re.search(line): return \"\" strip =", "date_re = re.compile( r\"\\b(?:%s|%s|%s|%s)\\b\" % (DAYS, SHORT_DAYS, SHORT_MONTHS, MONTHS), re.I ) heat_re =", "process(line: str) -> str: # Ignore some raw pattern first if Tokenizer.rawline_re.search(line): return", "# hashed password r\"|\\$[0-9]\\$\" # Certificates r\"|-----BEGIN\" # git status r\"|HEAD is now", "# ssh scan attempts r'|audit.*exe=\"/usr/sbin/sshd\"|sshd.*[iI]nvalid user' r\"|sshd.*Unable to connect using the available authentication", "test r\"|zuul.*echo BECOME-SUCCESS-\" r\"|^[^ ]{64}$\" # useless debug statement r\"|ovs-ofctl .* (dump-ports|dump-flows|show)\\b\" r\"|(ip|eb)tables", "r\"|AAAA[A-Z][0-9]\" # hashed password r\"|\\$[0-9]\\$\" # Certificates r\"|-----BEGIN\" # git status r\"|HEAD is", "Remove git sha strip = Tokenizer.gitsha_re.sub(\"RNGG\", strip) # Remove hashes strip = Tokenizer.hash_re.sub(\"RNGH\",", "\", strip) # Remove tiny words strip = \" \".join(filter(lambda x: len(x) >", "= ( r\"(([01]?[0-9]?[0-9]|2[0-4][0-9]|2[5][0-5])\\.){3}\" r\"([01]?[0-9]?[0-9]|2[0-4][0-9]|2[5][0-5])\" ) IPV6_RE = r\"([0-9A-Fa-f]{0,4}:){2,6}(\\d{1,3}\\.){0,3}[0-9A-Fa-f]{1,3}\" MAC_RE = r\"([0-9a-fA-F]{2}[:-]){5}([0-9a-fA-F]{2})\" class Tokenizer:", "\"sunday|monday|tuesday|wednesday|thursday|friday|saturday\" MONTHS = ( \"january|february|march|april|may|june|july|august|september|\" \"october|november|december\" ) SHORT_MONTHS = \"jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dev\" SHORT_DAYS = \"mon|tue|wed|thu|fri|sat|sun\"", "len(x) > 3, strip.split())) # Weight failure token for token in (\"error\", \"fail\",", "Remove random path strip = Tokenizer.randpath_re.sub(\"RNGP\", strip) # Remove date strip = Tokenizer.date_re.sub(\"DATE\",", "reason, '_' in (?=) doesn't work in prefix match # 
re.sub(r'(?=\\b|_)test(?=\\b|_)', 'RNG', 'AUTH_test_')", "work in prefix match # re.sub(r'(?=\\b|_)test(?=\\b|_)', 'RNG', 'AUTH_test_') -> doesn't work # re.sub(r'(?=\\b|_)_?test(?=\\b|_)',", "r\"\\b(?:%s|%s|%s|%s)\\b\" % (DAYS, SHORT_DAYS, SHORT_MONTHS, MONTHS), re.I ) heat_re = re.compile(r\"-\\w{12}[- \\\"$]\") comments", "# Remove ip/addr strip = Tokenizer.ip_re.sub(\"RNGI\", strip) # Remove numbers strip = Tokenizer.digits_re.sub(\"\",", "Remove words that are exactly 32, 64 or 128 character longs strip =", "x: len(x) > 3, strip.split())) # Weight failure token for token in (\"error\",", "power2_re = re.compile( r\"(?=\\b|_)_?(?:[\\w+/]{128}|[\\w+/]{64}|\" r\"[0-9a-fA-F]{40}|[0-9a-fA-F]{32})(?=\\b|_)\" ) uuid_re = re.compile(r\"(?=\\b|_)_?(?:%s|tx[^ ]{32})(?=\\b|_)\" % UUID_RE, re.I)", "short uuid but keep spacing # ObjectName-2kbhkd45kcs3-ServiceName -> ObjectName-HEATID-ServiceName strip = Tokenizer.heat_re.sub(\" HEATID", "# Remove date strip = Tokenizer.date_re.sub(\"DATE\", strip) # Remove ip/addr strip = Tokenizer.ip_re.sub(\"RNGI\",", "return \"\" strip = line # Break URI percent encoding strip = Tokenizer.uri_percent_re.sub(\"", "percent encoding strip = Tokenizer.uri_percent_re.sub(\" \", strip) # Remove words that are exactly", "keep spacing # ObjectName-2kbhkd45kcs3-ServiceName -> ObjectName-HEATID-ServiceName strip = Tokenizer.heat_re.sub(\" HEATID \", strip) #", "> 3, strip.split())) # Weight failure token for token in (\"error\", \"fail\", \"warn\"):", "Tokenizer.hash_re.sub(\"RNGH\", strip) # Remove random path strip = Tokenizer.randpath_re.sub(\"RNGP\", strip) # Remove date", "# Remove random path strip = Tokenizer.randpath_re.sub(\"RNGP\", strip) # Remove date strip =", "Copyright (C) 2022 Red Hat # SPDX-License-Identifier: Apache-2.0 # A copy of logreduce.tokenizer", "# ObjectName-2kbhkd45kcs3-ServiceName -> ObjectName-HEATID-ServiceName strip = Tokenizer.heat_re.sub(\" HEATID \", strip) # Remove git", "-> doesn't work # 
re.sub(r'(?=\\b|_)_?test(?=\\b|_)', 'RNG', 'AUTH_test_') -> works power2_re = re.compile( r\"(?=\\b|_)_?(?:[\\w+/]{128}|[\\w+/]{64}|\"", "re.compile(r\"(?=\\b|_)_?(?:%s|tx[^ ]{32})(?=\\b|_)\" % UUID_RE, re.I) date_re = re.compile( r\"\\b(?:%s|%s|%s|%s)\\b\" % (DAYS, SHORT_DAYS, SHORT_MONTHS,", "-> str: # Ignore some raw pattern first if Tokenizer.rawline_re.search(line): return \"\" strip", "strip = Tokenizer.randpath_re.sub(\"RNGP\", strip) # Remove date strip = Tokenizer.date_re.sub(\"DATE\", strip) # Remove", "HTTP/1.0\" 200' # ssh keys r\"|AAAA[A-Z][0-9]\" # hashed password r\"|\\$[0-9]\\$\" # Certificates r\"|-----BEGIN\"", "git sha strip = Tokenizer.gitsha_re.sub(\"RNGG\", strip) # Remove hashes strip = Tokenizer.hash_re.sub(\"RNGH\", strip)", "strip = Tokenizer.alpha_re.sub(\" \", strip) # Remove tiny words strip = \" \".join(filter(lambda", "re.compile(r\"[^a-zA-Z_\\/\\s]+\") gitver_re = re.compile(r\"git\\w+\") digits_re = re.compile(r\"0x[0-9a-fA-F]{2,}|[0-9]+(?:\\.\\d+)?\") randpath_re = re.compile( r\"(?:/tmp/ansible\\.\\w{8}\" r\"|/tmp/tmp\\w{6}\" r\"|/tmp/tmp\\.\\w{10})\\b\"", "# Remove hashes strip = Tokenizer.hash_re.sub(\"RNGH\", strip) # Remove random path strip =", "strip) # Remove hashes strip = Tokenizer.hash_re.sub(\"RNGH\", strip) # Remove random path strip", "strip = Tokenizer.date_re.sub(\"DATE\", strip) # Remove ip/addr strip = Tokenizer.ip_re.sub(\"RNGI\", strip) # Remove", "\"warn\"): if token in strip.lower(): strip += \" %sA %sB %sC %sD\" %", "in prefix match # re.sub(r'(?=\\b|_)test(?=\\b|_)', 'RNG', 'AUTH_test_') -> doesn't work # re.sub(r'(?=\\b|_)_?test(?=\\b|_)', 'RNG',", "match # re.sub(r'(?=\\b|_)test(?=\\b|_)', 'RNG', 'AUTH_test_') -> doesn't work # re.sub(r'(?=\\b|_)_?test(?=\\b|_)', 'RNG', 'AUTH_test_') ->", "comments = re.compile(r'(?:[\\s]*# |^%% |^#|^[\\s]*id = \").*') alpha_re = re.compile(r\"[^a-zA-Z_\\/\\s]+\") gitver_re = re.compile(r\"git\\w+\")", "ip_re = re.compile(r\"%s|%s|%s\" % (IPV4_RE, IPV6_RE, MAC_RE)) # For 
some unknown reason, '_'", "words that are exactly 32, 64 or 128 character longs strip = Tokenizer.power2_re.sub(\"RNGN\",", "Remove heat short uuid but keep spacing # ObjectName-2kbhkd45kcs3-ServiceName -> ObjectName-HEATID-ServiceName strip =", "keep characters strip = Tokenizer.alpha_re.sub(\" \", strip) # Remove tiny words strip =", "Tokenizer.digits_re.sub(\"\", strip) # Only keep characters strip = Tokenizer.alpha_re.sub(\" \", strip) # Remove", "r\"([0-9A-Fa-f]{0,4}:){2,6}(\\d{1,3}\\.){0,3}[0-9A-Fa-f]{1,3}\" MAC_RE = r\"([0-9a-fA-F]{2}[:-]){5}([0-9a-fA-F]{2})\" class Tokenizer: rawline_re = re.compile( # useless http GET", "character longs strip = Tokenizer.power2_re.sub(\"RNGN\", strip) # Remove uuid strip = Tokenizer.uuid_re.sub(\"RNGU\", strip)", "Tokenizer.alpha_re.sub(\" \", strip) # Remove tiny words strip = \" \".join(filter(lambda x: len(x)", "Remove numbers strip = Tokenizer.digits_re.sub(\"\", strip) # Only keep characters strip = Tokenizer.alpha_re.sub(\"", "DAYS = \"sunday|monday|tuesday|wednesday|thursday|friday|saturday\" MONTHS = ( \"january|february|march|april|may|june|july|august|september|\" \"october|november|december\" ) SHORT_MONTHS = \"jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dev\" SHORT_DAYS", "for user\" r\"|sshd.*: authentication failure\" r\"|sshd.*: Failed password for\" r\"|sshd.*- POSSIBLE BREAK-IN ATTEMPT\"", "failure\" r\"|sshd.*: Failed password for\" r\"|sshd.*- POSSIBLE BREAK-IN ATTEMPT\" # zuul random test", "64 or 128 character longs strip = Tokenizer.power2_re.sub(\"RNGN\", strip) # Remove uuid strip", "import re import os DAYS = \"sunday|monday|tuesday|wednesday|thursday|friday|saturday\" MONTHS = ( \"january|february|march|april|may|june|july|august|september|\" \"october|november|december\" )", "r\"([0-9a-fA-F]{2}[:-]){5}([0-9a-fA-F]{2})\" class Tokenizer: rawline_re = re.compile( # useless http GET r'\"GET / HTTP/1.1\"'", "re.compile(r\"git\\w+\") digits_re = re.compile(r\"0x[0-9a-fA-F]{2,}|[0-9]+(?:\\.\\d+)?\") 
randpath_re = re.compile( r\"(?:/tmp/ansible\\.\\w{8}\" r\"|/tmp/tmp\\w{6}\" r\"|/tmp/tmp\\.\\w{10})\\b\" ) gitsha_re =", "r\"[0-9a-fA-F]{40}|[0-9a-fA-F]{32})(?=\\b|_)\" ) uuid_re = re.compile(r\"(?=\\b|_)_?(?:%s|tx[^ ]{32})(?=\\b|_)\" % UUID_RE, re.I) date_re = re.compile( r\"\\b(?:%s|%s|%s|%s)\\b\"", "(C) 2022 Red Hat # SPDX-License-Identifier: Apache-2.0 # A copy of logreduce.tokenizer import", "r\"(?=\\b|_)_?(?:[\\w+/]{128}|[\\w+/]{64}|\" r\"[0-9a-fA-F]{40}|[0-9a-fA-F]{32})(?=\\b|_)\" ) uuid_re = re.compile(r\"(?=\\b|_)_?(?:%s|tx[^ ]{32})(?=\\b|_)\" % UUID_RE, re.I) date_re = re.compile(" ]
[ "identifies a job. Returns ------- dict Dataflow plan \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/plan\")[\"plan\"] def get_vertex_ids(self,", "list List of metric names. \"\"\" return [ elem[\"id\"] for elem in _execute_rest_request(url=f\"{self.prefix}/metrics\")", "show_subtasks is true: Endpoint: [GET] /jobs/:jobid/checkpoints/details/:checkpointid/subtasks/:vertexid Parameters ---------- job_id: str 32-character hexadecimal string", "a location accessible by both the JobManager(s) and TaskManager(s) e.g. a location on", "a job. metric_names: list (optional) List of selected specific metric names. Default: <all", "= _execute_rest_request( url=f\"{self.prefix}/{job_id}/rescaling\", http_method=\"PATCH\", params=params )[\"triggerid\"] return JobTrigger(self.prefix, \"rescaling\", job_id, trigger_id) def create_savepoint(self,", "int Positive integer value that identifies a subtask. Returns ------- dict \"\"\" return", "for elem in _execute_rest_request(url=f\"{self.prefix}/metrics\") ] def metrics(self, metric_names=None, agg_modes=None, job_ids=None): \"\"\" Returns an", "that identifies a subtask. Returns ------- dict \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/{subtask_id}\") def get_attempt(self, subtask_id,", "[ elem[\"id\"] for elem in _execute_rest_request(url=f\"{self.prefix}/metrics\") ] def metrics(self, metric_names=None, agg_modes=None, job_ids=None): \"\"\"", "def get_checkpoint_details(self, job_id, checkpoint_id, show_subtasks=False): \"\"\" Returns details for a checkpoint. Endpoint: [GET]", "---------- subtask_id: int Positive integer value that identifies a subtask. attempt_id: int (Optional)", "through the subtask_ids() method. Default: <all subtasks>. Returns ------- dict Key-value pairs of", "aggregated across the respective subtasks. Endpoint: [GET] /jobs/:jobid/accumulators Parameters ---------- job_id: str 32-character", "metric names. Default: <all metrics> Returns ------- dict Job metrics. 
\"\"\" if metric_names", "a location on a distributed file-system or Object Store. Parameters ---------- job_id: str", "in query_result: metric_name = elem.pop(\"id\") result[metric_name] = elem[\"value\"] return result def subtasktimes(self): \"\"\"", "metric_names = self.metric_names() params = {\"get\": \",\".join(metric_names)} query_result = _execute_rest_request( url=f\"{self.prefix_url}/metrics\", params=params )", "of job_ids. Returns ------- list List of job ids. \"\"\" return [elem[\"id\"] for", "execution result of the selected job. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/execution-result\") def get_metrics(self, job_id, metric_names=None):", "\"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/subtasktimes\") def taskmanagers(self): \"\"\" Returns task information aggregated by task manager.", "params=params ) return dict([(elem[\"id\"], elem[\"value\"]) for elem in query_result]) def get_plan(self, job_id): \"\"\"", "that identifies a job. target_directory: str Savepoint target directory. drain: bool (Optional) If", "target_directory, cancel_job=False): \"\"\" Triggers a savepoint, and optionally cancels the job afterwards. This", "strings to select specific jobs. The list of valid jobs are available through", "List of selected specific metric names. Default: <all metrics> agg_modes: list (optional) List", "= {} if include_serialized_value is not None: params[\"includeSerializedValue\"] = ( \"true\" if include_serialized_value", "the selected execution attempt of a subtask. \"\"\" if attempt_id is None: attempt_id", "url=f\"{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}\", http_method=\"GET\", ) if not show_subtasks: return checkpoint_details subtasks = {} for vertex_id", "Default: <all taskmanagers>. Returns ------- dict Aggregated job metrics. 
\"\"\" if metric_names is", "_execute_rest_request( url=f\"{self._prefix}/{self.job_id}/{self._type_name}/{self.trigger_id}\" ) class JobVertexSubtaskClient: def __init__(self, prefix): \"\"\" Constructor. Parameters ---------- prefix:", "checkpoint_details[\"subtasks\"] = subtasks return checkpoint_details def rescale(self, job_id, parallelism): \"\"\" Triggers the rescaling", "job. When the watermark is emitted, all event time timers will fire, allowing", "hexadecimal strings to select specific jobs. The list of valid jobs are available", "subtask. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex Parameters ---------- subtask_id: int Positive integer value that identifies", "job_id Endpoint: [GET] /jobs/:jobid/checkpoints/config Parameters ---------- job_id: str 32-character hexadecimal string value that", "_execute_rest_request(url=f\"{self.prefix_url}/{subtask_id}\") def get_attempt(self, subtask_id, attempt_id=None): \"\"\" Returns details of an execution attempt of", "the back pressure stats are not available. Returns ------- dict Backpressure information \"\"\"", "time timers will fire, allowing you to process events that depend on this", "result = {} for elem in query_result: metric_name = elem.pop(\"id\") result[metric_name] = elem", "_execute_rest_request(url=f\"{self.prefix}/{job_id}/plan\")[\"plan\"] def get_vertex_ids(self, job_id): \"\"\" Returns the ids of vertices of the selected", "The list of valid jobs are available through the job_ids() method. Default: <all", "\"\"\" Returns the subtask identifiers. Returns ------- list Positive integer list of subtask", "Returns ------- list Positive integer list of subtask ids. \"\"\" return [elem[\"subtask\"] for", "url=f\"{self.prefix_url}/{subtask_id}/attempts/{attempt_id}\" ) def get_attempt_accumulators(self, subtask_id, attempt_id=None): \"\"\" Returns the accumulators of an execution", "metrics. 
Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/metrics Returns ------- dict Task metrics. \"\"\" if metric_names is", "JobVertexClient. Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job.", "http_method=\"POST\", accepted_status_code=202, json={\"cancel-job\": cancel_job, \"target-directory\": target_directory}, )[\"request-id\"] return JobTrigger(self.prefix, \"savepoints\", job_id, trigger_id) def", "a job. Returns ------- bool True if the job has been canceled, otherwise", "{} for elem in query_result: metric_name = elem.pop(\"id\") result[metric_name] = elem[\"value\"] return result", "drain=False): \"\"\" Stops a job with a savepoint. This async operation would return", "task manager. \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/taskmanagers\") def watermarks(self): \"\"\" Returns the watermarks for all", "metric names. \"\"\" return [ elem[\"id\"] for elem in _execute_rest_request(url=f\"{self.prefix_url}/metrics\") ] def metrics(self,", "User-defined accumulators \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/accumulators\") def metric_names(self): \"\"\" Returns the supported metric names.", "@property def status(self): return _execute_rest_request( url=f\"{self._prefix}/{self.job_id}/{self._type_name}/{self.trigger_id}\" ) class JobVertexSubtaskClient: def __init__(self, prefix): \"\"\"", "job. Endpoint: [PATCH] /jobs/:jobid Parameters ---------- job_id: str 32-character hexadecimal string value that", "\"\"\" return [ elem[\"id\"] for elem in _execute_rest_request(url=f\"{self.prefix_url}/metrics\") ] def metrics(self, metric_names=None): \"\"\"", "self._type_name = type_name self.job_id = job_id self.trigger_id = trigger_id @property def status(self): return", "f\"modes: {','.join(supported_agg_modes)}; given list: {','.join(agg_modes)}\" ) if job_ids is None: job_ids = self.job_ids()", "The most recent exceptions. 
\"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/exceptions\") def get_execution_result(self, job_id): \"\"\" Returns the", "Triggers the rescaling of a job. This async operation would return a 'triggerid'", "the selected job. \"\"\" return _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/config\", http_method=\"GET\" ) def get_checkpoints(self, job_id): \"\"\"", "of a subtask. Multiple execution attempts happen in case of failure/recovery. Parameters ----------", "of metric names. \"\"\" return [ elem[\"id\"] for elem in _execute_rest_request(url=f\"{self.prefix_url}/metrics\") ] def", "return _execute_rest_request( url=f\"{self._prefix}/{self.job_id}/{self._type_name}/{self.trigger_id}\" ) class JobVertexSubtaskClient: def __init__(self, prefix): \"\"\" Constructor. Parameters ----------", "url=f\"{self.prefix}/{job_id}/rescaling\", http_method=\"PATCH\", params=params )[\"triggerid\"] return JobTrigger(self.prefix, \"rescaling\", job_id, trigger_id) def create_savepoint(self, job_id, target_directory,", "that identifies a job. Returns ------- bool True if the job has been", "------- dict The most recent exceptions. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/exceptions\") def get_execution_result(self, job_id): \"\"\"", "Endpoint: [GET] /jobs/:jobid/execution-result Parameters ---------- job_id: str 32-character hexadecimal string value that identifies", "job with a savepoint. This async operation would return a JobTrigger for further", "prefix self.job_id = job_id self.vertex_id = vertex_id @property def prefix_url(self): return f\"{self._prefix}/{self.job_id}/vertices/{self.vertex_id}\" @property", "elem in self.all()] def overview(self): \"\"\" Returns an overview over all jobs. 
Endpoint:", "[GET] /jobs/:jobid/checkpoints/details/:checkpointid If show_subtasks is true: Endpoint: [GET] /jobs/:jobid/checkpoints/details/:checkpointid/subtasks/:vertexid Parameters ---------- job_id: str", "Positive integer value that specifies the desired parallelism. Returns ------- JobTrigger Object that", "See FLINK-12312. Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a", "get_accumulators(self, job_id, include_serialized_value=None): \"\"\" Returns the accumulators for all tasks of a job,", "str 32-character hexadecimal string value that identifies a job. checkpoint_id: int Long value", "query identifier. Endpoint: [GET] /jobs/:jobid/rescaling Notes ----- Using Flink version 1.12, the method", "is None: job_ids = self.job_ids() params = { \"get\": \",\".join(metric_names), \"agg\": \",\".join(agg_modes), \"jobs\":", "Details of the selected job. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}\") def get_config(self, job_id): \"\"\" Returns", "type_name, job_id, trigger_id): self._prefix = prefix self._type_name = type_name self.job_id = job_id self.trigger_id", "max, sum, avg\". Default: <all modes> subtask_ids: list List of positive integers to", "\"\"\" Returns an overview over all jobs. Endpoint: [GET] /jobs/metrics Parameters ---------- metric_names:", "_execute_rest_request( url=f\"{self.prefix}/{job_id}/accumulators\", http_method=\"GET\", params=params ) def get_checkpointing_configuration(self, job_id): \"\"\" Returns the checkpointing configuration", "for elem in query_result: metric_name = elem.pop(\"id\") result[metric_name] = elem return result def", "None: agg_modes = supported_agg_modes if len(set(agg_modes).difference(set(supported_agg_modes))) > 0: raise RestException( f\"The provided aggregation", "Store. Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job.", "available through the job_ids() method. Default: <all taskmanagers>. 
Returns ------- dict Aggregated job", "\"\"\" Returns details of an execution attempt of a subtask. Multiple execution attempts", "target directory has to be a location accessible by both the JobManager(s) and", "a job. Endpoint: [GET] /jobs/:jobid/checkpoints Parameters ---------- job_id: str 32-character hexadecimal string value", "_execute_rest_request(url=f\"{self.prefix}/{job_id}/config\") def get_exceptions(self, job_id): \"\"\" Returns the most recent exceptions that have been", "elem in self.accumulators()[\"subtasks\"]] def accumulators(self): \"\"\" Returns all user-defined accumulators for all subtasks", "get_attempt(self, subtask_id, attempt_id=None): \"\"\" Returns details of an execution attempt of a subtask.", "status of savepoint. \"\"\" trigger_id = _execute_rest_request( url=f\"{self.prefix}/{job_id}/savepoints\", http_method=\"POST\", accepted_status_code=202, json={\"cancel-job\": cancel_job, \"target-directory\":", "= [\"min\", \"max\", \"sum\", \"avg\"] if agg_modes is None: agg_modes = supported_agg_modes if", "if subtask_ids is None: subtask_ids = self.subtask_ids() params = { \"get\": \",\".join(metric_names), \"agg\":", "hexadecimal string value that identifies a job. Returns ------- dict The execution result", "identifies a job. include_serialized_value: bool (Optional) Boolean value that specifies whether serialized user", "json={\"cancel-job\": cancel_job, \"target-directory\": target_directory}, )[\"request-id\"] return JobTrigger(self.prefix, \"savepoints\", job_id, trigger_id) def terminate(self, job_id):", "of metrics. \"\"\" if metric_names is None: metric_names = self.metric_names() supported_agg_modes = [\"min\",", "Endpoint: [GET] /jobs/overview Returns ------- list List of existing jobs. 
\"\"\" return _execute_rest_request(url=f\"{self.prefix}/overview\")[\"jobs\"]", "trigger_id @property def status(self): return _execute_rest_request( url=f\"{self._prefix}/{self.job_id}/{self._type_name}/{self.trigger_id}\" ) class JobVertexSubtaskClient: def __init__(self, prefix):", "job. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}\") def get_config(self, job_id): \"\"\" Returns the configuration of a", "def get(self, job_id): \"\"\" Returns details of a job. Endpoint: [GET] /jobs/:jobid Parameters", "string value that identifies a job. Returns ------- dict The most recent exceptions.", "happen in case of failure/recovery. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex/attempts/:attempt Parameters ---------- subtask_id: int Positive", "elem in _execute_rest_request(url=f\"{self.prefix_url}/metrics\") ] def metrics(self, metric_names=None): \"\"\" Provides access to task metrics.", "a job. vertex_id: str 32-character hexadecimal string value that identifies a vertex. Returns", "\" f\"modes: {','.join(supported_agg_modes)}; given list: {','.join(agg_modes)}\" ) if subtask_ids is None: subtask_ids =", "identifies a checkpoint. show_subtasks: bool If it is True, the details of the", "string value that identifies a job. Returns ------- dict Dataflow plan \"\"\" return", "current execution attempt's id Returns ------- dict Details of the selected attempt. \"\"\"", "[\"min\", \"max\", \"sum\", \"avg\"] if agg_modes is None: agg_modes = supported_agg_modes if len(set(agg_modes).difference(set(supported_agg_modes)))", "string value that identifies a job. Returns ------- list List of checkpoint ids.", "By default it returns with all existing metric names. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/metrics Parameters", "Task information aggregated by task manager. 
\"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/taskmanagers\") def watermarks(self): \"\"\" Returns", "_execute_rest_request(url=f\"{self.prefix_url}/metrics\") ] def metrics(self, metric_names=None): \"\"\" Provides access to task metrics. Endpoint: [GET]", "\"\"\" Returns the watermarks for all subtasks of a task. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/watermarks", "32-character hexadecimal string value that identifies a job. Returns ------- dict Details of", "metric_name = elem.pop(\"id\") result[metric_name] = elem return result def get(self, job_id): \"\"\" Returns", "value that identifies a job. Returns ------- dict Dataflow plan \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/plan\")[\"plan\"]", "checkpointing configuration of the selected job_id Endpoint: [GET] /jobs/:jobid/checkpoints/config Parameters ---------- job_id: str", "or latest execution attempt of a subtask. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex Parameters ---------- subtask_id:", "of the selected attempt. \"\"\" if attempt_id is None: return self.get(subtask_id) return _execute_rest_request(", "in subtask_ids]), } query_result = _execute_rest_request( url=f\"{self.prefix_url}/metrics\", params=params ) result = {} for", "job. Endpoint: [GET] /jobs/:jobid/plan Parameters ---------- job_id: str 32-character hexadecimal string value that", "savepoint. \"\"\" data = { \"drain\": False if drain is None else drain,", "calculated. Available aggregations are: \"min, max, sum, avg\". Default: <all modes> job_ids: list", "[GET] /jobs/:jobid/accumulators Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a", "on a distributed file-system or Object Store. Parameters ---------- job_id: str 32-character hexadecimal", "identifies a job. Returns ------- dict Details of the selected job. 
\"\"\" return", "None: metric_names = self.metric_names() params = {\"get\": \",\".join(metric_names)} query_result = _execute_rest_request( url=f\"{self.prefix_url}/metrics\", params=params", "elem.pop(\"id\") result[metric_name] = elem return result def get(self, job_id): \"\"\" Returns details of", "[elem[\"subtask\"] for elem in self.accumulators()[\"subtasks\"]] def accumulators(self): \"\"\" Returns all user-defined accumulators for", "/jobs/:jobid/config Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job.", "\"max\", \"sum\", \"avg\"] if agg_modes is None: agg_modes = supported_agg_modes if len(set(agg_modes).difference(set(supported_agg_modes))) >", "Notes ----- The deprecated status means that the back pressure stats are not", "\"\"\" Returns the accumulators of an execution attempt of a subtask. Multiple execution", "_execute_rest_request(url=f\"{self.prefix_url}/backpressure\") def metric_names(self): \"\"\" Returns the supported metric names. Returns ------- list List", "return [elem[\"id\"] for elem in self.get_checkpoints(job_id=job_id)[\"history\"]] def get_checkpoint_details(self, job_id, checkpoint_id, show_subtasks=False): \"\"\" Returns", "[GET] /jobs/:jobid/vertices/:vertexid/subtasktimes Returns ------- dict Time-related information for all subtasks \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/subtasktimes\")", "Default: <all metrics> Returns ------- dict Job metrics. \"\"\" if metric_names is None:", "subtask identifiers. Returns ------- list Positive integer list of subtask ids. \"\"\" return", "job metrics. \"\"\" if metric_names is None: metric_names = self.metric_names() supported_agg_modes = [\"min\",", ") def get_checkpoints(self, job_id): \"\"\" Returns checkpointing statistics for a job. Endpoint: [GET]", "query the status of savepoint. \"\"\" data = { \"drain\": False if drain", "case of failure/recovery. 
Parameters ---------- subtask_id: int Positive integer value that identifies a", "= self.get(subtask_id)[\"attempt\"] return _execute_rest_request( url=f\"{self.prefix_url}/{subtask_id}/attempts/{attempt_id}/accumulators\" ) class JobVertexClient: def __init__(self, prefix, job_id, vertex_id):", "subtask. \"\"\" if attempt_id is None: attempt_id = self.get(subtask_id)[\"attempt\"] return _execute_rest_request( url=f\"{self.prefix_url}/{subtask_id}/attempts/{attempt_id}/accumulators\" )", "= _execute_rest_request( url=f\"{self.prefix}/{job_id}/stop\", http_method=\"POST\", accepted_status_code=202, json=data, )[\"request-id\"] return JobTrigger(self.prefix, \"savepoints\", job_id, trigger_id) def", "value that identifies a job. metric_names: list (optional) List of selected specific metric", "def get_accumulators(self, job_id, include_serialized_value=None): \"\"\" Returns the accumulators for all tasks of a", "overview over all jobs. Endpoint: [GET] /jobs/metrics Parameters ---------- metric_names: list (optional) List", "pairs of metrics. \"\"\" if metric_names is None: metric_names = self.metric_names() supported_agg_modes =", "of 32-character hexadecimal strings to select specific jobs. The list of valid jobs", "would return a JobTrigger for further query identifier. Endpoint: [GET] /jobs/:jobid/savepoints Notes -----", "aggregation modes which should be calculated. Available aggregations are: \"min, max, sum, avg\".", "a job. Returns ------- dict Checkpointing statistics for the selected job: counts, summary,", "------- list Watermarks for all subtasks of a task. \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/watermarks\") class", "of vertices of the selected job. 
Parameters ---------- job_id: str 32-character hexadecimal string", "None: return self.get(subtask_id) return _execute_rest_request( url=f\"{self.prefix_url}/{subtask_id}/attempts/{attempt_id}\" ) def get_attempt_accumulators(self, subtask_id, attempt_id=None): \"\"\" Returns", "return _execute_rest_request( url=f\"{self.prefix}/{job_id}/accumulators\", http_method=\"GET\", params=params ) def get_checkpointing_configuration(self, job_id): \"\"\" Returns the checkpointing", "result of a job execution. Gives access to the execution time of the", "job_id): \"\"\" Returns checkpoint ids of the job_id. Parameters ---------- job_id: str 32-character", "Watermarks for all subtasks of a task. \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/watermarks\") class JobsClient: def", "query_result: metric_name = elem.pop(\"id\") result[metric_name] = elem return result def get(self, job_id): \"\"\"", "is True, it emits the maximum watermark before stopping the job. default: False", "1.12, the method will raise RestHandlerException because this rescaling is temporarily disabled. See", "current state. \"\"\" return _execute_rest_request(url=self.prefix)[\"jobs\"] def job_ids(self): \"\"\" Returns the list of job_ids.", "of jobs and their current state. \"\"\" return _execute_rest_request(url=self.prefix)[\"jobs\"] def job_ids(self): \"\"\" Returns", "def rescale(self, job_id, parallelism): \"\"\" Triggers the rescaling of a job. This async", "elem in subtask_ids]), } query_result = _execute_rest_request( url=f\"{self.prefix_url}/metrics\", params=params ) result = {}", "drain is None else drain, \"targetDirectory\": target_directory, } trigger_id = _execute_rest_request( url=f\"{self.prefix}/{job_id}/stop\", http_method=\"POST\",", "\"\"\" Returns the accumulators for all tasks of a job, aggregated across the", "Endpoint: [GET] /jobs/:jobid/exceptions Parameters ---------- job_id: str 32-character hexadecimal string value that identifies", "an overview over all jobs. 
Endpoint: [GET] /jobs/metrics Parameters ---------- metric_names: list (optional)", "location on a distributed file-system or Object Store. Parameters ---------- job_id: str 32-character", "<all modes> job_ids: list List of 32-character hexadecimal strings to select specific jobs.", "------- dict User-defined accumulators \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/accumulators\") def metric_names(self): \"\"\" Returns the supported", "JobVertexClient instance that can execute vertex related queries. \"\"\" return JobVertexClient(self.prefix, job_id, vertex_id)", "\"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/backpressure\") def metric_names(self): \"\"\" Returns the supported metric names. Returns -------", "summary, latest and history. \"\"\" return _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints\", http_method=\"GET\" ) def get_checkpoint_ids(self, job_id):", "return _execute_rest_request( url=f\"{self.prefix_url}/{subtask_id}/attempts/{attempt_id}/accumulators\" ) class JobVertexClient: def __init__(self, prefix, job_id, vertex_id): \"\"\" Constructor.", "savepoint, and optionally cancels the job afterwards. This async operation would return a", "= f\"{prefix}/jobs\" def all(self): \"\"\" Returns an overview over all jobs and their", "subtask_ids(self): \"\"\" Returns the subtask identifiers. Returns ------- list Positive integer list of", "Returns ------- list List of checkpoint ids. \"\"\" return [elem[\"id\"] for elem in", "of existing jobs. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/overview\")[\"jobs\"] def metric_names(self): \"\"\" Returns the supported metric", "information for all subtasks \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/subtasktimes\") def taskmanagers(self): \"\"\" Returns task information", "the job has been canceled, otherwise False. 
\"\"\" res = _execute_rest_request( url=f\"{self.prefix}/{job_id}\", http_method=\"PATCH\",", "and may initiate back-pressure sampling if necessary. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/backpressure Notes ----- The", "a job. Endpoint: [GET] /jobs/:jobid Parameters ---------- job_id: str 32-character hexadecimal string value", "unhandled events or state. Endpoint: [GET] /jobs/:jobid/stop Parameters ---------- job_id: str 32-character hexadecimal", "will raise RestHandlerException because this rescaling is temporarily disabled. See FLINK-12312. Parameters ----------", "elem return result def get(self, job_id): \"\"\" Returns details of a job. Endpoint:", "int Long value that identifies a checkpoint. show_subtasks: bool If it is True,", "pair. \"\"\" self._prefix = prefix @property def prefix_url(self): return f\"{self._prefix}/subtasks\" def subtask_ids(self): \"\"\"", "serialized user task accumulators should be included in the response. Returns ------- dict", "self.job_id = job_id self.trigger_id = trigger_id @property def status(self): return _execute_rest_request( url=f\"{self._prefix}/{self.job_id}/{self._type_name}/{self.trigger_id}\" )", "\"true\" if include_serialized_value else \"false\" ) return _execute_rest_request( url=f\"{self.prefix}/{job_id}/accumulators\", http_method=\"GET\", params=params ) def", "of aggregation modes which should be calculated. Available aggregations are: \"min, max, sum,", "value that identifies a subtask. Returns ------- dict \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/{subtask_id}\") def get_attempt(self,", "------- dict Task information aggregated by task manager. \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/taskmanagers\") def watermarks(self):", "the desired parallelism. Returns ------- JobTrigger Object that can be used to query", "_execute_rest_request(url=self.prefix)[\"jobs\"] def job_ids(self): \"\"\" Returns the list of job_ids. 
Returns ------- list List", "job_ids: list List of 32-character hexadecimal strings to select specific jobs. The list", "the host, port pair. \"\"\" self.prefix = f\"{prefix}/jobs\" def all(self): \"\"\" Returns an", "JobTrigger Object that can be used to query the status of savepoint. \"\"\"", "[GET] /jobs/:jobid/checkpoints/details/:checkpointid/subtasks/:vertexid Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a", "create_savepoint(self, job_id, target_directory, cancel_job=False): \"\"\" Triggers a savepoint, and optionally cancels the job", "and TaskManager(s) e.g. a location on a distributed file-system or Object Store. Parameters", "result[metric_name] = elem return result def get(self, job_id): \"\"\" Returns details of a", "__init__(self, prefix): \"\"\" Constructor. Parameters ---------- prefix: str REST API url prefix. It", "f\"{prefix}/jobs\" def all(self): \"\"\" Returns an overview over all jobs and their current", "operation would return a 'triggerid' for further query identifier. Endpoint: [GET] /jobs/:jobid/rescaling Notes", "drain: bool (Optional) If it is True, it emits the maximum watermark before", "get_vertex(self, job_id, vertex_id): \"\"\" Returns a JobVertexClient. Parameters ---------- job_id: str 32-character hexadecimal", "of positive integers to select specific subtasks. The list of valid subtask ids", "------- dict Aggregated job metrics. \"\"\" if metric_names is None: metric_names = self.metric_names()", "details of the current or latest execution attempt of a subtask. Endpoint: [GET]", "initiate back-pressure sampling if necessary. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/backpressure Notes ----- The deprecated status", "job_id, metric_names=None): \"\"\" Provides access to job metrics. 
Endpoint: [GET] /jobs/:jobid/metrics Parameters ----------", "/jobs/:jobid/metrics Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job.", "[GET] /jobs/:jobid/exceptions Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a", "prefix, job_id, vertex_id): \"\"\" Constructor. Parameters ---------- prefix: str REST API url prefix.", "int Positive integer value that specifies the desired parallelism. Returns ------- JobTrigger Object", "manager. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/taskmanagers Returns ------- dict Task information aggregated by task manager.", "that identifies a job. target_directory: str Savepoint target directory. cancel_job: bool If it", "access to job metrics. Endpoint: [GET] /jobs/:jobid/metrics Parameters ---------- job_id: str 32-character hexadecimal", "list List of identifiers. \"\"\" return [elem[\"id\"] for elem in self.get(job_id)[\"vertices\"]] def get_accumulators(self,", "access to task metrics. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/metrics Returns ------- dict Task metrics. \"\"\"", "happen in case of failure/recovery. Parameters ---------- subtask_id: int Positive integer value that", "contains invalid value. Supported aggregation \" f\"modes: {','.join(supported_agg_modes)}; given list: {','.join(agg_modes)}\" ) if", "List of 32-character hexadecimal strings to select specific jobs. The list of valid", "be used to query the status of savepoint. \"\"\" trigger_id = _execute_rest_request( url=f\"{self.prefix}/{job_id}/savepoints\",", "prefix): \"\"\" Constructor. Parameters ---------- prefix: str REST API url prefix. It must", "string value that identifies a job. Returns ------- list List of identifiers. 
\"\"\"", "backpressure(self): \"\"\" Returns back-pressure information for a job, and may initiate back-pressure sampling", "http_method=\"POST\", accepted_status_code=202, json=data, )[\"request-id\"] return JobTrigger(self.prefix, \"savepoints\", job_id, trigger_id) def get_vertex(self, job_id, vertex_id):", "configuration of a job. Endpoint: [GET] /jobs/:jobid/config Parameters ---------- job_id: str 32-character hexadecimal", "over all jobs. Endpoint: [GET] /jobs/metrics Parameters ---------- metric_names: list (optional) List of", "in the response. Returns ------- dict Accumulators for all task. \"\"\" params =", "Returns ------- dict Checkpointing statistics for the selected job: counts, summary, latest and", "url=f\"{self.prefix}/{job_id}/metrics\", params=params ) return dict([(elem[\"id\"], elem[\"value\"]) for elem in query_result]) def get_plan(self, job_id):", "any unhandled events or state. Endpoint: [GET] /jobs/:jobid/stop Parameters ---------- job_id: str 32-character", "= job_id self.vertex_id = vertex_id @property def prefix_url(self): return f\"{self._prefix}/{self.job_id}/vertices/{self.vertex_id}\" @property def subtasks(self):", "Returns ------- dict Checkpointing configuration of the selected job. \"\"\" return _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/config\",", "elem in query_result]) def get_plan(self, job_id): \"\"\" Returns the dataflow plan of a", "back pressure stats are not available. Returns ------- dict Backpressure information \"\"\" return", "attempts happen in case of failure/recovery. Parameters ---------- subtask_id: int Positive integer value", "dict Backpressure information \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/backpressure\") def metric_names(self): \"\"\" Returns the supported metric", "<all metrics> Returns ------- dict Job metrics. \"\"\" if metric_names is None: metric_names", "you to process events that depend on this timer (e.g. 
time windows or", "---------- job_id: str 32-character hexadecimal string value that identifies a job. vertex_id: str", "list of job_ids. Returns ------- list List of job ids. \"\"\" return [elem[\"id\"]", "\"\"\" Returns the configuration of a job. Endpoint: [GET] /jobs/:jobid/config Parameters ---------- job_id:", "Returns ------- dict Key-value pairs of metrics. \"\"\" if metric_names is None: metric_names", "get_vertex_ids(self, job_id): \"\"\" Returns the ids of vertices of the selected job. Parameters", "Job metrics. \"\"\" if metric_names is None: metric_names = self.metric_names() params = {\"get\":", "the ids of vertices of the selected job. Parameters ---------- job_id: str 32-character", "dict User-defined accumulators \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/accumulators\") def metric_names(self): \"\"\" Returns the supported metric", "task. \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/watermarks\") class JobsClient: def __init__(self, prefix): \"\"\" Constructor. Parameters ----------", "a job. This async operation would return a 'triggerid' for further query identifier.", "time-related information for all subtasks of a task. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasktimes Returns -------", "\",\".join(metric_names)} query_result = _execute_rest_request( url=f\"{self.prefix}/{job_id}/metrics\", params=params ) return dict([(elem[\"id\"], elem[\"value\"]) for elem in", "get_config(self, job_id): \"\"\" Returns the configuration of a job. Endpoint: [GET] /jobs/:jobid/config Parameters", "statistics for the selected job: counts, summary, latest and history. \"\"\" return _execute_rest_request(", "method. Default: <all taskmanagers>. Returns ------- dict Aggregated job metrics. \"\"\" if metric_names", "aggregation \" f\"modes: {','.join(supported_agg_modes)}; given list: {','.join(agg_modes)}\" ) if subtask_ids is None: subtask_ids", "accumulators should be included in the response. 
Returns ------- dict Accumulators for all", "hexadecimal string value that identifies a job. include_serialized_value: bool (Optional) Boolean value that", "= _execute_rest_request( url=f\"{self.prefix}/{job_id}/savepoints\", http_method=\"POST\", accepted_status_code=202, json={\"cancel-job\": cancel_job, \"target-directory\": target_directory}, )[\"request-id\"] return JobTrigger(self.prefix, \"savepoints\",", "default: False Returns ------- JobTrigger Object that can be used to query the", "for further query identifier. Endpoint: [GET] /jobs/:jobid/savepoints Notes ----- The target directory has", "if necessary. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/backpressure Notes ----- The deprecated status means that the", "specific metric names. Default: <all metrics> agg_modes: list (optional) List of aggregation modes", "the details of the subtask are also returned. Returns ------- dict \"\"\" checkpoint_details", "http_method=\"GET\", ) checkpoint_details[\"subtasks\"] = subtasks return checkpoint_details def rescale(self, job_id, parallelism): \"\"\" Triggers", "string value that identifies a job. vertex_id: str 32-character hexadecimal string value that", "for elem in _execute_rest_request(url=f\"{self.prefix_url}/metrics\") ] def metrics(self, metric_names=None, agg_modes=None, subtask_ids=None): \"\"\" Provides access", "string value that identifies a job. metric_names: list (optional) List of selected specific", "target_directory: str Savepoint target directory. drain: bool (Optional) If it is True, it", "returned. 
Returns ------- dict \"\"\" checkpoint_details = _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}\", http_method=\"GET\", ) if not", "None: subtask_ids = self.subtask_ids() params = { \"get\": \",\".join(metric_names), \"agg\": \",\".join(agg_modes), \"subtasks\": \",\".join([str(elem)", "\"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/exceptions\") def get_execution_result(self, job_id): \"\"\" Returns the result of a job", "task. \"\"\" return _execute_rest_request(url=self.prefix_url) def backpressure(self): \"\"\" Returns back-pressure information for a job,", "It must contain the host, port pair. \"\"\" self.prefix = f\"{prefix}/jobs\" def all(self):", "JobManager(s) and TaskManager(s) e.g. a location on a distributed file-system or Object Store.", "is None: attempt_id = self.get(subtask_id)[\"attempt\"] return _execute_rest_request( url=f\"{self.prefix_url}/{subtask_id}/attempts/{attempt_id}/accumulators\" ) class JobVertexClient: def __init__(self,", "------- dict Checkpointing statistics for the selected job: counts, summary, latest and history.", "It must contain the host, port pair. \"\"\" self._prefix = prefix @property def", "32-character hexadecimal string value that identifies a job. Returns ------- dict Dataflow plan", "that identifies a job. Returns ------- dict Job configuration \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/config\") def", "32-character hexadecimal string value that identifies a job. Returns ------- dict The most", "str 32-character hexadecimal string value that identifies a vertex. Returns ------- JobVertexClient JobVertexClient", "= elem return result def get(self, subtask_id): \"\"\" Returns details of the current", "job. 
Returns ------- dict Job configuration \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/config\") def get_exceptions(self, job_id): \"\"\"", "= elem[\"value\"] return result def subtasktimes(self): \"\"\" Returns time-related information for all subtasks", "the result of a job execution. Gives access to the execution time of", ") class JobVertexSubtaskClient: def __init__(self, prefix): \"\"\" Constructor. Parameters ---------- prefix: str REST", "is None: return self.get(subtask_id) return _execute_rest_request( url=f\"{self.prefix_url}/{subtask_id}/attempts/{attempt_id}\" ) def get_attempt_accumulators(self, subtask_id, attempt_id=None): \"\"\"", "case of failure/recovery. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex/attempts/:attempt Parameters ---------- subtask_id: int Positive integer value", "_execute_rest_request( url=f\"{self.prefix}/{job_id}/rescaling\", http_method=\"PATCH\", params=params )[\"triggerid\"] return JobTrigger(self.prefix, \"rescaling\", job_id, trigger_id) def create_savepoint(self, job_id,", "Using Flink version 1.12, the method will raise RestHandlerException because this rescaling is", "job has been canceled, otherwise False. \"\"\" res = _execute_rest_request( url=f\"{self.prefix}/{job_id}\", http_method=\"PATCH\", accepted_status_code=202", "modes list contains invalid value. Supported aggregation \" f\"modes: {','.join(supported_agg_modes)}; given list: {','.join(agg_modes)}\"", "_execute_rest_request( url=f\"{self.prefix}/{job_id}/stop\", http_method=\"POST\", accepted_status_code=202, json=data, )[\"request-id\"] return JobTrigger(self.prefix, \"savepoints\", job_id, trigger_id) def get_vertex(self,", "def get_config(self, job_id): \"\"\" Returns the configuration of a job. 
Endpoint: [GET] /jobs/:jobid/config", "\"\"\" self._prefix = prefix self.job_id = job_id self.vertex_id = vertex_id @property def prefix_url(self):", "in _execute_rest_request(url=f\"{self.prefix_url}/metrics\") ] def metrics(self, metric_names=None, agg_modes=None, subtask_ids=None): \"\"\" Provides access to aggregated", "Parameters ---------- subtask_id: int Positive integer value that identifies a subtask. Returns -------", "that identifies a job. metric_names: list (optional) List of selected specific metric names.", "stop(self, job_id, target_directory, drain=False): \"\"\" Stops a job with a savepoint. This async", ") class JobVertexClient: def __init__(self, prefix, job_id, vertex_id): \"\"\" Constructor. Parameters ---------- prefix:", "bool (Optional) Boolean value that specifies whether serialized user task accumulators should be", "self.get(job_id)[\"vertices\"]] def get_accumulators(self, job_id, include_serialized_value=None): \"\"\" Returns the accumulators for all tasks of", "\"get\": \",\".join(metric_names), \"agg\": \",\".join(agg_modes), \"subtasks\": \",\".join([str(elem) for elem in subtask_ids]), } query_result =", "metrics> Returns ------- dict Job metrics. \"\"\" if metric_names is None: metric_names =", "the host, port pair. \"\"\" self._prefix = prefix @property def prefix_url(self): return f\"{self._prefix}/subtasks\"", "also stops the job after the savepoint creation. Returns ------- JobTrigger Object that", "This is useful when you want to fully shut down your job without", "or state. Endpoint: [GET] /jobs/:jobid/stop Parameters ---------- job_id: str 32-character hexadecimal string value", "FLINK-12312. Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job.", "identifies a job. 
Returns ------- dict Checkpointing statistics for the selected job: counts,", "\"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/plan\")[\"plan\"] def get_vertex_ids(self, job_id): \"\"\" Returns the ids of vertices of", "\"\"\" trigger_id = _execute_rest_request( url=f\"{self.prefix}/{job_id}/savepoints\", http_method=\"POST\", accepted_status_code=202, json={\"cancel-job\": cancel_job, \"target-directory\": target_directory}, )[\"request-id\"] return", "\"\"\" return [ elem[\"id\"] for elem in _execute_rest_request(url=f\"{self.prefix}/metrics\") ] def metrics(self, metric_names=None, agg_modes=None,", "to all accumulators created by this job. Endpoint: [GET] /jobs/:jobid/execution-result Parameters ---------- job_id:", "\",\".join(metric_names), \"agg\": \",\".join(agg_modes), \"jobs\": \",\".join(job_ids), } query_result = _execute_rest_request( url=f\"{self.prefix}/metrics\", params=params ) result", "the subtask identifiers. Returns ------- list Positive integer list of subtask ids. \"\"\"", "job_id: str 32-character hexadecimal string value that identifies a job. metric_names: list (optional)", "hexadecimal string value that identifies a job. Returns ------- list List of identifiers.", "JobVertexClient JobVertexClient instance that can execute vertex related queries. \"\"\" return JobVertexClient(self.prefix, job_id,", "return a JobTrigger for further query identifier. Endpoint: [GET] /jobs/:jobid/savepoints Notes ----- The", "the JobManager(s) and TaskManager(s) e.g. a location on a distributed file-system or Object", "\"\"\" return [elem[\"subtask\"] for elem in self.accumulators()[\"subtasks\"]] def accumulators(self): \"\"\" Returns all user-defined", "} query_result = _execute_rest_request( url=f\"{self.prefix_url}/metrics\", params=params ) result = {} for elem in", "is None: subtask_ids = self.subtask_ids() params = { \"get\": \",\".join(metric_names), \"agg\": \",\".join(agg_modes), \"subtasks\":", "the savepoint creation. 
Returns ------- JobTrigger Object that can be used to query", "depend on this timer (e.g. time windows or process functions). This is useful", "existing metric names. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/metrics Parameters ---------- metric_names: list (optional) List of", "\"\"\" Returns the result of a job execution. Gives access to the execution", "state. Endpoint: [GET] /jobs/:jobid/stop Parameters ---------- job_id: str 32-character hexadecimal string value that", "return _execute_rest_request(url=f\"{self.prefix}/overview\")[\"jobs\"] def metric_names(self): \"\"\" Returns the supported metric names. Returns ------- list", "Returns an overview over all jobs and their current state. Endpoint: [GET] /jobs", "an execution attempt of a subtask. Multiple execution attempts happen in case of", "Default: current execution attempt's id Returns ------- dict Details of the selected attempt.", "---------- job_id: str 32-character hexadecimal string value that identifies a job. Returns -------", "32-character hexadecimal string value that identifies a job. Returns ------- dict Checkpointing statistics", "subtask_id: int Positive integer value that identifies a subtask. attempt_id: int (Optional) Positive", "Positive integer value that identifies a subtask. attempt_id: int (Optional) Positive integer value", "savepoint creation. Returns ------- JobTrigger Object that can be used to query the", "API url prefix. It must contain the host, port pair. \"\"\" self.prefix =", "a job. Returns ------- dict The execution result of the selected job. \"\"\"", "Default: <all metrics> agg_modes: list (optional) List of aggregation modes which should be", "all subtasks of a task. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/accumulators Returns ------- dict User-defined accumulators", "---------- subtask_id: int Positive integer value that identifies a subtask. Returns ------- dict", "to select specific subtasks. 
The list of valid subtask ids is available through", "a job. Returns ------- list List of checkpoint ids. \"\"\" return [elem[\"id\"] for", "JobTrigger for further query identifier. Endpoint: [GET] /jobs/:jobid/savepoints Notes ----- The target directory", "= prefix self._type_name = type_name self.job_id = job_id self.trigger_id = trigger_id @property def", "return _execute_rest_request(url=f\"{self.prefix}/{job_id}/plan\")[\"plan\"] def get_vertex_ids(self, job_id): \"\"\" Returns the ids of vertices of the", "flink_rest_client.common import _execute_rest_request, RestException class JobTrigger: def __init__(self, prefix, type_name, job_id, trigger_id): self._prefix", "job. Returns ------- list List of identifiers. \"\"\" return [elem[\"id\"] for elem in", "Returns the watermarks for all subtasks of a task. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/watermarks Returns", "32-character hexadecimal strings to select specific jobs. The list of valid jobs are", "list of valid jobs are available through the job_ids() method. Default: <all taskmanagers>.", "_execute_rest_request(url=f\"{self.prefix_url}/watermarks\") class JobsClient: def __init__(self, prefix): \"\"\" Constructor. Parameters ---------- prefix: str REST", "query_result = _execute_rest_request( url=f\"{self.prefix}/metrics\", params=params ) result = {} for elem in query_result:", "str 32-character hexadecimal string value that identifies a job. metric_names: list (optional) List", "attempt_id: int (Optional) Positive integer value that identifies an execution attempt. Default: current", "across the respective subtasks. Endpoint: [GET] /jobs/:jobid/accumulators Parameters ---------- job_id: str 32-character hexadecimal", "of a job. Endpoint: [GET] /jobs/:jobid/plan Parameters ---------- job_id: str 32-character hexadecimal string", "= self.metric_names() supported_agg_modes = [\"min\", \"max\", \"sum\", \"avg\"] if agg_modes is None: agg_modes", "\"\"\" Returns details for a checkpoint. 
Endpoint: [GET] /jobs/:jobid/checkpoints/details/:checkpointid If show_subtasks is true:", "Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex Parameters ---------- subtask_id: int Positive integer value that identifies a", "return [ elem[\"id\"] for elem in _execute_rest_request(url=f\"{self.prefix}/metrics\") ] def metrics(self, metric_names=None, agg_modes=None, job_ids=None):", "return _execute_rest_request(url=self.prefix)[\"jobs\"] def job_ids(self): \"\"\" Returns the list of job_ids. Returns ------- list", "job. Returns ------- dict The most recent exceptions. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/exceptions\") def get_execution_result(self,", "= _execute_rest_request( url=f\"{self.prefix}/{job_id}/metrics\", params=params ) return dict([(elem[\"id\"], elem[\"value\"]) for elem in query_result]) def", "true: Endpoint: [GET] /jobs/:jobid/checkpoints/details/:checkpointid/subtasks/:vertexid Parameters ---------- job_id: str 32-character hexadecimal string value that", "all task. \"\"\" params = {} if include_serialized_value is not None: params[\"includeSerializedValue\"] =", "subtasks return checkpoint_details def rescale(self, job_id, parallelism): \"\"\" Triggers the rescaling of a", "metric names. Default: <all metrics> agg_modes: list (optional) List of aggregation modes which", "RestException( f\"The provided aggregation modes list contains invalid value. Supported aggregation \" f\"modes:", "job. Returns ------- list List of checkpoint ids. \"\"\" return [elem[\"id\"] for elem", "agg_modes=None, subtask_ids=None): \"\"\" Provides access to aggregated subtask metrics. By default it returns", "{ \"drain\": False if drain is None else drain, \"targetDirectory\": target_directory, } trigger_id", "class JobsClient: def __init__(self, prefix): \"\"\" Constructor. Parameters ---------- prefix: str REST API", "selected job. 
\"\"\" return _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/config\", http_method=\"GET\" ) def get_checkpoints(self, job_id): \"\"\" Returns", "/jobs/:jobid/vertices/:vertexid/backpressure Notes ----- The deprecated status means that the back pressure stats are", "a subtask. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex Parameters ---------- subtask_id: int Positive integer value that", "before stopping the job. When the watermark is emitted, all event time timers", "names. \"\"\" return [ elem[\"id\"] for elem in _execute_rest_request(url=f\"{self.prefix}/metrics\") ] def metrics(self, metric_names=None,", "to aggregated subtask metrics. By default it returns with all existing metric names.", "def get(self, subtask_id): \"\"\" Returns details of the current or latest execution attempt", "id Returns ------- dict The accumulators of the selected execution attempt of a", "return False def stop(self, job_id, target_directory, drain=False): \"\"\" Stops a job with a", "a savepoint. This async operation would return a JobTrigger for further query identifier.", "history. \"\"\" return _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints\", http_method=\"GET\" ) def get_checkpoint_ids(self, job_id): \"\"\" Returns checkpoint", "task, with a summary for each of its subtasks. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid Returns", "subtask_ids]), } query_result = _execute_rest_request( url=f\"{self.prefix_url}/metrics\", params=params ) result = {} for elem", "for a task, with a summary for each of its subtasks. Endpoint: [GET]", "Returns an overview over all jobs. Endpoint: [GET] /jobs/metrics Parameters ---------- metric_names: list", "\"\"\" Returns checkpoint ids of the job_id. Parameters ---------- job_id: str 32-character hexadecimal", "---------- job_id: str 32-character hexadecimal string value that identifies a job. 
include_serialized_value: bool", "if not show_subtasks: return checkpoint_details subtasks = {} for vertex_id in checkpoint_details[\"tasks\"].keys(): subtasks[vertex_id]", "show_subtasks=False): \"\"\" Returns details for a checkpoint. Endpoint: [GET] /jobs/:jobid/checkpoints/details/:checkpointid If show_subtasks is", "subtask are also returned. Returns ------- dict \"\"\" checkpoint_details = _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}\", http_method=\"GET\",", "(optional) List of aggregation modes which should be calculated. Available aggregations are: \"min,", "job. Endpoint: [GET] /jobs/:jobid/config Parameters ---------- job_id: str 32-character hexadecimal string value that", "user task accumulators should be included in the response. Returns ------- dict Accumulators", "select specific subtasks. The list of valid subtask ids is available through the", "\"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/{subtask_id}\") def get_attempt(self, subtask_id, attempt_id=None): \"\"\" Returns details of an execution", "parallelism: int Positive integer value that specifies the desired parallelism. Returns ------- JobTrigger", "hexadecimal string value that identifies a job. Returns ------- dict Job configuration \"\"\"", "'triggerid' for further query identifier. Endpoint: [GET] /jobs/:jobid/rescaling Notes ----- Using Flink version", "that the back pressure stats are not available. Returns ------- dict Backpressure information", "execution attempt's id Returns ------- dict Details of the selected attempt. \"\"\" if", "Returns ------- dict Task information aggregated by task manager. 
\"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/taskmanagers\") def", "if attempt_id is None: return self.get(subtask_id) return _execute_rest_request( url=f\"{self.prefix_url}/{subtask_id}/attempts/{attempt_id}\" ) def get_attempt_accumulators(self, subtask_id,", "/jobs/overview Returns ------- list List of existing jobs. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/overview\")[\"jobs\"] def metric_names(self):", "cancel_job: bool If it is True, it also stops the job after the", "include_serialized_value: bool (Optional) Boolean value that specifies whether serialized user task accumulators should", "self.job_id = job_id self.vertex_id = vertex_id @property def prefix_url(self): return f\"{self._prefix}/{self.job_id}/vertices/{self.vertex_id}\" @property def", "return _execute_rest_request( url=f\"{self.prefix_url}/{subtask_id}/attempts/{attempt_id}\" ) def get_attempt_accumulators(self, subtask_id, attempt_id=None): \"\"\" Returns the accumulators of", "\"\"\" Provides access to job metrics. Endpoint: [GET] /jobs/:jobid/metrics Parameters ---------- job_id: str", "most recent exceptions. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/exceptions\") def get_execution_result(self, job_id): \"\"\" Returns the result", "if include_serialized_value else \"false\" ) return _execute_rest_request( url=f\"{self.prefix}/{job_id}/accumulators\", http_method=\"GET\", params=params ) def get_checkpointing_configuration(self,", "get_checkpoints(self, job_id): \"\"\" Returns checkpointing statistics for a job. Endpoint: [GET] /jobs/:jobid/checkpoints Parameters", "that can be used to query the status of savepoint. \"\"\" trigger_id =", "value that identifies a job. include_serialized_value: bool (Optional) Boolean value that specifies whether", "the accumulators for all tasks of a job, aggregated across the respective subtasks.", "metric_names=None, agg_modes=None, job_ids=None): \"\"\" Returns an overview over all jobs. 
Endpoint: [GET] /jobs/metrics", "List of identifiers. \"\"\" return [elem[\"id\"] for elem in self.get(job_id)[\"vertices\"]] def get_accumulators(self, job_id,", "trigger_id = _execute_rest_request( url=f\"{self.prefix}/{job_id}/savepoints\", http_method=\"POST\", accepted_status_code=202, json={\"cancel-job\": cancel_job, \"target-directory\": target_directory}, )[\"request-id\"] return JobTrigger(self.prefix,", "before stopping the job. default: False Returns ------- JobTrigger Object that can be", "exceptions that have been handled by Flink for this job. Endpoint: [GET] /jobs/:jobid/exceptions", "that specifies whether serialized user task accumulators should be included in the response.", "dict Aggregated job metrics. \"\"\" if metric_names is None: metric_names = self.metric_names() supported_agg_modes", "a job. Returns ------- dict The most recent exceptions. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/exceptions\") def", "a subtask. attempt_id: int (Optional) Positive integer value that identifies an execution attempt.", "details for a checkpoint. Endpoint: [GET] /jobs/:jobid/checkpoints/details/:checkpointid If show_subtasks is true: Endpoint: [GET]", "self.job_ids() params = { \"get\": \",\".join(metric_names), \"agg\": \",\".join(agg_modes), \"jobs\": \",\".join(job_ids), } query_result =", "Endpoint: [GET] /jobs/:jobid/rescaling Notes ----- Using Flink version 1.12, the method will raise", "_execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}\", http_method=\"GET\", ) if not show_subtasks: return checkpoint_details subtasks = {} for", "status means that the back pressure stats are not available. 
Returns ------- dict", "attempt_id is None: attempt_id = self.get(subtask_id)[\"attempt\"] return _execute_rest_request( url=f\"{self.prefix_url}/{subtask_id}/attempts/{attempt_id}/accumulators\" ) class JobVertexClient: def", "for all subtasks \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/subtasktimes\") def taskmanagers(self): \"\"\" Returns task information aggregated", "include_serialized_value else \"false\" ) return _execute_rest_request( url=f\"{self.prefix}/{job_id}/accumulators\", http_method=\"GET\", params=params ) def get_checkpointing_configuration(self, job_id):", "http_method=\"PATCH\", params=params )[\"triggerid\"] return JobTrigger(self.prefix, \"rescaling\", job_id, trigger_id) def create_savepoint(self, job_id, target_directory, cancel_job=False):", "Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/watermarks Returns ------- list Watermarks for all subtasks of a task.", "Default: <all modes> subtask_ids: list List of positive integers to select specific subtasks.", "must contain the host, port pair. \"\"\" self._prefix = prefix @property def prefix_url(self):", "JobTrigger: def __init__(self, prefix, type_name, job_id, trigger_id): self._prefix = prefix self._type_name = type_name", "target_directory}, )[\"request-id\"] return JobTrigger(self.prefix, \"savepoints\", job_id, trigger_id) def terminate(self, job_id): \"\"\" Terminates a", "identifies a job. Returns ------- bool True if the job has been canceled,", "a task. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/accumulators Returns ------- dict User-defined accumulators \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/accumulators\")", "def get_attempt_accumulators(self, subtask_id, attempt_id=None): \"\"\" Returns the accumulators of an execution attempt of", "watermark before stopping the job. default: False Returns ------- JobTrigger Object that can", "Checkpointing statistics for the selected job: counts, summary, latest and history. 
\"\"\" return", "[GET] /jobs/:jobid/vertices/:vertexid/watermarks Returns ------- list Watermarks for all subtasks of a task. \"\"\"", "------- JobTrigger Object that can be used to query the status of rescaling.", "result[metric_name] = elem[\"value\"] return result def subtasktimes(self): \"\"\" Returns time-related information for all", "the status of savepoint. \"\"\" data = { \"drain\": False if drain is", "all subtasks \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/subtasktimes\") def taskmanagers(self): \"\"\" Returns task information aggregated by", "get_exceptions(self, job_id): \"\"\" Returns the most recent exceptions that have been handled by", "Savepoint target directory. cancel_job: bool If it is True, it also stops the", "information for a job, and may initiate back-pressure sampling if necessary. Endpoint: [GET]", "terminate(self, job_id): \"\"\" Terminates a job. Endpoint: [PATCH] /jobs/:jobid Parameters ---------- job_id: str", "list List of jobs and their current state. \"\"\" return _execute_rest_request(url=self.prefix)[\"jobs\"] def job_ids(self):", "Returns details of the current or latest execution attempt of a subtask. Endpoint:", "a job. Returns ------- dict Details of the selected job. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}\")", "execution. Gives access to the execution time of the job and to all", "string value that identifies a job. Returns ------- dict Checkpointing statistics for the", "further query identifier. Attention: The target directory has to be a location accessible", "a task. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/watermarks Returns ------- list Watermarks for all subtasks of", "[GET] /jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex Parameters ---------- subtask_id: int Positive integer value that identifies a subtask.", "value that identifies a job. 
checkpoint_id: int Long value that identifies a checkpoint.", "def get_execution_result(self, job_id): \"\"\" Returns the result of a job execution. Gives access", "_execute_rest_request( url=f\"{self.prefix_url}/{subtask_id}/attempts/{attempt_id}/accumulators\" ) class JobVertexClient: def __init__(self, prefix, job_id, vertex_id): \"\"\" Constructor. Parameters", "host, port pair. \"\"\" self._prefix = prefix self.job_id = job_id self.vertex_id = vertex_id", "/jobs/:jobid/checkpoints Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job.", "\"\"\" Returns back-pressure information for a job, and may initiate back-pressure sampling if", "statistics for a job. Endpoint: [GET] /jobs/:jobid/checkpoints Parameters ---------- job_id: str 32-character hexadecimal", "checkpoint_id, show_subtasks=False): \"\"\" Returns details for a checkpoint. Endpoint: [GET] /jobs/:jobid/checkpoints/details/:checkpointid If show_subtasks", "job. vertex_id: str 32-character hexadecimal string value that identifies a vertex. Returns -------", "return checkpoint_details def rescale(self, job_id, parallelism): \"\"\" Triggers the rescaling of a job.", "of selected specific metric names. Default: <all metrics> agg_modes: list (optional) List of", "subtasks of a task. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasktimes Returns ------- dict Time-related information for", "are available through the job_ids() method. Default: <all taskmanagers>. Returns ------- dict Aggregated", "and optionally cancels the job afterwards. This async operation would return a JobTrigger", "return _execute_rest_request(url=self.prefix_url) def backpressure(self): \"\"\" Returns back-pressure information for a job, and may", "List of aggregation modes which should be calculated. Available aggregations are: \"min, max,", "List of job ids. 
\"\"\" return [elem[\"id\"] for elem in self.all()] def overview(self):", "list List of positive integers to select specific subtasks. The list of valid", "job afterwards. This async operation would return a JobTrigger for further query identifier.", "subtasks[vertex_id] = _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}/subtasks/{vertex_id}\", http_method=\"GET\", ) checkpoint_details[\"subtasks\"] = subtasks return checkpoint_details def rescale(self,", "all accumulators created by this job. Endpoint: [GET] /jobs/:jobid/execution-result Parameters ---------- job_id: str", "dataflow plan of a job. Endpoint: [GET] /jobs/:jobid/plan Parameters ---------- job_id: str 32-character", "[GET] /jobs/:jobid/vertices/:vertexid/backpressure Notes ----- The deprecated status means that the back pressure stats", "hexadecimal string value that identifies a job. metric_names: list (optional) List of selected", "Returns the subtask identifiers. Returns ------- list Positive integer list of subtask ids.", "the status of savepoint. \"\"\" trigger_id = _execute_rest_request( url=f\"{self.prefix}/{job_id}/savepoints\", http_method=\"POST\", accepted_status_code=202, json={\"cancel-job\": cancel_job,", "List of selected specific metric names. Default: <all metrics> Returns ------- dict Job", "trigger_id): self._prefix = prefix self._type_name = type_name self.job_id = job_id self.trigger_id = trigger_id", "http_method=\"PATCH\", accepted_status_code=202 ) if len(res) < 1: return True else: return False def", "query_result]) def get_plan(self, job_id): \"\"\" Returns the dataflow plan of a job. 
Endpoint:", "__init__(self, prefix, type_name, job_id, trigger_id): self._prefix = prefix self._type_name = type_name self.job_id =", "attempt_id = self.get(subtask_id)[\"attempt\"] return _execute_rest_request( url=f\"{self.prefix_url}/{subtask_id}/attempts/{attempt_id}/accumulators\" ) class JobVertexClient: def __init__(self, prefix, job_id,", "------- dict \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/{subtask_id}\") def get_attempt(self, subtask_id, attempt_id=None): \"\"\" Returns details of", "agg_modes=None, job_ids=None): \"\"\" Returns an overview over all jobs. Endpoint: [GET] /jobs/metrics Parameters", "hexadecimal string value that identifies a job. Returns ------- list List of checkpoint", "invalid value. Supported aggregation \" f\"modes: {','.join(supported_agg_modes)}; given list: {','.join(agg_modes)}\" ) if job_ids", "------- dict Accumulators for all task. \"\"\" params = {} if include_serialized_value is", "JobVertexSubtaskClient(self.prefix_url) def details(self): \"\"\" Returns details for a task, with a summary for", "been canceled, otherwise False. \"\"\" res = _execute_rest_request( url=f\"{self.prefix}/{job_id}\", http_method=\"PATCH\", accepted_status_code=202 ) if", "specifies the desired parallelism. Returns ------- JobTrigger Object that can be used to", "f\"The provided aggregation modes list contains invalid value. Supported aggregation \" f\"modes: {','.join(supported_agg_modes)};", "subtasks of a task. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/watermarks Returns ------- list Watermarks for all", "elem[\"id\"] for elem in _execute_rest_request(url=f\"{self.prefix}/metrics\") ] def metrics(self, metric_names=None, agg_modes=None, job_ids=None): \"\"\" Returns", "aggregations are: \"min, max, sum, avg\". Default: <all modes> subtask_ids: list List of", "time of the job and to all accumulators created by this job. Endpoint:", "metric_names=None): \"\"\" Provides access to job metrics. 
Endpoint: [GET] /jobs/:jobid/metrics Parameters ---------- job_id:", "url=f\"{self.prefix}/{job_id}/accumulators\", http_method=\"GET\", params=params ) def get_checkpointing_configuration(self, job_id): \"\"\" Returns the checkpointing configuration of", "selected execution attempt of a subtask. \"\"\" if attempt_id is None: attempt_id =", "be included in the response. Returns ------- dict Accumulators for all task. \"\"\"", "cancel_job=False): \"\"\" Triggers a savepoint, and optionally cancels the job afterwards. This async", "Returns ------- list List of identifiers. \"\"\" return [elem[\"id\"] for elem in self.get(job_id)[\"vertices\"]]", ")[\"request-id\"] return JobTrigger(self.prefix, \"savepoints\", job_id, trigger_id) def terminate(self, job_id): \"\"\" Terminates a job.", "job. parallelism: int Positive integer value that specifies the desired parallelism. Returns -------", "has been canceled, otherwise False. \"\"\" res = _execute_rest_request( url=f\"{self.prefix}/{job_id}\", http_method=\"PATCH\", accepted_status_code=202 )", "for elem in query_result: metric_name = elem.pop(\"id\") result[metric_name] = elem[\"value\"] return result def", "contain the host, port pair. \"\"\" self.prefix = f\"{prefix}/jobs\" def all(self): \"\"\" Returns", "Gives access to the execution time of the job and to all accumulators", "job_ids() method. Default: <all taskmanagers>. Returns ------- dict Aggregated job metrics. \"\"\" if", "/jobs/:jobid/exceptions Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job.", "maximum watermark before stopping the job. When the watermark is emitted, all event", "process events that depend on this timer (e.g. time windows or process functions).", "/jobs/:jobid/vertices/:vertexid/subtasks/metrics Parameters ---------- metric_names: list (optional) List of selected specific metric names. 
Default:", "in self.get_checkpoints(job_id=job_id)[\"history\"]] def get_checkpoint_details(self, job_id, checkpoint_id, show_subtasks=False): \"\"\" Returns details for a checkpoint.", "cancels the job afterwards. This async operation would return a JobTrigger for further", "[ elem[\"id\"] for elem in _execute_rest_request(url=f\"{self.prefix_url}/metrics\") ] def metrics(self, metric_names=None, agg_modes=None, subtask_ids=None): \"\"\"", "return dict([(elem[\"id\"], elem[\"value\"]) for elem in query_result]) def get_plan(self, job_id): \"\"\" Returns the", "Returns ------- list List of metric names. \"\"\" return [ elem[\"id\"] for elem", "metric_names: list (optional) List of selected specific metric names. Default: <all metrics> Returns", "\"target-directory\": target_directory}, )[\"request-id\"] return JobTrigger(self.prefix, \"savepoints\", job_id, trigger_id) def terminate(self, job_id): \"\"\" Terminates", "(e.g. time windows or process functions). This is useful when you want to", "sum, avg\". Default: <all modes> job_ids: list List of 32-character hexadecimal strings to", "a savepoint, and optionally cancels the job afterwards. This async operation would return", "elem[\"value\"] return result def subtasktimes(self): \"\"\" Returns time-related information for all subtasks of", "of a job. This async operation would return a 'triggerid' for further query", "a task. \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/watermarks\") class JobsClient: def __init__(self, prefix): \"\"\" Constructor. Parameters", "job_ids. Returns ------- list List of job ids. \"\"\" return [elem[\"id\"] for elem", "in self.get(job_id)[\"vertices\"]] def get_accumulators(self, job_id, include_serialized_value=None): \"\"\" Returns the accumulators for all tasks", "a distributed file-system or Object Store. 
Parameters ---------- job_id: str 32-character hexadecimal string", "metrics(self, metric_names=None, agg_modes=None, subtask_ids=None): \"\"\" Provides access to aggregated subtask metrics. By default", "ids is available through the subtask_ids() method. Default: <all subtasks>. Returns ------- dict", "when you want to fully shut down your job without leaving any unhandled", "job and to all accumulators created by this job. Endpoint: [GET] /jobs/:jobid/execution-result Parameters", "return _execute_rest_request(url=f\"{self.prefix_url}/backpressure\") def metric_names(self): \"\"\" Returns the supported metric names. Returns ------- list", "Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job. checkpoint_id:", "useful when you want to fully shut down your job without leaving any", "job_id. Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job.", "\"\"\" Returns time-related information for all subtasks of a task. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasktimes", "\"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/config\") def get_exceptions(self, job_id): \"\"\" Returns the most recent exceptions that", "{','.join(agg_modes)}\" ) if job_ids is None: job_ids = self.job_ids() params = { \"get\":", "identifies a job. metric_names: list (optional) List of selected specific metric names. Default:", "Returns ------- dict Time-related information for all subtasks \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/subtasktimes\") def taskmanagers(self):", "sum, avg\". Default: <all modes> subtask_ids: list List of positive integers to select", "accumulators created by this job. Endpoint: [GET] /jobs/:jobid/execution-result Parameters ---------- job_id: str 32-character", "trigger_id) def get_vertex(self, job_id, vertex_id): \"\"\" Returns a JobVertexClient. Parameters ---------- job_id: str", "ids. 
\"\"\" return [elem[\"id\"] for elem in self.all()] def overview(self): \"\"\" Returns an", "job_id): \"\"\" Returns the checkpointing configuration of the selected job_id Endpoint: [GET] /jobs/:jobid/checkpoints/config", "to fully shut down your job without leaving any unhandled events or state.", "in case of failure/recovery. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex/attempts/:attempt Parameters ---------- subtask_id: int Positive integer", "job. checkpoint_id: int Long value that identifies a checkpoint. show_subtasks: bool If it", "\"savepoints\", job_id, trigger_id) def terminate(self, job_id): \"\"\" Terminates a job. Endpoint: [PATCH] /jobs/:jobid", "a job execution. Gives access to the execution time of the job and", "32-character hexadecimal string value that identifies a job. Returns ------- list List of", "Object Store. Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a", "by this job. Endpoint: [GET] /jobs/:jobid/execution-result Parameters ---------- job_id: str 32-character hexadecimal string", "elem in query_result: metric_name = elem.pop(\"id\") result[metric_name] = elem[\"value\"] return result def subtasktimes(self):", "the selected job. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/execution-result\") def get_metrics(self, job_id, metric_names=None): \"\"\" Provides access", "\"\"\" Constructor. Parameters ---------- prefix: str REST API url prefix. It must contain", "checkpointing statistics for a job. Endpoint: [GET] /jobs/:jobid/checkpoints Parameters ---------- job_id: str 32-character", "def watermarks(self): \"\"\" Returns the watermarks for all subtasks of a task. 
Endpoint:", "def get_attempt(self, subtask_id, attempt_id=None): \"\"\" Returns details of an execution attempt of a", "for elem in self.get(job_id)[\"vertices\"]] def get_accumulators(self, job_id, include_serialized_value=None): \"\"\" Returns the accumulators for", "= { \"get\": \",\".join(metric_names), \"agg\": \",\".join(agg_modes), \"jobs\": \",\".join(job_ids), } query_result = _execute_rest_request( url=f\"{self.prefix}/metrics\",", "job without leaving any unhandled events or state. Endpoint: [GET] /jobs/:jobid/stop Parameters ----------", "return f\"{self._prefix}/subtasks\" def subtask_ids(self): \"\"\" Returns the subtask identifiers. Returns ------- list Positive", "\",\".join(agg_modes), \"subtasks\": \",\".join([str(elem) for elem in subtask_ids]), } query_result = _execute_rest_request( url=f\"{self.prefix_url}/metrics\", params=params", "target_directory, } trigger_id = _execute_rest_request( url=f\"{self.prefix}/{job_id}/stop\", http_method=\"POST\", accepted_status_code=202, json=data, )[\"request-id\"] return JobTrigger(self.prefix, \"savepoints\",", "positive integers to select specific subtasks. The list of valid subtask ids is", "return JobTrigger(self.prefix, \"rescaling\", job_id, trigger_id) def create_savepoint(self, job_id, target_directory, cancel_job=False): \"\"\" Triggers a", "information \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/backpressure\") def metric_names(self): \"\"\" Returns the supported metric names. Returns", "Returns details for a checkpoint. Endpoint: [GET] /jobs/:jobid/checkpoints/details/:checkpointid If show_subtasks is true: Endpoint:", "string value that identifies a job. Returns ------- bool True if the job", "Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job. parallelism:", "that specifies the desired parallelism. 
Returns ------- JobTrigger Object that can be used", "/jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex Parameters ---------- subtask_id: int Positive integer value that identifies a subtask. Returns", "show_subtasks: return checkpoint_details subtasks = {} for vertex_id in checkpoint_details[\"tasks\"].keys(): subtasks[vertex_id] = _execute_rest_request(", "integer value that identifies a subtask. Returns ------- dict \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/{subtask_id}\") def", "str 32-character hexadecimal string value that identifies a job. Returns ------- dict Checkpointing", "of the current or latest execution attempt of a subtask. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex", "value that identifies a job. target_directory: str Savepoint target directory. cancel_job: bool If", "an execution attempt. Default: current execution attempt's id Returns ------- dict Details of", "job after the savepoint creation. Returns ------- JobTrigger Object that can be used", "bool If it is True, it also stops the job after the savepoint", "all jobs. Endpoint: [GET] /jobs/overview Returns ------- list List of existing jobs. \"\"\"", ") def get_checkpoint_ids(self, job_id): \"\"\" Returns checkpoint ids of the job_id. Parameters ----------", "Returns an overview over all jobs. Endpoint: [GET] /jobs/overview Returns ------- list List", "Notes ----- Using Flink version 1.12, the method will raise RestHandlerException because this", "if attempt_id is None: attempt_id = self.get(subtask_id)[\"attempt\"] return _execute_rest_request( url=f\"{self.prefix_url}/{subtask_id}/attempts/{attempt_id}/accumulators\" ) class JobVertexClient:", "this rescaling is temporarily disabled. See FLINK-12312. Parameters ---------- job_id: str 32-character hexadecimal", "If it is True, it also stops the job after the savepoint creation.", "that identifies a vertex. 
Returns ------- JobVertexClient JobVertexClient instance that can execute vertex", "emits the maximum watermark before stopping the job. default: False Returns ------- JobTrigger", "_execute_rest_request( url=f\"{self.prefix_url}/{subtask_id}/attempts/{attempt_id}\" ) def get_attempt_accumulators(self, subtask_id, attempt_id=None): \"\"\" Returns the accumulators of an", "the response. Returns ------- dict Accumulators for all task. \"\"\" params = {}", "selected specific metric names. Default: <all metrics> Returns ------- dict Job metrics. \"\"\"", "def __init__(self, prefix): \"\"\" Constructor. Parameters ---------- prefix: str REST API url prefix.", "taskmanagers(self): \"\"\" Returns task information aggregated by task manager. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/taskmanagers Returns", "\"\"\" params = {} if include_serialized_value is not None: params[\"includeSerializedValue\"] = ( \"true\"", "of a job execution. Gives access to the execution time of the job", "Supported aggregation \" f\"modes: {','.join(supported_agg_modes)}; given list: {','.join(agg_modes)}\" ) if job_ids is None:", "latest execution attempt of a subtask. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex Parameters ---------- subtask_id: int", "\",\".join(metric_names), \"agg\": \",\".join(agg_modes), \"subtasks\": \",\".join([str(elem) for elem in subtask_ids]), } query_result = _execute_rest_request(", "def prefix_url(self): return f\"{self._prefix}/subtasks\" def subtask_ids(self): \"\"\" Returns the subtask identifiers. Returns -------", "accepted_status_code=202 ) if len(res) < 1: return True else: return False def stop(self,", "modes> subtask_ids: list List of positive integers to select specific subtasks. The list", "async operation would return a 'triggerid' for further query identifier. 
Endpoint: [GET] /jobs/:jobid/rescaling", "Backpressure information \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/backpressure\") def metric_names(self): \"\"\" Returns the supported metric names.", "Object that can be used to query the status of savepoint. \"\"\" data", "job_id, vertex_id): \"\"\" Returns a JobVertexClient. Parameters ---------- job_id: str 32-character hexadecimal string", "\"\"\" return _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints\", http_method=\"GET\" ) def get_checkpoint_ids(self, job_id): \"\"\" Returns checkpoint ids", "job. Returns ------- bool True if the job has been canceled, otherwise False.", "\"\"\" if attempt_id is None: return self.get(subtask_id) return _execute_rest_request( url=f\"{self.prefix_url}/{subtask_id}/attempts/{attempt_id}\" ) def get_attempt_accumulators(self,", "\"subtasks\": \",\".join([str(elem) for elem in subtask_ids]), } query_result = _execute_rest_request( url=f\"{self.prefix_url}/metrics\", params=params )", "dict Job configuration \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/config\") def get_exceptions(self, job_id): \"\"\" Returns the most", "checkpoint_details[\"tasks\"].keys(): subtasks[vertex_id] = _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}/subtasks/{vertex_id}\", http_method=\"GET\", ) checkpoint_details[\"subtasks\"] = subtasks return checkpoint_details def", "= subtasks return checkpoint_details def rescale(self, job_id, parallelism): \"\"\" Triggers the rescaling of", "of a task. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/accumulators Returns ------- dict User-defined accumulators \"\"\" return", "vertex_id): \"\"\" Returns a JobVertexClient. Parameters ---------- job_id: str 32-character hexadecimal string value", "job_id, include_serialized_value=None): \"\"\" Returns the accumulators for all tasks of a job, aggregated", "The execution result of the selected job. 
\"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/execution-result\") def get_metrics(self, job_id,", "that depend on this timer (e.g. time windows or process functions). This is", "[GET] /jobs/:jobid/plan Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a", "def metrics(self, metric_names=None): \"\"\" Provides access to task metrics. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/metrics Returns", "a summary for each of its subtasks. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid Returns ------- dict", "------- bool True if the job has been canceled, otherwise False. \"\"\" res", "are not available. Returns ------- dict Backpressure information \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/backpressure\") def metric_names(self):", "identifier. Endpoint: [GET] /jobs/:jobid/rescaling Notes ----- Using Flink version 1.12, the method will", "accumulators of an execution attempt of a subtask. Multiple execution attempts happen in", "a job. Endpoint: [GET] /jobs/:jobid/config Parameters ---------- job_id: str 32-character hexadecimal string value", "return _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints\", http_method=\"GET\" ) def get_checkpoint_ids(self, job_id): \"\"\" Returns checkpoint ids of", "that identifies a job. Returns ------- dict Checkpointing configuration of the selected job.", "a vertex. Returns ------- JobVertexClient JobVertexClient instance that can execute vertex related queries.", "job: counts, summary, latest and history. \"\"\" return _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints\", http_method=\"GET\" ) def", "_execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}/subtasks/{vertex_id}\", http_method=\"GET\", ) checkpoint_details[\"subtasks\"] = subtasks return checkpoint_details def rescale(self, job_id, parallelism):", "target directory. 
cancel_job: bool If it is True, it also stops the job", "of checkpoint ids. \"\"\" return [elem[\"id\"] for elem in self.get_checkpoints(job_id=job_id)[\"history\"]] def get_checkpoint_details(self, job_id,", "prefix: str REST API url prefix. It must contain the host, port pair.", "32-character hexadecimal string value that identifies a job. Returns ------- dict The execution", "names. Default: <all metrics> agg_modes: list (optional) List of aggregation modes which should", "the selected job. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}\") def get_config(self, job_id): \"\"\" Returns the configuration", "identifiers. \"\"\" return [elem[\"id\"] for elem in self.get(job_id)[\"vertices\"]] def get_accumulators(self, job_id, include_serialized_value=None): \"\"\"", "job_ids = self.job_ids() params = { \"get\": \",\".join(metric_names), \"agg\": \",\".join(agg_modes), \"jobs\": \",\".join(job_ids), }", "necessary. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/backpressure Notes ----- The deprecated status means that the back", "32-character hexadecimal string value that identifies a job. vertex_id: str 32-character hexadecimal string", "True, it also stops the job after the savepoint creation. Returns ------- JobTrigger", "Key-value pairs of metrics. \"\"\" if metric_names is None: metric_names = self.metric_names() supported_agg_modes", "windows or process functions). This is useful when you want to fully shut", "------- dict Dataflow plan \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/plan\")[\"plan\"] def get_vertex_ids(self, job_id): \"\"\" Returns the", "query identifier. Endpoint: [GET] /jobs/:jobid/savepoints Notes ----- The target directory has to be", "self.get_checkpoints(job_id=job_id)[\"history\"]] def get_checkpoint_details(self, job_id, checkpoint_id, show_subtasks=False): \"\"\" Returns details for a checkpoint. 
Endpoint:", "\"targetDirectory\": target_directory, } trigger_id = _execute_rest_request( url=f\"{self.prefix}/{job_id}/stop\", http_method=\"POST\", accepted_status_code=202, json=data, )[\"request-id\"] return JobTrigger(self.prefix,", "identifies a job. target_directory: str Savepoint target directory. drain: bool (Optional) If it", "job_id: str 32-character hexadecimal string value that identifies a job. vertex_id: str 32-character", "an execution attempt. Default: current execution attempt's id Returns ------- dict The accumulators", "subtask_id, attempt_id=None): \"\"\" Returns details of an execution attempt of a subtask. Multiple", "job. Returns ------- dict Details of the selected job. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}\") def", "accumulators of the selected execution attempt of a subtask. \"\"\" if attempt_id is", "Returns all user-defined accumulators for all subtasks of a task. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/accumulators", "Boolean value that specifies whether serialized user task accumulators should be included in", "that identifies a job. parallelism: int Positive integer value that specifies the desired", "of a task. \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/watermarks\") class JobsClient: def __init__(self, prefix): \"\"\" Constructor.", "Endpoint: [GET] /jobs/:jobid/plan Parameters ---------- job_id: str 32-character hexadecimal string value that identifies", "in query_result: metric_name = elem.pop(\"id\") result[metric_name] = elem return result def get(self, subtask_id):", "max, sum, avg\". Default: <all modes> job_ids: list List of 32-character hexadecimal strings", "\"\"\" Returns details of a job. Endpoint: [GET] /jobs/:jobid Parameters ---------- job_id: str", "Object that can be used to query the status of rescaling. \"\"\" params", "job execution. Gives access to the execution time of the job and to", "its subtasks. 
Endpoint: [GET] /jobs/:jobid/vertices/:vertexid Returns ------- dict details for a task. \"\"\"", "\"get\": \",\".join(metric_names), \"agg\": \",\".join(agg_modes), \"jobs\": \",\".join(job_ids), } query_result = _execute_rest_request( url=f\"{self.prefix}/metrics\", params=params )", "JobTrigger(self.prefix, \"savepoints\", job_id, trigger_id) def terminate(self, job_id): \"\"\" Terminates a job. Endpoint: [PATCH]", "This async operation would return a 'triggerid' for further query identifier. Endpoint: [GET]", "None: metric_names = self.metric_names() params = {\"get\": \",\".join(metric_names)} query_result = _execute_rest_request( url=f\"{self.prefix}/{job_id}/metrics\", params=params", "/jobs/:jobid/vertices/:vertexid/accumulators Returns ------- dict User-defined accumulators \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/accumulators\") def metric_names(self): \"\"\" Returns", "identifier. Endpoint: [GET] /jobs/:jobid/savepoints Notes ----- The target directory has to be a", "selected job. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/execution-result\") def get_metrics(self, job_id, metric_names=None): \"\"\" Provides access to", "this job. Endpoint: [GET] /jobs/:jobid/exceptions Parameters ---------- job_id: str 32-character hexadecimal string value", "int Positive integer value that identifies a subtask. attempt_id: int (Optional) Positive integer", "elem in _execute_rest_request(url=f\"{self.prefix_url}/metrics\") ] def metrics(self, metric_names=None, agg_modes=None, subtask_ids=None): \"\"\" Provides access to", "[GET] /jobs/:jobid/config Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a", "canceled, otherwise False. 
\"\"\" res = _execute_rest_request( url=f\"{self.prefix}/{job_id}\", http_method=\"PATCH\", accepted_status_code=202 ) if len(res)", "the most recent exceptions that have been handled by Flink for this job.", "{ \"get\": \",\".join(metric_names), \"agg\": \",\".join(agg_modes), \"subtasks\": \",\".join([str(elem) for elem in subtask_ids]), } query_result", "True, the details of the subtask are also returned. Returns ------- dict \"\"\"", "a subtask. Multiple execution attempts happen in case of failure/recovery. Parameters ---------- subtask_id:", "deprecated status means that the back pressure stats are not available. Returns -------", "can be used to query the status of savepoint. \"\"\" data = {", "import _execute_rest_request, RestException class JobTrigger: def __init__(self, prefix, type_name, job_id, trigger_id): self._prefix =", "<all taskmanagers>. Returns ------- dict Aggregated job metrics. \"\"\" if metric_names is None:", "job_id, checkpoint_id, show_subtasks=False): \"\"\" Returns details for a checkpoint. Endpoint: [GET] /jobs/:jobid/checkpoints/details/:checkpointid If", "a subtask. Returns ------- dict \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/{subtask_id}\") def get_attempt(self, subtask_id, attempt_id=None): \"\"\"", "operation would return a JobTrigger for further query identifier. Attention: The target directory", "= {} for elem in query_result: metric_name = elem.pop(\"id\") result[metric_name] = elem return", ") if job_ids is None: job_ids = self.job_ids() params = { \"get\": \",\".join(metric_names),", "plan of a job. Endpoint: [GET] /jobs/:jobid/plan Parameters ---------- job_id: str 32-character hexadecimal", "response. Returns ------- dict Accumulators for all task. \"\"\" params = {} if", "directory has to be a location accessible by both the JobManager(s) and TaskManager(s)", "task. 
Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/accumulators Returns ------- dict User-defined accumulators \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/accumulators\") def", "or process functions). This is useful when you want to fully shut down", "accepted_status_code=202, json=data, )[\"request-id\"] return JobTrigger(self.prefix, \"savepoints\", job_id, trigger_id) def get_vertex(self, job_id, vertex_id): \"\"\"", "that identifies a job. include_serialized_value: bool (Optional) Boolean value that specifies whether serialized", "accumulators \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/accumulators\") def metric_names(self): \"\"\" Returns the supported metric names. Returns", "it returns with all existing metric names. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/metrics Parameters ---------- metric_names:", "job_id: str 32-character hexadecimal string value that identifies a job. include_serialized_value: bool (Optional)", "List of existing jobs. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/overview\")[\"jobs\"] def metric_names(self): \"\"\" Returns the supported", "/jobs/:jobid/checkpoints/details/:checkpointid/subtasks/:vertexid Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job.", "value that identifies a job. Returns ------- dict The most recent exceptions. \"\"\"", "------- JobVertexClient JobVertexClient instance that can execute vertex related queries. 
\"\"\" return JobVertexClient(self.prefix,", "params = { \"get\": \",\".join(metric_names), \"agg\": \",\".join(agg_modes), \"jobs\": \",\".join(job_ids), } query_result = _execute_rest_request(", "False Returns ------- JobTrigger Object that can be used to query the status", "= job_id self.trigger_id = trigger_id @property def status(self): return _execute_rest_request( url=f\"{self._prefix}/{self.job_id}/{self._type_name}/{self.trigger_id}\" ) class", ") if len(res) < 1: return True else: return False def stop(self, job_id,", "hexadecimal string value that identifies a job. Returns ------- dict Details of the", "provided aggregation modes list contains invalid value. Supported aggregation \" f\"modes: {','.join(supported_agg_modes)}; given", "Returns ------- dict Job metrics. \"\"\" if metric_names is None: metric_names = self.metric_names()", "_execute_rest_request(url=f\"{self.prefix}/metrics\") ] def metrics(self, metric_names=None, agg_modes=None, job_ids=None): \"\"\" Returns an overview over all", "not show_subtasks: return checkpoint_details subtasks = {} for vertex_id in checkpoint_details[\"tasks\"].keys(): subtasks[vertex_id] =", "---------- metric_names: list (optional) List of selected specific metric names. Default: <all metrics>", "return f\"{self._prefix}/{self.job_id}/vertices/{self.vertex_id}\" @property def subtasks(self): return JobVertexSubtaskClient(self.prefix_url) def details(self): \"\"\" Returns details for", "host, port pair. \"\"\" self._prefix = prefix @property def prefix_url(self): return f\"{self._prefix}/subtasks\" def", "/jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex/attempts/:attempt Parameters ---------- subtask_id: int Positive integer value that identifies a subtask. attempt_id:", "if the job has been canceled, otherwise False. 
\"\"\" res = _execute_rest_request( url=f\"{self.prefix}/{job_id}\",", "[elem[\"id\"] for elem in self.get_checkpoints(job_id=job_id)[\"history\"]] def get_checkpoint_details(self, job_id, checkpoint_id, show_subtasks=False): \"\"\" Returns details", "[GET] /jobs/:jobid/savepoints Notes ----- The target directory has to be a location accessible", "return [elem[\"subtask\"] for elem in self.accumulators()[\"subtasks\"]] def accumulators(self): \"\"\" Returns all user-defined accumulators", "of a job. Endpoint: [GET] /jobs/:jobid/config Parameters ---------- job_id: str 32-character hexadecimal string", "\"\"\" Returns the dataflow plan of a job. Endpoint: [GET] /jobs/:jobid/plan Parameters ----------", "/jobs/:jobid/stop Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job.", "False def stop(self, job_id, target_directory, drain=False): \"\"\" Stops a job with a savepoint.", "of the selected job_id Endpoint: [GET] /jobs/:jobid/checkpoints/config Parameters ---------- job_id: str 32-character hexadecimal", "\"avg\"] if agg_modes is None: agg_modes = supported_agg_modes if len(set(agg_modes).difference(set(supported_agg_modes))) > 0: raise", "attempt. Default: current execution attempt's id Returns ------- dict Details of the selected", "identifies a job. checkpoint_id: int Long value that identifies a checkpoint. show_subtasks: bool", "parallelism): \"\"\" Triggers the rescaling of a job. This async operation would return", "watermarks(self): \"\"\" Returns the watermarks for all subtasks of a task. Endpoint: [GET]", "Returns ------- dict \"\"\" checkpoint_details = _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}\", http_method=\"GET\", ) if not show_subtasks:", "self.metric_names() params = {\"get\": \",\".join(metric_names)} query_result = _execute_rest_request( url=f\"{self.prefix_url}/metrics\", params=params ) result =", "value that identifies a job. 
parallelism: int Positive integer value that specifies the", "\"\"\" Returns all user-defined accumulators for all subtasks of a task. Endpoint: [GET]", "return a JobTrigger for further query identifier. Attention: The target directory has to", "execution time of the job and to all accumulators created by this job.", "\"\"\" checkpoint_details = _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}\", http_method=\"GET\", ) if not show_subtasks: return checkpoint_details subtasks", "or Object Store. Draining emits the maximum watermark before stopping the job. When", "job_id: str 32-character hexadecimal string value that identifies a job. Returns ------- list", "Returns checkpointing statistics for a job. Endpoint: [GET] /jobs/:jobid/checkpoints Parameters ---------- job_id: str", "\"min, max, sum, avg\". Default: <all modes> job_ids: list List of 32-character hexadecimal", "True, it emits the maximum watermark before stopping the job. default: False Returns", "value. Supported aggregation \" f\"modes: {','.join(supported_agg_modes)}; given list: {','.join(agg_modes)}\" ) if job_ids is", "------- dict \"\"\" checkpoint_details = _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}\", http_method=\"GET\", ) if not show_subtasks: return", "dict details for a task. \"\"\" return _execute_rest_request(url=self.prefix_url) def backpressure(self): \"\"\" Returns back-pressure", "the selected attempt. \"\"\" if attempt_id is None: return self.get(subtask_id) return _execute_rest_request( url=f\"{self.prefix_url}/{subtask_id}/attempts/{attempt_id}\"", "(Optional) Boolean value that specifies whether serialized user task accumulators should be included", "Supported aggregation \" f\"modes: {','.join(supported_agg_modes)}; given list: {','.join(agg_modes)}\" ) if subtask_ids is None:", "Returns the supported metric names. Returns ------- list List of metric names. \"\"\"", "that identifies a job. 
checkpoint_id: int Long value that identifies a checkpoint. show_subtasks:", "include_serialized_value is not None: params[\"includeSerializedValue\"] = ( \"true\" if include_serialized_value else \"false\" )", "a subtask. Multiple execution attempts happen in case of failure/recovery. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex/attempts/:attempt", "Savepoint target directory. drain: bool (Optional) If it is True, it emits the", "params=params ) result = {} for elem in query_result: metric_name = elem.pop(\"id\") result[metric_name]", "] def metrics(self, metric_names=None): \"\"\" Provides access to task metrics. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/metrics", "task. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasktimes Returns ------- dict Time-related information for all subtasks \"\"\"", "Returns ------- dict The accumulators of the selected execution attempt of a subtask.", "dict Time-related information for all subtasks \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/subtasktimes\") def taskmanagers(self): \"\"\" Returns", "= self.job_ids() params = { \"get\": \",\".join(metric_names), \"agg\": \",\".join(agg_modes), \"jobs\": \",\".join(job_ids), } query_result", "params[\"includeSerializedValue\"] = ( \"true\" if include_serialized_value else \"false\" ) return _execute_rest_request( url=f\"{self.prefix}/{job_id}/accumulators\", http_method=\"GET\",", "get(self, subtask_id): \"\"\" Returns details of the current or latest execution attempt of", "for all subtasks of a task. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/watermarks Returns ------- list Watermarks", "hexadecimal string value that identifies a job. parallelism: int Positive integer value that", "the accumulators of an execution attempt of a subtask. Multiple execution attempts happen", "Object Store. Draining emits the maximum watermark before stopping the job. When the", "available through the subtask_ids() method. 
Default: <all subtasks>. Returns ------- dict Key-value pairs", "of the selected job. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/execution-result\") def get_metrics(self, job_id, metric_names=None): \"\"\" Provides", ") return dict([(elem[\"id\"], elem[\"value\"]) for elem in query_result]) def get_plan(self, job_id): \"\"\" Returns", "the subtask are also returned. Returns ------- dict \"\"\" checkpoint_details = _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}\",", "/jobs/:jobid/checkpoints/config Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job.", "valid jobs are available through the job_ids() method. Default: <all taskmanagers>. Returns -------", "list Positive integer list of subtask ids. \"\"\" return [elem[\"subtask\"] for elem in", "str 32-character hexadecimal string value that identifies a job. Returns ------- dict The", "checkpoint_details subtasks = {} for vertex_id in checkpoint_details[\"tasks\"].keys(): subtasks[vertex_id] = _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}/subtasks/{vertex_id}\", http_method=\"GET\",", "of metric names. \"\"\" return [ elem[\"id\"] for elem in _execute_rest_request(url=f\"{self.prefix}/metrics\") ] def", "not available. Returns ------- dict Backpressure information \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/backpressure\") def metric_names(self): \"\"\"", "rescaling. \"\"\" params = {\"parallelism\": parallelism} trigger_id = _execute_rest_request( url=f\"{self.prefix}/{job_id}/rescaling\", http_method=\"PATCH\", params=params )[\"triggerid\"]", "self._prefix = prefix self._type_name = type_name self.job_id = job_id self.trigger_id = trigger_id @property", "Accumulators for all task. 
\"\"\" params = {} if include_serialized_value is not None:", "subtask_id, attempt_id=None): \"\"\" Returns the accumulators of an execution attempt of a subtask.", "------- dict Checkpointing configuration of the selected job. \"\"\" return _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/config\", http_method=\"GET\"", "if metric_names is None: metric_names = self.metric_names() params = {\"get\": \",\".join(metric_names)} query_result =", "operation would return a JobTrigger for further query identifier. Endpoint: [GET] /jobs/:jobid/savepoints Notes", "location on a distributed file-system or Object Store. Draining emits the maximum watermark", "optionally cancels the job afterwards. This async operation would return a JobTrigger for", "List of jobs and their current state. \"\"\" return _execute_rest_request(url=self.prefix)[\"jobs\"] def job_ids(self): \"\"\"", ") def get_attempt_accumulators(self, subtask_id, attempt_id=None): \"\"\" Returns the accumulators of an execution attempt", "aggregation modes list contains invalid value. Supported aggregation \" f\"modes: {','.join(supported_agg_modes)}; given list:", "str 32-character hexadecimal string value that identifies a job. vertex_id: str 32-character hexadecimal", "integer value that identifies a subtask. attempt_id: int (Optional) Positive integer value that", "on this timer (e.g. time windows or process functions). This is useful when", "must contain the host, port pair. \"\"\" self.prefix = f\"{prefix}/jobs\" def all(self): \"\"\"", "all jobs. Endpoint: [GET] /jobs/metrics Parameters ---------- metric_names: list (optional) List of selected", "aggregated by task manager. 
\"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/taskmanagers\") def watermarks(self): \"\"\" Returns the watermarks", "[elem[\"id\"] for elem in self.all()] def overview(self): \"\"\" Returns an overview over all", "def subtasks(self): return JobVertexSubtaskClient(self.prefix_url) def details(self): \"\"\" Returns details for a task, with", "job_id self.trigger_id = trigger_id @property def status(self): return _execute_rest_request( url=f\"{self._prefix}/{self.job_id}/{self._type_name}/{self.trigger_id}\" ) class JobVertexSubtaskClient:", "RestHandlerException because this rescaling is temporarily disabled. See FLINK-12312. Parameters ---------- job_id: str", "details of an execution attempt of a subtask. Multiple execution attempts happen in", "overview over all jobs. Endpoint: [GET] /jobs/overview Returns ------- list List of existing", "Returns the list of job_ids. Returns ------- list List of job ids. \"\"\"", "subtasks of a task. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/accumulators Returns ------- dict User-defined accumulators \"\"\"", "list List of checkpoint ids. \"\"\" return [elem[\"id\"] for elem in self.get_checkpoints(job_id=job_id)[\"history\"]] def", "subtask. Multiple execution attempts happen in case of failure/recovery. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex/attempts/:attempt Parameters", "\",\".join(metric_names)} query_result = _execute_rest_request( url=f\"{self.prefix_url}/metrics\", params=params ) result = {} for elem in", "file-system or Object Store. Draining emits the maximum watermark before stopping the job.", "return _execute_rest_request(url=f\"{self.prefix}/{job_id}/execution-result\") def get_metrics(self, job_id, metric_names=None): \"\"\" Provides access to job metrics. Endpoint:", "----- Using Flink version 1.12, the method will raise RestHandlerException because this rescaling", "identifies a job. Returns ------- dict The most recent exceptions. 
\"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/exceptions\")", "job_id): \"\"\" Returns checkpointing statistics for a job. Endpoint: [GET] /jobs/:jobid/checkpoints Parameters ----------", "Positive integer value that identifies an execution attempt. Default: current execution attempt's id", "< 1: return True else: return False def stop(self, job_id, target_directory, drain=False): \"\"\"", "------- dict details for a task. \"\"\" return _execute_rest_request(url=self.prefix_url) def backpressure(self): \"\"\" Returns", "return [ elem[\"id\"] for elem in _execute_rest_request(url=f\"{self.prefix_url}/metrics\") ] def metrics(self, metric_names=None): \"\"\" Provides", "identifies a job. Returns ------- list List of checkpoint ids. \"\"\" return [elem[\"id\"]", "dict Checkpointing configuration of the selected job. \"\"\" return _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/config\", http_method=\"GET\" )", "a job. Endpoint: [GET] /jobs/:jobid/plan Parameters ---------- job_id: str 32-character hexadecimal string value", "location accessible by both the JobManager(s) and TaskManager(s) e.g. a location on a", "of a subtask. Multiple execution attempts happen in case of failure/recovery. Endpoint: [GET]", "a checkpoint. Endpoint: [GET] /jobs/:jobid/checkpoints/details/:checkpointid If show_subtasks is true: Endpoint: [GET] /jobs/:jobid/checkpoints/details/:checkpointid/subtasks/:vertexid Parameters", "job_id): \"\"\" Returns the dataflow plan of a job. Endpoint: [GET] /jobs/:jobid/plan Parameters", "of a job, aggregated across the respective subtasks. Endpoint: [GET] /jobs/:jobid/accumulators Parameters ----------", "[elem[\"id\"] for elem in self.get(job_id)[\"vertices\"]] def get_accumulators(self, job_id, include_serialized_value=None): \"\"\" Returns the accumulators", "without leaving any unhandled events or state. Endpoint: [GET] /jobs/:jobid/stop Parameters ---------- job_id:", "value that identifies a job. 
Returns ------- dict Checkpointing configuration of the selected", "params = {\"get\": \",\".join(metric_names)} query_result = _execute_rest_request( url=f\"{self.prefix_url}/metrics\", params=params ) result = {}", "Endpoint: [GET] /jobs/metrics Parameters ---------- metric_names: list (optional) List of selected specific metric", "include_serialized_value=None): \"\"\" Returns the accumulators for all tasks of a job, aggregated across", "metric names. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/metrics Parameters ---------- metric_names: list (optional) List of selected", "def all(self): \"\"\" Returns an overview over all jobs and their current state.", "manager. \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/taskmanagers\") def watermarks(self): \"\"\" Returns the watermarks for all subtasks", "_execute_rest_request( url=f\"{self.prefix}/{job_id}/metrics\", params=params ) return dict([(elem[\"id\"], elem[\"value\"]) for elem in query_result]) def get_plan(self,", "are: \"min, max, sum, avg\". Default: <all modes> job_ids: list List of 32-character", "__init__(self, prefix, job_id, vertex_id): \"\"\" Constructor. Parameters ---------- prefix: str REST API url", "of the job and to all accumulators created by this job. Endpoint: [GET]", "on a distributed file-system or Object Store. Draining emits the maximum watermark before", "params = {\"get\": \",\".join(metric_names)} query_result = _execute_rest_request( url=f\"{self.prefix}/{job_id}/metrics\", params=params ) return dict([(elem[\"id\"], elem[\"value\"])", "for elem in subtask_ids]), } query_result = _execute_rest_request( url=f\"{self.prefix_url}/metrics\", params=params ) result =", "self._prefix = prefix @property def prefix_url(self): return f\"{self._prefix}/subtasks\" def subtask_ids(self): \"\"\" Returns the", "back-pressure information for a job, and may initiate back-pressure sampling if necessary. Endpoint:", "job. 
\"\"\" return _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/config\", http_method=\"GET\" ) def get_checkpoints(self, job_id): \"\"\" Returns checkpointing", "\",\".join(agg_modes), \"jobs\": \",\".join(job_ids), } query_result = _execute_rest_request( url=f\"{self.prefix}/metrics\", params=params ) result = {}", "Stops a job with a savepoint. This async operation would return a JobTrigger", "def metrics(self, metric_names=None, agg_modes=None, subtask_ids=None): \"\"\" Provides access to aggregated subtask metrics. By", "self.get(subtask_id) return _execute_rest_request( url=f\"{self.prefix_url}/{subtask_id}/attempts/{attempt_id}\" ) def get_attempt_accumulators(self, subtask_id, attempt_id=None): \"\"\" Returns the accumulators", "return _execute_rest_request(url=f\"{self.prefix_url}/taskmanagers\") def watermarks(self): \"\"\" Returns the watermarks for all subtasks of a", "if agg_modes is None: agg_modes = supported_agg_modes if len(set(agg_modes).difference(set(supported_agg_modes))) > 0: raise RestException(", "dict Job metrics. \"\"\" if metric_names is None: metric_names = self.metric_names() params =", "user-defined accumulators for all subtasks of a task. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/accumulators Returns -------", "file-system or Object Store. Parameters ---------- job_id: str 32-character hexadecimal string value that", "subtask_ids=None): \"\"\" Provides access to aggregated subtask metrics. By default it returns with", "for further query identifier. Attention: The target directory has to be a location", "dict Key-value pairs of metrics. \"\"\" if metric_names is None: metric_names = self.metric_names()", "List of metric names. 
\"\"\" return [ elem[\"id\"] for elem in _execute_rest_request(url=f\"{self.prefix}/metrics\") ]", "job_id self.vertex_id = vertex_id @property def prefix_url(self): return f\"{self._prefix}/{self.job_id}/vertices/{self.vertex_id}\" @property def subtasks(self): return", "list contains invalid value. Supported aggregation \" f\"modes: {','.join(supported_agg_modes)}; given list: {','.join(agg_modes)}\" )", "dict The accumulators of the selected execution attempt of a subtask. \"\"\" if", "class JobTrigger: def __init__(self, prefix, type_name, job_id, trigger_id): self._prefix = prefix self._type_name =", "REST API url prefix. It must contain the host, port pair. \"\"\" self.prefix", "execution attempt's id Returns ------- dict The accumulators of the selected execution attempt", "target directory. drain: bool (Optional) If it is True, it emits the maximum", "Provides access to aggregated subtask metrics. By default it returns with all existing", "[GET] /jobs/overview Returns ------- list List of existing jobs. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/overview\")[\"jobs\"] def", "with all existing metric names. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/metrics Parameters ---------- metric_names: list (optional)", ") result = {} for elem in query_result: metric_name = elem.pop(\"id\") result[metric_name] =", "metrics. \"\"\" if metric_names is None: metric_names = self.metric_names() params = {\"get\": \",\".join(metric_names)}", "Details of the selected attempt. \"\"\" if attempt_id is None: return self.get(subtask_id) return", "subtask_ids is None: subtask_ids = self.subtask_ids() params = { \"get\": \",\".join(metric_names), \"agg\": \",\".join(agg_modes),", "of failure/recovery. Parameters ---------- subtask_id: int Positive integer value that identifies a subtask.", "after the savepoint creation. Returns ------- JobTrigger Object that can be used to", "job. Returns ------- dict Checkpointing configuration of the selected job. 
\"\"\" return _execute_rest_request(", "vertex_id in checkpoint_details[\"tasks\"].keys(): subtasks[vertex_id] = _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}/subtasks/{vertex_id}\", http_method=\"GET\", ) checkpoint_details[\"subtasks\"] = subtasks return", "for the selected job: counts, summary, latest and history. \"\"\" return _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints\",", "= _execute_rest_request( url=f\"{self.prefix_url}/metrics\", params=params ) result = {} for elem in query_result: metric_name", "JobTrigger(self.prefix, \"savepoints\", job_id, trigger_id) def get_vertex(self, job_id, vertex_id): \"\"\" Returns a JobVertexClient. Parameters", "a job. parallelism: int Positive integer value that specifies the desired parallelism. Returns", "metrics. By default it returns with all existing metric names. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/metrics", "{\"get\": \",\".join(metric_names)} query_result = _execute_rest_request( url=f\"{self.prefix_url}/metrics\", params=params ) result = {} for elem", "\"\"\" Returns the ids of vertices of the selected job. Parameters ---------- job_id:", "job. Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job.", "\",\".join([str(elem) for elem in subtask_ids]), } query_result = _execute_rest_request( url=f\"{self.prefix_url}/metrics\", params=params ) result", "value that identifies a job. Returns ------- list List of checkpoint ids. \"\"\"", "\"\"\" Returns a JobVertexClient. Parameters ---------- job_id: str 32-character hexadecimal string value that", "of its subtasks. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid Returns ------- dict details for a task.", "url=f\"{self._prefix}/{self.job_id}/{self._type_name}/{self.trigger_id}\" ) class JobVertexSubtaskClient: def __init__(self, prefix): \"\"\" Constructor. 
Parameters ---------- prefix: str", "_execute_rest_request(url=f\"{self.prefix_url}/taskmanagers\") def watermarks(self): \"\"\" Returns the watermarks for all subtasks of a task.", "def get_vertex(self, job_id, vertex_id): \"\"\" Returns a JobVertexClient. Parameters ---------- job_id: str 32-character", "Returns ------- dict Backpressure information \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/backpressure\") def metric_names(self): \"\"\" Returns the", "self.accumulators()[\"subtasks\"]] def accumulators(self): \"\"\" Returns all user-defined accumulators for all subtasks of a", "metric_name = elem.pop(\"id\") result[metric_name] = elem[\"value\"] return result def subtasktimes(self): \"\"\" Returns time-related", "/jobs/:jobid/vertices/:vertexid/subtasktimes Returns ------- dict Time-related information for all subtasks \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/subtasktimes\") def", "checkpoint ids of the job_id. Parameters ---------- job_id: str 32-character hexadecimal string value", "directory. cancel_job: bool If it is True, it also stops the job after", "job, aggregated across the respective subtasks. Endpoint: [GET] /jobs/:jobid/accumulators Parameters ---------- job_id: str", "len(set(agg_modes).difference(set(supported_agg_modes))) > 0: raise RestException( f\"The provided aggregation modes list contains invalid value.", "hexadecimal string value that identifies a vertex. Returns ------- JobVertexClient JobVertexClient instance that", "details of the subtask are also returned. Returns ------- dict \"\"\" checkpoint_details =", "method. Default: <all subtasks>. Returns ------- dict Key-value pairs of metrics. \"\"\" if", "Checkpointing configuration of the selected job. \"\"\" return _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/config\", http_method=\"GET\" ) def", "identifies a subtask. 
attempt_id: int (Optional) Positive integer value that identifies an execution", "def stop(self, job_id, target_directory, drain=False): \"\"\" Stops a job with a savepoint. This", "if job_ids is None: job_ids = self.job_ids() params = { \"get\": \",\".join(metric_names), \"agg\":", "self.get(subtask_id)[\"attempt\"] return _execute_rest_request( url=f\"{self.prefix_url}/{subtask_id}/attempts/{attempt_id}/accumulators\" ) class JobVertexClient: def __init__(self, prefix, job_id, vertex_id): \"\"\"", "is None: agg_modes = supported_agg_modes if len(set(agg_modes).difference(set(supported_agg_modes))) > 0: raise RestException( f\"The provided", "list: {','.join(agg_modes)}\" ) if job_ids is None: job_ids = self.job_ids() params = {", "self.trigger_id = trigger_id @property def status(self): return _execute_rest_request( url=f\"{self._prefix}/{self.job_id}/{self._type_name}/{self.trigger_id}\" ) class JobVertexSubtaskClient: def", "current state. Endpoint: [GET] /jobs Returns ------- list List of jobs and their", "a location on a distributed file-system or Object Store. Draining emits the maximum", "dict Checkpointing statistics for the selected job: counts, summary, latest and history. \"\"\"", "32-character hexadecimal string value that identifies a job. Returns ------- bool True if", "subtasks. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid Returns ------- dict details for a task. \"\"\" return", "def subtask_ids(self): \"\"\" Returns the subtask identifiers. Returns ------- list Positive integer list", "id Returns ------- dict Details of the selected attempt. \"\"\" if attempt_id is", "= self.subtask_ids() params = { \"get\": \",\".join(metric_names), \"agg\": \",\".join(agg_modes), \"subtasks\": \",\".join([str(elem) for elem", "all subtasks of a task. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/watermarks Returns ------- list Watermarks for", "existing jobs. 
\"\"\" return _execute_rest_request(url=f\"{self.prefix}/overview\")[\"jobs\"] def metric_names(self): \"\"\" Returns the supported metric names.", "of savepoint. \"\"\" trigger_id = _execute_rest_request( url=f\"{self.prefix}/{job_id}/savepoints\", http_method=\"POST\", accepted_status_code=202, json={\"cancel-job\": cancel_job, \"target-directory\": target_directory},", "f\"{self._prefix}/{self.job_id}/vertices/{self.vertex_id}\" @property def subtasks(self): return JobVertexSubtaskClient(self.prefix_url) def details(self): \"\"\" Returns details for a", "overview over all jobs and their current state. Endpoint: [GET] /jobs Returns -------", "be used to query the status of savepoint. \"\"\" data = { \"drain\":", "metric_names is None: metric_names = self.metric_names() params = {\"get\": \",\".join(metric_names)} query_result = _execute_rest_request(", "\"\"\" res = _execute_rest_request( url=f\"{self.prefix}/{job_id}\", http_method=\"PATCH\", accepted_status_code=202 ) if len(res) < 1: return", "raise RestException( f\"The provided aggregation modes list contains invalid value. Supported aggregation \"", "Returns ------- dict Details of the selected attempt. \"\"\" if attempt_id is None:", "return a 'triggerid' for further query identifier. Endpoint: [GET] /jobs/:jobid/rescaling Notes ----- Using", "trigger_id = _execute_rest_request( url=f\"{self.prefix}/{job_id}/rescaling\", http_method=\"PATCH\", params=params )[\"triggerid\"] return JobTrigger(self.prefix, \"rescaling\", job_id, trigger_id) def", "the current or latest execution attempt of a subtask. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex Parameters", "subtask. attempt_id: int (Optional) Positive integer value that identifies an execution attempt. Default:", "None: job_ids = self.job_ids() params = { \"get\": \",\".join(metric_names), \"agg\": \",\".join(agg_modes), \"jobs\": \",\".join(job_ids),", "job_id): \"\"\" Returns the result of a job execution. 
Gives access to the", "query identifier. Attention: The target directory has to be a location accessible by", "_execute_rest_request(url=f\"{self.prefix_url}/metrics\") ] def metrics(self, metric_names=None, agg_modes=None, subtask_ids=None): \"\"\" Provides access to aggregated subtask", "you want to fully shut down your job without leaving any unhandled events", "of a subtask. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex Parameters ---------- subtask_id: int Positive integer value", "Returns checkpoint ids of the job_id. Parameters ---------- job_id: str 32-character hexadecimal string", "trigger_id) def create_savepoint(self, job_id, target_directory, cancel_job=False): \"\"\" Triggers a savepoint, and optionally cancels", "Endpoint: [GET] /jobs/:jobid/accumulators Parameters ---------- job_id: str 32-character hexadecimal string value that identifies", "list of subtask ids. \"\"\" return [elem[\"subtask\"] for elem in self.accumulators()[\"subtasks\"]] def accumulators(self):", "by task manager. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/taskmanagers Returns ------- dict Task information aggregated by", "subtask metrics. By default it returns with all existing metric names. Endpoint: [GET]", "attempt_id=None): \"\"\" Returns the accumulators of an execution attempt of a subtask. Multiple", "value that identifies a job. Returns ------- bool True if the job has", "<all metrics> agg_modes: list (optional) List of aggregation modes which should be calculated.", "Triggers a savepoint, and optionally cancels the job afterwards. This async operation would", "Job configuration \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/config\") def get_exceptions(self, job_id): \"\"\" Returns the most recent", "identifies a job. Returns ------- dict Job configuration \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/config\") def get_exceptions(self,", "maximum watermark before stopping the job. 
default: False Returns ------- JobTrigger Object that", "subtasks \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/subtasktimes\") def taskmanagers(self): \"\"\" Returns task information aggregated by task", "modes which should be calculated. Available aggregations are: \"min, max, sum, avg\". Default:", "with a savepoint. This async operation would return a JobTrigger for further query", "self.prefix = f\"{prefix}/jobs\" def all(self): \"\"\" Returns an overview over all jobs and", "raise RestHandlerException because this rescaling is temporarily disabled. See FLINK-12312. Parameters ---------- job_id:", "be a location accessible by both the JobManager(s) and TaskManager(s) e.g. a location", "stats are not available. Returns ------- dict Backpressure information \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/backpressure\") def", "\"\"\" Provides access to aggregated subtask metrics. By default it returns with all", "checkpoint_details = _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}\", http_method=\"GET\", ) if not show_subtasks: return checkpoint_details subtasks =", "\"\"\" return _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/config\", http_method=\"GET\" ) def get_checkpoints(self, job_id): \"\"\" Returns checkpointing statistics", "[GET] /jobs/:jobid/checkpoints Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a", "of rescaling. \"\"\" params = {\"parallelism\": parallelism} trigger_id = _execute_rest_request( url=f\"{self.prefix}/{job_id}/rescaling\", http_method=\"PATCH\", params=params", "TaskManager(s) e.g. a location on a distributed file-system or Object Store. Parameters ----------", "http_method=\"GET\" ) def get_checkpoints(self, job_id): \"\"\" Returns checkpointing statistics for a job. Endpoint:", "JobVertexSubtaskClient: def __init__(self, prefix): \"\"\" Constructor. 
Parameters ---------- prefix: str REST API url", "http_method=\"GET\" ) def get_checkpoint_ids(self, job_id): \"\"\" Returns checkpoint ids of the job_id. Parameters", "url=f\"{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}/subtasks/{vertex_id}\", http_method=\"GET\", ) checkpoint_details[\"subtasks\"] = subtasks return checkpoint_details def rescale(self, job_id, parallelism): \"\"\"", "= self.metric_names() params = {\"get\": \",\".join(metric_names)} query_result = _execute_rest_request( url=f\"{self.prefix_url}/metrics\", params=params ) result", "string value that identifies a job. Returns ------- dict Job configuration \"\"\" return", "self.vertex_id = vertex_id @property def prefix_url(self): return f\"{self._prefix}/{self.job_id}/vertices/{self.vertex_id}\" @property def subtasks(self): return JobVertexSubtaskClient(self.prefix_url)", "bool (Optional) If it is True, it emits the maximum watermark before stopping", "execution attempt. Default: current execution attempt's id Returns ------- dict Details of the", "should be included in the response. Returns ------- dict Accumulators for all task.", "params = {} if include_serialized_value is not None: params[\"includeSerializedValue\"] = ( \"true\" if", "it also stops the job after the savepoint creation. Returns ------- JobTrigger Object", "and their current state. Endpoint: [GET] /jobs Returns ------- list List of jobs", "= elem.pop(\"id\") result[metric_name] = elem[\"value\"] return result def subtasktimes(self): \"\"\" Returns time-related information", "1: return True else: return False def stop(self, job_id, target_directory, drain=False): \"\"\" Stops", "jobs. Endpoint: [GET] /jobs/metrics Parameters ---------- metric_names: list (optional) List of selected specific", "attempt of a subtask. Multiple execution attempts happen in case of failure/recovery. Endpoint:", "dict Task metrics. 
\"\"\" if metric_names is None: metric_names = self.metric_names() params =", "\"savepoints\", job_id, trigger_id) def get_vertex(self, job_id, vertex_id): \"\"\" Returns a JobVertexClient. Parameters ----------", "32-character hexadecimal string value that identifies a job. Returns ------- dict Checkpointing configuration", "str 32-character hexadecimal string value that identifies a job. Returns ------- dict Dataflow", "or Object Store. Parameters ---------- job_id: str 32-character hexadecimal string value that identifies", "failure/recovery. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex/attempts/:attempt Parameters ---------- subtask_id: int Positive integer value that identifies", "agg_modes: list (optional) List of aggregation modes which should be calculated. Available aggregations", "created by this job. Endpoint: [GET] /jobs/:jobid/execution-result Parameters ---------- job_id: str 32-character hexadecimal", "used to query the status of savepoint. \"\"\" trigger_id = _execute_rest_request( url=f\"{self.prefix}/{job_id}/savepoints\", http_method=\"POST\",", "plan \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/plan\")[\"plan\"] def get_vertex_ids(self, job_id): \"\"\" Returns the ids of vertices", "def get_exceptions(self, job_id): \"\"\" Returns the most recent exceptions that have been handled", "Notes ----- The target directory has to be a location accessible by both", "overview(self): \"\"\" Returns an overview over all jobs. Endpoint: [GET] /jobs/overview Returns -------", "= _execute_rest_request( url=f\"{self.prefix}/{job_id}\", http_method=\"PATCH\", accepted_status_code=202 ) if len(res) < 1: return True else:", "None: metric_names = self.metric_names() supported_agg_modes = [\"min\", \"max\", \"sum\", \"avg\"] if agg_modes is", "job_id, parallelism): \"\"\" Triggers the rescaling of a job. 
This async operation would", "elem in query_result: metric_name = elem.pop(\"id\") result[metric_name] = elem return result def get(self,", "------- dict Job configuration \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/config\") def get_exceptions(self, job_id): \"\"\" Returns the", "---------- prefix: str REST API url prefix. It must contain the host, port", "else \"false\" ) return _execute_rest_request( url=f\"{self.prefix}/{job_id}/accumulators\", http_method=\"GET\", params=params ) def get_checkpointing_configuration(self, job_id): \"\"\"", "The deprecated status means that the back pressure stats are not available. Returns", "for a task. \"\"\" return _execute_rest_request(url=self.prefix_url) def backpressure(self): \"\"\" Returns back-pressure information for", "[GET] /jobs/:jobid/stop Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a", "hexadecimal string value that identifies a job. Returns ------- bool True if the", "Terminates a job. Endpoint: [PATCH] /jobs/:jobid Parameters ---------- job_id: str 32-character hexadecimal string", "task. \"\"\" params = {} if include_serialized_value is not None: params[\"includeSerializedValue\"] = (", "dict Details of the selected attempt. \"\"\" if attempt_id is None: return self.get(subtask_id)", "return [elem[\"id\"] for elem in self.get(job_id)[\"vertices\"]] def get_accumulators(self, job_id, include_serialized_value=None): \"\"\" Returns the", "def details(self): \"\"\" Returns details for a task, with a summary for each", "{ \"get\": \",\".join(metric_names), \"agg\": \",\".join(agg_modes), \"jobs\": \",\".join(job_ids), } query_result = _execute_rest_request( url=f\"{self.prefix}/metrics\", params=params", "result def subtasktimes(self): \"\"\" Returns time-related information for all subtasks of a task.", "url prefix. It must contain the host, port pair. \"\"\" self._prefix = prefix", "the supported metric names. 
Returns ------- list List of metric names. \"\"\" return", "pair. \"\"\" self.prefix = f\"{prefix}/jobs\" def all(self): \"\"\" Returns an overview over all", "the job after the savepoint creation. Returns ------- JobTrigger Object that can be", "self.metric_names() params = {\"get\": \",\".join(metric_names)} query_result = _execute_rest_request( url=f\"{self.prefix}/{job_id}/metrics\", params=params ) return dict([(elem[\"id\"],", "elem return result def get(self, subtask_id): \"\"\" Returns details of the current or", "metrics> agg_modes: list (optional) List of aggregation modes which should be calculated. Available", "Default: <all subtasks>. Returns ------- dict Key-value pairs of metrics. \"\"\" if metric_names", "counts, summary, latest and history. \"\"\" return _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints\", http_method=\"GET\" ) def get_checkpoint_ids(self,", "watermarks for all subtasks of a task. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/watermarks Returns ------- list", "\"\"\" Stops a job with a savepoint. This async operation would return a", "def get_checkpoint_ids(self, job_id): \"\"\" Returns checkpoint ids of the job_id. Parameters ---------- job_id:", "identifies a job. parallelism: int Positive integer value that specifies the desired parallelism.", "metric_name = elem.pop(\"id\") result[metric_name] = elem return result def get(self, subtask_id): \"\"\" Returns", "accumulators for all subtasks of a task. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/accumulators Returns ------- dict", "to query the status of savepoint. \"\"\" trigger_id = _execute_rest_request( url=f\"{self.prefix}/{job_id}/savepoints\", http_method=\"POST\", accepted_status_code=202,", "subtasks of a task. \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/watermarks\") class JobsClient: def __init__(self, prefix): \"\"\"", "Returns ------- bool True if the job has been canceled, otherwise False. \"\"\"", "\"min, max, sum, avg\". 
Default: <all modes> subtask_ids: list List of positive integers", "that identifies a job. Returns ------- list List of identifiers. \"\"\" return [elem[\"id\"]", "job. include_serialized_value: bool (Optional) Boolean value that specifies whether serialized user task accumulators", "------- list Positive integer list of subtask ids. \"\"\" return [elem[\"subtask\"] for elem", "Object that can be used to query the status of savepoint. \"\"\" trigger_id", "vertex_id @property def prefix_url(self): return f\"{self._prefix}/{self.job_id}/vertices/{self.vertex_id}\" @property def subtasks(self): return JobVertexSubtaskClient(self.prefix_url) def details(self):", "Positive integer list of subtask ids. \"\"\" return [elem[\"subtask\"] for elem in self.accumulators()[\"subtasks\"]]", "list List of existing jobs. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/overview\")[\"jobs\"] def metric_names(self): \"\"\" Returns the", "def get_checkpoints(self, job_id): \"\"\" Returns checkpointing statistics for a job. Endpoint: [GET] /jobs/:jobid/checkpoints", "a job. target_directory: str Savepoint target directory. cancel_job: bool If it is True,", "job_ids(self): \"\"\" Returns the list of job_ids. Returns ------- list List of job", "for further query identifier. Endpoint: [GET] /jobs/:jobid/rescaling Notes ----- Using Flink version 1.12,", "\"\"\" return _execute_rest_request(url=self.prefix)[\"jobs\"] def job_ids(self): \"\"\" Returns the list of job_ids. Returns -------", "Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job. include_serialized_value:", "string value that identifies a job. Returns ------- dict Checkpointing configuration of the", "the dataflow plan of a job. Endpoint: [GET] /jobs/:jobid/plan Parameters ---------- job_id: str", "job. metric_names: list (optional) List of selected specific metric names. 
Default: <all metrics>", "(Optional) If it is True, it emits the maximum watermark before stopping the", "their current state. \"\"\" return _execute_rest_request(url=self.prefix)[\"jobs\"] def job_ids(self): \"\"\" Returns the list of", "the maximum watermark before stopping the job. default: False Returns ------- JobTrigger Object", "accumulators for all tasks of a job, aggregated across the respective subtasks. Endpoint:", "elem in self.get(job_id)[\"vertices\"]] def get_accumulators(self, job_id, include_serialized_value=None): \"\"\" Returns the accumulators for all", "Available aggregations are: \"min, max, sum, avg\". Default: <all modes> subtask_ids: list List", "None: attempt_id = self.get(subtask_id)[\"attempt\"] return _execute_rest_request( url=f\"{self.prefix_url}/{subtask_id}/attempts/{attempt_id}/accumulators\" ) class JobVertexClient: def __init__(self, prefix,", "version 1.12, the method will raise RestHandlerException because this rescaling is temporarily disabled.", "for all tasks of a job, aggregated across the respective subtasks. Endpoint: [GET]", "a 'triggerid' for further query identifier. Endpoint: [GET] /jobs/:jobid/rescaling Notes ----- Using Flink", "status of rescaling. \"\"\" params = {\"parallelism\": parallelism} trigger_id = _execute_rest_request( url=f\"{self.prefix}/{job_id}/rescaling\", http_method=\"PATCH\",", "\",\".join(job_ids), } query_result = _execute_rest_request( url=f\"{self.prefix}/metrics\", params=params ) result = {} for elem", "sampling if necessary. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/backpressure Notes ----- The deprecated status means that", "list Watermarks for all subtasks of a task. \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/watermarks\") class JobsClient:", "------- list List of existing jobs. 
\"\"\" return _execute_rest_request(url=f\"{self.prefix}/overview\")[\"jobs\"] def metric_names(self): \"\"\" Returns", "watermark is emitted, all event time timers will fire, allowing you to process", "bool True if the job has been canceled, otherwise False. \"\"\" res =", "def get_vertex_ids(self, job_id): \"\"\" Returns the ids of vertices of the selected job.", "value that identifies an execution attempt. Default: current execution attempt's id Returns -------", "hexadecimal string value that identifies a job. vertex_id: str 32-character hexadecimal string value", "dict Accumulators for all task. \"\"\" params = {} if include_serialized_value is not", "integer list of subtask ids. \"\"\" return [elem[\"subtask\"] for elem in self.accumulators()[\"subtasks\"]] def", "aggregations are: \"min, max, sum, avg\". Default: <all modes> job_ids: list List of", "access to aggregated subtask metrics. By default it returns with all existing metric", "is temporarily disabled. See FLINK-12312. Parameters ---------- job_id: str 32-character hexadecimal string value", "def prefix_url(self): return f\"{self._prefix}/{self.job_id}/vertices/{self.vertex_id}\" @property def subtasks(self): return JobVertexSubtaskClient(self.prefix_url) def details(self): \"\"\" Returns", "the job. default: False Returns ------- JobTrigger Object that can be used to", "that identifies a checkpoint. 
show_subtasks: bool If it is True, the details of", "res = _execute_rest_request( url=f\"{self.prefix}/{job_id}\", http_method=\"PATCH\", accepted_status_code=202 ) if len(res) < 1: return True", "( \"true\" if include_serialized_value else \"false\" ) return _execute_rest_request( url=f\"{self.prefix}/{job_id}/accumulators\", http_method=\"GET\", params=params )", "= prefix self.job_id = job_id self.vertex_id = vertex_id @property def prefix_url(self): return f\"{self._prefix}/{self.job_id}/vertices/{self.vertex_id}\"", "parallelism} trigger_id = _execute_rest_request( url=f\"{self.prefix}/{job_id}/rescaling\", http_method=\"PATCH\", params=params )[\"triggerid\"] return JobTrigger(self.prefix, \"rescaling\", job_id, trigger_id)", "str 32-character hexadecimal string value that identifies a job. Returns ------- dict Details", "of a job. Endpoint: [GET] /jobs/:jobid Parameters ---------- job_id: str 32-character hexadecimal string", "tasks of a job, aggregated across the respective subtasks. Endpoint: [GET] /jobs/:jobid/accumulators Parameters", "\"false\" ) return _execute_rest_request( url=f\"{self.prefix}/{job_id}/accumulators\", http_method=\"GET\", params=params ) def get_checkpointing_configuration(self, job_id): \"\"\" Returns", "a task, with a summary for each of its subtasks. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid", "event time timers will fire, allowing you to process events that depend on", "an overview over all jobs. Endpoint: [GET] /jobs/overview Returns ------- list List of", "for vertex_id in checkpoint_details[\"tasks\"].keys(): subtasks[vertex_id] = _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}/subtasks/{vertex_id}\", http_method=\"GET\", ) checkpoint_details[\"subtasks\"] = subtasks", "value that identifies a subtask. attempt_id: int (Optional) Positive integer value that identifies", "The list of valid subtask ids is available through the subtask_ids() method. 
Default:", "distributed file-system or Object Store. Draining emits the maximum watermark before stopping the", "the execution time of the job and to all accumulators created by this", "Endpoint: [GET] /jobs/:jobid/checkpoints/config Parameters ---------- job_id: str 32-character hexadecimal string value that identifies", "information aggregated by task manager. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/taskmanagers Returns ------- dict Task information", "_execute_rest_request(url=f\"{self.prefix}/overview\")[\"jobs\"] def metric_names(self): \"\"\" Returns the supported metric names. Returns ------- list List", "through the job_ids() method. Default: <all taskmanagers>. Returns ------- dict Aggregated job metrics.", "------- list List of jobs and their current state. \"\"\" return _execute_rest_request(url=self.prefix)[\"jobs\"] def", "query_result = _execute_rest_request( url=f\"{self.prefix_url}/metrics\", params=params ) result = {} for elem in query_result:", "from flink_rest_client.common import _execute_rest_request, RestException class JobTrigger: def __init__(self, prefix, type_name, job_id, trigger_id):", "----- The deprecated status means that the back pressure stats are not available.", "} query_result = _execute_rest_request( url=f\"{self.prefix}/metrics\", params=params ) result = {} for elem in", "hexadecimal string value that identifies a job. Returns ------- dict Checkpointing configuration of", "{} for vertex_id in checkpoint_details[\"tasks\"].keys(): subtasks[vertex_id] = _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}/subtasks/{vertex_id}\", http_method=\"GET\", ) checkpoint_details[\"subtasks\"] =", ") checkpoint_details[\"subtasks\"] = subtasks return checkpoint_details def rescale(self, job_id, parallelism): \"\"\" Triggers the", "str Savepoint target directory. drain: bool (Optional) If it is True, it emits", "that identifies a job. Returns ------- dict Details of the selected job. 
\"\"\"", "job_id: str 32-character hexadecimal string value that identifies a job. Returns ------- bool", "given list: {','.join(agg_modes)}\" ) if subtask_ids is None: subtask_ids = self.subtask_ids() params =", "agg_modes = supported_agg_modes if len(set(agg_modes).difference(set(supported_agg_modes))) > 0: raise RestException( f\"The provided aggregation modes", "should be calculated. Available aggregations are: \"min, max, sum, avg\". Default: <all modes>", "recent exceptions. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/exceptions\") def get_execution_result(self, job_id): \"\"\" Returns the result of", "savepoint. This async operation would return a JobTrigger for further query identifier. Attention:", "a distributed file-system or Object Store. Draining emits the maximum watermark before stopping", "timers will fire, allowing you to process events that depend on this timer", "Endpoint: [GET] /jobs/:jobid Parameters ---------- job_id: str 32-character hexadecimal string value that identifies", "all user-defined accumulators for all subtasks of a task. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/accumulators Returns", "Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasktimes Returns ------- dict Time-related information for all subtasks \"\"\" return", "subtask. Returns ------- dict \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/{subtask_id}\") def get_attempt(self, subtask_id, attempt_id=None): \"\"\" Returns", "rescaling is temporarily disabled. See FLINK-12312. Parameters ---------- job_id: str 32-character hexadecimal string", "data = { \"drain\": False if drain is None else drain, \"targetDirectory\": target_directory,", "Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job. Returns", "is emitted, all event time timers will fire, allowing you to process events", "been handled by Flink for this job. 
Endpoint: [GET] /jobs/:jobid/exceptions Parameters ---------- job_id:", "Returns the result of a job execution. Gives access to the execution time", "in _execute_rest_request(url=f\"{self.prefix_url}/metrics\") ] def metrics(self, metric_names=None): \"\"\" Provides access to task metrics. Endpoint:", "def terminate(self, job_id): \"\"\" Terminates a job. Endpoint: [PATCH] /jobs/:jobid Parameters ---------- job_id:", "get_checkpoint_details(self, job_id, checkpoint_id, show_subtasks=False): \"\"\" Returns details for a checkpoint. Endpoint: [GET] /jobs/:jobid/checkpoints/details/:checkpointid", "job_ids=None): \"\"\" Returns an overview over all jobs. Endpoint: [GET] /jobs/metrics Parameters ----------", "[GET] /jobs/:jobid/rescaling Notes ----- Using Flink version 1.12, the method will raise RestHandlerException", "current or latest execution attempt of a subtask. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex Parameters ----------", "timer (e.g. time windows or process functions). This is useful when you want", "Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/backpressure Notes ----- The deprecated status means that the back pressure", "elem.pop(\"id\") result[metric_name] = elem return result def get(self, subtask_id): \"\"\" Returns details of", "may initiate back-pressure sampling if necessary. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/backpressure Notes ----- The deprecated", "checkpoint ids. \"\"\" return [elem[\"id\"] for elem in self.get_checkpoints(job_id=job_id)[\"history\"]] def get_checkpoint_details(self, job_id, checkpoint_id,", "/jobs/:jobid/checkpoints/details/:checkpointid If show_subtasks is true: Endpoint: [GET] /jobs/:jobid/checkpoints/details/:checkpointid/subtasks/:vertexid Parameters ---------- job_id: str 32-character", "[GET] /jobs/:jobid/vertices/:vertexid/metrics Returns ------- dict Task metrics. 
\"\"\" if metric_names is None: metric_names", "= _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}\", http_method=\"GET\", ) if not show_subtasks: return checkpoint_details subtasks = {}", "a job. Returns ------- dict Dataflow plan \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/plan\")[\"plan\"] def get_vertex_ids(self, job_id):", "names. \"\"\" return [ elem[\"id\"] for elem in _execute_rest_request(url=f\"{self.prefix_url}/metrics\") ] def metrics(self, metric_names=None):", "\"\"\" Returns details for a task, with a summary for each of its", "selected job. Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a", "of valid jobs are available through the job_ids() method. Default: <all taskmanagers>. Returns", "Attention: The target directory has to be a location accessible by both the", "self.subtask_ids() params = { \"get\": \",\".join(metric_names), \"agg\": \",\".join(agg_modes), \"subtasks\": \",\".join([str(elem) for elem in", "= { \"get\": \",\".join(metric_names), \"agg\": \",\".join(agg_modes), \"subtasks\": \",\".join([str(elem) for elem in subtask_ids]), }", "execution attempt of a subtask. Multiple execution attempts happen in case of failure/recovery.", "must contain the host, port pair. \"\"\" self._prefix = prefix self.job_id = job_id", "dict Dataflow plan \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/plan\")[\"plan\"] def get_vertex_ids(self, job_id): \"\"\" Returns the ids", "list List of job ids. \"\"\" return [elem[\"id\"] for elem in self.all()] def", "_execute_rest_request(url=self.prefix_url) def backpressure(self): \"\"\" Returns back-pressure information for a job, and may initiate", "a job. Returns ------- dict Checkpointing configuration of the selected job. \"\"\" return", "subtask. Multiple execution attempts happen in case of failure/recovery. 
Parameters ---------- subtask_id: int", "{\"get\": \",\".join(metric_names)} query_result = _execute_rest_request( url=f\"{self.prefix}/{job_id}/metrics\", params=params ) return dict([(elem[\"id\"], elem[\"value\"]) for elem", "32-character hexadecimal string value that identifies a job. parallelism: int Positive integer value", "] def metrics(self, metric_names=None, agg_modes=None, subtask_ids=None): \"\"\" Provides access to aggregated subtask metrics.", "value that identifies a job. vertex_id: str 32-character hexadecimal string value that identifies", "\"\"\" data = { \"drain\": False if drain is None else drain, \"targetDirectory\":", "configuration \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/config\") def get_exceptions(self, job_id): \"\"\" Returns the most recent exceptions", "------- dict Job metrics. \"\"\" if metric_names is None: metric_names = self.metric_names() params", "url=f\"{self.prefix}/{job_id}/savepoints\", http_method=\"POST\", accepted_status_code=202, json={\"cancel-job\": cancel_job, \"target-directory\": target_directory}, )[\"request-id\"] return JobTrigger(self.prefix, \"savepoints\", job_id, trigger_id)", "self.metric_names() supported_agg_modes = [\"min\", \"max\", \"sum\", \"avg\"] if agg_modes is None: agg_modes =", "Dataflow plan \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/plan\")[\"plan\"] def get_vertex_ids(self, job_id): \"\"\" Returns the ids of", "for all subtasks of a task. \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/watermarks\") class JobsClient: def __init__(self,", "= elem.pop(\"id\") result[metric_name] = elem return result def get(self, job_id): \"\"\" Returns details", "prefix. It must contain the host, port pair. \"\"\" self._prefix = prefix @property", "back-pressure sampling if necessary. 
Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/backpressure Notes ----- The deprecated status means", "rescale(self, job_id, parallelism): \"\"\" Triggers the rescaling of a job. This async operation", "avg\". Default: <all modes> subtask_ids: list List of positive integers to select specific", "jobs are available through the job_ids() method. Default: <all taskmanagers>. Returns ------- dict", "Endpoint: [GET] /jobs Returns ------- list List of jobs and their current state.", "= elem return result def get(self, job_id): \"\"\" Returns details of a job.", "Returns details of a job. Endpoint: [GET] /jobs/:jobid Parameters ---------- job_id: str 32-character", "list (optional) List of selected specific metric names. Default: <all metrics> agg_modes: list", "access to the execution time of the job and to all accumulators created", "Endpoint: [GET] /jobs/:jobid/checkpoints/details/:checkpointid If show_subtasks is true: Endpoint: [GET] /jobs/:jobid/checkpoints/details/:checkpointid/subtasks/:vertexid Parameters ---------- job_id:", "given list: {','.join(agg_modes)}\" ) if job_ids is None: job_ids = self.job_ids() params =", "it emits the maximum watermark before stopping the job. default: False Returns -------", "elem[\"value\"]) for elem in query_result]) def get_plan(self, job_id): \"\"\" Returns the dataflow plan", "job_id, trigger_id): self._prefix = prefix self._type_name = type_name self.job_id = job_id self.trigger_id =", "afterwards. This async operation would return a JobTrigger for further query identifier. Endpoint:", "ids of the job_id. Parameters ---------- job_id: str 32-character hexadecimal string value that", "over all jobs. Endpoint: [GET] /jobs/overview Returns ------- list List of existing jobs.", "/jobs/:jobid/vertices/:vertexid/metrics Returns ------- dict Task metrics. \"\"\" if metric_names is None: metric_names =", "be calculated. Available aggregations are: \"min, max, sum, avg\". 
Default: <all modes> job_ids:", "hexadecimal string value that identifies a job. Returns ------- dict Dataflow plan \"\"\"", "watermark before stopping the job. When the watermark is emitted, all event time", "Returns the checkpointing configuration of the selected job_id Endpoint: [GET] /jobs/:jobid/checkpoints/config Parameters ----------", "details for a task, with a summary for each of its subtasks. Endpoint:", "\"\"\" Returns an overview over all jobs. Endpoint: [GET] /jobs/overview Returns ------- list", "get_checkpointing_configuration(self, job_id): \"\"\" Returns the checkpointing configuration of the selected job_id Endpoint: [GET]", "vertex_id): \"\"\" Constructor. Parameters ---------- prefix: str REST API url prefix. It must", "identifies a job. target_directory: str Savepoint target directory. cancel_job: bool If it is", "integers to select specific subtasks. The list of valid subtask ids is available", "checkpoint_details def rescale(self, job_id, parallelism): \"\"\" Triggers the rescaling of a job. This", "dict \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/{subtask_id}\") def get_attempt(self, subtask_id, attempt_id=None): \"\"\" Returns details of an", "Returns the dataflow plan of a job. Endpoint: [GET] /jobs/:jobid/plan Parameters ---------- job_id:", "specific subtasks. The list of valid subtask ids is available through the subtask_ids()", "details(self): \"\"\" Returns details for a task, with a summary for each of", "----- The target directory has to be a location accessible by both the", "this timer (e.g. time windows or process functions). 
This is useful when you", "\"\"\" return [elem[\"id\"] for elem in self.get(job_id)[\"vertices\"]] def get_accumulators(self, job_id, include_serialized_value=None): \"\"\" Returns", "http_method=\"GET\", params=params ) def get_checkpointing_configuration(self, job_id): \"\"\" Returns the checkpointing configuration of the", "job_id, trigger_id) def get_vertex(self, job_id, vertex_id): \"\"\" Returns a JobVertexClient. Parameters ---------- job_id:", "execution attempt. Default: current execution attempt's id Returns ------- dict The accumulators of", "\"\"\" self.prefix = f\"{prefix}/jobs\" def all(self): \"\"\" Returns an overview over all jobs", "is useful when you want to fully shut down your job without leaving", "the rescaling of a job. This async operation would return a 'triggerid' for", "this job. Endpoint: [GET] /jobs/:jobid/execution-result Parameters ---------- job_id: str 32-character hexadecimal string value", "get_attempt_accumulators(self, subtask_id, attempt_id=None): \"\"\" Returns the accumulators of an execution attempt of a", "self._prefix = prefix self.job_id = job_id self.vertex_id = vertex_id @property def prefix_url(self): return", "------- dict Key-value pairs of metrics. \"\"\" if metric_names is None: metric_names =", "attempt of a subtask. Multiple execution attempts happen in case of failure/recovery. Parameters", "in query_result]) def get_plan(self, job_id): \"\"\" Returns the dataflow plan of a job.", "of an execution attempt of a subtask. Multiple execution attempts happen in case", "e.g. a location on a distributed file-system or Object Store. Parameters ---------- job_id:", "= trigger_id @property def status(self): return _execute_rest_request( url=f\"{self._prefix}/{self.job_id}/{self._type_name}/{self.trigger_id}\" ) class JobVertexSubtaskClient: def __init__(self,", "value that identifies a job. target_directory: str Savepoint target directory. 
drain: bool (Optional)", "elem.pop(\"id\") result[metric_name] = elem[\"value\"] return result def subtasktimes(self): \"\"\" Returns time-related information for", "str 32-character hexadecimal string value that identifies a job. Returns ------- list List", "prefix. It must contain the host, port pair. \"\"\" self._prefix = prefix self.job_id", "recent exceptions that have been handled by Flink for this job. Endpoint: [GET]", "\"\"\" Provides access to task metrics. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/metrics Returns ------- dict Task", "in self.all()] def overview(self): \"\"\" Returns an overview over all jobs. Endpoint: [GET]", "metrics. \"\"\" if metric_names is None: metric_names = self.metric_names() supported_agg_modes = [\"min\", \"max\",", "@property def prefix_url(self): return f\"{self._prefix}/subtasks\" def subtask_ids(self): \"\"\" Returns the subtask identifiers. Returns", "metric_names = self.metric_names() params = {\"get\": \",\".join(metric_names)} query_result = _execute_rest_request( url=f\"{self.prefix}/{job_id}/metrics\", params=params )", "the watermarks for all subtasks of a task. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/watermarks Returns -------", "stops the job after the savepoint creation. Returns ------- JobTrigger Object that can", "it is True, it emits the maximum watermark before stopping the job. default:", "task metrics. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/metrics Returns ------- dict Task metrics. \"\"\" if metric_names", "return _execute_rest_request(url=f\"{self.prefix}/{job_id}/exceptions\") def get_execution_result(self, job_id): \"\"\" Returns the result of a job execution.", "the job. 
When the watermark is emitted, all event time timers will fire,", "json=data, )[\"request-id\"] return JobTrigger(self.prefix, \"savepoints\", job_id, trigger_id) def get_vertex(self, job_id, vertex_id): \"\"\" Returns", "Endpoint: [PATCH] /jobs/:jobid Parameters ---------- job_id: str 32-character hexadecimal string value that identifies", "len(res) < 1: return True else: return False def stop(self, job_id, target_directory, drain=False):", "shut down your job without leaving any unhandled events or state. Endpoint: [GET]", "list List of metric names. \"\"\" return [ elem[\"id\"] for elem in _execute_rest_request(url=f\"{self.prefix_url}/metrics\")", "all(self): \"\"\" Returns an overview over all jobs and their current state. Endpoint:", "result def get(self, subtask_id): \"\"\" Returns details of the current or latest execution", "= ( \"true\" if include_serialized_value else \"false\" ) return _execute_rest_request( url=f\"{self.prefix}/{job_id}/accumulators\", http_method=\"GET\", params=params", "job_id, target_directory, drain=False): \"\"\" Stops a job with a savepoint. This async operation", "value that identifies a vertex. Returns ------- JobVertexClient JobVertexClient instance that can execute", "state. \"\"\" return _execute_rest_request(url=self.prefix)[\"jobs\"] def job_ids(self): \"\"\" Returns the list of job_ids. Returns", "\"\"\" Returns checkpointing statistics for a job. Endpoint: [GET] /jobs/:jobid/checkpoints Parameters ---------- job_id:", "dict \"\"\" checkpoint_details = _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}\", http_method=\"GET\", ) if not show_subtasks: return checkpoint_details", "is True, it also stops the job after the savepoint creation. Returns -------", "get_metrics(self, job_id, metric_names=None): \"\"\" Provides access to job metrics. Endpoint: [GET] /jobs/:jobid/metrics Parameters", "be used to query the status of rescaling. 
\"\"\" params = {\"parallelism\": parallelism}", "accumulators(self): \"\"\" Returns all user-defined accumulators for all subtasks of a task. Endpoint:", "the host, port pair. \"\"\" self._prefix = prefix self.job_id = job_id self.vertex_id =", "{} if include_serialized_value is not None: params[\"includeSerializedValue\"] = ( \"true\" if include_serialized_value else", "identifiers. Returns ------- list Positive integer list of subtask ids. \"\"\" return [elem[\"subtask\"]", "job_id): \"\"\" Returns the most recent exceptions that have been handled by Flink", "by Flink for this job. Endpoint: [GET] /jobs/:jobid/exceptions Parameters ---------- job_id: str 32-character", "of a task. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasktimes Returns ------- dict Time-related information for all", "job. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/execution-result\") def get_metrics(self, job_id, metric_names=None): \"\"\" Provides access to job", "for elem in self.get_checkpoints(job_id=job_id)[\"history\"]] def get_checkpoint_details(self, job_id, checkpoint_id, show_subtasks=False): \"\"\" Returns details for", "included in the response. Returns ------- dict Accumulators for all task. \"\"\" params", "self.all()] def overview(self): \"\"\" Returns an overview over all jobs. Endpoint: [GET] /jobs/overview", "Returns ------- dict The most recent exceptions. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/exceptions\") def get_execution_result(self, job_id):", "job_id, target_directory, cancel_job=False): \"\"\" Triggers a savepoint, and optionally cancels the job afterwards.", "would return a 'triggerid' for further query identifier. Endpoint: [GET] /jobs/:jobid/rescaling Notes -----", "summary for each of its subtasks. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid Returns ------- dict details", "Draining emits the maximum watermark before stopping the job. When the watermark is", "selected attempt. 
\"\"\" if attempt_id is None: return self.get(subtask_id) return _execute_rest_request( url=f\"{self.prefix_url}/{subtask_id}/attempts/{attempt_id}\" )", "class JobVertexClient: def __init__(self, prefix, job_id, vertex_id): \"\"\" Constructor. Parameters ---------- prefix: str", "_execute_rest_request, RestException class JobTrigger: def __init__(self, prefix, type_name, job_id, trigger_id): self._prefix = prefix", "32-character hexadecimal string value that identifies a vertex. Returns ------- JobVertexClient JobVertexClient instance", "query the status of savepoint. \"\"\" trigger_id = _execute_rest_request( url=f\"{self.prefix}/{job_id}/savepoints\", http_method=\"POST\", accepted_status_code=202, json={\"cancel-job\":", "RestException class JobTrigger: def __init__(self, prefix, type_name, job_id, trigger_id): self._prefix = prefix self._type_name", "\"\"\" Returns the list of job_ids. Returns ------- list List of job ids.", "None else drain, \"targetDirectory\": target_directory, } trigger_id = _execute_rest_request( url=f\"{self.prefix}/{job_id}/stop\", http_method=\"POST\", accepted_status_code=202, json=data,", "state. Endpoint: [GET] /jobs Returns ------- list List of jobs and their current", "{','.join(agg_modes)}\" ) if subtask_ids is None: subtask_ids = self.subtask_ids() params = { \"get\":", "metric_names = self.metric_names() supported_agg_modes = [\"min\", \"max\", \"sum\", \"avg\"] if agg_modes is None:", "further query identifier. Endpoint: [GET] /jobs/:jobid/savepoints Notes ----- The target directory has to", "{\"parallelism\": parallelism} trigger_id = _execute_rest_request( url=f\"{self.prefix}/{job_id}/rescaling\", http_method=\"PATCH\", params=params )[\"triggerid\"] return JobTrigger(self.prefix, \"rescaling\", job_id,", "target_directory, drain=False): \"\"\" Stops a job with a savepoint. This async operation would", "metric_names: list (optional) List of selected specific metric names. 
Default: <all metrics> agg_modes:", "if drain is None else drain, \"targetDirectory\": target_directory, } trigger_id = _execute_rest_request( url=f\"{self.prefix}/{job_id}/stop\",", "Returns ------- list Watermarks for all subtasks of a task. \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/watermarks\")", "_execute_rest_request(url=f\"{self.prefix_url}/accumulators\") def metric_names(self): \"\"\" Returns the supported metric names. Returns ------- list List", "over all jobs and their current state. Endpoint: [GET] /jobs Returns ------- list", "if len(set(agg_modes).difference(set(supported_agg_modes))) > 0: raise RestException( f\"The provided aggregation modes list contains invalid", "subtasks(self): return JobVertexSubtaskClient(self.prefix_url) def details(self): \"\"\" Returns details for a task, with a", "[GET] /jobs/:jobid/checkpoints/config Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a", "in case of failure/recovery. Parameters ---------- subtask_id: int Positive integer value that identifies", "/jobs/:jobid Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job.", "dict The execution result of the selected job. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/execution-result\") def get_metrics(self,", "url=f\"{self.prefix}/{job_id}/checkpoints\", http_method=\"GET\" ) def get_checkpoint_ids(self, job_id): \"\"\" Returns checkpoint ids of the job_id.", "a job. checkpoint_id: int Long value that identifies a checkpoint. show_subtasks: bool If", "an overview over all jobs and their current state. Endpoint: [GET] /jobs Returns", "value that specifies the desired parallelism. Returns ------- JobTrigger Object that can be", "def taskmanagers(self): \"\"\" Returns task information aggregated by task manager. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/taskmanagers", "for each of its subtasks. 
Endpoint: [GET] /jobs/:jobid/vertices/:vertexid Returns ------- dict details for", ") if subtask_ids is None: subtask_ids = self.subtask_ids() params = { \"get\": \",\".join(metric_names),", "= self.metric_names() params = {\"get\": \",\".join(metric_names)} query_result = _execute_rest_request( url=f\"{self.prefix}/{job_id}/metrics\", params=params ) return", "string value that identifies a job. checkpoint_id: int Long value that identifies a", "used to query the status of savepoint. \"\"\" data = { \"drain\": False", "a job, and may initiate back-pressure sampling if necessary. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/backpressure Notes", "\" f\"modes: {','.join(supported_agg_modes)}; given list: {','.join(agg_modes)}\" ) if job_ids is None: job_ids =", "hexadecimal string value that identifies a job. checkpoint_id: int Long value that identifies", "stopping the job. When the watermark is emitted, all event time timers will", "all jobs and their current state. Endpoint: [GET] /jobs Returns ------- list List", "str 32-character hexadecimal string value that identifies a job. Returns ------- dict Job", "string value that identifies a job. parallelism: int Positive integer value that specifies", "else: return False def stop(self, job_id, target_directory, drain=False): \"\"\" Stops a job with", "of subtask ids. \"\"\" return [elem[\"subtask\"] for elem in self.accumulators()[\"subtasks\"]] def accumulators(self): \"\"\"", "subtask_id: int Positive integer value that identifies a subtask. Returns ------- dict \"\"\"", "32-character hexadecimal string value that identifies a job. 
Returns ------- dict Job configuration", "attempt's id Returns ------- dict The accumulators of the selected execution attempt of", "agg_modes is None: agg_modes = supported_agg_modes if len(set(agg_modes).difference(set(supported_agg_modes))) > 0: raise RestException( f\"The", "elem[\"id\"] for elem in _execute_rest_request(url=f\"{self.prefix_url}/metrics\") ] def metrics(self, metric_names=None): \"\"\" Provides access to", "emits the maximum watermark before stopping the job. When the watermark is emitted,", "the checkpointing configuration of the selected job_id Endpoint: [GET] /jobs/:jobid/checkpoints/config Parameters ---------- job_id:", "subtasks. The list of valid subtask ids is available through the subtask_ids() method.", "job_id, vertex_id): \"\"\" Constructor. Parameters ---------- prefix: str REST API url prefix. It", "for elem in _execute_rest_request(url=f\"{self.prefix_url}/metrics\") ] def metrics(self, metric_names=None): \"\"\" Provides access to task", "Endpoint: [GET] /jobs/:jobid/checkpoints Parameters ---------- job_id: str 32-character hexadecimal string value that identifies", "subtask ids. \"\"\" return [elem[\"subtask\"] for elem in self.accumulators()[\"subtasks\"]] def accumulators(self): \"\"\" Returns", "32-character hexadecimal string value that identifies a job. metric_names: list (optional) List of", "is None: metric_names = self.metric_names() params = {\"get\": \",\".join(metric_names)} query_result = _execute_rest_request( url=f\"{self.prefix}/{job_id}/metrics\",", "def get_metrics(self, job_id, metric_names=None): \"\"\" Provides access to job metrics. Endpoint: [GET] /jobs/:jobid/metrics", "identifies a vertex. Returns ------- JobVertexClient JobVertexClient instance that can execute vertex related", "result[metric_name] = elem return result def get(self, subtask_id): \"\"\" Returns details of the", "task manager. 
Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/taskmanagers Returns ------- dict Task information aggregated by task", "{} for elem in query_result: metric_name = elem.pop(\"id\") result[metric_name] = elem return result", "invalid value. Supported aggregation \" f\"modes: {','.join(supported_agg_modes)}; given list: {','.join(agg_modes)}\" ) if subtask_ids", "def metric_names(self): \"\"\" Returns the supported metric names. Returns ------- list List of", "subtasks. Endpoint: [GET] /jobs/:jobid/accumulators Parameters ---------- job_id: str 32-character hexadecimal string value that", "aggregated by task manager. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/taskmanagers Returns ------- dict Task information aggregated", "supported metric names. Returns ------- list List of metric names. \"\"\" return [", "returns with all existing metric names. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/metrics Parameters ---------- metric_names: list", "result def get(self, job_id): \"\"\" Returns details of a job. Endpoint: [GET] /jobs/:jobid", "integer value that specifies the desired parallelism. Returns ------- JobTrigger Object that can", "handled by Flink for this job. Endpoint: [GET] /jobs/:jobid/exceptions Parameters ---------- job_id: str", "metrics. Endpoint: [GET] /jobs/:jobid/metrics Parameters ---------- job_id: str 32-character hexadecimal string value that", "ids. \"\"\" return [elem[\"id\"] for elem in self.get_checkpoints(job_id=job_id)[\"history\"]] def get_checkpoint_details(self, job_id, checkpoint_id, show_subtasks=False):", "metric_names is None: metric_names = self.metric_names() supported_agg_modes = [\"min\", \"max\", \"sum\", \"avg\"] if", "leaving any unhandled events or state. 
Endpoint: [GET] /jobs/:jobid/stop Parameters ---------- job_id: str", "def backpressure(self): \"\"\" Returns back-pressure information for a job, and may initiate back-pressure", "Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/metrics Returns ------- dict Task metrics. \"\"\" if metric_names is None:", "<all modes> subtask_ids: list List of positive integers to select specific subtasks. The", "_execute_rest_request(url=f\"{self.prefix_url}/subtasktimes\") def taskmanagers(self): \"\"\" Returns task information aggregated by task manager. Endpoint: [GET]", "string value that identifies a job. Returns ------- dict Details of the selected", "dict The most recent exceptions. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/exceptions\") def get_execution_result(self, job_id): \"\"\" Returns", "which should be calculated. Available aggregations are: \"min, max, sum, avg\". Default: <all", "job. target_directory: str Savepoint target directory. drain: bool (Optional) If it is True,", "---------- job_id: str 32-character hexadecimal string value that identifies a job. checkpoint_id: int", "Returns details of an execution attempt of a subtask. Multiple execution attempts happen", "integer value that identifies an execution attempt. Default: current execution attempt's id Returns", "list List of 32-character hexadecimal strings to select specific jobs. The list of", "------- list List of job ids. \"\"\" return [elem[\"id\"] for elem in self.all()]", "elem in _execute_rest_request(url=f\"{self.prefix}/metrics\") ] def metrics(self, metric_names=None, agg_modes=None, job_ids=None): \"\"\" Returns an overview", "identifies a job. Returns ------- dict The execution result of the selected job.", "job ids. \"\"\" return [elem[\"id\"] for elem in self.all()] def overview(self): \"\"\" Returns", "with a summary for each of its subtasks. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid Returns -------", "that identifies an execution attempt. 
Default: current execution attempt's id Returns ------- dict", "for all task. \"\"\" params = {} if include_serialized_value is not None: params[\"includeSerializedValue\"]", "jobs. The list of valid jobs are available through the job_ids() method. Default:", "job_id: str 32-character hexadecimal string value that identifies a job. target_directory: str Savepoint", "all existing metric names. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/metrics Parameters ---------- metric_names: list (optional) List", "for all subtasks of a task. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/accumulators Returns ------- dict User-defined", "type_name self.job_id = job_id self.trigger_id = trigger_id @property def status(self): return _execute_rest_request( url=f\"{self._prefix}/{self.job_id}/{self._type_name}/{self.trigger_id}\"", "identifies an execution attempt. Default: current execution attempt's id Returns ------- dict The", "prefix_url(self): return f\"{self._prefix}/{self.job_id}/vertices/{self.vertex_id}\" @property def subtasks(self): return JobVertexSubtaskClient(self.prefix_url) def details(self): \"\"\" Returns details", "= {\"get\": \",\".join(metric_names)} query_result = _execute_rest_request( url=f\"{self.prefix_url}/metrics\", params=params ) result = {} for", "latest and history. \"\"\" return _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints\", http_method=\"GET\" ) def get_checkpoint_ids(self, job_id): \"\"\"", "REST API url prefix. It must contain the host, port pair. \"\"\" self._prefix", "the method will raise RestHandlerException because this rescaling is temporarily disabled. See FLINK-12312.", "\"\"\" params = {\"parallelism\": parallelism} trigger_id = _execute_rest_request( url=f\"{self.prefix}/{job_id}/rescaling\", http_method=\"PATCH\", params=params )[\"triggerid\"] return", "] def metrics(self, metric_names=None, agg_modes=None, job_ids=None): \"\"\" Returns an overview over all jobs.", "calculated. 
Available aggregations are: \"min, max, sum, avg\". Default: <all modes> subtask_ids: list", "------- dict Details of the selected job. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}\") def get_config(self, job_id):", "will fire, allowing you to process events that depend on this timer (e.g.", "def __init__(self, prefix, type_name, job_id, trigger_id): self._prefix = prefix self._type_name = type_name self.job_id", "Flink for this job. Endpoint: [GET] /jobs/:jobid/exceptions Parameters ---------- job_id: str 32-character hexadecimal", "the subtask_ids() method. Default: <all subtasks>. Returns ------- dict Key-value pairs of metrics.", "subtask ids is available through the subtask_ids() method. Default: <all subtasks>. Returns -------", "\"\"\" return _execute_rest_request(url=self.prefix_url) def backpressure(self): \"\"\" Returns back-pressure information for a job, and", "subtasks = {} for vertex_id in checkpoint_details[\"tasks\"].keys(): subtasks[vertex_id] = _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}/subtasks/{vertex_id}\", http_method=\"GET\", )", "return checkpoint_details subtasks = {} for vertex_id in checkpoint_details[\"tasks\"].keys(): subtasks[vertex_id] = _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}/subtasks/{vertex_id}\",", "Returns details for a task, with a summary for each of its subtasks.", "/jobs/:jobid/execution-result Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job.", "prefix. It must contain the host, port pair. \"\"\" self.prefix = f\"{prefix}/jobs\" def", "[GET] /jobs/:jobid/vertices/:vertexid/taskmanagers Returns ------- dict Task information aggregated by task manager. \"\"\" return", "JobVertexClient: def __init__(self, prefix, job_id, vertex_id): \"\"\" Constructor. Parameters ---------- prefix: str REST", "attempts happen in case of failure/recovery. 
Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex/attempts/:attempt Parameters ---------- subtask_id: int", "savepoint. \"\"\" trigger_id = _execute_rest_request( url=f\"{self.prefix}/{job_id}/savepoints\", http_method=\"POST\", accepted_status_code=202, json={\"cancel-job\": cancel_job, \"target-directory\": target_directory}, )[\"request-id\"]", "creation. Returns ------- JobTrigger Object that can be used to query the status", "current execution attempt's id Returns ------- dict The accumulators of the selected execution", "job. target_directory: str Savepoint target directory. cancel_job: bool If it is True, it", "------- list List of identifiers. \"\"\" return [elem[\"id\"] for elem in self.get(job_id)[\"vertices\"]] def", "fully shut down your job without leaving any unhandled events or state. Endpoint:", "value that identifies a job. Returns ------- dict Checkpointing statistics for the selected", "list (optional) List of selected specific metric names. Default: <all metrics> Returns -------", "each of its subtasks. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid Returns ------- dict details for a", "get(self, job_id): \"\"\" Returns details of a job. Endpoint: [GET] /jobs/:jobid Parameters ----------", "str Savepoint target directory. cancel_job: bool If it is True, it also stops", "f\"{self._prefix}/subtasks\" def subtask_ids(self): \"\"\" Returns the subtask identifiers. Returns ------- list Positive integer", "execution attempts happen in case of failure/recovery. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex/attempts/:attempt Parameters ---------- subtask_id:", "specific metric names. Default: <all metrics> Returns ------- dict Job metrics. 
\"\"\" if", "def status(self): return _execute_rest_request( url=f\"{self._prefix}/{self.job_id}/{self._type_name}/{self.trigger_id}\" ) class JobVertexSubtaskClient: def __init__(self, prefix): \"\"\" Constructor.", "Returns back-pressure information for a job, and may initiate back-pressure sampling if necessary.", "Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex/attempts/:attempt Parameters ---------- subtask_id: int Positive integer value that identifies a", "most recent exceptions that have been handled by Flink for this job. Endpoint:", "valid subtask ids is available through the subtask_ids() method. Default: <all subtasks>. Returns", "Task metrics. \"\"\" if metric_names is None: metric_names = self.metric_names() params = {\"get\":", "aggregated subtask metrics. By default it returns with all existing metric names. Endpoint:", "execution attempt of a subtask. \"\"\" if attempt_id is None: attempt_id = self.get(subtask_id)[\"attempt\"]", "identifies a job. Returns ------- list List of identifiers. \"\"\" return [elem[\"id\"] for", "subtask_id): \"\"\" Returns details of the current or latest execution attempt of a", "names. \"\"\" return [ elem[\"id\"] for elem in _execute_rest_request(url=f\"{self.prefix_url}/metrics\") ] def metrics(self, metric_names=None,", "of a subtask. \"\"\" if attempt_id is None: attempt_id = self.get(subtask_id)[\"attempt\"] return _execute_rest_request(", "Returns the accumulators of an execution attempt of a subtask. Multiple execution attempts", "is available through the subtask_ids() method. Default: <all subtasks>. Returns ------- dict Key-value", "desired parallelism. Returns ------- JobTrigger Object that can be used to query the", "0: raise RestException( f\"The provided aggregation modes list contains invalid value. Supported aggregation", "and TaskManager(s) e.g. a location on a distributed file-system or Object Store. Draining", "------- list List of checkpoint ids. 
\"\"\" return [elem[\"id\"] for elem in self.get_checkpoints(job_id=job_id)[\"history\"]]", "= {\"parallelism\": parallelism} trigger_id = _execute_rest_request( url=f\"{self.prefix}/{job_id}/rescaling\", http_method=\"PATCH\", params=params )[\"triggerid\"] return JobTrigger(self.prefix, \"rescaling\",", "def metrics(self, metric_names=None, agg_modes=None, job_ids=None): \"\"\" Returns an overview over all jobs. Endpoint:", "a JobVertexClient. Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a", "_execute_rest_request( url=f\"{self.prefix}/{job_id}\", http_method=\"PATCH\", accepted_status_code=202 ) if len(res) < 1: return True else: return", "string value that identifies a job. include_serialized_value: bool (Optional) Boolean value that specifies", "Endpoint: [GET] /jobs/:jobid/metrics Parameters ---------- job_id: str 32-character hexadecimal string value that identifies", "the job_ids() method. Default: <all taskmanagers>. Returns ------- dict Aggregated job metrics. \"\"\"", "if metric_names is None: metric_names = self.metric_names() supported_agg_modes = [\"min\", \"max\", \"sum\", \"avg\"]", "would return a JobTrigger for further query identifier. Attention: The target directory has", "\"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}\") def get_config(self, job_id): \"\"\" Returns the configuration of a job.", "Returns the configuration of a job. Endpoint: [GET] /jobs/:jobid/config Parameters ---------- job_id: str", "a subtask. \"\"\" if attempt_id is None: attempt_id = self.get(subtask_id)[\"attempt\"] return _execute_rest_request( url=f\"{self.prefix_url}/{subtask_id}/attempts/{attempt_id}/accumulators\"", "identifies an execution attempt. Default: current execution attempt's id Returns ------- dict Details", "[ elem[\"id\"] for elem in _execute_rest_request(url=f\"{self.prefix_url}/metrics\") ] def metrics(self, metric_names=None): \"\"\" Provides access", "vertices of the selected job. 
Parameters ---------- job_id: str 32-character hexadecimal string value", "url prefix. It must contain the host, port pair. \"\"\" self.prefix = f\"{prefix}/jobs\"", "The accumulators of the selected execution attempt of a subtask. \"\"\" if attempt_id", "Returns ------- dict User-defined accumulators \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/accumulators\") def metric_names(self): \"\"\" Returns the", "of failure/recovery. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex/attempts/:attempt Parameters ---------- subtask_id: int Positive integer value that", "attempt's id Returns ------- dict Details of the selected attempt. \"\"\" if attempt_id", "Endpoint: [GET] /jobs/:jobid/vertices/:vertexid Returns ------- dict details for a task. \"\"\" return _execute_rest_request(url=self.prefix_url)", "selected job_id Endpoint: [GET] /jobs/:jobid/checkpoints/config Parameters ---------- job_id: str 32-character hexadecimal string value", "\"\"\" return [elem[\"id\"] for elem in self.all()] def overview(self): \"\"\" Returns an overview", "_execute_rest_request( url=f\"{self.prefix}/metrics\", params=params ) result = {} for elem in query_result: metric_name =", "\"\"\" return _execute_rest_request(url=f\"{self.prefix}/overview\")[\"jobs\"] def metric_names(self): \"\"\" Returns the supported metric names. Returns -------", "\"jobs\": \",\".join(job_ids), } query_result = _execute_rest_request( url=f\"{self.prefix}/metrics\", params=params ) result = {} for", "Multiple execution attempts happen in case of failure/recovery. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex/attempts/:attempt Parameters ----------", "subtasktimes(self): \"\"\" Returns time-related information for all subtasks of a task. Endpoint: [GET]", "Aggregated job metrics. 
\"\"\" if metric_names is None: metric_names = self.metric_names() supported_agg_modes =", "[GET] /jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex/attempts/:attempt Parameters ---------- subtask_id: int Positive integer value that identifies a subtask.", "If show_subtasks is true: Endpoint: [GET] /jobs/:jobid/checkpoints/details/:checkpointid/subtasks/:vertexid Parameters ---------- job_id: str 32-character hexadecimal", "execution attempt of a subtask. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex Parameters ---------- subtask_id: int Positive", "Returns ------- dict Dataflow plan \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/plan\")[\"plan\"] def get_vertex_ids(self, job_id): \"\"\" Returns", "def get_checkpointing_configuration(self, job_id): \"\"\" Returns the checkpointing configuration of the selected job_id Endpoint:", "Default: <all modes> job_ids: list List of 32-character hexadecimal strings to select specific", "of a task. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/watermarks Returns ------- list Watermarks for all subtasks", "pressure stats are not available. Returns ------- dict Backpressure information \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/backpressure\")", "for elem in self.all()] def overview(self): \"\"\" Returns an overview over all jobs.", "Parameters ---------- subtask_id: int Positive integer value that identifies a subtask. attempt_id: int", "\"\"\" return [elem[\"id\"] for elem in self.get_checkpoints(job_id=job_id)[\"history\"]] def get_checkpoint_details(self, job_id, checkpoint_id, show_subtasks=False): \"\"\"", "of the selected job. \"\"\" return _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/config\", http_method=\"GET\" ) def get_checkpoints(self, job_id):", "Returns time-related information for all subtasks of a task. 
Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasktimes Returns", "url=f\"{self.prefix}/metrics\", params=params ) result = {} for elem in query_result: metric_name = elem.pop(\"id\")", "job_id): \"\"\" Returns details of a job. Endpoint: [GET] /jobs/:jobid Parameters ---------- job_id:", "information for all subtasks of a task. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasktimes Returns ------- dict", "Returns ------- dict details for a task. \"\"\" return _execute_rest_request(url=self.prefix_url) def backpressure(self): \"\"\"", "the job_id. Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a", "respective subtasks. Endpoint: [GET] /jobs/:jobid/accumulators Parameters ---------- job_id: str 32-character hexadecimal string value", "Returns the most recent exceptions that have been handled by Flink for this", "subtask_ids() method. Default: <all subtasks>. Returns ------- dict Key-value pairs of metrics. \"\"\"", "\"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/taskmanagers\") def watermarks(self): \"\"\" Returns the watermarks for all subtasks of", "identifies a subtask. Returns ------- dict \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/{subtask_id}\") def get_attempt(self, subtask_id, attempt_id=None):", "\"rescaling\", job_id, trigger_id) def create_savepoint(self, job_id, target_directory, cancel_job=False): \"\"\" Triggers a savepoint, and", "jobs and their current state. Endpoint: [GET] /jobs Returns ------- list List of", "the list of job_ids. Returns ------- list List of job ids. \"\"\" return", "attempt_id is None: return self.get(subtask_id) return _execute_rest_request( url=f\"{self.prefix_url}/{subtask_id}/attempts/{attempt_id}\" ) def get_attempt_accumulators(self, subtask_id, attempt_id=None):", "/jobs/:jobid/vertices/:vertexid/taskmanagers Returns ------- dict Task information aggregated by task manager. 
\"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/taskmanagers\")", "of the selected job. Parameters ---------- job_id: str 32-character hexadecimal string value that", "---------- job_id: str 32-character hexadecimal string value that identifies a job. parallelism: int", "used to query the status of rescaling. \"\"\" params = {\"parallelism\": parallelism} trigger_id", "is None: metric_names = self.metric_names() supported_agg_modes = [\"min\", \"max\", \"sum\", \"avg\"] if agg_modes", "job_id): \"\"\" Terminates a job. Endpoint: [PATCH] /jobs/:jobid Parameters ---------- job_id: str 32-character", "} trigger_id = _execute_rest_request( url=f\"{self.prefix}/{job_id}/stop\", http_method=\"POST\", accepted_status_code=202, json=data, )[\"request-id\"] return JobTrigger(self.prefix, \"savepoints\", job_id,", "subtasks>. Returns ------- dict Key-value pairs of metrics. \"\"\" if metric_names is None:", "Endpoint: [GET] /jobs/:jobid/stop Parameters ---------- job_id: str 32-character hexadecimal string value that identifies", "it is True, the details of the subtask are also returned. Returns -------", "_execute_rest_request(url=f\"{self.prefix}/{job_id}/execution-result\") def get_metrics(self, job_id, metric_names=None): \"\"\" Provides access to job metrics. Endpoint: [GET]", "method will raise RestHandlerException because this rescaling is temporarily disabled. See FLINK-12312. Parameters", "[GET] /jobs/:jobid/vertices/:vertexid/subtasks/metrics Parameters ---------- metric_names: list (optional) List of selected specific metric names.", "trigger_id) def terminate(self, job_id): \"\"\" Terminates a job. Endpoint: [PATCH] /jobs/:jobid Parameters ----------", "_execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/config\", http_method=\"GET\" ) def get_checkpoints(self, job_id): \"\"\" Returns checkpointing statistics for a", "details of a job. 
Endpoint: [GET] /jobs/:jobid Parameters ---------- job_id: str 32-character hexadecimal", "the status of rescaling. \"\"\" params = {\"parallelism\": parallelism} trigger_id = _execute_rest_request( url=f\"{self.prefix}/{job_id}/rescaling\",", "\"\"\" self._prefix = prefix @property def prefix_url(self): return f\"{self._prefix}/subtasks\" def subtask_ids(self): \"\"\" Returns", "value that identifies a job. Returns ------- dict The execution result of the", "of identifiers. \"\"\" return [elem[\"id\"] for elem in self.get(job_id)[\"vertices\"]] def get_accumulators(self, job_id, include_serialized_value=None):", "is True, the details of the subtask are also returned. Returns ------- dict", "def __init__(self, prefix, job_id, vertex_id): \"\"\" Constructor. Parameters ---------- prefix: str REST API", "the watermark is emitted, all event time timers will fire, allowing you to", "show_subtasks: bool If it is True, the details of the subtask are also", "can be used to query the status of rescaling. \"\"\" params = {\"parallelism\":", "params=params ) def get_checkpointing_configuration(self, job_id): \"\"\" Returns the checkpointing configuration of the selected", "job_id): \"\"\" Returns the configuration of a job. Endpoint: [GET] /jobs/:jobid/config Parameters ----------", "parallelism. Returns ------- JobTrigger Object that can be used to query the status", "Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/accumulators Returns ------- dict User-defined accumulators \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/accumulators\") def metric_names(self):", "\"\"\" Returns task information aggregated by task manager. 
Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/taskmanagers Returns -------", "None: params[\"includeSerializedValue\"] = ( \"true\" if include_serialized_value else \"false\" ) return _execute_rest_request( url=f\"{self.prefix}/{job_id}/accumulators\",", "/jobs/:jobid/plan Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job.", "in checkpoint_details[\"tasks\"].keys(): subtasks[vertex_id] = _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}/subtasks/{vertex_id}\", http_method=\"GET\", ) checkpoint_details[\"subtasks\"] = subtasks return checkpoint_details", "return JobVertexSubtaskClient(self.prefix_url) def details(self): \"\"\" Returns details for a task, with a summary", "for elem in self.accumulators()[\"subtasks\"]] def accumulators(self): \"\"\" Returns all user-defined accumulators for all", "to process events that depend on this timer (e.g. time windows or process", "\"\"\" Returns an overview over all jobs and their current state. Endpoint: [GET]", "otherwise False. \"\"\" res = _execute_rest_request( url=f\"{self.prefix}/{job_id}\", http_method=\"PATCH\", accepted_status_code=202 ) if len(res) <", "/jobs/:jobid/rescaling Notes ----- Using Flink version 1.12, the method will raise RestHandlerException because", "return _execute_rest_request(url=f\"{self.prefix}/{job_id}/config\") def get_exceptions(self, job_id): \"\"\" Returns the most recent exceptions that have", "to job metrics. Endpoint: [GET] /jobs/:jobid/metrics Parameters ---------- job_id: str 32-character hexadecimal string", "means that the back pressure stats are not available. Returns ------- dict Backpressure", "not None: params[\"includeSerializedValue\"] = ( \"true\" if include_serialized_value else \"false\" ) return _execute_rest_request(", "Returns ------- JobVertexClient JobVertexClient instance that can execute vertex related queries. \"\"\" return", "all subtasks of a task. 
\"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/watermarks\") class JobsClient: def __init__(self, prefix):", "If it is True, it emits the maximum watermark before stopping the job.", "because this rescaling is temporarily disabled. See FLINK-12312. Parameters ---------- job_id: str 32-character", "False. \"\"\" res = _execute_rest_request( url=f\"{self.prefix}/{job_id}\", http_method=\"PATCH\", accepted_status_code=202 ) if len(res) < 1:", "/jobs/:jobid/savepoints Notes ----- The target directory has to be a location accessible by", "status(self): return _execute_rest_request( url=f\"{self._prefix}/{self.job_id}/{self._type_name}/{self.trigger_id}\" ) class JobVertexSubtaskClient: def __init__(self, prefix): \"\"\" Constructor. Parameters", "a job. include_serialized_value: bool (Optional) Boolean value that specifies whether serialized user task", "is None else drain, \"targetDirectory\": target_directory, } trigger_id = _execute_rest_request( url=f\"{self.prefix}/{job_id}/stop\", http_method=\"POST\", accepted_status_code=202,", "metrics(self, metric_names=None, agg_modes=None, job_ids=None): \"\"\" Returns an overview over all jobs. Endpoint: [GET]", "of selected specific metric names. Default: <all metrics> Returns ------- dict Job metrics.", "identifier. Attention: The target directory has to be a location accessible by both", "a job. Returns ------- dict Job configuration \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/config\") def get_exceptions(self, job_id):", "TaskManager(s) e.g. a location on a distributed file-system or Object Store. Draining emits", "------- list List of metric names. \"\"\" return [ elem[\"id\"] for elem in", "further query identifier. Endpoint: [GET] /jobs/:jobid/rescaling Notes ----- Using Flink version 1.12, the", "/jobs Returns ------- list List of jobs and their current state. 
\"\"\" return", "Endpoint: [GET] /jobs/:jobid/config Parameters ---------- job_id: str 32-character hexadecimal string value that identifies", "port pair. \"\"\" self.prefix = f\"{prefix}/jobs\" def all(self): \"\"\" Returns an overview over", "metric_names=None, agg_modes=None, subtask_ids=None): \"\"\" Provides access to aggregated subtask metrics. By default it", "stopping the job. default: False Returns ------- JobTrigger Object that can be used", "is None: metric_names = self.metric_names() params = {\"get\": \",\".join(metric_names)} query_result = _execute_rest_request( url=f\"{self.prefix_url}/metrics\",", "Returns the accumulators for all tasks of a job, aggregated across the respective", "time windows or process functions). This is useful when you want to fully", "it is True, it also stops the job after the savepoint creation. Returns", "and their current state. \"\"\" return _execute_rest_request(url=self.prefix)[\"jobs\"] def job_ids(self): \"\"\" Returns the list", "avg\". Default: <all modes> job_ids: list List of 32-character hexadecimal strings to select", "\"agg\": \",\".join(agg_modes), \"subtasks\": \",\".join([str(elem) for elem in subtask_ids]), } query_result = _execute_rest_request( url=f\"{self.prefix_url}/metrics\",", "job. This async operation would return a 'triggerid' for further query identifier. Endpoint:", "str 32-character hexadecimal string value that identifies a job. parallelism: int Positive integer", "------- dict Task metrics. \"\"\" if metric_names is None: metric_names = self.metric_names() params", "for all subtasks of a task. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasktimes Returns ------- dict Time-related", "default it returns with all existing metric names. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/metrics Parameters ----------", "for a job. 
Endpoint: [GET] /jobs/:jobid/checkpoints Parameters ---------- job_id: str 32-character hexadecimal string", "Time-related information for all subtasks \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/subtasktimes\") def taskmanagers(self): \"\"\" Returns task", "attempt of a subtask. \"\"\" if attempt_id is None: attempt_id = self.get(subtask_id)[\"attempt\"] return", "Provides access to task metrics. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/metrics Returns ------- dict Task metrics.", "_execute_rest_request(url=f\"{self.prefix}/{job_id}/exceptions\") def get_execution_result(self, job_id): \"\"\" Returns the result of a job execution. Gives", "When the watermark is emitted, all event time timers will fire, allowing you", "str REST API url prefix. It must contain the host, port pair. \"\"\"", "value that identifies a job. Returns ------- dict Job configuration \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/config\")", "if len(res) < 1: return True else: return False def stop(self, job_id, target_directory,", "<all subtasks>. Returns ------- dict Key-value pairs of metrics. \"\"\" if metric_names is", "32-character hexadecimal string value that identifies a job. checkpoint_id: int Long value that", "all subtasks of a task. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasktimes Returns ------- dict Time-related information", "checkpoint_id: int Long value that identifies a checkpoint. show_subtasks: bool If it is", "Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job. vertex_id:", "value that identifies a job. Returns ------- dict Details of the selected job.", "params = { \"get\": \",\".join(metric_names), \"agg\": \",\".join(agg_modes), \"subtasks\": \",\".join([str(elem) for elem in subtask_ids]),", "It must contain the host, port pair. \"\"\" self._prefix = prefix self.job_id =", "port pair. 
\"\"\" self._prefix = prefix self.job_id = job_id self.vertex_id = vertex_id @property", "names. Returns ------- list List of metric names. \"\"\" return [ elem[\"id\"] for", "return _execute_rest_request(url=f\"{self.prefix_url}/subtasktimes\") def taskmanagers(self): \"\"\" Returns task information aggregated by task manager. Endpoint:", "Default: current execution attempt's id Returns ------- dict The accumulators of the selected", "a job. target_directory: str Savepoint target directory. drain: bool (Optional) If it is", "True else: return False def stop(self, job_id, target_directory, drain=False): \"\"\" Stops a job", "\"\"\" Terminates a job. Endpoint: [PATCH] /jobs/:jobid Parameters ---------- job_id: str 32-character hexadecimal", "def job_ids(self): \"\"\" Returns the list of job_ids. Returns ------- list List of", "a job with a savepoint. This async operation would return a JobTrigger for", "Endpoint: [GET] /jobs/:jobid/checkpoints/details/:checkpointid/subtasks/:vertexid Parameters ---------- job_id: str 32-character hexadecimal string value that identifies", "f\"modes: {','.join(supported_agg_modes)}; given list: {','.join(agg_modes)}\" ) if subtask_ids is None: subtask_ids = self.subtask_ids()", "metrics(self, metric_names=None): \"\"\" Provides access to task metrics. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/metrics Returns -------", "specific jobs. The list of valid jobs are available through the job_ids() method.", "Returns ------- dict Details of the selected job. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}\") def get_config(self,", "API url prefix. It must contain the host, port pair. \"\"\" self._prefix =", "job_ids is None: job_ids = self.job_ids() params = { \"get\": \",\".join(metric_names), \"agg\": \",\".join(agg_modes),", "result of the selected job. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/execution-result\") def get_metrics(self, job_id, metric_names=None): \"\"\"", "checkpoint. 
Endpoint: [GET] /jobs/:jobid/checkpoints/details/:checkpointid If show_subtasks is true: Endpoint: [GET] /jobs/:jobid/checkpoints/details/:checkpointid/subtasks/:vertexid Parameters ----------", "Provides access to job metrics. Endpoint: [GET] /jobs/:jobid/metrics Parameters ---------- job_id: str 32-character", "the selected job_id Endpoint: [GET] /jobs/:jobid/checkpoints/config Parameters ---------- job_id: str 32-character hexadecimal string", "metric_names(self): \"\"\" Returns the supported metric names. Returns ------- list List of metric", "can be used to query the status of savepoint. \"\"\" trigger_id = _execute_rest_request(", "get_execution_result(self, job_id): \"\"\" Returns the result of a job execution. Gives access to", ")[\"request-id\"] return JobTrigger(self.prefix, \"savepoints\", job_id, trigger_id) def get_vertex(self, job_id, vertex_id): \"\"\" Returns a", "ids of vertices of the selected job. Parameters ---------- job_id: str 32-character hexadecimal", "\"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/execution-result\") def get_metrics(self, job_id, metric_names=None): \"\"\" Provides access to job metrics.", "return _execute_rest_request(url=f\"{self.prefix_url}/accumulators\") def metric_names(self): \"\"\" Returns the supported metric names. Returns ------- list", "\"sum\", \"avg\"] if agg_modes is None: agg_modes = supported_agg_modes if len(set(agg_modes).difference(set(supported_agg_modes))) > 0:", "whether serialized user task accumulators should be included in the response. Returns -------", "= _execute_rest_request( url=f\"{self.prefix}/metrics\", params=params ) result = {} for elem in query_result: metric_name", "int (Optional) Positive integer value that identifies an execution attempt. Default: current execution", "ids. 
\"\"\" return [elem[\"subtask\"] for elem in self.accumulators()[\"subtasks\"]] def accumulators(self): \"\"\" Returns all", "result = {} for elem in query_result: metric_name = elem.pop(\"id\") result[metric_name] = elem[\"value\"]", "Store. Draining emits the maximum watermark before stopping the job. When the watermark", "return _execute_rest_request(url=f\"{self.prefix_url}/{subtask_id}\") def get_attempt(self, subtask_id, attempt_id=None): \"\"\" Returns details of an execution attempt", "[GET] /jobs/:jobid/metrics Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a", "to the execution time of the job and to all accumulators created by", "> 0: raise RestException( f\"The provided aggregation modes list contains invalid value. Supported", "(Optional) Positive integer value that identifies an execution attempt. Default: current execution attempt's", "32-character hexadecimal string value that identifies a job. include_serialized_value: bool (Optional) Boolean value", "Available aggregations are: \"min, max, sum, avg\". Default: <all modes> job_ids: list List", "@property def subtasks(self): return JobVertexSubtaskClient(self.prefix_url) def details(self): \"\"\" Returns details for a task,", "a job. Endpoint: [PATCH] /jobs/:jobid Parameters ---------- job_id: str 32-character hexadecimal string value", ") if not show_subtasks: return checkpoint_details subtasks = {} for vertex_id in checkpoint_details[\"tasks\"].keys():", "Positive integer value that identifies a subtask. Returns ------- dict \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/{subtask_id}\")", "Flink version 1.12, the method will raise RestHandlerException because this rescaling is temporarily", "def overview(self): \"\"\" Returns an overview over all jobs. Endpoint: [GET] /jobs/overview Returns", "List of checkpoint ids. 
\"\"\" return [elem[\"id\"] for elem in self.get_checkpoints(job_id=job_id)[\"history\"]] def get_checkpoint_details(self,", "async operation would return a JobTrigger for further query identifier. Endpoint: [GET] /jobs/:jobid/savepoints", "information aggregated by task manager. \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/taskmanagers\") def watermarks(self): \"\"\" Returns the", "subtask_ids = self.subtask_ids() params = { \"get\": \",\".join(metric_names), \"agg\": \",\".join(agg_modes), \"subtasks\": \",\".join([str(elem) for", "_execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints\", http_method=\"GET\" ) def get_checkpoint_ids(self, job_id): \"\"\" Returns checkpoint ids of the", "that identifies a job. vertex_id: str 32-character hexadecimal string value that identifies a", "target_directory: str Savepoint target directory. cancel_job: bool If it is True, it also", "\"\"\" Returns the most recent exceptions that have been handled by Flink for", "string value that identifies a job. Returns ------- dict The execution result of", "e.g. a location on a distributed file-system or Object Store. Draining emits the", "that identifies a job. Returns ------- dict Checkpointing statistics for the selected job:", "\"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/watermarks\") class JobsClient: def __init__(self, prefix): \"\"\" Constructor. Parameters ---------- prefix:", "both the JobManager(s) and TaskManager(s) e.g. a location on a distributed file-system or", "= { \"drain\": False if drain is None else drain, \"targetDirectory\": target_directory, }", ") def get_checkpointing_configuration(self, job_id): \"\"\" Returns the checkpointing configuration of the selected job_id", "{','.join(supported_agg_modes)}; given list: {','.join(agg_modes)}\" ) if subtask_ids is None: subtask_ids = self.subtask_ids() params", "---------- job_id: str 32-character hexadecimal string value that identifies a job. 
metric_names: list", "[PATCH] /jobs/:jobid Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a", "modes> job_ids: list List of 32-character hexadecimal strings to select specific jobs. The", "str 32-character hexadecimal string value that identifies a job. target_directory: str Savepoint target", "for this job. Endpoint: [GET] /jobs/:jobid/exceptions Parameters ---------- job_id: str 32-character hexadecimal string", "query_result = _execute_rest_request( url=f\"{self.prefix}/{job_id}/metrics\", params=params ) return dict([(elem[\"id\"], elem[\"value\"]) for elem in query_result])", "of the job_id. Parameters ---------- job_id: str 32-character hexadecimal string value that identifies", "by task manager. \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/taskmanagers\") def watermarks(self): \"\"\" Returns the watermarks for", "Constructor. Parameters ---------- prefix: str REST API url prefix. It must contain the", "list (optional) List of aggregation modes which should be calculated. Available aggregations are:", "job_id, trigger_id) def create_savepoint(self, job_id, target_directory, cancel_job=False): \"\"\" Triggers a savepoint, and optionally", "selected job. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}\") def get_config(self, job_id): \"\"\" Returns the configuration of", "async operation would return a JobTrigger for further query identifier. Attention: The target", "accepted_status_code=202, json={\"cancel-job\": cancel_job, \"target-directory\": target_directory}, )[\"request-id\"] return JobTrigger(self.prefix, \"savepoints\", job_id, trigger_id) def terminate(self,", "supported_agg_modes = [\"min\", \"max\", \"sum\", \"avg\"] if agg_modes is None: agg_modes = supported_agg_modes", "value that specifies whether serialized user task accumulators should be included in the", "to be a location accessible by both the JobManager(s) and TaskManager(s) e.g. a", "to select specific jobs. 
The list of valid jobs are available through the", "query_result: metric_name = elem.pop(\"id\") result[metric_name] = elem return result def get(self, subtask_id): \"\"\"", "_execute_rest_request(url=f\"{self.prefix}/{job_id}\") def get_config(self, job_id): \"\"\" Returns the configuration of a job. Endpoint: [GET]", "identifies a job. Returns ------- dict Checkpointing configuration of the selected job. \"\"\"", "Returns a JobVertexClient. Parameters ---------- job_id: str 32-character hexadecimal string value that identifies", "= _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}/subtasks/{vertex_id}\", http_method=\"GET\", ) checkpoint_details[\"subtasks\"] = subtasks return checkpoint_details def rescale(self, job_id,", "the selected job. Parameters ---------- job_id: str 32-character hexadecimal string value that identifies", "events or state. Endpoint: [GET] /jobs/:jobid/stop Parameters ---------- job_id: str 32-character hexadecimal string", "in query_result: metric_name = elem.pop(\"id\") result[metric_name] = elem return result def get(self, job_id):", "job_id: str 32-character hexadecimal string value that identifies a job. checkpoint_id: int Long", "return _execute_rest_request(url=f\"{self.prefix}/{job_id}\") def get_config(self, job_id): \"\"\" Returns the configuration of a job. Endpoint:", "Returns the ids of vertices of the selected job. Parameters ---------- job_id: str", "distributed file-system or Object Store. Parameters ---------- job_id: str 32-character hexadecimal string value", "has to be a location accessible by both the JobManager(s) and TaskManager(s) e.g.", "accessible by both the JobManager(s) and TaskManager(s) e.g. a location on a distributed", "a task. \"\"\" return _execute_rest_request(url=self.prefix_url) def backpressure(self): \"\"\" Returns back-pressure information for a", "selected specific metric names. 
Default: <all metrics> agg_modes: list (optional) List of aggregation", "return result def subtasktimes(self): \"\"\" Returns time-related information for all subtasks of a", "hexadecimal string value that identifies a job. target_directory: str Savepoint target directory. drain:", "that identifies a job. Returns ------- dict The most recent exceptions. \"\"\" return", "= prefix @property def prefix_url(self): return f\"{self._prefix}/subtasks\" def subtask_ids(self): \"\"\" Returns the subtask", "Multiple execution attempts happen in case of failure/recovery. Parameters ---------- subtask_id: int Positive", "= supported_agg_modes if len(set(agg_modes).difference(set(supported_agg_modes))) > 0: raise RestException( f\"The provided aggregation modes list", "for a checkpoint. Endpoint: [GET] /jobs/:jobid/checkpoints/details/:checkpointid If show_subtasks is true: Endpoint: [GET] /jobs/:jobid/checkpoints/details/:checkpointid/subtasks/:vertexid", "url=f\"{self.prefix_url}/metrics\", params=params ) result = {} for elem in query_result: metric_name = elem.pop(\"id\")", "JobsClient: def __init__(self, prefix): \"\"\" Constructor. Parameters ---------- prefix: str REST API url", "string value that identifies a job. target_directory: str Savepoint target directory. drain: bool", "Returns ------- dict Job configuration \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/config\") def get_exceptions(self, job_id): \"\"\" Returns", "job_id: str 32-character hexadecimal string value that identifies a job. parallelism: int Positive", "configuration of the selected job. \"\"\" return _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/config\", http_method=\"GET\" ) def get_checkpoints(self,", "= {} for elem in query_result: metric_name = elem.pop(\"id\") result[metric_name] = elem[\"value\"] return", "return result def get(self, subtask_id): \"\"\" Returns details of the current or latest", "taskmanagers>. Returns ------- dict Aggregated job metrics. 
\"\"\" if metric_names is None: metric_names", "want to fully shut down your job without leaving any unhandled events or", "pair. \"\"\" self._prefix = prefix self.job_id = job_id self.vertex_id = vertex_id @property def", "[GET] /jobs Returns ------- list List of jobs and their current state. \"\"\"", "\"\"\" Returns the checkpointing configuration of the selected job_id Endpoint: [GET] /jobs/:jobid/checkpoints/config Parameters", "to query the status of savepoint. \"\"\" data = { \"drain\": False if", "job. Endpoint: [GET] /jobs/:jobid Parameters ---------- job_id: str 32-character hexadecimal string value that", "= {\"get\": \",\".join(metric_names)} query_result = _execute_rest_request( url=f\"{self.prefix}/{job_id}/metrics\", params=params ) return dict([(elem[\"id\"], elem[\"value\"]) for", "Long value that identifies a checkpoint. show_subtasks: bool If it is True, the", "status of savepoint. \"\"\" data = { \"drain\": False if drain is None", "[GET] /jobs/metrics Parameters ---------- metric_names: list (optional) List of selected specific metric names.", "[GET] /jobs/:jobid/vertices/:vertexid/accumulators Returns ------- dict User-defined accumulators \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/accumulators\") def metric_names(self): \"\"\"", "------- dict Time-related information for all subtasks \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/subtasktimes\") def taskmanagers(self): \"\"\"", "params=params )[\"triggerid\"] return JobTrigger(self.prefix, \"rescaling\", job_id, trigger_id) def create_savepoint(self, job_id, target_directory, cancel_job=False): \"\"\"", "functions). This is useful when you want to fully shut down your job", "list: {','.join(agg_modes)}\" ) if subtask_ids is None: subtask_ids = self.subtask_ids() params = {", "hexadecimal string value that identifies a job. target_directory: str Savepoint target directory. cancel_job:", "str 32-character hexadecimal string value that identifies a job. 
Returns ------- bool True", "JobTrigger for further query identifier. Attention: The target directory has to be a", "subtask_ids: list List of positive integers to select specific subtasks. The list of", "(optional) List of selected specific metric names. Default: <all metrics> agg_modes: list (optional)", "\"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/accumulators\") def metric_names(self): \"\"\" Returns the supported metric names. Returns -------", "a task. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasktimes Returns ------- dict Time-related information for all subtasks", "if include_serialized_value is not None: params[\"includeSerializedValue\"] = ( \"true\" if include_serialized_value else \"false\"", "url=f\"{self.prefix}/{job_id}/stop\", http_method=\"POST\", accepted_status_code=202, json=data, )[\"request-id\"] return JobTrigger(self.prefix, \"savepoints\", job_id, trigger_id) def get_vertex(self, job_id,", "prefix self._type_name = type_name self.job_id = job_id self.trigger_id = trigger_id @property def status(self):", "directory. drain: bool (Optional) If it is True, it emits the maximum watermark", "port pair. \"\"\" self._prefix = prefix @property def prefix_url(self): return f\"{self._prefix}/subtasks\" def subtask_ids(self):", "jobs and their current state. \"\"\" return _execute_rest_request(url=self.prefix)[\"jobs\"] def job_ids(self): \"\"\" Returns the", "elem[\"id\"] for elem in _execute_rest_request(url=f\"{self.prefix_url}/metrics\") ] def metrics(self, metric_names=None, agg_modes=None, subtask_ids=None): \"\"\" Provides", "Returns ------- list List of existing jobs. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/overview\")[\"jobs\"] def metric_names(self): \"\"\"", "drain, \"targetDirectory\": target_directory, } trigger_id = _execute_rest_request( url=f\"{self.prefix}/{job_id}/stop\", http_method=\"POST\", accepted_status_code=202, json=data, )[\"request-id\"] return", "metric names. 
Returns ------- list List of metric names. \"\"\" return [ elem[\"id\"]", "down your job without leaving any unhandled events or state. Endpoint: [GET] /jobs/:jobid/stop", "class JobVertexSubtaskClient: def __init__(self, prefix): \"\"\" Constructor. Parameters ---------- prefix: str REST API", "that can be used to query the status of savepoint. \"\"\" data =", "exceptions. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/exceptions\") def get_execution_result(self, job_id): \"\"\" Returns the result of a", "The target directory has to be a location accessible by both the JobManager(s)", "value. Supported aggregation \" f\"modes: {','.join(supported_agg_modes)}; given list: {','.join(agg_modes)}\" ) if subtask_ids is", "task accumulators should be included in the response. Returns ------- dict Accumulators for", "def subtasktimes(self): \"\"\" Returns time-related information for all subtasks of a task. Endpoint:", "failure/recovery. Parameters ---------- subtask_id: int Positive integer value that identifies a subtask. attempt_id:", "\"drain\": False if drain is None else drain, \"targetDirectory\": target_directory, } trigger_id =", "a job, aggregated across the respective subtasks. Endpoint: [GET] /jobs/:jobid/accumulators Parameters ---------- job_id:", "attempt. Default: current execution attempt's id Returns ------- dict The accumulators of the", "of the subtask are also returned. Returns ------- dict \"\"\" checkpoint_details = _execute_rest_request(", "are: \"min, max, sum, avg\". Default: <all modes> subtask_ids: list List of positive", "be calculated. Available aggregations are: \"min, max, sum, avg\". Default: <all modes> subtask_ids:", "allowing you to process events that depend on this timer (e.g. time windows", "hexadecimal string value that identifies a job. Returns ------- dict The most recent", "JobTrigger Object that can be used to query the status of rescaling. 
\"\"\"", "str 32-character hexadecimal string value that identifies a job. include_serialized_value: bool (Optional) Boolean", "return _execute_rest_request(url=f\"{self.prefix_url}/watermarks\") class JobsClient: def __init__(self, prefix): \"\"\" Constructor. Parameters ---------- prefix: str", ") return _execute_rest_request( url=f\"{self.prefix}/{job_id}/accumulators\", http_method=\"GET\", params=params ) def get_checkpointing_configuration(self, job_id): \"\"\" Returns the", "List of metric names. \"\"\" return [ elem[\"id\"] for elem in _execute_rest_request(url=f\"{self.prefix_url}/metrics\") ]", "\"\"\" if metric_names is None: metric_names = self.metric_names() supported_agg_modes = [\"min\", \"max\", \"sum\",", "that can be used to query the status of rescaling. \"\"\" params =", "@property def prefix_url(self): return f\"{self._prefix}/{self.job_id}/vertices/{self.vertex_id}\" @property def subtasks(self): return JobVertexSubtaskClient(self.prefix_url) def details(self): \"\"\"", "return self.get(subtask_id) return _execute_rest_request( url=f\"{self.prefix_url}/{subtask_id}/attempts/{attempt_id}\" ) def get_attempt_accumulators(self, subtask_id, attempt_id=None): \"\"\" Returns the", "their current state. Endpoint: [GET] /jobs Returns ------- list List of jobs and", "jobs. Endpoint: [GET] /jobs/overview Returns ------- list List of existing jobs. \"\"\" return", "= {} for vertex_id in checkpoint_details[\"tasks\"].keys(): subtasks[vertex_id] = _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}/subtasks/{vertex_id}\", http_method=\"GET\", ) checkpoint_details[\"subtasks\"]", "task information aggregated by task manager. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/taskmanagers Returns ------- dict Task", "of valid subtask ids is available through the subtask_ids() method. Default: <all subtasks>.", "metric names. 
\"\"\" return [ elem[\"id\"] for elem in _execute_rest_request(url=f\"{self.prefix}/metrics\") ] def metrics(self,", "metric_names=None): \"\"\" Provides access to task metrics. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/metrics Returns ------- dict", "attempt. \"\"\" if attempt_id is None: return self.get(subtask_id) return _execute_rest_request( url=f\"{self.prefix_url}/{subtask_id}/attempts/{attempt_id}\" ) def", "selected job: counts, summary, latest and history. \"\"\" return _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints\", http_method=\"GET\" )", "elem in self.get_checkpoints(job_id=job_id)[\"history\"]] def get_checkpoint_details(self, job_id, checkpoint_id, show_subtasks=False): \"\"\" Returns details for a", "job_id: str 32-character hexadecimal string value that identifies a job. Returns ------- dict", "32-character hexadecimal string value that identifies a job. target_directory: str Savepoint target directory.", "your job without leaving any unhandled events or state. Endpoint: [GET] /jobs/:jobid/stop Parameters", "def get_plan(self, job_id): \"\"\" Returns the dataflow plan of a job. Endpoint: [GET]", "\"\"\" if metric_names is None: metric_names = self.metric_names() params = {\"get\": \",\".join(metric_names)} query_result", "= vertex_id @property def prefix_url(self): return f\"{self._prefix}/{self.job_id}/vertices/{self.vertex_id}\" @property def subtasks(self): return JobVertexSubtaskClient(self.prefix_url) def", "Returns ------- dict The execution result of the selected job. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/execution-result\")", "Parameters ---------- prefix: str REST API url prefix. It must contain the host,", "attempt of a subtask. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex Parameters ---------- subtask_id: int Positive integer", "that identifies a job. 
Returns ------- dict Dataflow plan \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/plan\")[\"plan\"] def", "def accumulators(self): \"\"\" Returns all user-defined accumulators for all subtasks of a task.", ")[\"triggerid\"] return JobTrigger(self.prefix, \"rescaling\", job_id, trigger_id) def create_savepoint(self, job_id, target_directory, cancel_job=False): \"\"\" Triggers", "------- dict Backpressure information \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/backpressure\") def metric_names(self): \"\"\" Returns the supported", "\"\"\" Returns the supported metric names. Returns ------- list List of metric names.", "Returns task information aggregated by task manager. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/taskmanagers Returns ------- dict", "the maximum watermark before stopping the job. When the watermark is emitted, all", "to task metrics. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/metrics Returns ------- dict Task metrics. \"\"\" if", "disabled. See FLINK-12312. Parameters ---------- job_id: str 32-character hexadecimal string value that identifies", "params = {\"parallelism\": parallelism} trigger_id = _execute_rest_request( url=f\"{self.prefix}/{job_id}/rescaling\", http_method=\"PATCH\", params=params )[\"triggerid\"] return JobTrigger(self.prefix,", "events that depend on this timer (e.g. time windows or process functions). This", "job, and may initiate back-pressure sampling if necessary. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/backpressure Notes -----", "------- dict Details of the selected attempt. \"\"\" if attempt_id is None: return", "\"\"\" return [ elem[\"id\"] for elem in _execute_rest_request(url=f\"{self.prefix_url}/metrics\") ] def metrics(self, metric_names=None, agg_modes=None,", "details for a task. \"\"\" return _execute_rest_request(url=self.prefix_url) def backpressure(self): \"\"\" Returns back-pressure information", "the selected job: counts, summary, latest and history. 
\"\"\" return _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints\", http_method=\"GET\"", "---------- job_id: str 32-character hexadecimal string value that identifies a job. target_directory: str", "------- dict The execution result of the selected job. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/execution-result\") def", "return result def get(self, job_id): \"\"\" Returns details of a job. Endpoint: [GET]", "[GET] /jobs/:jobid/vertices/:vertexid Returns ------- dict details for a task. \"\"\" return _execute_rest_request(url=self.prefix_url) def", "available. Returns ------- dict Backpressure information \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/backpressure\") def metric_names(self): \"\"\" Returns", "[GET] /jobs/:jobid Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a", "False if drain is None else drain, \"targetDirectory\": target_directory, } trigger_id = _execute_rest_request(", "JobTrigger(self.prefix, \"rescaling\", job_id, trigger_id) def create_savepoint(self, job_id, target_directory, cancel_job=False): \"\"\" Triggers a savepoint,", "job_id): \"\"\" Returns the ids of vertices of the selected job. Parameters ----------", "dict([(elem[\"id\"], elem[\"value\"]) for elem in query_result]) def get_plan(self, job_id): \"\"\" Returns the dataflow", "Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job. metric_names:", "Parameters ---------- metric_names: list (optional) List of selected specific metric names. Default: <all", "the respective subtasks. Endpoint: [GET] /jobs/:jobid/accumulators Parameters ---------- job_id: str 32-character hexadecimal string", "return [ elem[\"id\"] for elem in _execute_rest_request(url=f\"{self.prefix_url}/metrics\") ] def metrics(self, metric_names=None, agg_modes=None, subtask_ids=None):", "a job. Returns ------- list List of identifiers. 
\"\"\" return [elem[\"id\"] for elem", "------- JobTrigger Object that can be used to query the status of savepoint.", "get_plan(self, job_id): \"\"\" Returns the dataflow plan of a job. Endpoint: [GET] /jobs/:jobid/plan", "url=f\"{self.prefix}/{job_id}\", http_method=\"PATCH\", accepted_status_code=202 ) if len(res) < 1: return True else: return False", "temporarily disabled. See FLINK-12312. Parameters ---------- job_id: str 32-character hexadecimal string value that", "of job ids. \"\"\" return [elem[\"id\"] for elem in self.all()] def overview(self): \"\"\"", "configuration of the selected job_id Endpoint: [GET] /jobs/:jobid/checkpoints/config Parameters ---------- job_id: str 32-character", "\"\"\" Triggers a savepoint, and optionally cancels the job afterwards. This async operation", "url=f\"{self.prefix_url}/{subtask_id}/attempts/{attempt_id}/accumulators\" ) class JobVertexClient: def __init__(self, prefix, job_id, vertex_id): \"\"\" Constructor. Parameters ----------", "bool If it is True, the details of the subtask are also returned.", "dict Task information aggregated by task manager. \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/taskmanagers\") def watermarks(self): \"\"\"", "string value that identifies a job. target_directory: str Savepoint target directory. cancel_job: bool", "attempt_id=None): \"\"\" Returns details of an execution attempt of a subtask. Multiple execution", "prefix, type_name, job_id, trigger_id): self._prefix = prefix self._type_name = type_name self.job_id = job_id", "rescaling of a job. This async operation would return a 'triggerid' for further", "This async operation would return a JobTrigger for further query identifier. Endpoint: [GET]", "fire, allowing you to process events that depend on this timer (e.g. time", "This async operation would return a JobTrigger for further query identifier. Attention: The", "Returns ------- dict Accumulators for all task. 
\"\"\" params = {} if include_serialized_value", "Returns ------- JobTrigger Object that can be used to query the status of", "\"\"\" Triggers the rescaling of a job. This async operation would return a", "job. Endpoint: [GET] /jobs/:jobid/execution-result Parameters ---------- job_id: str 32-character hexadecimal string value that", "query the status of rescaling. \"\"\" params = {\"parallelism\": parallelism} trigger_id = _execute_rest_request(", "= type_name self.job_id = job_id self.trigger_id = trigger_id @property def status(self): return _execute_rest_request(", "Returns ------- dict Aggregated job metrics. \"\"\" if metric_names is None: metric_names =", "dict Details of the selected job. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}\") def get_config(self, job_id): \"\"\"", "query_result: metric_name = elem.pop(\"id\") result[metric_name] = elem[\"value\"] return result def subtasktimes(self): \"\"\" Returns", "of the selected execution attempt of a subtask. \"\"\" if attempt_id is None:", "(optional) List of selected specific metric names. Default: <all metrics> Returns ------- dict", "a checkpoint. show_subtasks: bool If it is True, the details of the subtask", "and history. \"\"\" return _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints\", http_method=\"GET\" ) def get_checkpoint_ids(self, job_id): \"\"\" Returns", "Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job. target_directory:", "a JobTrigger for further query identifier. Attention: The target directory has to be", "that identifies a subtask. attempt_id: int (Optional) Positive integer value that identifies an", "job. 
default: False Returns ------- JobTrigger Object that can be used to query", "supported_agg_modes if len(set(agg_modes).difference(set(supported_agg_modes))) > 0: raise RestException( f\"The provided aggregation modes list contains", "in self.accumulators()[\"subtasks\"]] def accumulators(self): \"\"\" Returns all user-defined accumulators for all subtasks of", "have been handled by Flink for this job. Endpoint: [GET] /jobs/:jobid/exceptions Parameters ----------", "job. Endpoint: [GET] /jobs/:jobid/checkpoints Parameters ---------- job_id: str 32-character hexadecimal string value that", "are also returned. Returns ------- dict \"\"\" checkpoint_details = _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}\", http_method=\"GET\", )", "to query the status of rescaling. \"\"\" params = {\"parallelism\": parallelism} trigger_id =", "value that identifies a job. Returns ------- list List of identifiers. \"\"\" return", "process functions). This is useful when you want to fully shut down your", "Returns ------- list List of jobs and their current state. \"\"\" return _execute_rest_request(url=self.prefix)[\"jobs\"]", "= elem.pop(\"id\") result[metric_name] = elem return result def get(self, subtask_id): \"\"\" Returns details", "that identifies a job. Returns ------- dict The execution result of the selected", "select specific jobs. The list of valid jobs are available through the job_ids()", "trigger_id = _execute_rest_request( url=f\"{self.prefix}/{job_id}/stop\", http_method=\"POST\", accepted_status_code=202, json=data, )[\"request-id\"] return JobTrigger(self.prefix, \"savepoints\", job_id, trigger_id)", "job. Returns ------- dict The execution result of the selected job. 
\"\"\" return", "is true: Endpoint: [GET] /jobs/:jobid/checkpoints/details/:checkpointid/subtasks/:vertexid Parameters ---------- job_id: str 32-character hexadecimal string value", "prefix @property def prefix_url(self): return f\"{self._prefix}/subtasks\" def subtask_ids(self): \"\"\" Returns the subtask identifiers.", "value that identifies a checkpoint. show_subtasks: bool If it is True, the details", "http_method=\"GET\", ) if not show_subtasks: return checkpoint_details subtasks = {} for vertex_id in", "job_id, trigger_id) def terminate(self, job_id): \"\"\" Terminates a job. Endpoint: [PATCH] /jobs/:jobid Parameters", "contain the host, port pair. \"\"\" self._prefix = prefix @property def prefix_url(self): return", "specifies whether serialized user task accumulators should be included in the response. Returns", "url=f\"{self.prefix}/{job_id}/checkpoints/config\", http_method=\"GET\" ) def get_checkpoints(self, job_id): \"\"\" Returns checkpointing statistics for a job.", "names. Default: <all metrics> Returns ------- dict Job metrics. \"\"\" if metric_names is", "identifies a job. vertex_id: str 32-character hexadecimal string value that identifies a vertex.", "return _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/config\", http_method=\"GET\" ) def get_checkpoints(self, job_id): \"\"\" Returns checkpointing statistics for", "and to all accumulators created by this job. Endpoint: [GET] /jobs/:jobid/execution-result Parameters ----------", "\"\"\" if attempt_id is None: attempt_id = self.get(subtask_id)[\"attempt\"] return _execute_rest_request( url=f\"{self.prefix_url}/{subtask_id}/attempts/{attempt_id}/accumulators\" ) class", "execution attempts happen in case of failure/recovery. Parameters ---------- subtask_id: int Positive integer", "return JobTrigger(self.prefix, \"savepoints\", job_id, trigger_id) def get_vertex(self, job_id, vertex_id): \"\"\" Returns a JobVertexClient.", "by both the JobManager(s) and TaskManager(s) e.g. 
a location on a distributed file-system", "def create_savepoint(self, job_id, target_directory, cancel_job=False): \"\"\" Triggers a savepoint, and optionally cancels the", "prefix_url(self): return f\"{self._prefix}/subtasks\" def subtask_ids(self): \"\"\" Returns the subtask identifiers. Returns ------- list", "jobs. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/overview\")[\"jobs\"] def metric_names(self): \"\"\" Returns the supported metric names. Returns", "the job and to all accumulators created by this job. Endpoint: [GET] /jobs/:jobid/execution-result", "task. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/watermarks Returns ------- list Watermarks for all subtasks of a", "Endpoint: [GET] /jobs/:jobid/savepoints Notes ----- The target directory has to be a location", "True if the job has been canceled, otherwise False. \"\"\" res = _execute_rest_request(", "emitted, all event time timers will fire, allowing you to process events that", "aggregation \" f\"modes: {','.join(supported_agg_modes)}; given list: {','.join(agg_modes)}\" ) if job_ids is None: job_ids", "all tasks of a job, aggregated across the respective subtasks. Endpoint: [GET] /jobs/:jobid/accumulators", "vertex. Returns ------- JobVertexClient JobVertexClient instance that can execute vertex related queries. \"\"\"", "contain the host, port pair. \"\"\" self._prefix = prefix self.job_id = job_id self.vertex_id", "the job afterwards. This async operation would return a JobTrigger for further query", "host, port pair. \"\"\" self.prefix = f\"{prefix}/jobs\" def all(self): \"\"\" Returns an overview", "\"agg\": \",\".join(agg_modes), \"jobs\": \",\".join(job_ids), } query_result = _execute_rest_request( url=f\"{self.prefix}/metrics\", params=params ) result =", "\"\"\" Returns details of the current or latest execution attempt of a subtask.", "If it is True, the details of the subtask are also returned. 
Returns", "return True else: return False def stop(self, job_id, target_directory, drain=False): \"\"\" Stops a", "job. Endpoint: [GET] /jobs/:jobid/exceptions Parameters ---------- job_id: str 32-character hexadecimal string value that", "Returns ------- dict Task metrics. \"\"\" if metric_names is None: metric_names = self.metric_names()", "string value that identifies a vertex. Returns ------- JobVertexClient JobVertexClient instance that can", "for a job, and may initiate back-pressure sampling if necessary. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/backpressure", "of savepoint. \"\"\" data = { \"drain\": False if drain is None else", "the configuration of a job. Endpoint: [GET] /jobs/:jobid/config Parameters ---------- job_id: str 32-character", "return JobTrigger(self.prefix, \"savepoints\", job_id, trigger_id) def terminate(self, job_id): \"\"\" Terminates a job. Endpoint:", "is not None: params[\"includeSerializedValue\"] = ( \"true\" if include_serialized_value else \"false\" ) return", "job metrics. Endpoint: [GET] /jobs/:jobid/metrics Parameters ---------- job_id: str 32-character hexadecimal string value", "/jobs/metrics Parameters ---------- metric_names: list (optional) List of selected specific metric names. Default:", "in _execute_rest_request(url=f\"{self.prefix}/metrics\") ] def metrics(self, metric_names=None, agg_modes=None, job_ids=None): \"\"\" Returns an overview over", "that have been handled by Flink for this job. Endpoint: [GET] /jobs/:jobid/exceptions Parameters", "of the selected job. \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}\") def get_config(self, job_id): \"\"\" Returns the", "/jobs/:jobid/vertices/:vertexid Returns ------- dict details for a task. \"\"\" return _execute_rest_request(url=self.prefix_url) def backpressure(self):", "------- dict The accumulators of the selected execution attempt of a subtask. 
\"\"\"", "all event time timers will fire, allowing you to process events that depend", "/jobs/:jobid/vertices/:vertexid/watermarks Returns ------- list Watermarks for all subtasks of a task. \"\"\" return", "Returns ------- dict \"\"\" return _execute_rest_request(url=f\"{self.prefix_url}/{subtask_id}\") def get_attempt(self, subtask_id, attempt_id=None): \"\"\" Returns details", "Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/taskmanagers Returns ------- dict Task information aggregated by task manager. \"\"\"", "Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/metrics Parameters ---------- metric_names: list (optional) List of selected specific metric", "return [elem[\"id\"] for elem in self.all()] def overview(self): \"\"\" Returns an overview over", "a JobTrigger for further query identifier. Endpoint: [GET] /jobs/:jobid/savepoints Notes ----- The target", "names. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/metrics Parameters ---------- metric_names: list (optional) List of selected specific", "else drain, \"targetDirectory\": target_directory, } trigger_id = _execute_rest_request( url=f\"{self.prefix}/{job_id}/stop\", http_method=\"POST\", accepted_status_code=202, json=data, )[\"request-id\"]", "_execute_rest_request( url=f\"{self.prefix}/{job_id}/savepoints\", http_method=\"POST\", accepted_status_code=202, json={\"cancel-job\": cancel_job, \"target-directory\": target_directory}, )[\"request-id\"] return JobTrigger(self.prefix, \"savepoints\", job_id,", "_execute_rest_request( url=f\"{self.prefix_url}/metrics\", params=params ) result = {} for elem in query_result: metric_name =", "also returned. 
Returns ------- dict \"\"\" checkpoint_details = _execute_rest_request( url=f\"{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}\", http_method=\"GET\", ) if", "cancel_job, \"target-directory\": target_directory}, )[\"request-id\"] return JobTrigger(self.prefix, \"savepoints\", job_id, trigger_id) def terminate(self, job_id): \"\"\"", "checkpoint. show_subtasks: bool If it is True, the details of the subtask are", "{','.join(supported_agg_modes)}; given list: {','.join(agg_modes)}\" ) if job_ids is None: job_ids = self.job_ids() params", "Returns ------- list List of job ids. \"\"\" return [elem[\"id\"] for elem in", "List of positive integers to select specific subtasks. The list of valid subtask", "for elem in query_result]) def get_plan(self, job_id): \"\"\" Returns the dataflow plan of", "vertex_id: str 32-character hexadecimal string value that identifies a vertex. Returns ------- JobVertexClient", "job. Returns ------- dict Dataflow plan \"\"\" return _execute_rest_request(url=f\"{self.prefix}/{job_id}/plan\")[\"plan\"] def get_vertex_ids(self, job_id): \"\"\"", "list of valid subtask ids is available through the subtask_ids() method. Default: <all", "job. Returns ------- dict Checkpointing statistics for the selected job: counts, summary, latest", "/jobs/:jobid/accumulators Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job.", "[GET] /jobs/:jobid/execution-result Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a", "hexadecimal string value that identifies a job. Returns ------- dict Checkpointing statistics for", "get_checkpoint_ids(self, job_id): \"\"\" Returns checkpoint ids of the job_id. Parameters ---------- job_id: str", "that identifies a job. Returns ------- list List of checkpoint ids. \"\"\" return" ]
[ "in destinations] nume2d = int(len(sources)/2 - len(existingedges2desgin)) # print(nume2d) if nume2d>0: newedges =", "l in namelist for e in l] random.shuffle(federatenames) # print(\"shuffle:\", federatenames) # all_edges", "destin = elementDict[e2], capacity = linkcapacity, size = 0, owner = elementDict[e2].owner) for", "e, f in zip(nettopObj.elements, nettopObj.federates)} # federates = [Federate(name = f, cash =", "Copyright 2018, <NAME>, Stevens Institute of Technology Licensed under the Apache License, Version", "f for e, f in zip(nettopObj.elements, nettopObj.federates)} # federates = [Federate(name = f,", "fedPriceDict.keys()}, edgelist = []) solutionObj = optimizeMILP(elements = elements, linklist = linklist, destinations", "NetTop(elementnames, all_edges, federatenames, sources, destinations) if tempNetTop.hashid not in hashNetworkDict: # print(seed, tempNetTop.hashid)", "if l[1] in destinations])<len(elements)//linkcapacity: all_possible_edges = [(a,b) for a, b in list(product(elementnames, elementnames))", "License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required", "= Process(target=updateCostValue, args=(objlist,proc,tempfilename)) p.start() allProcs.append(p) for a in allProcs: a.join() finalDict = {}", "elementDict = {e.name: e for e in elements} sources = [e for e", "totalvalue) def updateCostValue(objlist, proc, tempfilename): global filename if os.path.isdir(\"/home/abbas.ehsanfar/gurobi\"): hostname = os.environ['HOSTNAME'] os.environ['GRB_LICENSE_FILE']", "edgedivider in list(fedeldensitylist): # filename = dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d.p'%(numelements, numfederates, edgedivider) # elementnames", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the", "all_possible_edges if l[1] in destinations])<len(elements)//linkcapacity: all_possible_edges = [(a,b) for a, b in list(product(elementnames,", "in elements if e.name not in nettopObj.destinations] # # sources = nettopObj.sources #", "args = parser.parse_args() argsdict = vars(args) nproc = argsdict['nproc'] time = 0 #", "infile: # hashNetworkDict = pickle.load(infile) # for h, obj in objDict.items(): # hashNetworkDict[h]", "pickle.dump(hashNetworkDict, outfile) def calCostValue(nettopObj): federatenames = nettopObj.federates # fedPriceDict = {fname: (sharelinkcost, uselinkcost)", "def aggregateNetworks(): netlist = [] for (numfederates, numelements), edgedivider in list(fedeldensitylist): filename =", "= edgePriceDict, solutionObj = solutionObj) totalvalue = solutionObj.totalvalue # print(solutionObj.sourceEdgeDict) # print(solutionObj.fedValDict) nettopObj.costValueDict[(sharelinkcost,", "obj for obj in objlist} pickle.dump(objDict, outfile) def multiProcCostValue(): global nproc, filename with", "print(nettopObj.destinations) # print(len(newtasks)) # print(federates) # print(linklist) solutionObj = MILPSolution(nettopObj.hashid, time, fedPriceDict =", "0, owner = federateDict[f]) for (e,f) in zip(nettopObj.elements, federatenames)] elementDict = {e.name: e", "{} for proc in range(nproc): tempfilename = filename[:-2] + '_proc%s.p'%str(proc).zfill(2) with open(tempfilename, 'rb')", "as outfile: pickle.dump(hashNetDict, outfile) if __name__ == '__main__': parser = argparse.ArgumentParser(description=\"This processed raw", "0, uselinkcost = 0) for f in set(federatenames)] # federateDict = {f.name: f", "federatenames} # federates = [Federate(name = f, cash = 0, sharelinkcost = fedPriceDict[f][0],", "License for the specific language governing permissions and limitations under the License. 
\"\"\"", "[e for e in elements if e.name not in nettopObj.destinations] # # sources", "filename[:-2] + '_proc%s.p'%str(proc).zfill(2) with open(tempfilename, 'rb') as infile: hashNetworkDict = pickle.load(infile) for h,", "l[1] in destinations and l not in all_edges] existingedges2desgin = [l for l", "in [(500,501), (400,600)]: if mintup in costdict: topol.auctionscore += costdict[maxtup] - costdict[mintup] #", "0 newtasks = [Task(id = id + n, element=s, lastelement=s, size=size, value=value, expiration=time", "= {h: obj for h,obj in hashNetworkDict.items() if obj.costValueDict} else: hashNetworkDict = {}", "and l not in all_edges] existingedges2desgin = [l for l in all_edges if", "e not in destinations] if os.path.isfile(filename): with open(filename, 'rb') as infile: hashNetworkDict =", "newedges # print(newedges) # print(len(all_edges)) all_edge_set = set([]) destin_count = 0 for edge", "f in set(federatenames)] federateDict = {f.name: f for f in federates} # print(\"element", "for f in fedPriceDict.keys()}, edgelist = []) solutionObj = optimizeMILP(elements = elements, linklist", "the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "+ 'hashNetworkDict_elements%d_federates%d_density%d.p'%(numelements, numfederates, edgedivider) finalDict = {} for proc in range(60): tempfilename =", "parser = argparse.ArgumentParser(description=\"This processed raw data of twitter.\") parser.add_argument('--nproc', type=int, default=3, help='cores on", "d in destinations: destin_count += linkcapacity all_edge_set.add((s,d)) all_edge_set.add((d,s)) all_edges = list(all_edge_set) tempNetTop =", "+ '_proc%s.p'%str(proc).zfill(2) with open(tempfilename, 'rb') as infile: hashNetworkDict = pickle.load(infile) for h, obj", "resources.classes import * from resources.globalv import * from collections import defaultdict, Counter from", "random import hashlib from resources.optimizeMILP import optimizeMILP from multiprocessing import Process, Manager 
import", "len([l for l in all_possible_edges if l[1] in destinations])<len(elements)//linkcapacity: all_possible_edges = [(a,b) for", "tempNetTop = NetTop(elementnames, all_edges, federatenames, sources, destinations) if tempNetTop.hashid not in hashNetworkDict: #", "in enumerate(sources)] elfedDict = {e: f for e, f in zip(nettopObj.elements, nettopObj.federates)} #", "= pickle.load(infile) topollist = list(hashNetworkDict.values()) N = len(topollist) allProcs = [] for proc", "with open(tempfilename, 'wb') as outfile: pickle.dump(objDict, outfile) # with open(filename, 'rb') as infile:", "zip(nettopObj.elements, nettopObj.federates)} # print(elfedDict) # print(\"new tasks:\", newtasks) for sharelinkcost, uselinkcost in basetuples:", "by applicable law or agreed to in writing, software distributed under the License", "nproc, filename with open(filename, 'rb') as infile: hashNetworkDict = pickle.load(infile) topollist = list(hashNetworkDict.values())", "infile: hashNetworkDict = pickle.load(infile) hashNetworkDict = {h: obj for h,obj in hashNetworkDict.items() if", "= True)[:10] # print(filename, [e.auctionscore for e in toplist]) with open(filename[:-2] + '_top10.p',", "= solutionObj.totalvalue # print(solutionObj.sourceEdgeDict) # print(solutionObj.fedValDict) nettopObj.costValueDict[(sharelinkcost, uselinkcost)] = totalvalue # print(\"New tuple", "in list(product(elementnames, elementnames)) if (a != b and element_federate_dict[a] != element_federate_dict[b])] all_possible_edges =", "= {} for seed in seedlist: # print(seed) random.seed(seed) while sum(numberfederates)<len(elementnames): i =", "fedPriceDict[elfedDict[e[1]]][0] for e in nettopObj.edges} # print(edgePriceDict) # print(nettopObj.hashid) # print(fedPriceDict) # print(linklist)", "outfile: pickle.dump(finalDict, outfile) def aggregateNetworks(): netlist = [] for (numfederates, numelements), edgedivider in", "= [Federate(name = f, cash = 0, sharelinkcost = fedPriceDict[f][0], uselinkcost = 
fedPriceDict[f][1])", "l in all_possible_edges if l[1] in destinations and l not in all_edges] existingedges2desgin", "print(\"New topoology:\", nettopObj.hashid) calCostValue(nettopObj) if k%20 == 0: objDict = {obj.hashid: obj for", "destinations = elementnames[-2:] sources = [e for e in elementnames if e not", "obj in list(hashNetworkDict.items()): finalDict[h] = obj with open(filename, 'wb') as outfile: pickle.dump(finalDict, outfile)", "newtasks = newtasks, time = time, federates = federates, edgePriceDict = edgePriceDict, solutionObj", "hashNetworkDict = pickle.load(infile) for h, obj in list(hashNetworkDict.items()): finalDict[h] = obj with open(filename,", "print([s.name for s in sources]) linklist = [Link(source = elementDict[e1], destin = elementDict[e2],", "dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d.p'%(numelements, numfederates, edgedivider) finalDict = {} for proc in range(60): tempfilename", "[l for l in all_edges if l[1] in destinations] nume2d = int(len(sources)/2 -", "# print([s.name for s in sources]) destinations = elementnames[-2:] sources = [e for", "print(fedPriceDict) # print(linklist) # print(nettopObj.destinations) # print(len(newtasks)) # print(federates) # print(linklist) solutionObj =", "linkcapacity, size = 0, owner = elementDict[e2].owner) for (e1, e2) in nettopObj.edges] #", "'wb') as outfile: objDict = {obj.hashid: obj for obj in objlist} pickle.dump(objDict, outfile)", "+ n, element=s, lastelement=s, size=size, value=value, expiration=time + 5, init=time, active=True, penalty=penalty) for", "uselinkcost) for fname in federatenames} # federates = [Federate(name = f, cash =", "(e1, e2) in nettopObj.edges] # time = 0 # newtasks = [Task(id =", "= pickle.load(infile) # for h, obj in objDict.items(): # hashNetworkDict[h] = obj #", "in list(hashNetworkDict.items()): finalDict[h] = obj with open(filename, 'wb') as outfile: pickle.dump(finalDict, outfile) def", "the specific language governing permissions and 
limitations under the License. \"\"\" import sys,", "# for (numfederates, numelements), edgedivider in reversed(list(product([(2,10), (2,15), (3,15), (2,20), (3,20), (4,20)], [3,5,7,11]))):", "0, sharelinkcost = 0, uselinkcost = 0) for f in set(federatenames)] # federateDict", "help='cores on server') parser.add_argument('--n', type=int, default=3, help='cores on server') args = parser.parse_args() argsdict", "OR CONDITIONS OF ANY KIND, either express or implied. See the License for", "with open(filename, 'wb') as outfile: # pickle.dump(hashNetworkDict, outfile) with open(tempfilename, 'wb') as outfile:", "print([s.name for s in sources]) destinations = elementnames[-2:] sources = [e for e", "may not use this file except in compliance with the License. You may", "for edge in all_edges: s, d = edge # if destin_count > len(satellites):", "uselinkcost)] == 0: # for f in federates: # f.cash = 0 #", "sources = nettopObj.sources # print([s.name for s in sources]) linklist = [Link(source =", "under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "print(nettopObj.hashid) # print(fedPriceDict) # print(linklist) # print(nettopObj.destinations) # print(len(newtasks)) # print(federates) # print(linklist)", "sys, os sys.path.append(os.path.abspath('..')) from resources.classes import * from resources.globalv import * from collections", "uselinkcost = 0) for f in set(federatenames)] # federateDict = {f.name: f for", "import random import hashlib from resources.optimizeMILP import optimizeMILP from multiprocessing import Process, Manager", "= len(topollist) allProcs = [] for proc in range(nproc): tempfilename = filename[:-2] +", "in [(4,20,7), (4,20,11), (4,20,3), (4,20,5)]: filename = dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d.p'%(numelements, numfederates, edgedivider) finalDict", "range(nproc): tempfilename = filename[:-2] + '_proc%s.p'%str(proc).zfill(2) inds = range(proc, N, nproc) objlist =", "solutionObj.totalvalue # 
print(solutionObj.sourceEdgeDict) # print(solutionObj.fedValDict) nettopObj.costValueDict[(sharelinkcost, uselinkcost)] = totalvalue # print(\"New tuple cost", "nettopObj.edges] # time = 0 # newtasks = [Task(id = id + n,", "= dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d_top10.p'%(numelements, numfederates, edgedivider) # if os.path.isfile(filename): with open(filename, 'rb') as", "in objlist} pickle.dump(objDict, outfile) def multiProcCostValue(): global nproc, filename with open(filename, 'rb') as", "outfile) if __name__ == '__main__': parser = argparse.ArgumentParser(description=\"This processed raw data of twitter.\")", "print(\"length of cost value dict:\", len(nettopObj.costValueDict)) # print(nettopObj.hashid, nettopObj.costValueDict) # if (sharelinkcost, uselinkcost)", "numberfederates = numfederates*[len(elementnames)//numfederates] # print([s.name for s in sources]) destinations = elementnames[-2:] sources", "l] random.shuffle(federatenames) # print(\"shuffle:\", federatenames) # all_edges = [(satellites[0],satellites[1]), (satellites[3],stations[0]), (satellites[1],satellites[3]), # (satellites[2],satellites[4]),", "f in zip(nettopObj.elements, nettopObj.federates)} # print(\"new tuple:\", sharelinkcost, uselinkcost) # print(\"length of cost", "= 0 # basecost = [0, 200, 400, 600, 800, 1000] seedlist =", "in destinations))] all_edges = random.sample(all_possible_edges, int(len(all_possible_edges)//edgedivider)) edge2destin = [l for l in all_possible_edges", "e in nettopObj.edges} # print(edgePriceDict) # print(nettopObj.hashid) # print(fedPriceDict) # print(linklist) # print(nettopObj.destinations)", "= fedPriceDict[f][0], uselinkcost = fedPriceDict[f][1]) for f in set(federatenames)] federateDict = {f.name: f", "infile: netlist.extend(pickle.load(infile)) hashNetDict = {net.hashid: net for net in netlist} with open(dir_topologies +", "zip(nettopObj.elements, federatenames)] elementDict = {e.name: e for e in elements} sources = [e", 
"seedlist, filename, numfederates, elementnames, edgedivider numberfederates = numfederates*[len(elementnames)//numfederates] # print([s.name for s in", "for a in allProcs: a.join() finalDict = {} for proc in range(nproc): tempfilename", "for seed in seedlist: # print(seed) random.seed(seed) while sum(numberfederates)<len(elementnames): i = random.choice(range(len(numberfederates))) numberfederates[i]", "for s in sources]) destinations = elementnames[-2:] sources = [e for e in", "i in inds] p = Process(target=updateCostValue, args=(objlist,proc,tempfilename)) p.start() allProcs.append(p) for a in allProcs:", "0 for f in fedPriceDict.keys()}, edgelist = []) solutionObj = optimizeMILP(elements = elements,", "nettopObj.elements) # elements = [Element(name = e, capacity=elementcapacity, size = 0, owner =", "open(filename, 'rb') as infile: netlist.extend(pickle.load(infile)) hashNetDict = {net.hashid: net for net in netlist}", "for e in elements if e.name not in nettopObj.destinations] # # sources =", "print(\"new tuple:\", sharelinkcost, uselinkcost) # print(\"length of cost value dict:\", len(nettopObj.costValueDict)) # print(nettopObj.hashid,", "calAuctionScore(): global filename print(filename) if os.path.isfile(filename): with open(filename, 'rb') as infile: hashNetworkDict =", "mintup in [(500,501), (400,600)]: if mintup in costdict: topol.auctionscore += costdict[maxtup] - costdict[mintup]", "federateDict = {f.name: f for f in federates} # # print(\"element names:\", nettopObj.elements)", "value=value, expiration=time + 5, init=time, active=True, penalty=penalty) for n, s in enumerate(sources)] #", "nettopObj.hashid) calCostValue(nettopObj) if k%20 == 0: objDict = {obj.hashid: obj for obj in", "= NetTop(elementnames, all_edges, federatenames, sources, destinations) if tempNetTop.hashid not in hashNetworkDict: # print(seed,", "e for e in elements} sources = [e for e in elements if", "import argparse dir_topologies = os.path.abspath('..') + '/topologies_new/' def 
createNetTopologies(): global seedlist, filename, numfederates,", "open(tempfilename, 'rb') as infile: hashNetworkDict = pickle.load(infile) for h, obj in hashNetworkDict.items(): finalDict[h]", "if l[1] in destinations and l not in all_edges] existingedges2desgin = [l for", "for a, b in list(product(elementnames, elementnames)) if (a != b and element_federate_dict[a] !=", "in elementnames if e not in destinations] if os.path.isfile(filename): with open(filename, 'rb') as", "in destinations] if os.path.isfile(filename): with open(filename, 'rb') as infile: hashNetworkDict = pickle.load(infile) hashNetworkDict", "+ '_proc%s.p'%str(proc).zfill(2) inds = range(proc, N, nproc) objlist = [topollist[i] for i in", "cash = 0, sharelinkcost = fedPriceDict[f][0], uselinkcost = fedPriceDict[f][1]) for f in set(federatenames)]", "# fedPriceDict = {fname: (sharelinkcost, uselinkcost) for fname in federatenames} # federates =", "(sharelinkcost, uselinkcost) for fname in federatenames} federates = [Federate(name = f, cash =", "size = 0, owner = elementDict[e2].owner) for (e1, e2) in nettopObj.edges] time =", "hostname = os.environ['HOSTNAME'] os.environ['GRB_LICENSE_FILE'] = \"/home/abbas.ehsanfar/gurobi/%s/lic%s/gurobi.lic\"%(hostname,str(proc%30).zfill(2)) for k, nettopObj in enumerate(objlist): # print(\"New", "def updateCostValue(objlist, proc, tempfilename): global filename if os.path.isdir(\"/home/abbas.ehsanfar/gurobi\"): hostname = os.environ['HOSTNAME'] os.environ['GRB_LICENSE_FILE'] =", "'rb') as infile: hashNetworkDict = pickle.load(infile) for h, obj in list(hashNetworkDict.items()): finalDict[h] =", "with open(tempfilename, 'rb') as infile: hashNetworkDict = pickle.load(infile) for h, obj in hashNetworkDict.items():", "f in fedPriceDict.keys()}, edgelist = []) solutionObj = optimizeMILP(elements = elements, linklist =", "infile: hashNetworkDict = pickle.load(infile) for h, obj in hashNetworkDict.items(): finalDict[h] = obj with", 
"'hashNetworkDict_elements%d_federates%d_density%d.p'%(numelements, numfederates, edgedivider) finalDict = {} for proc in range(60): tempfilename = filename[:-2]", "destin_count > len(satellites): # continue if s in destinations or d in destinations:", "os.path.isfile(filename): with open(filename, 'rb') as infile: hashNetworkDict = pickle.load(infile) # topollist = hashNetworkDict.values()", "netlist = [] for (numfederates, numelements), edgedivider in list(fedeldensitylist): filename = dir_topologies +", "of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to", "Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use", "from multiprocessing import Process, Manager import argparse dir_topologies = os.path.abspath('..') + '/topologies_new/' def", "in sources]) # linklist = [Link(source = elementDict[e1], destin = elementDict[e2], capacity =", "= {e: f for e, f in zip(nettopObj.elements, nettopObj.federates)} # print(\"new tuple:\", sharelinkcost,", "'wb') as outfile: pickle.dump(toplist, outfile) with open(filename[:-2] + '_score.p', 'wb') as outfile: pickle.dump(hashNetworkDict,", "in list(fedeldensitylist): # filename = dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d.p'%(numelements, numfederates, edgedivider) # elementnames =", "= nettopObj.destinations, storedtasks = [], newtasks = newtasks, time = time, federates =", "if __name__ == '__main__': parser = argparse.ArgumentParser(description=\"This processed raw data of twitter.\") parser.add_argument('--nproc',", "print([s.name for s in sources]) # linklist = [Link(source = elementDict[e1], destin =", "linklist, destinations = nettopObj.destinations, storedtasks = [], newtasks = newtasks, time = time,", "filename[:-2] + '_proc%s.p'%str(proc).zfill(2) inds = range(proc, N, nproc) objlist = [topollist[i] for i", "= 0 # f.sharelinkcost = sharelinkcost # f.uselinkcost = uselinkcost edgePriceDict = {e:", "# sources = [e 
for e in elements if e.name not in nettopObj.destinations]", "netlist} with open(dir_topologies + 'hashNetDict.p', 'wb') as outfile: pickle.dump(hashNetDict, outfile) if __name__ ==", "outfile: pickle.dump(finalDict, outfile) for proc in range(nproc): os.remove(filename[:-2] + '_proc%s.p'%str(proc).zfill(2)) def calAuctionScore(): global", "for l in all_edges if l[1] in destinations] nume2d = int(len(sources)/2 - len(existingedges2desgin))", "+ newedges # print(newedges) # print(len(all_edges)) all_edge_set = set([]) destin_count = 0 for", "sources = nettopObj.sources # # print([s.name for s in sources]) # linklist =", "global filename print(filename) if os.path.isfile(filename): with open(filename, 'rb') as infile: hashNetworkDict = pickle.load(infile)", "for (numfederates, numelements), edgedivider in list(fedeldensitylist): # filename = dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d.p'%(numelements, numfederates,", "= linklist, destinations = nettopObj.destinations, storedtasks = [], newtasks = newtasks, time =", "# for f in federates: # f.cash = 0 # f.sharelinkcost = sharelinkcost", "for proc in range(60): tempfilename = filename[:-2] + '_proc%s.p'%str(proc).zfill(2) with open(tempfilename, 'rb') as", "hashNetworkDict.items(): finalDict[h] = obj with open(filename, 'wb') as outfile: pickle.dump(finalDict, outfile) for proc", "for (numfederates, numelements), edgedivider in list(fedeldensitylist): filename = dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d_top10.p'%(numelements, numfederates, edgedivider)", "= [Link(source = elementDict[e1], destin = elementDict[e2], capacity = linkcapacity, size = 0,", "+= costdict[maxtup] - costdict[mintup] # print(topol.auctionscore) toplist = sorted(hashNetworkDict.values(), key = lambda x:", "resources.globalv import * from collections import defaultdict, Counter from itertools import product import", "nettopObj.destinations] # sources = nettopObj.sources # print([s.name for s in 
sources]) linklist =", "and element_federate_dict[a] != element_federate_dict[b])] all_possible_edges = [] all_edges = [] # while len([l", "= [e for l in namelist for e in l] random.shuffle(federatenames) # print(\"shuffle:\",", "(satellites[2],satellites[1]), (satellites[2],satellites[3]), (satellites[3],satellites[4]), (satellites[4],stations[1]), (satellites[2],stations[0])] # all_possible_edges = [(a,b) for a, b in", "= os.path.abspath('..') + '/topologies_new/' def createNetTopologies(): global seedlist, filename, numfederates, elementnames, edgedivider numberfederates", "= topol.costValueDict maxtup = (0, 1000) for mintup in [(500,501), (400,600)]: if mintup", "not use this file except in compliance with the License. You may obtain", "n, s in enumerate(sources)] # elfedDict = {e: f for e, f in", "random.sample(edge2destin, nume2d) # print(len(all_edges)) all_edges = all_edges + newedges # print(newedges) # print(len(all_edges))", "if e.name not in nettopObj.destinations] # sources = nettopObj.sources # print([s.name for s", "= 0 for edge in all_edges: s, d = edge # if destin_count", "= [(satellites[0],satellites[1]), (satellites[3],stations[0]), (satellites[1],satellites[3]), # (satellites[2],satellites[4]), (satellites[2],satellites[1]), (satellites[2],satellites[3]), (satellites[3],satellites[4]), (satellites[4],stations[1]), (satellites[2],stations[0])] # all_possible_edges", "in basetuples: fedPriceDict = {fname: (sharelinkcost, uselinkcost) for fname in federatenames} federates =", "in enumerate(numberfederates)] federatenames = [e for l in namelist for e in l]", "elementnames)) if (a != b and not (a in destinations))] all_edges = random.sample(all_possible_edges,", "os.environ['GRB_LICENSE_FILE'] = \"/home/abbas.ehsanfar/gurobi/%s/lic%s/gurobi.lic\"%(hostname,str(proc%30).zfill(2)) for k, nettopObj in enumerate(objlist): # print(\"New topoology:\", nettopObj.hashid) calCostValue(nettopObj)", "in enumerate(objlist): # print(\"New topoology:\", 
nettopObj.hashid) calCostValue(nettopObj) if k%20 == 0: objDict =", "e2) in nettopObj.edges] # time = 0 # newtasks = [Task(id = id", "< 1000: # return for k, topol in hashNetworkDict.items(): costdict = topol.costValueDict maxtup", "namelist = [n*['f%d'%i] for i, n in enumerate(numberfederates)] federatenames = [e for l", "time, federates = federates, edgePriceDict = edgePriceDict, solutionObj = solutionObj) totalvalue = solutionObj.totalvalue", "1000] seedlist = list(range(0,500)) # for (numfederates, numelements), edgedivider in reversed(list(product([(2,10), (2,15), (3,15),", "= filename[:-2] + '_proc%s.p'%str(proc).zfill(2) inds = range(proc, N, nproc) objlist = [topollist[i] for", "sources]) destinations = elementnames[-2:] sources = [e for e in elementnames if e", "2.0 (the \"License\"); you may not use this file except in compliance with", "copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed", "and value:\", sharelinkcost, uselinkcost, totalvalue) def updateCostValue(objlist, proc, tempfilename): global filename if os.path.isdir(\"/home/abbas.ehsanfar/gurobi\"):", "in nettopObj.destinations] # # sources = nettopObj.sources # # print([s.name for s in", "= hashNetworkDict.values() # if len(hashNetworkDict) < 1000: # return for k, topol in", "twitter.\") parser.add_argument('--nproc', type=int, default=3, help='cores on server') parser.add_argument('--n', type=int, default=3, help='cores on server')", "for obj in objlist} pickle.dump(objDict, outfile) def multiProcCostValue(): global nproc, filename with open(filename,", "in toplist]) with open(filename[:-2] + '_top10.p', 'wb') as outfile: pickle.dump(toplist, outfile) with open(filename[:-2]", "owner = federateDict[f]) for (e,f) in zip(nettopObj.elements, federatenames)] # elementDict = {e.name: e", "e, f in zip(nettopObj.elements, nettopObj.federates)} # print(\"new tuple:\", sharelinkcost, uselinkcost) # print(\"length of", "outfile) def 
aggregateNetworks(): netlist = [] for (numfederates, numelements), edgedivider in list(fedeldensitylist): filename", "numfederates, edgedivider) # elementnames = ['e%d'%(i+1) for i in range(numelements)] # createNetTopologies() #", "inds] p = Process(target=updateCostValue, args=(objlist,proc,tempfilename)) p.start() allProcs.append(p) for a in allProcs: a.join() finalDict", "if nume2d>0: newedges = random.sample(edge2destin, nume2d) # print(len(all_edges)) all_edges = all_edges + newedges", "(numfederates, numelements), edgedivider in list(fedeldensitylist): # filename = dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d.p'%(numelements, numfederates, edgedivider)", "in federates} # print(\"element names:\", nettopObj.elements) elements = [Element(name = e, capacity=elementcapacity, size", "destinations] nume2d = int(len(sources)/2 - len(existingedges2desgin)) # print(nume2d) if nume2d>0: newedges = random.sample(edge2destin,", "elementnames)) if (a != b and element_federate_dict[a] != element_federate_dict[b])] all_possible_edges = [] all_edges", "os.path.abspath('..') + '/topologies_new/' def createNetTopologies(): global seedlist, filename, numfederates, elementnames, edgedivider numberfederates =", "list(fedeldensitylist): filename = dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d_top10.p'%(numelements, numfederates, edgedivider) # if os.path.isfile(filename): with open(filename,", "return def aggregate60Nodes(): for numfederates, numelements, edgedivider in [(4,20,7), (4,20,11), (4,20,3), (4,20,5)]: filename", "tempfilename = filename[:-2] + '_proc%s.p'%str(proc).zfill(2) inds = range(proc, N, nproc) objlist = [topollist[i]", "{e.name: e for e in elements} sources = [e for e in elements", "for e in elements} sources = [e for e in elements if e.name", "range(nproc): os.remove(filename[:-2] + '_proc%s.p'%str(proc).zfill(2)) def calAuctionScore(): global filename print(filename) if os.path.isfile(filename): with 
open(filename,", "names:\", nettopObj.elements) # elements = [Element(name = e, capacity=elementcapacity, size = 0, owner", "finalDict = {} for proc in range(nproc): tempfilename = filename[:-2] + '_proc%s.p'%str(proc).zfill(2) with", "print(federates) # print(linklist) solutionObj = MILPSolution(nettopObj.hashid, time, fedPriceDict = fedPriceDict, fedValDict = {f:", "= [] for proc in range(nproc): tempfilename = filename[:-2] + '_proc%s.p'%str(proc).zfill(2) inds =", "k, nettopObj in enumerate(objlist): # print(\"New topoology:\", nettopObj.hashid) calCostValue(nettopObj) if k%20 == 0:", "f for f in federates} # # print(\"element names:\", nettopObj.elements) # elements =", "= e, capacity=elementcapacity, size = 0, owner = federateDict[f]) for (e,f) in zip(nettopObj.elements,", "outfile: objDict = {obj.hashid: obj for obj in objlist} pickle.dump(objDict, outfile) def multiProcCostValue():", "net in netlist} with open(dir_topologies + 'hashNetDict.p', 'wb') as outfile: pickle.dump(hashNetDict, outfile) if", "obj in hashNetworkDict.items(): finalDict[h] = obj with open(filename, 'wb') as outfile: pickle.dump(finalDict, outfile)", "federates} # # print(\"element names:\", nettopObj.elements) # elements = [Element(name = e, capacity=elementcapacity,", "sum(numberfederates)<len(elementnames): i = random.choice(range(len(numberfederates))) numberfederates[i] += 1 namelist = [n*['f%d'%i] for i, n", "k, topol in hashNetworkDict.items(): costdict = topol.costValueDict maxtup = (0, 1000) for mintup", "pickle import random import hashlib from resources.optimizeMILP import optimizeMILP from multiprocessing import Process,", "# print([s.name for s in sources]) # linklist = [Link(source = elementDict[e1], destin", "os.remove(filename[:-2] + '_proc%s.p'%str(proc).zfill(2)) def calAuctionScore(): global filename print(filename) if os.path.isfile(filename): with open(filename, 'rb')", "optimizeMILP(elements = elements, linklist = linklist, destinations = nettopObj.destinations, 
storedtasks = [], newtasks", "= obj with open(filename, 'wb') as outfile: pickle.dump(finalDict, outfile) def aggregateNetworks(): netlist =", "= vars(args) nproc = argsdict['nproc'] time = 0 # basecost = [0, 200,", "sources = [e for e in elements if e.name not in nettopObj.destinations] #", "enumerate(sources)] elfedDict = {e: f for e, f in zip(nettopObj.elements, nettopObj.federates)} # print(\"new", "hashlib from resources.optimizeMILP import optimizeMILP from multiprocessing import Process, Manager import argparse dir_topologies", "fedPriceDict[f][0], uselinkcost = fedPriceDict[f][1]) for f in set(federatenames)] # federateDict = {f.name: f", "for l in all_possible_edges if l[1] in destinations and l not in all_edges]", "f.uselinkcost = uselinkcost edgePriceDict = {e: fedPriceDict[elfedDict[e[1]]][0] for e in nettopObj.edges} # print(edgePriceDict)", "argsdict = vars(args) nproc = argsdict['nproc'] time = 0 # basecost = [0,", "outfile: pickle.dump(hashNetDict, outfile) if __name__ == '__main__': parser = argparse.ArgumentParser(description=\"This processed raw data", "element_federate_dict[b])] all_possible_edges = [] all_edges = [] # while len([l for l in", "global nproc, filename with open(filename, 'rb') as infile: hashNetworkDict = pickle.load(infile) topollist =", "600, 800, 1000] seedlist = list(range(0,500)) # for (numfederates, numelements), edgedivider in reversed(list(product([(2,10),", "b and element_federate_dict[a] != element_federate_dict[b])] all_possible_edges = [] all_edges = [] # while", "totalvalue # print(\"New tuple cost and value:\", sharelinkcost, uselinkcost, totalvalue) def updateCostValue(objlist, proc,", "# print(nettopObj.hashid, nettopObj.costValueDict) # if (sharelinkcost, uselinkcost) not in nettopObj.costValueDict or nettopObj.costValueDict[(sharelinkcost, uselinkcost)]", "open(filename[:-2] + '_score.p', 'wb') as outfile: pickle.dump(hashNetworkDict, outfile) else: return def aggregate60Nodes(): for", "numelements), edgedivider 
in reversed(list(product([(2,10), (2,15), (3,15), (2,20), (3,20), (4,20)], [3,5,7,11]))): aggregateNetworks() # for", "= {e.name: e for e in elements} sources = [e for e in", "= {e: fedPriceDict[elfedDict[e[1]]][0] for e in nettopObj.edges} # print(edgePriceDict) # print(nettopObj.hashid) # print(fedPriceDict)", "all_edges + newedges # print(newedges) # print(len(all_edges)) all_edge_set = set([]) destin_count = 0", "= 0 newtasks = [Task(id = id + n, element=s, lastelement=s, size=size, value=value,", "an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "for a, b in list(product(elementnames, elementnames)) if (a != b and not (a", "parser.add_argument('--nproc', type=int, default=3, help='cores on server') parser.add_argument('--n', type=int, default=3, help='cores on server') args", "fedPriceDict = {fname: (sharelinkcost, uselinkcost) for fname in federatenames} # federates = [Federate(name", "obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law", "if obj.costValueDict} else: hashNetworkDict = {} for seed in seedlist: # print(seed) random.seed(seed)", "nettopObj.sources # print([s.name for s in sources]) linklist = [Link(source = elementDict[e1], destin", "all_edges] existingedges2desgin = [l for l in all_edges if l[1] in destinations] nume2d", "in all_edges: s, d = edge # if destin_count > len(satellites): # continue", "for e in elements if e.name not in nettopObj.destinations] # sources = nettopObj.sources", "= {obj.hashid: obj for obj in objlist} with open(tempfilename, 'wb') as outfile: pickle.dump(objDict,", "hashNetworkDict: # print(seed, tempNetTop.hashid) hashNetworkDict[tempNetTop.hashid] = tempNetTop with open(filename, 'wb') as outfile: pickle.dump(hashNetworkDict,", "init=time, active=True, penalty=penalty) for n, s in enumerate(sources)] # elfedDict = {e: f", "License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in 
writing,", "sharelinkcost, uselinkcost, totalvalue) def updateCostValue(objlist, proc, tempfilename): global filename if os.path.isdir(\"/home/abbas.ehsanfar/gurobi\"): hostname =", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "'rb') as infile: hashNetworkDict = pickle.load(infile) # topollist = hashNetworkDict.values() # if len(hashNetworkDict)", "= f, cash = 0, sharelinkcost = 0, uselinkcost = 0) for f", "infile: hashNetworkDict = pickle.load(infile) topollist = list(hashNetworkDict.values()) N = len(topollist) allProcs = []", "0 for edge in all_edges: s, d = edge # if destin_count >", "= totalvalue # print(\"New tuple cost and value:\", sharelinkcost, uselinkcost, totalvalue) def updateCostValue(objlist,", "inds = range(proc, N, nproc) objlist = [topollist[i] for i in inds] p", "+ '_score.p', 'wb') as outfile: pickle.dump(hashNetworkDict, outfile) else: return def aggregate60Nodes(): for numfederates,", "federatenames} federates = [Federate(name = f, cash = 0, sharelinkcost = fedPriceDict[f][0], uselinkcost", "not in destinations] if os.path.isfile(filename): with open(filename, 'rb') as infile: hashNetworkDict = pickle.load(infile)", "list(all_edge_set) tempNetTop = NetTop(elementnames, all_edges, federatenames, sources, destinations) if tempNetTop.hashid not in hashNetworkDict:", "(satellites[2],stations[0])] # all_possible_edges = [(a,b) for a, b in list(product(elementnames, elementnames)) if (a", "elements} sources = [e for e in elements if e.name not in nettopObj.destinations]", "# if destin_count > len(satellites): # continue if s in destinations or d", "obj for obj in objlist} with open(tempfilename, 'wb') as outfile: pickle.dump(objDict, outfile) #", "l in all_edges if l[1] in destinations] nume2d = int(len(sources)/2 - len(existingedges2desgin)) #", "open(tempfilename, 'wb') as outfile: pickle.dump(objDict, outfile) # with open(filename, 'rb') as infile: #", "netlist.extend(pickle.load(infile)) hashNetDict = 
{net.hashid: net for net in netlist} with open(dir_topologies + 'hashNetDict.p',", "f.cash = 0 # f.sharelinkcost = sharelinkcost # f.uselinkcost = uselinkcost edgePriceDict =", "# newtasks = [Task(id = id + n, element=s, lastelement=s, size=size, value=value, expiration=time", "f in zip(nettopObj.elements, nettopObj.federates)} # federates = [Federate(name = f, cash = 0,", "= elements, linklist = linklist, destinations = nettopObj.destinations, storedtasks = [], newtasks =", "200, 400, 600, 800, 1000] seedlist = list(range(0,500)) # for (numfederates, numelements), edgedivider", "filename = dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d_top10.p'%(numelements, numfederates, edgedivider) # if os.path.isfile(filename): with open(filename, 'rb')", "if (sharelinkcost, uselinkcost) not in nettopObj.costValueDict or nettopObj.costValueDict[(sharelinkcost, uselinkcost)] == 0: # for", "in nettopObj.destinations] # sources = nettopObj.sources # print([s.name for s in sources]) linklist", "while len([l for l in all_possible_edges if l[1] in destinations])<len(elements)//linkcapacity: all_possible_edges = [(a,b)", "maxtup = (0, 1000) for mintup in [(500,501), (400,600)]: if mintup in costdict:", "[], newtasks = newtasks, time = time, federates = federates, edgePriceDict = edgePriceDict,", "time = 0 # newtasks = [Task(id = id + n, element=s, lastelement=s,", "uselinkcost = fedPriceDict[f][1]) for f in set(federatenames)] # federateDict = {f.name: f for", "= {obj.hashid: obj for obj in objlist} pickle.dump(objDict, outfile) def multiProcCostValue(): global nproc,", "calCostValue(nettopObj) if k%20 == 0: objDict = {obj.hashid: obj for obj in objlist}", "'_proc%s.p'%str(proc).zfill(2) inds = range(proc, N, nproc) objlist = [topollist[i] for i in inds]", "'wb') as outfile: pickle.dump(hashNetworkDict, outfile) else: return def aggregate60Nodes(): for numfederates, numelements, edgedivider", "limitations under the License. 
\"\"\" import sys, os sys.path.append(os.path.abspath('..')) from resources.classes import *", "'rb') as infile: hashNetworkDict = pickle.load(infile) topollist = list(hashNetworkDict.values()) N = len(topollist) allProcs", "if (a != b and element_federate_dict[a] != element_federate_dict[b])] all_possible_edges = [] all_edges =", "nettopObj.federates)} # federates = [Federate(name = f, cash = 0, sharelinkcost = 0,", "all_edges = [] # while len([l for l in all_possible_edges if l[1] in", "totalvalue = solutionObj.totalvalue # print(solutionObj.sourceEdgeDict) # print(solutionObj.fedValDict) nettopObj.costValueDict[(sharelinkcost, uselinkcost)] = totalvalue # print(\"New", "in zip(nettopObj.elements, federatenames)] elementDict = {e.name: e for e in elements} sources =", "vars(args) nproc = argsdict['nproc'] time = 0 # basecost = [0, 200, 400,", "in nettopObj.edges} # print(edgePriceDict) # print(nettopObj.hashid) # print(fedPriceDict) # print(linklist) # print(nettopObj.destinations) #", "as outfile: objDict = {obj.hashid: obj for obj in objlist} pickle.dump(objDict, outfile) def", "in inds] p = Process(target=updateCostValue, args=(objlist,proc,tempfilename)) p.start() allProcs.append(p) for a in allProcs: a.join()", "if os.path.isdir(\"/home/abbas.ehsanfar/gurobi\"): hostname = os.environ['HOSTNAME'] os.environ['GRB_LICENSE_FILE'] = \"/home/abbas.ehsanfar/gurobi/%s/lic%s/gurobi.lic\"%(hostname,str(proc%30).zfill(2)) for k, nettopObj in enumerate(objlist):", "# print(nettopObj.destinations) # print(len(newtasks)) # print(federates) # print(linklist) solutionObj = MILPSolution(nettopObj.hashid, time, fedPriceDict", "outfile) for proc in range(nproc): os.remove(filename[:-2] + '_proc%s.p'%str(proc).zfill(2)) def calAuctionScore(): global filename print(filename)", "the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in", "(numfederates, numelements), edgedivider in list(fedeldensitylist): filename = dir_topologies 
+ 'hashNetworkDict_elements%d_federates%d_density%d_top10.p'%(numelements, numfederates, edgedivider) #", "= elementDict[e2], capacity = linkcapacity, size = 0, owner = elementDict[e2].owner) for (e1,", "capacity=elementcapacity, size = 0, owner = federateDict[f]) for (e,f) in zip(nettopObj.elements, federatenames)] #", "size = 0, owner = federateDict[f]) for (e,f) in zip(nettopObj.elements, federatenames)] # elementDict", "print(newedges) # print(len(all_edges)) all_edge_set = set([]) destin_count = 0 for edge in all_edges:", "in elements} sources = [e for e in elements if e.name not in", "for net in netlist} with open(dir_topologies + 'hashNetDict.p', 'wb') as outfile: pickle.dump(hashNetDict, outfile)", "range(nproc): tempfilename = filename[:-2] + '_proc%s.p'%str(proc).zfill(2) with open(tempfilename, 'rb') as infile: hashNetworkDict =", "[e for e in elements if e.name not in nettopObj.destinations] # sources =", "for n, s in enumerate(sources)] # elfedDict = {e: f for e, f", "f.sharelinkcost = sharelinkcost # f.uselinkcost = uselinkcost edgePriceDict = {e: fedPriceDict[elfedDict[e[1]]][0] for e", "in elements if e.name not in nettopObj.destinations] # sources = nettopObj.sources # print([s.name", "s in enumerate(sources)] elfedDict = {e: f for e, f in zip(nettopObj.elements, nettopObj.federates)}", "= {f.name: f for f in federates} # print(\"element names:\", nettopObj.elements) elements =", "with open(filename, 'rb') as infile: hashNetworkDict = pickle.load(infile) # topollist = hashNetworkDict.values() #", "all_edges if l[1] in destinations] nume2d = int(len(sources)/2 - len(existingedges2desgin)) # print(nume2d) if", "processed raw data of twitter.\") parser.add_argument('--nproc', type=int, default=3, help='cores on server') parser.add_argument('--n', type=int,", "len(hashNetworkDict) < 1000: # return for k, topol in hashNetworkDict.items(): costdict = topol.costValueDict", "for i in inds] p = Process(target=updateCostValue, 
args=(objlist,proc,tempfilename)) p.start() allProcs.append(p) for a in", "return for k, topol in hashNetworkDict.items(): costdict = topol.costValueDict maxtup = (0, 1000)", "tempfilename = filename[:-2] + '_proc%s.p'%str(proc).zfill(2) with open(tempfilename, 'rb') as infile: hashNetworkDict = pickle.load(infile)", "= {} for proc in range(60): tempfilename = filename[:-2] + '_proc%s.p'%str(proc).zfill(2) with open(tempfilename,", "if mintup in costdict: topol.auctionscore += costdict[maxtup] - costdict[mintup] # print(topol.auctionscore) toplist =", "Unless required by applicable law or agreed to in writing, software distributed under", "0: objDict = {obj.hashid: obj for obj in objlist} with open(tempfilename, 'wb') as", "open(filename, 'rb') as infile: hashNetworkDict = pickle.load(infile) topollist = list(hashNetworkDict.values()) N = len(topollist)", "print(filename) if os.path.isfile(filename): with open(filename, 'rb') as infile: hashNetworkDict = pickle.load(infile) # topollist", "def multiProcCostValue(): global nproc, filename with open(filename, 'rb') as infile: hashNetworkDict = pickle.load(infile)", "'wb') as outfile: # pickle.dump(hashNetworkDict, outfile) with open(tempfilename, 'wb') as outfile: objDict =", "n, s in enumerate(sources)] elfedDict = {e: f for e, f in zip(nettopObj.elements,", "uselinkcost) for fname in federatenames} federates = [Federate(name = f, cash = 0,", "for l in all_possible_edges if l[1] in destinations])<len(elements)//linkcapacity: all_possible_edges = [(a,b) for a,", "pickle.dump(finalDict, outfile) def aggregateNetworks(): netlist = [] for (numfederates, numelements), edgedivider in list(fedeldensitylist):", "= federateDict[f]) for (e,f) in zip(nettopObj.elements, federatenames)] elementDict = {e.name: e for e", "(a in destinations))] all_edges = random.sample(all_possible_edges, int(len(all_possible_edges)//edgedivider)) edge2destin = [l for l in", "= fedPriceDict[f][1]) for f in set(federatenames)] federateDict = 
{f.name: f for f in", "= pickle.load(infile) for h, obj in hashNetworkDict.items(): finalDict[h] = obj with open(filename, 'wb')", "+ 5, init=time, active=True, penalty=penalty) for n, s in enumerate(sources)] elfedDict = {e:", "global filename if os.path.isdir(\"/home/abbas.ehsanfar/gurobi\"): hostname = os.environ['HOSTNAME'] os.environ['GRB_LICENSE_FILE'] = \"/home/abbas.ehsanfar/gurobi/%s/lic%s/gurobi.lic\"%(hostname,str(proc%30).zfill(2)) for k, nettopObj", "= numfederates*[len(elementnames)//numfederates] # print([s.name for s in sources]) destinations = elementnames[-2:] sources =", "def createNetTopologies(): global seedlist, filename, numfederates, elementnames, edgedivider numberfederates = numfederates*[len(elementnames)//numfederates] # print([s.name", "print(\"element names:\", nettopObj.elements) # elements = [Element(name = e, capacity=elementcapacity, size = 0,", "open(filename, 'wb') as outfile: pickle.dump(finalDict, outfile) for proc in range(nproc): os.remove(filename[:-2] + '_proc%s.p'%str(proc).zfill(2))", "= all_edges + newedges # print(newedges) # print(len(all_edges)) all_edge_set = set([]) destin_count =", "# f.cash = 0 # f.sharelinkcost = sharelinkcost # f.uselinkcost = uselinkcost edgePriceDict", "linkcapacity all_edge_set.add((s,d)) all_edge_set.add((d,s)) all_edges = list(all_edge_set) tempNetTop = NetTop(elementnames, all_edges, federatenames, sources, destinations)", "= 0, uselinkcost = 0) for f in set(federatenames)] # federateDict = {f.name:", "for e in nettopObj.edges} # print(edgePriceDict) # print(nettopObj.hashid) # print(fedPriceDict) # print(linklist) #", "+ 'hashNetworkDict_elements%d_federates%d_density%d_top10.p'%(numelements, numfederates, edgedivider) # if os.path.isfile(filename): with open(filename, 'rb') as infile: netlist.extend(pickle.load(infile))", "on server') args = parser.parse_args() argsdict = vars(args) nproc = argsdict['nproc'] time =", "pickle.dump(objDict, outfile) def multiProcCostValue(): global nproc, 
filename with open(filename, 'rb') as infile: hashNetworkDict", "for the specific language governing permissions and limitations under the License. \"\"\" import", "b in list(product(elementnames, elementnames)) if (a != b and element_federate_dict[a] != element_federate_dict[b])] all_possible_edges", "[0, 200, 400, 600, 800, 1000] seedlist = list(range(0,500)) # for (numfederates, numelements),", "objlist} with open(tempfilename, 'wb') as outfile: pickle.dump(objDict, outfile) # with open(filename, 'rb') as", "in seedlist: # print(seed) random.seed(seed) while sum(numberfederates)<len(elementnames): i = random.choice(range(len(numberfederates))) numberfederates[i] += 1", "topoology:\", nettopObj.hashid) calCostValue(nettopObj) if k%20 == 0: objDict = {obj.hashid: obj for obj", "'wb') as outfile: pickle.dump(hashNetworkDict, outfile) def calCostValue(nettopObj): federatenames = nettopObj.federates # fedPriceDict =", "software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT", "elements if e.name not in nettopObj.destinations] # sources = nettopObj.sources # print([s.name for", "f, cash = 0, sharelinkcost = 0, uselinkcost = 0) for f in", "existingedges2desgin = [l for l in all_edges if l[1] in destinations] nume2d =", "args=(objlist,proc,tempfilename)) p.start() allProcs.append(p) for a in allProcs: a.join() finalDict = {} for proc", "5, init=time, active=True, penalty=penalty) for n, s in enumerate(sources)] elfedDict = {e: f", "penalty=penalty) for n, s in enumerate(sources)] elfedDict = {e: f for e, f", "+= 1 namelist = [n*['f%d'%i] for i, n in enumerate(numberfederates)] federatenames = [e", "0 # f.sharelinkcost = sharelinkcost # f.uselinkcost = uselinkcost edgePriceDict = {e: fedPriceDict[elfedDict[e[1]]][0]", "print(\"shuffle:\", federatenames) # all_edges = [(satellites[0],satellites[1]), (satellites[3],stations[0]), (satellites[1],satellites[3]), # (satellites[2],satellites[4]), (satellites[2],satellites[1]), 
(satellites[2],satellites[3]), (satellites[3],satellites[4]),", "pickle.load(infile) for h, obj in hashNetworkDict.items(): finalDict[h] = obj with open(filename, 'wb') as", "= {net.hashid: net for net in netlist} with open(dir_topologies + 'hashNetDict.p', 'wb') as", "= 0 # newtasks = [Task(id = id + n, element=s, lastelement=s, size=size,", "cost and value:\", sharelinkcost, uselinkcost, totalvalue) def updateCostValue(objlist, proc, tempfilename): global filename if", "in list(product(elementnames, elementnames)) if (a != b and not (a in destinations))] all_edges", "# print(federates) # print(linklist) solutionObj = MILPSolution(nettopObj.hashid, time, fedPriceDict = fedPriceDict, fedValDict =", "\"\"\" Copyright 2018, <NAME>, Stevens Institute of Technology Licensed under the Apache License,", "= id + n, element=s, lastelement=s, size=size, value=value, expiration=time + 5, init=time, active=True,", "fedPriceDict = {fname: (sharelinkcost, uselinkcost) for fname in federatenames} federates = [Federate(name =", "in writing, software distributed under the License is distributed on an \"AS IS\"", "= linkcapacity, size = 0, owner = elementDict[e2].owner) for (e1, e2) in nettopObj.edges]", "+ '_top10.p', 'wb') as outfile: pickle.dump(toplist, outfile) with open(filename[:-2] + '_score.p', 'wb') as", "not (a in destinations))] all_edges = random.sample(all_possible_edges, int(len(all_possible_edges)//edgedivider)) edge2destin = [l for l", "else: hashNetworkDict = {} for seed in seedlist: # print(seed) random.seed(seed) while sum(numberfederates)<len(elementnames):", "as outfile: pickle.dump(objDict, outfile) # with open(filename, 'rb') as infile: # hashNetworkDict =", "in nettopObj.costValueDict or nettopObj.costValueDict[(sharelinkcost, uselinkcost)] == 0: # for f in federates: #", "or agreed to in writing, software distributed under the License is distributed on", "federates = [Federate(name = f, cash = 0, sharelinkcost = 0, uselinkcost =", "== '__main__': parser = 
argparse.ArgumentParser(description=\"This processed raw data of twitter.\") parser.add_argument('--nproc', type=int, default=3,", "> len(satellites): # continue if s in destinations or d in destinations: destin_count", "f for f in federates} # print(\"element names:\", nettopObj.elements) elements = [Element(name =", "pickle.load(infile) topollist = list(hashNetworkDict.values()) N = len(topollist) allProcs = [] for proc in", "sharelinkcost, uselinkcost in basetuples: fedPriceDict = {fname: (sharelinkcost, uselinkcost) for fname in federatenames}", "{e: f for e, f in zip(nettopObj.elements, nettopObj.federates)} # print(\"new tuple:\", sharelinkcost, uselinkcost)", "'_top10.p', 'wb') as outfile: pickle.dump(toplist, outfile) with open(filename[:-2] + '_score.p', 'wb') as outfile:", "not in nettopObj.costValueDict or nettopObj.costValueDict[(sharelinkcost, uselinkcost)] == 0: # for f in federates:", "# while len([l for l in all_possible_edges if l[1] in destinations])<len(elements)//linkcapacity: all_possible_edges =", "numfederates, edgedivider) # if os.path.isfile(filename): with open(filename, 'rb') as infile: netlist.extend(pickle.load(infile)) hashNetDict =", "b in list(product(elementnames, elementnames)) if (a != b and not (a in destinations))]", "elfedDict = {e: f for e, f in zip(nettopObj.elements, nettopObj.federates)} # federates =", "if (a != b and not (a in destinations))] all_edges = random.sample(all_possible_edges, int(len(all_possible_edges)//edgedivider))", "federatenames)] elementDict = {e.name: e for e in elements} sources = [e for", "{fname: (sharelinkcost, uselinkcost) for fname in federatenames} # federates = [Federate(name = f,", "1000) for mintup in [(500,501), (400,600)]: if mintup in costdict: topol.auctionscore += costdict[maxtup]", "type=int, default=3, help='cores on server') args = parser.parse_args() argsdict = vars(args) nproc =", "0, owner = elementDict[e2].owner) for (e1, e2) in nettopObj.edges] time = 0 newtasks", "obj with 
open(filename, 'wb') as outfile: pickle.dump(finalDict, outfile) def aggregateNetworks(): netlist = []", "obj.costValueDict} else: hashNetworkDict = {} for seed in seedlist: # print(seed) random.seed(seed) while", "{e.name: e for e in elements} # sources = [e for e in", "[(satellites[0],satellites[1]), (satellites[3],stations[0]), (satellites[1],satellites[3]), # (satellites[2],satellites[4]), (satellites[2],satellites[1]), (satellites[2],satellites[3]), (satellites[3],satellites[4]), (satellites[4],stations[1]), (satellites[2],stations[0])] # all_possible_edges =", "k%20 == 0: objDict = {obj.hashid: obj for obj in objlist} with open(tempfilename,", "def aggregate60Nodes(): for numfederates, numelements, edgedivider in [(4,20,7), (4,20,11), (4,20,3), (4,20,5)]: filename =", "with open(filename, 'wb') as outfile: pickle.dump(hashNetworkDict, outfile) def calCostValue(nettopObj): federatenames = nettopObj.federates #", "(2,20), (3,20), (4,20)], [3,5,7,11]))): aggregateNetworks() # for (numfederates, numelements), edgedivider in list(fedeldensitylist): #", "elementnames[-2:] sources = [e for e in elementnames if e not in destinations]", "for sharelinkcost, uselinkcost in basetuples: fedPriceDict = {fname: (sharelinkcost, uselinkcost) for fname in", "and not (a in destinations))] all_edges = random.sample(all_possible_edges, int(len(all_possible_edges)//edgedivider)) edge2destin = [l for", "print(linklist) # print(nettopObj.destinations) # print(len(newtasks)) # print(federates) # print(linklist) solutionObj = MILPSolution(nettopObj.hashid, time,", "as outfile: pickle.dump(hashNetworkDict, outfile) def calCostValue(nettopObj): federatenames = nettopObj.federates # fedPriceDict = {fname:", "# with open(filename, 'wb') as outfile: # pickle.dump(hashNetworkDict, outfile) with open(tempfilename, 'wb') as", "= obj # with open(filename, 'wb') as outfile: # pickle.dump(hashNetworkDict, outfile) with open(tempfilename,", "governing permissions and limitations under the 
License. \"\"\" import sys, os sys.path.append(os.path.abspath('..')) from", "hashNetworkDict = pickle.load(infile) # for h, obj in objDict.items(): # hashNetworkDict[h] = obj", "{obj.hashid: obj for obj in objlist} pickle.dump(objDict, outfile) def multiProcCostValue(): global nproc, filename", "# federates = [Federate(name = f, cash = 0, sharelinkcost = fedPriceDict[f][0], uselinkcost", "with open(filename[:-2] + '_top10.p', 'wb') as outfile: pickle.dump(toplist, outfile) with open(filename[:-2] + '_score.p',", "for s in sources]) linklist = [Link(source = elementDict[e1], destin = elementDict[e2], capacity", "= nettopObj.sources # # print([s.name for s in sources]) # linklist = [Link(source", "numelements), edgedivider in list(fedeldensitylist): # filename = dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d.p'%(numelements, numfederates, edgedivider) #", "infile: hashNetworkDict = pickle.load(infile) # topollist = hashNetworkDict.values() # if len(hashNetworkDict) < 1000:", "costdict[maxtup] - costdict[mintup] # print(topol.auctionscore) toplist = sorted(hashNetworkDict.values(), key = lambda x: x.auctionscore,", "# print(elfedDict) # print(\"new tasks:\", newtasks) for sharelinkcost, uselinkcost in basetuples: fedPriceDict =", "!= b and element_federate_dict[a] != element_federate_dict[b])] all_possible_edges = [] all_edges = [] #", "with open(tempfilename, 'wb') as outfile: objDict = {obj.hashid: obj for obj in objlist}", "for n, s in enumerate(sources)] elfedDict = {e: f for e, f in", "# print(\"New tuple cost and value:\", sharelinkcost, uselinkcost, totalvalue) def updateCostValue(objlist, proc, tempfilename):", "server') args = parser.parse_args() argsdict = vars(args) nproc = argsdict['nproc'] time = 0", "= {e: f for e, f in zip(nettopObj.elements, nettopObj.federates)} # federates = [Federate(name", "e.name not in nettopObj.destinations] # sources = nettopObj.sources # print([s.name for s in", "this file except in compliance with the 
License. You may obtain a copy", "costdict[mintup] # print(topol.auctionscore) toplist = sorted(hashNetworkDict.values(), key = lambda x: x.auctionscore, reverse =", "s in sources]) destinations = elementnames[-2:] sources = [e for e in elementnames", "you may not use this file except in compliance with the License. You", "# print(fedPriceDict) # print(linklist) # print(nettopObj.destinations) # print(len(newtasks)) # print(federates) # print(linklist) solutionObj", "outfile) else: return def aggregate60Nodes(): for numfederates, numelements, edgedivider in [(4,20,7), (4,20,11), (4,20,3),", "outfile: # pickle.dump(hashNetworkDict, outfile) with open(tempfilename, 'wb') as outfile: objDict = {obj.hashid: obj", "federateDict = {f.name: f for f in federates} # print(\"element names:\", nettopObj.elements) elements", "= obj with open(filename, 'wb') as outfile: pickle.dump(finalDict, outfile) for proc in range(nproc):", "may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable", "in range(nproc): tempfilename = filename[:-2] + '_proc%s.p'%str(proc).zfill(2) with open(tempfilename, 'rb') as infile: hashNetworkDict", "f in set(federatenames)] # federateDict = {f.name: f for f in federates} #", "collections import defaultdict, Counter from itertools import product import pickle import random import", "+ 'hashNetworkDict_elements%d_federates%d_density%d.p'%(numelements, numfederates, edgedivider) # elementnames = ['e%d'%(i+1) for i in range(numelements)] #", "proc in range(nproc): tempfilename = filename[:-2] + '_proc%s.p'%str(proc).zfill(2) inds = range(proc, N, nproc)", "'/topologies_new/' def createNetTopologies(): global seedlist, filename, numfederates, elementnames, edgedivider numberfederates = numfederates*[len(elementnames)//numfederates] #", "all_edges = random.sample(all_possible_edges, int(len(all_possible_edges)//edgedivider)) edge2destin = [l for l in all_possible_edges if l[1]", "for h, obj in 
objDict.items(): # hashNetworkDict[h] = obj # with open(filename, 'wb')", "e, f in zip(nettopObj.elements, nettopObj.federates)} # print(elfedDict) # print(\"new tasks:\", newtasks) for sharelinkcost,", "sources]) # linklist = [Link(source = elementDict[e1], destin = elementDict[e2], capacity = linkcapacity,", "as outfile: pickle.dump(hashNetworkDict, outfile) else: return def aggregate60Nodes(): for numfederates, numelements, edgedivider in", "numfederates, numelements, edgedivider in [(4,20,7), (4,20,11), (4,20,3), (4,20,5)]: filename = dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d.p'%(numelements,", "solutionObj = optimizeMILP(elements = elements, linklist = linklist, destinations = nettopObj.destinations, storedtasks =", "aggregateNetworks(): netlist = [] for (numfederates, numelements), edgedivider in list(fedeldensitylist): filename = dir_topologies", "list(fedeldensitylist): # filename = dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d.p'%(numelements, numfederates, edgedivider) # elementnames = ['e%d'%(i+1)", "# (satellites[2],satellites[4]), (satellites[2],satellites[1]), (satellites[2],satellites[3]), (satellites[3],satellites[4]), (satellites[4],stations[1]), (satellites[2],stations[0])] # all_possible_edges = [(a,b) for a,", "import sys, os sys.path.append(os.path.abspath('..')) from resources.classes import * from resources.globalv import * from", "numfederates*[len(elementnames)//numfederates] # print([s.name for s in sources]) destinations = elementnames[-2:] sources = [e", "= [], newtasks = newtasks, time = time, federates = federates, edgePriceDict =", "federatenames = [e for l in namelist for e in l] random.shuffle(federatenames) #", "# elementnames = ['e%d'%(i+1) for i in range(numelements)] # createNetTopologies() # multiProcCostValue() #", "in zip(nettopObj.elements, nettopObj.federates)} # print(elfedDict) # print(\"new tasks:\", newtasks) for sharelinkcost, uselinkcost in", "as infile: 
netlist.extend(pickle.load(infile)) hashNetDict = {net.hashid: net for net in netlist} with open(dir_topologies", "in namelist for e in l] random.shuffle(federatenames) # print(\"shuffle:\", federatenames) # all_edges =", "import defaultdict, Counter from itertools import product import pickle import random import hashlib", "with open(tempfilename, 'rb') as infile: hashNetworkDict = pickle.load(infile) for h, obj in list(hashNetworkDict.items()):", "from collections import defaultdict, Counter from itertools import product import pickle import random", "print(topol.auctionscore) toplist = sorted(hashNetworkDict.values(), key = lambda x: x.auctionscore, reverse = True)[:10] #", "in federates: # f.cash = 0 # f.sharelinkcost = sharelinkcost # f.uselinkcost =", "(4,20,5)]: filename = dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d.p'%(numelements, numfederates, edgedivider) finalDict = {} for proc", "edgedivider) # if os.path.isfile(filename): with open(filename, 'rb') as infile: netlist.extend(pickle.load(infile)) hashNetDict = {net.hashid:", "# all_possible_edges = [(a,b) for a, b in list(product(elementnames, elementnames)) if (a !=", "objDict.items(): # hashNetworkDict[h] = obj # with open(filename, 'wb') as outfile: # pickle.dump(hashNetworkDict,", "for (numfederates, numelements), edgedivider in reversed(list(product([(2,10), (2,15), (3,15), (2,20), (3,20), (4,20)], [3,5,7,11]))): aggregateNetworks()", "{} for proc in range(60): tempfilename = filename[:-2] + '_proc%s.p'%str(proc).zfill(2) with open(tempfilename, 'rb')", "= []) solutionObj = optimizeMILP(elements = elements, linklist = linklist, destinations = nettopObj.destinations,", "element_federate_dict[a] != element_federate_dict[b])] all_possible_edges = [] all_edges = [] # while len([l for", "= dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d.p'%(numelements, numfederates, edgedivider) # elementnames = ['e%d'%(i+1) for i in", "time, fedPriceDict = fedPriceDict, 
fedValDict = {f: 0 for f in fedPriceDict.keys()}, edgelist", "in objDict.items(): # hashNetworkDict[h] = obj # with open(filename, 'wb') as outfile: #", "= 0, sharelinkcost = 0, uselinkcost = 0) for f in set(federatenames)] #", "owner = elementDict[e2].owner) for (e1, e2) in nettopObj.edges] time = 0 newtasks =", "toplist = sorted(hashNetworkDict.values(), key = lambda x: x.auctionscore, reverse = True)[:10] # print(filename,", "in zip(nettopObj.elements, nettopObj.federates)} # federates = [Federate(name = f, cash = 0, sharelinkcost", "'wb') as outfile: pickle.dump(objDict, outfile) # with open(filename, 'rb') as infile: # hashNetworkDict", "tempNetTop.hashid) hashNetworkDict[tempNetTop.hashid] = tempNetTop with open(filename, 'wb') as outfile: pickle.dump(hashNetworkDict, outfile) def calCostValue(nettopObj):", "seed in seedlist: # print(seed) random.seed(seed) while sum(numberfederates)<len(elementnames): i = random.choice(range(len(numberfederates))) numberfederates[i] +=", "= fedPriceDict[f][1]) for f in set(federatenames)] # federateDict = {f.name: f for f", "file except in compliance with the License. 
You may obtain a copy of", "e in toplist]) with open(filename[:-2] + '_top10.p', 'wb') as outfile: pickle.dump(toplist, outfile) with", "with open(filename, 'rb') as infile: hashNetworkDict = pickle.load(infile) hashNetworkDict = {h: obj for", "len(topollist) allProcs = [] for proc in range(nproc): tempfilename = filename[:-2] + '_proc%s.p'%str(proc).zfill(2)", "= argsdict['nproc'] time = 0 # basecost = [0, 200, 400, 600, 800,", "open(dir_topologies + 'hashNetDict.p', 'wb') as outfile: pickle.dump(hashNetDict, outfile) if __name__ == '__main__': parser", "topol.auctionscore += costdict[maxtup] - costdict[mintup] # print(topol.auctionscore) toplist = sorted(hashNetworkDict.values(), key = lambda", "sys.path.append(os.path.abspath('..')) from resources.classes import * from resources.globalv import * from collections import defaultdict,", "nume2d>0: newedges = random.sample(edge2destin, nume2d) # print(len(all_edges)) all_edges = all_edges + newedges #", "# sources = nettopObj.sources # print([s.name for s in sources]) linklist = [Link(source", "from resources.globalv import * from collections import defaultdict, Counter from itertools import product", "filename print(filename) if os.path.isfile(filename): with open(filename, 'rb') as infile: hashNetworkDict = pickle.load(infile) #", "sorted(hashNetworkDict.values(), key = lambda x: x.auctionscore, reverse = True)[:10] # print(filename, [e.auctionscore for", "= 0, sharelinkcost = fedPriceDict[f][0], uselinkcost = fedPriceDict[f][1]) for f in set(federatenames)] federateDict", "storedtasks = [], newtasks = newtasks, time = time, federates = federates, edgePriceDict", "pickle.load(infile) for h, obj in list(hashNetworkDict.items()): finalDict[h] = obj with open(filename, 'wb') as", "= [] all_edges = [] # while len([l for l in all_possible_edges if", "finalDict = {} for proc in range(60): tempfilename = filename[:-2] + '_proc%s.p'%str(proc).zfill(2) with", "= tempNetTop with open(filename, 'wb') as outfile: 
pickle.dump(hashNetworkDict, outfile) def calCostValue(nettopObj): federatenames =", "hashNetworkDict.items(): costdict = topol.costValueDict maxtup = (0, 1000) for mintup in [(500,501), (400,600)]:", "= [topollist[i] for i in inds] p = Process(target=updateCostValue, args=(objlist,proc,tempfilename)) p.start() allProcs.append(p) for", "= edge # if destin_count > len(satellites): # continue if s in destinations", "in all_possible_edges if l[1] in destinations and l not in all_edges] existingedges2desgin =", "namelist for e in l] random.shuffle(federatenames) # print(\"shuffle:\", federatenames) # all_edges = [(satellites[0],satellites[1]),", "for proc in range(nproc): os.remove(filename[:-2] + '_proc%s.p'%str(proc).zfill(2)) def calAuctionScore(): global filename print(filename) if", "edge2destin = [l for l in all_possible_edges if l[1] in destinations and l", "print(\"New tuple cost and value:\", sharelinkcost, uselinkcost, totalvalue) def updateCostValue(objlist, proc, tempfilename): global", "n in enumerate(numberfederates)] federatenames = [e for l in namelist for e in", "= f, cash = 0, sharelinkcost = fedPriceDict[f][0], uselinkcost = fedPriceDict[f][1]) for f", "as outfile: pickle.dump(finalDict, outfile) for proc in range(nproc): os.remove(filename[:-2] + '_proc%s.p'%str(proc).zfill(2)) def calAuctionScore():", "print(solutionObj.fedValDict) nettopObj.costValueDict[(sharelinkcost, uselinkcost)] = totalvalue # print(\"New tuple cost and value:\", sharelinkcost, uselinkcost,", "in range(nproc): os.remove(filename[:-2] + '_proc%s.p'%str(proc).zfill(2)) def calAuctionScore(): global filename print(filename) if os.path.isfile(filename): with", "License. 
\"\"\" import sys, os sys.path.append(os.path.abspath('..')) from resources.classes import * from resources.globalv import", "# print(nettopObj.hashid) # print(fedPriceDict) # print(linklist) # print(nettopObj.destinations) # print(len(newtasks)) # print(federates) #", "law or agreed to in writing, software distributed under the License is distributed", "not in hashNetworkDict: # print(seed, tempNetTop.hashid) hashNetworkDict[tempNetTop.hashid] = tempNetTop with open(filename, 'wb') as", "penalty=penalty) for n, s in enumerate(sources)] # elfedDict = {e: f for e,", "Version 2.0 (the \"License\"); you may not use this file except in compliance", "active=True, penalty=penalty) for n, s in enumerate(sources)] elfedDict = {e: f for e,", "under the License. \"\"\" import sys, os sys.path.append(os.path.abspath('..')) from resources.classes import * from", "with open(filename, 'wb') as outfile: pickle.dump(finalDict, outfile) def aggregateNetworks(): netlist = [] for", "in all_possible_edges if l[1] in destinations])<len(elements)//linkcapacity: all_possible_edges = [(a,b) for a, b in", "pickle.dump(hashNetworkDict, outfile) else: return def aggregate60Nodes(): for numfederates, numelements, edgedivider in [(4,20,7), (4,20,11),", "under the Apache License, Version 2.0 (the \"License\"); you may not use this", "value dict:\", len(nettopObj.costValueDict)) # print(nettopObj.hashid, nettopObj.costValueDict) # if (sharelinkcost, uselinkcost) not in nettopObj.costValueDict", "for h, obj in list(hashNetworkDict.items()): finalDict[h] = obj with open(filename, 'wb') as outfile:", "set(federatenames)] # federateDict = {f.name: f for f in federates} # # print(\"element", "= argparse.ArgumentParser(description=\"This processed raw data of twitter.\") parser.add_argument('--nproc', type=int, default=3, help='cores on server')", "uselinkcost = fedPriceDict[f][1]) for f in set(federatenames)] federateDict = {f.name: f for f", "in reversed(list(product([(2,10), (2,15), (3,15), (2,20), 
(3,20), (4,20)], [3,5,7,11]))): aggregateNetworks() # for (numfederates, numelements),", "proc in range(nproc): tempfilename = filename[:-2] + '_proc%s.p'%str(proc).zfill(2) with open(tempfilename, 'rb') as infile:", "infile: hashNetworkDict = pickle.load(infile) for h, obj in list(hashNetworkDict.items()): finalDict[h] = obj with", "elementDict[e2], capacity = linkcapacity, size = 0, owner = elementDict[e2].owner) for (e1, e2)", "or implied. See the License for the specific language governing permissions and limitations", "open(filename, 'wb') as outfile: pickle.dump(hashNetworkDict, outfile) def calCostValue(nettopObj): federatenames = nettopObj.federates # fedPriceDict", "nettopObj.federates)} # print(\"new tuple:\", sharelinkcost, uselinkcost) # print(\"length of cost value dict:\", len(nettopObj.costValueDict))", "nettopObj.edges] time = 0 newtasks = [Task(id = id + n, element=s, lastelement=s,", "# sources = nettopObj.sources # # print([s.name for s in sources]) # linklist", "federates = [Federate(name = f, cash = 0, sharelinkcost = fedPriceDict[f][0], uselinkcost =", "for fname in federatenames} # federates = [Federate(name = f, cash = 0,", "# time = 0 # newtasks = [Task(id = id + n, element=s,", "filename if os.path.isdir(\"/home/abbas.ehsanfar/gurobi\"): hostname = os.environ['HOSTNAME'] os.environ['GRB_LICENSE_FILE'] = \"/home/abbas.ehsanfar/gurobi/%s/lic%s/gurobi.lic\"%(hostname,str(proc%30).zfill(2)) for k, nettopObj in", "of cost value dict:\", len(nettopObj.costValueDict)) # print(nettopObj.hashid, nettopObj.costValueDict) # if (sharelinkcost, uselinkcost) not", "edgedivider in reversed(list(product([(2,10), (2,15), (3,15), (2,20), (3,20), (4,20)], [3,5,7,11]))): aggregateNetworks() # for (numfederates,", "as infile: hashNetworkDict = pickle.load(infile) hashNetworkDict = {h: obj for h,obj in hashNetworkDict.items()", "basetuples: fedPriceDict = {fname: (sharelinkcost, uselinkcost) for fname in federatenames} federates = [Federate(name", "CONDITIONS 
OF ANY KIND, either express or implied. See the License for the", "for (e,f) in zip(nettopObj.elements, federatenames)] elementDict = {e.name: e for e in elements}", "except in compliance with the License. You may obtain a copy of the", "for proc in range(nproc): tempfilename = filename[:-2] + '_proc%s.p'%str(proc).zfill(2) with open(tempfilename, 'rb') as", "= [l for l in all_possible_edges if l[1] in destinations and l not", "obj in objlist} with open(tempfilename, 'wb') as outfile: pickle.dump(objDict, outfile) # with open(filename,", "zip(nettopObj.elements, federatenames)] # elementDict = {e.name: e for e in elements} # sources", "{} for seed in seedlist: # print(seed) random.seed(seed) while sum(numberfederates)<len(elementnames): i = random.choice(range(len(numberfederates)))", "finalDict[h] = obj with open(filename, 'wb') as outfile: pickle.dump(finalDict, outfile) def aggregateNetworks(): netlist", "all_edges = list(all_edge_set) tempNetTop = NetTop(elementnames, all_edges, federatenames, sources, destinations) if tempNetTop.hashid not", "expiration=time + 5, init=time, active=True, penalty=penalty) for n, s in enumerate(sources)] # elfedDict", "e in l] random.shuffle(federatenames) # print(\"shuffle:\", federatenames) # all_edges = [(satellites[0],satellites[1]), (satellites[3],stations[0]), (satellites[1],satellites[3]),", "os sys.path.append(os.path.abspath('..')) from resources.classes import * from resources.globalv import * from collections import", "[] all_edges = [] # while len([l for l in all_possible_edges if l[1]", "{f.name: f for f in federates} # print(\"element names:\", nettopObj.elements) elements = [Element(name", "print(len(all_edges)) all_edges = all_edges + newedges # print(newedges) # print(len(all_edges)) all_edge_set = set([])", "elementDict[e1], destin = elementDict[e2], capacity = linkcapacity, size = 0, owner = elementDict[e2].owner)", "[Task(id = id + n, element=s, lastelement=s, size=size, value=value, expiration=time + 5, 
init=time,", "createNetTopologies(): global seedlist, filename, numfederates, elementnames, edgedivider numberfederates = numfederates*[len(elementnames)//numfederates] # print([s.name for", "edgedivider in list(fedeldensitylist): filename = dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d_top10.p'%(numelements, numfederates, edgedivider) # if os.path.isfile(filename):", "for e in elementnames if e not in destinations] if os.path.isfile(filename): with open(filename,", "newtasks) for sharelinkcost, uselinkcost in basetuples: fedPriceDict = {fname: (sharelinkcost, uselinkcost) for fname", "open(tempfilename, 'rb') as infile: hashNetworkDict = pickle.load(infile) for h, obj in list(hashNetworkDict.items()): finalDict[h]", "optimizeMILP from multiprocessing import Process, Manager import argparse dir_topologies = os.path.abspath('..') + '/topologies_new/'", "print(seed) random.seed(seed) while sum(numberfederates)<len(elementnames): i = random.choice(range(len(numberfederates))) numberfederates[i] += 1 namelist = [n*['f%d'%i]", "= 0, sharelinkcost = fedPriceDict[f][0], uselinkcost = fedPriceDict[f][1]) for f in set(federatenames)] #", "= list(range(0,500)) # for (numfederates, numelements), edgedivider in reversed(list(product([(2,10), (2,15), (3,15), (2,20), (3,20),", "# f.uselinkcost = uselinkcost edgePriceDict = {e: fedPriceDict[elfedDict[e[1]]][0] for e in nettopObj.edges} #", "express or implied. See the License for the specific language governing permissions and", "open(filename, 'wb') as outfile: # pickle.dump(hashNetworkDict, outfile) with open(tempfilename, 'wb') as outfile: objDict", "the License. 
\"\"\" import sys, os sys.path.append(os.path.abspath('..')) from resources.classes import * from resources.globalv", "a in allProcs: a.join() finalDict = {} for proc in range(nproc): tempfilename =", "for f in federates} # print(\"element names:\", nettopObj.elements) elements = [Element(name = e,", "fedPriceDict[f][1]) for f in set(federatenames)] # federateDict = {f.name: f for f in", "id + n, element=s, lastelement=s, size=size, value=value, expiration=time + 5, init=time, active=True, penalty=penalty)", "print(\"element names:\", nettopObj.elements) elements = [Element(name = e, capacity=elementcapacity, size = 0, owner", "= dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d.p'%(numelements, numfederates, edgedivider) finalDict = {} for proc in range(60):", "fedPriceDict[f][0], uselinkcost = fedPriceDict[f][1]) for f in set(federatenames)] federateDict = {f.name: f for", "sharelinkcost # f.uselinkcost = uselinkcost edgePriceDict = {e: fedPriceDict[elfedDict[e[1]]][0] for e in nettopObj.edges}", "s in enumerate(sources)] # elfedDict = {e: f for e, f in zip(nettopObj.elements,", "multiProcCostValue(): global nproc, filename with open(filename, 'rb') as infile: hashNetworkDict = pickle.load(infile) topollist", "newedges = random.sample(edge2destin, nume2d) # print(len(all_edges)) all_edges = all_edges + newedges # print(newedges)", "[]) solutionObj = optimizeMILP(elements = elements, linklist = linklist, destinations = nettopObj.destinations, storedtasks", "with open(filename, 'rb') as infile: hashNetworkDict = pickle.load(infile) topollist = list(hashNetworkDict.values()) N =", "s in sources]) # linklist = [Link(source = elementDict[e1], destin = elementDict[e2], capacity", "edgedivider) finalDict = {} for proc in range(60): tempfilename = filename[:-2] + '_proc%s.p'%str(proc).zfill(2)", "BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See", "sharelinkcost = 0, uselinkcost = 0) for f in set(federatenames)] # federateDict =", "= time, federates = federates, edgePriceDict = edgePriceDict, solutionObj = solutionObj) totalvalue =", "for (e1, e2) in nettopObj.edges] # time = 0 # newtasks = [Task(id", "tempNetTop.hashid not in hashNetworkDict: # print(seed, tempNetTop.hashid) hashNetworkDict[tempNetTop.hashid] = tempNetTop with open(filename, 'wb')", "len(nettopObj.costValueDict)) # print(nettopObj.hashid, nettopObj.costValueDict) # if (sharelinkcost, uselinkcost) not in nettopObj.costValueDict or nettopObj.costValueDict[(sharelinkcost,", "sharelinkcost, uselinkcost) # print(\"length of cost value dict:\", len(nettopObj.costValueDict)) # print(nettopObj.hashid, nettopObj.costValueDict) #", "tempNetTop with open(filename, 'wb') as outfile: pickle.dump(hashNetworkDict, outfile) def calCostValue(nettopObj): federatenames = nettopObj.federates", "x.auctionscore, reverse = True)[:10] # print(filename, [e.auctionscore for e in toplist]) with open(filename[:-2]", "e in elementnames if e not in destinations] if os.path.isfile(filename): with open(filename, 'rb')", "destinations) if tempNetTop.hashid not in hashNetworkDict: # print(seed, tempNetTop.hashid) hashNetworkDict[tempNetTop.hashid] = tempNetTop with", "nettopObj.edges} # print(edgePriceDict) # print(nettopObj.hashid) # print(fedPriceDict) # print(linklist) # print(nettopObj.destinations) # print(len(newtasks))", "(e,f) in zip(nettopObj.elements, federatenames)] # elementDict = {e.name: e for e in elements}", "allProcs: a.join() finalDict = {} for proc in range(nproc): tempfilename = filename[:-2] +", "= pickle.load(infile) # topollist = hashNetworkDict.values() # if len(hashNetworkDict) < 1000: # return", "s in sources]) linklist = [Link(source = elementDict[e1], destin = elementDict[e2], capacity =", "for (e1, e2) in nettopObj.edges] time = 0 newtasks = [Task(id = id", "all_possible_edges = [(a,b) for a, b in list(product(elementnames, elementnames)) 
if (a != b", "allProcs.append(p) for a in allProcs: a.join() finalDict = {} for proc in range(nproc):", "(satellites[2],satellites[3]), (satellites[3],satellites[4]), (satellites[4],stations[1]), (satellites[2],stations[0])] # all_possible_edges = [(a,b) for a, b in list(product(elementnames,", "elfedDict = {e: f for e, f in zip(nettopObj.elements, nettopObj.federates)} # print(\"new tuple:\",", "(sharelinkcost, uselinkcost) for fname in federatenames} # federates = [Federate(name = f, cash", "objlist} pickle.dump(objDict, outfile) def multiProcCostValue(): global nproc, filename with open(filename, 'rb') as infile:", "= range(proc, N, nproc) objlist = [topollist[i] for i in inds] p =", "federatenames = nettopObj.federates # fedPriceDict = {fname: (sharelinkcost, uselinkcost) for fname in federatenames}", "= [] # while len([l for l in all_possible_edges if l[1] in destinations])<len(elements)//linkcapacity:", "specific language governing permissions and limitations under the License. \"\"\" import sys, os", "(satellites[3],satellites[4]), (satellites[4],stations[1]), (satellites[2],stations[0])] # all_possible_edges = [(a,b) for a, b in list(product(elementnames, elementnames))", "set([]) destin_count = 0 for edge in all_edges: s, d = edge #", "zip(nettopObj.elements, nettopObj.federates)} # federates = [Federate(name = f, cash = 0, sharelinkcost =", "open(filename, 'wb') as outfile: pickle.dump(finalDict, outfile) def aggregateNetworks(): netlist = [] for (numfederates,", "size=size, value=value, expiration=time + 5, init=time, active=True, penalty=penalty) for n, s in enumerate(sources)]", "print(len(newtasks)) # print(federates) # print(linklist) solutionObj = MILPSolution(nettopObj.hashid, time, fedPriceDict = fedPriceDict, fedValDict", "hashNetworkDict = pickle.load(infile) for h, obj in hashNetworkDict.items(): finalDict[h] = obj with open(filename,", "0 # basecost = [0, 200, 400, 600, 800, 1000] seedlist = list(range(0,500))", "# basecost = [0, 200, 400, 
600, 800, 1000] seedlist = list(range(0,500)) #", "# print(solutionObj.fedValDict) nettopObj.costValueDict[(sharelinkcost, uselinkcost)] = totalvalue # print(\"New tuple cost and value:\", sharelinkcost,", "e in elements if e.name not in nettopObj.destinations] # # sources = nettopObj.sources", "if e.name not in nettopObj.destinations] # # sources = nettopObj.sources # # print([s.name", "in set(federatenames)] federateDict = {f.name: f for f in federates} # print(\"element names:\",", "= uselinkcost edgePriceDict = {e: fedPriceDict[elfedDict[e[1]]][0] for e in nettopObj.edges} # print(edgePriceDict) #", "elfedDict = {e: f for e, f in zip(nettopObj.elements, nettopObj.federates)} # print(elfedDict) #", "a, b in list(product(elementnames, elementnames)) if (a != b and not (a in", "random.seed(seed) while sum(numberfederates)<len(elementnames): i = random.choice(range(len(numberfederates))) numberfederates[i] += 1 namelist = [n*['f%d'%i] for", "open(tempfilename, 'wb') as outfile: objDict = {obj.hashid: obj for obj in objlist} pickle.dump(objDict,", "outfile: pickle.dump(toplist, outfile) with open(filename[:-2] + '_score.p', 'wb') as outfile: pickle.dump(hashNetworkDict, outfile) else:", "e for e in elements} # sources = [e for e in elements", "(0, 1000) for mintup in [(500,501), (400,600)]: if mintup in costdict: topol.auctionscore +=", "# print(seed) random.seed(seed) while sum(numberfederates)<len(elementnames): i = random.choice(range(len(numberfederates))) numberfederates[i] += 1 namelist =", "# print(len(all_edges)) all_edges = all_edges + newedges # print(newedges) # print(len(all_edges)) all_edge_set =", "init=time, active=True, penalty=penalty) for n, s in enumerate(sources)] elfedDict = {e: f for", "objlist = [topollist[i] for i in inds] p = Process(target=updateCostValue, args=(objlist,proc,tempfilename)) p.start() allProcs.append(p)", "= [e for e in elements if e.name not in nettopObj.destinations] # #", "all_possible_edges = [] all_edges = [] # while 
len([l for l in all_possible_edges", "# # print([s.name for s in sources]) # linklist = [Link(source = elementDict[e1],", "itertools import product import pickle import random import hashlib from resources.optimizeMILP import optimizeMILP", "= [Element(name = e, capacity=elementcapacity, size = 0, owner = federateDict[f]) for (e,f)", "pickle.load(infile) hashNetworkDict = {h: obj for h,obj in hashNetworkDict.items() if obj.costValueDict} else: hashNetworkDict", "1000: # return for k, topol in hashNetworkDict.items(): costdict = topol.costValueDict maxtup =", "if os.path.isfile(filename): with open(filename, 'rb') as infile: hashNetworkDict = pickle.load(infile) hashNetworkDict = {h:", "toplist]) with open(filename[:-2] + '_top10.p', 'wb') as outfile: pickle.dump(toplist, outfile) with open(filename[:-2] +", "in range(nproc): tempfilename = filename[:-2] + '_proc%s.p'%str(proc).zfill(2) inds = range(proc, N, nproc) objlist", "and limitations under the License. \"\"\" import sys, os sys.path.append(os.path.abspath('..')) from resources.classes import", "all_edge_set.add((s,d)) all_edge_set.add((d,s)) all_edges = list(all_edge_set) tempNetTop = NetTop(elementnames, all_edges, federatenames, sources, destinations) if", "element=s, lastelement=s, size=size, value=value, expiration=time + 5, init=time, active=True, penalty=penalty) for n, s", "range(60): tempfilename = filename[:-2] + '_proc%s.p'%str(proc).zfill(2) with open(tempfilename, 'rb') as infile: hashNetworkDict =", "for h,obj in hashNetworkDict.items() if obj.costValueDict} else: hashNetworkDict = {} for seed in", "fname in federatenames} federates = [Federate(name = f, cash = 0, sharelinkcost =", "# # sources = nettopObj.sources # # print([s.name for s in sources]) #", "print(nettopObj.hashid, nettopObj.costValueDict) # if (sharelinkcost, uselinkcost) not in nettopObj.costValueDict or nettopObj.costValueDict[(sharelinkcost, uselinkcost)] ==", "0 # newtasks = [Task(id = id + n, element=s, lastelement=s, 
size=size, value=value,", "[e for l in namelist for e in l] random.shuffle(federatenames) # print(\"shuffle:\", federatenames)", "pickle.dump(objDict, outfile) # with open(filename, 'rb') as infile: # hashNetworkDict = pickle.load(infile) #", "as infile: hashNetworkDict = pickle.load(infile) topollist = list(hashNetworkDict.values()) N = len(topollist) allProcs =", "linkcapacity, size = 0, owner = elementDict[e2].owner) for (e1, e2) in nettopObj.edges] time", "a, b in list(product(elementnames, elementnames)) if (a != b and element_federate_dict[a] != element_federate_dict[b])]", "# topollist = hashNetworkDict.values() # if len(hashNetworkDict) < 1000: # return for k,", "elements = [Element(name = e, capacity=elementcapacity, size = 0, owner = federateDict[f]) for", "= [e for e in elementnames if e not in destinations] if os.path.isfile(filename):", "f in federates} # print(\"element names:\", nettopObj.elements) elements = [Element(name = e, capacity=elementcapacity,", "global seedlist, filename, numfederates, elementnames, edgedivider numberfederates = numfederates*[len(elementnames)//numfederates] # print([s.name for s", "nettopObj.sources # # print([s.name for s in sources]) # linklist = [Link(source =", "* from resources.globalv import * from collections import defaultdict, Counter from itertools import", "value=value, expiration=time + 5, init=time, active=True, penalty=penalty) for n, s in enumerate(sources)] elfedDict", "len(existingedges2desgin)) # print(nume2d) if nume2d>0: newedges = random.sample(edge2destin, nume2d) # print(len(all_edges)) all_edges =", "fedPriceDict, fedValDict = {f: 0 for f in fedPriceDict.keys()}, edgelist = []) solutionObj", "import optimizeMILP from multiprocessing import Process, Manager import argparse dir_topologies = os.path.abspath('..') +", "in hashNetworkDict.items() if obj.costValueDict} else: hashNetworkDict = {} for seed in seedlist: #", "pickle.dump(hashNetworkDict, outfile) with open(tempfilename, 'wb') as outfile: 
objDict = {obj.hashid: obj for obj", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "int(len(all_possible_edges)//edgedivider)) edge2destin = [l for l in all_possible_edges if l[1] in destinations and", "owner = federateDict[f]) for (e,f) in zip(nettopObj.elements, federatenames)] elementDict = {e.name: e for", "in compliance with the License. You may obtain a copy of the License", "'rb') as infile: # hashNetworkDict = pickle.load(infile) # for h, obj in objDict.items():", "KIND, either express or implied. See the License for the specific language governing", "== 0: objDict = {obj.hashid: obj for obj in objlist} with open(tempfilename, 'wb')", "# elementDict = {e.name: e for e in elements} # sources = [e", "# print(nume2d) if nume2d>0: newedges = random.sample(edge2destin, nume2d) # print(len(all_edges)) all_edges = all_edges", "= list(hashNetworkDict.values()) N = len(topollist) allProcs = [] for proc in range(nproc): tempfilename", "writing, software distributed under the License is distributed on an \"AS IS\" BASIS,", "dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d.p'%(numelements, numfederates, edgedivider) # elementnames = ['e%d'%(i+1) for i in range(numelements)]", "= 0, owner = federateDict[f]) for (e,f) in zip(nettopObj.elements, federatenames)] elementDict = {e.name:", "topollist = list(hashNetworkDict.values()) N = len(topollist) allProcs = [] for proc in range(nproc):", "print(filename, [e.auctionscore for e in toplist]) with open(filename[:-2] + '_top10.p', 'wb') as outfile:", "if destin_count > len(satellites): # continue if s in destinations or d in", "f for e, f in zip(nettopObj.elements, nettopObj.federates)} # print(\"new tuple:\", sharelinkcost, uselinkcost) #", "print(\"new tasks:\", newtasks) for sharelinkcost, uselinkcost in basetuples: fedPriceDict = {fname: (sharelinkcost, uselinkcost)", "active=True, penalty=penalty) for n, s in enumerate(sources)] # elfedDict = {e: f for", "data 
of twitter.\") parser.add_argument('--nproc', type=int, default=3, help='cores on server') parser.add_argument('--n', type=int, default=3, help='cores", "i, n in enumerate(numberfederates)] federatenames = [e for l in namelist for e", "hashNetworkDict.items() if obj.costValueDict} else: hashNetworkDict = {} for seed in seedlist: # print(seed)", "e in elements if e.name not in nettopObj.destinations] # sources = nettopObj.sources #", "costdict: topol.auctionscore += costdict[maxtup] - costdict[mintup] # print(topol.auctionscore) toplist = sorted(hashNetworkDict.values(), key =", "in federatenames} federates = [Federate(name = f, cash = 0, sharelinkcost = fedPriceDict[f][0],", "nettopObj.costValueDict[(sharelinkcost, uselinkcost)] == 0: # for f in federates: # f.cash = 0", "costdict = topol.costValueDict maxtup = (0, 1000) for mintup in [(500,501), (400,600)]: if", "= solutionObj) totalvalue = solutionObj.totalvalue # print(solutionObj.sourceEdgeDict) # print(solutionObj.fedValDict) nettopObj.costValueDict[(sharelinkcost, uselinkcost)] = totalvalue", "os.path.isfile(filename): with open(filename, 'rb') as infile: netlist.extend(pickle.load(infile)) hashNetDict = {net.hashid: net for net", "OF ANY KIND, either express or implied. 
See the License for the specific", "outfile) with open(filename[:-2] + '_score.p', 'wb') as outfile: pickle.dump(hashNetworkDict, outfile) else: return def", "sources, destinations) if tempNetTop.hashid not in hashNetworkDict: # print(seed, tempNetTop.hashid) hashNetworkDict[tempNetTop.hashid] = tempNetTop", "federateDict[f]) for (e,f) in zip(nettopObj.elements, federatenames)] elementDict = {e.name: e for e in", "filename, numfederates, elementnames, edgedivider numberfederates = numfederates*[len(elementnames)//numfederates] # print([s.name for s in sources])", "not in nettopObj.destinations] # # sources = nettopObj.sources # # print([s.name for s", "proc in range(nproc): os.remove(filename[:-2] + '_proc%s.p'%str(proc).zfill(2)) def calAuctionScore(): global filename print(filename) if os.path.isfile(filename):", "'wb') as outfile: pickle.dump(finalDict, outfile) def aggregateNetworks(): netlist = [] for (numfederates, numelements),", "all_edges = [(satellites[0],satellites[1]), (satellites[3],stations[0]), (satellites[1],satellites[3]), # (satellites[2],satellites[4]), (satellites[2],satellites[1]), (satellites[2],satellites[3]), (satellites[3],satellites[4]), (satellites[4],stations[1]), (satellites[2],stations[0])] #", "= 0) for f in set(federatenames)] # federateDict = {f.name: f for f", "+ '/topologies_new/' def createNetTopologies(): global seedlist, filename, numfederates, elementnames, edgedivider numberfederates = numfederates*[len(elementnames)//numfederates]", "objDict = {obj.hashid: obj for obj in objlist} with open(tempfilename, 'wb') as outfile:", "fedPriceDict[f][1]) for f in set(federatenames)] federateDict = {f.name: f for f in federates}", "from resources.optimizeMILP import optimizeMILP from multiprocessing import Process, Manager import argparse dir_topologies =", "on server') parser.add_argument('--n', type=int, default=3, help='cores on server') args = parser.parse_args() argsdict =", "with open(filename, 'rb') as infile: # hashNetworkDict = 
pickle.load(infile) # for h, obj", "n, element=s, lastelement=s, size=size, value=value, expiration=time + 5, init=time, active=True, penalty=penalty) for n,", "'hashNetDict.p', 'wb') as outfile: pickle.dump(hashNetDict, outfile) if __name__ == '__main__': parser = argparse.ArgumentParser(description=\"This", "if e not in destinations] if os.path.isfile(filename): with open(filename, 'rb') as infile: hashNetworkDict", "sources]) linklist = [Link(source = elementDict[e1], destin = elementDict[e2], capacity = linkcapacity, size", "key = lambda x: x.auctionscore, reverse = True)[:10] # print(filename, [e.auctionscore for e", "default=3, help='cores on server') parser.add_argument('--n', type=int, default=3, help='cores on server') args = parser.parse_args()", "See the License for the specific language governing permissions and limitations under the", "[] for (numfederates, numelements), edgedivider in list(fedeldensitylist): filename = dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d_top10.p'%(numelements, numfederates,", "uselinkcost in basetuples: fedPriceDict = {fname: (sharelinkcost, uselinkcost) for fname in federatenames} federates", "pickle.dump(hashNetDict, outfile) if __name__ == '__main__': parser = argparse.ArgumentParser(description=\"This processed raw data of", "in federates} # # print(\"element names:\", nettopObj.elements) # elements = [Element(name = e,", "as infile: # hashNetworkDict = pickle.load(infile) # for h, obj in objDict.items(): #", "help='cores on server') args = parser.parse_args() argsdict = vars(args) nproc = argsdict['nproc'] time", "for fname in federatenames} federates = [Federate(name = f, cash = 0, sharelinkcost", "\"License\"); you may not use this file except in compliance with the License.", "in netlist} with open(dir_topologies + 'hashNetDict.p', 'wb') as outfile: pickle.dump(hashNetDict, outfile) if __name__", "e.name not in nettopObj.destinations] # # sources = nettopObj.sources # # print([s.name for", "is 
distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY", "net for net in netlist} with open(dir_topologies + 'hashNetDict.p', 'wb') as outfile: pickle.dump(hashNetDict,", "destin_count += linkcapacity all_edge_set.add((s,d)) all_edge_set.add((d,s)) all_edges = list(all_edge_set) tempNetTop = NetTop(elementnames, all_edges, federatenames,", "agreed to in writing, software distributed under the License is distributed on an", "enumerate(objlist): # print(\"New topoology:\", nettopObj.hashid) calCostValue(nettopObj) if k%20 == 0: objDict = {obj.hashid:", "0, owner = federateDict[f]) for (e,f) in zip(nettopObj.elements, federatenames)] # elementDict = {e.name:", "elements if e.name not in nettopObj.destinations] # # sources = nettopObj.sources # #", "destinations])<len(elements)//linkcapacity: all_possible_edges = [(a,b) for a, b in list(product(elementnames, elementnames)) if (a !=", "objDict = {obj.hashid: obj for obj in objlist} pickle.dump(objDict, outfile) def multiProcCostValue(): global", "in list(fedeldensitylist): filename = dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d_top10.p'%(numelements, numfederates, edgedivider) # if os.path.isfile(filename): with", "implied. 
See the License for the specific language governing permissions and limitations under", "sharelinkcost = fedPriceDict[f][0], uselinkcost = fedPriceDict[f][1]) for f in set(federatenames)] # federateDict =", "# if os.path.isfile(filename): with open(filename, 'rb') as infile: netlist.extend(pickle.load(infile)) hashNetDict = {net.hashid: net", "= pickle.load(infile) hashNetworkDict = {h: obj for h,obj in hashNetworkDict.items() if obj.costValueDict} else:", "'wb') as outfile: pickle.dump(finalDict, outfile) for proc in range(nproc): os.remove(filename[:-2] + '_proc%s.p'%str(proc).zfill(2)) def", "in hashNetworkDict.items(): costdict = topol.costValueDict maxtup = (0, 1000) for mintup in [(500,501),", "as outfile: # pickle.dump(hashNetworkDict, outfile) with open(tempfilename, 'wb') as outfile: objDict = {obj.hashid:", "x: x.auctionscore, reverse = True)[:10] # print(filename, [e.auctionscore for e in toplist]) with", "l[1] in destinations] nume2d = int(len(sources)/2 - len(existingedges2desgin)) # print(nume2d) if nume2d>0: newedges", "calCostValue(nettopObj): federatenames = nettopObj.federates # fedPriceDict = {fname: (sharelinkcost, uselinkcost) for fname in", "= federates, edgePriceDict = edgePriceDict, solutionObj = solutionObj) totalvalue = solutionObj.totalvalue # print(solutionObj.sourceEdgeDict)", "or nettopObj.costValueDict[(sharelinkcost, uselinkcost)] == 0: # for f in federates: # f.cash =", "[] for proc in range(nproc): tempfilename = filename[:-2] + '_proc%s.p'%str(proc).zfill(2) inds = range(proc,", "os.environ['HOSTNAME'] os.environ['GRB_LICENSE_FILE'] = \"/home/abbas.ehsanfar/gurobi/%s/lic%s/gurobi.lic\"%(hostname,str(proc%30).zfill(2)) for k, nettopObj in enumerate(objlist): # print(\"New topoology:\", nettopObj.hashid)", "= 0, owner = federateDict[f]) for (e,f) in zip(nettopObj.elements, federatenames)] # elementDict =", "= elementDict[e2].owner) for (e1, e2) in nettopObj.edges] # time = 0 # newtasks", "outfile: pickle.dump(hashNetworkDict, 
outfile) def calCostValue(nettopObj): federatenames = nettopObj.federates # fedPriceDict = {fname: (sharelinkcost,", "topol.costValueDict maxtup = (0, 1000) for mintup in [(500,501), (400,600)]: if mintup in", "i = random.choice(range(len(numberfederates))) numberfederates[i] += 1 namelist = [n*['f%d'%i] for i, n in", "for (e,f) in zip(nettopObj.elements, federatenames)] # elementDict = {e.name: e for e in", "'_proc%s.p'%str(proc).zfill(2)) def calAuctionScore(): global filename print(filename) if os.path.isfile(filename): with open(filename, 'rb') as infile:", "in nettopObj.edges] time = 0 newtasks = [Task(id = id + n, element=s,", "in all_edges if l[1] in destinations] nume2d = int(len(sources)/2 - len(existingedges2desgin)) # print(nume2d)", "{e: fedPriceDict[elfedDict[e[1]]][0] for e in nettopObj.edges} # print(edgePriceDict) # print(nettopObj.hashid) # print(fedPriceDict) #", "5, init=time, active=True, penalty=penalty) for n, s in enumerate(sources)] # elfedDict = {e:", "with open(filename, 'rb') as infile: netlist.extend(pickle.load(infile)) hashNetDict = {net.hashid: net for net in", "updateCostValue(objlist, proc, tempfilename): global filename if os.path.isdir(\"/home/abbas.ehsanfar/gurobi\"): hostname = os.environ['HOSTNAME'] os.environ['GRB_LICENSE_FILE'] = \"/home/abbas.ehsanfar/gurobi/%s/lic%s/gurobi.lic\"%(hostname,str(proc%30).zfill(2))", "for e, f in zip(nettopObj.elements, nettopObj.federates)} # print(\"new tuple:\", sharelinkcost, uselinkcost) # print(\"length", "'hashNetworkDict_elements%d_federates%d_density%d_top10.p'%(numelements, numfederates, edgedivider) # if os.path.isfile(filename): with open(filename, 'rb') as infile: netlist.extend(pickle.load(infile)) hashNetDict", "required by applicable law or agreed to in writing, software distributed under the", "# all_edges = [(satellites[0],satellites[1]), (satellites[3],stations[0]), (satellites[1],satellites[3]), # (satellites[2],satellites[4]), (satellites[2],satellites[1]), 
(satellites[2],satellites[3]), (satellites[3],satellites[4]), (satellites[4],stations[1]), (satellites[2],stations[0])]", "'_proc%s.p'%str(proc).zfill(2) with open(tempfilename, 'rb') as infile: hashNetworkDict = pickle.load(infile) for h, obj in", "proc, tempfilename): global filename if os.path.isdir(\"/home/abbas.ehsanfar/gurobi\"): hostname = os.environ['HOSTNAME'] os.environ['GRB_LICENSE_FILE'] = \"/home/abbas.ehsanfar/gurobi/%s/lic%s/gurobi.lic\"%(hostname,str(proc%30).zfill(2)) for", "pickle.load(infile) # for h, obj in objDict.items(): # hashNetworkDict[h] = obj # with", "Stevens Institute of Technology Licensed under the Apache License, Version 2.0 (the \"License\");", "[l for l in all_possible_edges if l[1] in destinations and l not in", "edgedivider numberfederates = numfederates*[len(elementnames)//numfederates] # print([s.name for s in sources]) destinations = elementnames[-2:]", "federatenames, sources, destinations) if tempNetTop.hashid not in hashNetworkDict: # print(seed, tempNetTop.hashid) hashNetworkDict[tempNetTop.hashid] =", "in destinations and l not in all_edges] existingedges2desgin = [l for l in", "random.shuffle(federatenames) # print(\"shuffle:\", federatenames) # all_edges = [(satellites[0],satellites[1]), (satellites[3],stations[0]), (satellites[1],satellites[3]), # (satellites[2],satellites[4]), (satellites[2],satellites[1]),", "= lambda x: x.auctionscore, reverse = True)[:10] # print(filename, [e.auctionscore for e in", "(400,600)]: if mintup in costdict: topol.auctionscore += costdict[maxtup] - costdict[mintup] # print(topol.auctionscore) toplist", "= {f.name: f for f in federates} # # print(\"element names:\", nettopObj.elements) #", "finalDict[h] = obj with open(filename, 'wb') as outfile: pickle.dump(finalDict, outfile) for proc in", "= [Federate(name = f, cash = 0, sharelinkcost = 0, uselinkcost = 0)", "= [l for l in all_edges if l[1] in destinations] nume2d = int(len(sources)/2", "l not in all_edges] existingedges2desgin = [l 
for l in all_edges if l[1]", "linklist = linklist, destinations = nettopObj.destinations, storedtasks = [], newtasks = newtasks, time", "= {fname: (sharelinkcost, uselinkcost) for fname in federatenames} # federates = [Federate(name =", "solutionObj) totalvalue = solutionObj.totalvalue # print(solutionObj.sourceEdgeDict) # print(solutionObj.fedValDict) nettopObj.costValueDict[(sharelinkcost, uselinkcost)] = totalvalue #", "800, 1000] seedlist = list(range(0,500)) # for (numfederates, numelements), edgedivider in reversed(list(product([(2,10), (2,15),", "tuple:\", sharelinkcost, uselinkcost) # print(\"length of cost value dict:\", len(nettopObj.costValueDict)) # print(nettopObj.hashid, nettopObj.costValueDict)", "= federateDict[f]) for (e,f) in zip(nettopObj.elements, federatenames)] # elementDict = {e.name: e for", "outfile) def multiProcCostValue(): global nproc, filename with open(filename, 'rb') as infile: hashNetworkDict =", "filename with open(filename, 'rb') as infile: hashNetworkDict = pickle.load(infile) topollist = list(hashNetworkDict.values()) N", "ANY KIND, either express or implied. 
See the License for the specific language", "Technology Licensed under the Apache License, Version 2.0 (the \"License\"); you may not", "[(a,b) for a, b in list(product(elementnames, elementnames)) if (a != b and not", "hashNetDict = {net.hashid: net for net in netlist} with open(dir_topologies + 'hashNetDict.p', 'wb')", "'hashNetworkDict_elements%d_federates%d_density%d.p'%(numelements, numfederates, edgedivider) # elementnames = ['e%d'%(i+1) for i in range(numelements)] # createNetTopologies()", "'_score.p', 'wb') as outfile: pickle.dump(hashNetworkDict, outfile) else: return def aggregate60Nodes(): for numfederates, numelements,", "server') parser.add_argument('--n', type=int, default=3, help='cores on server') args = parser.parse_args() argsdict = vars(args)", "argparse dir_topologies = os.path.abspath('..') + '/topologies_new/' def createNetTopologies(): global seedlist, filename, numfederates, elementnames,", "print(len(all_edges)) all_edge_set = set([]) destin_count = 0 for edge in all_edges: s, d", "obj in objlist} pickle.dump(objDict, outfile) def multiProcCostValue(): global nproc, filename with open(filename, 'rb')", "fedValDict = {f: 0 for f in fedPriceDict.keys()}, edgelist = []) solutionObj =", "= sorted(hashNetworkDict.values(), key = lambda x: x.auctionscore, reverse = True)[:10] # print(filename, [e.auctionscore", "the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless", "as outfile: pickle.dump(toplist, outfile) with open(filename[:-2] + '_score.p', 'wb') as outfile: pickle.dump(hashNetworkDict, outfile)", "0, sharelinkcost = fedPriceDict[f][0], uselinkcost = fedPriceDict[f][1]) for f in set(federatenames)] # federateDict", "product import pickle import random import hashlib from resources.optimizeMILP import optimizeMILP from multiprocessing", "in l] random.shuffle(federatenames) # print(\"shuffle:\", federatenames) # all_edges = [(satellites[0],satellites[1]), (satellites[3],stations[0]), (satellites[1],satellites[3]), #", "(the \"License\"); you may not use this file except in compliance with the", "for i, n in enumerate(numberfederates)] federatenames = [e for l in namelist for", "all_edges, federatenames, sources, destinations) if tempNetTop.hashid not in hashNetworkDict: # print(seed, tempNetTop.hashid) hashNetworkDict[tempNetTop.hashid]", "'__main__': parser = argparse.ArgumentParser(description=\"This processed raw data of twitter.\") parser.add_argument('--nproc', type=int, default=3, help='cores", "1 namelist = [n*['f%d'%i] for i, n in enumerate(numberfederates)] federatenames = [e for", "(4,20)], [3,5,7,11]))): aggregateNetworks() # for (numfederates, numelements), edgedivider in list(fedeldensitylist): # filename =", "with open(filename[:-2] + '_score.p', 'wb') as outfile: pickle.dump(hashNetworkDict, outfile) else: return def aggregate60Nodes():", "edgePriceDict = edgePriceDict, solutionObj = solutionObj) totalvalue = solutionObj.totalvalue # print(solutionObj.sourceEdgeDict) # print(solutionObj.fedValDict)", "[e for e in elementnames if e not in destinations] if os.path.isfile(filename): with", "obj for h,obj in hashNetworkDict.items() if obj.costValueDict} else: hashNetworkDict = {} for seed", "= [] for (numfederates, numelements), edgedivider in list(fedeldensitylist): filename = dir_topologies + 
'hashNetworkDict_elements%d_federates%d_density%d_top10.p'%(numelements,", "pickle.dump(finalDict, outfile) for proc in range(nproc): os.remove(filename[:-2] + '_proc%s.p'%str(proc).zfill(2)) def calAuctionScore(): global filename", "# print(solutionObj.sourceEdgeDict) # print(solutionObj.fedValDict) nettopObj.costValueDict[(sharelinkcost, uselinkcost)] = totalvalue # print(\"New tuple cost and", "= elementDict[e2].owner) for (e1, e2) in nettopObj.edges] time = 0 newtasks = [Task(id", "(2,15), (3,15), (2,20), (3,20), (4,20)], [3,5,7,11]))): aggregateNetworks() # for (numfederates, numelements), edgedivider in", "http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed", "resources.optimizeMILP import optimizeMILP from multiprocessing import Process, Manager import argparse dir_topologies = os.path.abspath('..')", "[n*['f%d'%i] for i, n in enumerate(numberfederates)] federatenames = [e for l in namelist", "argparse.ArgumentParser(description=\"This processed raw data of twitter.\") parser.add_argument('--nproc', type=int, default=3, help='cores on server') parser.add_argument('--n',", "License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF", "nettopObj in enumerate(objlist): # print(\"New topoology:\", nettopObj.hashid) calCostValue(nettopObj) if k%20 == 0: objDict", "= {e.name: e for e in elements} # sources = [e for e", "= {} for proc in range(nproc): tempfilename = filename[:-2] + '_proc%s.p'%str(proc).zfill(2) with open(tempfilename,", "True)[:10] # print(filename, [e.auctionscore for e in toplist]) with open(filename[:-2] + '_top10.p', 'wb')", "print(edgePriceDict) # print(nettopObj.hashid) # print(fedPriceDict) # print(linklist) # print(nettopObj.destinations) # print(len(newtasks)) # print(federates)", "elementDict[e2].owner) for (e1, e2) in nettopObj.edges] time = 0 newtasks = [Task(id =", "nume2d = int(len(sources)/2 - len(existingedges2desgin)) # print(nume2d) if 
nume2d>0: newedges = random.sample(edge2destin, nume2d)", "int(len(sources)/2 - len(existingedges2desgin)) # print(nume2d) if nume2d>0: newedges = random.sample(edge2destin, nume2d) # print(len(all_edges))", "edgelist = []) solutionObj = optimizeMILP(elements = elements, linklist = linklist, destinations =", "names:\", nettopObj.elements) elements = [Element(name = e, capacity=elementcapacity, size = 0, owner =", "either express or implied. See the License for the specific language governing permissions", "# print(\"element names:\", nettopObj.elements) elements = [Element(name = e, capacity=elementcapacity, size = 0,", "= elementDict[e1], destin = elementDict[e2], capacity = linkcapacity, size = 0, owner =", "cost value dict:\", len(nettopObj.costValueDict)) # print(nettopObj.hashid, nettopObj.costValueDict) # if (sharelinkcost, uselinkcost) not in", "nettopObj.destinations, storedtasks = [], newtasks = newtasks, time = time, federates = federates,", "Institute of Technology Licensed under the Apache License, Version 2.0 (the \"License\"); you", "Apache License, Version 2.0 (the \"License\"); you may not use this file except", "time = 0 newtasks = [Task(id = id + n, element=s, lastelement=s, size=size,", "f in federates} # # print(\"element names:\", nettopObj.elements) # elements = [Element(name =", "b and not (a in destinations))] all_edges = random.sample(all_possible_edges, int(len(all_possible_edges)//edgedivider)) edge2destin = [l", "defaultdict, Counter from itertools import product import pickle import random import hashlib from", "(satellites[2],satellites[4]), (satellites[2],satellites[1]), (satellites[2],satellites[3]), (satellites[3],satellites[4]), (satellites[4],stations[1]), (satellites[2],stations[0])] # all_possible_edges = [(a,b) for a, b", "nettopObj.costValueDict) # if (sharelinkcost, uselinkcost) not in nettopObj.costValueDict or nettopObj.costValueDict[(sharelinkcost, uselinkcost)] == 0:", "expiration=time + 5, init=time, active=True, 
penalty=penalty) for n, s in enumerate(sources)] elfedDict =", "if l[1] in destinations] nume2d = int(len(sources)/2 - len(existingedges2desgin)) # print(nume2d) if nume2d>0:", "[topollist[i] for i in inds] p = Process(target=updateCostValue, args=(objlist,proc,tempfilename)) p.start() allProcs.append(p) for a", "Process, Manager import argparse dir_topologies = os.path.abspath('..') + '/topologies_new/' def createNetTopologies(): global seedlist,", "s in destinations or d in destinations: destin_count += linkcapacity all_edge_set.add((s,d)) all_edge_set.add((d,s)) all_edges", "to in writing, software distributed under the License is distributed on an \"AS", "raw data of twitter.\") parser.add_argument('--nproc', type=int, default=3, help='cores on server') parser.add_argument('--n', type=int, default=3,", "random.choice(range(len(numberfederates))) numberfederates[i] += 1 namelist = [n*['f%d'%i] for i, n in enumerate(numberfederates)] federatenames", "elementnames, edgedivider numberfederates = numfederates*[len(elementnames)//numfederates] # print([s.name for s in sources]) destinations =", "l[1] in destinations])<len(elements)//linkcapacity: all_possible_edges = [(a,b) for a, b in list(product(elementnames, elementnames)) if", "language governing permissions and limitations under the License. 
\"\"\" import sys, os sys.path.append(os.path.abspath('..'))", "enumerate(sources)] # elfedDict = {e: f for e, f in zip(nettopObj.elements, nettopObj.federates)} #", "dir_topologies = os.path.abspath('..') + '/topologies_new/' def createNetTopologies(): global seedlist, filename, numfederates, elementnames, edgedivider", "hashNetworkDict[tempNetTop.hashid] = tempNetTop with open(filename, 'wb') as outfile: pickle.dump(hashNetworkDict, outfile) def calCostValue(nettopObj): federatenames", "in objlist} with open(tempfilename, 'wb') as outfile: pickle.dump(objDict, outfile) # with open(filename, 'rb')", "owner = elementDict[e2].owner) for (e1, e2) in nettopObj.edges] # time = 0 #", "= fedPriceDict[f][0], uselinkcost = fedPriceDict[f][1]) for f in set(federatenames)] # federateDict = {f.name:", "seedlist = list(range(0,500)) # for (numfederates, numelements), edgedivider in reversed(list(product([(2,10), (2,15), (3,15), (2,20),", "h, obj in hashNetworkDict.items(): finalDict[h] = obj with open(filename, 'wb') as outfile: pickle.dump(finalDict,", "for s in sources]) # linklist = [Link(source = elementDict[e1], destin = elementDict[e2],", "= MILPSolution(nettopObj.hashid, time, fedPriceDict = fedPriceDict, fedValDict = {f: 0 for f in", "of twitter.\") parser.add_argument('--nproc', type=int, default=3, help='cores on server') parser.add_argument('--n', type=int, default=3, help='cores on", "= list(all_edge_set) tempNetTop = NetTop(elementnames, all_edges, federatenames, sources, destinations) if tempNetTop.hashid not in", "# continue if s in destinations or d in destinations: destin_count += linkcapacity", "def calAuctionScore(): global filename print(filename) if os.path.isfile(filename): with open(filename, 'rb') as infile: hashNetworkDict", "{f: 0 for f in fedPriceDict.keys()}, edgelist = []) solutionObj = optimizeMILP(elements =", "else: return def aggregate60Nodes(): for numfederates, numelements, edgedivider in [(4,20,7), (4,20,11), (4,20,3), (4,20,5)]:", 
"[(4,20,7), (4,20,11), (4,20,3), (4,20,5)]: filename = dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d.p'%(numelements, numfederates, edgedivider) finalDict =", "for e in toplist]) with open(filename[:-2] + '_top10.p', 'wb') as outfile: pickle.dump(toplist, outfile)", "h, obj in objDict.items(): # hashNetworkDict[h] = obj # with open(filename, 'wb') as", "(e1, e2) in nettopObj.edges] time = 0 newtasks = [Task(id = id +", "# hashNetworkDict[h] = obj # with open(filename, 'wb') as outfile: # pickle.dump(hashNetworkDict, outfile)", "MILPSolution(nettopObj.hashid, time, fedPriceDict = fedPriceDict, fedValDict = {f: 0 for f in fedPriceDict.keys()},", "nettopObj.costValueDict[(sharelinkcost, uselinkcost)] = totalvalue # print(\"New tuple cost and value:\", sharelinkcost, uselinkcost, totalvalue)", "numfederates, edgedivider) finalDict = {} for proc in range(60): tempfilename = filename[:-2] +", "destinations] if os.path.isfile(filename): with open(filename, 'rb') as infile: hashNetworkDict = pickle.load(infile) hashNetworkDict =", "# with open(filename, 'rb') as infile: # hashNetworkDict = pickle.load(infile) # for h,", "filename = dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d.p'%(numelements, numfederates, edgedivider) finalDict = {} for proc in", "# # print(\"element names:\", nettopObj.elements) # elements = [Element(name = e, capacity=elementcapacity, size", "'rb') as infile: hashNetworkDict = pickle.load(infile) for h, obj in hashNetworkDict.items(): finalDict[h] =", "nettopObj.federates)} # print(elfedDict) # print(\"new tasks:\", newtasks) for sharelinkcost, uselinkcost in basetuples: fedPriceDict", "elementDict = {e.name: e for e in elements} # sources = [e for", "for f in set(federatenames)] # federateDict = {f.name: f for f in federates}", "federatenames)] # elementDict = {e.name: e for e in elements} # sources =", "= 0, owner = elementDict[e2].owner) for (e1, e2) in nettopObj.edges] # time =", "# elfedDict = {e: f for e, 
f in zip(nettopObj.elements, nettopObj.federates)} # print(elfedDict)", "= random.choice(range(len(numberfederates))) numberfederates[i] += 1 namelist = [n*['f%d'%i] for i, n in enumerate(numberfederates)]", "Counter from itertools import product import pickle import random import hashlib from resources.optimizeMILP", "h,obj in hashNetworkDict.items() if obj.costValueDict} else: hashNetworkDict = {} for seed in seedlist:", "= random.sample(all_possible_edges, int(len(all_possible_edges)//edgedivider)) edge2destin = [l for l in all_possible_edges if l[1] in", "nproc = argsdict['nproc'] time = 0 # basecost = [0, 200, 400, 600,", "destinations))] all_edges = random.sample(all_possible_edges, int(len(all_possible_edges)//edgedivider)) edge2destin = [l for l in all_possible_edges if", "# print(newedges) # print(len(all_edges)) all_edge_set = set([]) destin_count = 0 for edge in", "multiprocessing import Process, Manager import argparse dir_topologies = os.path.abspath('..') + '/topologies_new/' def createNetTopologies():", "400, 600, 800, 1000] seedlist = list(range(0,500)) # for (numfederates, numelements), edgedivider in", "reversed(list(product([(2,10), (2,15), (3,15), (2,20), (3,20), (4,20)], [3,5,7,11]))): aggregateNetworks() # for (numfederates, numelements), edgedivider", "'wb') as outfile: pickle.dump(hashNetDict, outfile) if __name__ == '__main__': parser = argparse.ArgumentParser(description=\"This processed", "= {f: 0 for f in fedPriceDict.keys()}, edgelist = []) solutionObj = optimizeMILP(elements", "in range(60): tempfilename = filename[:-2] + '_proc%s.p'%str(proc).zfill(2) with open(tempfilename, 'rb') as infile: hashNetworkDict", "random.sample(all_possible_edges, int(len(all_possible_edges)//edgedivider)) edge2destin = [l for l in all_possible_edges if l[1] in destinations", "as infile: hashNetworkDict = pickle.load(infile) for h, obj in hashNetworkDict.items(): finalDict[h] = obj", "aggregateNetworks() # for (numfederates, numelements), edgedivider in 
list(fedeldensitylist): # filename = dir_topologies +", "!= element_federate_dict[b])] all_possible_edges = [] all_edges = [] # while len([l for l", "destinations or d in destinations: destin_count += linkcapacity all_edge_set.add((s,d)) all_edge_set.add((d,s)) all_edges = list(all_edge_set)", "'rb') as infile: netlist.extend(pickle.load(infile)) hashNetDict = {net.hashid: net for net in netlist} with", "print(solutionObj.sourceEdgeDict) # print(solutionObj.fedValDict) nettopObj.costValueDict[(sharelinkcost, uselinkcost)] = totalvalue # print(\"New tuple cost and value:\",", "open(filename, 'rb') as infile: hashNetworkDict = pickle.load(infile) hashNetworkDict = {h: obj for h,obj", "(4,20,3), (4,20,5)]: filename = dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d.p'%(numelements, numfederates, edgedivider) finalDict = {} for", "destin_count = 0 for edge in all_edges: s, d = edge # if", "at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software", "a.join() finalDict = {} for proc in range(nproc): tempfilename = filename[:-2] + '_proc%s.p'%str(proc).zfill(2)", "for e in l] random.shuffle(federatenames) # print(\"shuffle:\", federatenames) # all_edges = [(satellites[0],satellites[1]), (satellites[3],stations[0]),", "elementnames = ['e%d'%(i+1) for i in range(numelements)] # createNetTopologies() # multiProcCostValue() # calAuctionScore()", "all_edges: s, d = edge # if destin_count > len(satellites): # continue if", "= set([]) destin_count = 0 for edge in all_edges: s, d = edge", "import product import pickle import random import hashlib from resources.optimizeMILP import optimizeMILP from", "[(500,501), (400,600)]: if mintup in costdict: topol.auctionscore += costdict[maxtup] - costdict[mintup] # print(topol.auctionscore)", "= optimizeMILP(elements = elements, linklist = linklist, destinations = nettopObj.destinations, storedtasks = [],", "if len(hashNetworkDict) < 1000: # return for k, topol in 
hashNetworkDict.items(): costdict =", "tasks:\", newtasks) for sharelinkcost, uselinkcost in basetuples: fedPriceDict = {fname: (sharelinkcost, uselinkcost) for", "e, capacity=elementcapacity, size = 0, owner = federateDict[f]) for (e,f) in zip(nettopObj.elements, federatenames)]", "in nettopObj.edges] # time = 0 # newtasks = [Task(id = id +", "in fedPriceDict.keys()}, edgelist = []) solutionObj = optimizeMILP(elements = elements, linklist = linklist,", "len(satellites): # continue if s in destinations or d in destinations: destin_count +=", "# return for k, topol in hashNetworkDict.items(): costdict = topol.costValueDict maxtup = (0,", "d = edge # if destin_count > len(satellites): # continue if s in", "= pickle.load(infile) for h, obj in list(hashNetworkDict.items()): finalDict[h] = obj with open(filename, 'wb')", "{net.hashid: net for net in netlist} with open(dir_topologies + 'hashNetDict.p', 'wb') as outfile:", "= elementnames[-2:] sources = [e for e in elementnames if e not in", "open(filename[:-2] + '_top10.p', 'wb') as outfile: pickle.dump(toplist, outfile) with open(filename[:-2] + '_score.p', 'wb')", "solutionObj = MILPSolution(nettopObj.hashid, time, fedPriceDict = fedPriceDict, fedValDict = {f: 0 for f", "uselinkcost)] = totalvalue # print(\"New tuple cost and value:\", sharelinkcost, uselinkcost, totalvalue) def", "if k%20 == 0: objDict = {obj.hashid: obj for obj in objlist} with", "topollist = hashNetworkDict.values() # if len(hashNetworkDict) < 1000: # return for k, topol", "import Process, Manager import argparse dir_topologies = os.path.abspath('..') + '/topologies_new/' def createNetTopologies(): global", "= newtasks, time = time, federates = federates, edgePriceDict = edgePriceDict, solutionObj =", "# hashNetworkDict = pickle.load(infile) # for h, obj in objDict.items(): # hashNetworkDict[h] =", "# if len(hashNetworkDict) < 1000: # return for k, topol in hashNetworkDict.items(): costdict", "not in nettopObj.destinations] # sources = 
nettopObj.sources # print([s.name for s in sources])", "argsdict['nproc'] time = 0 # basecost = [0, 200, 400, 600, 800, 1000]", "hashNetworkDict = pickle.load(infile) topollist = list(hashNetworkDict.values()) N = len(topollist) allProcs = [] for", "list(range(0,500)) # for (numfederates, numelements), edgedivider in reversed(list(product([(2,10), (2,15), (3,15), (2,20), (3,20), (4,20)],", "all_edge_set = set([]) destin_count = 0 for edge in all_edges: s, d =", "# linklist = [Link(source = elementDict[e1], destin = elementDict[e2], capacity = linkcapacity, size", "if s in destinations or d in destinations: destin_count += linkcapacity all_edge_set.add((s,d)) all_edge_set.add((d,s))", "the License for the specific language governing permissions and limitations under the License.", "in enumerate(sources)] # elfedDict = {e: f for e, f in zip(nettopObj.elements, nettopObj.federates)}", "+ 'hashNetDict.p', 'wb') as outfile: pickle.dump(hashNetDict, outfile) if __name__ == '__main__': parser =", "edgePriceDict = {e: fedPriceDict[elfedDict[e[1]]][0] for e in nettopObj.edges} # print(edgePriceDict) # print(nettopObj.hashid) #", "{e: f for e, f in zip(nettopObj.elements, nettopObj.federates)} # federates = [Federate(name =", "outfile: pickle.dump(objDict, outfile) # with open(filename, 'rb') as infile: # hashNetworkDict = pickle.load(infile)", "print(linklist) solutionObj = MILPSolution(nettopObj.hashid, time, fedPriceDict = fedPriceDict, fedValDict = {f: 0 for", "# f.sharelinkcost = sharelinkcost # f.uselinkcost = uselinkcost edgePriceDict = {e: fedPriceDict[elfedDict[e[1]]][0] for", "aggregate60Nodes(): for numfederates, numelements, edgedivider in [(4,20,7), (4,20,11), (4,20,3), (4,20,5)]: filename = dir_topologies", "for e in elements} # sources = [e for e in elements if", "from resources.classes import * from resources.globalv import * from collections import defaultdict, Counter", "[Link(source = elementDict[e1], destin = elementDict[e2], capacity = linkcapacity, size 
= 0, owner", "dict:\", len(nettopObj.costValueDict)) # print(nettopObj.hashid, nettopObj.costValueDict) # if (sharelinkcost, uselinkcost) not in nettopObj.costValueDict or", "parser.add_argument('--n', type=int, default=3, help='cores on server') args = parser.parse_args() argsdict = vars(args) nproc", "fname in federatenames} # federates = [Federate(name = f, cash = 0, sharelinkcost", "elementnames if e not in destinations] if os.path.isfile(filename): with open(filename, 'rb') as infile:", "nproc) objlist = [topollist[i] for i in inds] p = Process(target=updateCostValue, args=(objlist,proc,tempfilename)) p.start()", "newtasks, time = time, federates = federates, edgePriceDict = edgePriceDict, solutionObj = solutionObj)", "License, Version 2.0 (the \"License\"); you may not use this file except in", "nettopObj.federates # fedPriceDict = {fname: (sharelinkcost, uselinkcost) for fname in federatenames} # federates", "(a != b and element_federate_dict[a] != element_federate_dict[b])] all_possible_edges = [] all_edges = []", "filename = dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d.p'%(numelements, numfederates, edgedivider) # elementnames = ['e%d'%(i+1) for i", "proc in range(60): tempfilename = filename[:-2] + '_proc%s.p'%str(proc).zfill(2) with open(tempfilename, 'rb') as infile:", "capacity=elementcapacity, size = 0, owner = federateDict[f]) for (e,f) in zip(nettopObj.elements, federatenames)] elementDict", "from itertools import product import pickle import random import hashlib from resources.optimizeMILP import", "{h: obj for h,obj in hashNetworkDict.items() if obj.costValueDict} else: hashNetworkDict = {} for", "os.path.isfile(filename): with open(filename, 'rb') as infile: hashNetworkDict = pickle.load(infile) hashNetworkDict = {h: obj", "for k, nettopObj in enumerate(objlist): # print(\"New topoology:\", nettopObj.hashid) calCostValue(nettopObj) if k%20 ==", "# print(\"new tasks:\", newtasks) for sharelinkcost, uselinkcost in basetuples: 
fedPriceDict = {fname: (sharelinkcost,", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License", "'rb') as infile: hashNetworkDict = pickle.load(infile) hashNetworkDict = {h: obj for h,obj in", "size = 0, owner = federateDict[f]) for (e,f) in zip(nettopObj.elements, federatenames)] elementDict =", "= (0, 1000) for mintup in [(500,501), (400,600)]: if mintup in costdict: topol.auctionscore", "(e,f) in zip(nettopObj.elements, federatenames)] elementDict = {e.name: e for e in elements} sources", "in elements} # sources = [e for e in elements if e.name not", "e in elements} sources = [e for e in elements if e.name not", "# pickle.dump(hashNetworkDict, outfile) with open(tempfilename, 'wb') as outfile: objDict = {obj.hashid: obj for", "[Federate(name = f, cash = 0, sharelinkcost = 0, uselinkcost = 0) for", "federates, edgePriceDict = edgePriceDict, solutionObj = solutionObj) totalvalue = solutionObj.totalvalue # print(solutionObj.sourceEdgeDict) #", "= [(a,b) for a, b in list(product(elementnames, elementnames)) if (a != b and", "# print(linklist) solutionObj = MILPSolution(nettopObj.hashid, time, fedPriceDict = fedPriceDict, fedValDict = {f: 0", "[(a,b) for a, b in list(product(elementnames, elementnames)) if (a != b and element_federate_dict[a]", "= 0, owner = elementDict[e2].owner) for (e1, e2) in nettopObj.edges] time = 0", "s, d = edge # if destin_count > len(satellites): # continue if s", "value:\", sharelinkcost, uselinkcost, totalvalue) def updateCostValue(objlist, proc, tempfilename): global filename if os.path.isdir(\"/home/abbas.ehsanfar/gurobi\"): hostname", "<NAME>, Stevens Institute of Technology Licensed under the Apache License, Version 2.0 (the", "in destinations: destin_count += linkcapacity all_edge_set.add((s,d)) all_edge_set.add((d,s)) all_edges = list(all_edge_set) tempNetTop = NetTop(elementnames,", "import * from collections import defaultdict, Counter from itertools import product import pickle", "in costdict: 
topol.auctionscore += costdict[maxtup] - costdict[mintup] # print(topol.auctionscore) toplist = sorted(hashNetworkDict.values(), key", "# federates = [Federate(name = f, cash = 0, sharelinkcost = 0, uselinkcost", "enumerate(numberfederates)] federatenames = [e for l in namelist for e in l] random.shuffle(federatenames)", "list(hashNetworkDict.values()) N = len(topollist) allProcs = [] for proc in range(nproc): tempfilename =", "pickle.load(infile) # topollist = hashNetworkDict.values() # if len(hashNetworkDict) < 1000: # return for", "# if (sharelinkcost, uselinkcost) not in nettopObj.costValueDict or nettopObj.costValueDict[(sharelinkcost, uselinkcost)] == 0: #", "def calCostValue(nettopObj): federatenames = nettopObj.federates # fedPriceDict = {fname: (sharelinkcost, uselinkcost) for fname", "as outfile: pickle.dump(finalDict, outfile) def aggregateNetworks(): netlist = [] for (numfederates, numelements), edgedivider", "numelements), edgedivider in list(fedeldensitylist): filename = dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d_top10.p'%(numelements, numfederates, edgedivider) # if", "{e: f for e, f in zip(nettopObj.elements, nettopObj.federates)} # print(elfedDict) # print(\"new tasks:\",", "+ '_proc%s.p'%str(proc).zfill(2)) def calAuctionScore(): global filename print(filename) if os.path.isfile(filename): with open(filename, 'rb') as", "for e, f in zip(nettopObj.elements, nettopObj.federates)} # print(elfedDict) # print(\"new tasks:\", newtasks) for", "0, sharelinkcost = fedPriceDict[f][0], uselinkcost = fedPriceDict[f][1]) for f in set(federatenames)] federateDict =", "# elfedDict = {e: f for e, f in zip(nettopObj.elements, nettopObj.federates)} # federates", "all_possible_edges if l[1] in destinations and l not in all_edges] existingedges2desgin = [l", "# print(linklist) # print(nettopObj.destinations) # print(len(newtasks)) # print(federates) # print(linklist) solutionObj = MILPSolution(nettopObj.hashid,", "sources = [e for e in 
elementnames if e not in destinations] if", "p = Process(target=updateCostValue, args=(objlist,proc,tempfilename)) p.start() allProcs.append(p) for a in allProcs: a.join() finalDict =", "numelements, edgedivider in [(4,20,7), (4,20,11), (4,20,3), (4,20,5)]: filename = dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d.p'%(numelements, numfederates,", "= nettopObj.federates # fedPriceDict = {fname: (sharelinkcost, uselinkcost) for fname in federatenames} #", "uselinkcost edgePriceDict = {e: fedPriceDict[elfedDict[e[1]]][0] for e in nettopObj.edges} # print(edgePriceDict) # print(nettopObj.hashid)", "hashNetworkDict.values() # if len(hashNetworkDict) < 1000: # return for k, topol in hashNetworkDict.items():", "N, nproc) objlist = [topollist[i] for i in inds] p = Process(target=updateCostValue, args=(objlist,proc,tempfilename))", "on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "for numfederates, numelements, edgedivider in [(4,20,7), (4,20,11), (4,20,3), (4,20,5)]: filename = dir_topologies +", "[3,5,7,11]))): aggregateNetworks() # for (numfederates, numelements), edgedivider in list(fedeldensitylist): # filename = dir_topologies", "- costdict[mintup] # print(topol.auctionscore) toplist = sorted(hashNetworkDict.values(), key = lambda x: x.auctionscore, reverse", "0: # for f in federates: # f.cash = 0 # f.sharelinkcost =", "open(filename, 'rb') as infile: # hashNetworkDict = pickle.load(infile) # for h, obj in", "f in zip(nettopObj.elements, nettopObj.federates)} # print(elfedDict) # print(\"new tasks:\", newtasks) for sharelinkcost, uselinkcost", "import hashlib from resources.optimizeMILP import optimizeMILP from multiprocessing import Process, Manager import argparse", "import pickle import random import hashlib from resources.optimizeMILP import optimizeMILP from multiprocessing import", "[] # while len([l for l in all_possible_edges if l[1] in destinations])<len(elements)//linkcapacity: all_possible_edges", 
"uselinkcost) not in nettopObj.costValueDict or nettopObj.costValueDict[(sharelinkcost, uselinkcost)] == 0: # for f in", "(3,15), (2,20), (3,20), (4,20)], [3,5,7,11]))): aggregateNetworks() # for (numfederates, numelements), edgedivider in list(fedeldensitylist):", "elements} # sources = [e for e in elements if e.name not in", "outfile) def calCostValue(nettopObj): federatenames = nettopObj.federates # fedPriceDict = {fname: (sharelinkcost, uselinkcost) for", "for mintup in [(500,501), (400,600)]: if mintup in costdict: topol.auctionscore += costdict[maxtup] -", "pickle.dump(toplist, outfile) with open(filename[:-2] + '_score.p', 'wb') as outfile: pickle.dump(hashNetworkDict, outfile) else: return", "for h, obj in hashNetworkDict.items(): finalDict[h] = obj with open(filename, 'wb') as outfile:", "default=3, help='cores on server') args = parser.parse_args() argsdict = vars(args) nproc = argsdict['nproc']", "destinations: destin_count += linkcapacity all_edge_set.add((s,d)) all_edge_set.add((d,s)) all_edges = list(all_edge_set) tempNetTop = NetTop(elementnames, all_edges,", "hashNetworkDict[h] = obj # with open(filename, 'wb') as outfile: # pickle.dump(hashNetworkDict, outfile) with", "destinations and l not in all_edges] existingedges2desgin = [l for l in all_edges", "distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES", "{f.name: f for f in federates} # # print(\"element names:\", nettopObj.elements) # elements", "= os.environ['HOSTNAME'] os.environ['GRB_LICENSE_FILE'] = \"/home/abbas.ehsanfar/gurobi/%s/lic%s/gurobi.lic\"%(hostname,str(proc%30).zfill(2)) for k, nettopObj in enumerate(objlist): # print(\"New topoology:\",", "import * from resources.globalv import * from collections import defaultdict, Counter from itertools", "uselinkcost, totalvalue) def updateCostValue(objlist, proc, tempfilename): global filename if os.path.isdir(\"/home/abbas.ehsanfar/gurobi\"): hostname = os.environ['HOSTNAME']", "p.start() 
allProcs.append(p) for a in allProcs: a.join() finalDict = {} for proc in", "[e.auctionscore for e in toplist]) with open(filename[:-2] + '_top10.p', 'wb') as outfile: pickle.dump(toplist,", "(4,20,11), (4,20,3), (4,20,5)]: filename = dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d.p'%(numelements, numfederates, edgedivider) finalDict = {}", "range(proc, N, nproc) objlist = [topollist[i] for i in inds] p = Process(target=updateCostValue,", "for l in namelist for e in l] random.shuffle(federatenames) # print(\"shuffle:\", federatenames) #", "in all_edges] existingedges2desgin = [l for l in all_edges if l[1] in destinations]", "in set(federatenames)] # federateDict = {f.name: f for f in federates} # #", "(satellites[3],stations[0]), (satellites[1],satellites[3]), # (satellites[2],satellites[4]), (satellites[2],satellites[1]), (satellites[2],satellites[3]), (satellites[3],satellites[4]), (satellites[4],stations[1]), (satellites[2],stations[0])] # all_possible_edges = [(a,b)", "use this file except in compliance with the License. 
You may obtain a", "in sources]) destinations = elementnames[-2:] sources = [e for e in elementnames if", "0, owner = elementDict[e2].owner) for (e1, e2) in nettopObj.edges] # time = 0", "all_edges = all_edges + newedges # print(newedges) # print(len(all_edges)) all_edge_set = set([]) destin_count", "# print([s.name for s in sources]) linklist = [Link(source = elementDict[e1], destin =", "with open(filename, 'wb') as outfile: pickle.dump(finalDict, outfile) for proc in range(nproc): os.remove(filename[:-2] +", "dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d_top10.p'%(numelements, numfederates, edgedivider) # if os.path.isfile(filename): with open(filename, 'rb') as infile:", "a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or", "= [e for e in elements if e.name not in nettopObj.destinations] # sources", "# print(edgePriceDict) # print(nettopObj.hashid) # print(fedPriceDict) # print(linklist) # print(nettopObj.destinations) # print(len(newtasks)) #", "in zip(nettopObj.elements, federatenames)] # elementDict = {e.name: e for e in elements} #", "= [0, 200, 400, 600, 800, 1000] seedlist = list(range(0,500)) # for (numfederates,", "in zip(nettopObj.elements, nettopObj.federates)} # print(\"new tuple:\", sharelinkcost, uselinkcost) # print(\"length of cost value", "h, obj in list(hashNetworkDict.items()): finalDict[h] = obj with open(filename, 'wb') as outfile: pickle.dump(finalDict,", "# print(\"new tuple:\", sharelinkcost, uselinkcost) # print(\"length of cost value dict:\", len(nettopObj.costValueDict)) #", "\"/home/abbas.ehsanfar/gurobi/%s/lic%s/gurobi.lic\"%(hostname,str(proc%30).zfill(2)) for k, nettopObj in enumerate(objlist): # print(\"New topoology:\", nettopObj.hashid) calCostValue(nettopObj) if k%20", "solutionObj = solutionObj) totalvalue = solutionObj.totalvalue # print(solutionObj.sourceEdgeDict) # print(solutionObj.fedValDict) nettopObj.costValueDict[(sharelinkcost, uselinkcost)] =", 
"edge # if destin_count > len(satellites): # continue if s in destinations or", "{obj.hashid: obj for obj in objlist} with open(tempfilename, 'wb') as outfile: pickle.dump(objDict, outfile)", "time = time, federates = federates, edgePriceDict = edgePriceDict, solutionObj = solutionObj) totalvalue", "destinations = nettopObj.destinations, storedtasks = [], newtasks = newtasks, time = time, federates", "# federateDict = {f.name: f for f in federates} # # print(\"element names:\",", "cash = 0, sharelinkcost = 0, uselinkcost = 0) for f in set(federatenames)]", "if os.path.isfile(filename): with open(filename, 'rb') as infile: netlist.extend(pickle.load(infile)) hashNetDict = {net.hashid: net for", "= nettopObj.sources # print([s.name for s in sources]) linklist = [Link(source = elementDict[e1],", "of Technology Licensed under the Apache License, Version 2.0 (the \"License\"); you may", "obj # with open(filename, 'wb') as outfile: # pickle.dump(hashNetworkDict, outfile) with open(tempfilename, 'wb')", "hashNetworkDict = {} for seed in seedlist: # print(seed) random.seed(seed) while sum(numberfederates)<len(elementnames): i", "time = 0 # basecost = [0, 200, 400, 600, 800, 1000] seedlist", "(3,20), (4,20)], [3,5,7,11]))): aggregateNetworks() # for (numfederates, numelements), edgedivider in list(fedeldensitylist): # filename", "print(elfedDict) # print(\"new tasks:\", newtasks) for sharelinkcost, uselinkcost in basetuples: fedPriceDict = {fname:", "distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "zip(nettopObj.elements, nettopObj.federates)} # print(\"new tuple:\", sharelinkcost, uselinkcost) # print(\"length of cost value dict:\",", "# for h, obj in objDict.items(): # hashNetworkDict[h] = obj # with open(filename,", "in federatenames} # federates = [Federate(name = f, cash = 0, sharelinkcost =", "tempfilename): global filename if os.path.isdir(\"/home/abbas.ehsanfar/gurobi\"): hostname = os.environ['HOSTNAME'] 
os.environ['GRB_LICENSE_FILE'] = \"/home/abbas.ehsanfar/gurobi/%s/lic%s/gurobi.lic\"%(hostname,str(proc%30).zfill(2)) for k,", "continue if s in destinations or d in destinations: destin_count += linkcapacity all_edge_set.add((s,d))", "!= b and not (a in destinations))] all_edges = random.sample(all_possible_edges, int(len(all_possible_edges)//edgedivider)) edge2destin =", "{fname: (sharelinkcost, uselinkcost) for fname in federatenames} federates = [Federate(name = f, cash", "in destinations or d in destinations: destin_count += linkcapacity all_edge_set.add((s,d)) all_edge_set.add((d,s)) all_edges =", "= int(len(sources)/2 - len(existingedges2desgin)) # print(nume2d) if nume2d>0: newedges = random.sample(edge2destin, nume2d) #", "# print(\"element names:\", nettopObj.elements) # elements = [Element(name = e, capacity=elementcapacity, size =", "+= linkcapacity all_edge_set.add((s,d)) all_edge_set.add((d,s)) all_edges = list(all_edge_set) tempNetTop = NetTop(elementnames, all_edges, federatenames, sources,", "not in all_edges] existingedges2desgin = [l for l in all_edges if l[1] in", "federates = federates, edgePriceDict = edgePriceDict, solutionObj = solutionObj) totalvalue = solutionObj.totalvalue #", "or d in destinations: destin_count += linkcapacity all_edge_set.add((s,d)) all_edge_set.add((d,s)) all_edges = list(all_edge_set) tempNetTop", "basecost = [0, 200, 400, 600, 800, 1000] seedlist = list(range(0,500)) # for", "tuple cost and value:\", sharelinkcost, uselinkcost, totalvalue) def updateCostValue(objlist, proc, tempfilename): global filename", "2018, <NAME>, Stevens Institute of Technology Licensed under the Apache License, Version 2.0", "= [Task(id = id + n, element=s, lastelement=s, size=size, value=value, expiration=time + 5,", "if os.path.isfile(filename): with open(filename, 'rb') as infile: hashNetworkDict = pickle.load(infile) # topollist =", "mintup in costdict: topol.auctionscore += costdict[maxtup] - costdict[mintup] # print(topol.auctionscore) 
toplist = sorted(hashNetworkDict.values(),", "# print(seed, tempNetTop.hashid) hashNetworkDict[tempNetTop.hashid] = tempNetTop with open(filename, 'wb') as outfile: pickle.dump(hashNetworkDict, outfile)", "print(seed, tempNetTop.hashid) hashNetworkDict[tempNetTop.hashid] = tempNetTop with open(filename, 'wb') as outfile: pickle.dump(hashNetworkDict, outfile) def", "(satellites[4],stations[1]), (satellites[2],stations[0])] # all_possible_edges = [(a,b) for a, b in list(product(elementnames, elementnames)) if", "+ 5, init=time, active=True, penalty=penalty) for n, s in enumerate(sources)] # elfedDict =", "# print(len(all_edges)) all_edge_set = set([]) destin_count = 0 for edge in all_edges: s,", "for proc in range(nproc): tempfilename = filename[:-2] + '_proc%s.p'%str(proc).zfill(2) inds = range(proc, N,", "# for (numfederates, numelements), edgedivider in list(fedeldensitylist): # filename = dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d.p'%(numelements,", "numberfederates[i] += 1 namelist = [n*['f%d'%i] for i, n in enumerate(numberfederates)] federatenames =", "open(filename, 'rb') as infile: hashNetworkDict = pickle.load(infile) # topollist = hashNetworkDict.values() # if", "\"\"\" import sys, os sys.path.append(os.path.abspath('..')) from resources.classes import * from resources.globalv import *", "set(federatenames)] federateDict = {f.name: f for f in federates} # print(\"element names:\", nettopObj.elements)", "elements, linklist = linklist, destinations = nettopObj.destinations, storedtasks = [], newtasks = newtasks,", "f for e, f in zip(nettopObj.elements, nettopObj.federates)} # print(elfedDict) # print(\"new tasks:\", newtasks)", "= random.sample(edge2destin, nume2d) # print(len(all_edges)) all_edges = all_edges + newedges # print(newedges) #", "sharelinkcost = fedPriceDict[f][0], uselinkcost = fedPriceDict[f][1]) for f in set(federatenames)] federateDict = {f.name:", "outfile: pickle.dump(hashNetworkDict, outfile) else: return def 
aggregate60Nodes(): for numfederates, numelements, edgedivider in [(4,20,7),", "list(product(elementnames, elementnames)) if (a != b and not (a in destinations))] all_edges =", "Process(target=updateCostValue, args=(objlist,proc,tempfilename)) p.start() allProcs.append(p) for a in allProcs: a.join() finalDict = {} for", "seedlist: # print(seed) random.seed(seed) while sum(numberfederates)<len(elementnames): i = random.choice(range(len(numberfederates))) numberfederates[i] += 1 namelist", "(sharelinkcost, uselinkcost) not in nettopObj.costValueDict or nettopObj.costValueDict[(sharelinkcost, uselinkcost)] == 0: # for f", "lambda x: x.auctionscore, reverse = True)[:10] # print(filename, [e.auctionscore for e in toplist])", "with open(dir_topologies + 'hashNetDict.p', 'wb') as outfile: pickle.dump(hashNetDict, outfile) if __name__ == '__main__':", "federateDict[f]) for (e,f) in zip(nettopObj.elements, federatenames)] # elementDict = {e.name: e for e", "the Apache License, Version 2.0 (the \"License\"); you may not use this file", "federates: # f.cash = 0 # f.sharelinkcost = sharelinkcost # f.uselinkcost = uselinkcost", "for obj in objlist} with open(tempfilename, 'wb') as outfile: pickle.dump(objDict, outfile) # with", "in allProcs: a.join() finalDict = {} for proc in range(nproc): tempfilename = filename[:-2]", "list(hashNetworkDict.items()): finalDict[h] = obj with open(filename, 'wb') as outfile: pickle.dump(finalDict, outfile) def aggregateNetworks():", "edgePriceDict, solutionObj = solutionObj) totalvalue = solutionObj.totalvalue # print(solutionObj.sourceEdgeDict) # print(solutionObj.fedValDict) nettopObj.costValueDict[(sharelinkcost, uselinkcost)]", "nettopObj.elements) elements = [Element(name = e, capacity=elementcapacity, size = 0, owner = federateDict[f])", "as infile: hashNetworkDict = pickle.load(infile) # topollist = hashNetworkDict.values() # if len(hashNetworkDict) <", "= {e: f for e, f in zip(nettopObj.elements, nettopObj.federates)} # print(elfedDict) 
# print(\"new", "edgedivider in [(4,20,7), (4,20,11), (4,20,3), (4,20,5)]: filename = dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d.p'%(numelements, numfederates, edgedivider)", "# print(topol.auctionscore) toplist = sorted(hashNetworkDict.values(), key = lambda x: x.auctionscore, reverse = True)[:10]", "= [n*['f%d'%i] for i, n in enumerate(numberfederates)] federatenames = [e for l in", "# filename = dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d.p'%(numelements, numfederates, edgedivider) # elementnames = ['e%d'%(i+1) for", "print(nume2d) if nume2d>0: newedges = random.sample(edge2destin, nume2d) # print(len(all_edges)) all_edges = all_edges +", "0) for f in set(federatenames)] # federateDict = {f.name: f for f in", "parser.parse_args() argsdict = vars(args) nproc = argsdict['nproc'] time = 0 # basecost =", "# print(len(newtasks)) # print(federates) # print(linklist) solutionObj = MILPSolution(nettopObj.hashid, time, fedPriceDict = fedPriceDict,", "obj in objDict.items(): # hashNetworkDict[h] = obj # with open(filename, 'wb') as outfile:", "edge in all_edges: s, d = edge # if destin_count > len(satellites): #", "(a != b and not (a in destinations))] all_edges = random.sample(all_possible_edges, int(len(all_possible_edges)//edgedivider)) edge2destin", "as infile: hashNetworkDict = pickle.load(infile) for h, obj in list(hashNetworkDict.items()): finalDict[h] = obj", "hashNetworkDict = {h: obj for h,obj in hashNetworkDict.items() if obj.costValueDict} else: hashNetworkDict =", "while sum(numberfederates)<len(elementnames): i = random.choice(range(len(numberfederates))) numberfederates[i] += 1 namelist = [n*['f%d'%i] for i,", "= filename[:-2] + '_proc%s.p'%str(proc).zfill(2) with open(tempfilename, 'rb') as infile: hashNetworkDict = pickle.load(infile) for", "(numfederates, numelements), edgedivider in reversed(list(product([(2,10), (2,15), (3,15), (2,20), (3,20), (4,20)], [3,5,7,11]))): aggregateNetworks() #", "__name__ == 
'__main__': parser = argparse.ArgumentParser(description=\"This processed raw data of twitter.\") parser.add_argument('--nproc', type=int,", "all_edge_set.add((d,s)) all_edges = list(all_edge_set) tempNetTop = NetTop(elementnames, all_edges, federatenames, sources, destinations) if tempNetTop.hashid", "# print(\"length of cost value dict:\", len(nettopObj.costValueDict)) # print(nettopObj.hashid, nettopObj.costValueDict) # if (sharelinkcost,", "numfederates, elementnames, edgedivider numberfederates = numfederates*[len(elementnames)//numfederates] # print([s.name for s in sources]) destinations", "topol in hashNetworkDict.items(): costdict = topol.costValueDict maxtup = (0, 1000) for mintup in", "newtasks = [Task(id = id + n, element=s, lastelement=s, size=size, value=value, expiration=time +", "os.path.isdir(\"/home/abbas.ehsanfar/gurobi\"): hostname = os.environ['HOSTNAME'] os.environ['GRB_LICENSE_FILE'] = \"/home/abbas.ehsanfar/gurobi/%s/lic%s/gurobi.lic\"%(hostname,str(proc%30).zfill(2)) for k, nettopObj in enumerate(objlist): #", "nettopObj.destinations] # # sources = nettopObj.sources # # print([s.name for s in sources])", "[Element(name = e, capacity=elementcapacity, size = 0, owner = federateDict[f]) for (e,f) in", "list(product(elementnames, elementnames)) if (a != b and element_federate_dict[a] != element_federate_dict[b])] all_possible_edges = []", "# print(filename, [e.auctionscore for e in toplist]) with open(filename[:-2] + '_top10.p', 'wb') as", "= fedPriceDict, fedValDict = {f: 0 for f in fedPriceDict.keys()}, edgelist = [])", "fedPriceDict = fedPriceDict, fedValDict = {f: 0 for f in fedPriceDict.keys()}, edgelist =", "obj with open(filename, 'wb') as outfile: pickle.dump(finalDict, outfile) for proc in range(nproc): os.remove(filename[:-2]", "size = 0, owner = elementDict[e2].owner) for (e1, e2) in nettopObj.edges] # time", "l in all_possible_edges if l[1] in destinations])<len(elements)//linkcapacity: all_possible_edges = [(a,b) for a, b", "f in 
federates: # f.cash = 0 # f.sharelinkcost = sharelinkcost # f.uselinkcost", "= {fname: (sharelinkcost, uselinkcost) for fname in federatenames} federates = [Federate(name = f,", "= \"/home/abbas.ehsanfar/gurobi/%s/lic%s/gurobi.lic\"%(hostname,str(proc%30).zfill(2)) for k, nettopObj in enumerate(objlist): # print(\"New topoology:\", nettopObj.hashid) calCostValue(nettopObj) if", "reverse = True)[:10] # print(filename, [e.auctionscore for e in toplist]) with open(filename[:-2] +", "N = len(topollist) allProcs = [] for proc in range(nproc): tempfilename = filename[:-2]", "type=int, default=3, help='cores on server') parser.add_argument('--n', type=int, default=3, help='cores on server') args =", "allProcs = [] for proc in range(nproc): tempfilename = filename[:-2] + '_proc%s.p'%str(proc).zfill(2) inds", "capacity = linkcapacity, size = 0, owner = elementDict[e2].owner) for (e1, e2) in", "Manager import argparse dir_topologies = os.path.abspath('..') + '/topologies_new/' def createNetTopologies(): global seedlist, filename,", "# elements = [Element(name = e, capacity=elementcapacity, size = 0, owner = federateDict[f])", "for k, topol in hashNetworkDict.items(): costdict = topol.costValueDict maxtup = (0, 1000) for", "edgedivider) # elementnames = ['e%d'%(i+1) for i in range(numelements)] # createNetTopologies() # multiProcCostValue()", "hashNetworkDict = pickle.load(infile) hashNetworkDict = {h: obj for h,obj in hashNetworkDict.items() if obj.costValueDict}", "hashNetworkDict = pickle.load(infile) # topollist = hashNetworkDict.values() # if len(hashNetworkDict) < 1000: #", "* from collections import defaultdict, Counter from itertools import product import pickle import", "== 0: # for f in federates: # f.cash = 0 # f.sharelinkcost", "outfile) with open(tempfilename, 'wb') as outfile: objDict = {obj.hashid: obj for obj in", "f, cash = 0, sharelinkcost = fedPriceDict[f][0], uselinkcost = fedPriceDict[f][1]) for f in", "outfile) # with open(filename, 'rb') as infile: # 
hashNetworkDict = pickle.load(infile) # for", "e2) in nettopObj.edges] time = 0 newtasks = [Task(id = id + n,", "compliance with the License. You may obtain a copy of the License at", "nettopObj.costValueDict or nettopObj.costValueDict[(sharelinkcost, uselinkcost)] == 0: # for f in federates: # f.cash", "in hashNetworkDict.items(): finalDict[h] = obj with open(filename, 'wb') as outfile: pickle.dump(finalDict, outfile) for", "= parser.parse_args() argsdict = vars(args) nproc = argsdict['nproc'] time = 0 # basecost", "# print(\"shuffle:\", federatenames) # all_edges = [(satellites[0],satellites[1]), (satellites[3],stations[0]), (satellites[1],satellites[3]), # (satellites[2],satellites[4]), (satellites[2],satellites[1]), (satellites[2],satellites[3]),", "- len(existingedges2desgin)) # print(nume2d) if nume2d>0: newedges = random.sample(edge2destin, nume2d) # print(len(all_edges)) all_edges", "in sources]) linklist = [Link(source = elementDict[e1], destin = elementDict[e2], capacity = linkcapacity,", "(satellites[1],satellites[3]), # (satellites[2],satellites[4]), (satellites[2],satellites[1]), (satellites[2],satellites[3]), (satellites[3],satellites[4]), (satellites[4],stations[1]), (satellites[2],stations[0])] # all_possible_edges = [(a,b) for", "if tempNetTop.hashid not in hashNetworkDict: # print(seed, tempNetTop.hashid) hashNetworkDict[tempNetTop.hashid] = tempNetTop with open(filename,", "You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by", "for e, f in zip(nettopObj.elements, nettopObj.federates)} # federates = [Federate(name = f, cash", "# print(\"New topoology:\", nettopObj.hashid) calCostValue(nettopObj) if k%20 == 0: objDict = {obj.hashid: obj", "nume2d) # print(len(all_edges)) all_edges = all_edges + newedges # print(newedges) # print(len(all_edges)) all_edge_set", "[Federate(name = f, cash = 0, sharelinkcost = fedPriceDict[f][0], uselinkcost = fedPriceDict[f][1]) for", "applicable law or agreed to in 
writing, software distributed under the License is", "uselinkcost) # print(\"length of cost value dict:\", len(nettopObj.costValueDict)) # print(nettopObj.hashid, nettopObj.costValueDict) # if", "linklist = [Link(source = elementDict[e1], destin = elementDict[e2], capacity = linkcapacity, size =", "in destinations])<len(elements)//linkcapacity: all_possible_edges = [(a,b) for a, b in list(product(elementnames, elementnames)) if (a", "lastelement=s, size=size, value=value, expiration=time + 5, init=time, active=True, penalty=penalty) for n, s in", "for f in federates} # # print(\"element names:\", nettopObj.elements) # elements = [Element(name", "e in elements} # sources = [e for e in elements if e.name", "permissions and limitations under the License. \"\"\" import sys, os sys.path.append(os.path.abspath('..')) from resources.classes", "elementDict[e2].owner) for (e1, e2) in nettopObj.edges] # time = 0 # newtasks =", "federatenames) # all_edges = [(satellites[0],satellites[1]), (satellites[3],stations[0]), (satellites[1],satellites[3]), # (satellites[2],satellites[4]), (satellites[2],satellites[1]), (satellites[2],satellites[3]), (satellites[3],satellites[4]), (satellites[4],stations[1]),", "federates} # print(\"element names:\", nettopObj.elements) elements = [Element(name = e, capacity=elementcapacity, size =", "for f in set(federatenames)] federateDict = {f.name: f for f in federates} #", "for f in federates: # f.cash = 0 # f.sharelinkcost = sharelinkcost #", "in hashNetworkDict: # print(seed, tempNetTop.hashid) hashNetworkDict[tempNetTop.hashid] = tempNetTop with open(filename, 'wb') as outfile:", "= sharelinkcost # f.uselinkcost = uselinkcost edgePriceDict = {e: fedPriceDict[elfedDict[e[1]]][0] for e in", "with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0" ]
[ "the last reward from every done task (also true for failed tasks) step_rew", "= False, verbose: bool = False): \"\"\" Constructor :param tasks: sequence of tasks", "True if verbose: print_cbt(f'Task {self._idx_curr} has failed (is done) at state {state}', 'r')", "\"\"\" if not isinstance(space_des, Space): raise pyrado.TypeErr(given=space_des, expected_type=Space) self._tasks[self._idx_curr].space_des = space_des @property def", "numpy as np from copy import deepcopy from typing import Sequence import pyrado", "not isinstance(space_des, Space): raise pyrado.TypeErr(given=space_des, expected_type=Space) self._tasks[self._idx_curr].space_des = space_des @property def rew_fcn(self) ->", "+ final_rew def compute_final_rew(self, state: np.ndarray, remaining_steps: int) -> float: \"\"\" Compute the", "Task has not been marked done yet, but is now done if self._tasks[self._idx_curr].has_succeeded(state):", "= self._tasks[self._idx_curr].step_rew(state, act, remaining_steps=0) # Give a reward for completing the task defined", "that all tasks have the same env_spec @property def tasks(self) -> Sequence[Task]: \"\"\"", "reward values for done tasks if self.hold_rew_when_done: self.held_rews = np.zeros(len(self)) # doesn't work", "of tasks. \"\"\" return deepcopy(self._tasks) @property def idx_curr(self) -> int: \"\"\" Get the", "import RewFcn from pyrado.utils.input_output import print_cbt class SequentialTasks(Task): \"\"\" Task class for a", "Get the desired space the current task. \"\"\" return self._tasks[self._idx_curr].space_des @space_des.setter def space_des(self,", "function only looks at the immediate sub-tasks. :param state: current state of the", "np.ndarray, act: np.ndarray, remaining_steps: int, verbose: bool = False) -> float: \"\"\" Check", "all sub-tasks are successful. :param state: environments current state :return: `True` if succeeded", "reward from the current task. \"\"\" step_rew = 0. 
if self.hold_rew_when_done: for i", "verbose: print messages on success or failure :return: final return of the current", "completed task self.failed_tasks[self._idx_curr] = True if verbose: print_cbt(f'Task {self._idx_curr} has failed (is done)", "= state_des @property def space_des(self) -> Space: \"\"\" Get the desired space the", "the reward function of the current task. \"\"\" return self._tasks[self._idx_curr].rew_fcn def step_rew(self, state:", "verbose: print messages on task completion .. note:: `hold_rew_when_done=True` only makes sense for", "verbose def __len__(self) -> int: return len(self._tasks) @property def env_spec(self) -> EnvSpec: return", "looks at the immediate sub-tasks. :param state: current state of the environment :param", "int: \"\"\" Get the index of the currently active task. \"\"\" return self._idx_curr", "or self.failed_tasks[self._idx_curr]): # Only give step reward if current sub-task is active step_rew", "note:: The `ParallelTasks` class is not a subclass of `TaskWrapper`, i.e. this function", "completion .. note:: `hold_rew_when_done=True` only makes sense for positive rewards. \"\"\" self._tasks =", "{state}', 'g') elif self._tasks[self._idx_curr].has_failed(state): # Check off unsuccessfully completed task self.failed_tasks[self._idx_curr] = True", "1}') self._idx_curr = idx @property def state_des(self) -> np.ndarray: \"\"\" Get the desired", "= 0. for t in self._tasks: sum_final_rew += t.compute_final_rew(state, remaining_steps) return sum_final_rew def", "if 'start_idx' in kwargs: self.succeeded_tasks[:kwargs['start_idx']] = True # Reset the stored reward values", "tasks. \"\"\" return deepcopy(self._tasks) @property def idx_curr(self) -> int: \"\"\" Get the index", "np.ndarray) -> bool: \"\"\" Check if this tasks is done. 
The SequentialTasks is", "as np from copy import deepcopy from typing import Sequence import pyrado from", "self.logger.add_value('successful tasks', self.successful_tasks) return step_rew + final_rew def compute_final_rew(self, state: np.ndarray, remaining_steps: int)", "`True` if succeeded \"\"\" successful = np.all(self.succeeded_tasks) if successful and self.verbose: print_cbt(f'All {len(self)}", "\"\"\" return self._idx_curr @idx_curr.setter def idx_curr(self, idx: int): \"\"\" Set the index of", "= self._tasks[self._idx_curr].final_rew(state, remaining_steps) # Advance to the next task self.idx_curr = (self._idx_curr +", "remaining_steps) # Advance to the next task self.idx_curr = (self._idx_curr + 1) %", "pyrado.tasks.base import Task from pyrado.tasks.reward_functions import RewFcn from pyrado.utils.input_output import print_cbt class SequentialTasks(Task):", "-> EnvSpec: return self._tasks[0].env_spec # safe to assume that all tasks have the", "Add the last reward from every done task (also true for failed tasks)", "def idx_curr(self, idx: int): \"\"\" Set the index of the currently active task.", "sum_final_rew += t.compute_final_rew(state, remaining_steps) return sum_final_rew def reset(self, **kwargs): \"\"\" Reset all tasks.", "action :param remaining_steps: number of time steps left in the episode :param verbose:", "def has_succeeded(self, state: np.ndarray) -> bool: \"\"\" Check if this tasks is done.", "\"\"\" Get the index of the currently active task. \"\"\" return self._idx_curr @idx_curr.setter", "True # check off tasks which are before the start task self.hold_rew_when_done =", "verbose: bool = False): \"\"\" Constructor :param tasks: sequence of tasks a.k.a. goals,", "state_des(self) -> np.ndarray: \"\"\" Get the desired state the current task. \"\"\" return", "def space_des(self, space_des: Space): \"\"\" Set the desired space the current task. \"\"\"", "/ cost on task completion / fail of this task. 
Since this task", ":return: final return of the current subtask \"\"\" if not self.succeeded_tasks[self._idx_curr] and not", "is done. The SequentialTasks is successful if all sub-tasks are successful. :param state:", "with, by default with the first one in the list :param hold_rew_when_done: if", "tasks will be stored and added every step :param verbose: print messages on", "def idx_curr(self) -> int: \"\"\" Get the index of the currently active task.", "SequentialTasks is successful if all sub-tasks are successful. :param state: environments current state", "np.ndarray: \"\"\" Get the desired state the current task. \"\"\" return self._tasks[self._idx_curr].state_des @state_des.setter", "self.failed_tasks[self._idx_curr] and self._tasks[self._idx_curr].is_done(state): # Task has not been marked done yet, but is", "current task. \"\"\" if not isinstance(state_des, np.ndarray): raise pyrado.TypeErr(given=state_des, expected_type=np.ndarray) self._tasks[self._idx_curr].state_des = state_des", "one in the list :param hold_rew_when_done: if `True` reward values for done tasks", "float: \"\"\" Check if the current task is done. If so, move to", "the final reward of this task. :param state: current state :param act: current", "the immediate sub-tasks. :param state: current state of the environment :param remaining_steps: number", "reward if current sub-task is active step_rew += self._tasks[self._idx_curr].step_rew(state, act, remaining_steps) final_rew =", "done) at state {state}', 'g') elif self._tasks[self._idx_curr].has_failed(state): # Check off unsuccessfully completed task", "has failed (is done) at state {state}', 'r') else: raise pyrado.ValueErr(msg=f'Task {self._idx_curr} neither", "the desired space the current task. \"\"\" if not isinstance(space_des, Space): raise pyrado.TypeErr(given=space_des,", "self.failed_tasks[i]: # Add the last reward from every done task (also true for", "the step reward from the current task. \"\"\" step_rew = 0. 
if self.hold_rew_when_done:", "-> bool: \"\"\" Check if this tasks is done. The SequentialTasks is successful", "= True # check off tasks which are before the start task self.hold_rew_when_done", "reward values for done tasks will be stored and added every step :param", "with start_idx def _is_curr_task_done(self, state: np.ndarray, act: np.ndarray, remaining_steps: int, verbose: bool =", "remaining_steps: int) -> float: \"\"\" Get the step reward from the current task.", "compute_final_rew(self, state: np.ndarray, remaining_steps: int) -> float: \"\"\" Compute the reward / cost", "np.full(len(self), False, dtype=bool) if 'start_idx' in kwargs: self.succeeded_tasks[:kwargs['start_idx']] = True # Reset the", "(is done) at state {state}', 'r') else: raise pyrado.ValueErr(msg=f'Task {self._idx_curr} neither succeeded or", "self._tasks[0].env_spec # safe to assume that all tasks have the same env_spec @property", "self._idx_curr = start_idx self.succeeded_tasks = np.full(len(self), False, dtype=bool) self.failed_tasks = np.full(len(self), False, dtype=bool)", "desired state the current task. \"\"\" if not isinstance(state_des, np.ndarray): raise pyrado.TypeErr(given=state_des, expected_type=np.ndarray)", "state the current task. \"\"\" if not isinstance(state_des, np.ndarray): raise pyrado.TypeErr(given=state_des, expected_type=np.ndarray) self._tasks[self._idx_curr].state_des", "before the start task self.hold_rew_when_done = hold_rew_when_done if self.hold_rew_when_done: self.held_rews = np.zeros(len(self)) self.verbose", "of the task to start with, by default with the first one in", "this task holds multiple sub-tasks, the final reward / cost is computed for", "if self.hold_rew_when_done: self.held_rews = np.zeros(len(self)) # doesn't work with start_idx def _is_curr_task_done(self, state:", "of tasks a.k.a. 
goals \"\"\" def __init__(self, tasks: Sequence[Task], start_idx: int = 0,", "self.failed_tasks[self._idx_curr]): # Only give step reward if current sub-task is active step_rew +=", "self._tasks[self._idx_curr].is_done(state): # Task has not been marked done yet, but is now done", "Task class for a sequence of tasks a.k.a. goals \"\"\" def __init__(self, tasks:", "from copy import deepcopy from typing import Sequence import pyrado from pyrado.spaces.base import", "def __len__(self) -> int: return len(self._tasks) @property def env_spec(self) -> EnvSpec: return self._tasks[0].env_spec", "a.k.a. goals, the order matters :param start_idx: index of the task to start", "@property def idx_curr(self) -> int: \"\"\" Get the index of the currently active", "Reset all tasks. \"\"\" self.idx_curr = 0 for s in self._tasks: s.reset(**kwargs) #", "\"\"\" if not isinstance(state_des, np.ndarray): raise pyrado.TypeErr(given=state_des, expected_type=np.ndarray) self._tasks[self._idx_curr].state_des = state_des @property def", "print_cbt(f'task {self._idx_curr} has succeeded (is done) at state {state}', 'g') elif self._tasks[self._idx_curr].has_failed(state): #", "import numpy as np from copy import deepcopy from typing import Sequence import", "if not (0 <= idx < len(self)): raise pyrado.ValueErr(given=idx, ge_constraint='0', le_constraint=f'{len(self) - 1}')", "current task. \"\"\" return self._tasks[self._idx_curr].rew_fcn def step_rew(self, state: np.ndarray, act: np.ndarray, remaining_steps: int)", "def rew_fcn(self) -> RewFcn: \"\"\" Get the reward function of the current task.", "step :param verbose: print messages on task completion .. note:: `hold_rew_when_done=True` only makes", "if all sub-tasks are successful. :param state: environments current state :return: `True` if", "print messages on task completion .. note:: `hold_rew_when_done=True` only makes sense for positive", "else: task_final_rew = 0. 
return task_final_rew def has_succeeded(self, state: np.ndarray) -> bool: \"\"\"", "self.succeeded_tasks[:start_idx] = True # check off tasks which are before the start task", "from pyrado.utils.input_output import print_cbt class SequentialTasks(Task): \"\"\" Task class for a sequence of", "task. \"\"\" if not (0 <= idx < len(self)): raise pyrado.ValueErr(given=idx, ge_constraint='0', le_constraint=f'{len(self)", "pyrado.utils.data_types import EnvSpec from pyrado.tasks.base import Task from pyrado.tasks.reward_functions import RewFcn from pyrado.utils.input_output", "the order matters :param start_idx: index of the task to start with, by", "reward function of the current task. \"\"\" return self._tasks[self._idx_curr].rew_fcn def step_rew(self, state: np.ndarray,", "state: np.ndarray, remaining_steps: int) -> float: \"\"\" Compute the reward / cost on", "the current task. \"\"\" if not isinstance(space_des, Space): raise pyrado.TypeErr(given=space_des, expected_type=Space) self._tasks[self._idx_curr].space_des =", "space_des(self) -> Space: \"\"\" Get the desired space the current task. \"\"\" return", "done task (also true for failed tasks) step_rew += self.held_rews[i] if not (self.succeeded_tasks[self._idx_curr]", "len(self)): raise pyrado.ValueErr(given=idx, ge_constraint='0', le_constraint=f'{len(self) - 1}') self._idx_curr = idx @property def state_des(self)", "def _is_curr_task_done(self, state: np.ndarray, act: np.ndarray, remaining_steps: int, verbose: bool = False) ->", "# Task has not been marked done yet, but is now done if", "step_rew + final_rew def compute_final_rew(self, state: np.ndarray, remaining_steps: int) -> float: \"\"\" Compute", "return sum_final_rew def reset(self, **kwargs): \"\"\" Reset all tasks. \"\"\" self.idx_curr = 0", "only makes sense for positive rewards. \"\"\" self._tasks = deepcopy(tasks) self._idx_curr = start_idx", "is not a subclass of `TaskWrapper`, i.e. 
this function only looks at the", "SequentialTasks(Task): \"\"\" Task class for a sequence of tasks a.k.a. goals \"\"\" def", "self._is_curr_task_done(state, act, remaining_steps) # zero if the task is not done # self.logger.add_value('successful", "self._tasks[self._idx_curr].final_rew(state, remaining_steps) # Advance to the next task self.idx_curr = (self._idx_curr + 1)", "return self._tasks[self._idx_curr].state_des @state_des.setter def state_des(self, state_des: np.ndarray): \"\"\" Set the desired state the", "Space): \"\"\" Set the desired space the current task. \"\"\" if not isinstance(space_des,", "of time steps left in the episode :param verbose: print messages on success", "# doesn't work with start_idx def _is_curr_task_done(self, state: np.ndarray, act: np.ndarray, remaining_steps: int,", "the next one and return the final reward of this task. :param state:", "\"\"\" def __init__(self, tasks: Sequence[Task], start_idx: int = 0, hold_rew_when_done: bool = False,", "is successful if all sub-tasks are successful. :param state: environments current state :return:", "them, too. .. note:: The `ParallelTasks` class is not a subclass of `TaskWrapper`,", "note:: `hold_rew_when_done=True` only makes sense for positive rewards. \"\"\" self._tasks = deepcopy(tasks) self._idx_curr", "= deepcopy(tasks) self._idx_curr = start_idx self.succeeded_tasks = np.full(len(self), False, dtype=bool) self.failed_tasks = np.full(len(self),", "desired space the current task. \"\"\" if not isinstance(space_des, Space): raise pyrado.TypeErr(given=space_des, expected_type=Space)", "@property def tasks(self) -> Sequence[Task]: \"\"\" Get the list of tasks. 
\"\"\" return", "= verbose def __len__(self) -> int: return len(self._tasks) @property def env_spec(self) -> EnvSpec:", "left in the episode :return: final reward of all sub-tasks \"\"\" sum_final_rew =", "state {state}', 'g') elif self._tasks[self._idx_curr].has_failed(state): # Check off unsuccessfully completed task self.failed_tasks[self._idx_curr] =", "is done!') # Memorize current reward if self.hold_rew_when_done: self.held_rews[self._idx_curr] = self._tasks[self._idx_curr].step_rew(state, act, remaining_steps=0)", "def space_des(self) -> Space: \"\"\" Get the desired space the current task. \"\"\"", "1) % len(self) else: task_final_rew = 0. return task_final_rew def has_succeeded(self, state: np.ndarray)", "the desired state the current task. \"\"\" return self._tasks[self._idx_curr].state_des @state_des.setter def state_des(self, state_des:", "act: current action :param remaining_steps: number of time steps left in the episode", "self.hold_rew_when_done = hold_rew_when_done if self.hold_rew_when_done: self.held_rews = np.zeros(len(self)) self.verbose = verbose def __len__(self)", "a.k.a. goals \"\"\" def __init__(self, tasks: Sequence[Task], start_idx: int = 0, hold_rew_when_done: bool", "the same env_spec @property def tasks(self) -> Sequence[Task]: \"\"\" Get the list of", "int) -> float: \"\"\" Get the step reward from the current task. \"\"\"", "at state {state}', 'r') else: raise pyrado.ValueErr(msg=f'Task {self._idx_curr} neither succeeded or failed but", "current state of the environment :param remaining_steps: number of time steps left in", "Check off unsuccessfully completed task self.failed_tasks[self._idx_curr] = True if verbose: print_cbt(f'Task {self._idx_curr} has", "**kwargs): \"\"\" Reset all tasks. 
\"\"\" self.idx_curr = 0 for s in self._tasks:", "le_constraint=f'{len(self) - 1}') self._idx_curr = idx @property def state_des(self) -> np.ndarray: \"\"\" Get", "in self._tasks: s.reset(**kwargs) # Reset internal check list for done tasks self.succeeded_tasks =", "success or failure :return: final return of the current subtask \"\"\" if not", "= start_idx self.succeeded_tasks = np.full(len(self), False, dtype=bool) self.failed_tasks = np.full(len(self), False, dtype=bool) self.succeeded_tasks[:start_idx]", "self.hold_rew_when_done: self.held_rews = np.zeros(len(self)) self.verbose = verbose def __len__(self) -> int: return len(self._tasks)", "Memorize current reward if self.hold_rew_when_done: self.held_rews[self._idx_curr] = self._tasks[self._idx_curr].step_rew(state, act, remaining_steps=0) # Give a", "the next task self.idx_curr = (self._idx_curr + 1) % len(self) else: task_final_rew =", "defined by the task task_final_rew = self._tasks[self._idx_curr].final_rew(state, remaining_steps) # Advance to the next", "\"\"\" Get the list of tasks. \"\"\" return deepcopy(self._tasks) @property def idx_curr(self) ->", "\"\"\" if not (0 <= idx < len(self)): raise pyrado.ValueErr(given=idx, ge_constraint='0', le_constraint=f'{len(self) -", "self.hold_rew_when_done: self.held_rews = np.zeros(len(self)) # doesn't work with start_idx def _is_curr_task_done(self, state: np.ndarray,", "current task is done. If so, move to the next one and return", "@property def rew_fcn(self) -> RewFcn: \"\"\" Get the reward function of the current", "current task. 
\"\"\" if not isinstance(space_des, Space): raise pyrado.TypeErr(given=space_des, expected_type=Space) self._tasks[self._idx_curr].space_des = space_des", "is active step_rew += self._tasks[self._idx_curr].step_rew(state, act, remaining_steps) final_rew = self._is_curr_task_done(state, act, remaining_steps) #", "= np.full(len(self), False, dtype=bool) self.succeeded_tasks[:start_idx] = True # check off tasks which are", "Reset the stored reward values for done tasks if self.hold_rew_when_done: self.held_rews = np.zeros(len(self))", "\"\"\" return self._tasks[self._idx_curr].space_des @space_des.setter def space_des(self, space_des: Space): \"\"\" Set the desired space", "False, dtype=bool) self.failed_tasks = np.full(len(self), False, dtype=bool) if 'start_idx' in kwargs: self.succeeded_tasks[:kwargs['start_idx']] =", "sub-tasks are successful. :param state: environments current state :return: `True` if succeeded \"\"\"", "not been marked done yet, but is now done if self._tasks[self._idx_curr].has_succeeded(state): # Check", "by the task task_final_rew = self._tasks[self._idx_curr].final_rew(state, remaining_steps) # Advance to the next task", "else: raise pyrado.ValueErr(msg=f'Task {self._idx_curr} neither succeeded or failed but is done!') # Memorize", "return len(self._tasks) @property def env_spec(self) -> EnvSpec: return self._tasks[0].env_spec # safe to assume", "the currently active task. \"\"\" if not (0 <= idx < len(self)): raise", "return self._tasks[self._idx_curr].rew_fcn def step_rew(self, state: np.ndarray, act: np.ndarray, remaining_steps: int) -> float: \"\"\"", "of this task. :param state: current state :param act: current action :param remaining_steps:", "-> Sequence[Task]: \"\"\" Get the list of tasks. \"\"\" return deepcopy(self._tasks) @property def", "\"\"\" step_rew = 0. if self.hold_rew_when_done: for i in range(len(self)): # Iterate over", "the final reward / cost is computed for them, too. .. 
note:: The", "import deepcopy from typing import Sequence import pyrado from pyrado.spaces.base import Space from", "will be stored and added every step :param verbose: print messages on task", "task_final_rew = 0. return task_final_rew def has_succeeded(self, state: np.ndarray) -> bool: \"\"\" Check", "i.e. this function only looks at the immediate sub-tasks. :param state: current state", "verbose: print_cbt(f'task {self._idx_curr} has succeeded (is done) at state {state}', 'g') elif self._tasks[self._idx_curr].has_failed(state):", "reset(self, **kwargs): \"\"\" Reset all tasks. \"\"\" self.idx_curr = 0 for s in", "Space: \"\"\" Get the desired space the current task. \"\"\" return self._tasks[self._idx_curr].space_des @space_des.setter", "bool = False): \"\"\" Constructor :param tasks: sequence of tasks a.k.a. goals, the", "self.successful_tasks) return step_rew + final_rew def compute_final_rew(self, state: np.ndarray, remaining_steps: int) -> float:", "raise pyrado.TypeErr(given=state_des, expected_type=np.ndarray) self._tasks[self._idx_curr].state_des = state_des @property def space_des(self) -> Space: \"\"\" Get", "if not isinstance(space_des, Space): raise pyrado.TypeErr(given=space_des, expected_type=Space) self._tasks[self._idx_curr].space_des = space_des @property def rew_fcn(self)", "reward for completing the task defined by the task task_final_rew = self._tasks[self._idx_curr].final_rew(state, remaining_steps)", "tasks which are before the start task self.hold_rew_when_done = hold_rew_when_done if self.hold_rew_when_done: self.held_rews", "self._tasks[self._idx_curr].has_failed(state): # Check off unsuccessfully completed task self.failed_tasks[self._idx_curr] = True if verbose: print_cbt(f'Task", "# Add the last reward from every done task (also true for failed", "\"\"\" Task class for a sequence of tasks a.k.a. 
goals \"\"\" def __init__(self,", "self._tasks[self._idx_curr].rew_fcn def step_rew(self, state: np.ndarray, act: np.ndarray, remaining_steps: int) -> float: \"\"\" Get", "0, hold_rew_when_done: bool = False, verbose: bool = False): \"\"\" Constructor :param tasks:", "in the list :param hold_rew_when_done: if `True` reward values for done tasks will", "\"\"\" return deepcopy(self._tasks) @property def idx_curr(self) -> int: \"\"\" Get the index of", "first one in the list :param hold_rew_when_done: if `True` reward values for done", "at the immediate sub-tasks. :param state: current state of the environment :param remaining_steps:", "the task to start with, by default with the first one in the", "np.full(len(self), False, dtype=bool) self.failed_tasks = np.full(len(self), False, dtype=bool) self.succeeded_tasks[:start_idx] = True # check", "if the task is not done # self.logger.add_value('successful tasks', self.successful_tasks) return step_rew +", "+= t.compute_final_rew(state, remaining_steps) return sum_final_rew def reset(self, **kwargs): \"\"\" Reset all tasks. \"\"\"", "current task. \"\"\" step_rew = 0. if self.hold_rew_when_done: for i in range(len(self)): #", "sum_final_rew = 0. for t in self._tasks: sum_final_rew += t.compute_final_rew(state, remaining_steps) return sum_final_rew", "every step :param verbose: print messages on task completion .. note:: `hold_rew_when_done=True` only", "for done tasks self.succeeded_tasks = np.full(len(self), False, dtype=bool) self.failed_tasks = np.full(len(self), False, dtype=bool)", ":param act: current action :param remaining_steps: number of time steps left in the", "and return the final reward of this task. :param state: current state :param", "dtype=bool) if 'start_idx' in kwargs: self.succeeded_tasks[:kwargs['start_idx']] = True # Reset the stored reward", "done. 
If so, move to the next one and return the final reward", "start_idx: index of the task to start with, by default with the first", "EnvSpec: return self._tasks[0].env_spec # safe to assume that all tasks have the same", "completed task self.succeeded_tasks[self._idx_curr] = True if verbose: print_cbt(f'task {self._idx_curr} has succeeded (is done)", "values for done tasks if self.hold_rew_when_done: self.held_rews = np.zeros(len(self)) # doesn't work with", "state :return: `True` if succeeded \"\"\" successful = np.all(self.succeeded_tasks) if successful and self.verbose:", "goals \"\"\" def __init__(self, tasks: Sequence[Task], start_idx: int = 0, hold_rew_when_done: bool =", "print messages on success or failure :return: final return of the current subtask", "reward if self.hold_rew_when_done: self.held_rews[self._idx_curr] = self._tasks[self._idx_curr].step_rew(state, act, remaining_steps=0) # Give a reward for", "stored and added every step :param verbose: print messages on task completion ..", "not isinstance(state_des, np.ndarray): raise pyrado.TypeErr(given=state_des, expected_type=np.ndarray) self._tasks[self._idx_curr].state_des = state_des @property def space_des(self) ->", "self.idx_curr = (self._idx_curr + 1) % len(self) else: task_final_rew = 0. return task_final_rew", "to assume that all tasks have the same env_spec @property def tasks(self) ->", "np.ndarray): \"\"\" Set the desired state the current task. \"\"\" if not isinstance(state_des,", "reward of all sub-tasks \"\"\" sum_final_rew = 0. 
for t in self._tasks: sum_final_rew", "tasks self.succeeded_tasks = np.full(len(self), False, dtype=bool) self.failed_tasks = np.full(len(self), False, dtype=bool) if 'start_idx'", "@space_des.setter def space_des(self, space_des: Space): \"\"\" Set the desired space the current task.", "Task from pyrado.tasks.reward_functions import RewFcn from pyrado.utils.input_output import print_cbt class SequentialTasks(Task): \"\"\" Task", "remaining_steps: int, verbose: bool = False) -> float: \"\"\" Check if the current", "to the next one and return the final reward of this task. :param", "self._tasks[self._idx_curr].step_rew(state, act, remaining_steps) final_rew = self._is_curr_task_done(state, act, remaining_steps) # zero if the task", "or failure :return: final return of the current subtask \"\"\" if not self.succeeded_tasks[self._idx_curr]", "pyrado.TypeErr(given=space_des, expected_type=Space) self._tasks[self._idx_curr].space_des = space_des @property def rew_fcn(self) -> RewFcn: \"\"\" Get the", "for positive rewards. \"\"\" self._tasks = deepcopy(tasks) self._idx_curr = start_idx self.succeeded_tasks = np.full(len(self),", "start with, by default with the first one in the list :param hold_rew_when_done:", "pyrado.spaces.base import Space from pyrado.utils.data_types import EnvSpec from pyrado.tasks.base import Task from pyrado.tasks.reward_functions", "_is_curr_task_done(self, state: np.ndarray, act: np.ndarray, remaining_steps: int, verbose: bool = False) -> float:", "state {state}', 'r') else: raise pyrado.ValueErr(msg=f'Task {self._idx_curr} neither succeeded or failed but is", "but is now done if self._tasks[self._idx_curr].has_succeeded(state): # Check off successfully completed task self.succeeded_tasks[self._idx_curr]", "assume that all tasks have the same env_spec @property def tasks(self) -> Sequence[Task]:", "space the current task. 
\"\"\" if not isinstance(space_des, Space): raise pyrado.TypeErr(given=space_des, expected_type=Space) self._tasks[self._idx_curr].space_des", "self._tasks[self._idx_curr].space_des = space_des @property def rew_fcn(self) -> RewFcn: \"\"\" Get the reward function", "successful. :param state: environments current state :return: `True` if succeeded \"\"\" successful =", "return self._idx_curr @idx_curr.setter def idx_curr(self, idx: int): \"\"\" Set the index of the", "i in range(len(self)): # Iterate over previous tasks if self.succeeded_tasks[i] or self.failed_tasks[i]: #", "# Reset the stored reward values for done tasks if self.hold_rew_when_done: self.held_rews =", "< len(self)): raise pyrado.ValueErr(given=idx, ge_constraint='0', le_constraint=f'{len(self) - 1}') self._idx_curr = idx @property def", "successfully completed task self.succeeded_tasks[self._idx_curr] = True if verbose: print_cbt(f'task {self._idx_curr} has succeeded (is", "final_rew def compute_final_rew(self, state: np.ndarray, remaining_steps: int) -> float: \"\"\" Compute the reward", "tasks(self) -> Sequence[Task]: \"\"\" Get the list of tasks. \"\"\" return deepcopy(self._tasks) @property", "float: \"\"\" Compute the reward / cost on task completion / fail of", "of the current subtask \"\"\" if not self.succeeded_tasks[self._idx_curr] and not self.failed_tasks[self._idx_curr] and self._tasks[self._idx_curr].is_done(state):", "step_rew += self._tasks[self._idx_curr].step_rew(state, act, remaining_steps) final_rew = self._is_curr_task_done(state, act, remaining_steps) # zero if", "len(self._tasks) @property def env_spec(self) -> EnvSpec: return self._tasks[0].env_spec # safe to assume that", "current task. \"\"\" return self._tasks[self._idx_curr].state_des @state_des.setter def state_des(self, state_des: np.ndarray): \"\"\" Set the", "makes sense for positive rewards. 
\"\"\" self._tasks = deepcopy(tasks) self._idx_curr = start_idx self.succeeded_tasks", "if self.hold_rew_when_done: self.held_rews[self._idx_curr] = self._tasks[self._idx_curr].step_rew(state, act, remaining_steps=0) # Give a reward for completing", "/ fail of this task. Since this task holds multiple sub-tasks, the final", "over previous tasks if self.succeeded_tasks[i] or self.failed_tasks[i]: # Add the last reward from", "of tasks a.k.a. goals, the order matters :param start_idx: index of the task", "state: np.ndarray, act: np.ndarray, remaining_steps: int) -> float: \"\"\" Get the step reward", "zero if the task is not done # self.logger.add_value('successful tasks', self.successful_tasks) return step_rew", "task_final_rew = self._tasks[self._idx_curr].final_rew(state, remaining_steps) # Advance to the next task self.idx_curr = (self._idx_curr", "int): \"\"\" Set the index of the currently active task. \"\"\" if not", "self._idx_curr = idx @property def state_des(self) -> np.ndarray: \"\"\" Get the desired state", "successful and self.verbose: print_cbt(f'All {len(self)} sequential sub-tasks are done successfully', 'g') return successful", "self.failed_tasks[self._idx_curr] = True if verbose: print_cbt(f'Task {self._idx_curr} has failed (is done) at state", "= 0, hold_rew_when_done: bool = False, verbose: bool = False): \"\"\" Constructor :param", "the currently active task. \"\"\" return self._idx_curr @idx_curr.setter def idx_curr(self, idx: int): \"\"\"", "state the current task. \"\"\" return self._tasks[self._idx_curr].state_des @state_des.setter def state_des(self, state_des: np.ndarray): \"\"\"", "sub-tasks, the final reward / cost is computed for them, too. .. 
note::", "kwargs: self.succeeded_tasks[:kwargs['start_idx']] = True # Reset the stored reward values for done tasks", "state: np.ndarray, act: np.ndarray, remaining_steps: int, verbose: bool = False) -> float: \"\"\"", "pyrado.tasks.reward_functions import RewFcn from pyrado.utils.input_output import print_cbt class SequentialTasks(Task): \"\"\" Task class for", "not self.failed_tasks[self._idx_curr] and self._tasks[self._idx_curr].is_done(state): # Task has not been marked done yet, but", "of `TaskWrapper`, i.e. this function only looks at the immediate sub-tasks. :param state:", "np.ndarray, act: np.ndarray, remaining_steps: int) -> float: \"\"\" Get the step reward from", "last reward from every done task (also true for failed tasks) step_rew +=", "multiple sub-tasks, the final reward / cost is computed for them, too. ..", "current task. \"\"\" return self._tasks[self._idx_curr].space_des @space_des.setter def space_des(self, space_des: Space): \"\"\" Set the", "{self._idx_curr} has succeeded (is done) at state {state}', 'g') elif self._tasks[self._idx_curr].has_failed(state): # Check", "return step_rew + final_rew def compute_final_rew(self, state: np.ndarray, remaining_steps: int) -> float: \"\"\"", "state_des @property def space_des(self) -> Space: \"\"\" Get the desired space the current", "off unsuccessfully completed task self.failed_tasks[self._idx_curr] = True if verbose: print_cbt(f'Task {self._idx_curr} has failed", "self.held_rews[i] if not (self.succeeded_tasks[self._idx_curr] or self.failed_tasks[self._idx_curr]): # Only give step reward if current", "self.held_rews = np.zeros(len(self)) self.verbose = verbose def __len__(self) -> int: return len(self._tasks) @property", "False, verbose: bool = False): \"\"\" Constructor :param tasks: sequence of tasks a.k.a.", "messages on success or failure :return: final return of the current subtask \"\"\"", "of time steps left in the episode :return: final reward of all sub-tasks", "`ParallelTasks` class is not 
a subclass of `TaskWrapper`, i.e. this function only looks", "if successful and self.verbose: print_cbt(f'All {len(self)} sequential sub-tasks are done successfully', 'g') return", "in self._tasks: sum_final_rew += t.compute_final_rew(state, remaining_steps) return sum_final_rew def reset(self, **kwargs): \"\"\" Reset", "this task. Since this task holds multiple sub-tasks, the final reward / cost", "raise pyrado.TypeErr(given=space_des, expected_type=Space) self._tasks[self._idx_curr].space_des = space_des @property def rew_fcn(self) -> RewFcn: \"\"\" Get", "float: \"\"\" Get the step reward from the current task. \"\"\" step_rew =", "import Space from pyrado.utils.data_types import EnvSpec from pyrado.tasks.base import Task from pyrado.tasks.reward_functions import", "-> RewFcn: \"\"\" Get the reward function of the current task. \"\"\" return", "= 0 for s in self._tasks: s.reset(**kwargs) # Reset internal check list for", "\"\"\" successful = np.all(self.succeeded_tasks) if successful and self.verbose: print_cbt(f'All {len(self)} sequential sub-tasks are", "class is not a subclass of `TaskWrapper`, i.e. this function only looks at", "Get the desired state the current task. \"\"\" return self._tasks[self._idx_curr].state_des @state_des.setter def state_des(self,", "The SequentialTasks is successful if all sub-tasks are successful. :param state: environments current", "task task_final_rew = self._tasks[self._idx_curr].final_rew(state, remaining_steps) # Advance to the next task self.idx_curr =", "sense for positive rewards. 
\"\"\" self._tasks = deepcopy(tasks) self._idx_curr = start_idx self.succeeded_tasks =", "from pyrado.spaces.base import Space from pyrado.utils.data_types import EnvSpec from pyrado.tasks.base import Task from", "the task defined by the task task_final_rew = self._tasks[self._idx_curr].final_rew(state, remaining_steps) # Advance to", "# Advance to the next task self.idx_curr = (self._idx_curr + 1) % len(self)", "@property def env_spec(self) -> EnvSpec: return self._tasks[0].env_spec # safe to assume that all", "are before the start task self.hold_rew_when_done = hold_rew_when_done if self.hold_rew_when_done: self.held_rews = np.zeros(len(self))", "False, dtype=bool) if 'start_idx' in kwargs: self.succeeded_tasks[:kwargs['start_idx']] = True # Reset the stored", "self.failed_tasks = np.full(len(self), False, dtype=bool) if 'start_idx' in kwargs: self.succeeded_tasks[:kwargs['start_idx']] = True #", "task self.failed_tasks[self._idx_curr] = True if verbose: print_cbt(f'Task {self._idx_curr} has failed (is done) at", "bool = False) -> float: \"\"\" Check if the current task is done.", "ge_constraint='0', le_constraint=f'{len(self) - 1}') self._idx_curr = idx @property def state_des(self) -> np.ndarray: \"\"\"", "import EnvSpec from pyrado.tasks.base import Task from pyrado.tasks.reward_functions import RewFcn from pyrado.utils.input_output import", "range(len(self)): # Iterate over previous tasks if self.succeeded_tasks[i] or self.failed_tasks[i]: # Add the", "current action :param remaining_steps: number of time steps left in the episode :param", "number of time steps left in the episode :return: final reward of all", "state: environments current state :return: `True` if succeeded \"\"\" successful = np.all(self.succeeded_tasks) if", "bool: \"\"\" Check if this tasks is done. 
The SequentialTasks is successful if", "tasks if self.hold_rew_when_done: self.held_rews = np.zeros(len(self)) # doesn't work with start_idx def _is_curr_task_done(self,", "return task_final_rew def has_succeeded(self, state: np.ndarray) -> bool: \"\"\" Check if this tasks", "from pyrado.tasks.reward_functions import RewFcn from pyrado.utils.input_output import print_cbt class SequentialTasks(Task): \"\"\" Task class", "# zero if the task is not done # self.logger.add_value('successful tasks', self.successful_tasks) return", "the current task. \"\"\" if not isinstance(state_des, np.ndarray): raise pyrado.TypeErr(given=state_des, expected_type=np.ndarray) self._tasks[self._idx_curr].state_des =", "tasks: sequence of tasks a.k.a. goals, the order matters :param start_idx: index of", "if verbose: print_cbt(f'Task {self._idx_curr} has failed (is done) at state {state}', 'r') else:", "act, remaining_steps=0) # Give a reward for completing the task defined by the", "index of the task to start with, by default with the first one", "deepcopy(self._tasks) @property def idx_curr(self) -> int: \"\"\" Get the index of the currently", "has_succeeded(self, state: np.ndarray) -> bool: \"\"\" Check if this tasks is done. The", "\"\"\" Check if this tasks is done. The SequentialTasks is successful if all", "state: current state :param act: current action :param remaining_steps: number of time steps", "final reward of this task. :param state: current state :param act: current action", "\"\"\" if not self.succeeded_tasks[self._idx_curr] and not self.failed_tasks[self._idx_curr] and self._tasks[self._idx_curr].is_done(state): # Task has not", "done) at state {state}', 'r') else: raise pyrado.ValueErr(msg=f'Task {self._idx_curr} neither succeeded or failed", "RewFcn from pyrado.utils.input_output import print_cbt class SequentialTasks(Task): \"\"\" Task class for a sequence", "= False): \"\"\" Constructor :param tasks: sequence of tasks a.k.a. 
goals, the order", "isinstance(state_des, np.ndarray): raise pyrado.TypeErr(given=state_des, expected_type=np.ndarray) self._tasks[self._idx_curr].state_des = state_des @property def space_des(self) -> Space:", "-> float: \"\"\" Check if the current task is done. If so, move", "class SequentialTasks(Task): \"\"\" Task class for a sequence of tasks a.k.a. goals \"\"\"", "hold_rew_when_done: if `True` reward values for done tasks will be stored and added", "computed for them, too. .. note:: The `ParallelTasks` class is not a subclass", "remaining_steps) final_rew = self._is_curr_task_done(state, act, remaining_steps) # zero if the task is not", "EnvSpec from pyrado.tasks.base import Task from pyrado.tasks.reward_functions import RewFcn from pyrado.utils.input_output import print_cbt", "def state_des(self, state_des: np.ndarray): \"\"\" Set the desired state the current task. \"\"\"", "print_cbt class SequentialTasks(Task): \"\"\" Task class for a sequence of tasks a.k.a. goals", "bool = False, verbose: bool = False): \"\"\" Constructor :param tasks: sequence of", "reward / cost is computed for them, too. .. note:: The `ParallelTasks` class", "task. \"\"\" return self._tasks[self._idx_curr].space_des @space_des.setter def space_des(self, space_des: Space): \"\"\" Set the desired", "-> float: \"\"\" Get the step reward from the current task. \"\"\" step_rew", "env_spec(self) -> EnvSpec: return self._tasks[0].env_spec # safe to assume that all tasks have", "Set the index of the currently active task. \"\"\" if not (0 <=", "space_des: Space): \"\"\" Set the desired space the current task. \"\"\" if not", "the first one in the list :param hold_rew_when_done: if `True` reward values for", "been marked done yet, but is now done if self._tasks[self._idx_curr].has_succeeded(state): # Check off", "the stored reward values for done tasks if self.hold_rew_when_done: self.held_rews = np.zeros(len(self)) #", "this function only looks at the immediate sub-tasks. 
:param state: current state of", "environment :param remaining_steps: number of time steps left in the episode :return: final", ":return: `True` if succeeded \"\"\" successful = np.all(self.succeeded_tasks) if successful and self.verbose: print_cbt(f'All", "sub-task is active step_rew += self._tasks[self._idx_curr].step_rew(state, act, remaining_steps) final_rew = self._is_curr_task_done(state, act, remaining_steps)", "np.all(self.succeeded_tasks) if successful and self.verbose: print_cbt(f'All {len(self)} sequential sub-tasks are done successfully', 'g')", "dtype=bool) self.failed_tasks = np.full(len(self), False, dtype=bool) if 'start_idx' in kwargs: self.succeeded_tasks[:kwargs['start_idx']] = True", "`True` reward values for done tasks will be stored and added every step", "hold_rew_when_done: bool = False, verbose: bool = False): \"\"\" Constructor :param tasks: sequence", "task. \"\"\" if not isinstance(space_des, Space): raise pyrado.TypeErr(given=space_des, expected_type=Space) self._tasks[self._idx_curr].space_des = space_des @property", "@property def state_des(self) -> np.ndarray: \"\"\" Get the desired state the current task.", "on task completion / fail of this task. Since this task holds multiple", "sequence of tasks a.k.a. goals, the order matters :param start_idx: index of the", "task to start with, by default with the first one in the list", "act: np.ndarray, remaining_steps: int) -> float: \"\"\" Get the step reward from the", "the task task_final_rew = self._tasks[self._idx_curr].final_rew(state, remaining_steps) # Advance to the next task self.idx_curr", "tasks a.k.a. goals, the order matters :param start_idx: index of the task to", "are successful. 
:param state: environments current state :return: `True` if succeeded \"\"\" successful", "current subtask \"\"\" if not self.succeeded_tasks[self._idx_curr] and not self.failed_tasks[self._idx_curr] and self._tasks[self._idx_curr].is_done(state): # Task", "self._tasks[self._idx_curr].state_des @state_des.setter def state_des(self, state_des: np.ndarray): \"\"\" Set the desired state the current", ":param start_idx: index of the task to start with, by default with the", "\"\"\" Get the step reward from the current task. \"\"\" step_rew = 0.", "+= self._tasks[self._idx_curr].step_rew(state, act, remaining_steps) final_rew = self._is_curr_task_done(state, act, remaining_steps) # zero if the", "the current task. \"\"\" return self._tasks[self._idx_curr].state_des @state_des.setter def state_des(self, state_des: np.ndarray): \"\"\" Set", "self._tasks[self._idx_curr].has_succeeded(state): # Check off successfully completed task self.succeeded_tasks[self._idx_curr] = True if verbose: print_cbt(f'task", "if this tasks is done. The SequentialTasks is successful if all sub-tasks are", "Get the reward function of the current task. \"\"\" return self._tasks[self._idx_curr].rew_fcn def step_rew(self,", "done!') # Memorize current reward if self.hold_rew_when_done: self.held_rews[self._idx_curr] = self._tasks[self._idx_curr].step_rew(state, act, remaining_steps=0) #", "of the currently active task. \"\"\" if not (0 <= idx < len(self)):", "of the currently active task. \"\"\" return self._idx_curr @idx_curr.setter def idx_curr(self, idx: int):", "def tasks(self) -> Sequence[Task]: \"\"\" Get the list of tasks. \"\"\" return deepcopy(self._tasks)", "int: return len(self._tasks) @property def env_spec(self) -> EnvSpec: return self._tasks[0].env_spec # safe to", "every done task (also true for failed tasks) step_rew += self.held_rews[i] if not", "Sequence[Task]: \"\"\" Get the list of tasks. \"\"\" return deepcopy(self._tasks) @property def idx_curr(self)", "0. 
return task_final_rew def has_succeeded(self, state: np.ndarray) -> bool: \"\"\" Check if this", "s in self._tasks: s.reset(**kwargs) # Reset internal check list for done tasks self.succeeded_tasks", "or failed but is done!') # Memorize current reward if self.hold_rew_when_done: self.held_rews[self._idx_curr] =", "idx < len(self)): raise pyrado.ValueErr(given=idx, ge_constraint='0', le_constraint=f'{len(self) - 1}') self._idx_curr = idx @property", "task completion / fail of this task. Since this task holds multiple sub-tasks,", "by default with the first one in the list :param hold_rew_when_done: if `True`", "currently active task. \"\"\" return self._idx_curr @idx_curr.setter def idx_curr(self, idx: int): \"\"\" Set", "task. \"\"\" return self._tasks[self._idx_curr].rew_fcn def step_rew(self, state: np.ndarray, act: np.ndarray, remaining_steps: int) ->", "have the same env_spec @property def tasks(self) -> Sequence[Task]: \"\"\" Get the list", "desired state the current task. \"\"\" return self._tasks[self._idx_curr].state_des @state_des.setter def state_des(self, state_des: np.ndarray):", "the current task. \"\"\" return self._tasks[self._idx_curr].space_des @space_des.setter def space_des(self, space_des: Space): \"\"\" Set", "self._tasks = deepcopy(tasks) self._idx_curr = start_idx self.succeeded_tasks = np.full(len(self), False, dtype=bool) self.failed_tasks =", "the episode :return: final reward of all sub-tasks \"\"\" sum_final_rew = 0. for", "'r') else: raise pyrado.ValueErr(msg=f'Task {self._idx_curr} neither succeeded or failed but is done!') #", "Get the step reward from the current task. \"\"\" step_rew = 0. 
if", "int = 0, hold_rew_when_done: bool = False, verbose: bool = False): \"\"\" Constructor", "def __init__(self, tasks: Sequence[Task], start_idx: int = 0, hold_rew_when_done: bool = False, verbose:", "the environment :param remaining_steps: number of time steps left in the episode :return:", "at state {state}', 'g') elif self._tasks[self._idx_curr].has_failed(state): # Check off unsuccessfully completed task self.failed_tasks[self._idx_curr]", "\"\"\" return self._tasks[self._idx_curr].rew_fcn def step_rew(self, state: np.ndarray, act: np.ndarray, remaining_steps: int) -> float:", "space_des(self, space_des: Space): \"\"\" Set the desired space the current task. \"\"\" if", "-> int: return len(self._tasks) @property def env_spec(self) -> EnvSpec: return self._tasks[0].env_spec # safe", "idx_curr(self, idx: int): \"\"\" Set the index of the currently active task. \"\"\"", "{state}', 'r') else: raise pyrado.ValueErr(msg=f'Task {self._idx_curr} neither succeeded or failed but is done!')", "the current task is done. If so, move to the next one and", ".. note:: `hold_rew_when_done=True` only makes sense for positive rewards. \"\"\" self._tasks = deepcopy(tasks)", "act, remaining_steps) # zero if the task is not done # self.logger.add_value('successful tasks',", "of this task. Since this task holds multiple sub-tasks, the final reward /", "tasks: Sequence[Task], start_idx: int = 0, hold_rew_when_done: bool = False, verbose: bool =", "sum_final_rew def reset(self, **kwargs): \"\"\" Reset all tasks. \"\"\" self.idx_curr = 0 for", "hold_rew_when_done if self.hold_rew_when_done: self.held_rews = np.zeros(len(self)) self.verbose = verbose def __len__(self) -> int:", "on task completion .. note:: `hold_rew_when_done=True` only makes sense for positive rewards. 
\"\"\"", "if succeeded \"\"\" successful = np.all(self.succeeded_tasks) if successful and self.verbose: print_cbt(f'All {len(self)} sequential", "step_rew += self.held_rews[i] if not (self.succeeded_tasks[self._idx_curr] or self.failed_tasks[self._idx_curr]): # Only give step reward", "idx: int): \"\"\" Set the index of the currently active task. \"\"\" if", "(self.succeeded_tasks[self._idx_curr] or self.failed_tasks[self._idx_curr]): # Only give step reward if current sub-task is active", "\"\"\" self.idx_curr = 0 for s in self._tasks: s.reset(**kwargs) # Reset internal check", "a sequence of tasks a.k.a. goals \"\"\" def __init__(self, tasks: Sequence[Task], start_idx: int", "failed tasks) step_rew += self.held_rews[i] if not (self.succeeded_tasks[self._idx_curr] or self.failed_tasks[self._idx_curr]): # Only give", "\"\"\" sum_final_rew = 0. for t in self._tasks: sum_final_rew += t.compute_final_rew(state, remaining_steps) return", "list for done tasks self.succeeded_tasks = np.full(len(self), False, dtype=bool) self.failed_tasks = np.full(len(self), False,", "not self.succeeded_tasks[self._idx_curr] and not self.failed_tasks[self._idx_curr] and self._tasks[self._idx_curr].is_done(state): # Task has not been marked", "Sequence[Task], start_idx: int = 0, hold_rew_when_done: bool = False, verbose: bool = False):", "the list :param hold_rew_when_done: if `True` reward values for done tasks will be", "space the current task. \"\"\" return self._tasks[self._idx_curr].space_des @space_des.setter def space_des(self, space_des: Space): \"\"\"", "remaining_steps=0) # Give a reward for completing the task defined by the task", "elif self._tasks[self._idx_curr].has_failed(state): # Check off unsuccessfully completed task self.failed_tasks[self._idx_curr] = True if verbose:", "for failed tasks) step_rew += self.held_rews[i] if not (self.succeeded_tasks[self._idx_curr] or self.failed_tasks[self._idx_curr]): # Only", "len(self) else: task_final_rew = 0. 
return task_final_rew def has_succeeded(self, state: np.ndarray) -> bool:", "tasks. \"\"\" self.idx_curr = 0 for s in self._tasks: s.reset(**kwargs) # Reset internal", "the episode :param verbose: print messages on success or failure :return: final return", "failed (is done) at state {state}', 'r') else: raise pyrado.ValueErr(msg=f'Task {self._idx_curr} neither succeeded", "def state_des(self) -> np.ndarray: \"\"\" Get the desired state the current task. \"\"\"", ":param verbose: print messages on success or failure :return: final return of the", "-> Space: \"\"\" Get the desired space the current task. \"\"\" return self._tasks[self._idx_curr].space_des", ":param state: environments current state :return: `True` if succeeded \"\"\" successful = np.all(self.succeeded_tasks)", "subclass of `TaskWrapper`, i.e. this function only looks at the immediate sub-tasks. :param", "idx @property def state_des(self) -> np.ndarray: \"\"\" Get the desired state the current", "off tasks which are before the start task self.hold_rew_when_done = hold_rew_when_done if self.hold_rew_when_done:", "the index of the currently active task. \"\"\" if not (0 <= idx", "task defined by the task task_final_rew = self._tasks[self._idx_curr].final_rew(state, remaining_steps) # Advance to the", "= np.zeros(len(self)) # doesn't work with start_idx def _is_curr_task_done(self, state: np.ndarray, act: np.ndarray,", "np.zeros(len(self)) # doesn't work with start_idx def _is_curr_task_done(self, state: np.ndarray, act: np.ndarray, remaining_steps:", "to the next task self.idx_curr = (self._idx_curr + 1) % len(self) else: task_final_rew", "(also true for failed tasks) step_rew += self.held_rews[i] if not (self.succeeded_tasks[self._idx_curr] or self.failed_tasks[self._idx_curr]):", "Check if the current task is done. If so, move to the next", "rewards. 
\"\"\" self._tasks = deepcopy(tasks) self._idx_curr = start_idx self.succeeded_tasks = np.full(len(self), False, dtype=bool)", "# self.logger.add_value('successful tasks', self.successful_tasks) return step_rew + final_rew def compute_final_rew(self, state: np.ndarray, remaining_steps:", "left in the episode :param verbose: print messages on success or failure :return:", "@state_des.setter def state_des(self, state_des: np.ndarray): \"\"\" Set the desired state the current task.", "steps left in the episode :param verbose: print messages on success or failure", "remaining_steps: number of time steps left in the episode :param verbose: print messages", "next one and return the final reward of this task. :param state: current", "succeeded \"\"\" successful = np.all(self.succeeded_tasks) if successful and self.verbose: print_cbt(f'All {len(self)} sequential sub-tasks", "done yet, but is now done if self._tasks[self._idx_curr].has_succeeded(state): # Check off successfully completed", "not a subclass of `TaskWrapper`, i.e. this function only looks at the immediate", "return deepcopy(self._tasks) @property def idx_curr(self) -> int: \"\"\" Get the index of the", "# Iterate over previous tasks if self.succeeded_tasks[i] or self.failed_tasks[i]: # Add the last", "print_cbt(f'Task {self._idx_curr} has failed (is done) at state {state}', 'r') else: raise pyrado.ValueErr(msg=f'Task", "list :param hold_rew_when_done: if `True` reward values for done tasks will be stored", "true for failed tasks) step_rew += self.held_rews[i] if not (self.succeeded_tasks[self._idx_curr] or self.failed_tasks[self._idx_curr]): #", "= space_des @property def rew_fcn(self) -> RewFcn: \"\"\" Get the reward function of", "one and return the final reward of this task. 
:param state: current state", "(is done) at state {state}', 'g') elif self._tasks[self._idx_curr].has_failed(state): # Check off unsuccessfully completed", ":param remaining_steps: number of time steps left in the episode :param verbose: print", "+= self.held_rews[i] if not (self.succeeded_tasks[self._idx_curr] or self.failed_tasks[self._idx_curr]): # Only give step reward if", "task. :param state: current state :param act: current action :param remaining_steps: number of", "pyrado from pyrado.spaces.base import Space from pyrado.utils.data_types import EnvSpec from pyrado.tasks.base import Task", "\"\"\" Get the desired space the current task. \"\"\" return self._tasks[self._idx_curr].space_des @space_des.setter def", "now done if self._tasks[self._idx_curr].has_succeeded(state): # Check off successfully completed task self.succeeded_tasks[self._idx_curr] = True", "# Only give step reward if current sub-task is active step_rew += self._tasks[self._idx_curr].step_rew(state,", "np.zeros(len(self)) self.verbose = verbose def __len__(self) -> int: return len(self._tasks) @property def env_spec(self)", "Advance to the next task self.idx_curr = (self._idx_curr + 1) % len(self) else:", "isinstance(space_des, Space): raise pyrado.TypeErr(given=space_des, expected_type=Space) self._tasks[self._idx_curr].space_des = space_des @property def rew_fcn(self) -> RewFcn:", "np.ndarray, remaining_steps: int) -> float: \"\"\" Get the step reward from the current", "= 0. if self.hold_rew_when_done: for i in range(len(self)): # Iterate over previous tasks", "only looks at the immediate sub-tasks. 
:param state: current state of the environment", "Check off successfully completed task self.succeeded_tasks[self._idx_curr] = True if verbose: print_cbt(f'task {self._idx_curr} has", "steps left in the episode :return: final reward of all sub-tasks \"\"\" sum_final_rew", "not (self.succeeded_tasks[self._idx_curr] or self.failed_tasks[self._idx_curr]): # Only give step reward if current sub-task is", "def compute_final_rew(self, state: np.ndarray, remaining_steps: int) -> float: \"\"\" Compute the reward /", "tasks if self.succeeded_tasks[i] or self.failed_tasks[i]: # Add the last reward from every done", "False) -> float: \"\"\" Check if the current task is done. If so,", "with the first one in the list :param hold_rew_when_done: if `True` reward values", "added every step :param verbose: print messages on task completion .. note:: `hold_rew_when_done=True`", "is not done # self.logger.add_value('successful tasks', self.successful_tasks) return step_rew + final_rew def compute_final_rew(self,", "= np.full(len(self), False, dtype=bool) self.failed_tasks = np.full(len(self), False, dtype=bool) self.succeeded_tasks[:start_idx] = True #", "int) -> float: \"\"\" Compute the reward / cost on task completion /", "of the environment :param remaining_steps: number of time steps left in the episode", "= False) -> float: \"\"\" Check if the current task is done. If", "Check if this tasks is done. The SequentialTasks is successful if all sub-tasks", "Sequence import pyrado from pyrado.spaces.base import Space from pyrado.utils.data_types import EnvSpec from pyrado.tasks.base", "np.ndarray, remaining_steps: int) -> float: \"\"\" Compute the reward / cost on task", "= True if verbose: print_cbt(f'task {self._idx_curr} has succeeded (is done) at state {state}',", "If so, move to the next one and return the final reward of", "task completion .. note:: `hold_rew_when_done=True` only makes sense for positive rewards. 
\"\"\" self._tasks", "final_rew = self._is_curr_task_done(state, act, remaining_steps) # zero if the task is not done", "internal check list for done tasks self.succeeded_tasks = np.full(len(self), False, dtype=bool) self.failed_tasks =", "'g') elif self._tasks[self._idx_curr].has_failed(state): # Check off unsuccessfully completed task self.failed_tasks[self._idx_curr] = True if", "state_des(self, state_des: np.ndarray): \"\"\" Set the desired state the current task. \"\"\" if", "# Reset internal check list for done tasks self.succeeded_tasks = np.full(len(self), False, dtype=bool)", "succeeded or failed but is done!') # Memorize current reward if self.hold_rew_when_done: self.held_rews[self._idx_curr]", "\"\"\" Get the reward function of the current task. \"\"\" return self._tasks[self._idx_curr].rew_fcn def", ".. note:: The `ParallelTasks` class is not a subclass of `TaskWrapper`, i.e. this", "Set the desired space the current task. \"\"\" if not isinstance(space_des, Space): raise", "# Give a reward for completing the task defined by the task task_final_rew", "values for done tasks will be stored and added every step :param verbose:", "dtype=bool) self.succeeded_tasks[:start_idx] = True # check off tasks which are before the start", "all tasks have the same env_spec @property def tasks(self) -> Sequence[Task]: \"\"\" Get", "dtype=bool) self.failed_tasks = np.full(len(self), False, dtype=bool) self.succeeded_tasks[:start_idx] = True # check off tasks", "step reward from the current task. \"\"\" step_rew = 0. 
if self.hold_rew_when_done: for", "the task is not done # self.logger.add_value('successful tasks', self.successful_tasks) return step_rew + final_rew", "stored reward values for done tasks if self.hold_rew_when_done: self.held_rews = np.zeros(len(self)) # doesn't", "subtask \"\"\" if not self.succeeded_tasks[self._idx_curr] and not self.failed_tasks[self._idx_curr] and self._tasks[self._idx_curr].is_done(state): # Task has", "so, move to the next one and return the final reward of this", "and self._tasks[self._idx_curr].is_done(state): # Task has not been marked done yet, but is now", "return the final reward of this task. :param state: current state :param act:", "active step_rew += self._tasks[self._idx_curr].step_rew(state, act, remaining_steps) final_rew = self._is_curr_task_done(state, act, remaining_steps) # zero", "number of time steps left in the episode :param verbose: print messages on", "Give a reward for completing the task defined by the task task_final_rew =", "from pyrado.utils.data_types import EnvSpec from pyrado.tasks.base import Task from pyrado.tasks.reward_functions import RewFcn from", "class for a sequence of tasks a.k.a. goals \"\"\" def __init__(self, tasks: Sequence[Task],", "the start task self.hold_rew_when_done = hold_rew_when_done if self.hold_rew_when_done: self.held_rews = np.zeros(len(self)) self.verbose =", "+ 1) % len(self) else: task_final_rew = 0. return task_final_rew def has_succeeded(self, state:", "s.reset(**kwargs) # Reset internal check list for done tasks self.succeeded_tasks = np.full(len(self), False,", "return self._tasks[self._idx_curr].space_des @space_des.setter def space_des(self, space_des: Space): \"\"\" Set the desired space the", "current sub-task is active step_rew += self._tasks[self._idx_curr].step_rew(state, act, remaining_steps) final_rew = self._is_curr_task_done(state, act,", "int, verbose: bool = False) -> float: \"\"\" Check if the current task", "currently active task. 
\"\"\" if not (0 <= idx < len(self)): raise pyrado.ValueErr(given=idx,", "sub-tasks. :param state: current state of the environment :param remaining_steps: number of time", "from pyrado.tasks.base import Task from pyrado.tasks.reward_functions import RewFcn from pyrado.utils.input_output import print_cbt class", "False, dtype=bool) self.succeeded_tasks[:start_idx] = True # check off tasks which are before the", "step reward if current sub-task is active step_rew += self._tasks[self._idx_curr].step_rew(state, act, remaining_steps) final_rew", "np from copy import deepcopy from typing import Sequence import pyrado from pyrado.spaces.base", "task. Since this task holds multiple sub-tasks, the final reward / cost is", "(0 <= idx < len(self)): raise pyrado.ValueErr(given=idx, ge_constraint='0', le_constraint=f'{len(self) - 1}') self._idx_curr =", "state: current state of the environment :param remaining_steps: number of time steps left", "is done. If so, move to the next one and return the final", "be stored and added every step :param verbose: print messages on task completion", "desired space the current task. \"\"\" return self._tasks[self._idx_curr].space_des @space_des.setter def space_des(self, space_des: Space):", "of all sub-tasks \"\"\" sum_final_rew = 0. for t in self._tasks: sum_final_rew +=", "# Check off unsuccessfully completed task self.failed_tasks[self._idx_curr] = True if verbose: print_cbt(f'Task {self._idx_curr}", "active task. 
\"\"\" if not (0 <= idx < len(self)): raise pyrado.ValueErr(given=idx, ge_constraint='0',", "succeeded (is done) at state {state}', 'g') elif self._tasks[self._idx_curr].has_failed(state): # Check off unsuccessfully", "in the episode :return: final reward of all sub-tasks \"\"\" sum_final_rew = 0.", "state :param act: current action :param remaining_steps: number of time steps left in", "on success or failure :return: final return of the current subtask \"\"\" if", "start task self.hold_rew_when_done = hold_rew_when_done if self.hold_rew_when_done: self.held_rews = np.zeros(len(self)) self.verbose = verbose", "sub-tasks \"\"\" sum_final_rew = 0. for t in self._tasks: sum_final_rew += t.compute_final_rew(state, remaining_steps)", "raise pyrado.ValueErr(given=idx, ge_constraint='0', le_constraint=f'{len(self) - 1}') self._idx_curr = idx @property def state_des(self) ->", "import print_cbt class SequentialTasks(Task): \"\"\" Task class for a sequence of tasks a.k.a.", "completing the task defined by the task task_final_rew = self._tasks[self._idx_curr].final_rew(state, remaining_steps) # Advance", "marked done yet, but is now done if self._tasks[self._idx_curr].has_succeeded(state): # Check off successfully", "Space): raise pyrado.TypeErr(given=space_des, expected_type=Space) self._tasks[self._idx_curr].space_des = space_des @property def rew_fcn(self) -> RewFcn: \"\"\"", "state of the environment :param remaining_steps: number of time steps left in the", "verbose: bool = False) -> float: \"\"\" Check if the current task is", "for done tasks will be stored and added every step :param verbose: print", "for t in self._tasks: sum_final_rew += t.compute_final_rew(state, remaining_steps) return sum_final_rew def reset(self, **kwargs):", "self.idx_curr = 0 for s in self._tasks: s.reset(**kwargs) # Reset internal check list", ":param tasks: sequence of tasks a.k.a. 
goals, the order matters :param start_idx: index", "Since this task holds multiple sub-tasks, the final reward / cost is computed", "for i in range(len(self)): # Iterate over previous tasks if self.succeeded_tasks[i] or self.failed_tasks[i]:", ":param state: current state :param act: current action :param remaining_steps: number of time", "a subclass of `TaskWrapper`, i.e. this function only looks at the immediate sub-tasks.", "has succeeded (is done) at state {state}', 'g') elif self._tasks[self._idx_curr].has_failed(state): # Check off", "= 0. return task_final_rew def has_succeeded(self, state: np.ndarray) -> bool: \"\"\" Check if", "typing import Sequence import pyrado from pyrado.spaces.base import Space from pyrado.utils.data_types import EnvSpec", "def step_rew(self, state: np.ndarray, act: np.ndarray, remaining_steps: int) -> float: \"\"\" Get the", "list of tasks. \"\"\" return deepcopy(self._tasks) @property def idx_curr(self) -> int: \"\"\" Get", "self._idx_curr @idx_curr.setter def idx_curr(self, idx: int): \"\"\" Set the index of the currently", "the desired state the current task. \"\"\" if not isinstance(state_des, np.ndarray): raise pyrado.TypeErr(given=state_des,", "self._tasks[self._idx_curr].state_des = state_des @property def space_des(self) -> Space: \"\"\" Get the desired space", "unsuccessfully completed task self.failed_tasks[self._idx_curr] = True if verbose: print_cbt(f'Task {self._idx_curr} has failed (is", "self.failed_tasks = np.full(len(self), False, dtype=bool) self.succeeded_tasks[:start_idx] = True # check off tasks which", "in kwargs: self.succeeded_tasks[:kwargs['start_idx']] = True # Reset the stored reward values for done", "False): \"\"\" Constructor :param tasks: sequence of tasks a.k.a. 
goals, the order matters", "deepcopy from typing import Sequence import pyrado from pyrado.spaces.base import Space from pyrado.utils.data_types", "start_idx def _is_curr_task_done(self, state: np.ndarray, act: np.ndarray, remaining_steps: int, verbose: bool = False)", "# safe to assume that all tasks have the same env_spec @property def", "task self.succeeded_tasks[self._idx_curr] = True if verbose: print_cbt(f'task {self._idx_curr} has succeeded (is done) at", "final reward of all sub-tasks \"\"\" sum_final_rew = 0. for t in self._tasks:", "rew_fcn(self) -> RewFcn: \"\"\" Get the reward function of the current task. \"\"\"", "completion / fail of this task. Since this task holds multiple sub-tasks, the", "if self.succeeded_tasks[i] or self.failed_tasks[i]: # Add the last reward from every done task", "and not self.failed_tasks[self._idx_curr] and self._tasks[self._idx_curr].is_done(state): # Task has not been marked done yet,", "done. The SequentialTasks is successful if all sub-tasks are successful. :param state: environments", "self.succeeded_tasks[self._idx_curr] = True if verbose: print_cbt(f'task {self._idx_curr} has succeeded (is done) at state", "self.succeeded_tasks[:kwargs['start_idx']] = True # Reset the stored reward values for done tasks if", "for completing the task defined by the task task_final_rew = self._tasks[self._idx_curr].final_rew(state, remaining_steps) #", "successful if all sub-tasks are successful. :param state: environments current state :return: `True`", "task is done. If so, move to the next one and return the", "Reset internal check list for done tasks self.succeeded_tasks = np.full(len(self), False, dtype=bool) self.failed_tasks", "function of the current task. 
\"\"\" return self._tasks[self._idx_curr].rew_fcn def step_rew(self, state: np.ndarray, act:", "done if self._tasks[self._idx_curr].has_succeeded(state): # Check off successfully completed task self.succeeded_tasks[self._idx_curr] = True if", "for s in self._tasks: s.reset(**kwargs) # Reset internal check list for done tasks", "give step reward if current sub-task is active step_rew += self._tasks[self._idx_curr].step_rew(state, act, remaining_steps)", "Compute the reward / cost on task completion / fail of this task.", "self.hold_rew_when_done: self.held_rews[self._idx_curr] = self._tasks[self._idx_curr].step_rew(state, act, remaining_steps=0) # Give a reward for completing the", "0 for s in self._tasks: s.reset(**kwargs) # Reset internal check list for done", "holds multiple sub-tasks, the final reward / cost is computed for them, too.", "this task. :param state: current state :param act: current action :param remaining_steps: number", "True if verbose: print_cbt(f'task {self._idx_curr} has succeeded (is done) at state {state}', 'g')", "self._tasks[self._idx_curr].step_rew(state, act, remaining_steps=0) # Give a reward for completing the task defined by", "- 1}') self._idx_curr = idx @property def state_des(self) -> np.ndarray: \"\"\" Get the", "True # Reset the stored reward values for done tasks if self.hold_rew_when_done: self.held_rews", "space_des @property def rew_fcn(self) -> RewFcn: \"\"\" Get the reward function of the", "self._tasks: s.reset(**kwargs) # Reset internal check list for done tasks self.succeeded_tasks = np.full(len(self),", "Get the index of the currently active task. \"\"\" return self._idx_curr @idx_curr.setter def", "failure :return: final return of the current subtask \"\"\" if not self.succeeded_tasks[self._idx_curr] and", "of the current task. 
\"\"\" return self._tasks[self._idx_curr].rew_fcn def step_rew(self, state: np.ndarray, act: np.ndarray,", "@property def space_des(self) -> Space: \"\"\" Get the desired space the current task.", "tasks a.k.a. goals \"\"\" def __init__(self, tasks: Sequence[Task], start_idx: int = 0, hold_rew_when_done:", "the current task. \"\"\" step_rew = 0. if self.hold_rew_when_done: for i in range(len(self)):", "= np.full(len(self), False, dtype=bool) if 'start_idx' in kwargs: self.succeeded_tasks[:kwargs['start_idx']] = True # Reset", "'start_idx' in kwargs: self.succeeded_tasks[:kwargs['start_idx']] = True # Reset the stored reward values for", "next task self.idx_curr = (self._idx_curr + 1) % len(self) else: task_final_rew = 0.", "task self.hold_rew_when_done = hold_rew_when_done if self.hold_rew_when_done: self.held_rews = np.zeros(len(self)) self.verbose = verbose def", "reward from every done task (also true for failed tasks) step_rew += self.held_rews[i]", "all sub-tasks \"\"\" sum_final_rew = 0. for t in self._tasks: sum_final_rew += t.compute_final_rew(state,", "not (0 <= idx < len(self)): raise pyrado.ValueErr(given=idx, ge_constraint='0', le_constraint=f'{len(self) - 1}') self._idx_curr", "\"\"\" Reset all tasks. \"\"\" self.idx_curr = 0 for s in self._tasks: s.reset(**kwargs)", "matters :param start_idx: index of the task to start with, by default with", "`TaskWrapper`, i.e. this function only looks at the immediate sub-tasks. :param state: current", "active task. \"\"\" return self._idx_curr @idx_curr.setter def idx_curr(self, idx: int): \"\"\" Set the", "failed but is done!') # Memorize current reward if self.hold_rew_when_done: self.held_rews[self._idx_curr] = self._tasks[self._idx_curr].step_rew(state,", "np.full(len(self), False, dtype=bool) self.failed_tasks = np.full(len(self), False, dtype=bool) if 'start_idx' in kwargs: self.succeeded_tasks[:kwargs['start_idx']]", "env_spec @property def tasks(self) -> Sequence[Task]: \"\"\" Get the list of tasks. 
\"\"\"", "has not been marked done yet, but is now done if self._tasks[self._idx_curr].has_succeeded(state): #", "self._tasks[self._idx_curr].space_des @space_des.setter def space_des(self, space_des: Space): \"\"\" Set the desired space the current", "self.hold_rew_when_done: for i in range(len(self)): # Iterate over previous tasks if self.succeeded_tasks[i] or", "remaining_steps) return sum_final_rew def reset(self, **kwargs): \"\"\" Reset all tasks. \"\"\" self.idx_curr =", "{self._idx_curr} neither succeeded or failed but is done!') # Memorize current reward if", "act, remaining_steps) final_rew = self._is_curr_task_done(state, act, remaining_steps) # zero if the task is", "neither succeeded or failed but is done!') # Memorize current reward if self.hold_rew_when_done:", "is now done if self._tasks[self._idx_curr].has_succeeded(state): # Check off successfully completed task self.succeeded_tasks[self._idx_curr] =", "index of the currently active task. \"\"\" if not (0 <= idx <", "messages on task completion .. 
note:: `hold_rew_when_done=True` only makes sense for positive rewards.", "time steps left in the episode :return: final reward of all sub-tasks \"\"\"", "{self._idx_curr} has failed (is done) at state {state}', 'r') else: raise pyrado.ValueErr(msg=f'Task {self._idx_curr}", "tasks have the same env_spec @property def tasks(self) -> Sequence[Task]: \"\"\" Get the", "default with the first one in the list :param hold_rew_when_done: if `True` reward", "self.succeeded_tasks[self._idx_curr] and not self.failed_tasks[self._idx_curr] and self._tasks[self._idx_curr].is_done(state): # Task has not been marked done", "self.succeeded_tasks = np.full(len(self), False, dtype=bool) self.failed_tasks = np.full(len(self), False, dtype=bool) self.succeeded_tasks[:start_idx] = True", "if verbose: print_cbt(f'task {self._idx_curr} has succeeded (is done) at state {state}', 'g') elif", "pyrado.ValueErr(msg=f'Task {self._idx_curr} neither succeeded or failed but is done!') # Memorize current reward", "copy import deepcopy from typing import Sequence import pyrado from pyrado.spaces.base import Space", "= hold_rew_when_done if self.hold_rew_when_done: self.held_rews = np.zeros(len(self)) self.verbose = verbose def __len__(self) ->", "\"\"\" self._tasks = deepcopy(tasks) self._idx_curr = start_idx self.succeeded_tasks = np.full(len(self), False, dtype=bool) self.failed_tasks", "task (also true for failed tasks) step_rew += self.held_rews[i] if not (self.succeeded_tasks[self._idx_curr] or", "Constructor :param tasks: sequence of tasks a.k.a. goals, the order matters :param start_idx:", "the index of the currently active task. 
\"\"\" return self._idx_curr @idx_curr.setter def idx_curr(self,", "pyrado.ValueErr(given=idx, ge_constraint='0', le_constraint=f'{len(self) - 1}') self._idx_curr = idx @property def state_des(self) -> np.ndarray:", "remaining_steps) # zero if the task is not done # self.logger.add_value('successful tasks', self.successful_tasks)", "% len(self) else: task_final_rew = 0. return task_final_rew def has_succeeded(self, state: np.ndarray) ->", "index of the currently active task. \"\"\" return self._idx_curr @idx_curr.setter def idx_curr(self, idx:", "False, dtype=bool) self.failed_tasks = np.full(len(self), False, dtype=bool) self.succeeded_tasks[:start_idx] = True # check off", "positive rewards. \"\"\" self._tasks = deepcopy(tasks) self._idx_curr = start_idx self.succeeded_tasks = np.full(len(self), False,", "and added every step :param verbose: print messages on task completion .. note::", "remaining_steps: number of time steps left in the episode :return: final reward of", "cost on task completion / fail of this task. Since this task holds", "same env_spec @property def tasks(self) -> Sequence[Task]: \"\"\" Get the list of tasks.", "import pyrado from pyrado.spaces.base import Space from pyrado.utils.data_types import EnvSpec from pyrado.tasks.base import", "all tasks. 
\"\"\" self.idx_curr = 0 for s in self._tasks: s.reset(**kwargs) # Reset", "time steps left in the episode :param verbose: print messages on success or", "pyrado.TypeErr(given=state_des, expected_type=np.ndarray) self._tasks[self._idx_curr].state_des = state_des @property def space_des(self) -> Space: \"\"\" Get the", "done tasks if self.hold_rew_when_done: self.held_rews = np.zeros(len(self)) # doesn't work with start_idx def", "self.verbose = verbose def __len__(self) -> int: return len(self._tasks) @property def env_spec(self) ->", "# check off tasks which are before the start task self.hold_rew_when_done = hold_rew_when_done", "current state :param act: current action :param remaining_steps: number of time steps left", "done tasks self.succeeded_tasks = np.full(len(self), False, dtype=bool) self.failed_tasks = np.full(len(self), False, dtype=bool) if", "= True if verbose: print_cbt(f'Task {self._idx_curr} has failed (is done) at state {state}',", "or self.failed_tasks[i]: # Add the last reward from every done task (also true", "act: np.ndarray, remaining_steps: int, verbose: bool = False) -> float: \"\"\" Check if", "from typing import Sequence import pyrado from pyrado.spaces.base import Space from pyrado.utils.data_types import", "= np.full(len(self), False, dtype=bool) self.failed_tasks = np.full(len(self), False, dtype=bool) if 'start_idx' in kwargs:", ":param remaining_steps: number of time steps left in the episode :return: final reward", "return of the current subtask \"\"\" if not self.succeeded_tasks[self._idx_curr] and not self.failed_tasks[self._idx_curr] and", "task_final_rew def has_succeeded(self, state: np.ndarray) -> bool: \"\"\" Check if this tasks is", "is computed for them, too. .. 
note:: The `ParallelTasks` class is not a", "task is not done # self.logger.add_value('successful tasks', self.successful_tasks) return step_rew + final_rew def", "if not (self.succeeded_tasks[self._idx_curr] or self.failed_tasks[self._idx_curr]): # Only give step reward if current sub-task", ":param verbose: print messages on task completion .. note:: `hold_rew_when_done=True` only makes sense", "the desired space the current task. \"\"\" return self._tasks[self._idx_curr].space_des @space_des.setter def space_des(self, space_des:", "expected_type=Space) self._tasks[self._idx_curr].space_des = space_des @property def rew_fcn(self) -> RewFcn: \"\"\" Get the reward", "= True # Reset the stored reward values for done tasks if self.hold_rew_when_done:", "check list for done tasks self.succeeded_tasks = np.full(len(self), False, dtype=bool) self.failed_tasks = np.full(len(self),", "tasks', self.successful_tasks) return step_rew + final_rew def compute_final_rew(self, state: np.ndarray, remaining_steps: int) ->", "<= idx < len(self)): raise pyrado.ValueErr(given=idx, ge_constraint='0', le_constraint=f'{len(self) - 1}') self._idx_curr = idx", "import Sequence import pyrado from pyrado.spaces.base import Space from pyrado.utils.data_types import EnvSpec from", "\"\"\" Set the desired state the current task. \"\"\" if not isinstance(state_des, np.ndarray):", "the list of tasks. \"\"\" return deepcopy(self._tasks) @property def idx_curr(self) -> int: \"\"\"", "verbose: print_cbt(f'Task {self._idx_curr} has failed (is done) at state {state}', 'r') else: raise", "if `True` reward values for done tasks will be stored and added every", "task. \"\"\" return self._idx_curr @idx_curr.setter def idx_curr(self, idx: int): \"\"\" Set the index", "from the current task. \"\"\" step_rew = 0. if self.hold_rew_when_done: for i in", "fail of this task. 
Since this task holds multiple sub-tasks, the final reward", "off successfully completed task self.succeeded_tasks[self._idx_curr] = True if verbose: print_cbt(f'task {self._idx_curr} has succeeded", ":return: final reward of all sub-tasks \"\"\" sum_final_rew = 0. for t in", ":param state: current state of the environment :param remaining_steps: number of time steps", "task holds multiple sub-tasks, the final reward / cost is computed for them,", "np.ndarray): raise pyrado.TypeErr(given=state_des, expected_type=np.ndarray) self._tasks[self._idx_curr].state_des = state_des @property def space_des(self) -> Space: \"\"\"", "t in self._tasks: sum_final_rew += t.compute_final_rew(state, remaining_steps) return sum_final_rew def reset(self, **kwargs): \"\"\"", "`hold_rew_when_done=True` only makes sense for positive rewards. \"\"\" self._tasks = deepcopy(tasks) self._idx_curr =", "task. \"\"\" return self._tasks[self._idx_curr].state_des @state_des.setter def state_des(self, state_des: np.ndarray): \"\"\" Set the desired", "= np.all(self.succeeded_tasks) if successful and self.verbose: print_cbt(f'All {len(self)} sequential sub-tasks are done successfully',", "Space from pyrado.utils.data_types import EnvSpec from pyrado.tasks.base import Task from pyrado.tasks.reward_functions import RewFcn", "current reward if self.hold_rew_when_done: self.held_rews[self._idx_curr] = self._tasks[self._idx_curr].step_rew(state, act, remaining_steps=0) # Give a reward", ":param hold_rew_when_done: if `True` reward values for done tasks will be stored and", "<filename>Pyrado/pyrado/tasks/sequential.py import numpy as np from copy import deepcopy from typing import Sequence", "previous tasks if self.succeeded_tasks[i] or self.failed_tasks[i]: # Add the last reward from every", "= self._is_curr_task_done(state, act, remaining_steps) # zero if the task is not done #", "in range(len(self)): # Iterate over previous tasks if self.succeeded_tasks[i] or self.failed_tasks[i]: # Add", "\"\"\" 
Check if the current task is done. If so, move to the", "doesn't work with start_idx def _is_curr_task_done(self, state: np.ndarray, act: np.ndarray, remaining_steps: int, verbose:", "(self._idx_curr + 1) % len(self) else: task_final_rew = 0. return task_final_rew def has_succeeded(self,", "tasks) step_rew += self.held_rews[i] if not (self.succeeded_tasks[self._idx_curr] or self.failed_tasks[self._idx_curr]): # Only give step", "for them, too. .. note:: The `ParallelTasks` class is not a subclass of", "Get the list of tasks. \"\"\" return deepcopy(self._tasks) @property def idx_curr(self) -> int:", "if self.hold_rew_when_done: self.held_rews = np.zeros(len(self)) self.verbose = verbose def __len__(self) -> int: return", "deepcopy(tasks) self._idx_curr = start_idx self.succeeded_tasks = np.full(len(self), False, dtype=bool) self.failed_tasks = np.full(len(self), False,", "\"\"\" Set the index of the currently active task. \"\"\" if not (0", "cost is computed for them, too. .. note:: The `ParallelTasks` class is not", "return self._tasks[0].env_spec # safe to assume that all tasks have the same env_spec", "yet, but is now done if self._tasks[self._idx_curr].has_succeeded(state): # Check off successfully completed task", "done # self.logger.add_value('successful tasks', self.successful_tasks) return step_rew + final_rew def compute_final_rew(self, state: np.ndarray,", "order matters :param start_idx: index of the task to start with, by default", "if self.hold_rew_when_done: for i in range(len(self)): # Iterate over previous tasks if self.succeeded_tasks[i]", "the current subtask \"\"\" if not self.succeeded_tasks[self._idx_curr] and not self.failed_tasks[self._idx_curr] and self._tasks[self._idx_curr].is_done(state): #", "too. .. 
note:: The `ParallelTasks` class is not a subclass of `TaskWrapper`, i.e.", "but is done!') # Memorize current reward if self.hold_rew_when_done: self.held_rews[self._idx_curr] = self._tasks[self._idx_curr].step_rew(state, act,", "final return of the current subtask \"\"\" if not self.succeeded_tasks[self._idx_curr] and not self.failed_tasks[self._idx_curr]", "self.succeeded_tasks[i] or self.failed_tasks[i]: # Add the last reward from every done task (also", "to start with, by default with the first one in the list :param", "if self._tasks[self._idx_curr].has_succeeded(state): # Check off successfully completed task self.succeeded_tasks[self._idx_curr] = True if verbose:", "goals, the order matters :param start_idx: index of the task to start with,", "done tasks will be stored and added every step :param verbose: print messages", "not done # self.logger.add_value('successful tasks', self.successful_tasks) return step_rew + final_rew def compute_final_rew(self, state:", "episode :param verbose: print messages on success or failure :return: final return of", "if not self.succeeded_tasks[self._idx_curr] and not self.failed_tasks[self._idx_curr] and self._tasks[self._idx_curr].is_done(state): # Task has not been", "sequence of tasks a.k.a. goals \"\"\" def __init__(self, tasks: Sequence[Task], start_idx: int =", "import Task from pyrado.tasks.reward_functions import RewFcn from pyrado.utils.input_output import print_cbt class SequentialTasks(Task): \"\"\"", "idx_curr(self) -> int: \"\"\" Get the index of the currently active task. \"\"\"", "in the episode :param verbose: print messages on success or failure :return: final", "\"\"\" Constructor :param tasks: sequence of tasks a.k.a. 
goals, the order matters :param", "__init__(self, tasks: Sequence[Task], start_idx: int = 0, hold_rew_when_done: bool = False, verbose: bool", "if not isinstance(state_des, np.ndarray): raise pyrado.TypeErr(given=state_des, expected_type=np.ndarray) self._tasks[self._idx_curr].state_des = state_des @property def space_des(self)", "0. for t in self._tasks: sum_final_rew += t.compute_final_rew(state, remaining_steps) return sum_final_rew def reset(self,", "\"\"\" Compute the reward / cost on task completion / fail of this", "for a sequence of tasks a.k.a. goals \"\"\" def __init__(self, tasks: Sequence[Task], start_idx:", "if current sub-task is active step_rew += self._tasks[self._idx_curr].step_rew(state, act, remaining_steps) final_rew = self._is_curr_task_done(state,", "0. if self.hold_rew_when_done: for i in range(len(self)): # Iterate over previous tasks if", "@idx_curr.setter def idx_curr(self, idx: int): \"\"\" Set the index of the currently active", "reward / cost on task completion / fail of this task. Since this", "-> float: \"\"\" Compute the reward / cost on task completion / fail", "/ cost is computed for them, too. .. note:: The `ParallelTasks` class is", "final reward / cost is computed for them, too. .. note:: The `ParallelTasks`", "= (self._idx_curr + 1) % len(self) else: task_final_rew = 0. return task_final_rew def", "from every done task (also true for failed tasks) step_rew += self.held_rews[i] if", "check off tasks which are before the start task self.hold_rew_when_done = hold_rew_when_done if", "RewFcn: \"\"\" Get the reward function of the current task. \"\"\" return self._tasks[self._idx_curr].rew_fcn", "\"\"\" Get the desired state the current task. \"\"\" return self._tasks[self._idx_curr].state_des @state_des.setter def", "def reset(self, **kwargs): \"\"\" Reset all tasks. \"\"\" self.idx_curr = 0 for s", "move to the next one and return the final reward of this task.", "tasks is done. 
The SequentialTasks is successful if all sub-tasks are successful. :param", "raise pyrado.ValueErr(msg=f'Task {self._idx_curr} neither succeeded or failed but is done!') # Memorize current", "def env_spec(self) -> EnvSpec: return self._tasks[0].env_spec # safe to assume that all tasks", "current state :return: `True` if succeeded \"\"\" successful = np.all(self.succeeded_tasks) if successful and", "# Check off successfully completed task self.succeeded_tasks[self._idx_curr] = True if verbose: print_cbt(f'task {self._idx_curr}", "for done tasks if self.hold_rew_when_done: self.held_rews = np.zeros(len(self)) # doesn't work with start_idx", "\"\"\" return self._tasks[self._idx_curr].state_des @state_des.setter def state_des(self, state_des: np.ndarray): \"\"\" Set the desired state", "work with start_idx def _is_curr_task_done(self, state: np.ndarray, act: np.ndarray, remaining_steps: int, verbose: bool", "-> np.ndarray: \"\"\" Get the desired state the current task. \"\"\" return self._tasks[self._idx_curr].state_des", "self.succeeded_tasks = np.full(len(self), False, dtype=bool) self.failed_tasks = np.full(len(self), False, dtype=bool) if 'start_idx' in", "which are before the start task self.hold_rew_when_done = hold_rew_when_done if self.hold_rew_when_done: self.held_rews =", "remaining_steps: int) -> float: \"\"\" Compute the reward / cost on task completion", "safe to assume that all tasks have the same env_spec @property def tasks(self)", "self.held_rews = np.zeros(len(self)) # doesn't work with start_idx def _is_curr_task_done(self, state: np.ndarray, act:", "state: np.ndarray) -> bool: \"\"\" Check if this tasks is done. 
The SequentialTasks", "successful = np.all(self.succeeded_tasks) if successful and self.verbose: print_cbt(f'All {len(self)} sequential sub-tasks are done", "# Memorize current reward if self.hold_rew_when_done: self.held_rews[self._idx_curr] = self._tasks[self._idx_curr].step_rew(state, act, remaining_steps=0) # Give", "Only give step reward if current sub-task is active step_rew += self._tasks[self._idx_curr].step_rew(state, act,", "np.full(len(self), False, dtype=bool) self.succeeded_tasks[:start_idx] = True # check off tasks which are before", "= idx @property def state_des(self) -> np.ndarray: \"\"\" Get the desired state the", "task self.idx_curr = (self._idx_curr + 1) % len(self) else: task_final_rew = 0. return", "-> int: \"\"\" Get the index of the currently active task. \"\"\" return", "immediate sub-tasks. :param state: current state of the environment :param remaining_steps: number of", "step_rew(self, state: np.ndarray, act: np.ndarray, remaining_steps: int) -> float: \"\"\" Get the step", "= np.zeros(len(self)) self.verbose = verbose def __len__(self) -> int: return len(self._tasks) @property def", "step_rew = 0. if self.hold_rew_when_done: for i in range(len(self)): # Iterate over previous", "the reward / cost on task completion / fail of this task. Since", "this tasks is done. The SequentialTasks is successful if all sub-tasks are successful.", "start_idx self.succeeded_tasks = np.full(len(self), False, dtype=bool) self.failed_tasks = np.full(len(self), False, dtype=bool) self.succeeded_tasks[:start_idx] =", "task. \"\"\" step_rew = 0. if self.hold_rew_when_done: for i in range(len(self)): # Iterate", "if the current task is done. If so, move to the next one", "self.held_rews[self._idx_curr] = self._tasks[self._idx_curr].step_rew(state, act, remaining_steps=0) # Give a reward for completing the task", "environments current state :return: `True` if succeeded \"\"\" successful = np.all(self.succeeded_tasks) if successful", "reward of this task. 
:param state: current state :param act: current action :param", "state_des: np.ndarray): \"\"\" Set the desired state the current task. \"\"\" if not", "\"\"\" Set the desired space the current task. \"\"\" if not isinstance(space_des, Space):", "np.ndarray, remaining_steps: int, verbose: bool = False) -> float: \"\"\" Check if the", "Iterate over previous tasks if self.succeeded_tasks[i] or self.failed_tasks[i]: # Add the last reward", "The `ParallelTasks` class is not a subclass of `TaskWrapper`, i.e. this function only", "Set the desired state the current task. \"\"\" if not isinstance(state_des, np.ndarray): raise", "expected_type=np.ndarray) self._tasks[self._idx_curr].state_des = state_des @property def space_des(self) -> Space: \"\"\" Get the desired", "t.compute_final_rew(state, remaining_steps) return sum_final_rew def reset(self, **kwargs): \"\"\" Reset all tasks. \"\"\" self.idx_curr", "start_idx: int = 0, hold_rew_when_done: bool = False, verbose: bool = False): \"\"\"", "__len__(self) -> int: return len(self._tasks) @property def env_spec(self) -> EnvSpec: return self._tasks[0].env_spec #", "a reward for completing the task defined by the task task_final_rew = self._tasks[self._idx_curr].final_rew(state,", "pyrado.utils.input_output import print_cbt class SequentialTasks(Task): \"\"\" Task class for a sequence of tasks", "task. \"\"\" if not isinstance(state_des, np.ndarray): raise pyrado.TypeErr(given=state_des, expected_type=np.ndarray) self._tasks[self._idx_curr].state_des = state_des @property", "self._tasks: sum_final_rew += t.compute_final_rew(state, remaining_steps) return sum_final_rew def reset(self, **kwargs): \"\"\" Reset all", "episode :return: final reward of all sub-tasks \"\"\" sum_final_rew = 0. for t", "the current task. \"\"\" return self._tasks[self._idx_curr].rew_fcn def step_rew(self, state: np.ndarray, act: np.ndarray, remaining_steps:" ]
[ "as binary strings # sample long integer l = (2**32-2)*(2**32-3)*(2**32-7) # input long", "# create a list ls = [] while l: # extract each byte", "l >>= 8 ls.append(bot) # pad the list up to 32 items ls.extend([0]*(32-len(ls)))", "list bot = l&((1<<8)-1) l >>= 8 ls.append(bot) # pad the list up", "<gh_stars>0 #!/usr/bin/env python2.7 # Deal with long integer encoded as binary strings #", "# build up output data (signature) binstring = [chr(c) for c in ls]", "list ls.reverse() # build up output data (signature) binstring = [chr(c) for c", "store it into the list bot = l&((1<<8)-1) l >>= 8 ls.append(bot) #", "data (signature) binstring = [chr(c) for c in ls] # output debug string", "[chr(c) for c in ls] # output debug string print \"(%d) %s\" %", "(signature) binstring = [chr(c) for c in ls] # output debug string print", "long integer and store it into the list bot = l&((1<<8)-1) l >>=", "python2.7 # Deal with long integer encoded as binary strings # sample long", "for c in ls] # output debug string print \"(%d) %s\" % (len(binstring),", "extract each byte of the long integer and store it into the list", "and store it into the list bot = l&((1<<8)-1) l >>= 8 ls.append(bot)", "the list ls.reverse() # build up output data (signature) binstring = [chr(c) for", "long integer encoded as binary strings # sample long integer l = (2**32-2)*(2**32-3)*(2**32-7)", "strings # sample long integer l = (2**32-2)*(2**32-3)*(2**32-7) # input long integer print", "32 items ls.extend([0]*(32-len(ls))) # reverse the list ls.reverse() # build up output data", "create a list ls = [] while l: # extract each byte of", "up output data (signature) binstring = [chr(c) for c in ls] # output", "integer print \"%x\" % l # create a list ls = [] while", "ls.reverse() # build up output data (signature) binstring = [chr(c) for c in", "to 32 items ls.extend([0]*(32-len(ls))) # reverse the list ls.reverse() # build up output", "# input long integer print \"%x\" % l # create a list ls", "pad the list 
up to 32 items ls.extend([0]*(32-len(ls))) # reverse the list ls.reverse()", "items ls.extend([0]*(32-len(ls))) # reverse the list ls.reverse() # build up output data (signature)", "l&((1<<8)-1) l >>= 8 ls.append(bot) # pad the list up to 32 items", "encoded as binary strings # sample long integer l = (2**32-2)*(2**32-3)*(2**32-7) # input", "ls.append(bot) # pad the list up to 32 items ls.extend([0]*(32-len(ls))) # reverse the", "debug string print \"(%d) %s\" % (len(binstring), ''.join('%x' % c for c in", "8 ls.append(bot) # pad the list up to 32 items ls.extend([0]*(32-len(ls))) # reverse", "l # create a list ls = [] while l: # extract each", "into the list bot = l&((1<<8)-1) l >>= 8 ls.append(bot) # pad the", "it into the list bot = l&((1<<8)-1) l >>= 8 ls.append(bot) # pad", "# reverse the list ls.reverse() # build up output data (signature) binstring =", "# sample long integer l = (2**32-2)*(2**32-3)*(2**32-7) # input long integer print \"%x\"", "integer encoded as binary strings # sample long integer l = (2**32-2)*(2**32-3)*(2**32-7) #", "list ls = [] while l: # extract each byte of the long", "the long integer and store it into the list bot = l&((1<<8)-1) l", "of the long integer and store it into the list bot = l&((1<<8)-1)", "ls] # output debug string print \"(%d) %s\" % (len(binstring), ''.join('%x' % c", "string print \"(%d) %s\" % (len(binstring), ''.join('%x' % c for c in ls))", "# extract each byte of the long integer and store it into the", "#!/usr/bin/env python2.7 # Deal with long integer encoded as binary strings # sample", "integer and store it into the list bot = l&((1<<8)-1) l >>= 8", "in ls] # output debug string print \"(%d) %s\" % (len(binstring), ''.join('%x' %", "ls.extend([0]*(32-len(ls))) # reverse the list ls.reverse() # build up output data (signature) binstring", "binstring = [chr(c) for c in ls] # output debug string print \"(%d)", "[] while l: # extract each byte of the long integer and store", "sample long integer l = 
(2**32-2)*(2**32-3)*(2**32-7) # input long integer print \"%x\" %", "while l: # extract each byte of the long integer and store it", "= l&((1<<8)-1) l >>= 8 ls.append(bot) # pad the list up to 32", "= (2**32-2)*(2**32-3)*(2**32-7) # input long integer print \"%x\" % l # create a", "the list up to 32 items ls.extend([0]*(32-len(ls))) # reverse the list ls.reverse() #", "ls = [] while l: # extract each byte of the long integer", "print \"%x\" % l # create a list ls = [] while l:", "= [chr(c) for c in ls] # output debug string print \"(%d) %s\"", "binary strings # sample long integer l = (2**32-2)*(2**32-3)*(2**32-7) # input long integer", "# Deal with long integer encoded as binary strings # sample long integer", "reverse the list ls.reverse() # build up output data (signature) binstring = [chr(c)", "Deal with long integer encoded as binary strings # sample long integer l", "\"%x\" % l # create a list ls = [] while l: #", "l: # extract each byte of the long integer and store it into", "c in ls] # output debug string print \"(%d) %s\" % (len(binstring), ''.join('%x'", "output debug string print \"(%d) %s\" % (len(binstring), ''.join('%x' % c for c", "= [] while l: # extract each byte of the long integer and", "l = (2**32-2)*(2**32-3)*(2**32-7) # input long integer print \"%x\" % l # create", "# pad the list up to 32 items ls.extend([0]*(32-len(ls))) # reverse the list", "each byte of the long integer and store it into the list bot", "bot = l&((1<<8)-1) l >>= 8 ls.append(bot) # pad the list up to", "byte of the long integer and store it into the list bot =", "input long integer print \"%x\" % l # create a list ls =", "integer l = (2**32-2)*(2**32-3)*(2**32-7) # input long integer print \"%x\" % l #", "long integer print \"%x\" % l # create a list ls = []", "# output debug string print \"(%d) %s\" % (len(binstring), ''.join('%x' % c for", "with long integer encoded as binary strings # sample long integer l =", "long integer l = (2**32-2)*(2**32-3)*(2**32-7) # input long 
integer print \"%x\" % l", "build up output data (signature) binstring = [chr(c) for c in ls] #", "output data (signature) binstring = [chr(c) for c in ls] # output debug", "a list ls = [] while l: # extract each byte of the", "up to 32 items ls.extend([0]*(32-len(ls))) # reverse the list ls.reverse() # build up", "list up to 32 items ls.extend([0]*(32-len(ls))) # reverse the list ls.reverse() # build", "(2**32-2)*(2**32-3)*(2**32-7) # input long integer print \"%x\" % l # create a list", ">>= 8 ls.append(bot) # pad the list up to 32 items ls.extend([0]*(32-len(ls))) #", "the list bot = l&((1<<8)-1) l >>= 8 ls.append(bot) # pad the list", "% l # create a list ls = [] while l: # extract" ]
[ "start_num_points = len(self.summary_scores) xdata = np.array([x for x in range(1, start_num_points)]) self.lines =", "right number of times for breed_count in range(available[0][1], self.breeding_times): try: # try to", "# if the partner's bred the requisite number of times, remove them from", "= self.population_size \"\"\"Removes the bottom scorers of the population until the population fits", "= self.breeding_times self.offspring = [] for pop_num in range(size): for breed_num in range(times):", "basics import random import math import matplotlib.pyplot as plt import numpy as np", "in range(3)] else: self.lines = [self.ax.plot([], [])[0] for num in range(3)] self.ax.relim() self.ax.autoscale_view(True,", "reproduce to make a mutated offspring def __init__(self, key=None): self.key = key def", "import basics import random import math import matplotlib.pyplot as plt import numpy as", "node self.scorer = basics.ngram_score('english_trigrams.txt', 'english_quadgrams.txt') self.base_score = self.scorer.score(basics.generate_random_text(len(text))) self.english_score = self.scorer.score(basics.generate_english_text(len(text))) self.cycles_count =", "line.set_xdata(np.append(line.get_xdata(), new_data[0])) line.set_ydata(np.append(line.get_ydata(), new_data[1])) def update_graph(self): for num in range(len(self.lines)): self.update_line(self.lines[num], [self.cycles_count, self.summary_scores[-1][num]])", "x in population_ranking[-size:]] # The actual scores, with the same indices as their", "new_data[1])) def update_graph(self): for num in range(len(self.lines)): self.update_line(self.lines[num], [self.cycles_count, self.summary_scores[-1][num]]) self.ax.relim() self.ax.autoscale_view(True, True,", "cycles of breed and cull for num in range(ntimes): self.cycles_count += 1 self.breed()", "as the base, breed them with random partners # in available, then remove", "score, algorithm, *args, graphing=False, **kwargs): \"\"\"Makes n algorithms, returns the avg time for", 
"of times, remove them from available if choice[1] == self.breeding_times: available.remove(choice) # remove", "type of node self.scorer = basics.ngram_score('english_trigrams.txt', 'english_quadgrams.txt') self.base_score = self.scorer.score(basics.generate_random_text(len(text))) self.english_score = self.scorer.score(basics.generate_english_text(len(text)))", "history self.summary_scores = [] # stores min max mean median self.node = node_class", "breed_num in range(times): self.offspring.append(self.population[pop_num].reproduce()) # archive the parent generation, make the new population", "everyone's bred, break the loop if len(available) == 0: break # archive the", "remove our start node from available del(available[0]) # if everyone's bred, break the", "class node(): # Has a keyword that defines it's means ofo decrypting the", "#mutations/changes, can cull to select for best english scoring offspring def __init__(self, text,", "the loop if len(available) == 0: break # archive the parent generation, make", "self.past_generations = [] self.complete_scores = [] # stores the complete score history self.summary_scores", "# in available, then remove first node from available # range(...) 
ensures we", "num in range(n): print('{0} out of {1}:'.format(num+1, n), end='') prof.runctx('algorithms[num].run_to_score(score)', globals(), locals()) if", "# Initializes the population with size self.population, hopefully near to endpoint pass def", "= False #When turned on, cull() passes new scores to the graph #self.initialize_population()", "as plt import numpy as np import cProfile import pstats import time class", "cull for num in range(ntimes): self.cycles_count += 1 self.breed() self.cull() def run_to_score(self, score):", "# From each node in population we get [node_index, node_score] in population_ranking population_ranking", "size=None): #size is the final size (post culling) of the population if size", "in range(n): algorithms.append(algorithm(*args, **kwargs)) if graphing: algorithms[-1].initialize_graph() prof = cProfile.Profile() for num in", "**kwargs): \"\"\"Makes n algorithms, returns the avg time for them to run to", "self.cycles_count += 1 self.breed() self.cull() def run_to_score(self, score): # Keeps cycling until the", "plt.show() def update_line(self, line, new_data): #Given a line and new_data of the form", "will increment each time a node breeds, until it reaches breeding_times available =", "#size is the final size (post culling) of the population if size ==", "cycling until the latest population's mean score is greater than score while True:", "try: # try to choose a partner from those in available choice =", "#Sometimes the last guy gets left out #print('ruh roh') choice = [random.choice(self.population), -1]", "self.ax = plt.gca() if len(self.summary_scores) > 0: start_num_points = len(self.summary_scores) xdata = np.array([x", "self.ranking[math.floor(0.05*size)] toppercentile = self.ranking[math.floor(0.95*size)] median = self.ranking[math.ceil(size/2)] self.summary_scores.append([botpercentile, median, toppercentile]) # if graphing", "self.text = text self.breeding_times = breeding_times # how many times each parent will", "Keeps 
cycling until the latest population's mean score is greater than score while", "self.offspring.append(available[0][0].reproduce(choice[0])) # increase the partner's breed count by one choice[1] += 1 #", "the right number of times for breed_count in range(available[0][1], self.breeding_times): try: # try", "in population_ranking[-size:]] #score keeping self.complete_scores.append(self.ranking) botpercentile = self.ranking[math.floor(0.05*size)] toppercentile = self.ranking[math.floor(0.95*size)] median =", "mutated offspring def __init__(self, key=None): self.key = key def reproduce(self): pass class algorithm():", "range(3)] self.ax.relim() self.ax.autoscale_view(True, True, True) plt.ion() plt.show() def update_line(self, line, new_data): #Given a", "def initialize_graph(self): self.graphing = True self.ax = plt.gca() if len(self.summary_scores) > 0: start_num_points", "the graph if self.graphing == True: self.update_graph() class genetic_algorithm(algorithm): def breed(self, size=None, times=None):", "times = self.breeding_times self.offspring = [] # 0 will increment each time a", "= [random.choice(self.population), -1] # breed with the chosen partner self.offspring.append(available[0][0].reproduce(choice[0])) # increase the", "#When turned on, cull() passes new scores to the graph #self.initialize_population() def initialize_graph(self):", "# if graphing is turned on, send the new data to the graph", "= basics.ngram_score('english_trigrams.txt', 'english_quadgrams.txt') self.base_score = self.scorer.score(basics.generate_random_text(len(text))) self.english_score = self.scorer.score(basics.generate_english_text(len(text))) self.cycles_count = 0 self.graphing", "in range(ntimes): self.cycles_count += 1 self.breed() self.cull() def run_to_score(self, score): # Keeps cycling", "of newly bred offspring, randomly selecting who pairs with who\"\"\" if size ==", "self.population_size if times == None: times = self.breeding_times self.offspring = [] for pop_num", "np 
import cProfile import pstats import time class node(): # Has a keyword", "True) plt.ion() plt.show() def update_line(self, line, new_data): #Given a line and new_data of", "partner from those in available choice = random.choice(available[1:]) except IndexError: #Sometimes the last", "the latest population's mean score is greater than score while True: self.cycle() if", "== None: size = self.population_size \"\"\"Removes the bottom scorers of the population until", "out of {1}:'.format(num+1, n), end='') prof.runctx('algorithms[num].run_to_score(score)', globals(), locals()) if graphing: for line in", "len(available) == 0: break # archive the parent generation, make the new population", "= self.offspring def cull(self, size=None): #size is the final size (post culling) of", "in range(len(self.lines)): self.update_line(self.lines[num], [self.cycles_count, self.summary_scores[-1][num]]) self.ax.relim() self.ax.autoscale_view(True, True, True) plt.draw() plt.pause(0.01) def initialize_population(self):", "#has a population of nodes with keywords, can breed to make offspring with", "size (post culling) of the population if size == None: size = self.population_size", "def __init__(self, text, population_size, breeding_times, node_class): self.text = text self.breeding_times = breeding_times #", "self.breeding_times: available.remove(choice) # remove our start node from available del(available[0]) # if everyone's", "node_class): self.text = text self.breeding_times = breeding_times # how many times each parent", "node in available as the base, breed them with random partners # in", "if everyone's bred, break the loop if len(available) == 0: break # archive", "choice[1] == self.breeding_times: available.remove(choice) # remove our start node from available del(available[0]) #", "offspring. 
self.past_generations.append(self.population) self.population = self.offspring def algorithm_avg_time(n, score, algorithm, *args, graphing=False, **kwargs): \"\"\"Makes", "whole load of newly bred offspring, randomly selecting who pairs with who\"\"\" if", "them from available if choice[1] == self.breeding_times: available.remove(choice) # remove our start node", "in available choice = random.choice(available[1:]) except IndexError: #Sometimes the last guy gets left", "each time a node breeds, until it reaches breeding_times available = [[x, 0]", "random partners # in available, then remove first node from available # range(...)", "len(self.summary_scores) > 0: start_num_points = len(self.summary_scores) xdata = np.array([x for x in range(1,", "node from available # range(...) ensures we breed the right number of times", "len(self.summary_scores) xdata = np.array([x for x in range(1, start_num_points)]) self.lines = [self.ax.plot(xdata, [score[num]", "new_data of the form [new_x, new_y], adds on the new values line.set_xdata(np.append(line.get_xdata(), new_data[0]))", "data to the graph if self.graphing == True: self.update_graph() class genetic_algorithm(algorithm): def breed(self,", "select for best english scoring offspring def __init__(self, text, population_size, breeding_times, node_class): self.text", "= cProfile.Profile() for num in range(n): print('{0} out of {1}:'.format(num+1, n), end='') prof.runctx('algorithms[num].run_to_score(score)',", "genetic_algorithm(algorithm): def breed(self, size=None, times=None): \"\"\"Replaces self.population with a whole load of newly", "num in range(ntimes): self.cycles_count += 1 self.breed() self.cull() def run_to_score(self, score): # Keeps", "x: x[1]) # sort by score from lowest to highest # The new", "for num in range(n): print('{0} out of {1}:'.format(num+1, n), end='') prof.runctx('algorithms[num].run_to_score(score)', globals(), locals())", "cullsize): for num in range(num_cycles_large): for num in 
range(num_cycles_small): self.breed(size=len(self.population), times=breeding_times) self.cull(size=cullsize) def", "for num in range(len(self.lines)): self.update_line(self.lines[num], [self.cycles_count, self.summary_scores[-1][num]]) self.ax.relim() self.ax.autoscale_view(True, True, True) plt.draw() plt.pause(0.01)", "if self.summary_scores[-1][2] > score: break def turnover(self, breeding_times, num_cycles_small, num_cycles_large, cullsize): for num", "== self.breeding_times: available.remove(choice) # remove our start node from available del(available[0]) # if", "is the index of the node self.population = [self.population[x[0]] for x in population_ranking[-size:]]", "who\"\"\" if size == None: size = self.population_size if times == None: times", "# how many times each parent will breed self.population_size = population_size self.population =", "that defines it's means ofo decrypting the text # Can reproduce to make", "self.population_size = population_size self.population = [] self.past_generations = [] self.complete_scores = [] #", "for x in self.population] # who is left available while True: # take", "with keywords, can breed to make offspring with random #mutations/changes, can cull to", "in available as the base, breed them with random partners # in available,", "the offspring. self.past_generations.append(self.population) self.population = self.offspring def algorithm_avg_time(n, score, algorithm, *args, graphing=False, **kwargs):", "generation, make the new population the offspring. 
self.past_generations.append(self.population) self.population = self.offspring def cull(self,", "prof.runctx('algorithms[num].run_to_score(score)', globals(), locals()) if graphing: for line in algorithms[num].lines: line.remove() stats = pstats.Stats()", "population_ranking.sort(key=lambda x: x[1]) # sort by score from lowest to highest # The", "while True: self.cycle() if self.summary_scores[-1][2] > score: break def turnover(self, breeding_times, num_cycles_small, num_cycles_large,", "# stores the complete score history self.summary_scores = [] # stores min max", "then remove first node from available # range(...) ensures we breed the right", "= [] # stores min max mean median self.node = node_class # stores", "[] self.complete_scores = [] # stores the complete score history self.summary_scores = []", "population's mean score is greater than score while True: self.cycle() if self.summary_scores[-1][2] >", "score while True: self.cycle() if self.summary_scores[-1][2] > score: break def turnover(self, breeding_times, num_cycles_small,", "bottom scorers of the population until the population fits population_size\"\"\" # From each", "start_num_points)]) self.lines = [self.ax.plot(xdata, [score[num] for score in self.summary_scores])[0] for num in range(3)]", "= [self.ax.plot([], [])[0] for num in range(3)] self.ax.relim() self.ax.autoscale_view(True, True, True) plt.ion() plt.show()", "keeping self.complete_scores.append(self.ranking) botpercentile = self.ranking[math.floor(0.05*size)] toppercentile = self.ranking[math.floor(0.95*size)] median = self.ranking[math.ceil(size/2)] self.summary_scores.append([botpercentile, median,", "score, given\"\"\" algorithms = [] for num in range(n): algorithms.append(algorithm(*args, **kwargs)) if graphing:", "True, True) plt.draw() plt.pause(0.01) def initialize_population(self): # Initializes the population with size self.population,", "in range(size): for breed_num in range(times): 
self.offspring.append(self.population[pop_num].reproduce()) # archive the parent generation, make", "for them to run to a score, given\"\"\" algorithms = [] for num", "choice = [random.choice(self.population), -1] # breed with the chosen partner self.offspring.append(available[0][0].reproduce(choice[0])) # increase", "Can reproduce to make a mutated offspring def __init__(self, key=None): self.key = key", "self.english_score = self.scorer.score(basics.generate_english_text(len(text))) self.cycles_count = 0 self.graphing = False #When turned on, cull()", "__init__(self, key=None): self.key = key def reproduce(self): pass class algorithm(): #has a population", "if times == None: times = self.breeding_times self.offspring = [] for pop_num in", "pop_num in range(size): for breed_num in range(times): self.offspring.append(self.population[pop_num].reproduce()) # archive the parent generation,", "latest population's mean score is greater than score while True: self.cycle() if self.summary_scores[-1][2]", "culling) of the population if size == None: size = self.population_size \"\"\"Removes the", "[x[1] for x in population_ranking[-size:]] #score keeping self.complete_scores.append(self.ranking) botpercentile = self.ranking[math.floor(0.05*size)] toppercentile =", "**kwargs)) if graphing: algorithms[-1].initialize_graph() prof = cProfile.Profile() for num in range(n): print('{0} out", "== None: size = self.population_size if times == None: times = self.breeding_times self.offspring", "node_score] in population_ranking population_ranking = [[x, self.score(self.population[x])] for x in \\ range(len(self.population))] population_ranking.sort(key=lambda", "# Does ntimes cycles of breed and cull for num in range(ntimes): self.cycles_count", "final size (post culling) of the population if size == None: size =", "hopefully near to endpoint pass def score(self, my_node): return(self.scorer.score(self.decrypt(self.text, my_node.key))) def decrypt(self, text,", 
"population_size\"\"\" # From each node in population we get [node_index, node_score] in population_ranking", "range(num_cycles_small): self.breed(size=len(self.population), times=breeding_times) self.cull(size=cullsize) def breed(self, size=None, times=None): \"\"\"Replaces self.population with a whole", "bred, break the loop if len(available) == 0: break # archive the parent", "self.summary_scores[-1][2] > score: break def turnover(self, breeding_times, num_cycles_small, num_cycles_large, cullsize): for num in", "times=breeding_times) self.cull(size=cullsize) def breed(self, size=None, times=None): \"\"\"Replaces self.population with a whole load of", "the partner's bred the requisite number of times, remove them from available if", "> score: break def turnover(self, breeding_times, num_cycles_small, num_cycles_large, cullsize): for num in range(num_cycles_large):", "the chosen partner self.offspring.append(available[0][0].reproduce(choice[0])) # increase the partner's breed count by one choice[1]", "of node self.scorer = basics.ngram_score('english_trigrams.txt', 'english_quadgrams.txt') self.base_score = self.scorer.score(basics.generate_random_text(len(text))) self.english_score = self.scorer.score(basics.generate_english_text(len(text))) self.cycles_count", "the population with size self.population, hopefully near to endpoint pass def score(self, my_node):", "= [self.ax.plot(xdata, [score[num] for score in self.summary_scores])[0] for num in range(3)] else: self.lines", "complete score history self.summary_scores = [] # stores min max mean median self.node", "= self.ranking[math.floor(0.95*size)] median = self.ranking[math.ceil(size/2)] self.summary_scores.append([botpercentile, median, toppercentile]) # if graphing is turned", "breed the right number of times for breed_count in range(available[0][1], self.breeding_times): try: #", "breeding_times # how many times each parent will breed self.population_size = population_size self.population", "self.ax.relim() 
self.ax.autoscale_view(True, True, True) plt.draw() plt.pause(0.01) def initialize_population(self): # Initializes the population with", "last guy gets left out #print('ruh roh') choice = [random.choice(self.population), -1] # breed", "of the population until the population fits population_size\"\"\" # From each node in", "self.scorer.score(basics.generate_english_text(len(text))) self.cycles_count = 0 self.graphing = False #When turned on, cull() passes new", "to endpoint pass def score(self, my_node): return(self.scorer.score(self.decrypt(self.text, my_node.key))) def decrypt(self, text, key): pass", "the final size (post culling) of the population if size == None: size", "initialize_graph(self): self.graphing = True self.ax = plt.gca() if len(self.summary_scores) > 0: start_num_points =", "None: size = self.population_size if times == None: times = self.breeding_times self.offspring =", "size=None, times=None): \"\"\"Replaces self.population with a whole load of newly bred offspring, randomly", "math import matplotlib.pyplot as plt import numpy as np import cProfile import pstats", "new population the offspring. 
self.past_generations.append(self.population) self.population = self.offspring def cull(self, size=None): #size is", "breed with the chosen partner self.offspring.append(available[0][0].reproduce(choice[0])) # increase the partner's breed count by", "breed to make offspring with random #mutations/changes, can cull to select for best", "stores the type of node self.scorer = basics.ngram_score('english_trigrams.txt', 'english_quadgrams.txt') self.base_score = self.scorer.score(basics.generate_random_text(len(text))) self.english_score", "num in range(3)] else: self.lines = [self.ax.plot([], [])[0] for num in range(3)] self.ax.relim()", "[[x, self.score(self.population[x])] for x in \\ range(len(self.population))] population_ranking.sort(key=lambda x: x[1]) # sort by", "xdata = np.array([x for x in range(1, start_num_points)]) self.lines = [self.ax.plot(xdata, [score[num] for", "line and new_data of the form [new_x, new_y], adds on the new values", "random.choice(available[1:]) except IndexError: #Sometimes the last guy gets left out #print('ruh roh') choice", "True, True) plt.ion() plt.show() def update_line(self, line, new_data): #Given a line and new_data", "the new population the offspring. self.past_generations.append(self.population) self.population = self.offspring def algorithm_avg_time(n, score, algorithm,", "left available while True: # take the first node in available as the", "number of times for breed_count in range(available[0][1], self.breeding_times): try: # try to choose", "break # archive the parent generation, make the new population the offspring. 
self.past_generations.append(self.population)", "population_size self.population = [] self.past_generations = [] self.complete_scores = [] # stores the", "number of times, remove them from available if choice[1] == self.breeding_times: available.remove(choice) #", "# x[0] is the index of the node self.population = [self.population[x[0]] for x", "available.remove(choice) # remove our start node from available del(available[0]) # if everyone's bred,", "# breed with the chosen partner self.offspring.append(available[0][0].reproduce(choice[0])) # increase the partner's breed count", "self.breeding_times self.offspring = [] for pop_num in range(size): for breed_num in range(times): self.offspring.append(self.population[pop_num].reproduce())", "passes new scores to the graph #self.initialize_population() def initialize_graph(self): self.graphing = True self.ax", "greater than score while True: self.cycle() if self.summary_scores[-1][2] > score: break def turnover(self,", "randomly selecting who pairs with who\"\"\" if size == None: size = self.population_size", "the graph #self.initialize_population() def initialize_graph(self): self.graphing = True self.ax = plt.gca() if len(self.summary_scores)", "[score[num] for score in self.summary_scores])[0] for num in range(3)] else: self.lines = [self.ax.plot([],", "#Given a line and new_data of the form [new_x, new_y], adds on the", "= self.ranking[math.ceil(size/2)] self.summary_scores.append([botpercentile, median, toppercentile]) # if graphing is turned on, send the", "as np import cProfile import pstats import time class node(): # Has a", "if graphing: algorithms[-1].initialize_graph() prof = cProfile.Profile() for num in range(n): print('{0} out of", "population the offspring. 
self.past_generations.append(self.population) self.population = self.offspring def algorithm_avg_time(n, score, algorithm, *args, graphing=False,", "self.complete_scores.append(self.ranking) botpercentile = self.ranking[math.floor(0.05*size)] toppercentile = self.ranking[math.floor(0.95*size)] median = self.ranking[math.ceil(size/2)] self.summary_scores.append([botpercentile, median, toppercentile])", "range(ntimes): self.cycles_count += 1 self.breed() self.cull() def run_to_score(self, score): # Keeps cycling until", "algorithms, returns the avg time for them to run to a score, given\"\"\"", "for pop_num in range(size): for breed_num in range(times): self.offspring.append(self.population[pop_num].reproduce()) # archive the parent", "== True: self.update_graph() class genetic_algorithm(algorithm): def breed(self, size=None, times=None): \"\"\"Replaces self.population with a", "how many times each parent will breed self.population_size = population_size self.population = []", "self.cull(size=cullsize) def breed(self, size=None, times=None): \"\"\"Replaces self.population with a whole load of newly", "in population_ranking[-size:]] # The actual scores, with the same indices as their node", "it's means ofo decrypting the text # Can reproduce to make a mutated", "in population self.ranking = [x[1] for x in population_ranking[-size:]] #score keeping self.complete_scores.append(self.ranking) botpercentile", "score: break def turnover(self, breeding_times, num_cycles_small, num_cycles_large, cullsize): for num in range(num_cycles_large): for", "self.key = key def reproduce(self): pass class algorithm(): #has a population of nodes", "class algorithm(): #has a population of nodes with keywords, can breed to make", "= [x[1] for x in population_ranking[-size:]] #score keeping self.complete_scores.append(self.ranking) botpercentile = self.ranking[math.floor(0.05*size)] toppercentile", "top population_size guys as ranked # x[0] is the index of the node", "breeds, until it 
reaches breeding_times available = [[x, 0] for x in self.population]", "a line and new_data of the form [new_x, new_y], adds on the new", "[self.ax.plot(xdata, [score[num] for score in self.summary_scores])[0] for num in range(3)] else: self.lines =", "the population if size == None: size = self.population_size \"\"\"Removes the bottom scorers", "# stores min max mean median self.node = node_class # stores the type", "n algorithms, returns the avg time for them to run to a score,", "num_cycles_large, cullsize): for num in range(num_cycles_large): for num in range(num_cycles_small): self.breed(size=len(self.population), times=breeding_times) self.cull(size=cullsize)", "= [] # stores the complete score history self.summary_scores = [] # stores", "turned on, cull() passes new scores to the graph #self.initialize_population() def initialize_graph(self): self.graphing", "in range(3)] self.ax.relim() self.ax.autoscale_view(True, True, True) plt.ion() plt.show() def update_line(self, line, new_data): #Given", "values line.set_xdata(np.append(line.get_xdata(), new_data[0])) line.set_ydata(np.append(line.get_ydata(), new_data[1])) def update_graph(self): for num in range(len(self.lines)): self.update_line(self.lines[num], [self.cycles_count,", "score): # Keeps cycling until the latest population's mean score is greater than", "is turned on, send the new data to the graph if self.graphing ==", "a partner from those in available choice = random.choice(available[1:]) except IndexError: #Sometimes the", "= random.choice(available[1:]) except IndexError: #Sometimes the last guy gets left out #print('ruh roh')", "import matplotlib.pyplot as plt import numpy as np import cProfile import pstats import", "ofo decrypting the text # Can reproduce to make a mutated offspring def", "endpoint pass def score(self, my_node): return(self.scorer.score(self.decrypt(self.text, my_node.key))) def decrypt(self, text, key): pass def", "self.graphing == True: self.update_graph() class 
genetic_algorithm(algorithm): def breed(self, size=None, times=None): \"\"\"Replaces self.population with", "cull to select for best english scoring offspring def __init__(self, text, population_size, breeding_times,", "highest # The new population is the top population_size guys as ranked #", "for num in range(3)] else: self.lines = [self.ax.plot([], [])[0] for num in range(3)]", "times each parent will breed self.population_size = population_size self.population = [] self.past_generations =", "class genetic_algorithm(algorithm): def breed(self, size=None, times=None): \"\"\"Replaces self.population with a whole load of", "num in range(num_cycles_large): for num in range(num_cycles_small): self.breed(size=len(self.population), times=breeding_times) self.cull(size=cullsize) def breed(self, size=None,", "the bottom scorers of the population until the population fits population_size\"\"\" # From", "plt.pause(0.01) def initialize_population(self): # Initializes the population with size self.population, hopefully near to", "numpy as np import cProfile import pstats import time class node(): # Has", "them to run to a score, given\"\"\" algorithms = [] for num in", "who pairs with who\"\"\" if size == None: size = self.population_size if times", "cProfile import pstats import time class node(): # Has a keyword that defines", "= self.ranking[math.floor(0.05*size)] toppercentile = self.ranking[math.floor(0.95*size)] median = self.ranking[math.ceil(size/2)] self.summary_scores.append([botpercentile, median, toppercentile]) # if", "score(self, my_node): return(self.scorer.score(self.decrypt(self.text, my_node.key))) def decrypt(self, text, key): pass def cycle(self, ntimes=1): #", "0 will increment each time a node breeds, until it reaches breeding_times available", "algorithm, *args, graphing=False, **kwargs): \"\"\"Makes n algorithms, returns the avg time for them", "breed_count in range(available[0][1], self.breeding_times): try: # try to choose a partner from those", 
"will breed self.population_size = population_size self.population = [] self.past_generations = [] self.complete_scores =", "for num in range(num_cycles_small): self.breed(size=len(self.population), times=breeding_times) self.cull(size=cullsize) def breed(self, size=None, times=None): \"\"\"Replaces self.population", "= [[x, 0] for x in self.population] # who is left available while", "self.scorer = basics.ngram_score('english_trigrams.txt', 'english_quadgrams.txt') self.base_score = self.scorer.score(basics.generate_random_text(len(text))) self.english_score = self.scorer.score(basics.generate_english_text(len(text))) self.cycles_count = 0", "None: times = self.breeding_times self.offspring = [] # 0 will increment each time", "range(n): algorithms.append(algorithm(*args, **kwargs)) if graphing: algorithms[-1].initialize_graph() prof = cProfile.Profile() for num in range(n):", "reaches breeding_times available = [[x, 0] for x in self.population] # who is", "left out #print('ruh roh') choice = [random.choice(self.population), -1] # breed with the chosen", "# try to choose a partner from those in available choice = random.choice(available[1:])", "the population until the population fits population_size\"\"\" # From each node in population", "node(): # Has a keyword that defines it's means ofo decrypting the text", "Initializes the population with size self.population, hopefully near to endpoint pass def score(self,", "range(size): for breed_num in range(times): self.offspring.append(self.population[pop_num].reproduce()) # archive the parent generation, make the", "the type of node self.scorer = basics.ngram_score('english_trigrams.txt', 'english_quadgrams.txt') self.base_score = self.scorer.score(basics.generate_random_text(len(text))) self.english_score =", "range(len(self.population))] population_ranking.sort(key=lambda x: x[1]) # sort by score from lowest to highest #", "available # range(...) 
ensures we breed the right number of times for breed_count", "num_cycles_small, num_cycles_large, cullsize): for num in range(num_cycles_large): for num in range(num_cycles_small): self.breed(size=len(self.population), times=breeding_times)", "gets left out #print('ruh roh') choice = [random.choice(self.population), -1] # breed with the", "graphing=False, **kwargs): \"\"\"Makes n algorithms, returns the avg time for them to run", "self.population_size \"\"\"Removes the bottom scorers of the population until the population fits population_size\"\"\"", "selecting who pairs with who\"\"\" if size == None: size = self.population_size if", "breed count by one choice[1] += 1 # if the partner's bred the", "archive the parent generation, make the new population the offspring. self.past_generations.append(self.population) self.population =", "globals(), locals()) if graphing: for line in algorithms[num].lines: line.remove() stats = pstats.Stats() stats.add(prof)", "import math import matplotlib.pyplot as plt import numpy as np import cProfile import", "True: self.update_graph() class genetic_algorithm(algorithm): def breed(self, size=None, times=None): \"\"\"Replaces self.population with a whole", "#self.initialize_population() def initialize_graph(self): self.graphing = True self.ax = plt.gca() if len(self.summary_scores) > 0:", "[self.ax.plot([], [])[0] for num in range(3)] self.ax.relim() self.ax.autoscale_view(True, True, True) plt.ion() plt.show() def", "decrypting the text # Can reproduce to make a mutated offspring def __init__(self,", "the partner's breed count by one choice[1] += 1 # if the partner's", "The actual scores, with the same indices as their node counterparts in population", "from those in available choice = random.choice(available[1:]) except IndexError: #Sometimes the last guy", "parent generation, make the new population the offspring. self.past_generations.append(self.population) self.population = self.offspring def", "offspring. 
self.past_generations.append(self.population) self.population = self.offspring def cull(self, size=None): #size is the final size", "guys as ranked # x[0] is the index of the node self.population =", "avg time for them to run to a score, given\"\"\" algorithms = []", "def algorithm_avg_time(n, score, algorithm, *args, graphing=False, **kwargs): \"\"\"Makes n algorithms, returns the avg", "bred offspring, randomly selecting who pairs with who\"\"\" if size == None: size", "score from lowest to highest # The new population is the top population_size", "except IndexError: #Sometimes the last guy gets left out #print('ruh roh') choice =", "my_node.key))) def decrypt(self, text, key): pass def cycle(self, ntimes=1): # Does ntimes cycles", "guy gets left out #print('ruh roh') choice = [random.choice(self.population), -1] # breed with", "#score keeping self.complete_scores.append(self.ranking) botpercentile = self.ranking[math.floor(0.05*size)] toppercentile = self.ranking[math.floor(0.95*size)] median = self.ranking[math.ceil(size/2)] self.summary_scores.append([botpercentile,", "with who\"\"\" if size == None: size = self.population_size if times == None:", "to the graph #self.initialize_population() def initialize_graph(self): self.graphing = True self.ax = plt.gca() if", "is left available while True: # take the first node in available as", "True: # take the first node in available as the base, breed them", "times, remove them from available if choice[1] == self.breeding_times: available.remove(choice) # remove our", "the complete score history self.summary_scores = [] # stores min max mean median", "remove first node from available # range(...) 
ensures we breed the right number", "num in range(len(self.lines)): self.update_line(self.lines[num], [self.cycles_count, self.summary_scores[-1][num]]) self.ax.relim() self.ax.autoscale_view(True, True, True) plt.draw() plt.pause(0.01) def", "algorithms[-1].initialize_graph() prof = cProfile.Profile() for num in range(n): print('{0} out of {1}:'.format(num+1, n),", "breeding_times, node_class): self.text = text self.breeding_times = breeding_times # how many times each", "self.score(self.population[x])] for x in \\ range(len(self.population))] population_ranking.sort(key=lambda x: x[1]) # sort by score", "= self.offspring def algorithm_avg_time(n, score, algorithm, *args, graphing=False, **kwargs): \"\"\"Makes n algorithms, returns", "__init__(self, text, population_size, breeding_times, node_class): self.text = text self.breeding_times = breeding_times # how", "scores, with the same indices as their node counterparts in population self.ranking =", "True) plt.draw() plt.pause(0.01) def initialize_population(self): # Initializes the population with size self.population, hopefully", "text, population_size, breeding_times, node_class): self.text = text self.breeding_times = breeding_times # how many", "start node from available del(available[0]) # if everyone's bred, break the loop if", "the top population_size guys as ranked # x[0] is the index of the", "0] for x in self.population] # who is left available while True: #", "def update_line(self, line, new_data): #Given a line and new_data of the form [new_x,", "node from available del(available[0]) # if everyone's bred, break the loop if len(available)", "update_line(self, line, new_data): #Given a line and new_data of the form [new_x, new_y],", "self.summary_scores = [] # stores min max mean median self.node = node_class #", "= [] self.past_generations = [] self.complete_scores = [] # stores the complete score", "self.breed() self.cull() def run_to_score(self, score): # Keeps cycling until the latest population's 
mean", "to make a mutated offspring def __init__(self, key=None): self.key = key def reproduce(self):", "scores to the graph #self.initialize_population() def initialize_graph(self): self.graphing = True self.ax = plt.gca()", "population fits population_size\"\"\" # From each node in population we get [node_index, node_score]", "self.offspring = [] # 0 will increment each time a node breeds, until", "get [node_index, node_score] in population_ranking population_ranking = [[x, self.score(self.population[x])] for x in \\", "with random partners # in available, then remove first node from available #", "choice = random.choice(available[1:]) except IndexError: #Sometimes the last guy gets left out #print('ruh", "loop if len(available) == 0: break # archive the parent generation, make the", "self.ranking[math.ceil(size/2)] self.summary_scores.append([botpercentile, median, toppercentile]) # if graphing is turned on, send the new", "cull() passes new scores to the graph #self.initialize_population() def initialize_graph(self): self.graphing = True", "def breed(self, size=None, times=None): \"\"\"Replaces self.population with a whole load of newly bred", "the same indices as their node counterparts in population self.ranking = [x[1] for", "population with size self.population, hopefully near to endpoint pass def score(self, my_node): return(self.scorer.score(self.decrypt(self.text,", "text self.breeding_times = breeding_times # how many times each parent will breed self.population_size", "self.population = [] self.past_generations = [] self.complete_scores = [] # stores the complete", "breed and cull for num in range(ntimes): self.cycles_count += 1 self.breed() self.cull() def", "population is the top population_size guys as ranked # x[0] is the index", "first node in available as the base, breed them with random partners #", "pstats import time class node(): # Has a keyword that defines it's means", "node self.population = [self.population[x[0]] for x in 
population_ranking[-size:]] # The actual scores, with", "# Keeps cycling until the latest population's mean score is greater than score", "range(3)] else: self.lines = [self.ax.plot([], [])[0] for num in range(3)] self.ax.relim() self.ax.autoscale_view(True, True,", "toppercentile]) # if graphing is turned on, send the new data to the", "> 0: start_num_points = len(self.summary_scores) xdata = np.array([x for x in range(1, start_num_points)])", "for x in population_ranking[-size:]] # The actual scores, with the same indices as", "choice[1] += 1 # if the partner's bred the requisite number of times,", "median, toppercentile]) # if graphing is turned on, send the new data to", "from available del(available[0]) # if everyone's bred, break the loop if len(available) ==", "population if size == None: size = self.population_size \"\"\"Removes the bottom scorers of", "partner's breed count by one choice[1] += 1 # if the partner's bred", "algorithms = [] for num in range(n): algorithms.append(algorithm(*args, **kwargs)) if graphing: algorithms[-1].initialize_graph() prof", "breed self.population_size = population_size self.population = [] self.past_generations = [] self.complete_scores = []", "= [] for num in range(n): algorithms.append(algorithm(*args, **kwargs)) if graphing: algorithms[-1].initialize_graph() prof =", "range(times): self.offspring.append(self.population[pop_num].reproduce()) # archive the parent generation, make the new population the offspring.", "plt.gca() if len(self.summary_scores) > 0: start_num_points = len(self.summary_scores) xdata = np.array([x for x", "can cull to select for best english scoring offspring def __init__(self, text, population_size,", "self.ax.relim() self.ax.autoscale_view(True, True, True) plt.ion() plt.show() def update_line(self, line, new_data): #Given a line", "if size == None: size = self.population_size \"\"\"Removes the bottom scorers of the", "ensures we breed the right number of times for breed_count in 
range(available[0][1], self.breeding_times):", "== None: times = self.breeding_times self.offspring = [] # 0 will increment each", "nodes with keywords, can breed to make offspring with random #mutations/changes, can cull", "of the population if size == None: size = self.population_size \"\"\"Removes the bottom", "# remove our start node from available del(available[0]) # if everyone's bred, break", "True self.ax = plt.gca() if len(self.summary_scores) > 0: start_num_points = len(self.summary_scores) xdata =", "update_graph(self): for num in range(len(self.lines)): self.update_line(self.lines[num], [self.cycles_count, self.summary_scores[-1][num]]) self.ax.relim() self.ax.autoscale_view(True, True, True) plt.draw()", "try to choose a partner from those in available choice = random.choice(available[1:]) except", "partners # in available, then remove first node from available # range(...) ensures", "min max mean median self.node = node_class # stores the type of node", "by score from lowest to highest # The new population is the top", "new_y], adds on the new values line.set_xdata(np.append(line.get_xdata(), new_data[0])) line.set_ydata(np.append(line.get_ydata(), new_data[1])) def update_graph(self): for", "we get [node_index, node_score] in population_ranking population_ranking = [[x, self.score(self.population[x])] for x in", "From each node in population we get [node_index, node_score] in population_ranking population_ranking =", "= population_size self.population = [] self.past_generations = [] self.complete_scores = [] # stores", "self.graphing = False #When turned on, cull() passes new scores to the graph", "range(len(self.lines)): self.update_line(self.lines[num], [self.cycles_count, self.summary_scores[-1][num]]) self.ax.relim() self.ax.autoscale_view(True, True, True) plt.draw() plt.pause(0.01) def initialize_population(self): #", "out #print('ruh roh') choice = [random.choice(self.population), -1] # breed with the chosen partner", "time class node(): # Has 
a keyword that defines it's means ofo decrypting", "in range(n): print('{0} out of {1}:'.format(num+1, n), end='') prof.runctx('algorithms[num].run_to_score(score)', globals(), locals()) if graphing:", "self.offspring = [] for pop_num in range(size): for breed_num in range(times): self.offspring.append(self.population[pop_num].reproduce()) #", "self.past_generations.append(self.population) self.population = self.offspring def cull(self, size=None): #size is the final size (post", "self.offspring def cull(self, size=None): #size is the final size (post culling) of the", "of the node self.population = [self.population[x[0]] for x in population_ranking[-size:]] # The actual", "import numpy as np import cProfile import pstats import time class node(): #", "from available # range(...) ensures we breed the right number of times for", "new_data[0])) line.set_ydata(np.append(line.get_ydata(), new_data[1])) def update_graph(self): for num in range(len(self.lines)): self.update_line(self.lines[num], [self.cycles_count, self.summary_scores[-1][num]]) self.ax.relim()", "graph #self.initialize_population() def initialize_graph(self): self.graphing = True self.ax = plt.gca() if len(self.summary_scores) >", "pass class algorithm(): #has a population of nodes with keywords, can breed to", "mean median self.node = node_class # stores the type of node self.scorer =", "increase the partner's breed count by one choice[1] += 1 # if the", "of times for breed_count in range(available[0][1], self.breeding_times): try: # try to choose a", "to a score, given\"\"\" algorithms = [] for num in range(n): algorithms.append(algorithm(*args, **kwargs))", "range(n): print('{0} out of {1}:'.format(num+1, n), end='') prof.runctx('algorithms[num].run_to_score(score)', globals(), locals()) if graphing: for", "offspring def __init__(self, text, population_size, breeding_times, node_class): self.text = text self.breeding_times = breeding_times", "run_to_score(self, score): # Keeps cycling until the 
latest population's mean score is greater", "[self.cycles_count, self.summary_scores[-1][num]]) self.ax.relim() self.ax.autoscale_view(True, True, True) plt.draw() plt.pause(0.01) def initialize_population(self): # Initializes the", "with random #mutations/changes, can cull to select for best english scoring offspring def", "end='') prof.runctx('algorithms[num].run_to_score(score)', globals(), locals()) if graphing: for line in algorithms[num].lines: line.remove() stats =", "matplotlib.pyplot as plt import numpy as np import cProfile import pstats import time", "my_node): return(self.scorer.score(self.decrypt(self.text, my_node.key))) def decrypt(self, text, key): pass def cycle(self, ntimes=1): # Does", "self.population = [self.population[x[0]] for x in population_ranking[-size:]] # The actual scores, with the", "english scoring offspring def __init__(self, text, population_size, breeding_times, node_class): self.text = text self.breeding_times", "[] # stores the complete score history self.summary_scores = [] # stores min", "available, then remove first node from available # range(...) 
ensures we breed the", "we breed the right number of times for breed_count in range(available[0][1], self.breeding_times): try:", "= len(self.summary_scores) xdata = np.array([x for x in range(1, start_num_points)]) self.lines = [self.ax.plot(xdata,", "random import math import matplotlib.pyplot as plt import numpy as np import cProfile", "x[1]) # sort by score from lowest to highest # The new population", "for breed_count in range(available[0][1], self.breeding_times): try: # try to choose a partner from", "key def reproduce(self): pass class algorithm(): #has a population of nodes with keywords,", "them with random partners # in available, then remove first node from available", "partner self.offspring.append(available[0][0].reproduce(choice[0])) # increase the partner's breed count by one choice[1] += 1", "= self.scorer.score(basics.generate_english_text(len(text))) self.cycles_count = 0 self.graphing = False #When turned on, cull() passes", "\"\"\"Makes n algorithms, returns the avg time for them to run to a", "# Can reproduce to make a mutated offspring def __init__(self, key=None): self.key =", "del(available[0]) # if everyone's bred, break the loop if len(available) == 0: break", "size self.population, hopefully near to endpoint pass def score(self, my_node): return(self.scorer.score(self.decrypt(self.text, my_node.key))) def", "the node self.population = [self.population[x[0]] for x in population_ranking[-size:]] # The actual scores,", "base, breed them with random partners # in available, then remove first node", "0 self.graphing = False #When turned on, cull() passes new scores to the", "+= 1 # if the partner's bred the requisite number of times, remove", "self.update_line(self.lines[num], [self.cycles_count, self.summary_scores[-1][num]]) self.ax.relim() self.ax.autoscale_view(True, True, True) plt.draw() plt.pause(0.01) def initialize_population(self): # Initializes", "\\ range(len(self.population))] population_ranking.sort(key=lambda x: x[1]) # sort by 
score from lowest to highest", "self.cycles_count = 0 self.graphing = False #When turned on, cull() passes new scores", "the form [new_x, new_y], adds on the new values line.set_xdata(np.append(line.get_xdata(), new_data[0])) line.set_ydata(np.append(line.get_ydata(), new_data[1]))", "if choice[1] == self.breeding_times: available.remove(choice) # remove our start node from available del(available[0])", "in self.summary_scores])[0] for num in range(3)] else: self.lines = [self.ax.plot([], [])[0] for num", "in population_ranking population_ranking = [[x, self.score(self.population[x])] for x in \\ range(len(self.population))] population_ranking.sort(key=lambda x:", "a node breeds, until it reaches breeding_times available = [[x, 0] for x", "available as the base, breed them with random partners # in available, then", "indices as their node counterparts in population self.ranking = [x[1] for x in", "make the new population the offspring. self.past_generations.append(self.population) self.population = self.offspring def cull(self, size=None):", "plt import numpy as np import cProfile import pstats import time class node():", "with the same indices as their node counterparts in population self.ranking = [x[1]", "make offspring with random #mutations/changes, can cull to select for best english scoring", "in range(num_cycles_large): for num in range(num_cycles_small): self.breed(size=len(self.population), times=breeding_times) self.cull(size=cullsize) def breed(self, size=None, times=None):", "score in self.summary_scores])[0] for num in range(3)] else: self.lines = [self.ax.plot([], [])[0] for", "requisite number of times, remove them from available if choice[1] == self.breeding_times: available.remove(choice)", "times for breed_count in range(available[0][1], self.breeding_times): try: # try to choose a partner", "self.cycle() if self.summary_scores[-1][2] > score: break def turnover(self, breeding_times, num_cycles_small, num_cycles_large, cullsize): for", "until the 
latest population's mean score is greater than score while True: self.cycle()", "# 0 will increment each time a node breeds, until it reaches breeding_times", "self.ax.autoscale_view(True, True, True) plt.ion() plt.show() def update_line(self, line, new_data): #Given a line and", "self.summary_scores.append([botpercentile, median, toppercentile]) # if graphing is turned on, send the new data", "False #When turned on, cull() passes new scores to the graph #self.initialize_population() def", "import time class node(): # Has a keyword that defines it's means ofo", "breed them with random partners # in available, then remove first node from", "a keyword that defines it's means ofo decrypting the text # Can reproduce", "The new population is the top population_size guys as ranked # x[0] is", "self.summary_scores])[0] for num in range(3)] else: self.lines = [self.ax.plot([], [])[0] for num in", "lowest to highest # The new population is the top population_size guys as", "by one choice[1] += 1 # if the partner's bred the requisite number", "toppercentile = self.ranking[math.floor(0.95*size)] median = self.ranking[math.ceil(size/2)] self.summary_scores.append([botpercentile, median, toppercentile]) # if graphing is", "= [] # 0 will increment each time a node breeds, until it", "(post culling) of the population if size == None: size = self.population_size \"\"\"Removes", "on, cull() passes new scores to the graph #self.initialize_population() def initialize_graph(self): self.graphing =", "on the new values line.set_xdata(np.append(line.get_xdata(), new_data[0])) line.set_ydata(np.append(line.get_ydata(), new_data[1])) def update_graph(self): for num in", "remove them from available if choice[1] == self.breeding_times: available.remove(choice) # remove our start", "their node counterparts in population self.ranking = [x[1] for x in population_ranking[-size:]] #score", "to run to a score, given\"\"\" algorithms = [] for num in range(n):", "population_ranking[-size:]] #score 
keeping self.complete_scores.append(self.ranking) botpercentile = self.ranking[math.floor(0.05*size)] toppercentile = self.ranking[math.floor(0.95*size)] median = self.ranking[math.ceil(size/2)]", "[] self.past_generations = [] self.complete_scores = [] # stores the complete score history", "None: times = self.breeding_times self.offspring = [] for pop_num in range(size): for breed_num", "[random.choice(self.population), -1] # breed with the chosen partner self.offspring.append(available[0][0].reproduce(choice[0])) # increase the partner's", "the parent generation, make the new population the offspring. self.past_generations.append(self.population) self.population = self.offspring", "size == None: size = self.population_size if times == None: times = self.breeding_times", "= self.scorer.score(basics.generate_random_text(len(text))) self.english_score = self.scorer.score(basics.generate_english_text(len(text))) self.cycles_count = 0 self.graphing = False #When turned", "self.update_graph() class genetic_algorithm(algorithm): def breed(self, size=None, times=None): \"\"\"Replaces self.population with a whole load", "the first node in available as the base, breed them with random partners", "population self.ranking = [x[1] for x in population_ranking[-size:]] #score keeping self.complete_scores.append(self.ranking) botpercentile =", "cProfile.Profile() for num in range(n): print('{0} out of {1}:'.format(num+1, n), end='') prof.runctx('algorithms[num].run_to_score(score)', globals(),", "= [[x, self.score(self.population[x])] for x in \\ range(len(self.population))] population_ranking.sort(key=lambda x: x[1]) # sort", "new data to the graph if self.graphing == True: self.update_graph() class genetic_algorithm(algorithm): def", "= self.population_size if times == None: times = self.breeding_times self.offspring = [] for", "given\"\"\" algorithms = [] for num in range(n): algorithms.append(algorithm(*args, **kwargs)) if graphing: algorithms[-1].initialize_graph()", "it 
reaches breeding_times available = [[x, 0] for x in self.population] # who", "in available, then remove first node from available # range(...) ensures we breed", "a score, given\"\"\" algorithms = [] for num in range(n): algorithms.append(algorithm(*args, **kwargs)) if", "can breed to make offspring with random #mutations/changes, can cull to select for", "def score(self, my_node): return(self.scorer.score(self.decrypt(self.text, my_node.key))) def decrypt(self, text, key): pass def cycle(self, ntimes=1):", "range(...) ensures we breed the right number of times for breed_count in range(available[0][1],", "\"\"\"Removes the bottom scorers of the population until the population fits population_size\"\"\" #", "*args, graphing=False, **kwargs): \"\"\"Makes n algorithms, returns the avg time for them to", "stores min max mean median self.node = node_class # stores the type of", "def initialize_population(self): # Initializes the population with size self.population, hopefully near to endpoint", "roh') choice = [random.choice(self.population), -1] # breed with the chosen partner self.offspring.append(available[0][0].reproduce(choice[0])) #", "def run_to_score(self, score): # Keeps cycling until the latest population's mean score is", "load of newly bred offspring, randomly selecting who pairs with who\"\"\" if size", "# The new population is the top population_size guys as ranked # x[0]", "as ranked # x[0] is the index of the node self.population = [self.population[x[0]]", "-1] # breed with the chosen partner self.offspring.append(available[0][0].reproduce(choice[0])) # increase the partner's breed", "for num in range(n): algorithms.append(algorithm(*args, **kwargs)) if graphing: algorithms[-1].initialize_graph() prof = cProfile.Profile() for", "the requisite number of times, remove them from available if choice[1] == self.breeding_times:", "self.lines = [self.ax.plot([], [])[0] for num in range(3)] self.ax.relim() self.ax.autoscale_view(True, True, True) plt.ion()", "[] 
for pop_num in range(size): for breed_num in range(times): self.offspring.append(self.population[pop_num].reproduce()) # archive the", "1 # if the partner's bred the requisite number of times, remove them", "in \\ range(len(self.population))] population_ranking.sort(key=lambda x: x[1]) # sort by score from lowest to", "for num in range(num_cycles_large): for num in range(num_cycles_small): self.breed(size=len(self.population), times=breeding_times) self.cull(size=cullsize) def breed(self,", "of breed and cull for num in range(ntimes): self.cycles_count += 1 self.breed() self.cull()", "times == None: times = self.breeding_times self.offspring = [] for pop_num in range(size):", "until the population fits population_size\"\"\" # From each node in population we get", "on, send the new data to the graph if self.graphing == True: self.update_graph()", "import cProfile import pstats import time class node(): # Has a keyword that", "available if choice[1] == self.breeding_times: available.remove(choice) # remove our start node from available", "for num in range(3)] self.ax.relim() self.ax.autoscale_view(True, True, True) plt.ion() plt.show() def update_line(self, line,", "# The actual scores, with the same indices as their node counterparts in", "ranked # x[0] is the index of the node self.population = [self.population[x[0]] for", "in range(available[0][1], self.breeding_times): try: # try to choose a partner from those in", "= np.array([x for x in range(1, start_num_points)]) self.lines = [self.ax.plot(xdata, [score[num] for score", "if times == None: times = self.breeding_times self.offspring = [] # 0 will", "the text # Can reproduce to make a mutated offspring def __init__(self, key=None):", "score history self.summary_scores = [] # stores min max mean median self.node =", "size = self.population_size if times == None: times = self.breeding_times self.offspring = []", "population of nodes with keywords, can breed to make offspring with random #mutations/changes,", 
"self.population_size if times == None: times = self.breeding_times self.offspring = [] # 0", "np.array([x for x in range(1, start_num_points)]) self.lines = [self.ax.plot(xdata, [score[num] for score in", "stores the complete score history self.summary_scores = [] # stores min max mean", "new population the offspring. self.past_generations.append(self.population) self.population = self.offspring def algorithm_avg_time(n, score, algorithm, *args,", "# increase the partner's breed count by one choice[1] += 1 # if", "parent will breed self.population_size = population_size self.population = [] self.past_generations = [] self.complete_scores", "ntimes=1): # Does ntimes cycles of breed and cull for num in range(ntimes):", "make the new population the offspring. self.past_generations.append(self.population) self.population = self.offspring def algorithm_avg_time(n, score,", "for num in range(ntimes): self.cycles_count += 1 self.breed() self.cull() def run_to_score(self, score): #", "algorithms.append(algorithm(*args, **kwargs)) if graphing: algorithms[-1].initialize_graph() prof = cProfile.Profile() for num in range(n): print('{0}", "is the final size (post culling) of the population if size == None:", "[node_index, node_score] in population_ranking population_ranking = [[x, self.score(self.population[x])] for x in \\ range(len(self.population))]", "times = self.breeding_times self.offspring = [] for pop_num in range(size): for breed_num in", "population_ranking = [[x, self.score(self.population[x])] for x in \\ range(len(self.population))] population_ranking.sort(key=lambda x: x[1]) #", "if self.graphing == True: self.update_graph() class genetic_algorithm(algorithm): def breed(self, size=None, times=None): \"\"\"Replaces self.population", "in range(times): self.offspring.append(self.population[pop_num].reproduce()) # archive the parent generation, make the new population the", "x in range(1, start_num_points)]) self.lines = [self.ax.plot(xdata, [score[num] for score 
in self.summary_scores])[0] for", "median self.node = node_class # stores the type of node self.scorer = basics.ngram_score('english_trigrams.txt',", "def cull(self, size=None): #size is the final size (post culling) of the population", "if graphing is turned on, send the new data to the graph if", "= [] self.complete_scores = [] # stores the complete score history self.summary_scores =", "for x in range(1, start_num_points)]) self.lines = [self.ax.plot(xdata, [score[num] for score in self.summary_scores])[0]", "x[0] is the index of the node self.population = [self.population[x[0]] for x in", "breeding_times available = [[x, 0] for x in self.population] # who is left", "[] for num in range(n): algorithms.append(algorithm(*args, **kwargs)) if graphing: algorithms[-1].initialize_graph() prof = cProfile.Profile()", "IndexError: #Sometimes the last guy gets left out #print('ruh roh') choice = [random.choice(self.population),", "new population is the top population_size guys as ranked # x[0] is the", "take the first node in available as the base, breed them with random", "population_ranking population_ranking = [[x, self.score(self.population[x])] for x in \\ range(len(self.population))] population_ranking.sort(key=lambda x: x[1])", "the new data to the graph if self.graphing == True: self.update_graph() class genetic_algorithm(algorithm):", "algorithm(): #has a population of nodes with keywords, can breed to make offspring", "offspring def __init__(self, key=None): self.key = key def reproduce(self): pass class algorithm(): #has", "population_size, breeding_times, node_class): self.text = text self.breeding_times = breeding_times # how many times", "many times each parent will breed self.population_size = population_size self.population = [] self.past_generations", "a whole load of newly bred offspring, randomly selecting who pairs with who\"\"\"", "node breeds, until it reaches breeding_times available = [[x, 0] for x in", "than score while True: self.cycle() if 
self.summary_scores[-1][2] > score: break def turnover(self, breeding_times,", "print('{0} out of {1}:'.format(num+1, n), end='') prof.runctx('algorithms[num].run_to_score(score)', globals(), locals()) if graphing: for line", "first node from available # range(...) ensures we breed the right number of", "text # Can reproduce to make a mutated offspring def __init__(self, key=None): self.key", "self.breeding_times self.offspring = [] # 0 will increment each time a node breeds,", "text, key): pass def cycle(self, ntimes=1): # Does ntimes cycles of breed and", "key=None): self.key = key def reproduce(self): pass class algorithm(): #has a population of", "make a mutated offspring def __init__(self, key=None): self.key = key def reproduce(self): pass", "available choice = random.choice(available[1:]) except IndexError: #Sometimes the last guy gets left out", "def decrypt(self, text, key): pass def cycle(self, ntimes=1): # Does ntimes cycles of", "= 0 self.graphing = False #When turned on, cull() passes new scores to", "the new population the offspring. 
self.past_generations.append(self.population) self.population = self.offspring def cull(self, size=None): #size", "form [new_x, new_y], adds on the new values line.set_xdata(np.append(line.get_xdata(), new_data[0])) line.set_ydata(np.append(line.get_ydata(), new_data[1])) def", "[] # stores min max mean median self.node = node_class # stores the", "self.breed(size=len(self.population), times=breeding_times) self.cull(size=cullsize) def breed(self, size=None, times=None): \"\"\"Replaces self.population with a whole load", "is the top population_size guys as ranked # x[0] is the index of", "ntimes cycles of breed and cull for num in range(ntimes): self.cycles_count += 1", "# take the first node in available as the base, breed them with", "[new_x, new_y], adds on the new values line.set_xdata(np.append(line.get_xdata(), new_data[0])) line.set_ydata(np.append(line.get_ydata(), new_data[1])) def update_graph(self):", "to choose a partner from those in available choice = random.choice(available[1:]) except IndexError:", "# range(...) 
ensures we breed the right number of times for breed_count in", "reproduce(self): pass class algorithm(): #has a population of nodes with keywords, can breed", "plt.draw() plt.pause(0.01) def initialize_population(self): # Initializes the population with size self.population, hopefully near", "self.population = self.offspring def algorithm_avg_time(n, score, algorithm, *args, graphing=False, **kwargs): \"\"\"Makes n algorithms,", "node_class # stores the type of node self.scorer = basics.ngram_score('english_trigrams.txt', 'english_quadgrams.txt') self.base_score =", "new scores to the graph #self.initialize_population() def initialize_graph(self): self.graphing = True self.ax =", "to highest # The new population is the top population_size guys as ranked", "= True self.ax = plt.gca() if len(self.summary_scores) > 0: start_num_points = len(self.summary_scores) xdata", "x in \\ range(len(self.population))] population_ranking.sort(key=lambda x: x[1]) # sort by score from lowest", "score is greater than score while True: self.cycle() if self.summary_scores[-1][2] > score: break", "for x in \\ range(len(self.population))] population_ranking.sort(key=lambda x: x[1]) # sort by score from", "breed(self, size=None, times=None): \"\"\"Replaces self.population with a whole load of newly bred offspring,", "new values line.set_xdata(np.append(line.get_xdata(), new_data[0])) line.set_ydata(np.append(line.get_ydata(), new_data[1])) def update_graph(self): for num in range(len(self.lines)): self.update_line(self.lines[num],", "times == None: times = self.breeding_times self.offspring = [] # 0 will increment", "self.population, hopefully near to endpoint pass def score(self, my_node): return(self.scorer.score(self.decrypt(self.text, my_node.key))) def decrypt(self,", "# who is left available while True: # take the first node in", "bred the requisite number of times, remove them from available if choice[1] ==", "our start node from available del(available[0]) # if everyone's bred, 
break the loop", "from available if choice[1] == self.breeding_times: available.remove(choice) # remove our start node from", "the index of the node self.population = [self.population[x[0]] for x in population_ranking[-size:]] #", "num in range(n): algorithms.append(algorithm(*args, **kwargs)) if graphing: algorithms[-1].initialize_graph() prof = cProfile.Profile() for num", "in population we get [node_index, node_score] in population_ranking population_ranking = [[x, self.score(self.population[x])] for", "keywords, can breed to make offspring with random #mutations/changes, can cull to select", "for x in population_ranking[-size:]] #score keeping self.complete_scores.append(self.ranking) botpercentile = self.ranking[math.floor(0.05*size)] toppercentile = self.ranking[math.floor(0.95*size)]", "return(self.scorer.score(self.decrypt(self.text, my_node.key))) def decrypt(self, text, key): pass def cycle(self, ntimes=1): # Does ntimes", "cycle(self, ntimes=1): # Does ntimes cycles of breed and cull for num in", "node in population we get [node_index, node_score] in population_ranking population_ranking = [[x, self.score(self.population[x])]", "range(num_cycles_large): for num in range(num_cycles_small): self.breed(size=len(self.population), times=breeding_times) self.cull(size=cullsize) def breed(self, size=None, times=None): \"\"\"Replaces", "with size self.population, hopefully near to endpoint pass def score(self, my_node): return(self.scorer.score(self.decrypt(self.text, my_node.key)))", "population we get [node_index, node_score] in population_ranking population_ranking = [[x, self.score(self.population[x])] for x", "defines it's means ofo decrypting the text # Can reproduce to make a", "range(available[0][1], self.breeding_times): try: # try to choose a partner from those in available", "in range(1, start_num_points)]) self.lines = [self.ax.plot(xdata, [score[num] for score in self.summary_scores])[0] for num", "= [self.population[x[0]] for x in 
population_ranking[-size:]] # The actual scores, with the same", "pass def score(self, my_node): return(self.scorer.score(self.decrypt(self.text, my_node.key))) def decrypt(self, text, key): pass def cycle(self,", "else: self.lines = [self.ax.plot([], [])[0] for num in range(3)] self.ax.relim() self.ax.autoscale_view(True, True, True)", "self.offspring def algorithm_avg_time(n, score, algorithm, *args, graphing=False, **kwargs): \"\"\"Makes n algorithms, returns the", "turned on, send the new data to the graph if self.graphing == True:", "Does ntimes cycles of breed and cull for num in range(ntimes): self.cycles_count +=", "while True: # take the first node in available as the base, breed", "increment each time a node breeds, until it reaches breeding_times available = [[x,", "if len(self.summary_scores) > 0: start_num_points = len(self.summary_scores) xdata = np.array([x for x in", "of nodes with keywords, can breed to make offspring with random #mutations/changes, can", "available del(available[0]) # if everyone's bred, break the loop if len(available) == 0:", "pass def cycle(self, ntimes=1): # Does ntimes cycles of breed and cull for", "None: size = self.population_size \"\"\"Removes the bottom scorers of the population until the", "sort by score from lowest to highest # The new population is the", "adds on the new values line.set_xdata(np.append(line.get_xdata(), new_data[0])) line.set_ydata(np.append(line.get_ydata(), new_data[1])) def update_graph(self): for num", "'english_quadgrams.txt') self.base_score = self.scorer.score(basics.generate_random_text(len(text))) self.english_score = self.scorer.score(basics.generate_english_text(len(text))) self.cycles_count = 0 self.graphing = False", "for best english scoring offspring def __init__(self, text, population_size, breeding_times, node_class): self.text =", "same indices as their node counterparts in population self.ranking = [x[1] for x", "# if everyone's bred, break the loop if len(available) == 0: break #", 
"graphing is turned on, send the new data to the graph if self.graphing", "# sort by score from lowest to highest # The new population is", "= self.breeding_times self.offspring = [] # 0 will increment each time a node", "= self.population_size if times == None: times = self.breeding_times self.offspring = [] #", "scoring offspring def __init__(self, text, population_size, breeding_times, node_class): self.text = text self.breeding_times =", "in range(num_cycles_small): self.breed(size=len(self.population), times=breeding_times) self.cull(size=cullsize) def breed(self, size=None, times=None): \"\"\"Replaces self.population with a", "x in self.population] # who is left available while True: # take the", "chosen partner self.offspring.append(available[0][0].reproduce(choice[0])) # increase the partner's breed count by one choice[1] +=", "random #mutations/changes, can cull to select for best english scoring offspring def __init__(self,", "the last guy gets left out #print('ruh roh') choice = [random.choice(self.population), -1] #", "= [] for pop_num in range(size): for breed_num in range(times): self.offspring.append(self.population[pop_num].reproduce()) # archive", "from lowest to highest # The new population is the top population_size guys", "0: break # archive the parent generation, make the new population the offspring.", "= node_class # stores the type of node self.scorer = basics.ngram_score('english_trigrams.txt', 'english_quadgrams.txt') self.base_score", "for score in self.summary_scores])[0] for num in range(3)] else: self.lines = [self.ax.plot([], [])[0]", "key): pass def cycle(self, ntimes=1): # Does ntimes cycles of breed and cull", "max mean median self.node = node_class # stores the type of node self.scorer", "scorers of the population until the population fits population_size\"\"\" # From each node", "size == None: size = self.population_size \"\"\"Removes the bottom scorers of the population", "graphing: algorithms[-1].initialize_graph() prof = 
cProfile.Profile() for num in range(n): print('{0} out of {1}:'.format(num+1,", "import pstats import time class node(): # Has a keyword that defines it's", "[] # 0 will increment each time a node breeds, until it reaches", "population_ranking[-size:]] # The actual scores, with the same indices as their node counterparts", "node counterparts in population self.ranking = [x[1] for x in population_ranking[-size:]] #score keeping", "generation, make the new population the offspring. self.past_generations.append(self.population) self.population = self.offspring def algorithm_avg_time(n,", "self.lines = [self.ax.plot(xdata, [score[num] for score in self.summary_scores])[0] for num in range(3)] else:", "[self.population[x[0]] for x in population_ranking[-size:]] # The actual scores, with the same indices", "with the chosen partner self.offspring.append(available[0][0].reproduce(choice[0])) # increase the partner's breed count by one", "0: start_num_points = len(self.summary_scores) xdata = np.array([x for x in range(1, start_num_points)]) self.lines", "{1}:'.format(num+1, n), end='') prof.runctx('algorithms[num].run_to_score(score)', globals(), locals()) if graphing: for line in algorithms[num].lines: line.remove()", "= breeding_times # how many times each parent will breed self.population_size = population_size", "# stores the type of node self.scorer = basics.ngram_score('english_trigrams.txt', 'english_quadgrams.txt') self.base_score = self.scorer.score(basics.generate_random_text(len(text)))", "break the loop if len(available) == 0: break # archive the parent generation,", "available = [[x, 0] for x in self.population] # who is left available", "self.ax.autoscale_view(True, True, True) plt.draw() plt.pause(0.01) def initialize_population(self): # Initializes the population with size", "locals()) if graphing: for line in algorithms[num].lines: line.remove() stats = pstats.Stats() stats.add(prof) return(stats)", "to select for best english scoring offspring def 
__init__(self, text, population_size, breeding_times, node_class):", "mean score is greater than score while True: self.cycle() if self.summary_scores[-1][2] > score:", "newly bred offspring, randomly selecting who pairs with who\"\"\" if size == None:", "times=None): \"\"\"Replaces self.population with a whole load of newly bred offspring, randomly selecting", "each parent will breed self.population_size = population_size self.population = [] self.past_generations = []", "def turnover(self, breeding_times, num_cycles_small, num_cycles_large, cullsize): for num in range(num_cycles_large): for num in", "of {1}:'.format(num+1, n), end='') prof.runctx('algorithms[num].run_to_score(score)', globals(), locals()) if graphing: for line in algorithms[num].lines:", "self.graphing = True self.ax = plt.gca() if len(self.summary_scores) > 0: start_num_points = len(self.summary_scores)", "count by one choice[1] += 1 # if the partner's bred the requisite", "self.offspring.append(self.population[pop_num].reproduce()) # archive the parent generation, make the new population the offspring. self.past_generations.append(self.population)", "botpercentile = self.ranking[math.floor(0.05*size)] toppercentile = self.ranking[math.floor(0.95*size)] median = self.ranking[math.ceil(size/2)] self.summary_scores.append([botpercentile, median, toppercentile]) #", "pairs with who\"\"\" if size == None: size = self.population_size if times ==", "def update_graph(self): for num in range(len(self.lines)): self.update_line(self.lines[num], [self.cycles_count, self.summary_scores[-1][num]]) self.ax.relim() self.ax.autoscale_view(True, True, True)", "= key def reproduce(self): pass class algorithm(): #has a population of nodes with", "1 self.breed() self.cull() def run_to_score(self, score): # Keeps cycling until the latest population's", "# archive the parent generation, make the new population the offspring. self.past_generations.append(self.population) self.population", "population the offspring. 
self.past_generations.append(self.population) self.population = self.offspring def cull(self, size=None): #size is the", "one choice[1] += 1 # if the partner's bred the requisite number of", "self.breeding_times): try: # try to choose a partner from those in available choice", "self.population = self.offspring def cull(self, size=None): #size is the final size (post culling)", "initialize_population(self): # Initializes the population with size self.population, hopefully near to endpoint pass", "returns the avg time for them to run to a score, given\"\"\" algorithms", "True: self.cycle() if self.summary_scores[-1][2] > score: break def turnover(self, breeding_times, num_cycles_small, num_cycles_large, cullsize):", "to make offspring with random #mutations/changes, can cull to select for best english", "+= 1 self.breed() self.cull() def run_to_score(self, score): # Keeps cycling until the latest", "each node in population we get [node_index, node_score] in population_ranking population_ranking = [[x,", "population_size guys as ranked # x[0] is the index of the node self.population", "prof = cProfile.Profile() for num in range(n): print('{0} out of {1}:'.format(num+1, n), end='')", "[])[0] for num in range(3)] self.ax.relim() self.ax.autoscale_view(True, True, True) plt.ion() plt.show() def update_line(self,", "to the graph if self.graphing == True: self.update_graph() class genetic_algorithm(algorithm): def breed(self, size=None,", "and new_data of the form [new_x, new_y], adds on the new values line.set_xdata(np.append(line.get_xdata(),", "fits population_size\"\"\" # From each node in population we get [node_index, node_score] in", "run to a score, given\"\"\" algorithms = [] for num in range(n): algorithms.append(algorithm(*args,", "n), end='') prof.runctx('algorithms[num].run_to_score(score)', globals(), locals()) if graphing: for line in algorithms[num].lines: line.remove() stats", "who is left available while True: # take the first node in available", "[[x, 0] 
for x in self.population] # who is left available while True:", "for breed_num in range(times): self.offspring.append(self.population[pop_num].reproduce()) # archive the parent generation, make the new", "range(1, start_num_points)]) self.lines = [self.ax.plot(xdata, [score[num] for score in self.summary_scores])[0] for num in", "means ofo decrypting the text # Can reproduce to make a mutated offspring", "of the form [new_x, new_y], adds on the new values line.set_xdata(np.append(line.get_xdata(), new_data[0])) line.set_ydata(np.append(line.get_ydata(),", "best english scoring offspring def __init__(self, text, population_size, breeding_times, node_class): self.text = text", "near to endpoint pass def score(self, my_node): return(self.scorer.score(self.decrypt(self.text, my_node.key))) def decrypt(self, text, key):", "as their node counterparts in population self.ranking = [x[1] for x in population_ranking[-size:]]", "if the partner's bred the requisite number of times, remove them from available", "decrypt(self, text, key): pass def cycle(self, ntimes=1): # Does ntimes cycles of breed", "algorithm_avg_time(n, score, algorithm, *args, graphing=False, **kwargs): \"\"\"Makes n algorithms, returns the avg time", "= text self.breeding_times = breeding_times # how many times each parent will breed", "available while True: # take the first node in available as the base,", "== None: times = self.breeding_times self.offspring = [] for pop_num in range(size): for", "turnover(self, breeding_times, num_cycles_small, num_cycles_large, cullsize): for num in range(num_cycles_large): for num in range(num_cycles_small):", "graph if self.graphing == True: self.update_graph() class genetic_algorithm(algorithm): def breed(self, size=None, times=None): \"\"\"Replaces", "is greater than score while True: self.cycle() if self.summary_scores[-1][2] > score: break def", "median = self.ranking[math.ceil(size/2)] self.summary_scores.append([botpercentile, median, toppercentile]) # if 
graphing is turned on, send", "a population of nodes with keywords, can breed to make offspring with random", "the avg time for them to run to a score, given\"\"\" algorithms =", "breeding_times, num_cycles_small, num_cycles_large, cullsize): for num in range(num_cycles_large): for num in range(num_cycles_small): self.breed(size=len(self.population),", "time for them to run to a score, given\"\"\" algorithms = [] for", "\"\"\"Replaces self.population with a whole load of newly bred offspring, randomly selecting who", "basics.ngram_score('english_trigrams.txt', 'english_quadgrams.txt') self.base_score = self.scorer.score(basics.generate_random_text(len(text))) self.english_score = self.scorer.score(basics.generate_english_text(len(text))) self.cycles_count = 0 self.graphing =", "index of the node self.population = [self.population[x[0]] for x in population_ranking[-size:]] # The", "those in available choice = random.choice(available[1:]) except IndexError: #Sometimes the last guy gets", "# Has a keyword that defines it's means ofo decrypting the text #", "time a node breeds, until it reaches breeding_times available = [[x, 0] for", "if len(available) == 0: break # archive the parent generation, make the new", "== 0: break # archive the parent generation, make the new population the", "= plt.gca() if len(self.summary_scores) > 0: start_num_points = len(self.summary_scores) xdata = np.array([x for", "offspring, randomly selecting who pairs with who\"\"\" if size == None: size =", "self.population] # who is left available while True: # take the first node", "num in range(3)] self.ax.relim() self.ax.autoscale_view(True, True, True) plt.ion() plt.show() def update_line(self, line, new_data):", "in self.population] # who is left available while True: # take the first", "line.set_ydata(np.append(line.get_ydata(), new_data[1])) def update_graph(self): for num in range(len(self.lines)): self.update_line(self.lines[num], [self.cycles_count, self.summary_scores[-1][num]]) 
self.ax.relim() self.ax.autoscale_view(True,", "plt.ion() plt.show() def update_line(self, line, new_data): #Given a line and new_data of the", "self.summary_scores[-1][num]]) self.ax.relim() self.ax.autoscale_view(True, True, True) plt.draw() plt.pause(0.01) def initialize_population(self): # Initializes the population", "a mutated offspring def __init__(self, key=None): self.key = key def reproduce(self): pass class", "import random import math import matplotlib.pyplot as plt import numpy as np import", "self.scorer.score(basics.generate_random_text(len(text))) self.english_score = self.scorer.score(basics.generate_english_text(len(text))) self.cycles_count = 0 self.graphing = False #When turned on,", "line, new_data): #Given a line and new_data of the form [new_x, new_y], adds", "choose a partner from those in available choice = random.choice(available[1:]) except IndexError: #Sometimes", "self.breeding_times = breeding_times # how many times each parent will breed self.population_size =", "self.cull() def run_to_score(self, score): # Keeps cycling until the latest population's mean score", "with a whole load of newly bred offspring, randomly selecting who pairs with", "self.base_score = self.scorer.score(basics.generate_random_text(len(text))) self.english_score = self.scorer.score(basics.generate_english_text(len(text))) self.cycles_count = 0 self.graphing = False #When", "self.complete_scores = [] # stores the complete score history self.summary_scores = [] #", "the offspring. 
self.past_generations.append(self.population) self.population = self.offspring def cull(self, size=None): #size is the final", "counterparts in population self.ranking = [x[1] for x in population_ranking[-size:]] #score keeping self.complete_scores.append(self.ranking)", "cull(self, size=None): #size is the final size (post culling) of the population if", "num in range(num_cycles_small): self.breed(size=len(self.population), times=breeding_times) self.cull(size=cullsize) def breed(self, size=None, times=None): \"\"\"Replaces self.population with", "def cycle(self, ntimes=1): # Does ntimes cycles of breed and cull for num", "offspring with random #mutations/changes, can cull to select for best english scoring offspring", "self.node = node_class # stores the type of node self.scorer = basics.ngram_score('english_trigrams.txt', 'english_quadgrams.txt')", "actual scores, with the same indices as their node counterparts in population self.ranking", "break def turnover(self, breeding_times, num_cycles_small, num_cycles_large, cullsize): for num in range(num_cycles_large): for num", "if size == None: size = self.population_size if times == None: times =", "def __init__(self, key=None): self.key = key def reproduce(self): pass class algorithm(): #has a", "population until the population fits population_size\"\"\" # From each node in population we", "x in population_ranking[-size:]] #score keeping self.complete_scores.append(self.ranking) botpercentile = self.ranking[math.floor(0.05*size)] toppercentile = self.ranking[math.floor(0.95*size)] median", "size = self.population_size \"\"\"Removes the bottom scorers of the population until the population", "def reproduce(self): pass class algorithm(): #has a population of nodes with keywords, can", "send the new data to the graph if self.graphing == True: self.update_graph() class", "and cull for num in range(ntimes): self.cycles_count += 1 self.breed() self.cull() def run_to_score(self,", "new_data): #Given a line and new_data 
of the form [new_x, new_y], adds on", "the new values line.set_xdata(np.append(line.get_xdata(), new_data[0])) line.set_ydata(np.append(line.get_ydata(), new_data[1])) def update_graph(self): for num in range(len(self.lines)):", "self.population with a whole load of newly bred offspring, randomly selecting who pairs", "#print('ruh roh') choice = [random.choice(self.population), -1] # breed with the chosen partner self.offspring.append(available[0][0].reproduce(choice[0]))", "self.past_generations.append(self.population) self.population = self.offspring def algorithm_avg_time(n, score, algorithm, *args, graphing=False, **kwargs): \"\"\"Makes n", "keyword that defines it's means ofo decrypting the text # Can reproduce to", "self.ranking = [x[1] for x in population_ranking[-size:]] #score keeping self.complete_scores.append(self.ranking) botpercentile = self.ranking[math.floor(0.05*size)]", "the base, breed them with random partners # in available, then remove first", "the population fits population_size\"\"\" # From each node in population we get [node_index,", "Has a keyword that defines it's means ofo decrypting the text # Can", "until it reaches breeding_times available = [[x, 0] for x in self.population] #", "self.ranking[math.floor(0.95*size)] median = self.ranking[math.ceil(size/2)] self.summary_scores.append([botpercentile, median, toppercentile]) # if graphing is turned on,", "partner's bred the requisite number of times, remove them from available if choice[1]" ]
[ "\\x4b\\x6b\\x90\\x26\\xba\\x25\\xde\\x95\\xc5\\x59\\x89\\x72\\x69\\x46\\x0f\\xa9\\ \\x70\\x8c\\xe2\\x54\\x2a\\x4a\\x48\\x3a\\xb7\\x97\\xc4\\x1a\\x95\\x63\\x29\\x98\\ \\xfb\\xb0\\xe4\\x9a\\xa5\\x83\\x58\\x6a\\x64\\x9b\\xe4\\x2f\\x52\\x83\\xf7\\x2b\\ \\xad\\xa1\\x55\\xe3\\x86\\x48\\x1f\\x12\\x50\\xa5\\x7b\\xad\\xf4\\xb8\\x21\\x2a\\ \\x6a\\xd1\\xce\\x2a\\xa4\\xb5\\x68\\xb3\\xec\\x82\\xac\\x6b\\xd1\\x9a\\x18\\xd2\\ \\x55\\x48\\xd9\\x48\\x69\\x86\\xaa\\x3f\\x44\\xff\\x16\\x3a\\xb5\\xc6\\x94\\x92\\ \\xd2\\x25\\x86\\x38\\x8b\\xbd\\x47\\x32\\x6e\\x56\\xe4\\xca\\x62\\x9d\\x6a\\x41\\ \\x9b\\x84\\x82\\x3f\\x20\\x25\\x96\\x38\\xef\\x7f\\x26\\xb1\\x4c\\xe2\\x5b\\x29\\ \\xfd\\x46\\x69\\x2b\\xd2\\x85\\xa5\\x12\\x6f\\x49\\x6c\\x96\\x78\\x43\\x7a\\xd8\\ \\x23\\xf1\\xaa\\xc4\\x26\\x69\\xd8\\xc5\\x12\\x1f\\xa9\\x91\\x98\\xe4\\xba\\x5d\\ \\x71\\xf6\\xa9\\x81\\xb0\\xc2\\x5c\\x2b\\xb9\\x6e\\xd1\\x7a\\xc2\\xba\\x17\\xe9\\ \\x3b\\x62\\x6a\\xf0\\xbd\\x22\\x0d\\xf5\\xab\\x82\\x45\\x22\\xb2\\x1b\\x55\\xef\\ \\x40\\x91\\x25\\xe8\\xcb\\x38\\x52\\x11\\xd2\\x14\\xa4\\x9b\\x4c\\xc9\\x6b\\x4a\\", "\\x81\\x74\\x1c\\x6d\\x17\\xea\\xfc\\x1a\\x29\\x0d\\x69\\x80\\x5b\\x07\\x4e\\xa1\\ \\xfb\\x85\\xaa\\xca\\x90\\xce\\xc3\\x0e\\x58\\x8c\\x95\\x98\\x29\\x65\\x75\\x51\\ \\x60\\x17\\xfa\\x40\\x68\\x95\\x50\\x5d\\x25\\xd2\\x39\\xa9\\x12\\xe3\\x24\\x1e\\ \\x96\\x78\\x59\\xe2\\xff\\x6c\\x07\\x3c\\x7c\\xac\\xf4\\x32\\x52\\xde\\x60\\x89\\ \\x47\\x25\\x5e\\x93\\xf2\\xc6\\x49\\xfe\\xcb\\x24\\x96\\x4b\\x3c\\x23\\xf1\\x93\\ \\x94\\x76\\x8f\\x64\\x0e\\x74\\x2c\\xfe\\xcb\\x12\\xdf\\xe8\\x18\\x52\\x9c\\x46\\ \\x29\\x65\\x9c\\x54\\x92\\x24\\xb1\\x54\\xad\\x9c\\x50\\x35\\x31\\xad\\x45\\x6a\\ \\x20\\x2e\\x93\\xad\\xd0\\x1e\\x87\\xa4\\xa3\\xf6\\x49\\xe4\\xd4\\xc1\\x67\\x57\\ \\x02\\x8f\\x03\\xc7\\x0b\\x18\\xd0\\x2f\\x9f\\xe9\\x47\\xe0\\xa5\\xde\\xf0\\xc7\\ \\x3a\\x60\\x4c\\xf8\\x2f\\xb5\\x80\\x40\\x1b\\x49\\x6b\\xa1\\xaf\\x20\\x2f\\x0a\\ \\x39\\xb7\\x02\\xfb\\x66\\x01\\xf7\\x39\\xb2\\x1c\\x85\\xe4\\x23\\xe8\\xa3\\x65\\ 
\\xf0\\xf0\\x4f\\xf0\\xc2\\x30\\xa8\\x9d\\x65\\xbf\\x97\\xbf\\x0e\\xd2\\x76\\x42\\ \\xf0\\x5e\\x70\\x1d\\x87\\x82\\x57\\x6c\\x80\\x06\\x93\\x20\\x2b\\x44\\x8c\\xbe\\", "\\x07\\x6a\\x27\\x41\\x69\\x8c\\x94\\x21\\x8f\\x30\\x90\\x1a\\x92\\x9c\\xdf\\x5c\\ \\x1b\\xef\\x43\\x78\\xdd\\x2a\\xa8\\xc9\\xc5\\xc4\\x65\\x92\\x07\\x18\\x09\\xb6\\ \\xa2\\x33\\xd4\\xb6\\xb1\\x05\\x6b\\xed\\x84\\x5d\\x56\\x0a\\x4c\\x32\\x00\\xc0\\ \\x3a\\x0f\\xaa\\xc6\\x90\\xfc\\x9c\\xcd\\x03\\x38\\x11\\x20\\xd7\\xb1\\x80\\x03\\ \\x42\\x90\\x71\\xbd\\x17\\xfe\\xbc\\xd0\\x71\\x6e\\x2e\\x7b\\x03\\x2e\\xdd\\x4d\\ \\xe4\\x83\\x00\\x34\\x80\\xb1\\x74\\x08\\xc4\\xaf\\x43\\xe4\\x80\\x6b\\x05\\x5c\\ \\x12\\x84\\xca\\xae\\x8e\\x2f\\xe9\\x84\\x90\\xcb\\xde\\x9d\\x92\\x33\\x44\\xe9\\ \\xa0\\x85\\x24\\xb0\\x86\\x43\\x52\\x12\\xdc\\xd5\\x88\\xc1\\x6b\\x74\\xa3\\x83\\ \\x14\\x22\\x54\\x60\\x50\\x13\\xbd\\x04\\xbe\\xee\\x83\\xf9\\xdb\\x4d\\x16\\xdb\\ \\x80\\xc3\\x5e\\x68\\xbe\\xd5\\xd1\\xa1\\x69\\x10\\x1a\\x69\\xef\\xf2\\x7a\\xe0\\ \\xd8\\x0b\\x31\\x98\\x52\\x03\\xae\\x3f\\xec\\x33\\xa8\\xf0\\x91\\x7a\\xc8\\x2e\\ \\xa4\\xc5\\xd1\\x69\\x01\\x3d\\x7f\\x04\\x4e\\x7b\\x9c\\x73\\xaa\\xb2\\xd3\\xcd\\ \\xf0\\xd5\\xb4\\x7d\\xd0\\x48\\x3c\\x03\\x8c\\x4f\\x81\\xf6\\x5c\\xe0\\x24\\x06\\", "# WARNING! All changes made in this file will be lost! 
# from", "\\x0b\\xfa\\x06\\xa0\\xe7\\x5c\\xe7\\xc6\\xa6\\x3d\\x13\\xcc\\x3f\\x61\\xc6\\x87\\ \\xbc\\x30\\x3f\\x42\\xa6\\x63\\x2e\\x52\\x9d\\x1b\\x9b\\x06\\x87\\xb5\\xf2\\x1d\\ \\xf2\\x39\\x3b\\x1b\\x98\\x71\\x04\\xdb\\x17\\xec\\x06\\x7a\\x90\\xba\\x60\\x22\\ \\x5d\\x57\\x83\\x31\\x59\\x90\\xfe\\x81\\x8d\\x41\\x2b\\x1b\\xfe\\xf4\\xdb\\x4d\\ \\x4e\\x93\\xc3\\x8b\\x95\\x50\\x0b\\x66\\xe7\\x20\\xba\\x63\\x51\\x08\\x98\\xb9\\ \\x0f\\xc3\\x40\\x37\\xd6\\xca\\x77\\xe8\\xe0\\x16\\xc2\\x24\\x72\\x35\\x01\\x7a\\ \\x02\\x73\\xdc\\xaf\\xe2\\x4e\\x9a\\x00\\xd9\\x41\\x38\\xf8\\x2b\\xd2\\x30\\xa4\\ \\x41\\x48\\x2f\\xdb\\x01\\xa9\\x7d\\x09\\xd2\\x40\\xaf\\xe4\\x2a\\x92\\xe8\\xa7\\ \\xce\\xfb\\x0d\\xbd\\x1e\\x43\\xcb\\x85\\x2a\\xcb\\x50\\xe0\\x15\\x14\\x7e\\xd3\\ \\xd0\\xd7\\x16\\xfa\\xde\\x42\\x91\\xcf\\x91\\x35\\x07\\x35\\x6f\\x40\\xe5\\x42\\ \\x2d\\x9d\\xc8\\x7a\\xd0\\x70\\x02\\xd8\\x42\\x89\\xb7\\xa5\\x82\\x0b\\x14\\xd9\\ \\x87\\x34\\x0b\\x89\\xc5\\xf6\\x08\\xe1\\xda\\x22\\xbb\\xb1\\x79\\x59\\xe2\\x4f\\ \\xc9\\x7b\\x8b\\xf4\\x3c\\xb2\\x78\\x4d\\x47\\x88\\xab\\x95\\x93\\xd2\\x2d\\x48\\", "\\xd7\\x90\\x60\\xc1\\xed\\x2b\\xa8\\x5e\\xf7\\x22\\x4b\\x0a\\x07\\x91\\x5b\\xe6\\ \\xa1\\x3c\\x2d\\xc6\\x4b\\x67\\x42\\xb8\\xae\\x6e\\x7e\\x73\\x5e\\x72\\xaf\\x6d\\ \\xa4\\xbf\\xdf\\x1b\\x7a\\xf9\\x61\\xdb\\x34\\x92\\x7e\\x18\\x86\\xf9\\x51\\x09\\ \\x34\\x8c\\x04\\xa6\\xdb\\x73\\x0c\\x8e\\x42\\xd1\\x01\\xc8\\xfc\\x9e\\xc8\\xa4\\ \\xa3\\x54\\x4e\\x6f\\x64\\x76\\x29\\xdc\\x0e\\x64\\xee\\x04\\xea\\xea\\x31\\x6b\\ \\x4e\\x60\\x5a\\x51\\x4c\\xd7\\x6f\\xa0\\x6e\\x50\\x6f\\x40\\xdd\\x5d\\x58\\xa7\\ \\xfb\\x62\\x5a\\x3e\\xc4\\x4c\\x0c\\xd2\\x70\\xf1\\x7f\\xd0\\x77\\x33\\x9c\\x13\\ \\xc4\\x5d\\x0f\\xfe\\x3a\\x70\\x6f\\x07\\xe3\\xb8\\xe3\\x6c\\x7a\\x54\\x91\\xbe\\ \\x25\\x9b\\x5d\\x4c\\x22\\x89\\x8d\\x24\\x45\\x0b\\x68\\x8d\\x9c\\x8f\\x72\\xf7\\ \\xd1\\xb2\\xeb\\x72\\x9a\\xae\\xf6\\xb0\\x6a\\xfd\\x9d\\x7c\\xf8\\xa4\\xc1\\x23\\ 
\\xef\\x1c\\xa4\\xa5\\x7d\\x0a\\xff\\x1f\\xa7\\x48\\xb3\\x27\\x67\\x17\\xe2\\x1e\\ \\x00\\x00\\x00\\x00\\x49\\x45\\x4e\\x44\\xae\\x42\\x60\\x82\\ \"", "\\x86\\xf6\\x1c\\xd8\\x92\\x0a\\x85\\xc0\\xc5\\xe5\\xe0\\x8d\\x81\\x2b\\x05\\x94\\ \\x01\\x91\\xd3\\x90\\xb8\\x20\\x17\\xde\\xeb\\x0f\\xa1\\xa1\\xc0\\x40\\xe0\\x0f\\ \\x18\\xbe\\x1c\\xf6\\x0f\\x86\\xce\\x47\\x81\\x1a\\x9b\\x3a\\x69\\x02\\xb6\\x42\\ \\xca\\x6e\\x08\\x05\\x89\\x45\\xd7\\x53\\x8b\\x81\\x17\\x93\\x6c\\x7e\\xc0\\x74\\ \\x7d\\x4c\\x34\\x3e\\x8f\\x43\\x14\\x73\\x12\\x17\\xa9\\x40\\x42\\xfa\\x19\\x8c\\ \\x52\\x19\\xca\\x05\\xba\\x20\\x2e\\x06\\xee\\x3a\\x0c\\xfe\\x76\\x30\\xbf\\x04\\ \\x3e\\x07\\x32\\x80\\xcb\\xa0\\xf9\\x2a\\xd8\\x71\\x11\\xb4\\x87\\xa1\\xdf\\x09\\ \\x83\\x82\\xed\\x90\\xb1\\x0a\\xd8\\x9e\\x08\\xa6\\x05\\xe3\\xc3\\xfc\\xf4\\xb1\\ \\x18\\x14\\x85\\xdc\\x1b\\x80\\x03\\x06\\x74\\x26\\xa2\\xe2\\x04\\x3a\\x1f\\x69\\ \\xc7\\xec\\x1b\\xc3\\xdd\\x28\\x5c\\x8b\\x4d\\x8c\\xd5\\xbd\\xa1\\xf1\\x6a\\xa0\\ \\x18\\x68\\x05\\xb2\\x80\\x1e\\xc0\\x66\\x48\\xff\\x11\\x46\\xee\\x04\\x3f\\xf0\\ \\xdd\\xe5\\x28\\xf2\\x36\\x27\\x29\\xc1\\x02\\x0a\\x68\\x04\\xc2\\xb4\\x51\\xc0\\ \\x29\\xa0\\xbb\\x51\\x49\\x81\\xf1\\x14\\x46\\x49\\x03\\xc6\\x45\\x42\\x5d\\x81\\", "\\x66\\xa0\\x1f\\x84\\x3a\\xf1\\x70\\x9c\\x46\\x92\\x88\\x64\\x9c\\x02\\xab\\x15\\ \\xd1\\x04\\xf1\\x54\\x0a\\x4f\\x0f\\x85\\x13\\x82\\xba\\x22\\x16\\x1d\\x9b\\x42\\ \\x63\\xd5\\xc5\\xb4\\x9b\\x01\\xc2\\xed\\x43\\xe1\\x93\\x76\\xd4\\xb1\\x12\\xe9\\ \\x69\\x74\\xa8\\x19\\x6d\\xaf\\x40\\xba\\x18\\x59\\x43\\xd0\\xd3\\x21\\x54\\xbf\\ \\x18\\xc9\\x9f\\x2a\\xb9\\xfc\\x52\\x8a\\xa9\\x8a\\x15\\x86\\xae\\x10\\xfa\\x57\\ \\x0c\\x35\\xcd\\x46\\x56\\x4f\\xa4\\x81\\x09\\x52\\x41\\x9a\\xa2\\x8b\\xd0\\x71\\ \\xa1\\xf8\\xcd\\x3e\\x89\\xc7\\x25\\x56\\x4a\\x3d\\x67\\x4b\\xcb\\x90\\x36\\x21\\ \\xdd\\xe4\\x96\\x98\\x22\\xf1\\x9e\\xc4\\x42\\x89\\xc9\\xd2\\xa7\\x48\\x2c\\x95\\ \\x28\\x73\\xe0\\x85\\x2c\\x5e\\x56\\x05\\x96\\xda\\x78\\x45\\x4a\\x19\\xae\\x06\\ 
\\xa4\\x2a\\x1a\\x14\\x4b\\xb9\\x43\\xba\\x20\\x59\\x61\\xe3\\x65\\xfd\\xb3\\x58\\ \\x7a\\xce\\x1f\\xd6\\xdb\\xee\\x23\\xda\\x53\\x34\\x5f\\x9c\\xe8\\x40\\x0d\\xfb\\ \\x90\\x1e\\x42\\x7f\\xb6\\x23\\x6b\\x13\\xd2\\xf9\\xa6\\xc2\\xff\\x41\\x95\\xeb\\ \\x90\\xd2\\x3d\\x92\\xd1\\x53\\x2a\\x71\\xab\\x6a\\x13\\xfa\\x8f\\xd0\\xea\\xb3\\", "\\xd5\\x08\\xc9\\xff\\x01\\xca\\x52\\x80\\x9e\\xe0\\xe9\\xc0\\xba\\xfe\\x34\\x07\\ \\x66\\x84\\x38\\x3a\\x0a\\x48\\x80\\x0e\\x5b\\x5d\\x70\\x01\\x05\\x40\\x7e\\x0c\\ \\x52\\x6a\\xc0\\xfc\\xb7\\x1b\\x3e\\x1a\\x01\\x91\\x99\\x40\\x37\\xa0\\x0e\\x92\\ \\x0e\\xc3\\x9d\\xcf\\xc3\\xfb\\xf9\\xd0\\xfe\\x36\\x10\\x83\\x5e\\xf7\\xc3\\xd1\\ \\x6f\\x80\\x0e\\x2c\\x52\\x30\\xe9\\x0e\\xfc\\x8f\\x0a\\x9e\\x24\\x42\\x23\\x99\\ \\x74\\x52\\x46\\x37\\x06\\xf3\\x13\\x49\\x7c\\x03\\xc6\\x25\\x9c\\xd2\\x60\\xd6\\ \\x53\\x42\\x12\\x60\\x54\\xfd\\x82\\xaa\\xfa\\xc2\\xd6\\x4c\\x98\\xbd\\x15\\x12\\ \\xe7\\xb9\\xc0\\x63\\x41\\xc8\\x84\\xac\\x38\\x24\\x00\\xbf\\x00\\x5d\\x81\\x01\\ \\x06\\x8d\\x17\\x19\\x6c\\xb8\\xdc\\xe2\\x54\\x81\\xfd\\x52\\x9f\\xd3\\x90\\x17\\ \\x86\\xf6\\x1c\\xd8\\x92\\x0a\\x85\\xc0\\xc5\\xe5\\xe0\\x8d\\x81\\x2b\\x05\\x94\\ \\x01\\x91\\xd3\\x90\\xb8\\x20\\x17\\xde\\xeb\\x0f\\xa1\\xa1\\xc0\\x40\\xe0\\x0f\\ \\x18\\xbe\\x1c\\xf6\\x0f\\x86\\xce\\x47\\x81\\x1a\\x9b\\x3a\\x69\\x02\\xb6\\x42\\ \\xca\\x6e\\x08\\x05\\x89\\x45\\xd7\\x53\\x8b\\x81\\x17\\x93\\x6c\\x7e\\xc0\\x74\\", "\\xa6\\xf6\\xb1\\xcb\\xa4\\xf3\\x5d\\x8a\\xe6\\x4e\\x51\\x1d\\x3f\\xeb\\x3b\\xa4\\ \\xd5\\xc4\\xf4\\x11\\x12\\xf1\\xcf\\x91\\x1e\\x47\\x9a\\x99\\x2d\\xe5\\xf8\\xa5\\ \\x2b\\x4c\\xe9\\x5b\\x54\\x17\\x45\\xd6\\x03\\x6e\\xc9\\xdf\\x5d\\xe2\\x5c\\x7b\\ \\x66\\xe8\\xc9\\x97\\xae\\x34\\xf4\\xe9\\x09\\x7b\\x28\\xba\\x4a\\x68\\xb1\\xd0\\ \\xd9\\x16\\xd4\\xbc\\x1d\\xe9\\x2a\\xbf\\xc4\\x15\\x12\\xf3\\xd5\\x4a\\xab\\x2c\\ \\x8e\\x4b\\xb9\\x23\\xa5\\x67\\x90\\xae\\x2a\\x94\\x58\\xa9\\x06\\xa4\\x20\\x4b\\ \\xed\\xcb\\x0b\\xf6\\x4a\\xbc\\xab\\x38\\xfb\\x55\\x81\\x65\\x4f\\x79\\xf9\\x46\\ 
\\xed\\x04\\x74\\x9a\\x6a\\x1d\\x24\\xa4\\xad\\x48\\x11\\x3a\\x15\\x34\\x7f\\x94\\ \\xdc\\x53\\x15\\x49\\xdd\\xab\\x36\\xef\\xef\\xfa\\xa2\\xf0\\xa0\\x16\\x76\\x3d\\ \\xa2\\x08\\xeb\\x74\\x77\\x9e\\xe4\\x9a\\xdb\\x9f\\x79\\xc6\\xda\\x14\\xd8\\x38\\ \\x19\\x92\\x8f\\xc1\\xe8\\x30\\xd1\\x2b\\x0d\\x52\\x9e\\xea\\x86\\xf1\\x45\\x02\\ \\x14\\x89\\xc8\\x05\\x89\\x54\\xbd\\xb8\\x8f\\xa5\\x8f\\x07\\xf8\\xfc\\x21\\xe1\\ \\xca\\xb0\\xc5\\x32\\xd9\\x21\\xcd\\xfa\\x20\\x44\\x4a\\x81\\x1b\\xa2\\xf8\\x87\\", "\\x00\\x00\\x32\\x00\\x00\\x00\\x32\\x08\\x06\\x00\\x00\\x00\\x1e\\x3f\\x88\\xb1\\ \\x00\\x00\\x00\\x06\\x62\\x4b\\x47\\x44\\x00\\xff\\x00\\xff\\x00\\xff\\xa0\\xbd\\ \\xa7\\x93\\x00\\x00\\x00\\x09\\x70\\x48\\x59\\x73\\x00\\x00\\x0b\\x13\\x00\\x00\\ \\x0b\\x13\\x01\\x00\\x9a\\x9c\\x18\\x00\\x00\\x00\\x07\\x74\\x49\\x4d\\x45\\x07\\ \\xe3\\x01\\x1e\\x09\\x34\\x07\\xfb\\x3d\\x97\\x4b\\x00\\x00\\x00\\x1d\\x69\\x54\\ \\x58\\x74\\x43\\x6f\\x6d\\x6d\\x65\\x6e\\x74\\x00\\x00\\x00\\x00\\x00\\x43\\x72\\ \\x65\\x61\\x74\\x65\\x64\\x20\\x77\\x69\\x74\\x68\\x20\\x47\\x49\\x4d\\x50\\x64\\ \\x2e\\x65\\x07\\x00\\x00\\x18\\xa1\\x49\\x44\\x41\\x54\\x68\\xde\\x35\\x99\\x67\\ \\x7c\\x55\\x55\\xfa\\xb6\\xaf\\xbd\\x4f\\x4b\\x4e\\x7a\\x25\\x09\\x25\\x09\\x09\\ \\x1d\\x44\\x91\\x26\\x8a\\xe8\\x20\\x4a\\x71\\x44\\x01\\xb1\\x2b\\x16\\xb0\\xcc\\ \\x60\\xc1\\xae\\xef\\x08\\x63\\x77\\xfe\\x3a\\xa2\\xa0\\x88\\x0c\\x16\\xac\\x88\\ \\x0a\\xa2\\xa0\\x02\\x4a\\x13\\x51\\x40\\x40\\x7a\\x42\\x4d\\x02\\x49\\x48\\x2f\\ \\x27\\x27\\xa7\\xee\\xfb\\xfd\\xb0\\xb7\\x9f\\xf2\\xcb\\x29\\xfb\\xac\\x67\\xad\\", "\\x3a\\x60\\x4c\\xf8\\x2f\\xb5\\x80\\x40\\x1b\\x49\\x6b\\xa1\\xaf\\x20\\x2f\\x0a\\ \\x39\\xb7\\x02\\xfb\\x66\\x01\\xf7\\x39\\xb2\\x1c\\x85\\xe4\\x23\\xe8\\xa3\\x65\\ \\xf0\\xf0\\x4f\\xf0\\xc2\\x30\\xa8\\x9d\\x65\\xbf\\x97\\xbf\\x0e\\xd2\\x76\\x42\\ \\xf0\\x5e\\x70\\x1d\\x87\\x82\\x57\\x6c\\x80\\x06\\x93\\x20\\x2b\\x44\\x8c\\xbe\\ \\x88\\x5c\\x70\\x2f\\x24\\x9d\\x3a\\x76\\x91\\x49\\x53\\xfb\\x67\\x50\\x97\\x06\\ 
\\xa3\\xee\\x23\\xd5\\x5c\\x00\\xec\\xe0\\x24\\xb0\\x18\\x13\\x53\\x33\\xe1\\x50\\ \\x2a\\xc4\\xdb\\xed\\x93\\xef\\xb1\\x11\\x2a\\x3c\\xd0\\x7e\\x1d\\x30\\xa8\\x0a\\ \\x3a\\xdd\\x8c\\x98\\xe5\\x61\\x58\\x1b\\x7c\\x92\\x0d\\xdf\\x7f\\x19\\x87\\x3b\\ \\x37\\x41\\xd6\\xef\\x90\\x17\\xc1\\x2a\\xb6\\x59\\xb2\\x7b\\x3b\\x70\\xc8\\x07\\ \\xc6\\x01\\x60\\x37\\xa4\\x7d\\x41\\x7c\\xee\\xeb\\xb4\\xfc\\xb1\\x14\\xce\\xc6\\ \\x61\\x76\\x16\\xb4\\x5f\\x64\\xab\\x6c\\xea\\x7a\\x98\\xb3\\x01\\x4e\\xcc\\xb1\\ \\xbb\\xac\\x74\\x83\\xcd\\x54\\x64\\x43\\xef\\x03\\x70\\xfe\\x59\\xe2\\xb4\\x12\\ \\x66\\x02\\x8c\\xf8\\x89\\xac\\xcc\\x7b\\x71\\x11\\x63\\x1f\\x59\\xd4\\xb4\\x6f\\", "\\xfb\\x90\\x0a\\xa6\\x48\\xfc\\x20\\x79\\x2f\\x96\\x16\\xe3\\x4c\\xbc\\xbe\\x92\\ \\xce\\x19\\x2e\\xcd\\xec\\xa9\\x38\\x47\\xd5\\x4e\\x4c\\x56\\xef\\xbb\\xa4\\xe1\\ \\xdd\\x24\\xbe\\x51\\x05\\x71\\xd5\\x10\\x57\\x98\\xbd\\xd2\\xe8\\x3e\\xd2\\xb9\\ \\x1e\\x69\\x58\\x9a\\x94\\xf0\\x90\\x4e\\x25\\xaf\\x11\\x81\\xa9\\xd9\\xd2\\x3f\\ \\x3c\\x6a\\x6d\\x41\\x3f\\xcb\\xa1\\xd0\\xc4\\x04\\xc9\\x3f\\x48\\xb1\\xaf\\xd0\\ \\x1c\\xa1\\xc6\\xdf\\x4c\\xa9\\xb7\\x47\\xa2\\x54\\xe2\\x42\\x89\\xfb\\xa4\\xa4\\ \\x73\\xb4\\xa6\\x02\\xad\\x6e\\x41\\xea\\xe6\\x93\\xb8\\xd5\\x99\\x0d\\xfe\\x4b\\ \\xea\\x31\\x5c\\xf1\\x5f\\x91\\x75\\x10\\xe9\\xbc\\x14\\x27\\xc6\\xbe\\x69\\x9f\\ \\xc4\\x7d\\x49\\xd2\\x55\\x99\\x4e\\xbc\\x9d\\x2a\\x3d\\x89\\xc4\\x74\\x89\\xf7\\ \\xa5\\x31\\xbd\\xa5\\xae\\x97\\x49\\xac\\x52\\x88\\x36\\xa9\\x47\\x81\\xfd\\x3a\\ \\x35\\x8a\\x52\\x27\\xb1\\x5c\\x16\\x6f\\x48\\x4f\\xa1\\x78\\xca\\x75\\x6a\\x4a\\ \\x78\\x49\\x35\\x74\\x68\\x8e\\x4b\\x42\\x59\\x3e\\x59\\xef\\xa1\\xda\\x00\\x8a\\ \\x4f\\x4f\\x96\\x28\\x96\\xae\\x40\\xc1\\xfd\\xf6\\x04\\x2a\\xb0\\x14\\xc9\\x3f\\", "\\x84\\x28\\xd4\\xd6\\xd1\\xf9\\x3c\\x6c\\x19\\x0f\\x7d\\x2c\\xe8\\xfe\\x0a\\xb8\\ \\xde\\x99\\x02\\x6d\\x57\\x41\\xe6\\x87\\x90\\xe1\\x42\\x97\\x54\\x41\\x71\\x19\\ \\x9d\\x57\\x99\\xf8\\xdf\\x4b\\x41\\x8b\\x2e\\x27\\x16\\xbe\\x06\\x4f\\xc6\\x6b\\ 
\\xe0\\xa9\\x85\\xba\\xc7\\x61\\xf8\\xbb\\x70\\x32\\x8c\\x96\\xb5\\x12\\x49\\x08\\ \\xe2\\x5b\\x1e\\x82\\x77\\xee\\x21\\xce\\x2d\\x1c\\xa2\\x3b\\x3d\\x88\\x62\\xb2\\ \\x95\\xdd\\x5c\\xc6\\xf9\\x9e\\x8d\\xec\\x1b\\xf8\\x15\\xc5\\x5d\\x8a\\xd9\\xfc\\ \\xfd\\x97\\x4c\\xce\\x3c\\x97\\xfd\\xf7\\x1f\\xa7\\xab\\xd9\\x40\\xc3\\x2b\\xb7\\ \\x61\\xac\\xfc\\x3d\\x41\\x57\\x7f\\xdf\\xdd\\x1e\\xe6\\xac\\x05\\x4a\\x4c\\xea\\ \\x17\\x59\\x64\\xee\\xcb\\xc1\\x35\\x0f\\x38\\xde\\x1d\\x54\\x04\\xf4\\x71\\xc2\\ \\xcf\\x20\\x3b\\x00\\x4d\\x3a\\x49\\xfd\\xe7\\x65\\xac\\x4f\\x86\\x1b\\xde\\x05\\ \\xe3\\x89\\x31\\xc4\\x9b\\x5f\\x76\\xac\\x6c\\x22\\x2e\\x8e\\x41\\xff\\x77\\xed\\ \\xbc\\x9f\\x93\\x83\\x36\\x8e\\xc2\\xe0\\x12\\x3a\\xb8\\x80\\x24\\xbe\\xb0\\x83\\ \\xd7\\xcd\\x4b\\x09\\x2f\\x81\\x76\\x13\\x52\\x0c\\xf0\\xec\\x01\\xf3\\x43\\xe0\\", "\\xd9\\x16\\xd4\\xbc\\x1d\\xe9\\x2a\\xbf\\xc4\\x15\\x12\\xf3\\xd5\\x4a\\xab\\x2c\\ \\x8e\\x4b\\xb9\\x23\\xa5\\x67\\x90\\xae\\x2a\\x94\\x58\\xa9\\x06\\xa4\\x20\\x4b\\ \\xed\\xcb\\x0b\\xf6\\x4a\\xbc\\xab\\x38\\xfb\\x55\\x81\\x65\\x4f\\x79\\xf9\\x46\\ \\xed\\x04\\x74\\x9a\\x6a\\x1d\\x24\\xa4\\xad\\x48\\x11\\x3a\\x15\\x34\\x7f\\x94\\ \\xdc\\x53\\x15\\x49\\xdd\\xab\\x36\\xef\\xef\\xfa\\xa2\\xf0\\xa0\\x16\\x76\\x3d\\ \\xa2\\x08\\xeb\\x74\\x77\\x9e\\xe4\\x9a\\xdb\\x9f\\x79\\xc6\\xda\\x14\\xd8\\x38\\ \\x19\\x92\\x8f\\xc1\\xe8\\x30\\xd1\\x2b\\x0d\\x52\\x9e\\xea\\x86\\xf1\\x45\\x02\\ \\x14\\x89\\xc8\\x05\\x89\\x54\\xbd\\xb8\\x8f\\xa5\\x8f\\x07\\xf8\\xfc\\x21\\xe1\\ \\xca\\xb0\\xc5\\x32\\xd9\\x21\\xcd\\xfa\\x20\\x44\\x4a\\x81\\x1b\\xa2\\xf8\\x87\\ \\x9f\\x80\\xd0\\x16\\x12\\x2a\\x4f\\x41\\xd4\\x87\\xd1\\x31\\x02\\xca\\x2c\\x22\\ \\xb3\\x77\\x11\\x9d\\xba\\x12\\xef\\xba\\x18\\x66\\x74\\x24\\xee\\x3e\\x9f\\x41\\ \\x53\\x00\\xb8\\x80\\x28\\x25\\xf8\\x68\\xc6\\x4b\\x39\\x71\\xd2\\x30\\xc8\\xe6\\ \\x77\\xba\\x10\\x26\\xca\\x20\\x76\\xe1\\x3f\\x6f\\x01\\xee\\xd0\\x49\\xde\\xf0\\", "\\xa0\\x64\\x68\\xf4\\xd8\\x20\\xf3\\x3b\\x48\\xf7\\x01\\x4d\\x09\\x10\\xbd\\x10\\ 
\\x32\\x87\\x05\\x09\\xb9\\xcb\\xf0\\x76\\x82\\xe7\\x87\\x72\\x98\\xb7\\x1b\\x6a\\ \\x9f\\x71\\x8e\\x7b\\xa7\\x8d\\x9d\\x8b\\x6a\\x60\\x2c\\xd0\\xe2\\xf4\\x7b\\xb2\\ \\x53\\x40\\xa7\\x43\\x83\\x56\\x04\\xa8\\xc7\\xcf\\x59\\xb2\\x30\\x38\\x8b\\x9f\\ \\x6c\\x4e\\x72\\x3e\\x71\\x92\\x8c\\xf7\\x71\\xa9\\x1b\\x85\\x0c\\xc4\\x5d\\x48\\ \\x0c\\x37\\x50\\xee\\xb4\\xdd\\x2c\\x60\\xf8\\x77\\xc0\\x07\\xb6\\x25\\x22\\x16\\ \\xb5\\x51\\xb0\\xb4\\x88\\x9e\\x1f\\xb6\\xd3\\xd3\\x48\\x00\\xb7\\x0f\\x92\\x5b\\ \\xe1\\xfc\\x5a\\x62\\x33\\xe1\\xf4\\x14\\xfb\\x24\\x3d\\x40\\xe9\\x72\\x70\\xcf\\ \\x4d\\x83\\x53\\xbd\\x21\\x9e\\x47\\x12\\x9d\\xe0\\x09\\x40\\x34\\x19\\x62\\xfd\\ \\x9c\\x9e\\x6a\\x06\\x32\\x81\\xc1\\x50\\x57\\x85\\x7a\\x74\\x80\\x1b\\x8c\\x6c\\ \\xe7\\xad\\x0c\\xc0\\xed\\xc0\\xab\\xdc\\x07\\x64\\xe0\\x61\\x0f\\xd9\\xe4\\x13\\ \\xe7\\x02\\xdc\\x34\\x92\\x4a\\x10\\x94\\x0f\\x74\\xe0\\x36\\x77\\x61\\xf8\\x94\\ \\xa3\\xb1\\xd4\\x13\\x02\\xfa\\x02\\xcf\\x34\\x42\\x97\\xbb\\x80\\xad\\x29\\x30\\", "\\x00\\x6c\\x01\\xf6\\x3e\\x41\\x2d\\xb9\\x44\\xf1\\x60\\x81\\x9d\\x11\\x93\\xca\\ \\x31\\xb7\\xba\\xe1\\x82\\x9f\\x81\\x02\\x03\\xa2\\x99\\x30\\x14\\x0e\\xa4\\xc0\\ \\x13\\xc7\\x21\\x7b\\x01\\x70\\x36\\xcb\\x09\\x43\\x89\\xc4\\x1f\\x28\\x63\\xe1\\ \\x0a\\x9b\\xc9\\x32\\x9d\\x30\\xd5\\xd5\\xe9\\x91\\x46\\xe0\\xc4\\x18\\xd0\\x83\\ \\x16\\x78\\xea\\x1c\\x21\\x1c\\x04\\xa1\\xfb\\xc8\\x9e\\x9f\\x41\\x4d\\x1e\\xc4\\ \\x5e\\x39\\xee\\xa4\\xc2\\x5c\\xb0\\xfa\\x40\\xb5\\xcb\\xce\\x57\\xb6\\x94\\xdb\\ \\x91\\x36\\x06\\xd6\\xc2\\x4b\\x39\\xa1\\x1c\\x2a\\x00\\x11\\x81\\x0b\\xde\\x82\\ \\xad\\x49\\xc4\\xbf\\x7c\\x93\\x0e\\x6e\\x64\\x37\\x06\\xb5\\x18\\x40\\x9c\\xae\\ \\xfc\\x06\\xe1\\x2d\\x98\\x3b\\x3c\\x50\\xb2\\x1a\\x88\\x09\\x12\\x5a\\x38\\x36\\ \\x0b\\xfa\\x06\\xa0\\xe7\\x5c\\xe7\\xc6\\xa6\\x3d\\x13\\xcc\\x3f\\x61\\xc6\\x87\\ \\xbc\\x30\\x3f\\x42\\xa6\\x63\\x2e\\x52\\x9d\\x1b\\x9b\\x06\\x87\\xb5\\xf2\\x1d\\ \\xf2\\x39\\x3b\\x1b\\x98\\x71\\x04\\xdb\\x17\\xec\\x06\\x7a\\x90\\xba\\x60\\x22\\ 
\\x5d\\x57\\x83\\x31\\x59\\x90\\xfe\\x81\\x8d\\x41\\x2b\\x1b\\xfe\\xf4\\xdb\\x4d\\", "\\x1a\\x39\\x05\\x28\\xc5\\x01\\x5e\\x21\\xec\\xbd\\x15\\xe3\\xb1\\x77\\xf1\\x2c\\ \\x0a\\x63\\xd4\\x00\\x87\\x46\\x3b\\x1e\\x6c\\x34\\x7c\\xeb\\xc7\\xf5\\xaf\\x4f\\ \\xed\\xd3\\xa8\\x79\\x9d\\x18\\x93\\x70\\xd7\\x1d\\x83\\xdd\\x83\\xa0\\xc0\\x24\\ \\xb6\\x6e\\x31\\x01\\x6e\\xc6\\x04\\x92\\x38\\x82\\xcb\\x5c\\x08\\x66\\x14\\xd6\\ \\x16\\x51\\x5b\\xbe\\x8d\\x72\\x32\\xd8\\x88\\x97\\x29\\x40\\xbf\\xba\\x7d\\x98\\ \\x0d\\x09\\x10\\x6f\\x05\\x8e\\x02\\x9e\\x10\\xc4\\x60\\x67\\xa9\\x9d\\xfd\\xaf\\ \\xde\\x0d\\xe6\\x4a\\x0f\\x58\\x3d\\x1d\\x90\\x95\\x13\\x9b\\xf6\\x2d\\x1f\\xbd\\ \\x15\\xe4\\x88\\xdf\\x2e\\xa0\\x15\\x38\\x0b\\x1c\\x2f\\x82\\xd0\\x5c\\xa0\\xf0\\ \\x57\\x60\\x9d\\xcd\\x63\\x1a\\x0a\\xcb\\x06\\xd0\\x50\\x01\\x91\\x81\\x40\\xce\\ \\x61\\xe7\\x39\\xb9\\xd0\\x39\\xc8\\xc6\\x6e\\x04\\xc0\\xc0\\xcd\\x69\\x3b\\x68\\ \\xe5\\x00\\x47\\x52\\x69\\x89\\x5e\\x4e\\x10\\x48\\x21\\x88\\xcb\\x58\\x08\\xbe\\ \\x00\\x6c\\x01\\xf6\\x3e\\x41\\x2d\\xb9\\x44\\xf1\\x60\\x81\\x9d\\x11\\x93\\xca\\ \\x31\\xb7\\xba\\xe1\\x82\\x9f\\x81\\x02\\x03\\xa2\\x99\\x30\\x14\\x0e\\xa4\\xc0\\", "\\x2c\\xf8\\x4f\\x7f\\x68\\x1e\\x00\\x1d\\xbd\\xe0\\x60\\x0a\\x08\\xb2\\x42\\x70\\ \\x3e\\xb0\\x28\\x0c\\x49\\x19\\x10\\x9e\\x0a\\x34\\x74\\x03\\x6e\\x82\\xf8\\xc3\\ \\xd0\\xf2\\x02\\x7d\\x2f\\x99\\xc1\\xa8\\x45\\x09\\x14\\xd7\\x42\\x5a\\x38\\x0e\\ \\x1d\\x6d\\x44\\x18\\x88\\xf7\\xb7\\x97\\x30\\x27\\x7e\\x47\\xa8\\xde\\xc0\\xb7\\ \\x29\\x87\\xd6\\xa3\\x23\\xd0\\xa9\\x2c\\x5c\\x8f\\x4d\\xc0\\x38\\xe4\\xa1\\xf7\\ \\xeb\\x29\\xf0\\x99\\x8b\\xe3\\xea\\x46\\x60\\xed\\x83\\x64\\x86\\xfb\\x91\\x1c\\ \\x4d\\xa7\\xa6\\xc5\\xa0\\xe0\\x64\\x22\\x93\\xdf\\x9a\\x8b\\x59\\x19\\x8c\\x92\\ \\xb8\\x0d\\xc8\\x81\\x78\\x14\\xc2\\x64\\x91\\x91\\x68\\xc2\\x56\\x17\\xb8\\xce\\ \\x62\\x8d\\x83\\x0d\\x37\\xc1\\x68\\x82\\xdc\\x70\\x77\\x02\\x2c\\xcc\\x05\\xf2\\ \\xc1\\x30\\xe0\\x9c\\xcd\\x1c\\x7b\\xb3\\x8d\\xde\\xcf\\xc2\\xae\\x3e\\xb0\\x0b\\ 
\\x88\\xb9\\xc1\\x9d\\x03\\x81\\x81\\xc0\\xd3\\xfb\\xc1\\x3c\\x03\\xec\\x43\\x44\\ \\x11\\xb3\\x49\\x9a\\xf3\\x24\\x7c\\x9c\\x45\\x6c\\x8c\\xa0\\xcb\\xef\\xb8\\xe8\\ \\x0a\\x94\\x63\\x1a\\x3e\\x7c\\x5d\\x93\\xd1\\xf8\\x16\\xd2\\x9a\\x9b\\x70\\x75\\", "\\x43\\xa8\\x14\\xe8\\x05\\xae\\xc3\\x30\\xb1\\x8c\\xda\\xd7\\xa2\\xfc\\xd6\\xcb\\ \\xee\\xcf\\x7c\\xa7\\x80\\x11\\x0d\\x90\\xf5\\x36\\xf0\\x41\\x3e\\x9c\\xfc\\xbb\\ \\xb3\\x78\\xb7\\xf3\\x37\\x08\\x29\\xef\\x40\\x7b\\x8a\\x53\\x72\\x7f\\xe0\\x38\\ \\xe2\\x7a\\x0c\\x42\\xb6\\xbe\\xf8\\x1f\\x86\\x60\\x0c\\xb8\\x11\\xc8\\x43\\x0c\\ \\x25\\x46\\x04\\x8f\\x71\\x2d\\xf4\\xad\\x27\\x7c\\xf8\\x67\\x5a\\xc8\\x23\\x86\\ \\xa8\\xc5\\xc0\\xed\\xde\\xce\\xe0\\x41\\xf7\\x81\\x3e\\x46\\x65\\x01\\x14\\x38\\ \\x81\\x74\\x1c\\x6d\\x17\\xea\\xfc\\x1a\\x29\\x0d\\x69\\x80\\x5b\\x07\\x4e\\xa1\\ \\xfb\\x85\\xaa\\xca\\x90\\xce\\xc3\\x0e\\x58\\x8c\\x95\\x98\\x29\\x65\\x75\\x51\\ \\x60\\x17\\xfa\\x40\\x68\\x95\\x50\\x5d\\x25\\xd2\\x39\\xa9\\x12\\xe3\\x24\\x1e\\ \\x96\\x78\\x59\\xe2\\xff\\x6c\\x07\\x3c\\x7c\\xac\\xf4\\x32\\x52\\xde\\x60\\x89\\ \\x47\\x25\\x5e\\x93\\xf2\\xc6\\x49\\xfe\\xcb\\x24\\x96\\x4b\\x3c\\x23\\xf1\\x93\\ \\x94\\x76\\x8f\\x64\\x0e\\x74\\x2c\\xfe\\xcb\\x12\\xdf\\xe8\\x18\\x52\\x9c\\x46\\ \\x29\\x65\\x9c\\x54\\x92\\x24\\xb1\\x54\\xad\\x9c\\x50\\x35\\x31\\xad\\x45\\x6a\\", "\\x02\\x38\\x06\\x04\\x60\\xdf\\x38\\x4c\\xd3\\x0b\\x7d\\x77\\x03\\xc2\\x45\\x3b\\ \\x66\\xa0\\x1f\\x84\\x3a\\xf1\\x70\\x9c\\x46\\x92\\x88\\x64\\x9c\\x02\\xab\\x15\\ \\xd1\\x04\\xf1\\x54\\x0a\\x4f\\x0f\\x85\\x13\\x82\\xba\\x22\\x16\\x1d\\x9b\\x42\\ \\x63\\xd5\\xc5\\xb4\\x9b\\x01\\xc2\\xed\\x43\\xe1\\x93\\x76\\xd4\\xb1\\x12\\xe9\\ \\x69\\x74\\xa8\\x19\\x6d\\xaf\\x40\\xba\\x18\\x59\\x43\\xd0\\xd3\\x21\\x54\\xbf\\ \\x18\\xc9\\x9f\\x2a\\xb9\\xfc\\x52\\x8a\\xa9\\x8a\\x15\\x86\\xae\\x10\\xfa\\x57\\ \\x0c\\x35\\xcd\\x46\\x56\\x4f\\xa4\\x81\\x09\\x52\\x41\\x9a\\xa2\\x8b\\xd0\\x71\\ \\xa1\\xf8\\xcd\\x3e\\x89\\xc7\\x25\\x56\\x4a\\x3d\\x67\\x4b\\xcb\\x90\\x36\\x21\\ 
\\xdd\\xe4\\x96\\x98\\x22\\xf1\\x9e\\xc4\\x42\\x89\\xc9\\xd2\\xa7\\x48\\x2c\\x95\\ \\x28\\x73\\xe0\\x85\\x2c\\x5e\\x56\\x05\\x96\\xda\\x78\\x45\\x4a\\x19\\xae\\x06\\ \\xa4\\x2a\\x1a\\x14\\x4b\\xb9\\x43\\xba\\x20\\x59\\x61\\xe3\\x65\\xfd\\xb3\\x58\\ \\x7a\\xce\\x1f\\xd6\\xdb\\xee\\x23\\xda\\x53\\x34\\x5f\\x9c\\xe8\\x40\\x0d\\xfb\\ \\x90\\x1e\\x42\\x7f\\xb6\\x23\\x6b\\x13\\xd2\\xf9\\xa6\\xc2\\xff\\x41\\x95\\xeb\\", "All changes made in this file will be lost! # from PyQt5 import", "\\xbb\\xac\\x74\\x83\\xcd\\x54\\x64\\x43\\xef\\x03\\x70\\xfe\\x59\\xe2\\xb4\\x12\\ \\x66\\x02\\x8c\\xf8\\x89\\xac\\xcc\\x7b\\x71\\x11\\x63\\x1f\\x59\\xd4\\xb4\\x6f\\ \\x87\\xce\\x0c\\x78\\xe0\\x1d\\xba\\x78\\xdf\\x60\\x32\\x27\\xed\\xaf\\xea\\x2c\\ \\x7a\\x44\\xa8\\x79\\x0b\\xd2\\x6c\\xa4\\xbb\\xd1\\x67\\x31\\xb4\\x40\\x48\\xaf\\ \\x21\\xf5\\x44\\x32\\x0b\\xa5\\x9b\\x4c\\xad\\xb0\\xd0\\xb3\\x42\\x2b\\x85\\x1a\\ \\xf6\\x20\\xbd\\x88\\xda\\x8f\\xa1\\x4f\\x84\\x76\\x75\\x22\\xcd\\x44\\x1a\\x8d\\ \\x74\\x77\\x92\\x6a\\x8e\\xd8\\x01\\xcb\\x9a\\xed\\x95\\xdc\\xa5\\x12\\xb3\\x24\\ \\x5e\\xb7\\x21\\xf7\\x86\\x5b\\x9a\\xd4\\xdd\\xce\\xf4\\xec\\x95\\xa6\\xb9\\xa5\\ \\xbc\\xa1\\xb2\\x08\\x48\\x7d\\xc7\\x4b\\xf3\\x3d\\x92\\xb1\\x51\\x41\\x24\\x15\\ \\x0c\\x94\\x6e\\xf1\\xaa\\xd1\\xa8\\xd3\\x7e\\xa4\\xbd\\xc8\\x4e\\x9b\\xf7\\x64\\ \\x49\\x77\\x23\\x0d\\xef\\xa3\\x33\\x54\\xca\\x0c\\xb7\\x9b\\x04\\x01\\xcb\\xeb\\ \\x58\\x9f\\x6f\\x0d\\x4a\\xf6\\xc0\\x0a\\xa0\\x75\\xa2\\x23\\xc4\\x78\\xe1\\x93\\ \\x3c\\xa6\\xfd\\xdb\\x66\\xa9\\xe3\\xc0\\x67\\xe7\\xc2\\x2f\\x4f\\xc2\\xbe\\x12\\", "\\xf1\\x58\\x1b\\x7c\\x9b\\xe1\\x38\\xe7\\x3a\\xb8\\xf8\\xbf\\x50\\x93\\xe8\\x10\\ \\x76\\x13\\x24\\xc8\\x76\\x2f\\x29\\x7b\\xc8\\xa0\\x02\\xe2\\xd7\\xc3\\x0f\\x17\\ \\x62\\x5e\\x3e\\x8b\\x62\\x3e\\xa0\\x17\\x10\\xc1\\xe4\\x20\\x43\\x09\\xad\\xdb\\ \\x08\\xcd\\x26\\x8c\\x01\\x33\\xa1\\x4c\\x8c\\x05\\x76\\x25\\x3a\\x8a\\x5e\\x0c\\ \\xbe\\xcd\\xf0\\x78\\x15\\x6c\\x04\\x0e\\xdd\\x08\\x64\\x05\\x6c\\xb5\\x68\\xea\\ 
\\x81\\xe7\\x49\\x17\\x33\\xd7\\xc3\\x20\\xcb\\x76\\xcc\\x3e\\xa7\\x10\\x0f\\x10\\ \\x01\\x86\\x1e\\x87\\x8c\\xcb\\xdc\\x70\\xf4\\xaf\\xe0\\xeb\\x05\\x1a\\xe1\\xf2\\ \\xfd\\xe8\\xe5\\x0e\\xfc\\x2f\\x02\\xdb\\x6f\\x03\\xae\\xb0\\xe9\\x76\\x64\\x3d\\ \\x54\\x25\\x3a\\x2a\\x6f\\xc1\\x59\\xcb\\x66\\xea\\xdc\\x1d\\x58\\x18\\x74\\xd0\\ \\x07\\x6a\\x27\\x41\\x69\\x8c\\x94\\x21\\x8f\\x30\\x90\\x1a\\x92\\x9c\\xdf\\x5c\\ \\x1b\\xef\\x43\\x78\\xdd\\x2a\\xa8\\xc9\\xc5\\xc4\\x65\\x92\\x07\\x18\\x09\\xb6\\ \\xa2\\x33\\xd4\\xb6\\xb1\\x05\\x6b\\xed\\x84\\x5d\\x56\\x0a\\x4c\\x32\\x00\\xc0\\ \\x3a\\x0f\\xaa\\xc6\\x90\\xfc\\x9c\\xcd\\x03\\x38\\x11\\x20\\xd7\\xb1\\x80\\x03\\", "\\xf0\\xd5\\xb4\\x7d\\xd0\\x48\\x3c\\x03\\x8c\\x4f\\x81\\xf6\\x5c\\xe0\\x24\\x06\\ \\xeb\\x60\\xfa\\xff\\xa0\\x17\\x50\\x59\\x07\\xfc\\x66\\x7b\\xf0\\xdc\\xd3\\xb6\\ \\xb5\\xca\\xe9\\xc0\\xc7\\x5e\\x52\\xc1\\x16\\xe1\\x8f\\x53\\xed\\xbd\\xc9\\x59\\ \\x4c\\x8e\\xf1\\x23\\x7e\\x5c\\x00\\x24\\xe2\\x82\\xce\\xcb\\x70\\xd5\\xbe\\xc8\\ \\xbc\\x20\\x50\\x09\\x1c\\x48\\x80\\xd6\\x12\\xd8\\x3d\\x1c\\x0e\\xe6\\xdb\\x4b\\ \\x31\\x80\\xb2\\x64\\x68\\xba\\x06\\x7a\\x8c\\x6b\\x23\\x3a\\xa0\\x82\\xf8\\x8c\\ \\x08\\x47\\xc7\\x42\\xb3\\xdb\\x5e\\xb2\\xdf\\xb6\\x87\\xe4\\x1b\\xe0\\x2b\\x8b\\ \\x42\\xc3\\x29\\x48\\xaa\\x80\\xd1\\x95\\x04\\xdf\\x0b\\x53\\x57\\x0c\\x1e\\x13\\ \\x12\\xf7\\x00\\x3f\\xb9\\x81\\x1d\\x90\\xf6\\x13\\xfc\\xb7\\xda\\x86\\xf3\\x91\\ \\x20\\x1c\\xfc\\x1d\\xac\\x13\\x70\\xd9\\x2f\\x30\\x0a\\x70\\x45\\xe1\\xfb\\x10\\ \\x1e\\x26\\x03\\x6d\\x18\\xfe\\x15\\x50\\xd0\\x08\\xa5\\x11\\x4c\\xef\\x1a\\x5c\\ \\x15\\xd3\\xa9\\x26\\x8b\\x0c\\x3c\\x64\\xf0\\x2b\\xc6\\x34\\xa1\\x98\\xd3\\xe7\\ \\x25\\x0e\\x54\\x5a\\x1c\\x93\\x5d\\xef\\xc0\\x26\\xc1\\x69\\xe6\\xae\\x4e\\x4b\\", "\\x60\\x17\\xfa\\x40\\x68\\x95\\x50\\x5d\\x25\\xd2\\x39\\xa9\\x12\\xe3\\x24\\x1e\\ \\x96\\x78\\x59\\xe2\\xff\\x6c\\x07\\x3c\\x7c\\xac\\xf4\\x32\\x52\\xde\\x60\\x89\\ \\x47\\x25\\x5e\\x93\\xf2\\xc6\\x49\\xfe\\xcb\\x24\\x96\\x4b\\x3c\\x23\\xf1\\x93\\ 
\\x94\\x76\\x8f\\x64\\x0e\\x74\\x2c\\xfe\\xcb\\x12\\xdf\\xe8\\x18\\x52\\x9c\\x46\\ \\x29\\x65\\x9c\\x54\\x92\\x24\\xb1\\x54\\xad\\x9c\\x50\\x35\\x31\\xad\\x45\\x6a\\ \\x20\\x2e\\x93\\xad\\xd0\\x1e\\x87\\xa4\\xa3\\xf6\\x49\\xe4\\xd4\\xc1\\x67\\x57\\ \\x02\\x8f\\x03\\xc7\\x0b\\x18\\xd0\\x2f\\x9f\\xe9\\x47\\xe0\\xa5\\xde\\xf0\\xc7\\ \\x3a\\x60\\x4c\\xf8\\x2f\\xb5\\x80\\x40\\x1b\\x49\\x6b\\xa1\\xaf\\x20\\x2f\\x0a\\ \\x39\\xb7\\x02\\xfb\\x66\\x01\\xf7\\x39\\xb2\\x1c\\x85\\xe4\\x23\\xe8\\xa3\\x65\\ \\xf0\\xf0\\x4f\\xf0\\xc2\\x30\\xa8\\x9d\\x65\\xbf\\x97\\xbf\\x0e\\xd2\\x76\\x42\\ \\xf0\\x5e\\x70\\x1d\\x87\\x82\\x57\\x6c\\x80\\x06\\x93\\x20\\x2b\\x44\\x8c\\xbe\\ \\x88\\x5c\\x70\\x2f\\x24\\x9d\\x3a\\x76\\x91\\x49\\x53\\xfb\\x67\\x50\\x97\\x06\\ \\xa3\\xee\\x23\\xd5\\x5c\\x00\\xec\\xe0\\x24\\xb0\\x18\\x13\\x53\\x33\\xe1\\x50\\", "file will be lost! # from PyQt5 import QtCore from silx.gui import qt", "\\x18\\xb5\\x24\\xd0\\x8c\\x41\\x3a\\xb3\\x39\\x0c\\x84\\x28\\xa1\\x37\\x70\\x15\\ \\xe0\\x6e\\xc0\\xc4\\x87\\x45\\xcc\\x39\\x91\\x62\\x20\\x25\\xe8\\x3c\\x34\\x05\\ \\x88\\x79\\xc0\\xf4\\xc2\\xe8\\x36\\x22\\xb7\\x59\\x54\\x03\\x1d\\x06\\xb8\\xbd\\ \\xa0\\x64\\x68\\xf4\\xd8\\x20\\xf3\\x3b\\x48\\xf7\\x01\\x4d\\x09\\x10\\xbd\\x10\\ \\x32\\x87\\x05\\x09\\xb9\\xcb\\xf0\\x76\\x82\\xe7\\x87\\x72\\x98\\xb7\\x1b\\x6a\\ \\x9f\\x71\\x8e\\x7b\\xa7\\x8d\\x9d\\x8b\\x6a\\x60\\x2c\\xd0\\xe2\\xf4\\x7b\\xb2\\ \\x53\\x40\\xa7\\x43\\x83\\x56\\x04\\xa8\\xc7\\xcf\\x59\\xb2\\x30\\x38\\x8b\\x9f\\ \\x6c\\x4e\\x72\\x3e\\x71\\x92\\x8c\\xf7\\x71\\xa9\\x1b\\x85\\x0c\\xc4\\x5d\\x48\\ \\x0c\\x37\\x50\\xee\\xb4\\xdd\\x2c\\x60\\xf8\\x77\\xc0\\x07\\xb6\\x25\\x22\\x16\\ \\xb5\\x51\\xb0\\xb4\\x88\\x9e\\x1f\\xb6\\xd3\\xd3\\x48\\x00\\xb7\\x0f\\x92\\x5b\\ \\xe1\\xfc\\x5a\\x62\\x33\\xe1\\xf4\\x14\\xfb\\x24\\x3d\\x40\\xe9\\x72\\x70\\xcf\\ \\x4d\\x83\\x53\\xbd\\x21\\x9e\\x47\\x12\\x9d\\xe0\\x09\\x40\\x34\\x19\\x62\\xfd\\ \\x9c\\x9e\\x6a\\x06\\x32\\x81\\xc1\\x50\\x57\\x85\\x7a\\x74\\x80\\x1b\\x8c\\x6c\\", 
"\\x00\\x00\\x00\\x06\\x62\\x4b\\x47\\x44\\x00\\xff\\x00\\xff\\x00\\xff\\xa0\\xbd\\ \\xa7\\x93\\x00\\x00\\x00\\x09\\x70\\x48\\x59\\x73\\x00\\x00\\x0b\\x13\\x00\\x00\\ \\x0b\\x13\\x01\\x00\\x9a\\x9c\\x18\\x00\\x00\\x00\\x07\\x74\\x49\\x4d\\x45\\x07\\ \\xe3\\x01\\x1e\\x09\\x34\\x07\\xfb\\x3d\\x97\\x4b\\x00\\x00\\x00\\x1d\\x69\\x54\\ \\x58\\x74\\x43\\x6f\\x6d\\x6d\\x65\\x6e\\x74\\x00\\x00\\x00\\x00\\x00\\x43\\x72\\ \\x65\\x61\\x74\\x65\\x64\\x20\\x77\\x69\\x74\\x68\\x20\\x47\\x49\\x4d\\x50\\x64\\ \\x2e\\x65\\x07\\x00\\x00\\x18\\xa1\\x49\\x44\\x41\\x54\\x68\\xde\\x35\\x99\\x67\\ \\x7c\\x55\\x55\\xfa\\xb6\\xaf\\xbd\\x4f\\x4b\\x4e\\x7a\\x25\\x09\\x25\\x09\\x09\\ \\x1d\\x44\\x91\\x26\\x8a\\xe8\\x20\\x4a\\x71\\x44\\x01\\xb1\\x2b\\x16\\xb0\\xcc\\ \\x60\\xc1\\xae\\xef\\x08\\x63\\x77\\xfe\\x3a\\xa2\\xa0\\x88\\x0c\\x16\\xac\\x88\\ \\x0a\\xa2\\xa0\\x02\\x4a\\x13\\x51\\x40\\x40\\x7a\\x42\\x4d\\x02\\x49\\x48\\x2f\\ \\x27\\x27\\xa7\\xee\\xfb\\xfd\\xb0\\xb7\\x9f\\xf2\\xcb\\x29\\xfb\\xac\\x67\\xad\\ \\xfb\\xb9\\xcb\\x7a\\x8c\\xbc\\xe8\\x70\\xcd\\xef\\xdc\\xc1\\x75\\x07\\x80\\x10\\", "\\x5d\\x57\\x83\\x31\\x59\\x90\\xfe\\x81\\x8d\\x41\\x2b\\x1b\\xfe\\xf4\\xdb\\x4d\\ \\x4e\\x93\\xc3\\x8b\\x95\\x50\\x0b\\x66\\xe7\\x20\\xba\\x63\\x51\\x08\\x98\\xb9\\ \\x0f\\xc3\\x40\\x37\\xd6\\xca\\x77\\xe8\\xe0\\x16\\xc2\\x24\\x72\\x35\\x01\\x7a\\ \\x02\\x73\\xdc\\xaf\\xe2\\x4e\\x9a\\x00\\xd9\\x41\\x38\\xf8\\x2b\\xd2\\x30\\xa4\\ \\x41\\x48\\x2f\\xdb\\x01\\xa9\\x7d\\x09\\xd2\\x40\\xaf\\xe4\\x2a\\x92\\xe8\\xa7\\ \\xce\\xfb\\x0d\\xbd\\x1e\\x43\\xcb\\x85\\x2a\\xcb\\x50\\xe0\\x15\\x14\\x7e\\xd3\\ \\xd0\\xd7\\x16\\xfa\\xde\\x42\\x91\\xcf\\x91\\x35\\x07\\x35\\x6f\\x40\\xe5\\x42\\ \\x2d\\x9d\\xc8\\x7a\\xd0\\x70\\x02\\xd8\\x42\\x89\\xb7\\xa5\\x82\\x0b\\x14\\xd9\\ \\x87\\x34\\x0b\\x89\\xc5\\xf6\\x08\\xe1\\xda\\x22\\xbb\\xb1\\x79\\x59\\xe2\\x4f\\ \\xc9\\x7b\\x8b\\xf4\\x3c\\xb2\\x78\\x4d\\x47\\x88\\xab\\x95\\x93\\xd2\\x2d\\x48\\ \\x09\\x97\\xc9\\xa2\\x5a\\xe2\\xa8\\xe2\\x74\\xd8\\x73\\x94\\x84\\x71\\xd2\\x84\\ 
\\x54\\x29\\xad\\x44\\x1a\\x93\\x22\\x1a\\xd7\\x20\\x8d\\xb0\\x67\\x22\\x95\\xb5\\ \\xa8\\x7a\\x1f\\x52\\xae\\xcb\\x59\\xc4\\x20\\xb5\\x5d\\xe3\\xd5\\x8f\\xad\\x68\\", "\\x12\\xf7\\x00\\x3f\\xb9\\x81\\x1d\\x90\\xf6\\x13\\xfc\\xb7\\xda\\x86\\xf3\\x91\\ \\x20\\x1c\\xfc\\x1d\\xac\\x13\\x70\\xd9\\x2f\\x30\\x0a\\x70\\x45\\xe1\\xfb\\x10\\ \\x1e\\x26\\x03\\x6d\\x18\\xfe\\x15\\x50\\xd0\\x08\\xa5\\x11\\x4c\\xef\\x1a\\x5c\\ \\x15\\xd3\\xa9\\x26\\x8b\\x0c\\x3c\\x64\\xf0\\x2b\\xc6\\x34\\xa1\\x98\\xd3\\xe7\\ \\x25\\x0e\\x54\\x5a\\x1c\\x93\\x5d\\xef\\xc0\\x26\\xc1\\x69\\xe6\\xae\\x4e\\x4b\\ \\xfe\\x95\\xef\\xb3\\x9d\\xbe\\xf8\\x2b\\xb8\\x02\\x0c\\x88\\x43\\x5e\\x04\\x9a\\ \\x0c\\x30\\x3d\\x90\\x64\\x42\\xd0\\x70\\xd2\\x6b\\x1c\\x5c\\x07\\xc0\\xac\\xb0\\ \\xe1\\x6b\\x0d\\x00\\xb3\\xd5\\x89\\xd4\\x01\\x87\\x92\\xba\\x3b\\x8b\\x71\\x3b\\ \\xff\\x6f\\xea\\x03\\x4d\\x1d\\x30\\xa4\\x1a\\x4e\\x5a\\xf6\\xae\\xf5\\x07\\x56\\ \\x26\\x13\\xab\\x7e\\x04\\x77\\x47\\x0d\\x78\\xb7\\x62\\xfc\\xd3\\x42\\x27\\x0d\\ \\xe8\\x16\\x87\\x87\\x4f\\x42\\x61\\x1d\\x94\\xf7\\x87\\x25\\xe9\\x7f\\x99\\x67\\ \\x18\\xdb\\x09\\x17\\x1e\\x06\\x6f\\x1d\\xb8\\x9b\\x0c\\x4e\\x8d\\x16\\x47\\xbb\\ \\xc2\\x19\\xd3\\x2e\\x22\\x0b\\x28\\x8c\\x83\\xab\\x0d\\x7a\\x1d\\x36\\x71\\x9d\\", "\\x14\\x89\\xc8\\x05\\x89\\x54\\xbd\\xb8\\x8f\\xa5\\x8f\\x07\\xf8\\xfc\\x21\\xe1\\ \\xca\\xb0\\xc5\\x32\\xd9\\x21\\xcd\\xfa\\x20\\x44\\x4a\\x81\\x1b\\xa2\\xf8\\x87\\ \\x9f\\x80\\xd0\\x16\\x12\\x2a\\x4f\\x41\\xd4\\x87\\xd1\\x31\\x02\\xca\\x2c\\x22\\ \\xb3\\x77\\x11\\x9d\\xba\\x12\\xef\\xba\\x18\\x66\\x74\\x24\\xee\\x3e\\x9f\\x41\\ \\x53\\x00\\xb8\\x80\\x28\\x25\\xf8\\x68\\xc6\\x4b\\x39\\x71\\xd2\\x30\\xc8\\xe6\\ \\x77\\xba\\x10\\x26\\xca\\x20\\x76\\xe1\\x3f\\x6f\\x01\\xee\\xd0\\x49\\xde\\xf0\\ \\xcd\\x23\\xd2\\x69\\xb1\\xc2\\x5f\\xcc\\x65\\x9d\\xd5\\x84\\x5b\\x45\\xa3\\xbf\\ \\x81\\x98\\x75\\x02\\xde\\xdf\\x79\\x95\\x46\\x6d\\x7b\\x4f\\xd7\\x1c\\x9a\\xad\\ \\xc0\\x2f\\x48\\x2f\\x21\\x8d\\xf4\\x48\\x5d\\x8b\\xa4\\x3b\\x0d\\x35\\x1c\\xb2\\ 
\\x73\\xfa\\x1b\\x16\\xda\\xd8\\x89\\x82\\xb5\\xa8\\xea\\x18\\x7a\\xc1\\x42\\xcf\\ \\x0b\\xad\\x13\\xfa\\x3d\\x8c\\xe2\\x9b\\x0d\\x75\\x9c\\x34\\xa4\\xa3\\xc8\\x6a\\ \\xb4\\x19\\x50\\x05\\xdd\\x25\\x63\\x92\\xc4\\x08\\x89\\x19\\xd2\\xf8\\x02\\xe9\\ \\xff\\x21\\x79\\xee\\x90\\x8c\\xcf\\x25\\xe3\\x4a\\xbb\\x7f\\x8c\\x6a\\xc9\\x78\\", "\\x61\\xac\\xfc\\x3d\\x41\\x57\\x7f\\xdf\\xdd\\x1e\\xe6\\xac\\x05\\x4a\\x4c\\xea\\ \\x17\\x59\\x64\\xee\\xcb\\xc1\\x35\\x0f\\x38\\xde\\x1d\\x54\\x04\\xf4\\x71\\xc2\\ \\xcf\\x20\\x3b\\x00\\x4d\\x3a\\x49\\xfd\\xe7\\x65\\xac\\x4f\\x86\\x1b\\xde\\x05\\ \\xe3\\x89\\x31\\xc4\\x9b\\x5f\\x76\\xac\\x6c\\x22\\x2e\\x8e\\x41\\xff\\x77\\xed\\ \\xbc\\x9f\\x93\\x83\\x36\\x8e\\xc2\\xe0\\x12\\x3a\\xb8\\x80\\x24\\xbe\\xb0\\x83\\ \\xd7\\xcd\\x4b\\x09\\x2f\\x81\\x76\\x13\\x52\\x0c\\xf0\\xec\\x01\\xf3\\x43\\xe0\\ \\xb7\\xf1\\x54\\xec\\xfb\\x37\\xf9\\xb1\\x12\\x02\\x34\\x93\\xe9\\xdf\\x0d\\xe6\\ \\xd7\\x90\\x60\\xc1\\xed\\x2b\\xa8\\x5e\\xf7\\x22\\x4b\\x0a\\x07\\x91\\x5b\\xe6\\ \\xa1\\x3c\\x2d\\xc6\\x4b\\x67\\x42\\xb8\\xae\\x6e\\x7e\\x73\\x5e\\x72\\xaf\\x6d\\ \\xa4\\xbf\\xdf\\x1b\\x7a\\xf9\\x61\\xdb\\x34\\x92\\x7e\\x18\\x86\\xf9\\x51\\x09\\ \\x34\\x8c\\x04\\xa6\\xdb\\x73\\x0c\\x8e\\x42\\xd1\\x01\\xc8\\xfc\\x9e\\xc8\\xa4\\ \\xa3\\x54\\x4e\\x6f\\x64\\x76\\x29\\xdc\\x0e\\x64\\xee\\x04\\xea\\xea\\x31\\x6b\\ \\x4e\\x60\\x5a\\x51\\x4c\\xd7\\x6f\\xa0\\x6e\\x50\\x6f\\x40\\xdd\\x5d\\x58\\xa7\\", "\\x05\\x16\\x79\\x30\\x82\\x83\\x70\\xd3\\x08\\xc9\\x95\\xd0\\x91\\xe8\\x14\\x60\\ \\x02\\xe9\\xf6\\x8e\\xfa\\x0e\\x50\\x7b\\x67\\x88\\x46\\x20\\x94\\x05\\x89\\x7d\\ \\xa3\\x50\\xd3\\xe2\\x7c\\xae\\x0b\\x60\\x80\\x4a\\xe0\\xf8\\x60\\xdc\\xcf\\x54\\ \\xd9\\x4d\\x45\\x1c\\xf8\\xc2\\xfe\\x21\\xcf\\x09\\x98\\xf9\\x13\\x5c\\xe9\\x3c\\ \\x36\\xd9\\xf9\\xea\\x70\\xc0\\xb7\\x06\\xf7\\xba\\xc5\\x0c\\xe6\\x01\\xd2\\x71\\ \\x93\\x42\\x94\\x44\\x0e\\x63\\x31\\x91\\xfa\\x9a\\x67\\x68\\xe7\\x26\\x16\\x58\\ \\xc9\\xb8\\x5d\\xce\\x77\\xe5\\x34\\xea\\x21\\x60\\x7b\\x29\\x8c\\xbd\\x0c\\xc8\\ 
\\x05\\xd6\\x47\\xa1\\xf2\\x28\\x14\\xc3\\xe9\\x3b\\x0c\\x62\\x45\\xb5\\xc4\\x32\\ \\x6a\\x09\\xf9\\x21\\xec\\x03\\x9f\\x0f\\x3c\\xfd\\xa0\\xc6\\x03\\x41\\xa0\\x3c\\ \\x0f\\x0a\\xbf\\x12\\xe9\\x5b\\x1a\\x61\\xc3\\x17\\xf0\\xe7\\xaf\\xd0\\x2c\\xa8\\ \\x2e\\x80\\xa6\\x49\\xc0\\x14\\xec\\x4f\\x36\\x00\\x27\\x81\\xef\\x60\\x76\\x0d\\ \\xfc\\xd3\\x81\\x45\\x14\\x38\\x88\\xcd\\x1e\\x06\\xe0\\x8b\\x62\\x10\\x26\\x8f\\ \\x18\\xb5\\x24\\xd0\\x8c\\x41\\x3a\\xb3\\x39\\x0c\\x84\\x28\\xa1\\x37\\x70\\x15\\", "\\x18\\x68\\x05\\xb2\\x80\\x1e\\xc0\\x66\\x48\\xff\\x11\\x46\\xee\\x04\\x3f\\xf0\\ \\xdd\\xe5\\x28\\xf2\\x36\\x27\\x29\\xc1\\x02\\x0a\\x68\\x04\\xc2\\xb4\\x51\\xc0\\ \\x29\\xa0\\xbb\\x51\\x49\\x81\\xf1\\x14\\x46\\x49\\x03\\xc6\\x45\\x42\\x5d\\x81\\ \\x66\\xc0\\x04\\x6e\\x06\\xa6\\x6e\\x80\\x84\\xc7\\x9d\\xcd\\x99\\x0a\\xca\\x85\\ \\x78\\x1d\\xb8\\xd7\\x02\\x95\\x69\\xd0\\x91\\xe5\\x54\\x98\\x68\\x0b\\x27\\x89\\ \\x58\\x0f\\xfc\\xc0\\xb6\\xf9\\x21\\x2e\\x3a\\x08\\xc6\\x38\\x2f\\xd4\\x74\\x07\\ \\x86\\x01\\x17\\x01\\x67\\xc0\\xa8\\x85\\x9e\\xab\\x88\\xdd\\xd6\\x8c\\xbb\\x05\\ \\xd8\\xe9\\x81\\x5f\\xde\\x06\\x75\\x01\\x0a\\xc1\\x58\\x05\\xd7\\x3e\\x0b\\x97\\ \\xc6\\xed\\x47\\xee\\x02\\xfe\\x04\\x36\\x4f\\x27\\xca\\x62\\x56\\x92\\x4e\\x77\\ \\x1b\\xd8\\xa4\\xb2\\x01\\x1f\\x75\\x98\\xf9\\x8f\\x42\\xcd\\x1c\\x5a\\xcc\\xe1\\ \\xb8\\x83\\x98\\x44\\xb0\\x68\\x02\\x7c\\xc0\\x1e\\xe0\\x9a\\x74\\xa0\\x08\\xa8\\ \\x05\\x16\\x79\\x30\\x82\\x83\\x70\\xd3\\x08\\xc9\\x95\\xd0\\x91\\xe8\\x14\\x60\\ \\x02\\xe9\\xf6\\x8e\\xfa\\x0e\\x50\\x7b\\x67\\x88\\x46\\x20\\x94\\x05\\x89\\x7d\\", "\\x65\\x21\\xdd\\x80\\xc4\\x1d\\x12\\x1f\\x49\\xfc\\x21\\x99\\x9f\\x4b\\x85\\x57\\ \\x4b\\x6b\\x90\\x26\\xba\\x25\\xde\\x95\\xc5\\x59\\x89\\x72\\x69\\x46\\x0f\\xa9\\ \\x70\\x8c\\xe2\\x54\\x2a\\x4a\\x48\\x3a\\xb7\\x97\\xc4\\x1a\\x95\\x63\\x29\\x98\\ \\xfb\\xb0\\xe4\\x9a\\xa5\\x83\\x58\\x6a\\x64\\x9b\\xe4\\x2f\\x52\\x83\\xf7\\x2b\\ \\xad\\xa1\\x55\\xe3\\x86\\x48\\x1f\\x12\\x50\\xa5\\x7b\\xad\\xf4\\xb8\\x21\\x2a\\ 
\\x6a\\xd1\\xce\\x2a\\xa4\\xb5\\x68\\xb3\\xec\\x82\\xac\\x6b\\xd1\\x9a\\x18\\xd2\\ \\x55\\x48\\xd9\\x48\\x69\\x86\\xaa\\x3f\\x44\\xff\\x16\\x3a\\xb5\\xc6\\x94\\x92\\ \\xd2\\x25\\x86\\x38\\x8b\\xbd\\x47\\x32\\x6e\\x56\\xe4\\xca\\x62\\x9d\\x6a\\x41\\ \\x9b\\x84\\x82\\x3f\\x20\\x25\\x96\\x38\\xef\\x7f\\x26\\xb1\\x4c\\xe2\\x5b\\x29\\ \\xfd\\x46\\x69\\x2b\\xd2\\x85\\xa5\\x12\\x6f\\x49\\x6c\\x96\\x78\\x43\\x7a\\xd8\\ \\x23\\xf1\\xaa\\xc4\\x26\\x69\\xd8\\xc5\\x12\\x1f\\xa9\\x91\\x98\\xe4\\xba\\x5d\\ \\x71\\xf6\\xa9\\x81\\xb0\\xc2\\x5c\\x2b\\xb9\\x6e\\xd1\\x7a\\xc2\\xba\\x17\\xe9\\ \\x3b\\x62\\x6a\\xf0\\xbd\\x22\\x0d\\xf5\\xab\\x82\\x45\\x22\\xb2\\x1b\\x55\\xef\\", "\\x52\\x6a\\xc0\\xfc\\xb7\\x1b\\x3e\\x1a\\x01\\x91\\x99\\x40\\x37\\xa0\\x0e\\x92\\ \\x0e\\xc3\\x9d\\xcf\\xc3\\xfb\\xf9\\xd0\\xfe\\x36\\x10\\x83\\x5e\\xf7\\xc3\\xd1\\ \\x6f\\x80\\x0e\\x2c\\x52\\x30\\xe9\\x0e\\xfc\\x8f\\x0a\\x9e\\x24\\x42\\x23\\x99\\ \\x74\\x52\\x46\\x37\\x06\\xf3\\x13\\x49\\x7c\\x03\\xc6\\x25\\x9c\\xd2\\x60\\xd6\\ \\x53\\x42\\x12\\x60\\x54\\xfd\\x82\\xaa\\xfa\\xc2\\xd6\\x4c\\x98\\xbd\\x15\\x12\\ \\xe7\\xb9\\xc0\\x63\\x41\\xc8\\x84\\xac\\x38\\x24\\x00\\xbf\\x00\\x5d\\x81\\x01\\ \\x06\\x8d\\x17\\x19\\x6c\\xb8\\xdc\\xe2\\x54\\x81\\xfd\\x52\\x9f\\xd3\\x90\\x17\\ \\x86\\xf6\\x1c\\xd8\\x92\\x0a\\x85\\xc0\\xc5\\xe5\\xe0\\x8d\\x81\\x2b\\x05\\x94\\ \\x01\\x91\\xd3\\x90\\xb8\\x20\\x17\\xde\\xeb\\x0f\\xa1\\xa1\\xc0\\x40\\xe0\\x0f\\ \\x18\\xbe\\x1c\\xf6\\x0f\\x86\\xce\\x47\\x81\\x1a\\x9b\\x3a\\x69\\x02\\xb6\\x42\\ \\xca\\x6e\\x08\\x05\\x89\\x45\\xd7\\x53\\x8b\\x81\\x17\\x93\\x6c\\x7e\\xc0\\x74\\ \\x7d\\x4c\\x34\\x3e\\x8f\\x43\\x14\\x73\\x12\\x17\\xa9\\x40\\x42\\xfa\\x19\\x8c\\ \\x52\\x19\\xca\\x05\\xba\\x20\\x2e\\x06\\xee\\x3a\\x0c\\xfe\\x76\\x30\\xbf\\x04\\", "\\xc7\\x61\\x25\\xb0\\x22\\x07\\xea\\x9e\\xb0\\x29\\xc8\\xa8\\x05\\x8d\\x27\\x48\\ \\x29\\x1e\\xca\\x70\\x73\\x14\\xa3\\xcf\\xab\\xd0\\xd2\\x0b\\xac\\x54\\x1a\\x1a\\ \\x5e\\xc2\\x68\\x8e\\xa3\\x0a\\x13\\x06\\xaf\\xb7\\xc5\\x96\\xdf\\x6d\\x24\\xd4\\ 
\\x7f\\x0c\\x1b\\x7d\\x30\\xfd\\x59\\xe0\\x95\\x3c\\x9b\\x3e\\x53\\x92\\x60\\xc8\\ \\x4e\\x8e\\x7e\\xda\\xcc\\xa6\\x02\\x1b\\x30\\xa5\\x0e\\xc1\\xa4\\x01\\xed\\x40\\ \\x7f\\xc0\\x72\\x40\\x97\\xb2\\x1d\\xdc\\x73\\xbd\\x18\\xdb\\x87\\x43\\x60\\x18\\ \\xf6\\xf5\\x45\\xa1\\x73\\x5a\\x47\\x1c\\x85\\x1f\\x07\\xb8\\x11\\x11\\x0c\\x63\\ \\x09\\x5c\\xf1\\x5f\\xd8\\x52\\x02\\xc1\\x73\\x81\\x09\\x58\\x4c\\xc4\\x34\\x1f\\ \\x02\\xb7\\x0b\\x2b\\xf2\\x10\\x15\\xf4\\x27\\x07\\x51\\xe5\\x3a\\x4b\\xbf\\xd4\\ \\x67\\x20\\xb0\\x0d\\x3c\\x60\\xe8\\x35\\xd4\\x36\\x13\\x52\\xd7\\x39\\xc7\\x9a\\ \\x0a\\xbc\\x66\\x6f\\xc4\\xe6\\x75\\xb0\\x3c\\x03\\x1e\\x5d\\x09\\xc5\\x37\\x26\\ \\x43\\xa8\\x14\\xe8\\x05\\xae\\xc3\\x30\\xb1\\x8c\\xda\\xd7\\xa2\\xfc\\xd6\\xcb\\ \\xee\\xcf\\x7c\\xa7\\x80\\x11\\x0d\\x90\\xf5\\x36\\xf0\\x41\\x3e\\x9c\\xfc\\xbb\\", "The Resource Compiler for PyQt5 (Qt v5.12.5) # # WARNING! All changes made", "\\x26\\xbe\\x7d\\x97\\x62\\xb5\\xe7\\xa3\\x59\\x3f\\x62\\xae\\xf6\\xf3\\xf6\\xab\\ \\x11\\xae\\x5f\\x94\\x8f\\x7b\\xe5\\x85\\x7c\\x78\\xaf\\x9f\\xa9\\x89\\xaf\\x52\\ \\x7b\\xf6\\x3a\\x1a\\xc9\\x67\\xe0\\xb7\\x43\\xd9\\xfe\\xf7\\xcf\\x30\\xbf\\x6b\\ \\xde\\xcf\\xdf\\x6a\\x81\\xb9\\x40\\x03\\x6c\\xe9\\x7a\\x88\\x7f\\x54\\x47\\x71\\ \\x7d\\x0d\\x9c\\xee\\x0f\\x87\\xa1\\x79\\x3f\\x1c\\xc7\\xcf\\x50\\x60\\xc6\\xbf\\ \\x2c\\xf8\\x4f\\x7f\\x68\\x1e\\x00\\x1d\\xbd\\xe0\\x60\\x0a\\x08\\xb2\\x42\\x70\\ \\x3e\\xb0\\x28\\x0c\\x49\\x19\\x10\\x9e\\x0a\\x34\\x74\\x03\\x6e\\x82\\xf8\\xc3\\ \\xd0\\xf2\\x02\\x7d\\x2f\\x99\\xc1\\xa8\\x45\\x09\\x14\\xd7\\x42\\x5a\\x38\\x0e\\ \\x1d\\x6d\\x44\\x18\\x88\\xf7\\xb7\\x97\\x30\\x27\\x7e\\x47\\xa8\\xde\\xc0\\xb7\\ \\x29\\x87\\xd6\\xa3\\x23\\xd0\\xa9\\x2c\\x5c\\x8f\\x4d\\xc0\\x38\\xe4\\xa1\\xf7\\ \\xeb\\x29\\xf0\\x99\\x8b\\xe3\\xea\\x46\\x60\\xed\\x83\\x64\\x86\\xfb\\x91\\x1c\\ \\x4d\\xa7\\xa6\\xc5\\xa0\\xe0\\x64\\x22\\x93\\xdf\\x9a\\x8b\\x59\\x19\\x8c\\x92\\ \\xb8\\x0d\\xc8\\x81\\x78\\x14\\xc2\\x64\\x91\\x91\\x68\\xc2\\x56\\x17\\xb8\\xce\\", 
"\\x46\\x57\\x60\\xf4\\xb4\\x20\\xd5\\xd6\\x4b\\x92\\x81\\x6d\\xc0\\xd2\\x12\\x08\\ \\x3c\\xe7\\x54\\xe6\\xc3\\xcd\\x29\\x22\\x18\\x94\\xf2\\x3d\\x09\\xf8\\x68\\x24\\ \\x15\\xe1\\xe6\\x00\\x60\\x3c\\x2e\\x34\\x06\\x98\\xb0\\xd7\\xc1\\xe9\\x61\\x60\\ \\x08\\x34\\x64\\x40\\xd6\\x63\\x60\\xec\\x02\\xfc\\x49\\x58\\x53\\x23\\xec\\xb8\\ \\x39\\xca\\xee\\x7e\\x10\\x31\\xe1\\xbc\\x6a\\x28\\xfd\\x15\\x92\\xc3\\x70\\x70\\ \\x3a\\x54\\x7b\\xa0\\x67\\x08\\x06\\x6c\\x00\\xef\\xcf\\x6e\\x62\\xbd\\x5d\\x44\\ \\x0b\\xc3\\x44\\x4a\\x21\\xad\\x19\\x8c\\x9f\\xbc\\xf0\\x4e\\x1a\\x54\\x5e\\x65\\ \\x0b\\x13\\x5d\\x9c\\x86\\xef\\x07\\xde\\xb7\\x20\\xfb\\x10\\x3c\\xbc\\x0d\\x06\\ \\xc7\\x61\\x25\\xb0\\x22\\x07\\xea\\x9e\\xb0\\x29\\xc8\\xa8\\x05\\x8d\\x27\\x48\\ \\x29\\x1e\\xca\\x70\\x73\\x14\\xa3\\xcf\\xab\\xd0\\xd2\\x0b\\xac\\x54\\x1a\\x1a\\ \\x5e\\xc2\\x68\\x8e\\xa3\\x0a\\x13\\x06\\xaf\\xb7\\xc5\\x96\\xdf\\x6d\\x24\\xd4\\ \\x7f\\x0c\\x1b\\x7d\\x30\\xfd\\x59\\xe0\\x95\\x3c\\x9b\\x3e\\x53\\x92\\x60\\xc8\\ \\x4e\\x8e\\x7e\\xda\\xcc\\xa6\\x02\\x1b\\x30\\xa5\\x0e\\xc1\\xa4\\x01\\xed\\x40\\", "\\x08\\x34\\x64\\x40\\xd6\\x63\\x60\\xec\\x02\\xfc\\x49\\x58\\x53\\x23\\xec\\xb8\\ \\x39\\xca\\xee\\x7e\\x10\\x31\\xe1\\xbc\\x6a\\x28\\xfd\\x15\\x92\\xc3\\x70\\x70\\ \\x3a\\x54\\x7b\\xa0\\x67\\x08\\x06\\x6c\\x00\\xef\\xcf\\x6e\\x62\\xbd\\x5d\\x44\\ \\x0b\\xc3\\x44\\x4a\\x21\\xad\\x19\\x8c\\x9f\\xbc\\xf0\\x4e\\x1a\\x54\\x5e\\x65\\ \\x0b\\x13\\x5d\\x9c\\x86\\xef\\x07\\xde\\xb7\\x20\\xfb\\x10\\x3c\\xbc\\x0d\\x06\\ \\xc7\\x61\\x25\\xb0\\x22\\x07\\xea\\x9e\\xb0\\x29\\xc8\\xa8\\x05\\x8d\\x27\\x48\\ \\x29\\x1e\\xca\\x70\\x73\\x14\\xa3\\xcf\\xab\\xd0\\xd2\\x0b\\xac\\x54\\x1a\\x1a\\ \\x5e\\xc2\\x68\\x8e\\xa3\\x0a\\x13\\x06\\xaf\\xb7\\xc5\\x96\\xdf\\x6d\\x24\\xd4\\ \\x7f\\x0c\\x1b\\x7d\\x30\\xfd\\x59\\xe0\\x95\\x3c\\x9b\\x3e\\x53\\x92\\x60\\xc8\\ \\x4e\\x8e\\x7e\\xda\\xcc\\xa6\\x02\\x1b\\x30\\xa5\\x0e\\xc1\\xa4\\x01\\xed\\x40\\ \\x7f\\xc0\\x72\\x40\\x97\\xb2\\x1d\\xdc\\x73\\xbd\\x18\\xdb\\x87\\x43\\x60\\x18\\ 
\\xf6\\xf5\\x45\\xa1\\x73\\x5a\\x47\\x1c\\x85\\x1f\\x07\\xb8\\x11\\x11\\x0c\\x63\\ \\x09\\x5c\\xf1\\x5f\\xd8\\x52\\x02\\xc1\\x73\\x81\\x09\\x58\\x4c\\xc4\\x34\\x1f\\", "\\x42\\x90\\x71\\xbd\\x17\\xfe\\xbc\\xd0\\x71\\x6e\\x2e\\x7b\\x03\\x2e\\xdd\\x4d\\ \\xe4\\x83\\x00\\x34\\x80\\xb1\\x74\\x08\\xc4\\xaf\\x43\\xe4\\x80\\x6b\\x05\\x5c\\ \\x12\\x84\\xca\\xae\\x8e\\x2f\\xe9\\x84\\x90\\xcb\\xde\\x9d\\x92\\x33\\x44\\xe9\\ \\xa0\\x85\\x24\\xb0\\x86\\x43\\x52\\x12\\xdc\\xd5\\x88\\xc1\\x6b\\x74\\xa3\\x83\\ \\x14\\x22\\x54\\x60\\x50\\x13\\xbd\\x04\\xbe\\xee\\x83\\xf9\\xdb\\x4d\\x16\\xdb\\ \\x80\\xc3\\x5e\\x68\\xbe\\xd5\\xd1\\xa1\\x69\\x10\\x1a\\x69\\xef\\xf2\\x7a\\xe0\\ \\xd8\\x0b\\x31\\x98\\x52\\x03\\xae\\x3f\\xec\\x33\\xa8\\xf0\\x91\\x7a\\xc8\\x2e\\ \\xa4\\xc5\\xd1\\x69\\x01\\x3d\\x7f\\x04\\x4e\\x7b\\x9c\\x73\\xaa\\xb2\\xd3\\xcd\\ \\xf0\\xd5\\xb4\\x7d\\xd0\\x48\\x3c\\x03\\x8c\\x4f\\x81\\xf6\\x5c\\xe0\\x24\\x06\\ \\xeb\\x60\\xfa\\xff\\xa0\\x17\\x50\\x59\\x07\\xfc\\x66\\x7b\\xf0\\xdc\\xd3\\xb6\\ \\xb5\\xca\\xe9\\xc0\\xc7\\x5e\\x52\\xc1\\x16\\xe1\\x8f\\x53\\xed\\xbd\\xc9\\x59\\ \\x4c\\x8e\\xf1\\x23\\x7e\\x5c\\x00\\x24\\xe2\\x82\\xce\\xcb\\x70\\xd5\\xbe\\xc8\\ \\xbc\\x20\\x50\\x09\\x1c\\x48\\x80\\xd6\\x12\\xd8\\x3d\\x1c\\x0e\\xe6\\xdb\\x4b\\", "\\xce\\x19\\x2e\\xcd\\xec\\xa9\\x38\\x47\\xd5\\x4e\\x4c\\x56\\xef\\xbb\\xa4\\xe1\\ \\xdd\\x24\\xbe\\x51\\x05\\x71\\xd5\\x10\\x57\\x98\\xbd\\xd2\\xe8\\x3e\\xd2\\xb9\\ \\x1e\\x69\\x58\\x9a\\x94\\xf0\\x90\\x4e\\x25\\xaf\\x11\\x81\\xa9\\xd9\\xd2\\x3f\\ \\x3c\\x6a\\x6d\\x41\\x3f\\xcb\\xa1\\xd0\\xc4\\x04\\xc9\\x3f\\x48\\xb1\\xaf\\xd0\\ \\x1c\\xa1\\xc6\\xdf\\x4c\\xa9\\xb7\\x47\\xa2\\x54\\xe2\\x42\\x89\\xfb\\xa4\\xa4\\ \\x73\\xb4\\xa6\\x02\\xad\\x6e\\x41\\xea\\xe6\\x93\\xb8\\xd5\\x99\\x0d\\xfe\\x4b\\ \\xea\\x31\\x5c\\xf1\\x5f\\x91\\x75\\x10\\xe9\\xbc\\x14\\x27\\xc6\\xbe\\x69\\x9f\\ \\xc4\\x7d\\x49\\xd2\\x55\\x99\\x4e\\xbc\\x9d\\x2a\\x3d\\x89\\xc4\\x74\\x89\\xf7\\ \\xa5\\x31\\xbd\\xa5\\xae\\x97\\x49\\xac\\x52\\x88\\x36\\xa9\\x47\\x81\\xfd\\x3a\\ 
\\x35\\x8a\\x52\\x27\\xb1\\x5c\\x16\\x6f\\x48\\x4f\\xa1\\x78\\xca\\x75\\x6a\\x4a\\ \\x78\\x49\\x35\\x74\\x68\\x8e\\x4b\\x42\\x59\\x3e\\x59\\xef\\xa1\\xda\\x00\\x8a\\ \\x4f\\x4f\\x96\\x28\\x96\\xae\\x40\\xc1\\xfd\\xf6\\x04\\x2a\\xb0\\x14\\xc9\\x3f\\ \\xde\\x59\\xe8\\x34\\x89\\x4b\\xa4\\x94\\x6e\\xaa\\x5d\\x6a\\xea\\x45\\xa1\\x2f\\", "\\xed\\xd3\\xa8\\x79\\x9d\\x18\\x93\\x70\\xd7\\x1d\\x83\\xdd\\x83\\xa0\\xc0\\x24\\ \\xb6\\x6e\\x31\\x01\\x6e\\xc6\\x04\\x92\\x38\\x82\\xcb\\x5c\\x08\\x66\\x14\\xd6\\ \\x16\\x51\\x5b\\xbe\\x8d\\x72\\x32\\xd8\\x88\\x97\\x29\\x40\\xbf\\xba\\x7d\\x98\\ \\x0d\\x09\\x10\\x6f\\x05\\x8e\\x02\\x9e\\x10\\xc4\\x60\\x67\\xa9\\x9d\\xfd\\xaf\\ \\xde\\x0d\\xe6\\x4a\\x0f\\x58\\x3d\\x1d\\x90\\x95\\x13\\x9b\\xf6\\x2d\\x1f\\xbd\\ \\x15\\xe4\\x88\\xdf\\x2e\\xa0\\x15\\x38\\x0b\\x1c\\x2f\\x82\\xd0\\x5c\\xa0\\xf0\\ \\x57\\x60\\x9d\\xcd\\x63\\x1a\\x0a\\xcb\\x06\\xd0\\x50\\x01\\x91\\x81\\x40\\xce\\ \\x61\\xe7\\x39\\xb9\\xd0\\x39\\xc8\\xc6\\x6e\\x04\\xc0\\xc0\\xcd\\x69\\x3b\\x68\\ \\xe5\\x00\\x47\\x52\\x69\\x89\\x5e\\x4e\\x10\\x48\\x21\\x88\\xcb\\x58\\x08\\xbe\\ \\x00\\x6c\\x01\\xf6\\x3e\\x41\\x2d\\xb9\\x44\\xf1\\x60\\x81\\x9d\\x11\\x93\\xca\\ \\x31\\xb7\\xba\\xe1\\x82\\x9f\\x81\\x02\\x03\\xa2\\x99\\x30\\x14\\x0e\\xa4\\xc0\\ \\x13\\xc7\\x21\\x7b\\x01\\x70\\x36\\xcb\\x09\\x43\\x89\\xc4\\x1f\\x28\\x63\\xe1\\ \\x0a\\x9b\\xc9\\x32\\x9d\\x30\\xd5\\xd5\\xe9\\x91\\x46\\xe0\\xc4\\x18\\xd0\\x83\\", "\\x2c\\x64\\x6d\\x41\\x2a\\xcd\\x75\\xf4\\xe3\\x11\\x89\\xd5\\x92\\xeb\\x7d\\xe9\\ \\xd2\\x54\\xe9\\x17\\xa4\\x61\\x45\\x12\\x1f\\x4a\\x54\\x2a\\xc6\\x46\\x29\\x6d\\ \\x8c\\xc4\\x9b\\xb2\\x58\\x2b\\xa5\\xa7\\x39\\x8b\\x3d\\x29\\x75\\x4f\\x56\\x94\\ \\xb5\\x3a\\x4c\\x4c\\x01\\x0e\\x4a\\xc6\\x33\\x8a\\xd2\\xa4\\x38\\x01\\x1d\\x24\\ \\xa6\\xf6\\xb1\\xcb\\xa4\\xf3\\x5d\\x8a\\xe6\\x4e\\x51\\x1d\\x3f\\xeb\\x3b\\xa4\\ \\xd5\\xc4\\xf4\\x11\\x12\\xf1\\xcf\\x91\\x1e\\x47\\x9a\\x99\\x2d\\xe5\\xf8\\xa5\\ \\x2b\\x4c\\xe9\\x5b\\x54\\x17\\x45\\xd6\\x03\\x6e\\xc9\\xdf\\x5d\\xe2\\x5c\\x7b\\ 
\\x66\\xe8\\xc9\\x97\\xae\\x34\\xf4\\xe9\\x09\\x7b\\x28\\xba\\x4a\\x68\\xb1\\xd0\\ \\xd9\\x16\\xd4\\xbc\\x1d\\xe9\\x2a\\xbf\\xc4\\x15\\x12\\xf3\\xd5\\x4a\\xab\\x2c\\ \\x8e\\x4b\\xb9\\x23\\xa5\\x67\\x90\\xae\\x2a\\x94\\x58\\xa9\\x06\\xa4\\x20\\x4b\\ \\xed\\xcb\\x0b\\xf6\\x4a\\xbc\\xab\\x38\\xfb\\x55\\x81\\x65\\x4f\\x79\\xf9\\x46\\ \\xed\\x04\\x74\\x9a\\x6a\\x1d\\x24\\xa4\\xad\\x48\\x11\\x3a\\x15\\x34\\x7f\\x94\\ \\xdc\\x53\\x15\\x49\\xdd\\xab\\x36\\xef\\xef\\xfa\\xa2\\xf0\\xa0\\x16\\x76\\x3d\\", "\\x31\\x47\\xe2\\x6a\\xe9\\x46\\xaf\\x62\\x6d\\xce\\xe5\\x76\\x7e\\x77\\x7b\\x4e\\ \\x92\\x3b\\xda\\xbe\\x1f\\x5e\\x89\\x74\\xa5\\x5f\\x62\\xae\\x7d\\xb5\\x6a\\x3c\\ \\x29\\xbd\\xe8\\x91\\xe6\\x23\\x3d\\x85\\xd4\\xbb\\x44\\x31\\x96\\xa9\\x99\\xcd\\ \\x8a\\xf1\\x95\\x94\\x3b\\x52\\xee\\x9e\\x31\\xbb\\xd8\\x8c\\x97\\x80\\xc6\\x5c\\ \\x68\\xac\\xa3\\x35\\xdf\\x6e\\xde\\x5e\\xc0\\xf4\\x27\\xc1\\x58\\xdc\\x17\\xac\\ \\x73\\x1c\\x00\\x45\\xb0\\x6e\\x69\\xe4\\x8f\\xc1\\xc2\\x72\\xfa\\x23\\x82\\x1d\\ \\x1a\\x39\\x05\\x28\\xc5\\x01\\x5e\\x21\\xec\\xbd\\x15\\xe3\\xb1\\x77\\xf1\\x2c\\ \\x0a\\x63\\xd4\\x00\\x87\\x46\\x3b\\x1e\\x6c\\x34\\x7c\\xeb\\xc7\\xf5\\xaf\\x4f\\ \\xed\\xd3\\xa8\\x79\\x9d\\x18\\x93\\x70\\xd7\\x1d\\x83\\xdd\\x83\\xa0\\xc0\\x24\\ \\xb6\\x6e\\x31\\x01\\x6e\\xc6\\x04\\x92\\x38\\x82\\xcb\\x5c\\x08\\x66\\x14\\xd6\\ \\x16\\x51\\x5b\\xbe\\x8d\\x72\\x32\\xd8\\x88\\x97\\x29\\x40\\xbf\\xba\\x7d\\x98\\ \\x0d\\x09\\x10\\x6f\\x05\\x8e\\x02\\x9e\\x10\\xc4\\x60\\x67\\xa9\\x9d\\xfd\\xaf\\ \\xde\\x0d\\xe6\\x4a\\x0f\\x58\\x3d\\x1d\\x90\\x95\\x13\\x9b\\xf6\\x2d\\x1f\\xbd\\", "qt_resource_struct_v2 = b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x01\\x74\\x20\\x97\\x84\\xbd\\ \" qt_version = [int(v)", 
"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x01\\x74\\x20\\x97\\x84\\xbd\\ \" qt_version = [int(v) for v in QtCore.qVersion().split('.')]", "\\x80\\xc3\\x5e\\x68\\xbe\\xd5\\xd1\\xa1\\x69\\x10\\x1a\\x69\\xef\\xf2\\x7a\\xe0\\ \\xd8\\x0b\\x31\\x98\\x52\\x03\\xae\\x3f\\xec\\x33\\xa8\\xf0\\x91\\x7a\\xc8\\x2e\\ \\xa4\\xc5\\xd1\\x69\\x01\\x3d\\x7f\\x04\\x4e\\x7b\\x9c\\x73\\xaa\\xb2\\xd3\\xcd\\ \\xf0\\xd5\\xb4\\x7d\\xd0\\x48\\x3c\\x03\\x8c\\x4f\\x81\\xf6\\x5c\\xe0\\x24\\x06\\ \\xeb\\x60\\xfa\\xff\\xa0\\x17\\x50\\x59\\x07\\xfc\\x66\\x7b\\xf0\\xdc\\xd3\\xb6\\ \\xb5\\xca\\xe9\\xc0\\xc7\\x5e\\x52\\xc1\\x16\\xe1\\x8f\\x53\\xed\\xbd\\xc9\\x59\\ \\x4c\\x8e\\xf1\\x23\\x7e\\x5c\\x00\\x24\\xe2\\x82\\xce\\xcb\\x70\\xd5\\xbe\\xc8\\ \\xbc\\x20\\x50\\x09\\x1c\\x48\\x80\\xd6\\x12\\xd8\\x3d\\x1c\\x0e\\xe6\\xdb\\x4b\\ \\x31\\x80\\xb2\\x64\\x68\\xba\\x06\\x7a\\x8c\\x6b\\x23\\x3a\\xa0\\x82\\xf8\\x8c\\ \\x08\\x47\\xc7\\x42\\xb3\\xdb\\x5e\\xb2\\xdf\\xb6\\x87\\xe4\\x1b\\xe0\\x2b\\x8b\\ \\x42\\xc3\\x29\\x48\\xaa\\x80\\xd1\\x95\\x04\\xdf\\x0b\\x53\\x57\\x0c\\x1e\\x13\\ \\x12\\xf7\\x00\\x3f\\xb9\\x81\\x1d\\x90\\xf6\\x13\\xfc\\xb7\\xda\\x86\\xf3\\x91\\ \\x20\\x1c\\xfc\\x1d\\xac\\x13\\x70\\xd9\\x2f\\x30\\x0a\\x70\\x45\\xe1\\xfb\\x10\\", "v in QtCore.qVersion().split('.')] if qt_version < [5, 8, 0]: rcc_version = 1 qt_resource_struct", "\\x67\\x20\\xb0\\x0d\\x3c\\x60\\xe8\\x35\\xd4\\x36\\x13\\x52\\xd7\\x39\\xc7\\x9a\\ \\x0a\\xbc\\x66\\x6f\\xc4\\xe6\\x75\\xb0\\x3c\\x03\\x1e\\x5d\\x09\\xc5\\x37\\x26\\ \\x43\\xa8\\x14\\xe8\\x05\\xae\\xc3\\x30\\xb1\\x8c\\xda\\xd7\\xa2\\xfc\\xd6\\xcb\\ \\xee\\xcf\\x7c\\xa7\\x80\\x11\\x0d\\x90\\xf5\\x36\\xf0\\x41\\x3e\\x9c\\xfc\\xbb\\ \\xb3\\x78\\xb7\\xf3\\x37\\x08\\x29\\xef\\x40\\x7b\\x8a\\x53\\x72\\x7f\\xe0\\x38\\ 
\\xe2\\x7a\\x0c\\x42\\xb6\\xbe\\xf8\\x1f\\x86\\x60\\x0c\\xb8\\x11\\xc8\\x43\\x0c\\ \\x25\\x46\\x04\\x8f\\x71\\x2d\\xf4\\xad\\x27\\x7c\\xf8\\x67\\x5a\\xc8\\x23\\x86\\ \\xa8\\xc5\\xc0\\xed\\xde\\xce\\xe0\\x41\\xf7\\x81\\x3e\\x46\\x65\\x01\\x14\\x38\\ \\x81\\x74\\x1c\\x6d\\x17\\xea\\xfc\\x1a\\x29\\x0d\\x69\\x80\\x5b\\x07\\x4e\\xa1\\ \\xfb\\x85\\xaa\\xca\\x90\\xce\\xc3\\x0e\\x58\\x8c\\x95\\x98\\x29\\x65\\x75\\x51\\ \\x60\\x17\\xfa\\x40\\x68\\x95\\x50\\x5d\\x25\\xd2\\x39\\xa9\\x12\\xe3\\x24\\x1e\\ \\x96\\x78\\x59\\xe2\\xff\\x6c\\x07\\x3c\\x7c\\xac\\xf4\\x32\\x52\\xde\\x60\\x89\\ \\x47\\x25\\x5e\\x93\\xf2\\xc6\\x49\\xfe\\xcb\\x24\\x96\\x4b\\x3c\\x23\\xf1\\x93\\", "\\xed\\x04\\x74\\x9a\\x6a\\x1d\\x24\\xa4\\xad\\x48\\x11\\x3a\\x15\\x34\\x7f\\x94\\ \\xdc\\x53\\x15\\x49\\xdd\\xab\\x36\\xef\\xef\\xfa\\xa2\\xf0\\xa0\\x16\\x76\\x3d\\ \\xa2\\x08\\xeb\\x74\\x77\\x9e\\xe4\\x9a\\xdb\\x9f\\x79\\xc6\\xda\\x14\\xd8\\x38\\ \\x19\\x92\\x8f\\xc1\\xe8\\x30\\xd1\\x2b\\x0d\\x52\\x9e\\xea\\x86\\xf1\\x45\\x02\\ \\x14\\x89\\xc8\\x05\\x89\\x54\\xbd\\xb8\\x8f\\xa5\\x8f\\x07\\xf8\\xfc\\x21\\xe1\\ \\xca\\xb0\\xc5\\x32\\xd9\\x21\\xcd\\xfa\\x20\\x44\\x4a\\x81\\x1b\\xa2\\xf8\\x87\\ \\x9f\\x80\\xd0\\x16\\x12\\x2a\\x4f\\x41\\xd4\\x87\\xd1\\x31\\x02\\xca\\x2c\\x22\\ \\xb3\\x77\\x11\\x9d\\xba\\x12\\xef\\xba\\x18\\x66\\x74\\x24\\xee\\x3e\\x9f\\x41\\ \\x53\\x00\\xb8\\x80\\x28\\x25\\xf8\\x68\\xc6\\x4b\\x39\\x71\\xd2\\x30\\xc8\\xe6\\ \\x77\\xba\\x10\\x26\\xca\\x20\\x76\\xe1\\x3f\\x6f\\x01\\xee\\xd0\\x49\\xde\\xf0\\ \\xcd\\x23\\xd2\\x69\\xb1\\xc2\\x5f\\xcc\\x65\\x9d\\xd5\\x84\\x5b\\x45\\xa3\\xbf\\ \\x81\\x98\\x75\\x02\\xde\\xdf\\x79\\x95\\x46\\x6d\\x7b\\x4f\\xd7\\x1c\\x9a\\xad\\ \\xc0\\x2f\\x48\\x2f\\x21\\x8d\\xf4\\x48\\x5d\\x8b\\xa4\\x3b\\x0d\\x35\\x1c\\xb2\\", "\\x09\\x97\\xc9\\xa2\\x5a\\xe2\\xa8\\xe2\\x74\\xd8\\x73\\x94\\x84\\x71\\xd2\\x84\\ \\x54\\x29\\xad\\x44\\x1a\\x93\\x22\\x1a\\xd7\\x20\\x8d\\xb0\\x67\\x22\\x95\\xb5\\ \\xa8\\x7a\\x1f\\x52\\xae\\xcb\\x59\\xc4\\x20\\xb5\\x5d\\xe3\\xd5\\x8f\\xad\\x68\\ 
\\x6f\\x33\\x0a\\xbf\\x66\\x48\\x05\\xc9\\x12\\x57\\x4a\\xfe\\x21\\xaa\\x2c\\x47\\ \\x3b\\x9a\\x91\\x8a\\x0d\\xc9\\xb8\\x44\\x72\\x4f\\x53\\xec\\xfe\\x02\\x55\\x34\\ \\x22\\xeb\\x30\\xd2\\xa4\\x44\\xc9\\x78\\xca\\x19\\xf6\\xcc\\x97\\x86\\x95\\x48\\ \\xfb\\x90\\x0a\\xa6\\x48\\xfc\\x20\\x79\\x2f\\x96\\x16\\xe3\\x4c\\xbc\\xbe\\x92\\ \\xce\\x19\\x2e\\xcd\\xec\\xa9\\x38\\x47\\xd5\\x4e\\x4c\\x56\\xef\\xbb\\xa4\\xe1\\ \\xdd\\x24\\xbe\\x51\\x05\\x71\\xd5\\x10\\x57\\x98\\xbd\\xd2\\xe8\\x3e\\xd2\\xb9\\ \\x1e\\x69\\x58\\x9a\\x94\\xf0\\x90\\x4e\\x25\\xaf\\x11\\x81\\xa9\\xd9\\xd2\\x3f\\ \\x3c\\x6a\\x6d\\x41\\x3f\\xcb\\xa1\\xd0\\xc4\\x04\\xc9\\x3f\\x48\\xb1\\xaf\\xd0\\ \\x1c\\xa1\\xc6\\xdf\\x4c\\xa9\\xb7\\x47\\xa2\\x54\\xe2\\x42\\x89\\xfb\\xa4\\xa4\\ \\x73\\xb4\\xa6\\x02\\xad\\x6e\\x41\\xea\\xe6\\x93\\xb8\\xd5\\x99\\x0d\\xfe\\x4b\\", "\\x12\\x67\\x8f\\x20\\x3e\\x35\\x0c\\x1c\\x00\\x22\\x18\\xf8\\xb0\\x36\\xf6\\x83\\ \\xc4\\x44\\x7c\\x74\\x12\\x25\\x4e\\xb1\\xf9\\x09\\xc9\\x2b\\xf2\\xe0\\xf0\\xcd\\ \\xf4\\x30\\x3b\\x99\\x68\\xee\\x87\\x0b\\xc6\\x53\\xf0\\xa6\\x20\\xdf\\x4b\\x81\\ \\x37\\x99\\xe2\\x16\\x93\\x7b\\x3c\\x07\\x31\\x17\\x0e\\xe9\\x44\\x61\\x60\\x00\\ \\x18\\xc5\\x30\\x39\\x0e\\x74\\x58\\xd0\\xe5\\x04\\xc1\\x5b\\xa1\\xd5\\x03\\x33\\ \\x16\\x00\\x1b\\x92\\x41\\x01\\x48\\x6d\\xe5\\xf4\\xdc\\x28\\xf7\\x3c\\x0d\\x1b\\ \\x80\\x33\\x67\\xa0\\xe8\\x13\\x48\\xfe\\x10\\xfa\\x76\\x40\\xb3\\x01\\x9e\\xf5\\ \\x90\\x38\\x2a\\x17\\x96\\x65\\x63\\x5a\\x10\\xfb\\x5f\\x13\\xfc\\xed\\x23\\xa0\\ \\x02\\x38\\x06\\x04\\x60\\xdf\\x38\\x4c\\xd3\\x0b\\x7d\\x77\\x03\\xc2\\x45\\x3b\\ \\x66\\xa0\\x1f\\x84\\x3a\\xf1\\x70\\x9c\\x46\\x92\\x88\\x64\\x9c\\x02\\xab\\x15\\ \\xd1\\x04\\xf1\\x54\\x0a\\x4f\\x0f\\x85\\x13\\x82\\xba\\x22\\x16\\x1d\\x9b\\x42\\ \\x63\\xd5\\xc5\\xb4\\x9b\\x01\\xc2\\xed\\x43\\xe1\\x93\\x76\\xd4\\xb1\\x12\\xe9\\ \\x69\\x74\\xa8\\x19\\x6d\\xaf\\x40\\xba\\x18\\x59\\x43\\xd0\\xd3\\x21\\x54\\xbf\\", "\\xeb\\x60\\xfa\\xff\\xa0\\x17\\x50\\x59\\x07\\xfc\\x66\\x7b\\xf0\\xdc\\xd3\\xb6\\ 
\\xb5\\xca\\xe9\\xc0\\xc7\\x5e\\x52\\xc1\\x16\\xe1\\x8f\\x53\\xed\\xbd\\xc9\\x59\\ \\x4c\\x8e\\xf1\\x23\\x7e\\x5c\\x00\\x24\\xe2\\x82\\xce\\xcb\\x70\\xd5\\xbe\\xc8\\ \\xbc\\x20\\x50\\x09\\x1c\\x48\\x80\\xd6\\x12\\xd8\\x3d\\x1c\\x0e\\xe6\\xdb\\x4b\\ \\x31\\x80\\xb2\\x64\\x68\\xba\\x06\\x7a\\x8c\\x6b\\x23\\x3a\\xa0\\x82\\xf8\\x8c\\ \\x08\\x47\\xc7\\x42\\xb3\\xdb\\x5e\\xb2\\xdf\\xb6\\x87\\xe4\\x1b\\xe0\\x2b\\x8b\\ \\x42\\xc3\\x29\\x48\\xaa\\x80\\xd1\\x95\\x04\\xdf\\x0b\\x53\\x57\\x0c\\x1e\\x13\\ \\x12\\xf7\\x00\\x3f\\xb9\\x81\\x1d\\x90\\xf6\\x13\\xfc\\xb7\\xda\\x86\\xf3\\x91\\ \\x20\\x1c\\xfc\\x1d\\xac\\x13\\x70\\xd9\\x2f\\x30\\x0a\\x70\\x45\\xe1\\xfb\\x10\\ \\x1e\\x26\\x03\\x6d\\x18\\xfe\\x15\\x50\\xd0\\x08\\xa5\\x11\\x4c\\xef\\x1a\\x5c\\ \\x15\\xd3\\xa9\\x26\\x8b\\x0c\\x3c\\x64\\xf0\\x2b\\xc6\\x34\\xa1\\x98\\xd3\\xe7\\ \\x25\\x0e\\x54\\x5a\\x1c\\x93\\x5d\\xef\\xc0\\x26\\xc1\\x69\\xe6\\xae\\x4e\\x4b\\ \\xfe\\x95\\xef\\xb3\\x9d\\xbe\\xf8\\x2b\\xb8\\x02\\x0c\\x88\\x43\\x5e\\x04\\x9a\\", "0]: rcc_version = 1 qt_resource_struct = qt_resource_struct_v1 else: rcc_version = 2 qt_resource_struct =", "\\x3c\\xa6\\xfd\\xdb\\x66\\xa9\\xe3\\xc0\\x67\\xe7\\xc2\\x2f\\x4f\\xc2\\xbe\\x12\\ \\x9b\\x1f\\x9a\\x12\\xa0\\x7a\\x11\\x34\\xac\\x87\\x23\\xef\\x74\\xe0\\xcd\\x84\\ \\x7e\\xd7\\x27\\x63\\x2c\\x98\\x06\\xb1\\x09\\xf6\\xce\\x1b\\xf5\\xf0\\xc8\\x12\\ \\x08\\xc5\\x61\\xcd\\x9d\\x0e\\xe7\\x6d\\x86\\x09\\x31\\xa8\\x1d\\x83\\x41\\x10\\ \\xda\\xf2\\x6c\\x37\\x61\\x96\\x11\\x02\\x38\\x3b\\x01\\x3c\\x11\\xd2\\x53\\xbe\\ \\xc7\\x72\\x7e\\x7b\\x3b\\x13\\x89\\xbf\\xf3\\x03\\x54\\xe6\\x41\\xef\\x32\\x0a\\ \\x86\\xf5\\xc0\\xd4\\x29\\x11\\x00\\x1a\\x8a\\x80\\x30\\xd0\\x2c\\xce\\x59\\x0a\\ \\xe7\\x00\\x5f\\xf4\\x05\\xa6\\x01\\x66\\x9d\\xed\\xa6\\xde\\x82\\x69\\x9f\\xd8\\ \\x26\\xd3\\x72\\xf4\\xb9\\xcc\\x11\\x63\\x0b\\xd8\\xef\\x82\\x3d\\x3e\\xe8\\xd2\\ \\x00\\x99\\x17\\x16\\xc1\\xf2\\xa9\\xf6\\x26\\x50\\x0f\\xac\\x84\\xe9\\x6f\\xd0\\ 
\\xf1\\x58\\x1b\\x7c\\x9b\\xe1\\x38\\xe7\\x3a\\xb8\\xf8\\xbf\\x50\\x93\\xe8\\x10\\ \\x76\\x13\\x24\\xc8\\x76\\x2f\\x29\\x7b\\xc8\\xa0\\x02\\xe2\\xd7\\xc3\\x0f\\x17\\ \\x62\\x5e\\x3e\\x8b\\x62\\x3e\\xa0\\x17\\x10\\xc1\\xe4\\x20\\x43\\x09\\xad\\xdb\\", "\\x00\\x00\\x00\\x00\\x49\\x45\\x4e\\x44\\xae\\x42\\x60\\x82\\ \" qt_resource_name = b\"\\ \\x00\\x05\\ \\x00\\x73\\x5e\\x63\\ \\x00\\x6c\\ \\x00\\x6f\\x00\\x67\\x00\\x6f\\x00\\x73\\ \\x00\\x08\\ \\x0a\\x61\\x5a\\xa7\\ \\x00\\x69\\ \\x00\\x63\\x00\\x6f\\x00\\x6e\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\", "from silx.gui import qt as QtCore qt_resource_data = b\"\\ \\x00\\x00\\x19\\x3d\\ \\x89\\ \\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0d\\x49\\x48\\x44\\x52\\x00\\ \\x00\\x00\\x32\\x00\\x00\\x00\\x32\\x08\\x06\\x00\\x00\\x00\\x1e\\x3f\\x88\\xb1\\", "\\xa6\\x42\\x73\\x6f\\x02\\x23\\x9b\\x49\\x78\\x66\\x09\\xee\\x6f\\xc3\\x84\\xa3\\ \\x26\\xbe\\x7d\\x97\\x62\\xb5\\xe7\\xa3\\x59\\x3f\\x62\\xae\\xf6\\xf3\\xf6\\xab\\ \\x11\\xae\\x5f\\x94\\x8f\\x7b\\xe5\\x85\\x7c\\x78\\xaf\\x9f\\xa9\\x89\\xaf\\x52\\ \\x7b\\xf6\\x3a\\x1a\\xc9\\x67\\xe0\\xb7\\x43\\xd9\\xfe\\xf7\\xcf\\x30\\xbf\\x6b\\ \\xde\\xcf\\xdf\\x6a\\x81\\xb9\\x40\\x03\\x6c\\xe9\\x7a\\x88\\x7f\\x54\\x47\\x71\\ \\x7d\\x0d\\x9c\\xee\\x0f\\x87\\xa1\\x79\\x3f\\x1c\\xc7\\xcf\\x50\\x60\\xc6\\xbf\\ \\x2c\\xf8\\x4f\\x7f\\x68\\x1e\\x00\\x1d\\xbd\\xe0\\x60\\x0a\\x08\\xb2\\x42\\x70\\ \\x3e\\xb0\\x28\\x0c\\x49\\x19\\x10\\x9e\\x0a\\x34\\x74\\x03\\x6e\\x82\\xf8\\xc3\\ \\xd0\\xf2\\x02\\x7d\\x2f\\x99\\xc1\\xa8\\x45\\x09\\x14\\xd7\\x42\\x5a\\x38\\x0e\\ \\x1d\\x6d\\x44\\x18\\x88\\xf7\\xb7\\x97\\x30\\x27\\x7e\\x47\\xa8\\xde\\xc0\\xb7\\ \\x29\\x87\\xd6\\xa3\\x23\\xd0\\xa9\\x2c\\x5c\\x8f\\x4d\\xc0\\x38\\xe4\\xa1\\xf7\\ \\xeb\\x29\\xf0\\x99\\x8b\\xe3\\xea\\x46\\x60\\xed\\x83\\x64\\x86\\xfb\\x91\\x1c\\ \\x4d\\xa7\\xa6\\xc5\\xa0\\xe0\\x64\\x22\\x93\\xdf\\x9a\\x8b\\x59\\x19\\x8c\\x92\\", "\\x9d\\xdc\\xb2\\xfb\\x08\\x1e\\xef\\x11\\x16\\x56\\x26\\x71\\xdb\\xb1\\x5f\\xf8\\ 
\\x57\\x82\\x87\\x65\\x56\\x0e\\x2c\\xd9\\xde\\x45\\x5a\\x81\\x74\\x27\\x6a\\xac\\ \\x43\\x17\\xcb\\xa3\\xd8\\xa3\\x48\\xd9\\xf9\\xd2\\xe0\\x2e\\xda\\x7d\\x08\\x0d\\ \\x94\\x57\\x7f\\x84\\x90\\x0a\\x3d\\x52\\x62\\xa2\\x94\\x9a\\x2c\\x0d\\x34\\x54\\ \\x7f\\x00\\x5d\\x27\\xf4\\x80\\xd0\\x0f\\x42\\xaf\\x0b\\xad\\x15\\xaa\\x8d\\xa3\\ \\xaf\\x84\\xaa\\x02\\x28\\x7e\\xef\\x00\\x89\\xe7\\xd5\\x41\\x95\\x9a\\x39\\x25\\ \\x8b\\x8d\\xb2\\xcc\\x0f\\x14\\xfc\\x0d\\xc5\\x6f\\xf0\\x49\\xcc\\x96\\xd8\\xa1\\ \\x28\\x87\\xa4\\xbf\\x77\\x97\\x26\\xa3\\x76\\x36\\xa9\\x9c\\x46\\x9d\\xa0\\x42\\ \\xed\\x4f\\xa5\\xc9\\x1a\\xd8\\x4f\\x07\\x8d\\x66\\x3d\\x52\\x70\\x4c\\x4a\\xfe\\ \\x51\\x67\\xb6\\xf8\\xb4\\xe3\\xdc\\x1b\\x34\\x3f\\x7f\\xa9\\x46\\x0e\\x8d\\xeb\\ \\x92\\xfc\\x72\\x99\\x59\\xde\\x36\\x82\\x35\\xc0\\x71\\xc8\\xf8\\x16\\x26\\x07\\ \\xa2\\xb8\\x3c\\x1e\\x20\\x09\\xc6\\x9d\\x65\\x75\\x29\\xcc\\xa9\\x8f\\x30\\xe4\\ \\x66\\xa0\\x2a\\x19\\x22\\x3e\\x88\\x87\\x69\\xb9\\x5e\\xfc\\x38\\x00\\xae\\x06\\", "\\xfd\\xe8\\xe5\\x0e\\xfc\\x2f\\x02\\xdb\\x6f\\x03\\xae\\xb0\\xe9\\x76\\x64\\x3d\\ \\x54\\x25\\x3a\\x2a\\x6f\\xc1\\x59\\xcb\\x66\\xea\\xdc\\x1d\\x58\\x18\\x74\\xd0\\ \\x07\\x6a\\x27\\x41\\x69\\x8c\\x94\\x21\\x8f\\x30\\x90\\x1a\\x92\\x9c\\xdf\\x5c\\ \\x1b\\xef\\x43\\x78\\xdd\\x2a\\xa8\\xc9\\xc5\\xc4\\x65\\x92\\x07\\x18\\x09\\xb6\\ \\xa2\\x33\\xd4\\xb6\\xb1\\x05\\x6b\\xed\\x84\\x5d\\x56\\x0a\\x4c\\x32\\x00\\xc0\\ \\x3a\\x0f\\xaa\\xc6\\x90\\xfc\\x9c\\xcd\\x03\\x38\\x11\\x20\\xd7\\xb1\\x80\\x03\\ \\x42\\x90\\x71\\xbd\\x17\\xfe\\xbc\\xd0\\x71\\x6e\\x2e\\x7b\\x03\\x2e\\xdd\\x4d\\ \\xe4\\x83\\x00\\x34\\x80\\xb1\\x74\\x08\\xc4\\xaf\\x43\\xe4\\x80\\x6b\\x05\\x5c\\ \\x12\\x84\\xca\\xae\\x8e\\x2f\\xe9\\x84\\x90\\xcb\\xde\\x9d\\x92\\x33\\x44\\xe9\\ \\xa0\\x85\\x24\\xb0\\x86\\x43\\x52\\x12\\xdc\\xd5\\x88\\xc1\\x6b\\x74\\xa3\\x83\\ \\x14\\x22\\x54\\x60\\x50\\x13\\xbd\\x04\\xbe\\xee\\x83\\xf9\\xdb\\x4d\\x16\\xdb\\ \\x80\\xc3\\x5e\\x68\\xbe\\xd5\\xd1\\xa1\\x69\\x10\\x1a\\x69\\xef\\xf2\\x7a\\xe0\\ 
\\xd8\\x0b\\x31\\x98\\x52\\x03\\xae\\x3f\\xec\\x33\\xa8\\xf0\\x91\\x7a\\xc8\\x2e\\", "\\x66\\xa0\\x2a\\x19\\x22\\x3e\\x88\\x87\\x69\\xb9\\x5e\\xfc\\x38\\x00\\xae\\x06\\ \\x1e\\x04\\x2e\\x8c\\xdb\\xd0\\x3a\\x5f\\x90\\x6c\\xc0\\x84\\x38\\x74\\xab\\x07\\ \\x1e\\x39\\x88\\xc6\\xfe\\x17\\x3f\\x2f\\x93\\xce\\x12\\x0c\\xda\\x31\\xac\\x3c\\ \\x12\\x67\\x8f\\x20\\x3e\\x35\\x0c\\x1c\\x00\\x22\\x18\\xf8\\xb0\\x36\\xf6\\x83\\ \\xc4\\x44\\x7c\\x74\\x12\\x25\\x4e\\xb1\\xf9\\x09\\xc9\\x2b\\xf2\\xe0\\xf0\\xcd\\ \\xf4\\x30\\x3b\\x99\\x68\\xee\\x87\\x0b\\xc6\\x53\\xf0\\xa6\\x20\\xdf\\x4b\\x81\\ \\x37\\x99\\xe2\\x16\\x93\\x7b\\x3c\\x07\\x31\\x17\\x0e\\xe9\\x44\\x61\\x60\\x00\\ \\x18\\xc5\\x30\\x39\\x0e\\x74\\x58\\xd0\\xe5\\x04\\xc1\\x5b\\xa1\\xd5\\x03\\x33\\ \\x16\\x00\\x1b\\x92\\x41\\x01\\x48\\x6d\\xe5\\xf4\\xdc\\x28\\xf7\\x3c\\x0d\\x1b\\ \\x80\\x33\\x67\\xa0\\xe8\\x13\\x48\\xfe\\x10\\xfa\\x76\\x40\\xb3\\x01\\x9e\\xf5\\ \\x90\\x38\\x2a\\x17\\x96\\x65\\x63\\x5a\\x10\\xfb\\x5f\\x13\\xfc\\xed\\x23\\xa0\\ \\x02\\x38\\x06\\x04\\x60\\xdf\\x38\\x4c\\xd3\\x0b\\x7d\\x77\\x03\\xc2\\x45\\x3b\\ \\x66\\xa0\\x1f\\x84\\x3a\\xf1\\x70\\x9c\\x46\\x92\\x88\\x64\\x9c\\x02\\xab\\x15\\", "\\x1b\\xef\\x43\\x78\\xdd\\x2a\\xa8\\xc9\\xc5\\xc4\\x65\\x92\\x07\\x18\\x09\\xb6\\ \\xa2\\x33\\xd4\\xb6\\xb1\\x05\\x6b\\xed\\x84\\x5d\\x56\\x0a\\x4c\\x32\\x00\\xc0\\ \\x3a\\x0f\\xaa\\xc6\\x90\\xfc\\x9c\\xcd\\x03\\x38\\x11\\x20\\xd7\\xb1\\x80\\x03\\ \\x42\\x90\\x71\\xbd\\x17\\xfe\\xbc\\xd0\\x71\\x6e\\x2e\\x7b\\x03\\x2e\\xdd\\x4d\\ \\xe4\\x83\\x00\\x34\\x80\\xb1\\x74\\x08\\xc4\\xaf\\x43\\xe4\\x80\\x6b\\x05\\x5c\\ \\x12\\x84\\xca\\xae\\x8e\\x2f\\xe9\\x84\\x90\\xcb\\xde\\x9d\\x92\\x33\\x44\\xe9\\ \\xa0\\x85\\x24\\xb0\\x86\\x43\\x52\\x12\\xdc\\xd5\\x88\\xc1\\x6b\\x74\\xa3\\x83\\ \\x14\\x22\\x54\\x60\\x50\\x13\\xbd\\x04\\xbe\\xee\\x83\\xf9\\xdb\\x4d\\x16\\xdb\\ \\x80\\xc3\\x5e\\x68\\xbe\\xd5\\xd1\\xa1\\x69\\x10\\x1a\\x69\\xef\\xf2\\x7a\\xe0\\ \\xd8\\x0b\\x31\\x98\\x52\\x03\\xae\\x3f\\xec\\x33\\xa8\\xf0\\x91\\x7a\\xc8\\x2e\\ 
\\xa4\\xc5\\xd1\\x69\\x01\\x3d\\x7f\\x04\\x4e\\x7b\\x9c\\x73\\xaa\\xb2\\xd3\\xcd\\ \\xf0\\xd5\\xb4\\x7d\\xd0\\x48\\x3c\\x03\\x8c\\x4f\\x81\\xf6\\x5c\\xe0\\x24\\x06\\ \\xeb\\x60\\xfa\\xff\\xa0\\x17\\x50\\x59\\x07\\xfc\\x66\\x7b\\xf0\\xdc\\xd3\\xb6\\", "\\xdc\\x53\\x15\\x49\\xdd\\xab\\x36\\xef\\xef\\xfa\\xa2\\xf0\\xa0\\x16\\x76\\x3d\\ \\xa2\\x08\\xeb\\x74\\x77\\x9e\\xe4\\x9a\\xdb\\x9f\\x79\\xc6\\xda\\x14\\xd8\\x38\\ \\x19\\x92\\x8f\\xc1\\xe8\\x30\\xd1\\x2b\\x0d\\x52\\x9e\\xea\\x86\\xf1\\x45\\x02\\ \\x14\\x89\\xc8\\x05\\x89\\x54\\xbd\\xb8\\x8f\\xa5\\x8f\\x07\\xf8\\xfc\\x21\\xe1\\ \\xca\\xb0\\xc5\\x32\\xd9\\x21\\xcd\\xfa\\x20\\x44\\x4a\\x81\\x1b\\xa2\\xf8\\x87\\ \\x9f\\x80\\xd0\\x16\\x12\\x2a\\x4f\\x41\\xd4\\x87\\xd1\\x31\\x02\\xca\\x2c\\x22\\ \\xb3\\x77\\x11\\x9d\\xba\\x12\\xef\\xba\\x18\\x66\\x74\\x24\\xee\\x3e\\x9f\\x41\\ \\x53\\x00\\xb8\\x80\\x28\\x25\\xf8\\x68\\xc6\\x4b\\x39\\x71\\xd2\\x30\\xc8\\xe6\\ \\x77\\xba\\x10\\x26\\xca\\x20\\x76\\xe1\\x3f\\x6f\\x01\\xee\\xd0\\x49\\xde\\xf0\\ \\xcd\\x23\\xd2\\x69\\xb1\\xc2\\x5f\\xcc\\x65\\x9d\\xd5\\x84\\x5b\\x45\\xa3\\xbf\\ \\x81\\x98\\x75\\x02\\xde\\xdf\\x79\\x95\\x46\\x6d\\x7b\\x4f\\xd7\\x1c\\x9a\\xad\\ \\xc0\\x2f\\x48\\x2f\\x21\\x8d\\xf4\\x48\\x5d\\x8b\\xa4\\x3b\\x0d\\x35\\x1c\\xb2\\ \\x73\\xfa\\x1b\\x16\\xda\\xd8\\x89\\x82\\xb5\\xa8\\xea\\x18\\x7a\\xc1\\x42\\xcf\\", "\\x00\\x63\\x00\\x6f\\x00\\x6e\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\ \" qt_resource_struct_v1 = b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \" qt_resource_struct_v2 = b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\", "\\x08\\x47\\xc7\\x42\\xb3\\xdb\\x5e\\xb2\\xdf\\xb6\\x87\\xe4\\x1b\\xe0\\x2b\\x8b\\ \\x42\\xc3\\x29\\x48\\xaa\\x80\\xd1\\x95\\x04\\xdf\\x0b\\x53\\x57\\x0c\\x1e\\x13\\ 
\\x12\\xf7\\x00\\x3f\\xb9\\x81\\x1d\\x90\\xf6\\x13\\xfc\\xb7\\xda\\x86\\xf3\\x91\\ \\x20\\x1c\\xfc\\x1d\\xac\\x13\\x70\\xd9\\x2f\\x30\\x0a\\x70\\x45\\xe1\\xfb\\x10\\ \\x1e\\x26\\x03\\x6d\\x18\\xfe\\x15\\x50\\xd0\\x08\\xa5\\x11\\x4c\\xef\\x1a\\x5c\\ \\x15\\xd3\\xa9\\x26\\x8b\\x0c\\x3c\\x64\\xf0\\x2b\\xc6\\x34\\xa1\\x98\\xd3\\xe7\\ \\x25\\x0e\\x54\\x5a\\x1c\\x93\\x5d\\xef\\xc0\\x26\\xc1\\x69\\xe6\\xae\\x4e\\x4b\\ \\xfe\\x95\\xef\\xb3\\x9d\\xbe\\xf8\\x2b\\xb8\\x02\\x0c\\x88\\x43\\x5e\\x04\\x9a\\ \\x0c\\x30\\x3d\\x90\\x64\\x42\\xd0\\x70\\xd2\\x6b\\x1c\\x5c\\x07\\xc0\\xac\\xb0\\ \\xe1\\x6b\\x0d\\x00\\xb3\\xd5\\x89\\xd4\\x01\\x87\\x92\\xba\\x3b\\x8b\\x71\\x3b\\ \\xff\\x6f\\xea\\x03\\x4d\\x1d\\x30\\xa4\\x1a\\x4e\\x5a\\xf6\\xae\\xf5\\x07\\x56\\ \\x26\\x13\\xab\\x7e\\x04\\x77\\x47\\x0d\\x78\\xb7\\x62\\xfc\\xd3\\x42\\x27\\x0d\\ \\xe8\\x16\\x87\\x87\\x4f\\x42\\x61\\x1d\\x94\\xf7\\x87\\x25\\xe9\\x7f\\x99\\x67\\", "\\x54\\x29\\xad\\x44\\x1a\\x93\\x22\\x1a\\xd7\\x20\\x8d\\xb0\\x67\\x22\\x95\\xb5\\ \\xa8\\x7a\\x1f\\x52\\xae\\xcb\\x59\\xc4\\x20\\xb5\\x5d\\xe3\\xd5\\x8f\\xad\\x68\\ \\x6f\\x33\\x0a\\xbf\\x66\\x48\\x05\\xc9\\x12\\x57\\x4a\\xfe\\x21\\xaa\\x2c\\x47\\ \\x3b\\x9a\\x91\\x8a\\x0d\\xc9\\xb8\\x44\\x72\\x4f\\x53\\xec\\xfe\\x02\\x55\\x34\\ \\x22\\xeb\\x30\\xd2\\xa4\\x44\\xc9\\x78\\xca\\x19\\xf6\\xcc\\x97\\x86\\x95\\x48\\ \\xfb\\x90\\x0a\\xa6\\x48\\xfc\\x20\\x79\\x2f\\x96\\x16\\xe3\\x4c\\xbc\\xbe\\x92\\ \\xce\\x19\\x2e\\xcd\\xec\\xa9\\x38\\x47\\xd5\\x4e\\x4c\\x56\\xef\\xbb\\xa4\\xe1\\ \\xdd\\x24\\xbe\\x51\\x05\\x71\\xd5\\x10\\x57\\x98\\xbd\\xd2\\xe8\\x3e\\xd2\\xb9\\ \\x1e\\x69\\x58\\x9a\\x94\\xf0\\x90\\x4e\\x25\\xaf\\x11\\x81\\xa9\\xd9\\xd2\\x3f\\ \\x3c\\x6a\\x6d\\x41\\x3f\\xcb\\xa1\\xd0\\xc4\\x04\\xc9\\x3f\\x48\\xb1\\xaf\\xd0\\ \\x1c\\xa1\\xc6\\xdf\\x4c\\xa9\\xb7\\x47\\xa2\\x54\\xe2\\x42\\x89\\xfb\\xa4\\xa4\\ \\x73\\xb4\\xa6\\x02\\xad\\x6e\\x41\\xea\\xe6\\x93\\xb8\\xd5\\x99\\x0d\\xfe\\x4b\\ \\xea\\x31\\x5c\\xf1\\x5f\\x91\\x75\\x10\\xe9\\xbc\\x14\\x27\\xc6\\xbe\\x69\\x9f\\", 
"\\xd8\\xe9\\x81\\x5f\\xde\\x06\\x75\\x01\\x0a\\xc1\\x58\\x05\\xd7\\x3e\\x0b\\x97\\ \\xc6\\xed\\x47\\xee\\x02\\xfe\\x04\\x36\\x4f\\x27\\xca\\x62\\x56\\x92\\x4e\\x77\\ \\x1b\\xd8\\xa4\\xb2\\x01\\x1f\\x75\\x98\\xf9\\x8f\\x42\\xcd\\x1c\\x5a\\xcc\\xe1\\ \\xb8\\x83\\x98\\x44\\xb0\\x68\\x02\\x7c\\xc0\\x1e\\xe0\\x9a\\x74\\xa0\\x08\\xa8\\ \\x05\\x16\\x79\\x30\\x82\\x83\\x70\\xd3\\x08\\xc9\\x95\\xd0\\x91\\xe8\\x14\\x60\\ \\x02\\xe9\\xf6\\x8e\\xfa\\x0e\\x50\\x7b\\x67\\x88\\x46\\x20\\x94\\x05\\x89\\x7d\\ \\xa3\\x50\\xd3\\xe2\\x7c\\xae\\x0b\\x60\\x80\\x4a\\xe0\\xf8\\x60\\xdc\\xcf\\x54\\ \\xd9\\x4d\\x45\\x1c\\xf8\\xc2\\xfe\\x21\\xcf\\x09\\x98\\xf9\\x13\\x5c\\xe9\\x3c\\ \\x36\\xd9\\xf9\\xea\\x70\\xc0\\xb7\\x06\\xf7\\xba\\xc5\\x0c\\xe6\\x01\\xd2\\x71\\ \\x93\\x42\\x94\\x44\\x0e\\x63\\x31\\x91\\xfa\\x9a\\x67\\x68\\xe7\\x26\\x16\\x58\\ \\xc9\\xb8\\x5d\\xce\\x77\\xe5\\x34\\xea\\x21\\x60\\x7b\\x29\\x8c\\xbd\\x0c\\xc8\\ \\x05\\xd6\\x47\\xa1\\xf2\\x28\\x14\\xc3\\xe9\\x3b\\x0c\\x62\\x45\\xb5\\xc4\\x32\\ \\x6a\\x09\\xf9\\x21\\xec\\x03\\x9f\\x0f\\x3c\\xfd\\xa0\\xc6\\x03\\x41\\xa0\\x3c\\", "= b\"\\ \\x00\\x00\\x19\\x3d\\ \\x89\\ \\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0d\\x49\\x48\\x44\\x52\\x00\\ \\x00\\x00\\x32\\x00\\x00\\x00\\x32\\x08\\x06\\x00\\x00\\x00\\x1e\\x3f\\x88\\xb1\\ \\x00\\x00\\x00\\x06\\x62\\x4b\\x47\\x44\\x00\\xff\\x00\\xff\\x00\\xff\\xa0\\xbd\\ \\xa7\\x93\\x00\\x00\\x00\\x09\\x70\\x48\\x59\\x73\\x00\\x00\\x0b\\x13\\x00\\x00\\ \\x0b\\x13\\x01\\x00\\x9a\\x9c\\x18\\x00\\x00\\x00\\x07\\x74\\x49\\x4d\\x45\\x07\\ \\xe3\\x01\\x1e\\x09\\x34\\x07\\xfb\\x3d\\x97\\x4b\\x00\\x00\\x00\\x1d\\x69\\x54\\ \\x58\\x74\\x43\\x6f\\x6d\\x6d\\x65\\x6e\\x74\\x00\\x00\\x00\\x00\\x00\\x43\\x72\\ \\x65\\x61\\x74\\x65\\x64\\x20\\x77\\x69\\x74\\x68\\x20\\x47\\x49\\x4d\\x50\\x64\\ \\x2e\\x65\\x07\\x00\\x00\\x18\\xa1\\x49\\x44\\x41\\x54\\x68\\xde\\x35\\x99\\x67\\", "\\x74\\x77\\x92\\x6a\\x8e\\xd8\\x01\\xcb\\x9a\\xed\\x95\\xdc\\xa5\\x12\\xb3\\x24\\ 
\\x5e\\xb7\\x21\\xf7\\x86\\x5b\\x9a\\xd4\\xdd\\xce\\xf4\\xec\\x95\\xa6\\xb9\\xa5\\ \\xbc\\xa1\\xb2\\x08\\x48\\x7d\\xc7\\x4b\\xf3\\x3d\\x92\\xb1\\x51\\x41\\x24\\x15\\ \\x0c\\x94\\x6e\\xf1\\xaa\\xd1\\xa8\\xd3\\x7e\\xa4\\xbd\\xc8\\x4e\\x9b\\xf7\\x64\\ \\x49\\x77\\x23\\x0d\\xef\\xa3\\x33\\x54\\xca\\x0c\\xb7\\x9b\\x04\\x01\\xcb\\xeb\\ \\x58\\x9f\\x6f\\x0d\\x4a\\xf6\\xc0\\x0a\\xa0\\x75\\xa2\\x23\\xc4\\x78\\xe1\\x93\\ \\x3c\\xa6\\xfd\\xdb\\x66\\xa9\\xe3\\xc0\\x67\\xe7\\xc2\\x2f\\x4f\\xc2\\xbe\\x12\\ \\x9b\\x1f\\x9a\\x12\\xa0\\x7a\\x11\\x34\\xac\\x87\\x23\\xef\\x74\\xe0\\xcd\\x84\\ \\x7e\\xd7\\x27\\x63\\x2c\\x98\\x06\\xb1\\x09\\xf6\\xce\\x1b\\xf5\\xf0\\xc8\\x12\\ \\x08\\xc5\\x61\\xcd\\x9d\\x0e\\xe7\\x6d\\x86\\x09\\x31\\xa8\\x1d\\x83\\x41\\x10\\ \\xda\\xf2\\x6c\\x37\\x61\\x96\\x11\\x02\\x38\\x3b\\x01\\x3c\\x11\\xd2\\x53\\xbe\\ \\xc7\\x72\\x7e\\x7b\\x3b\\x13\\x89\\xbf\\xf3\\x03\\x54\\xe6\\x41\\xef\\x32\\x0a\\ \\x86\\xf5\\xc0\\xd4\\x29\\x11\\x00\\x1a\\x8a\\x80\\x30\\xd0\\x2c\\xce\\x59\\x0a\\", "\\xc9\\x83\\x48\\x36\\x0c\\x38\\x08\\xe3\\xc5\\xee\\x3b\\xe0\\x9b\\x42\\xd8\\x0d\\ \\x7c\\xb0\\x03\\xd2\\xbe\\x06\\xf7\\x27\\x5d\\xe0\\xf4\\x28\\x70\\xf9\\x20\\xe5\\ \\x04\\x47\\x56\\xed\\xa0\\x6a\\x84\\x49\\x69\\xd8\\x22\\x73\\x85\\x9b\\xb4\\xc7\\ \\xa6\\x42\\x73\\x6f\\x02\\x23\\x9b\\x49\\x78\\x66\\x09\\xee\\x6f\\xc3\\x84\\xa3\\ \\x26\\xbe\\x7d\\x97\\x62\\xb5\\xe7\\xa3\\x59\\x3f\\x62\\xae\\xf6\\xf3\\xf6\\xab\\ \\x11\\xae\\x5f\\x94\\x8f\\x7b\\xe5\\x85\\x7c\\x78\\xaf\\x9f\\xa9\\x89\\xaf\\x52\\ \\x7b\\xf6\\x3a\\x1a\\xc9\\x67\\xe0\\xb7\\x43\\xd9\\xfe\\xf7\\xcf\\x30\\xbf\\x6b\\ \\xde\\xcf\\xdf\\x6a\\x81\\xb9\\x40\\x03\\x6c\\xe9\\x7a\\x88\\x7f\\x54\\x47\\x71\\ \\x7d\\x0d\\x9c\\xee\\x0f\\x87\\xa1\\x79\\x3f\\x1c\\xc7\\xcf\\x50\\x60\\xc6\\xbf\\ \\x2c\\xf8\\x4f\\x7f\\x68\\x1e\\x00\\x1d\\xbd\\xe0\\x60\\x0a\\x08\\xb2\\x42\\x70\\ \\x3e\\xb0\\x28\\x0c\\x49\\x19\\x10\\x9e\\x0a\\x34\\x74\\x03\\x6e\\x82\\xf8\\xc3\\ \\xd0\\xf2\\x02\\x7d\\x2f\\x99\\xc1\\xa8\\x45\\x09\\x14\\xd7\\x42\\x5a\\x38\\x0e\\ 
\\x1d\\x6d\\x44\\x18\\x88\\xf7\\xb7\\x97\\x30\\x27\\x7e\\x47\\xa8\\xde\\xc0\\xb7\\", "\\xad\\x49\\xc4\\xbf\\x7c\\x93\\x0e\\x6e\\x64\\x37\\x06\\xb5\\x18\\x40\\x9c\\xae\\ \\xfc\\x06\\xe1\\x2d\\x98\\x3b\\x3c\\x50\\xb2\\x1a\\x88\\x09\\x12\\x5a\\x38\\x36\\ \\x0b\\xfa\\x06\\xa0\\xe7\\x5c\\xe7\\xc6\\xa6\\x3d\\x13\\xcc\\x3f\\x61\\xc6\\x87\\ \\xbc\\x30\\x3f\\x42\\xa6\\x63\\x2e\\x52\\x9d\\x1b\\x9b\\x06\\x87\\xb5\\xf2\\x1d\\ \\xf2\\x39\\x3b\\x1b\\x98\\x71\\x04\\xdb\\x17\\xec\\x06\\x7a\\x90\\xba\\x60\\x22\\ \\x5d\\x57\\x83\\x31\\x59\\x90\\xfe\\x81\\x8d\\x41\\x2b\\x1b\\xfe\\xf4\\xdb\\x4d\\ \\x4e\\x93\\xc3\\x8b\\x95\\x50\\x0b\\x66\\xe7\\x20\\xba\\x63\\x51\\x08\\x98\\xb9\\ \\x0f\\xc3\\x40\\x37\\xd6\\xca\\x77\\xe8\\xe0\\x16\\xc2\\x24\\x72\\x35\\x01\\x7a\\ \\x02\\x73\\xdc\\xaf\\xe2\\x4e\\x9a\\x00\\xd9\\x41\\x38\\xf8\\x2b\\xd2\\x30\\xa4\\ \\x41\\x48\\x2f\\xdb\\x01\\xa9\\x7d\\x09\\xd2\\x40\\xaf\\xe4\\x2a\\x92\\xe8\\xa7\\ \\xce\\xfb\\x0d\\xbd\\x1e\\x43\\xcb\\x85\\x2a\\xcb\\x50\\xe0\\x15\\x14\\x7e\\xd3\\ \\xd0\\xd7\\x16\\xfa\\xde\\x42\\x91\\xcf\\x91\\x35\\x07\\x35\\x6f\\x40\\xe5\\x42\\ \\x2d\\x9d\\xc8\\x7a\\xd0\\x70\\x02\\xd8\\x42\\x89\\xb7\\xa5\\x82\\x0b\\x14\\xd9\\", "\\x5e\\xc2\\x68\\x8e\\xa3\\x0a\\x13\\x06\\xaf\\xb7\\xc5\\x96\\xdf\\x6d\\x24\\xd4\\ \\x7f\\x0c\\x1b\\x7d\\x30\\xfd\\x59\\xe0\\x95\\x3c\\x9b\\x3e\\x53\\x92\\x60\\xc8\\ \\x4e\\x8e\\x7e\\xda\\xcc\\xa6\\x02\\x1b\\x30\\xa5\\x0e\\xc1\\xa4\\x01\\xed\\x40\\ \\x7f\\xc0\\x72\\x40\\x97\\xb2\\x1d\\xdc\\x73\\xbd\\x18\\xdb\\x87\\x43\\x60\\x18\\ \\xf6\\xf5\\x45\\xa1\\x73\\x5a\\x47\\x1c\\x85\\x1f\\x07\\xb8\\x11\\x11\\x0c\\x63\\ \\x09\\x5c\\xf1\\x5f\\xd8\\x52\\x02\\xc1\\x73\\x81\\x09\\x58\\x4c\\xc4\\x34\\x1f\\ \\x02\\xb7\\x0b\\x2b\\xf2\\x10\\x15\\xf4\\x27\\x07\\x51\\xe5\\x3a\\x4b\\xbf\\xd4\\ \\x67\\x20\\xb0\\x0d\\x3c\\x60\\xe8\\x35\\xd4\\x36\\x13\\x52\\xd7\\x39\\xc7\\x9a\\ \\x0a\\xbc\\x66\\x6f\\xc4\\xe6\\x75\\xb0\\x3c\\x03\\x1e\\x5d\\x09\\xc5\\x37\\x26\\ \\x43\\xa8\\x14\\xe8\\x05\\xae\\xc3\\x30\\xb1\\x8c\\xda\\xd7\\xa2\\xfc\\xd6\\xcb\\ 
\\xee\\xcf\\x7c\\xa7\\x80\\x11\\x0d\\x90\\xf5\\x36\\xf0\\x41\\x3e\\x9c\\xfc\\xbb\\ \\xb3\\x78\\xb7\\xf3\\x37\\x08\\x29\\xef\\x40\\x7b\\x8a\\x53\\x72\\x7f\\xe0\\x38\\ \\xe2\\x7a\\x0c\\x42\\xb6\\xbe\\xf8\\x1f\\x86\\x60\\x0c\\xb8\\x11\\xc8\\x43\\x0c\\", "\\xd1\\x04\\xf1\\x54\\x0a\\x4f\\x0f\\x85\\x13\\x82\\xba\\x22\\x16\\x1d\\x9b\\x42\\ \\x63\\xd5\\xc5\\xb4\\x9b\\x01\\xc2\\xed\\x43\\xe1\\x93\\x76\\xd4\\xb1\\x12\\xe9\\ \\x69\\x74\\xa8\\x19\\x6d\\xaf\\x40\\xba\\x18\\x59\\x43\\xd0\\xd3\\x21\\x54\\xbf\\ \\x18\\xc9\\x9f\\x2a\\xb9\\xfc\\x52\\x8a\\xa9\\x8a\\x15\\x86\\xae\\x10\\xfa\\x57\\ \\x0c\\x35\\xcd\\x46\\x56\\x4f\\xa4\\x81\\x09\\x52\\x41\\x9a\\xa2\\x8b\\xd0\\x71\\ \\xa1\\xf8\\xcd\\x3e\\x89\\xc7\\x25\\x56\\x4a\\x3d\\x67\\x4b\\xcb\\x90\\x36\\x21\\ \\xdd\\xe4\\x96\\x98\\x22\\xf1\\x9e\\xc4\\x42\\x89\\xc9\\xd2\\xa7\\x48\\x2c\\x95\\ \\x28\\x73\\xe0\\x85\\x2c\\x5e\\x56\\x05\\x96\\xda\\x78\\x45\\x4a\\x19\\xae\\x06\\ \\xa4\\x2a\\x1a\\x14\\x4b\\xb9\\x43\\xba\\x20\\x59\\x61\\xe3\\x65\\xfd\\xb3\\x58\\ \\x7a\\xce\\x1f\\xd6\\xdb\\xee\\x23\\xda\\x53\\x34\\x5f\\x9c\\xe8\\x40\\x0d\\xfb\\ \\x90\\x1e\\x42\\x7f\\xb6\\x23\\x6b\\x13\\xd2\\xf9\\xa6\\xc2\\xff\\x41\\x95\\xeb\\ \\x90\\xd2\\x3d\\x92\\xd1\\x53\\x2a\\x71\\xab\\x6a\\x13\\xfa\\x8f\\xd0\\xea\\xb3\\ \\x28\\x3a\\x16\\x89\\xf3\\x24\\x6e\\x92\\xb8\\x56\\x62\\x92\\x42\\x4f\\xb8\\x55\\", "\\x54\\x25\\x3a\\x2a\\x6f\\xc1\\x59\\xcb\\x66\\xea\\xdc\\x1d\\x58\\x18\\x74\\xd0\\ \\x07\\x6a\\x27\\x41\\x69\\x8c\\x94\\x21\\x8f\\x30\\x90\\x1a\\x92\\x9c\\xdf\\x5c\\ \\x1b\\xef\\x43\\x78\\xdd\\x2a\\xa8\\xc9\\xc5\\xc4\\x65\\x92\\x07\\x18\\x09\\xb6\\ \\xa2\\x33\\xd4\\xb6\\xb1\\x05\\x6b\\xed\\x84\\x5d\\x56\\x0a\\x4c\\x32\\x00\\xc0\\ \\x3a\\x0f\\xaa\\xc6\\x90\\xfc\\x9c\\xcd\\x03\\x38\\x11\\x20\\xd7\\xb1\\x80\\x03\\ \\x42\\x90\\x71\\xbd\\x17\\xfe\\xbc\\xd0\\x71\\x6e\\x2e\\x7b\\x03\\x2e\\xdd\\x4d\\ \\xe4\\x83\\x00\\x34\\x80\\xb1\\x74\\x08\\xc4\\xaf\\x43\\xe4\\x80\\x6b\\x05\\x5c\\ \\x12\\x84\\xca\\xae\\x8e\\x2f\\xe9\\x84\\x90\\xcb\\xde\\x9d\\x92\\x33\\x44\\xe9\\ 
\\xa0\\x85\\x24\\xb0\\x86\\x43\\x52\\x12\\xdc\\xd5\\x88\\xc1\\x6b\\x74\\xa3\\x83\\ \\x14\\x22\\x54\\x60\\x50\\x13\\xbd\\x04\\xbe\\xee\\x83\\xf9\\xdb\\x4d\\x16\\xdb\\ \\x80\\xc3\\x5e\\x68\\xbe\\xd5\\xd1\\xa1\\x69\\x10\\x1a\\x69\\xef\\xf2\\x7a\\xe0\\ \\xd8\\x0b\\x31\\x98\\x52\\x03\\xae\\x3f\\xec\\x33\\xa8\\xf0\\x91\\x7a\\xc8\\x2e\\ \\xa4\\xc5\\xd1\\x69\\x01\\x3d\\x7f\\x04\\x4e\\x7b\\x9c\\x73\\xaa\\xb2\\xd3\\xcd\\", "\\x73\\xb4\\xa6\\x02\\xad\\x6e\\x41\\xea\\xe6\\x93\\xb8\\xd5\\x99\\x0d\\xfe\\x4b\\ \\xea\\x31\\x5c\\xf1\\x5f\\x91\\x75\\x10\\xe9\\xbc\\x14\\x27\\xc6\\xbe\\x69\\x9f\\ \\xc4\\x7d\\x49\\xd2\\x55\\x99\\x4e\\xbc\\x9d\\x2a\\x3d\\x89\\xc4\\x74\\x89\\xf7\\ \\xa5\\x31\\xbd\\xa5\\xae\\x97\\x49\\xac\\x52\\x88\\x36\\xa9\\x47\\x81\\xfd\\x3a\\ \\x35\\x8a\\x52\\x27\\xb1\\x5c\\x16\\x6f\\x48\\x4f\\xa1\\x78\\xca\\x75\\x6a\\x4a\\ \\x78\\x49\\x35\\x74\\x68\\x8e\\x4b\\x42\\x59\\x3e\\x59\\xef\\xa1\\xda\\x00\\x8a\\ \\x4f\\x4f\\x96\\x28\\x96\\xae\\x40\\xc1\\xfd\\xf6\\x04\\x2a\\xb0\\x14\\xc9\\x3f\\ \\xde\\x59\\xe8\\x34\\x89\\x4b\\xa4\\x94\\x6e\\xaa\\x5d\\x6a\\xea\\x45\\xa1\\x2f\\ \\x2c\\x64\\x6d\\x41\\x2a\\xcd\\x75\\xf4\\xe3\\x11\\x89\\xd5\\x92\\xeb\\x7d\\xe9\\ \\xd2\\x54\\xe9\\x17\\xa4\\x61\\x45\\x12\\x1f\\x4a\\x54\\x2a\\xc6\\x46\\x29\\x6d\\ \\x8c\\xc4\\x9b\\xb2\\x58\\x2b\\xa5\\xa7\\x39\\x8b\\x3d\\x29\\x75\\x4f\\x56\\x94\\ \\xb5\\x3a\\x4c\\x4c\\x01\\x0e\\x4a\\xc6\\x33\\x8a\\xd2\\xa4\\x38\\x01\\x1d\\x24\\ \\xa6\\xf6\\xb1\\xcb\\xa4\\xf3\\x5d\\x8a\\xe6\\x4e\\x51\\x1d\\x3f\\xeb\\x3b\\xa4\\", "\\xbe\\x07\\xdc\\x33\\x93\\xc0\\x17\\x81\\xd0\\x00\\xf0\\x1d\\x22\\xb8\\x3c\\x82\\ \\xd5\\x08\\xc9\\xff\\x01\\xca\\x52\\x80\\x9e\\xe0\\xe9\\xc0\\xba\\xfe\\x34\\x07\\ \\x66\\x84\\x38\\x3a\\x0a\\x48\\x80\\x0e\\x5b\\x5d\\x70\\x01\\x05\\x40\\x7e\\x0c\\ \\x52\\x6a\\xc0\\xfc\\xb7\\x1b\\x3e\\x1a\\x01\\x91\\x99\\x40\\x37\\xa0\\x0e\\x92\\ \\x0e\\xc3\\x9d\\xcf\\xc3\\xfb\\xf9\\xd0\\xfe\\x36\\x10\\x83\\x5e\\xf7\\xc3\\xd1\\ \\x6f\\x80\\x0e\\x2c\\x52\\x30\\xe9\\x0e\\xfc\\x8f\\x0a\\x9e\\x24\\x42\\x23\\x99\\ 
\\x74\\x52\\x46\\x37\\x06\\xf3\\x13\\x49\\x7c\\x03\\xc6\\x25\\x9c\\xd2\\x60\\xd6\\ \\x53\\x42\\x12\\x60\\x54\\xfd\\x82\\xaa\\xfa\\xc2\\xd6\\x4c\\x98\\xbd\\x15\\x12\\ \\xe7\\xb9\\xc0\\x63\\x41\\xc8\\x84\\xac\\x38\\x24\\x00\\xbf\\x00\\x5d\\x81\\x01\\ \\x06\\x8d\\x17\\x19\\x6c\\xb8\\xdc\\xe2\\x54\\x81\\xfd\\x52\\x9f\\xd3\\x90\\x17\\ \\x86\\xf6\\x1c\\xd8\\x92\\x0a\\x85\\xc0\\xc5\\xe5\\xe0\\x8d\\x81\\x2b\\x05\\x94\\ \\x01\\x91\\xd3\\x90\\xb8\\x20\\x17\\xde\\xeb\\x0f\\xa1\\xa1\\xc0\\x40\\xe0\\x0f\\ \\x18\\xbe\\x1c\\xf6\\x0f\\x86\\xce\\x47\\x81\\x1a\\x9b\\x3a\\x69\\x02\\xb6\\x42\\", "\\xf0\\xf0\\x4f\\xf0\\xc2\\x30\\xa8\\x9d\\x65\\xbf\\x97\\xbf\\x0e\\xd2\\x76\\x42\\ \\xf0\\x5e\\x70\\x1d\\x87\\x82\\x57\\x6c\\x80\\x06\\x93\\x20\\x2b\\x44\\x8c\\xbe\\ \\x88\\x5c\\x70\\x2f\\x24\\x9d\\x3a\\x76\\x91\\x49\\x53\\xfb\\x67\\x50\\x97\\x06\\ \\xa3\\xee\\x23\\xd5\\x5c\\x00\\xec\\xe0\\x24\\xb0\\x18\\x13\\x53\\x33\\xe1\\x50\\ \\x2a\\xc4\\xdb\\xed\\x93\\xef\\xb1\\x11\\x2a\\x3c\\xd0\\x7e\\x1d\\x30\\xa8\\x0a\\ \\x3a\\xdd\\x8c\\x98\\xe5\\x61\\x58\\x1b\\x7c\\x92\\x0d\\xdf\\x7f\\x19\\x87\\x3b\\ \\x37\\x41\\xd6\\xef\\x90\\x17\\xc1\\x2a\\xb6\\x59\\xb2\\x7b\\x3b\\x70\\xc8\\x07\\ \\xc6\\x01\\x60\\x37\\xa4\\x7d\\x41\\x7c\\xee\\xeb\\xb4\\xfc\\xb1\\x14\\xce\\xc6\\ \\x61\\x76\\x16\\xb4\\x5f\\x64\\xab\\x6c\\xea\\x7a\\x98\\xb3\\x01\\x4e\\xcc\\xb1\\ \\xbb\\xac\\x74\\x83\\xcd\\x54\\x64\\x43\\xef\\x03\\x70\\xfe\\x59\\xe2\\xb4\\x12\\ \\x66\\x02\\x8c\\xf8\\x89\\xac\\xcc\\x7b\\x71\\x11\\x63\\x1f\\x59\\xd4\\xb4\\x6f\\ \\x87\\xce\\x0c\\x78\\xe0\\x1d\\xba\\x78\\xdf\\x60\\x32\\x27\\xed\\xaf\\xea\\x2c\\ \\x7a\\x44\\xa8\\x79\\x0b\\xd2\\x6c\\xa4\\xbb\\xd1\\x67\\x31\\xb4\\x40\\x48\\xaf\\", "b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ 
\\x00\\x00\\x01\\x74\\x20\\x97\\x84\\xbd\\ \" qt_version = [int(v) for v", "\\xf6\\xf5\\x45\\xa1\\x73\\x5a\\x47\\x1c\\x85\\x1f\\x07\\xb8\\x11\\x11\\x0c\\x63\\ \\x09\\x5c\\xf1\\x5f\\xd8\\x52\\x02\\xc1\\x73\\x81\\x09\\x58\\x4c\\xc4\\x34\\x1f\\ \\x02\\xb7\\x0b\\x2b\\xf2\\x10\\x15\\xf4\\x27\\x07\\x51\\xe5\\x3a\\x4b\\xbf\\xd4\\ \\x67\\x20\\xb0\\x0d\\x3c\\x60\\xe8\\x35\\xd4\\x36\\x13\\x52\\xd7\\x39\\xc7\\x9a\\ \\x0a\\xbc\\x66\\x6f\\xc4\\xe6\\x75\\xb0\\x3c\\x03\\x1e\\x5d\\x09\\xc5\\x37\\x26\\ \\x43\\xa8\\x14\\xe8\\x05\\xae\\xc3\\x30\\xb1\\x8c\\xda\\xd7\\xa2\\xfc\\xd6\\xcb\\ \\xee\\xcf\\x7c\\xa7\\x80\\x11\\x0d\\x90\\xf5\\x36\\xf0\\x41\\x3e\\x9c\\xfc\\xbb\\ \\xb3\\x78\\xb7\\xf3\\x37\\x08\\x29\\xef\\x40\\x7b\\x8a\\x53\\x72\\x7f\\xe0\\x38\\ \\xe2\\x7a\\x0c\\x42\\xb6\\xbe\\xf8\\x1f\\x86\\x60\\x0c\\xb8\\x11\\xc8\\x43\\x0c\\ \\x25\\x46\\x04\\x8f\\x71\\x2d\\xf4\\xad\\x27\\x7c\\xf8\\x67\\x5a\\xc8\\x23\\x86\\ \\xa8\\xc5\\xc0\\xed\\xde\\xce\\xe0\\x41\\xf7\\x81\\x3e\\x46\\x65\\x01\\x14\\x38\\ \\x81\\x74\\x1c\\x6d\\x17\\xea\\xfc\\x1a\\x29\\x0d\\x69\\x80\\x5b\\x07\\x4e\\xa1\\ \\xfb\\x85\\xaa\\xca\\x90\\xce\\xc3\\x0e\\x58\\x8c\\x95\\x98\\x29\\x65\\x75\\x51\\", "\\x29\\xa0\\xbb\\x51\\x49\\x81\\xf1\\x14\\x46\\x49\\x03\\xc6\\x45\\x42\\x5d\\x81\\ \\x66\\xc0\\x04\\x6e\\x06\\xa6\\x6e\\x80\\x84\\xc7\\x9d\\xcd\\x99\\x0a\\xca\\x85\\ \\x78\\x1d\\xb8\\xd7\\x02\\x95\\x69\\xd0\\x91\\xe5\\x54\\x98\\x68\\x0b\\x27\\x89\\ \\x58\\x0f\\xfc\\xc0\\xb6\\xf9\\x21\\x2e\\x3a\\x08\\xc6\\x38\\x2f\\xd4\\x74\\x07\\ \\x86\\x01\\x17\\x01\\x67\\xc0\\xa8\\x85\\x9e\\xab\\x88\\xdd\\xd6\\x8c\\xbb\\x05\\ \\xd8\\xe9\\x81\\x5f\\xde\\x06\\x75\\x01\\x0a\\xc1\\x58\\x05\\xd7\\x3e\\x0b\\x97\\ \\xc6\\xed\\x47\\xee\\x02\\xfe\\x04\\x36\\x4f\\x27\\xca\\x62\\x56\\x92\\x4e\\x77\\ \\x1b\\xd8\\xa4\\xb2\\x01\\x1f\\x75\\x98\\xf9\\x8f\\x42\\xcd\\x1c\\x5a\\xcc\\xe1\\ \\xb8\\x83\\x98\\x44\\xb0\\x68\\x02\\x7c\\xc0\\x1e\\xe0\\x9a\\x74\\xa0\\x08\\xa8\\ \\x05\\x16\\x79\\x30\\x82\\x83\\x70\\xd3\\x08\\xc9\\x95\\xd0\\x91\\xe8\\x14\\x60\\ 
\\x02\\xe9\\xf6\\x8e\\xfa\\x0e\\x50\\x7b\\x67\\x88\\x46\\x20\\x94\\x05\\x89\\x7d\\ \\xa3\\x50\\xd3\\xe2\\x7c\\xae\\x0b\\x60\\x80\\x4a\\xe0\\xf8\\x60\\xdc\\xcf\\x54\\ \\xd9\\x4d\\x45\\x1c\\xf8\\xc2\\xfe\\x21\\xcf\\x09\\x98\\xf9\\x13\\x5c\\xe9\\x3c\\", "lost! # from PyQt5 import QtCore from silx.gui import qt as QtCore qt_resource_data", "\\x18\\x14\\x85\\xdc\\x1b\\x80\\x03\\x06\\x74\\x26\\xa2\\xe2\\x04\\x3a\\x1f\\x69\\ \\xc7\\xec\\x1b\\xc3\\xdd\\x28\\x5c\\x8b\\x4d\\x8c\\xd5\\xbd\\xa1\\xf1\\x6a\\xa0\\ \\x18\\x68\\x05\\xb2\\x80\\x1e\\xc0\\x66\\x48\\xff\\x11\\x46\\xee\\x04\\x3f\\xf0\\ \\xdd\\xe5\\x28\\xf2\\x36\\x27\\x29\\xc1\\x02\\x0a\\x68\\x04\\xc2\\xb4\\x51\\xc0\\ \\x29\\xa0\\xbb\\x51\\x49\\x81\\xf1\\x14\\x46\\x49\\x03\\xc6\\x45\\x42\\x5d\\x81\\ \\x66\\xc0\\x04\\x6e\\x06\\xa6\\x6e\\x80\\x84\\xc7\\x9d\\xcd\\x99\\x0a\\xca\\x85\\ \\x78\\x1d\\xb8\\xd7\\x02\\x95\\x69\\xd0\\x91\\xe5\\x54\\x98\\x68\\x0b\\x27\\x89\\ \\x58\\x0f\\xfc\\xc0\\xb6\\xf9\\x21\\x2e\\x3a\\x08\\xc6\\x38\\x2f\\xd4\\x74\\x07\\ \\x86\\x01\\x17\\x01\\x67\\xc0\\xa8\\x85\\x9e\\xab\\x88\\xdd\\xd6\\x8c\\xbb\\x05\\ \\xd8\\xe9\\x81\\x5f\\xde\\x06\\x75\\x01\\x0a\\xc1\\x58\\x05\\xd7\\x3e\\x0b\\x97\\ \\xc6\\xed\\x47\\xee\\x02\\xfe\\x04\\x36\\x4f\\x27\\xca\\x62\\x56\\x92\\x4e\\x77\\ \\x1b\\xd8\\xa4\\xb2\\x01\\x1f\\x75\\x98\\xf9\\x8f\\x42\\xcd\\x1c\\x5a\\xcc\\xe1\\ \\xb8\\x83\\x98\\x44\\xb0\\x68\\x02\\x7c\\xc0\\x1e\\xe0\\x9a\\x74\\xa0\\x08\\xa8\\", "\\x02\\xb7\\x0b\\x2b\\xf2\\x10\\x15\\xf4\\x27\\x07\\x51\\xe5\\x3a\\x4b\\xbf\\xd4\\ \\x67\\x20\\xb0\\x0d\\x3c\\x60\\xe8\\x35\\xd4\\x36\\x13\\x52\\xd7\\x39\\xc7\\x9a\\ \\x0a\\xbc\\x66\\x6f\\xc4\\xe6\\x75\\xb0\\x3c\\x03\\x1e\\x5d\\x09\\xc5\\x37\\x26\\ \\x43\\xa8\\x14\\xe8\\x05\\xae\\xc3\\x30\\xb1\\x8c\\xda\\xd7\\xa2\\xfc\\xd6\\xcb\\ \\xee\\xcf\\x7c\\xa7\\x80\\x11\\x0d\\x90\\xf5\\x36\\xf0\\x41\\x3e\\x9c\\xfc\\xbb\\ \\xb3\\x78\\xb7\\xf3\\x37\\x08\\x29\\xef\\x40\\x7b\\x8a\\x53\\x72\\x7f\\xe0\\x38\\ 
\\xe2\\x7a\\x0c\\x42\\xb6\\xbe\\xf8\\x1f\\x86\\x60\\x0c\\xb8\\x11\\xc8\\x43\\x0c\\ \\x25\\x46\\x04\\x8f\\x71\\x2d\\xf4\\xad\\x27\\x7c\\xf8\\x67\\x5a\\xc8\\x23\\x86\\ \\xa8\\xc5\\xc0\\xed\\xde\\xce\\xe0\\x41\\xf7\\x81\\x3e\\x46\\x65\\x01\\x14\\x38\\ \\x81\\x74\\x1c\\x6d\\x17\\xea\\xfc\\x1a\\x29\\x0d\\x69\\x80\\x5b\\x07\\x4e\\xa1\\ \\xfb\\x85\\xaa\\xca\\x90\\xce\\xc3\\x0e\\x58\\x8c\\x95\\x98\\x29\\x65\\x75\\x51\\ \\x60\\x17\\xfa\\x40\\x68\\x95\\x50\\x5d\\x25\\xd2\\x39\\xa9\\x12\\xe3\\x24\\x1e\\ \\x96\\x78\\x59\\xe2\\xff\\x6c\\x07\\x3c\\x7c\\xac\\xf4\\x32\\x52\\xde\\x60\\x89\\", "\\x8c\\xc4\\x9b\\xb2\\x58\\x2b\\xa5\\xa7\\x39\\x8b\\x3d\\x29\\x75\\x4f\\x56\\x94\\ \\xb5\\x3a\\x4c\\x4c\\x01\\x0e\\x4a\\xc6\\x33\\x8a\\xd2\\xa4\\x38\\x01\\x1d\\x24\\ \\xa6\\xf6\\xb1\\xcb\\xa4\\xf3\\x5d\\x8a\\xe6\\x4e\\x51\\x1d\\x3f\\xeb\\x3b\\xa4\\ \\xd5\\xc4\\xf4\\x11\\x12\\xf1\\xcf\\x91\\x1e\\x47\\x9a\\x99\\x2d\\xe5\\xf8\\xa5\\ \\x2b\\x4c\\xe9\\x5b\\x54\\x17\\x45\\xd6\\x03\\x6e\\xc9\\xdf\\x5d\\xe2\\x5c\\x7b\\ \\x66\\xe8\\xc9\\x97\\xae\\x34\\xf4\\xe9\\x09\\x7b\\x28\\xba\\x4a\\x68\\xb1\\xd0\\ \\xd9\\x16\\xd4\\xbc\\x1d\\xe9\\x2a\\xbf\\xc4\\x15\\x12\\xf3\\xd5\\x4a\\xab\\x2c\\ \\x8e\\x4b\\xb9\\x23\\xa5\\x67\\x90\\xae\\x2a\\x94\\x58\\xa9\\x06\\xa4\\x20\\x4b\\ \\xed\\xcb\\x0b\\xf6\\x4a\\xbc\\xab\\x38\\xfb\\x55\\x81\\x65\\x4f\\x79\\xf9\\x46\\ \\xed\\x04\\x74\\x9a\\x6a\\x1d\\x24\\xa4\\xad\\x48\\x11\\x3a\\x15\\x34\\x7f\\x94\\ \\xdc\\x53\\x15\\x49\\xdd\\xab\\x36\\xef\\xef\\xfa\\xa2\\xf0\\xa0\\x16\\x76\\x3d\\ \\xa2\\x08\\xeb\\x74\\x77\\x9e\\xe4\\x9a\\xdb\\x9f\\x79\\xc6\\xda\\x14\\xd8\\x38\\ \\x19\\x92\\x8f\\xc1\\xe8\\x30\\xd1\\x2b\\x0d\\x52\\x9e\\xea\\x86\\xf1\\x45\\x02\\", "\\x0a\\xbc\\x66\\x6f\\xc4\\xe6\\x75\\xb0\\x3c\\x03\\x1e\\x5d\\x09\\xc5\\x37\\x26\\ \\x43\\xa8\\x14\\xe8\\x05\\xae\\xc3\\x30\\xb1\\x8c\\xda\\xd7\\xa2\\xfc\\xd6\\xcb\\ \\xee\\xcf\\x7c\\xa7\\x80\\x11\\x0d\\x90\\xf5\\x36\\xf0\\x41\\x3e\\x9c\\xfc\\xbb\\ \\xb3\\x78\\xb7\\xf3\\x37\\x08\\x29\\xef\\x40\\x7b\\x8a\\x53\\x72\\x7f\\xe0\\x38\\ 
\\xe2\\x7a\\x0c\\x42\\xb6\\xbe\\xf8\\x1f\\x86\\x60\\x0c\\xb8\\x11\\xc8\\x43\\x0c\\ \\x25\\x46\\x04\\x8f\\x71\\x2d\\xf4\\xad\\x27\\x7c\\xf8\\x67\\x5a\\xc8\\x23\\x86\\ \\xa8\\xc5\\xc0\\xed\\xde\\xce\\xe0\\x41\\xf7\\x81\\x3e\\x46\\x65\\x01\\x14\\x38\\ \\x81\\x74\\x1c\\x6d\\x17\\xea\\xfc\\x1a\\x29\\x0d\\x69\\x80\\x5b\\x07\\x4e\\xa1\\ \\xfb\\x85\\xaa\\xca\\x90\\xce\\xc3\\x0e\\x58\\x8c\\x95\\x98\\x29\\x65\\x75\\x51\\ \\x60\\x17\\xfa\\x40\\x68\\x95\\x50\\x5d\\x25\\xd2\\x39\\xa9\\x12\\xe3\\x24\\x1e\\ \\x96\\x78\\x59\\xe2\\xff\\x6c\\x07\\x3c\\x7c\\xac\\xf4\\x32\\x52\\xde\\x60\\x89\\ \\x47\\x25\\x5e\\x93\\xf2\\xc6\\x49\\xfe\\xcb\\x24\\x96\\x4b\\x3c\\x23\\xf1\\x93\\ \\x94\\x76\\x8f\\x64\\x0e\\x74\\x2c\\xfe\\xcb\\x12\\xdf\\xe8\\x18\\x52\\x9c\\x46\\", "\\xfb\\xb9\\xcb\\x7a\\x8c\\xbc\\xe8\\x70\\xcd\\xef\\xdc\\xc1\\x75\\x07\\x80\\x10\\ \\x3c\\xd0\\x7b\\x04\\x1b\\xbb\\xee\\x66\\xcf\\xe6\\x28\\xae\\x19\\x49\\x30\\xa6\\ \\x83\\xfa\\x39\\x30\\xe3\\x1c\\xe8\\x0d\\x0c\\xee\\xf0\\x32\\xe3\\xd9\\x08\\x2c\\ \\xc9\\x83\\x48\\x36\\x0c\\x38\\x08\\xe3\\xc5\\xee\\x3b\\xe0\\x9b\\x42\\xd8\\x0d\\ \\x7c\\xb0\\x03\\xd2\\xbe\\x06\\xf7\\x27\\x5d\\xe0\\xf4\\x28\\x70\\xf9\\x20\\xe5\\ \\x04\\x47\\x56\\xed\\xa0\\x6a\\x84\\x49\\x69\\xd8\\x22\\x73\\x85\\x9b\\xb4\\xc7\\ \\xa6\\x42\\x73\\x6f\\x02\\x23\\x9b\\x49\\x78\\x66\\x09\\xee\\x6f\\xc3\\x84\\xa3\\ \\x26\\xbe\\x7d\\x97\\x62\\xb5\\xe7\\xa3\\x59\\x3f\\x62\\xae\\xf6\\xf3\\xf6\\xab\\ \\x11\\xae\\x5f\\x94\\x8f\\x7b\\xe5\\x85\\x7c\\x78\\xaf\\x9f\\xa9\\x89\\xaf\\x52\\ \\x7b\\xf6\\x3a\\x1a\\xc9\\x67\\xe0\\xb7\\x43\\xd9\\xfe\\xf7\\xcf\\x30\\xbf\\x6b\\ \\xde\\xcf\\xdf\\x6a\\x81\\xb9\\x40\\x03\\x6c\\xe9\\x7a\\x88\\x7f\\x54\\x47\\x71\\ \\x7d\\x0d\\x9c\\xee\\x0f\\x87\\xa1\\x79\\x3f\\x1c\\xc7\\xcf\\x50\\x60\\xc6\\xbf\\ \\x2c\\xf8\\x4f\\x7f\\x68\\x1e\\x00\\x1d\\xbd\\xe0\\x60\\x0a\\x08\\xb2\\x42\\x70\\", "\\x90\\x1e\\x42\\x7f\\xb6\\x23\\x6b\\x13\\xd2\\xf9\\xa6\\xc2\\xff\\x41\\x95\\xeb\\ \\x90\\xd2\\x3d\\x92\\xd1\\x53\\x2a\\x71\\xab\\x6a\\x13\\xfa\\x8f\\xd0\\xea\\xb3\\ 
\\x28\\x3a\\x16\\x89\\xf3\\x24\\x6e\\x92\\xb8\\x56\\x62\\x92\\x42\\x4f\\xb8\\x55\\ \\x65\\x21\\xdd\\x80\\xc4\\x1d\\x12\\x1f\\x49\\xfc\\x21\\x99\\x9f\\x4b\\x85\\x57\\ \\x4b\\x6b\\x90\\x26\\xba\\x25\\xde\\x95\\xc5\\x59\\x89\\x72\\x69\\x46\\x0f\\xa9\\ \\x70\\x8c\\xe2\\x54\\x2a\\x4a\\x48\\x3a\\xb7\\x97\\xc4\\x1a\\x95\\x63\\x29\\x98\\ \\xfb\\xb0\\xe4\\x9a\\xa5\\x83\\x58\\x6a\\x64\\x9b\\xe4\\x2f\\x52\\x83\\xf7\\x2b\\ \\xad\\xa1\\x55\\xe3\\x86\\x48\\x1f\\x12\\x50\\xa5\\x7b\\xad\\xf4\\xb8\\x21\\x2a\\ \\x6a\\xd1\\xce\\x2a\\xa4\\xb5\\x68\\xb3\\xec\\x82\\xac\\x6b\\xd1\\x9a\\x18\\xd2\\ \\x55\\x48\\xd9\\x48\\x69\\x86\\xaa\\x3f\\x44\\xff\\x16\\x3a\\xb5\\xc6\\x94\\x92\\ \\xd2\\x25\\x86\\x38\\x8b\\xbd\\x47\\x32\\x6e\\x56\\xe4\\xca\\x62\\x9d\\x6a\\x41\\ \\x9b\\x84\\x82\\x3f\\x20\\x25\\x96\\x38\\xef\\x7f\\x26\\xb1\\x4c\\xe2\\x5b\\x29\\ \\xfd\\x46\\x69\\x2b\\xd2\\x85\\xa5\\x12\\x6f\\x49\\x6c\\x96\\x78\\x43\\x7a\\xd8\\", "\\x42\\xc3\\x29\\x48\\xaa\\x80\\xd1\\x95\\x04\\xdf\\x0b\\x53\\x57\\x0c\\x1e\\x13\\ \\x12\\xf7\\x00\\x3f\\xb9\\x81\\x1d\\x90\\xf6\\x13\\xfc\\xb7\\xda\\x86\\xf3\\x91\\ \\x20\\x1c\\xfc\\x1d\\xac\\x13\\x70\\xd9\\x2f\\x30\\x0a\\x70\\x45\\xe1\\xfb\\x10\\ \\x1e\\x26\\x03\\x6d\\x18\\xfe\\x15\\x50\\xd0\\x08\\xa5\\x11\\x4c\\xef\\x1a\\x5c\\ \\x15\\xd3\\xa9\\x26\\x8b\\x0c\\x3c\\x64\\xf0\\x2b\\xc6\\x34\\xa1\\x98\\xd3\\xe7\\ \\x25\\x0e\\x54\\x5a\\x1c\\x93\\x5d\\xef\\xc0\\x26\\xc1\\x69\\xe6\\xae\\x4e\\x4b\\ \\xfe\\x95\\xef\\xb3\\x9d\\xbe\\xf8\\x2b\\xb8\\x02\\x0c\\x88\\x43\\x5e\\x04\\x9a\\ \\x0c\\x30\\x3d\\x90\\x64\\x42\\xd0\\x70\\xd2\\x6b\\x1c\\x5c\\x07\\xc0\\xac\\xb0\\ \\xe1\\x6b\\x0d\\x00\\xb3\\xd5\\x89\\xd4\\x01\\x87\\x92\\xba\\x3b\\x8b\\x71\\x3b\\ \\xff\\x6f\\xea\\x03\\x4d\\x1d\\x30\\xa4\\x1a\\x4e\\x5a\\xf6\\xae\\xf5\\x07\\x56\\ \\x26\\x13\\xab\\x7e\\x04\\x77\\x47\\x0d\\x78\\xb7\\x62\\xfc\\xd3\\x42\\x27\\x0d\\ \\xe8\\x16\\x87\\x87\\x4f\\x42\\x61\\x1d\\x94\\xf7\\x87\\x25\\xe9\\x7f\\x99\\x67\\ \\x18\\xdb\\x09\\x17\\x1e\\x06\\x6f\\x1d\\xb8\\x9b\\x0c\\x4e\\x8d\\x16\\x47\\xbb\\", 
"\\xb5\\x51\\xb0\\xb4\\x88\\x9e\\x1f\\xb6\\xd3\\xd3\\x48\\x00\\xb7\\x0f\\x92\\x5b\\ \\xe1\\xfc\\x5a\\x62\\x33\\xe1\\xf4\\x14\\xfb\\x24\\x3d\\x40\\xe9\\x72\\x70\\xcf\\ \\x4d\\x83\\x53\\xbd\\x21\\x9e\\x47\\x12\\x9d\\xe0\\x09\\x40\\x34\\x19\\x62\\xfd\\ \\x9c\\x9e\\x6a\\x06\\x32\\x81\\xc1\\x50\\x57\\x85\\x7a\\x74\\x80\\x1b\\x8c\\x6c\\ \\xe7\\xad\\x0c\\xc0\\xed\\xc0\\xab\\xdc\\x07\\x64\\xe0\\x61\\x0f\\xd9\\xe4\\x13\\ \\xe7\\x02\\xdc\\x34\\x92\\x4a\\x10\\x94\\x0f\\x74\\xe0\\x36\\x77\\x61\\xf8\\x94\\ \\xa3\\xb1\\xd4\\x13\\x02\\xfa\\x02\\xcf\\x34\\x42\\x97\\xbb\\x80\\xad\\x29\\x30\\ \\xb9\\x9d\\xce\\xfb\\x21\\xbc\\x07\\xd2\\x3f\\x32\\xa0\\x5c\\x50\\xef\\x03\\x2b\\ \\x05\\xa2\\x3d\\xed\\x10\\x43\\x0b\\x3c\\xb7\\x8d\\xdf\\x9f\\x86\\xf4\\x3a\\xe8\\ \\x33\\xd5\\x80\\x6d\\x53\\x81\\xee\\x0e\\x36\\xdd\\x0e\\x5e\\x92\\x9d\\xf3\\x8e\\ \\xd9\\xcd\\x4f\\x9e\\xa3\\x38\\x1f\\xc3\\xa2\\xb7\\xd1\\x35\\x60\\xec\\x75\\x5a\\ \\x27\\xe4\\x6c\\x62\\x05\\x30\\x6f\\x18\\x58\\xf7\\x01\\xa3\\x09\\x90\\x88\\x89\\ \\x9f\\x1d\\x24\\x53\\x80\\x9b\\x16\\x44\\x2a\\x06\\xed\\x80\\x2b\\x6f\\x5e\\x74\\", "\\x18\\xdb\\x09\\x17\\x1e\\x06\\x6f\\x1d\\xb8\\x9b\\x0c\\x4e\\x8d\\x16\\x47\\xbb\\ \\xc2\\x19\\xd3\\x2e\\x22\\x0b\\x28\\x8c\\x83\\xab\\x0d\\x7a\\x1d\\x36\\x71\\x9d\\ \\x4e\\xa2\\xa3\\x28\\x08\\xfd\\xe2\\x18\\xc9\\xd0\\x6a\\x40\\x72\\x04\\x32\\x0f\\ \\x01\\x6d\\x2e\\xf0\\x5a\\x90\\x21\\xac\\xae\\x60\\x76\\x02\\x27\\x9c\\x23\\x6d\\ \\x72\\x6e\\x17\\x9a\\x9c\\x4b\\xfa\\x46\\x03\\x12\\x65\\x53\\xe3\\x28\\xc7\\x9d\\ \\x1e\\x73\\x12\\x60\\x93\\x09\\xcd\\x16\\x04\\x4d\\xc8\\xb3\\x60\\x53\\x1b\\x9a\\ \\x25\\xd4\\xf4\\x15\\x52\\x96\\x7d\\x4d\\x1a\\x5e\\x63\\xcf\\x3c\\x1e\\x93\\x2d\\ \\x8e\\xd6\\xbd\\x48\\x45\\x09\\x92\\xbb\\xc4\\xf6\\x59\\x29\\xdd\\xd5\\xf2\\xab\\ \\xa1\\xaf\\x85\\xde\\x77\\x66\\x23\\xed\\xfb\\x90\\x6e\\x47\\x32\\x06\\x4b\\x3c\\ \\x27\\x71\\x97\\x74\\x5e\\x9a\\xac\\x16\\xd4\\x20\\x64\\x1d\\x40\\xea\\x9f\\x2e\\ \\x31\\x47\\xe2\\x6a\\xe9\\x46\\xaf\\x62\\x6d\\xce\\xe5\\x76\\x7e\\x77\\x7b\\x4e\\ 
\\x92\\x3b\\xda\\xbe\\x1f\\x5e\\x89\\x74\\xa5\\x5f\\x62\\xae\\x7d\\xb5\\x6a\\x3c\\ \\x29\\xbd\\xe8\\x91\\xe6\\x23\\x3d\\x85\\xd4\\xbb\\x44\\x31\\x96\\xa9\\x99\\xcd\\", "= 1 qt_resource_struct = qt_resource_struct_v1 else: rcc_version = 2 qt_resource_struct = qt_resource_struct_v2 def", "\\xf2\\x39\\x3b\\x1b\\x98\\x71\\x04\\xdb\\x17\\xec\\x06\\x7a\\x90\\xba\\x60\\x22\\ \\x5d\\x57\\x83\\x31\\x59\\x90\\xfe\\x81\\x8d\\x41\\x2b\\x1b\\xfe\\xf4\\xdb\\x4d\\ \\x4e\\x93\\xc3\\x8b\\x95\\x50\\x0b\\x66\\xe7\\x20\\xba\\x63\\x51\\x08\\x98\\xb9\\ \\x0f\\xc3\\x40\\x37\\xd6\\xca\\x77\\xe8\\xe0\\x16\\xc2\\x24\\x72\\x35\\x01\\x7a\\ \\x02\\x73\\xdc\\xaf\\xe2\\x4e\\x9a\\x00\\xd9\\x41\\x38\\xf8\\x2b\\xd2\\x30\\xa4\\ \\x41\\x48\\x2f\\xdb\\x01\\xa9\\x7d\\x09\\xd2\\x40\\xaf\\xe4\\x2a\\x92\\xe8\\xa7\\ \\xce\\xfb\\x0d\\xbd\\x1e\\x43\\xcb\\x85\\x2a\\xcb\\x50\\xe0\\x15\\x14\\x7e\\xd3\\ \\xd0\\xd7\\x16\\xfa\\xde\\x42\\x91\\xcf\\x91\\x35\\x07\\x35\\x6f\\x40\\xe5\\x42\\ \\x2d\\x9d\\xc8\\x7a\\xd0\\x70\\x02\\xd8\\x42\\x89\\xb7\\xa5\\x82\\x0b\\x14\\xd9\\ \\x87\\x34\\x0b\\x89\\xc5\\xf6\\x08\\xe1\\xda\\x22\\xbb\\xb1\\x79\\x59\\xe2\\x4f\\ \\xc9\\x7b\\x8b\\xf4\\x3c\\xb2\\x78\\x4d\\x47\\x88\\xab\\x95\\x93\\xd2\\x2d\\x48\\ \\x09\\x97\\xc9\\xa2\\x5a\\xe2\\xa8\\xe2\\x74\\xd8\\x73\\x94\\x84\\x71\\xd2\\x84\\ \\x54\\x29\\xad\\x44\\x1a\\x93\\x22\\x1a\\xd7\\x20\\x8d\\xb0\\x67\\x22\\x95\\xb5\\", "\\x62\\x5e\\x3e\\x8b\\x62\\x3e\\xa0\\x17\\x10\\xc1\\xe4\\x20\\x43\\x09\\xad\\xdb\\ \\x08\\xcd\\x26\\x8c\\x01\\x33\\xa1\\x4c\\x8c\\x05\\x76\\x25\\x3a\\x8a\\x5e\\x0c\\ \\xbe\\xcd\\xf0\\x78\\x15\\x6c\\x04\\x0e\\xdd\\x08\\x64\\x05\\x6c\\xb5\\x68\\xea\\ \\x81\\xe7\\x49\\x17\\x33\\xd7\\xc3\\x20\\xcb\\x76\\xcc\\x3e\\xa7\\x10\\x0f\\x10\\ \\x01\\x86\\x1e\\x87\\x8c\\xcb\\xdc\\x70\\xf4\\xaf\\xe0\\xeb\\x05\\x1a\\xe1\\xf2\\ \\xfd\\xe8\\xe5\\x0e\\xfc\\x2f\\x02\\xdb\\x6f\\x03\\xae\\xb0\\xe9\\x76\\x64\\x3d\\ \\x54\\x25\\x3a\\x2a\\x6f\\xc1\\x59\\xcb\\x66\\xea\\xdc\\x1d\\x58\\x18\\x74\\xd0\\ 
\\x07\\x6a\\x27\\x41\\x69\\x8c\\x94\\x21\\x8f\\x30\\x90\\x1a\\x92\\x9c\\xdf\\x5c\\ \\x1b\\xef\\x43\\x78\\xdd\\x2a\\xa8\\xc9\\xc5\\xc4\\x65\\x92\\x07\\x18\\x09\\xb6\\ \\xa2\\x33\\xd4\\xb6\\xb1\\x05\\x6b\\xed\\x84\\x5d\\x56\\x0a\\x4c\\x32\\x00\\xc0\\ \\x3a\\x0f\\xaa\\xc6\\x90\\xfc\\x9c\\xcd\\x03\\x38\\x11\\x20\\xd7\\xb1\\x80\\x03\\ \\x42\\x90\\x71\\xbd\\x17\\xfe\\xbc\\xd0\\x71\\x6e\\x2e\\x7b\\x03\\x2e\\xdd\\x4d\\ \\xe4\\x83\\x00\\x34\\x80\\xb1\\x74\\x08\\xc4\\xaf\\x43\\xe4\\x80\\x6b\\x05\\x5c\\", "\\x6f\\x80\\x0e\\x2c\\x52\\x30\\xe9\\x0e\\xfc\\x8f\\x0a\\x9e\\x24\\x42\\x23\\x99\\ \\x74\\x52\\x46\\x37\\x06\\xf3\\x13\\x49\\x7c\\x03\\xc6\\x25\\x9c\\xd2\\x60\\xd6\\ \\x53\\x42\\x12\\x60\\x54\\xfd\\x82\\xaa\\xfa\\xc2\\xd6\\x4c\\x98\\xbd\\x15\\x12\\ \\xe7\\xb9\\xc0\\x63\\x41\\xc8\\x84\\xac\\x38\\x24\\x00\\xbf\\x00\\x5d\\x81\\x01\\ \\x06\\x8d\\x17\\x19\\x6c\\xb8\\xdc\\xe2\\x54\\x81\\xfd\\x52\\x9f\\xd3\\x90\\x17\\ \\x86\\xf6\\x1c\\xd8\\x92\\x0a\\x85\\xc0\\xc5\\xe5\\xe0\\x8d\\x81\\x2b\\x05\\x94\\ \\x01\\x91\\xd3\\x90\\xb8\\x20\\x17\\xde\\xeb\\x0f\\xa1\\xa1\\xc0\\x40\\xe0\\x0f\\ \\x18\\xbe\\x1c\\xf6\\x0f\\x86\\xce\\x47\\x81\\x1a\\x9b\\x3a\\x69\\x02\\xb6\\x42\\ \\xca\\x6e\\x08\\x05\\x89\\x45\\xd7\\x53\\x8b\\x81\\x17\\x93\\x6c\\x7e\\xc0\\x74\\ \\x7d\\x4c\\x34\\x3e\\x8f\\x43\\x14\\x73\\x12\\x17\\xa9\\x40\\x42\\xfa\\x19\\x8c\\ \\x52\\x19\\xca\\x05\\xba\\x20\\x2e\\x06\\xee\\x3a\\x0c\\xfe\\x76\\x30\\xbf\\x04\\ \\x3e\\x07\\x32\\x80\\xcb\\xa0\\xf9\\x2a\\xd8\\x71\\x11\\xb4\\x87\\xa1\\xdf\\x09\\ \\x83\\x82\\xed\\x90\\xb1\\x0a\\xd8\\x9e\\x08\\xa6\\x05\\xe3\\xc3\\xfc\\xf4\\xb1\\", "rcc_version = 2 qt_resource_struct = qt_resource_struct_v2 def qInitResources(): QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data) def", "\\x0a\\xa2\\xa0\\x02\\x4a\\x13\\x51\\x40\\x40\\x7a\\x42\\x4d\\x02\\x49\\x48\\x2f\\ \\x27\\x27\\xa7\\xee\\xfb\\xfd\\xb0\\xb7\\x9f\\xf2\\xcb\\x29\\xfb\\xac\\x67\\xad\\ 
\\xfb\\xb9\\xcb\\x7a\\x8c\\xbc\\xe8\\x70\\xcd\\xef\\xdc\\xc1\\x75\\x07\\x80\\x10\\ \\x3c\\xd0\\x7b\\x04\\x1b\\xbb\\xee\\x66\\xcf\\xe6\\x28\\xae\\x19\\x49\\x30\\xa6\\ \\x83\\xfa\\x39\\x30\\xe3\\x1c\\xe8\\x0d\\x0c\\xee\\xf0\\x32\\xe3\\xd9\\x08\\x2c\\ \\xc9\\x83\\x48\\x36\\x0c\\x38\\x08\\xe3\\xc5\\xee\\x3b\\xe0\\x9b\\x42\\xd8\\x0d\\ \\x7c\\xb0\\x03\\xd2\\xbe\\x06\\xf7\\x27\\x5d\\xe0\\xf4\\x28\\x70\\xf9\\x20\\xe5\\ \\x04\\x47\\x56\\xed\\xa0\\x6a\\x84\\x49\\x69\\xd8\\x22\\x73\\x85\\x9b\\xb4\\xc7\\ \\xa6\\x42\\x73\\x6f\\x02\\x23\\x9b\\x49\\x78\\x66\\x09\\xee\\x6f\\xc3\\x84\\xa3\\ \\x26\\xbe\\x7d\\x97\\x62\\xb5\\xe7\\xa3\\x59\\x3f\\x62\\xae\\xf6\\xf3\\xf6\\xab\\ \\x11\\xae\\x5f\\x94\\x8f\\x7b\\xe5\\x85\\x7c\\x78\\xaf\\x9f\\xa9\\x89\\xaf\\x52\\ \\x7b\\xf6\\x3a\\x1a\\xc9\\x67\\xe0\\xb7\\x43\\xd9\\xfe\\xf7\\xcf\\x30\\xbf\\x6b\\ \\xde\\xcf\\xdf\\x6a\\x81\\xb9\\x40\\x03\\x6c\\xe9\\x7a\\x88\\x7f\\x54\\x47\\x71\\", "\\x80\\x33\\x67\\xa0\\xe8\\x13\\x48\\xfe\\x10\\xfa\\x76\\x40\\xb3\\x01\\x9e\\xf5\\ \\x90\\x38\\x2a\\x17\\x96\\x65\\x63\\x5a\\x10\\xfb\\x5f\\x13\\xfc\\xed\\x23\\xa0\\ \\x02\\x38\\x06\\x04\\x60\\xdf\\x38\\x4c\\xd3\\x0b\\x7d\\x77\\x03\\xc2\\x45\\x3b\\ \\x66\\xa0\\x1f\\x84\\x3a\\xf1\\x70\\x9c\\x46\\x92\\x88\\x64\\x9c\\x02\\xab\\x15\\ \\xd1\\x04\\xf1\\x54\\x0a\\x4f\\x0f\\x85\\x13\\x82\\xba\\x22\\x16\\x1d\\x9b\\x42\\ \\x63\\xd5\\xc5\\xb4\\x9b\\x01\\xc2\\xed\\x43\\xe1\\x93\\x76\\xd4\\xb1\\x12\\xe9\\ \\x69\\x74\\xa8\\x19\\x6d\\xaf\\x40\\xba\\x18\\x59\\x43\\xd0\\xd3\\x21\\x54\\xbf\\ \\x18\\xc9\\x9f\\x2a\\xb9\\xfc\\x52\\x8a\\xa9\\x8a\\x15\\x86\\xae\\x10\\xfa\\x57\\ \\x0c\\x35\\xcd\\x46\\x56\\x4f\\xa4\\x81\\x09\\x52\\x41\\x9a\\xa2\\x8b\\xd0\\x71\\ \\xa1\\xf8\\xcd\\x3e\\x89\\xc7\\x25\\x56\\x4a\\x3d\\x67\\x4b\\xcb\\x90\\x36\\x21\\ \\xdd\\xe4\\x96\\x98\\x22\\xf1\\x9e\\xc4\\x42\\x89\\xc9\\xd2\\xa7\\x48\\x2c\\x95\\ \\x28\\x73\\xe0\\x85\\x2c\\x5e\\x56\\x05\\x96\\xda\\x78\\x45\\x4a\\x19\\xae\\x06\\ \\xa4\\x2a\\x1a\\x14\\x4b\\xb9\\x43\\xba\\x20\\x59\\x61\\xe3\\x65\\xfd\\xb3\\x58\\", 
"\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x01\\x74\\x20\\x97\\x84\\xbd\\ \" qt_version = [int(v) for v in", "\\x95\\xdd\\x5c\\xc6\\xf9\\x9e\\x8d\\xec\\x1b\\xf8\\x15\\xc5\\x5d\\x8a\\xd9\\xfc\\ \\xfd\\x97\\x4c\\xce\\x3c\\x97\\xfd\\xf7\\x1f\\xa7\\xab\\xd9\\x40\\xc3\\x2b\\xb7\\ \\x61\\xac\\xfc\\x3d\\x41\\x57\\x7f\\xdf\\xdd\\x1e\\xe6\\xac\\x05\\x4a\\x4c\\xea\\ \\x17\\x59\\x64\\xee\\xcb\\xc1\\x35\\x0f\\x38\\xde\\x1d\\x54\\x04\\xf4\\x71\\xc2\\ \\xcf\\x20\\x3b\\x00\\x4d\\x3a\\x49\\xfd\\xe7\\x65\\xac\\x4f\\x86\\x1b\\xde\\x05\\ \\xe3\\x89\\x31\\xc4\\x9b\\x5f\\x76\\xac\\x6c\\x22\\x2e\\x8e\\x41\\xff\\x77\\xed\\ \\xbc\\x9f\\x93\\x83\\x36\\x8e\\xc2\\xe0\\x12\\x3a\\xb8\\x80\\x24\\xbe\\xb0\\x83\\ \\xd7\\xcd\\x4b\\x09\\x2f\\x81\\x76\\x13\\x52\\x0c\\xf0\\xec\\x01\\xf3\\x43\\xe0\\ \\xb7\\xf1\\x54\\xec\\xfb\\x37\\xf9\\xb1\\x12\\x02\\x34\\x93\\xe9\\xdf\\x0d\\xe6\\ \\xd7\\x90\\x60\\xc1\\xed\\x2b\\xa8\\x5e\\xf7\\x22\\x4b\\x0a\\x07\\x91\\x5b\\xe6\\ \\xa1\\x3c\\x2d\\xc6\\x4b\\x67\\x42\\xb8\\xae\\x6e\\x7e\\x73\\x5e\\x72\\xaf\\x6d\\ \\xa4\\xbf\\xdf\\x1b\\x7a\\xf9\\x61\\xdb\\x34\\x92\\x7e\\x18\\x86\\xf9\\x51\\x09\\ \\x34\\x8c\\x04\\xa6\\xdb\\x73\\x0c\\x8e\\x42\\xd1\\x01\\xc8\\xfc\\x9e\\xc8\\xa4\\", "coding: utf-8 -*- # Resource object code # # Created by: The Resource", "\\x18\\xc5\\x30\\x39\\x0e\\x74\\x58\\xd0\\xe5\\x04\\xc1\\x5b\\xa1\\xd5\\x03\\x33\\ \\x16\\x00\\x1b\\x92\\x41\\x01\\x48\\x6d\\xe5\\xf4\\xdc\\x28\\xf7\\x3c\\x0d\\x1b\\ \\x80\\x33\\x67\\xa0\\xe8\\x13\\x48\\xfe\\x10\\xfa\\x76\\x40\\xb3\\x01\\x9e\\xf5\\ \\x90\\x38\\x2a\\x17\\x96\\x65\\x63\\x5a\\x10\\xfb\\x5f\\x13\\xfc\\xed\\x23\\xa0\\ \\x02\\x38\\x06\\x04\\x60\\xdf\\x38\\x4c\\xd3\\x0b\\x7d\\x77\\x03\\xc2\\x45\\x3b\\ 
\\x66\\xa0\\x1f\\x84\\x3a\\xf1\\x70\\x9c\\x46\\x92\\x88\\x64\\x9c\\x02\\xab\\x15\\ \\xd1\\x04\\xf1\\x54\\x0a\\x4f\\x0f\\x85\\x13\\x82\\xba\\x22\\x16\\x1d\\x9b\\x42\\ \\x63\\xd5\\xc5\\xb4\\x9b\\x01\\xc2\\xed\\x43\\xe1\\x93\\x76\\xd4\\xb1\\x12\\xe9\\ \\x69\\x74\\xa8\\x19\\x6d\\xaf\\x40\\xba\\x18\\x59\\x43\\xd0\\xd3\\x21\\x54\\xbf\\ \\x18\\xc9\\x9f\\x2a\\xb9\\xfc\\x52\\x8a\\xa9\\x8a\\x15\\x86\\xae\\x10\\xfa\\x57\\ \\x0c\\x35\\xcd\\x46\\x56\\x4f\\xa4\\x81\\x09\\x52\\x41\\x9a\\xa2\\x8b\\xd0\\x71\\ \\xa1\\xf8\\xcd\\x3e\\x89\\xc7\\x25\\x56\\x4a\\x3d\\x67\\x4b\\xcb\\x90\\x36\\x21\\ \\xdd\\xe4\\x96\\x98\\x22\\xf1\\x9e\\xc4\\x42\\x89\\xc9\\xd2\\xa7\\x48\\x2c\\x95\\", "as QtCore qt_resource_data = b\"\\ \\x00\\x00\\x19\\x3d\\ \\x89\\ \\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0d\\x49\\x48\\x44\\x52\\x00\\ \\x00\\x00\\x32\\x00\\x00\\x00\\x32\\x08\\x06\\x00\\x00\\x00\\x1e\\x3f\\x88\\xb1\\ \\x00\\x00\\x00\\x06\\x62\\x4b\\x47\\x44\\x00\\xff\\x00\\xff\\x00\\xff\\xa0\\xbd\\ \\xa7\\x93\\x00\\x00\\x00\\x09\\x70\\x48\\x59\\x73\\x00\\x00\\x0b\\x13\\x00\\x00\\ \\x0b\\x13\\x01\\x00\\x9a\\x9c\\x18\\x00\\x00\\x00\\x07\\x74\\x49\\x4d\\x45\\x07\\ \\xe3\\x01\\x1e\\x09\\x34\\x07\\xfb\\x3d\\x97\\x4b\\x00\\x00\\x00\\x1d\\x69\\x54\\", "\\x81\\xe7\\x49\\x17\\x33\\xd7\\xc3\\x20\\xcb\\x76\\xcc\\x3e\\xa7\\x10\\x0f\\x10\\ \\x01\\x86\\x1e\\x87\\x8c\\xcb\\xdc\\x70\\xf4\\xaf\\xe0\\xeb\\x05\\x1a\\xe1\\xf2\\ \\xfd\\xe8\\xe5\\x0e\\xfc\\x2f\\x02\\xdb\\x6f\\x03\\xae\\xb0\\xe9\\x76\\x64\\x3d\\ \\x54\\x25\\x3a\\x2a\\x6f\\xc1\\x59\\xcb\\x66\\xea\\xdc\\x1d\\x58\\x18\\x74\\xd0\\ \\x07\\x6a\\x27\\x41\\x69\\x8c\\x94\\x21\\x8f\\x30\\x90\\x1a\\x92\\x9c\\xdf\\x5c\\ \\x1b\\xef\\x43\\x78\\xdd\\x2a\\xa8\\xc9\\xc5\\xc4\\x65\\x92\\x07\\x18\\x09\\xb6\\ \\xa2\\x33\\xd4\\xb6\\xb1\\x05\\x6b\\xed\\x84\\x5d\\x56\\x0a\\x4c\\x32\\x00\\xc0\\ \\x3a\\x0f\\xaa\\xc6\\x90\\xfc\\x9c\\xcd\\x03\\x38\\x11\\x20\\xd7\\xb1\\x80\\x03\\ \\x42\\x90\\x71\\xbd\\x17\\xfe\\xbc\\xd0\\x71\\x6e\\x2e\\x7b\\x03\\x2e\\xdd\\x4d\\ 
\\xe4\\x83\\x00\\x34\\x80\\xb1\\x74\\x08\\xc4\\xaf\\x43\\xe4\\x80\\x6b\\x05\\x5c\\ \\x12\\x84\\xca\\xae\\x8e\\x2f\\xe9\\x84\\x90\\xcb\\xde\\x9d\\x92\\x33\\x44\\xe9\\ \\xa0\\x85\\x24\\xb0\\x86\\x43\\x52\\x12\\xdc\\xd5\\x88\\xc1\\x6b\\x74\\xa3\\x83\\ \\x14\\x22\\x54\\x60\\x50\\x13\\xbd\\x04\\xbe\\xee\\x83\\xf9\\xdb\\x4d\\x16\\xdb\\", "\\xd7\\xcd\\x4b\\x09\\x2f\\x81\\x76\\x13\\x52\\x0c\\xf0\\xec\\x01\\xf3\\x43\\xe0\\ \\xb7\\xf1\\x54\\xec\\xfb\\x37\\xf9\\xb1\\x12\\x02\\x34\\x93\\xe9\\xdf\\x0d\\xe6\\ \\xd7\\x90\\x60\\xc1\\xed\\x2b\\xa8\\x5e\\xf7\\x22\\x4b\\x0a\\x07\\x91\\x5b\\xe6\\ \\xa1\\x3c\\x2d\\xc6\\x4b\\x67\\x42\\xb8\\xae\\x6e\\x7e\\x73\\x5e\\x72\\xaf\\x6d\\ \\xa4\\xbf\\xdf\\x1b\\x7a\\xf9\\x61\\xdb\\x34\\x92\\x7e\\x18\\x86\\xf9\\x51\\x09\\ \\x34\\x8c\\x04\\xa6\\xdb\\x73\\x0c\\x8e\\x42\\xd1\\x01\\xc8\\xfc\\x9e\\xc8\\xa4\\ \\xa3\\x54\\x4e\\x6f\\x64\\x76\\x29\\xdc\\x0e\\x64\\xee\\x04\\xea\\xea\\x31\\x6b\\ \\x4e\\x60\\x5a\\x51\\x4c\\xd7\\x6f\\xa0\\x6e\\x50\\x6f\\x40\\xdd\\x5d\\x58\\xa7\\ \\xfb\\x62\\x5a\\x3e\\xc4\\x4c\\x0c\\xd2\\x70\\xf1\\x7f\\xd0\\x77\\x33\\x9c\\x13\\ \\xc4\\x5d\\x0f\\xfe\\x3a\\x70\\x6f\\x07\\xe3\\xb8\\xe3\\x6c\\x7a\\x54\\x91\\xbe\\ \\x25\\x9b\\x5d\\x4c\\x22\\x89\\x8d\\x24\\x45\\x0b\\x68\\x8d\\x9c\\x8f\\x72\\xf7\\ \\xd1\\xb2\\xeb\\x72\\x9a\\xae\\xf6\\xb0\\x6a\\xfd\\x9d\\x7c\\xf8\\xa4\\xc1\\x23\\ \\xef\\x1c\\xa4\\xa5\\x7d\\x0a\\xff\\x1f\\xa7\\x48\\xb3\\x27\\x67\\x17\\xe2\\x1e\\", "\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \" qt_resource_struct_v2 = b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x01\\x74\\x20\\x97\\x84\\xbd\\ \" qt_version", "\\x1d\\x44\\x91\\x26\\x8a\\xe8\\x20\\x4a\\x71\\x44\\x01\\xb1\\x2b\\x16\\xb0\\xcc\\ 
\\x60\\xc1\\xae\\xef\\x08\\x63\\x77\\xfe\\x3a\\xa2\\xa0\\x88\\x0c\\x16\\xac\\x88\\ \\x0a\\xa2\\xa0\\x02\\x4a\\x13\\x51\\x40\\x40\\x7a\\x42\\x4d\\x02\\x49\\x48\\x2f\\ \\x27\\x27\\xa7\\xee\\xfb\\xfd\\xb0\\xb7\\x9f\\xf2\\xcb\\x29\\xfb\\xac\\x67\\xad\\ \\xfb\\xb9\\xcb\\x7a\\x8c\\xbc\\xe8\\x70\\xcd\\xef\\xdc\\xc1\\x75\\x07\\x80\\x10\\ \\x3c\\xd0\\x7b\\x04\\x1b\\xbb\\xee\\x66\\xcf\\xe6\\x28\\xae\\x19\\x49\\x30\\xa6\\ \\x83\\xfa\\x39\\x30\\xe3\\x1c\\xe8\\x0d\\x0c\\xee\\xf0\\x32\\xe3\\xd9\\x08\\x2c\\ \\xc9\\x83\\x48\\x36\\x0c\\x38\\x08\\xe3\\xc5\\xee\\x3b\\xe0\\x9b\\x42\\xd8\\x0d\\ \\x7c\\xb0\\x03\\xd2\\xbe\\x06\\xf7\\x27\\x5d\\xe0\\xf4\\x28\\x70\\xf9\\x20\\xe5\\ \\x04\\x47\\x56\\xed\\xa0\\x6a\\x84\\x49\\x69\\xd8\\x22\\x73\\x85\\x9b\\xb4\\xc7\\ \\xa6\\x42\\x73\\x6f\\x02\\x23\\x9b\\x49\\x78\\x66\\x09\\xee\\x6f\\xc3\\x84\\xa3\\ \\x26\\xbe\\x7d\\x97\\x62\\xb5\\xe7\\xa3\\x59\\x3f\\x62\\xae\\xf6\\xf3\\xf6\\xab\\ \\x11\\xae\\x5f\\x94\\x8f\\x7b\\xe5\\x85\\x7c\\x78\\xaf\\x9f\\xa9\\x89\\xaf\\x52\\", "\\x17\\x59\\x64\\xee\\xcb\\xc1\\x35\\x0f\\x38\\xde\\x1d\\x54\\x04\\xf4\\x71\\xc2\\ \\xcf\\x20\\x3b\\x00\\x4d\\x3a\\x49\\xfd\\xe7\\x65\\xac\\x4f\\x86\\x1b\\xde\\x05\\ \\xe3\\x89\\x31\\xc4\\x9b\\x5f\\x76\\xac\\x6c\\x22\\x2e\\x8e\\x41\\xff\\x77\\xed\\ \\xbc\\x9f\\x93\\x83\\x36\\x8e\\xc2\\xe0\\x12\\x3a\\xb8\\x80\\x24\\xbe\\xb0\\x83\\ \\xd7\\xcd\\x4b\\x09\\x2f\\x81\\x76\\x13\\x52\\x0c\\xf0\\xec\\x01\\xf3\\x43\\xe0\\ \\xb7\\xf1\\x54\\xec\\xfb\\x37\\xf9\\xb1\\x12\\x02\\x34\\x93\\xe9\\xdf\\x0d\\xe6\\ \\xd7\\x90\\x60\\xc1\\xed\\x2b\\xa8\\x5e\\xf7\\x22\\x4b\\x0a\\x07\\x91\\x5b\\xe6\\ \\xa1\\x3c\\x2d\\xc6\\x4b\\x67\\x42\\xb8\\xae\\x6e\\x7e\\x73\\x5e\\x72\\xaf\\x6d\\ \\xa4\\xbf\\xdf\\x1b\\x7a\\xf9\\x61\\xdb\\x34\\x92\\x7e\\x18\\x86\\xf9\\x51\\x09\\ \\x34\\x8c\\x04\\xa6\\xdb\\x73\\x0c\\x8e\\x42\\xd1\\x01\\xc8\\xfc\\x9e\\xc8\\xa4\\ \\xa3\\x54\\x4e\\x6f\\x64\\x76\\x29\\xdc\\x0e\\x64\\xee\\x04\\xea\\xea\\x31\\x6b\\ \\x4e\\x60\\x5a\\x51\\x4c\\xd7\\x6f\\xa0\\x6e\\x50\\x6f\\x40\\xdd\\x5d\\x58\\xa7\\ 
\\xfb\\x62\\x5a\\x3e\\xc4\\x4c\\x0c\\xd2\\x70\\xf1\\x7f\\xd0\\x77\\x33\\x9c\\x13\\", "\\xa3\\xee\\x23\\xd5\\x5c\\x00\\xec\\xe0\\x24\\xb0\\x18\\x13\\x53\\x33\\xe1\\x50\\ \\x2a\\xc4\\xdb\\xed\\x93\\xef\\xb1\\x11\\x2a\\x3c\\xd0\\x7e\\x1d\\x30\\xa8\\x0a\\ \\x3a\\xdd\\x8c\\x98\\xe5\\x61\\x58\\x1b\\x7c\\x92\\x0d\\xdf\\x7f\\x19\\x87\\x3b\\ \\x37\\x41\\xd6\\xef\\x90\\x17\\xc1\\x2a\\xb6\\x59\\xb2\\x7b\\x3b\\x70\\xc8\\x07\\ \\xc6\\x01\\x60\\x37\\xa4\\x7d\\x41\\x7c\\xee\\xeb\\xb4\\xfc\\xb1\\x14\\xce\\xc6\\ \\x61\\x76\\x16\\xb4\\x5f\\x64\\xab\\x6c\\xea\\x7a\\x98\\xb3\\x01\\x4e\\xcc\\xb1\\ \\xbb\\xac\\x74\\x83\\xcd\\x54\\x64\\x43\\xef\\x03\\x70\\xfe\\x59\\xe2\\xb4\\x12\\ \\x66\\x02\\x8c\\xf8\\x89\\xac\\xcc\\x7b\\x71\\x11\\x63\\x1f\\x59\\xd4\\xb4\\x6f\\ \\x87\\xce\\x0c\\x78\\xe0\\x1d\\xba\\x78\\xdf\\x60\\x32\\x27\\xed\\xaf\\xea\\x2c\\ \\x7a\\x44\\xa8\\x79\\x0b\\xd2\\x6c\\xa4\\xbb\\xd1\\x67\\x31\\xb4\\x40\\x48\\xaf\\ \\x21\\xf5\\x44\\x32\\x0b\\xa5\\x9b\\x4c\\xad\\xb0\\xd0\\xb3\\x42\\x2b\\x85\\x1a\\ \\xf6\\x20\\xbd\\x88\\xda\\x8f\\xa1\\x4f\\x84\\x76\\x75\\x22\\xcd\\x44\\x1a\\x8d\\ \\x74\\x77\\x92\\x6a\\x8e\\xd8\\x01\\xcb\\x9a\\xed\\x95\\xdc\\xa5\\x12\\xb3\\x24\\", "\\xdd\\xe5\\x28\\xf2\\x36\\x27\\x29\\xc1\\x02\\x0a\\x68\\x04\\xc2\\xb4\\x51\\xc0\\ \\x29\\xa0\\xbb\\x51\\x49\\x81\\xf1\\x14\\x46\\x49\\x03\\xc6\\x45\\x42\\x5d\\x81\\ \\x66\\xc0\\x04\\x6e\\x06\\xa6\\x6e\\x80\\x84\\xc7\\x9d\\xcd\\x99\\x0a\\xca\\x85\\ \\x78\\x1d\\xb8\\xd7\\x02\\x95\\x69\\xd0\\x91\\xe5\\x54\\x98\\x68\\x0b\\x27\\x89\\ \\x58\\x0f\\xfc\\xc0\\xb6\\xf9\\x21\\x2e\\x3a\\x08\\xc6\\x38\\x2f\\xd4\\x74\\x07\\ \\x86\\x01\\x17\\x01\\x67\\xc0\\xa8\\x85\\x9e\\xab\\x88\\xdd\\xd6\\x8c\\xbb\\x05\\ \\xd8\\xe9\\x81\\x5f\\xde\\x06\\x75\\x01\\x0a\\xc1\\x58\\x05\\xd7\\x3e\\x0b\\x97\\ \\xc6\\xed\\x47\\xee\\x02\\xfe\\x04\\x36\\x4f\\x27\\xca\\x62\\x56\\x92\\x4e\\x77\\ \\x1b\\xd8\\xa4\\xb2\\x01\\x1f\\x75\\x98\\xf9\\x8f\\x42\\xcd\\x1c\\x5a\\xcc\\xe1\\ \\xb8\\x83\\x98\\x44\\xb0\\x68\\x02\\x7c\\xc0\\x1e\\xe0\\x9a\\x74\\xa0\\x08\\xa8\\ 
\\x05\\x16\\x79\\x30\\x82\\x83\\x70\\xd3\\x08\\xc9\\x95\\xd0\\x91\\xe8\\x14\\x60\\ \\x02\\xe9\\xf6\\x8e\\xfa\\x0e\\x50\\x7b\\x67\\x88\\x46\\x20\\x94\\x05\\x89\\x7d\\ \\xa3\\x50\\xd3\\xe2\\x7c\\xae\\x0b\\x60\\x80\\x4a\\xe0\\xf8\\x60\\xdc\\xcf\\x54\\", "\\xe0\\xa9\\x85\\xba\\xc7\\x61\\xf8\\xbb\\x70\\x32\\x8c\\x96\\xb5\\x12\\x49\\x08\\ \\xe2\\x5b\\x1e\\x82\\x77\\xee\\x21\\xce\\x2d\\x1c\\xa2\\x3b\\x3d\\x88\\x62\\xb2\\ \\x95\\xdd\\x5c\\xc6\\xf9\\x9e\\x8d\\xec\\x1b\\xf8\\x15\\xc5\\x5d\\x8a\\xd9\\xfc\\ \\xfd\\x97\\x4c\\xce\\x3c\\x97\\xfd\\xf7\\x1f\\xa7\\xab\\xd9\\x40\\xc3\\x2b\\xb7\\ \\x61\\xac\\xfc\\x3d\\x41\\x57\\x7f\\xdf\\xdd\\x1e\\xe6\\xac\\x05\\x4a\\x4c\\xea\\ \\x17\\x59\\x64\\xee\\xcb\\xc1\\x35\\x0f\\x38\\xde\\x1d\\x54\\x04\\xf4\\x71\\xc2\\ \\xcf\\x20\\x3b\\x00\\x4d\\x3a\\x49\\xfd\\xe7\\x65\\xac\\x4f\\x86\\x1b\\xde\\x05\\ \\xe3\\x89\\x31\\xc4\\x9b\\x5f\\x76\\xac\\x6c\\x22\\x2e\\x8e\\x41\\xff\\x77\\xed\\ \\xbc\\x9f\\x93\\x83\\x36\\x8e\\xc2\\xe0\\x12\\x3a\\xb8\\x80\\x24\\xbe\\xb0\\x83\\ \\xd7\\xcd\\x4b\\x09\\x2f\\x81\\x76\\x13\\x52\\x0c\\xf0\\xec\\x01\\xf3\\x43\\xe0\\ \\xb7\\xf1\\x54\\xec\\xfb\\x37\\xf9\\xb1\\x12\\x02\\x34\\x93\\xe9\\xdf\\x0d\\xe6\\ \\xd7\\x90\\x60\\xc1\\xed\\x2b\\xa8\\x5e\\xf7\\x22\\x4b\\x0a\\x07\\x91\\x5b\\xe6\\ \\xa1\\x3c\\x2d\\xc6\\x4b\\x67\\x42\\xb8\\xae\\x6e\\x7e\\x73\\x5e\\x72\\xaf\\x6d\\", "\\x22\\xeb\\x30\\xd2\\xa4\\x44\\xc9\\x78\\xca\\x19\\xf6\\xcc\\x97\\x86\\x95\\x48\\ \\xfb\\x90\\x0a\\xa6\\x48\\xfc\\x20\\x79\\x2f\\x96\\x16\\xe3\\x4c\\xbc\\xbe\\x92\\ \\xce\\x19\\x2e\\xcd\\xec\\xa9\\x38\\x47\\xd5\\x4e\\x4c\\x56\\xef\\xbb\\xa4\\xe1\\ \\xdd\\x24\\xbe\\x51\\x05\\x71\\xd5\\x10\\x57\\x98\\xbd\\xd2\\xe8\\x3e\\xd2\\xb9\\ \\x1e\\x69\\x58\\x9a\\x94\\xf0\\x90\\x4e\\x25\\xaf\\x11\\x81\\xa9\\xd9\\xd2\\x3f\\ \\x3c\\x6a\\x6d\\x41\\x3f\\xcb\\xa1\\xd0\\xc4\\x04\\xc9\\x3f\\x48\\xb1\\xaf\\xd0\\ \\x1c\\xa1\\xc6\\xdf\\x4c\\xa9\\xb7\\x47\\xa2\\x54\\xe2\\x42\\x89\\xfb\\xa4\\xa4\\ \\x73\\xb4\\xa6\\x02\\xad\\x6e\\x41\\xea\\xe6\\x93\\xb8\\xd5\\x99\\x0d\\xfe\\x4b\\ 
\\xea\\x31\\x5c\\xf1\\x5f\\x91\\x75\\x10\\xe9\\xbc\\x14\\x27\\xc6\\xbe\\x69\\x9f\\ \\xc4\\x7d\\x49\\xd2\\x55\\x99\\x4e\\xbc\\x9d\\x2a\\x3d\\x89\\xc4\\x74\\x89\\xf7\\ \\xa5\\x31\\xbd\\xa5\\xae\\x97\\x49\\xac\\x52\\x88\\x36\\xa9\\x47\\x81\\xfd\\x3a\\ \\x35\\x8a\\x52\\x27\\xb1\\x5c\\x16\\x6f\\x48\\x4f\\xa1\\x78\\xca\\x75\\x6a\\x4a\\ \\x78\\x49\\x35\\x74\\x68\\x8e\\x4b\\x42\\x59\\x3e\\x59\\xef\\xa1\\xda\\x00\\x8a\\", "\\xe7\\x02\\xdc\\x34\\x92\\x4a\\x10\\x94\\x0f\\x74\\xe0\\x36\\x77\\x61\\xf8\\x94\\ \\xa3\\xb1\\xd4\\x13\\x02\\xfa\\x02\\xcf\\x34\\x42\\x97\\xbb\\x80\\xad\\x29\\x30\\ \\xb9\\x9d\\xce\\xfb\\x21\\xbc\\x07\\xd2\\x3f\\x32\\xa0\\x5c\\x50\\xef\\x03\\x2b\\ \\x05\\xa2\\x3d\\xed\\x10\\x43\\x0b\\x3c\\xb7\\x8d\\xdf\\x9f\\x86\\xf4\\x3a\\xe8\\ \\x33\\xd5\\x80\\x6d\\x53\\x81\\xee\\x0e\\x36\\xdd\\x0e\\x5e\\x92\\x9d\\xf3\\x8e\\ \\xd9\\xcd\\x4f\\x9e\\xa3\\x38\\x1f\\xc3\\xa2\\xb7\\xd1\\x35\\x60\\xec\\x75\\x5a\\ \\x27\\xe4\\x6c\\x62\\x05\\x30\\x6f\\x18\\x58\\xf7\\x01\\xa3\\x09\\x90\\x88\\x89\\ \\x9f\\x1d\\x24\\x53\\x80\\x9b\\x16\\x44\\x2a\\x06\\xed\\x80\\x2b\\x6f\\x5e\\x74\\ \\xde\\xb9\\x58\\xcc\\x04\\xae\\x6b\\x83\\xbc\\x23\\xce\\xf3\\x7f\\x8b\\xc0\\x2e\\ \\x2f\\x9e\\xb4\\x38\\x5b\\x67\\xc0\\xaf\\x77\\x43\\xcb\\x3f\\x40\\x17\\xc5\\x49\\ \\x09\\x86\\x31\\xab\\x23\\x10\\x8a\\x80\\x51\\x8b\\x75\\x63\\x3b\\x4d\\x43\\x20\\ \\x7b\\x0f\\x24\\xaf\\x32\\xe0\\xac\\x1b\\x38\\x0d\\xb4\\x81\\xcf\\x05\\x39\\x35\\ \\x30\\xf8\\x28\\xf4\\xf9\\x12\\x9a\\x16\\x40\\xc4\\x0b\\x1a\\x0d\\x94\\xd8\\x05\\", "\\x01\\x6d\\x2e\\xf0\\x5a\\x90\\x21\\xac\\xae\\x60\\x76\\x02\\x27\\x9c\\x23\\x6d\\ \\x72\\x6e\\x17\\x9a\\x9c\\x4b\\xfa\\x46\\x03\\x12\\x65\\x53\\xe3\\x28\\xc7\\x9d\\ \\x1e\\x73\\x12\\x60\\x93\\x09\\xcd\\x16\\x04\\x4d\\xc8\\xb3\\x60\\x53\\x1b\\x9a\\ \\x25\\xd4\\xf4\\x15\\x52\\x96\\x7d\\x4d\\x1a\\x5e\\x63\\xcf\\x3c\\x1e\\x93\\x2d\\ \\x8e\\xd6\\xbd\\x48\\x45\\x09\\x92\\xbb\\xc4\\xf6\\x59\\x29\\xdd\\xd5\\xf2\\xab\\ \\xa1\\xaf\\x85\\xde\\x77\\x66\\x23\\xed\\xfb\\x90\\x6e\\x47\\x32\\x06\\x4b\\x3c\\ 
\\x27\\x71\\x97\\x74\\x5e\\x9a\\xac\\x16\\xd4\\x20\\x64\\x1d\\x40\\xea\\x9f\\x2e\\ \\x31\\x47\\xe2\\x6a\\xe9\\x46\\xaf\\x62\\x6d\\xce\\xe5\\x76\\x7e\\x77\\x7b\\x4e\\ \\x92\\x3b\\xda\\xbe\\x1f\\x5e\\x89\\x74\\xa5\\x5f\\x62\\xae\\x7d\\xb5\\x6a\\x3c\\ \\x29\\xbd\\xe8\\x91\\xe6\\x23\\x3d\\x85\\xd4\\xbb\\x44\\x31\\x96\\xa9\\x99\\xcd\\ \\x8a\\xf1\\x95\\x94\\x3b\\x52\\xee\\x9e\\x31\\xbb\\xd8\\x8c\\x97\\x80\\xc6\\x5c\\ \\x68\\xac\\xa3\\x35\\xdf\\x6e\\xde\\x5e\\xc0\\xf4\\x27\\xc1\\x58\\xdc\\x17\\xac\\ \\x73\\x1c\\x00\\x45\\xb0\\x6e\\x69\\xe4\\x8f\\xc1\\xc2\\x72\\xfa\\x23\\x82\\x1d\\", "be lost! # from PyQt5 import QtCore from silx.gui import qt as QtCore", "= [int(v) for v in QtCore.qVersion().split('.')] if qt_version < [5, 8, 0]: rcc_version", "\\x83\\xfa\\x39\\x30\\xe3\\x1c\\xe8\\x0d\\x0c\\xee\\xf0\\x32\\xe3\\xd9\\x08\\x2c\\ \\xc9\\x83\\x48\\x36\\x0c\\x38\\x08\\xe3\\xc5\\xee\\x3b\\xe0\\x9b\\x42\\xd8\\x0d\\ \\x7c\\xb0\\x03\\xd2\\xbe\\x06\\xf7\\x27\\x5d\\xe0\\xf4\\x28\\x70\\xf9\\x20\\xe5\\ \\x04\\x47\\x56\\xed\\xa0\\x6a\\x84\\x49\\x69\\xd8\\x22\\x73\\x85\\x9b\\xb4\\xc7\\ \\xa6\\x42\\x73\\x6f\\x02\\x23\\x9b\\x49\\x78\\x66\\x09\\xee\\x6f\\xc3\\x84\\xa3\\ \\x26\\xbe\\x7d\\x97\\x62\\xb5\\xe7\\xa3\\x59\\x3f\\x62\\xae\\xf6\\xf3\\xf6\\xab\\ \\x11\\xae\\x5f\\x94\\x8f\\x7b\\xe5\\x85\\x7c\\x78\\xaf\\x9f\\xa9\\x89\\xaf\\x52\\ \\x7b\\xf6\\x3a\\x1a\\xc9\\x67\\xe0\\xb7\\x43\\xd9\\xfe\\xf7\\xcf\\x30\\xbf\\x6b\\ \\xde\\xcf\\xdf\\x6a\\x81\\xb9\\x40\\x03\\x6c\\xe9\\x7a\\x88\\x7f\\x54\\x47\\x71\\ \\x7d\\x0d\\x9c\\xee\\x0f\\x87\\xa1\\x79\\x3f\\x1c\\xc7\\xcf\\x50\\x60\\xc6\\xbf\\ \\x2c\\xf8\\x4f\\x7f\\x68\\x1e\\x00\\x1d\\xbd\\xe0\\x60\\x0a\\x08\\xb2\\x42\\x70\\ \\x3e\\xb0\\x28\\x0c\\x49\\x19\\x10\\x9e\\x0a\\x34\\x74\\x03\\x6e\\x82\\xf8\\xc3\\ \\xd0\\xf2\\x02\\x7d\\x2f\\x99\\xc1\\xa8\\x45\\x09\\x14\\xd7\\x42\\x5a\\x38\\x0e\\", "\\x1c\\xa1\\xc6\\xdf\\x4c\\xa9\\xb7\\x47\\xa2\\x54\\xe2\\x42\\x89\\xfb\\xa4\\xa4\\ \\x73\\xb4\\xa6\\x02\\xad\\x6e\\x41\\xea\\xe6\\x93\\xb8\\xd5\\x99\\x0d\\xfe\\x4b\\ 
\\xea\\x31\\x5c\\xf1\\x5f\\x91\\x75\\x10\\xe9\\xbc\\x14\\x27\\xc6\\xbe\\x69\\x9f\\ \\xc4\\x7d\\x49\\xd2\\x55\\x99\\x4e\\xbc\\x9d\\x2a\\x3d\\x89\\xc4\\x74\\x89\\xf7\\ \\xa5\\x31\\xbd\\xa5\\xae\\x97\\x49\\xac\\x52\\x88\\x36\\xa9\\x47\\x81\\xfd\\x3a\\ \\x35\\x8a\\x52\\x27\\xb1\\x5c\\x16\\x6f\\x48\\x4f\\xa1\\x78\\xca\\x75\\x6a\\x4a\\ \\x78\\x49\\x35\\x74\\x68\\x8e\\x4b\\x42\\x59\\x3e\\x59\\xef\\xa1\\xda\\x00\\x8a\\ \\x4f\\x4f\\x96\\x28\\x96\\xae\\x40\\xc1\\xfd\\xf6\\x04\\x2a\\xb0\\x14\\xc9\\x3f\\ \\xde\\x59\\xe8\\x34\\x89\\x4b\\xa4\\x94\\x6e\\xaa\\x5d\\x6a\\xea\\x45\\xa1\\x2f\\ \\x2c\\x64\\x6d\\x41\\x2a\\xcd\\x75\\xf4\\xe3\\x11\\x89\\xd5\\x92\\xeb\\x7d\\xe9\\ \\xd2\\x54\\xe9\\x17\\xa4\\x61\\x45\\x12\\x1f\\x4a\\x54\\x2a\\xc6\\x46\\x29\\x6d\\ \\x8c\\xc4\\x9b\\xb2\\x58\\x2b\\xa5\\xa7\\x39\\x8b\\x3d\\x29\\x75\\x4f\\x56\\x94\\ \\xb5\\x3a\\x4c\\x4c\\x01\\x0e\\x4a\\xc6\\x33\\x8a\\xd2\\xa4\\x38\\x01\\x1d\\x24\\", "\\x11\\xb3\\x49\\x9a\\xf3\\x24\\x7c\\x9c\\x45\\x6c\\x8c\\xa0\\xcb\\xef\\xb8\\xe8\\ \\x0a\\x94\\x63\\x1a\\x3e\\x7c\\x5d\\x93\\xd1\\xf8\\x16\\xd2\\x9a\\x9b\\x70\\x75\\ \\xf6\\x84\\x68\\x6f\\x28\\x3d\\xc3\\xd0\\x4b\\xcb\\xc9\\x8c\\x2c\\x62\\xdf\\xb2\\ \\x9d\\xdc\\xb2\\xfb\\x08\\x1e\\xef\\x11\\x16\\x56\\x26\\x71\\xdb\\xb1\\x5f\\xf8\\ \\x57\\x82\\x87\\x65\\x56\\x0e\\x2c\\xd9\\xde\\x45\\x5a\\x81\\x74\\x27\\x6a\\xac\\ \\x43\\x17\\xcb\\xa3\\xd8\\xa3\\x48\\xd9\\xf9\\xd2\\xe0\\x2e\\xda\\x7d\\x08\\x0d\\ \\x94\\x57\\x7f\\x84\\x90\\x0a\\x3d\\x52\\x62\\xa2\\x94\\x9a\\x2c\\x0d\\x34\\x54\\ \\x7f\\x00\\x5d\\x27\\xf4\\x80\\xd0\\x0f\\x42\\xaf\\x0b\\xad\\x15\\xaa\\x8d\\xa3\\ \\xaf\\x84\\xaa\\x02\\x28\\x7e\\xef\\x00\\x89\\xe7\\xd5\\x41\\x95\\x9a\\x39\\x25\\ \\x8b\\x8d\\xb2\\xcc\\x0f\\x14\\xfc\\x0d\\xc5\\x6f\\xf0\\x49\\xcc\\x96\\xd8\\xa1\\ \\x28\\x87\\xa4\\xbf\\x77\\x97\\x26\\xa3\\x76\\x36\\xa9\\x9c\\x46\\x9d\\xa0\\x42\\ \\xed\\x4f\\xa5\\xc9\\x1a\\xd8\\x4f\\x07\\x8d\\x66\\x3d\\x52\\x70\\x4c\\x4a\\xfe\\ \\x51\\x67\\xb6\\xf8\\xb4\\xe3\\xdc\\x1b\\x34\\x3f\\x7f\\xa9\\x46\\x0e\\x8d\\xeb\\", 
"import QtCore from silx.gui import qt as QtCore qt_resource_data = b\"\\ \\x00\\x00\\x19\\x3d\\ \\x89\\", "\\x3b\\x62\\x6a\\xf0\\xbd\\x22\\x0d\\xf5\\xab\\x82\\x45\\x22\\xb2\\x1b\\x55\\xef\\ \\x40\\x91\\x25\\xe8\\xcb\\x38\\x52\\x11\\xd2\\x14\\xa4\\x9b\\x4c\\xc9\\x6b\\x4a\\ \\x49\\x1e\\x55\\x2c\\x43\\x5f\\x06\\x91\\xee\\x43\\x72\\x17\\x4a\\xfc\\x4d\\x62\\ \\x92\\x64\\x9e\\x27\\x2b\\xbb\\x50\\x81\\x95\\xe8\\x47\\x0b\\xad\\x11\\xfa\\x46\\ \\xe8\\x90\\x90\\x16\\x20\\x65\\xa4\\x49\\xdc\\xee\\x2c\\xf2\\x6b\\x89\\xef\\xa4\\ \\xec\\xcb\\xa5\\xf7\\x0d\\xa9\\xa0\\xbf\\xc4\\xff\\x24\\xf6\\x4b\\xbd\\x2f\\x95\\ \\xf2\\x87\\x4b\\xfc\\x2c\\x15\\x3d\\x28\\x31\\x47\\x61\\x02\\x12\\x0f\\xa9\\x93\\ \\x5a\\x9d\\xa6\\x46\\x32\\x27\\xa9\\x9a\\x4a\\x7d\\x8d\\xb4\\x84\\x98\\x54\\x38\\ \\x5e\\xe2\\x62\\x45\\xf8\\x42\\xaf\\x13\\x93\\xa1\\xc7\\x11\\xdd\\x40\\xa3\\x20\\ \\xbe\\x07\\xdc\\x33\\x93\\xc0\\x17\\x81\\xd0\\x00\\xf0\\x1d\\x22\\xb8\\x3c\\x82\\ \\xd5\\x08\\xc9\\xff\\x01\\xca\\x52\\x80\\x9e\\xe0\\xe9\\xc0\\xba\\xfe\\x34\\x07\\ \\x66\\x84\\x38\\x3a\\x0a\\x48\\x80\\x0e\\x5b\\x5d\\x70\\x01\\x05\\x40\\x7e\\x0c\\ \\x52\\x6a\\xc0\\xfc\\xb7\\x1b\\x3e\\x1a\\x01\\x91\\x99\\x40\\x37\\xa0\\x0e\\x92\\", "\\x0a\\x94\\x63\\x1a\\x3e\\x7c\\x5d\\x93\\xd1\\xf8\\x16\\xd2\\x9a\\x9b\\x70\\x75\\ \\xf6\\x84\\x68\\x6f\\x28\\x3d\\xc3\\xd0\\x4b\\xcb\\xc9\\x8c\\x2c\\x62\\xdf\\xb2\\ \\x9d\\xdc\\xb2\\xfb\\x08\\x1e\\xef\\x11\\x16\\x56\\x26\\x71\\xdb\\xb1\\x5f\\xf8\\ \\x57\\x82\\x87\\x65\\x56\\x0e\\x2c\\xd9\\xde\\x45\\x5a\\x81\\x74\\x27\\x6a\\xac\\ \\x43\\x17\\xcb\\xa3\\xd8\\xa3\\x48\\xd9\\xf9\\xd2\\xe0\\x2e\\xda\\x7d\\x08\\x0d\\ \\x94\\x57\\x7f\\x84\\x90\\x0a\\x3d\\x52\\x62\\xa2\\x94\\x9a\\x2c\\x0d\\x34\\x54\\ \\x7f\\x00\\x5d\\x27\\xf4\\x80\\xd0\\x0f\\x42\\xaf\\x0b\\xad\\x15\\xaa\\x8d\\xa3\\ \\xaf\\x84\\xaa\\x02\\x28\\x7e\\xef\\x00\\x89\\xe7\\xd5\\x41\\x95\\x9a\\x39\\x25\\ \\x8b\\x8d\\xb2\\xcc\\x0f\\x14\\xfc\\x0d\\xc5\\x6f\\xf0\\x49\\xcc\\x96\\xd8\\xa1\\ 
\\x28\\x87\\xa4\\xbf\\x77\\x97\\x26\\xa3\\x76\\x36\\xa9\\x9c\\x46\\x9d\\xa0\\x42\\ \\xed\\x4f\\xa5\\xc9\\x1a\\xd8\\x4f\\x07\\x8d\\x66\\x3d\\x52\\x70\\x4c\\x4a\\xfe\\ \\x51\\x67\\xb6\\xf8\\xb4\\xe3\\xdc\\x1b\\x34\\x3f\\x7f\\xa9\\x46\\x0e\\x8d\\xeb\\ \\x92\\xfc\\x72\\x99\\x59\\xde\\x36\\x82\\x35\\xc0\\x71\\xc8\\xf8\\x16\\x26\\x07\\", "\\x9c\\x9e\\x6a\\x06\\x32\\x81\\xc1\\x50\\x57\\x85\\x7a\\x74\\x80\\x1b\\x8c\\x6c\\ \\xe7\\xad\\x0c\\xc0\\xed\\xc0\\xab\\xdc\\x07\\x64\\xe0\\x61\\x0f\\xd9\\xe4\\x13\\ \\xe7\\x02\\xdc\\x34\\x92\\x4a\\x10\\x94\\x0f\\x74\\xe0\\x36\\x77\\x61\\xf8\\x94\\ \\xa3\\xb1\\xd4\\x13\\x02\\xfa\\x02\\xcf\\x34\\x42\\x97\\xbb\\x80\\xad\\x29\\x30\\ \\xb9\\x9d\\xce\\xfb\\x21\\xbc\\x07\\xd2\\x3f\\x32\\xa0\\x5c\\x50\\xef\\x03\\x2b\\ \\x05\\xa2\\x3d\\xed\\x10\\x43\\x0b\\x3c\\xb7\\x8d\\xdf\\x9f\\x86\\xf4\\x3a\\xe8\\ \\x33\\xd5\\x80\\x6d\\x53\\x81\\xee\\x0e\\x36\\xdd\\x0e\\x5e\\x92\\x9d\\xf3\\x8e\\ \\xd9\\xcd\\x4f\\x9e\\xa3\\x38\\x1f\\xc3\\xa2\\xb7\\xd1\\x35\\x60\\xec\\x75\\x5a\\ \\x27\\xe4\\x6c\\x62\\x05\\x30\\x6f\\x18\\x58\\xf7\\x01\\xa3\\x09\\x90\\x88\\x89\\ \\x9f\\x1d\\x24\\x53\\x80\\x9b\\x16\\x44\\x2a\\x06\\xed\\x80\\x2b\\x6f\\x5e\\x74\\ \\xde\\xb9\\x58\\xcc\\x04\\xae\\x6b\\x83\\xbc\\x23\\xce\\xf3\\x7f\\x8b\\xc0\\x2e\\ \\x2f\\x9e\\xb4\\x38\\x5b\\x67\\xc0\\xaf\\x77\\x43\\xcb\\x3f\\x40\\x17\\xc5\\x49\\ \\x09\\x86\\x31\\xab\\x23\\x10\\x8a\\x80\\x51\\x8b\\x75\\x63\\x3b\\x4d\\x43\\x20\\", "\\xce\\xfb\\x0d\\xbd\\x1e\\x43\\xcb\\x85\\x2a\\xcb\\x50\\xe0\\x15\\x14\\x7e\\xd3\\ \\xd0\\xd7\\x16\\xfa\\xde\\x42\\x91\\xcf\\x91\\x35\\x07\\x35\\x6f\\x40\\xe5\\x42\\ \\x2d\\x9d\\xc8\\x7a\\xd0\\x70\\x02\\xd8\\x42\\x89\\xb7\\xa5\\x82\\x0b\\x14\\xd9\\ \\x87\\x34\\x0b\\x89\\xc5\\xf6\\x08\\xe1\\xda\\x22\\xbb\\xb1\\x79\\x59\\xe2\\x4f\\ \\xc9\\x7b\\x8b\\xf4\\x3c\\xb2\\x78\\x4d\\x47\\x88\\xab\\x95\\x93\\xd2\\x2d\\x48\\ \\x09\\x97\\xc9\\xa2\\x5a\\xe2\\xa8\\xe2\\x74\\xd8\\x73\\x94\\x84\\x71\\xd2\\x84\\ \\x54\\x29\\xad\\x44\\x1a\\x93\\x22\\x1a\\xd7\\x20\\x8d\\xb0\\x67\\x22\\x95\\xb5\\ 
\\xa8\\x7a\\x1f\\x52\\xae\\xcb\\x59\\xc4\\x20\\xb5\\x5d\\xe3\\xd5\\x8f\\xad\\x68\\ \\x6f\\x33\\x0a\\xbf\\x66\\x48\\x05\\xc9\\x12\\x57\\x4a\\xfe\\x21\\xaa\\x2c\\x47\\ \\x3b\\x9a\\x91\\x8a\\x0d\\xc9\\xb8\\x44\\x72\\x4f\\x53\\xec\\xfe\\x02\\x55\\x34\\ \\x22\\xeb\\x30\\xd2\\xa4\\x44\\xc9\\x78\\xca\\x19\\xf6\\xcc\\x97\\x86\\x95\\x48\\ \\xfb\\x90\\x0a\\xa6\\x48\\xfc\\x20\\x79\\x2f\\x96\\x16\\xe3\\x4c\\xbc\\xbe\\x92\\ \\xce\\x19\\x2e\\xcd\\xec\\xa9\\x38\\x47\\xd5\\x4e\\x4c\\x56\\xef\\xbb\\xa4\\xe1\\", "for v in QtCore.qVersion().split('.')] if qt_version < [5, 8, 0]: rcc_version = 1", "\\x05\\xd6\\x47\\xa1\\xf2\\x28\\x14\\xc3\\xe9\\x3b\\x0c\\x62\\x45\\xb5\\xc4\\x32\\ \\x6a\\x09\\xf9\\x21\\xec\\x03\\x9f\\x0f\\x3c\\xfd\\xa0\\xc6\\x03\\x41\\xa0\\x3c\\ \\x0f\\x0a\\xbf\\x12\\xe9\\x5b\\x1a\\x61\\xc3\\x17\\xf0\\xe7\\xaf\\xd0\\x2c\\xa8\\ \\x2e\\x80\\xa6\\x49\\xc0\\x14\\xec\\x4f\\x36\\x00\\x27\\x81\\xef\\x60\\x76\\x0d\\ \\xfc\\xd3\\x81\\x45\\x14\\x38\\x88\\xcd\\x1e\\x06\\xe0\\x8b\\x62\\x10\\x26\\x8f\\ \\x18\\xb5\\x24\\xd0\\x8c\\x41\\x3a\\xb3\\x39\\x0c\\x84\\x28\\xa1\\x37\\x70\\x15\\ \\xe0\\x6e\\xc0\\xc4\\x87\\x45\\xcc\\x39\\x91\\x62\\x20\\x25\\xe8\\x3c\\x34\\x05\\ \\x88\\x79\\xc0\\xf4\\xc2\\xe8\\x36\\x22\\xb7\\x59\\x54\\x03\\x1d\\x06\\xb8\\xbd\\ \\xa0\\x64\\x68\\xf4\\xd8\\x20\\xf3\\x3b\\x48\\xf7\\x01\\x4d\\x09\\x10\\xbd\\x10\\ \\x32\\x87\\x05\\x09\\xb9\\xcb\\xf0\\x76\\x82\\xe7\\x87\\x72\\x98\\xb7\\x1b\\x6a\\ \\x9f\\x71\\x8e\\x7b\\xa7\\x8d\\x9d\\x8b\\x6a\\x60\\x2c\\xd0\\xe2\\xf4\\x7b\\xb2\\ \\x53\\x40\\xa7\\x43\\x83\\x56\\x04\\xa8\\xc7\\xcf\\x59\\xb2\\x30\\x38\\x8b\\x9f\\ \\x6c\\x4e\\x72\\x3e\\x71\\x92\\x8c\\xf7\\x71\\xa9\\x1b\\x85\\x0c\\xc4\\x5d\\x48\\", "\\x49\\x77\\x23\\x0d\\xef\\xa3\\x33\\x54\\xca\\x0c\\xb7\\x9b\\x04\\x01\\xcb\\xeb\\ \\x58\\x9f\\x6f\\x0d\\x4a\\xf6\\xc0\\x0a\\xa0\\x75\\xa2\\x23\\xc4\\x78\\xe1\\x93\\ \\x3c\\xa6\\xfd\\xdb\\x66\\xa9\\xe3\\xc0\\x67\\xe7\\xc2\\x2f\\x4f\\xc2\\xbe\\x12\\ \\x9b\\x1f\\x9a\\x12\\xa0\\x7a\\x11\\x34\\xac\\x87\\x23\\xef\\x74\\xe0\\xcd\\x84\\ 
\\x7e\\xd7\\x27\\x63\\x2c\\x98\\x06\\xb1\\x09\\xf6\\xce\\x1b\\xf5\\xf0\\xc8\\x12\\ \\x08\\xc5\\x61\\xcd\\x9d\\x0e\\xe7\\x6d\\x86\\x09\\x31\\xa8\\x1d\\x83\\x41\\x10\\ \\xda\\xf2\\x6c\\x37\\x61\\x96\\x11\\x02\\x38\\x3b\\x01\\x3c\\x11\\xd2\\x53\\xbe\\ \\xc7\\x72\\x7e\\x7b\\x3b\\x13\\x89\\xbf\\xf3\\x03\\x54\\xe6\\x41\\xef\\x32\\x0a\\ \\x86\\xf5\\xc0\\xd4\\x29\\x11\\x00\\x1a\\x8a\\x80\\x30\\xd0\\x2c\\xce\\x59\\x0a\\ \\xe7\\x00\\x5f\\xf4\\x05\\xa6\\x01\\x66\\x9d\\xed\\xa6\\xde\\x82\\x69\\x9f\\xd8\\ \\x26\\xd3\\x72\\xf4\\xb9\\xcc\\x11\\x63\\x0b\\xd8\\xef\\x82\\x3d\\x3e\\xe8\\xd2\\ \\x00\\x99\\x17\\x16\\xc1\\xf2\\xa9\\xf6\\x26\\x50\\x0f\\xac\\x84\\xe9\\x6f\\xd0\\ \\xf1\\x58\\x1b\\x7c\\x9b\\xe1\\x38\\xe7\\x3a\\xb8\\xf8\\xbf\\x50\\x93\\xe8\\x10\\", "\\x09\\x86\\x31\\xab\\x23\\x10\\x8a\\x80\\x51\\x8b\\x75\\x63\\x3b\\x4d\\x43\\x20\\ \\x7b\\x0f\\x24\\xaf\\x32\\xe0\\xac\\x1b\\x38\\x0d\\xb4\\x81\\xcf\\x05\\x39\\x35\\ \\x30\\xf8\\x28\\xf4\\xf9\\x12\\x9a\\x16\\x40\\xc4\\x0b\\x1a\\x0d\\x94\\xd8\\x05\\ \\x46\\x57\\x60\\xf4\\xb4\\x20\\xd5\\xd6\\x4b\\x92\\x81\\x6d\\xc0\\xd2\\x12\\x08\\ \\x3c\\xe7\\x54\\xe6\\xc3\\xcd\\x29\\x22\\x18\\x94\\xf2\\x3d\\x09\\xf8\\x68\\x24\\ \\x15\\xe1\\xe6\\x00\\x60\\x3c\\x2e\\x34\\x06\\x98\\xb0\\xd7\\xc1\\xe9\\x61\\x60\\ \\x08\\x34\\x64\\x40\\xd6\\x63\\x60\\xec\\x02\\xfc\\x49\\x58\\x53\\x23\\xec\\xb8\\ \\x39\\xca\\xee\\x7e\\x10\\x31\\xe1\\xbc\\x6a\\x28\\xfd\\x15\\x92\\xc3\\x70\\x70\\ \\x3a\\x54\\x7b\\xa0\\x67\\x08\\x06\\x6c\\x00\\xef\\xcf\\x6e\\x62\\xbd\\x5d\\x44\\ \\x0b\\xc3\\x44\\x4a\\x21\\xad\\x19\\x8c\\x9f\\xbc\\xf0\\x4e\\x1a\\x54\\x5e\\x65\\ \\x0b\\x13\\x5d\\x9c\\x86\\xef\\x07\\xde\\xb7\\x20\\xfb\\x10\\x3c\\xbc\\x0d\\x06\\ \\xc7\\x61\\x25\\xb0\\x22\\x07\\xea\\x9e\\xb0\\x29\\xc8\\xa8\\x05\\x8d\\x27\\x48\\ \\x29\\x1e\\xca\\x70\\x73\\x14\\xa3\\xcf\\xab\\xd0\\xd2\\x0b\\xac\\x54\\x1a\\x1a\\", "\\xd8\\x0b\\x31\\x98\\x52\\x03\\xae\\x3f\\xec\\x33\\xa8\\xf0\\x91\\x7a\\xc8\\x2e\\ \\xa4\\xc5\\xd1\\x69\\x01\\x3d\\x7f\\x04\\x4e\\x7b\\x9c\\x73\\xaa\\xb2\\xd3\\xcd\\ 
\\xf0\\xd5\\xb4\\x7d\\xd0\\x48\\x3c\\x03\\x8c\\x4f\\x81\\xf6\\x5c\\xe0\\x24\\x06\\ \\xeb\\x60\\xfa\\xff\\xa0\\x17\\x50\\x59\\x07\\xfc\\x66\\x7b\\xf0\\xdc\\xd3\\xb6\\ \\xb5\\xca\\xe9\\xc0\\xc7\\x5e\\x52\\xc1\\x16\\xe1\\x8f\\x53\\xed\\xbd\\xc9\\x59\\ \\x4c\\x8e\\xf1\\x23\\x7e\\x5c\\x00\\x24\\xe2\\x82\\xce\\xcb\\x70\\xd5\\xbe\\xc8\\ \\xbc\\x20\\x50\\x09\\x1c\\x48\\x80\\xd6\\x12\\xd8\\x3d\\x1c\\x0e\\xe6\\xdb\\x4b\\ \\x31\\x80\\xb2\\x64\\x68\\xba\\x06\\x7a\\x8c\\x6b\\x23\\x3a\\xa0\\x82\\xf8\\x8c\\ \\x08\\x47\\xc7\\x42\\xb3\\xdb\\x5e\\xb2\\xdf\\xb6\\x87\\xe4\\x1b\\xe0\\x2b\\x8b\\ \\x42\\xc3\\x29\\x48\\xaa\\x80\\xd1\\x95\\x04\\xdf\\x0b\\x53\\x57\\x0c\\x1e\\x13\\ \\x12\\xf7\\x00\\x3f\\xb9\\x81\\x1d\\x90\\xf6\\x13\\xfc\\xb7\\xda\\x86\\xf3\\x91\\ \\x20\\x1c\\xfc\\x1d\\xac\\x13\\x70\\xd9\\x2f\\x30\\x0a\\x70\\x45\\xe1\\xfb\\x10\\ \\x1e\\x26\\x03\\x6d\\x18\\xfe\\x15\\x50\\xd0\\x08\\xa5\\x11\\x4c\\xef\\x1a\\x5c\\", "\\x5a\\x9d\\xa6\\x46\\x32\\x27\\xa9\\x9a\\x4a\\x7d\\x8d\\xb4\\x84\\x98\\x54\\x38\\ \\x5e\\xe2\\x62\\x45\\xf8\\x42\\xaf\\x13\\x93\\xa1\\xc7\\x11\\xdd\\x40\\xa3\\x20\\ \\xbe\\x07\\xdc\\x33\\x93\\xc0\\x17\\x81\\xd0\\x00\\xf0\\x1d\\x22\\xb8\\x3c\\x82\\ \\xd5\\x08\\xc9\\xff\\x01\\xca\\x52\\x80\\x9e\\xe0\\xe9\\xc0\\xba\\xfe\\x34\\x07\\ \\x66\\x84\\x38\\x3a\\x0a\\x48\\x80\\x0e\\x5b\\x5d\\x70\\x01\\x05\\x40\\x7e\\x0c\\ \\x52\\x6a\\xc0\\xfc\\xb7\\x1b\\x3e\\x1a\\x01\\x91\\x99\\x40\\x37\\xa0\\x0e\\x92\\ \\x0e\\xc3\\x9d\\xcf\\xc3\\xfb\\xf9\\xd0\\xfe\\x36\\x10\\x83\\x5e\\xf7\\xc3\\xd1\\ \\x6f\\x80\\x0e\\x2c\\x52\\x30\\xe9\\x0e\\xfc\\x8f\\x0a\\x9e\\x24\\x42\\x23\\x99\\ \\x74\\x52\\x46\\x37\\x06\\xf3\\x13\\x49\\x7c\\x03\\xc6\\x25\\x9c\\xd2\\x60\\xd6\\ \\x53\\x42\\x12\\x60\\x54\\xfd\\x82\\xaa\\xfa\\xc2\\xd6\\x4c\\x98\\xbd\\x15\\x12\\ \\xe7\\xb9\\xc0\\x63\\x41\\xc8\\x84\\xac\\x38\\x24\\x00\\xbf\\x00\\x5d\\x81\\x01\\ \\x06\\x8d\\x17\\x19\\x6c\\xb8\\xdc\\xe2\\x54\\x81\\xfd\\x52\\x9f\\xd3\\x90\\x17\\ \\x86\\xf6\\x1c\\xd8\\x92\\x0a\\x85\\xc0\\xc5\\xe5\\xe0\\x8d\\x81\\x2b\\x05\\x94\\", 
"\\xa0\\x85\\x24\\xb0\\x86\\x43\\x52\\x12\\xdc\\xd5\\x88\\xc1\\x6b\\x74\\xa3\\x83\\ \\x14\\x22\\x54\\x60\\x50\\x13\\xbd\\x04\\xbe\\xee\\x83\\xf9\\xdb\\x4d\\x16\\xdb\\ \\x80\\xc3\\x5e\\x68\\xbe\\xd5\\xd1\\xa1\\x69\\x10\\x1a\\x69\\xef\\xf2\\x7a\\xe0\\ \\xd8\\x0b\\x31\\x98\\x52\\x03\\xae\\x3f\\xec\\x33\\xa8\\xf0\\x91\\x7a\\xc8\\x2e\\ \\xa4\\xc5\\xd1\\x69\\x01\\x3d\\x7f\\x04\\x4e\\x7b\\x9c\\x73\\xaa\\xb2\\xd3\\xcd\\ \\xf0\\xd5\\xb4\\x7d\\xd0\\x48\\x3c\\x03\\x8c\\x4f\\x81\\xf6\\x5c\\xe0\\x24\\x06\\ \\xeb\\x60\\xfa\\xff\\xa0\\x17\\x50\\x59\\x07\\xfc\\x66\\x7b\\xf0\\xdc\\xd3\\xb6\\ \\xb5\\xca\\xe9\\xc0\\xc7\\x5e\\x52\\xc1\\x16\\xe1\\x8f\\x53\\xed\\xbd\\xc9\\x59\\ \\x4c\\x8e\\xf1\\x23\\x7e\\x5c\\x00\\x24\\xe2\\x82\\xce\\xcb\\x70\\xd5\\xbe\\xc8\\ \\xbc\\x20\\x50\\x09\\x1c\\x48\\x80\\xd6\\x12\\xd8\\x3d\\x1c\\x0e\\xe6\\xdb\\x4b\\ \\x31\\x80\\xb2\\x64\\x68\\xba\\x06\\x7a\\x8c\\x6b\\x23\\x3a\\xa0\\x82\\xf8\\x8c\\ \\x08\\x47\\xc7\\x42\\xb3\\xdb\\x5e\\xb2\\xdf\\xb6\\x87\\xe4\\x1b\\xe0\\x2b\\x8b\\ \\x42\\xc3\\x29\\x48\\xaa\\x80\\xd1\\x95\\x04\\xdf\\x0b\\x53\\x57\\x0c\\x1e\\x13\\", "\\xe7\\xad\\x0c\\xc0\\xed\\xc0\\xab\\xdc\\x07\\x64\\xe0\\x61\\x0f\\xd9\\xe4\\x13\\ \\xe7\\x02\\xdc\\x34\\x92\\x4a\\x10\\x94\\x0f\\x74\\xe0\\x36\\x77\\x61\\xf8\\x94\\ \\xa3\\xb1\\xd4\\x13\\x02\\xfa\\x02\\xcf\\x34\\x42\\x97\\xbb\\x80\\xad\\x29\\x30\\ \\xb9\\x9d\\xce\\xfb\\x21\\xbc\\x07\\xd2\\x3f\\x32\\xa0\\x5c\\x50\\xef\\x03\\x2b\\ \\x05\\xa2\\x3d\\xed\\x10\\x43\\x0b\\x3c\\xb7\\x8d\\xdf\\x9f\\x86\\xf4\\x3a\\xe8\\ \\x33\\xd5\\x80\\x6d\\x53\\x81\\xee\\x0e\\x36\\xdd\\x0e\\x5e\\x92\\x9d\\xf3\\x8e\\ \\xd9\\xcd\\x4f\\x9e\\xa3\\x38\\x1f\\xc3\\xa2\\xb7\\xd1\\x35\\x60\\xec\\x75\\x5a\\ \\x27\\xe4\\x6c\\x62\\x05\\x30\\x6f\\x18\\x58\\xf7\\x01\\xa3\\x09\\x90\\x88\\x89\\ \\x9f\\x1d\\x24\\x53\\x80\\x9b\\x16\\x44\\x2a\\x06\\xed\\x80\\x2b\\x6f\\x5e\\x74\\ \\xde\\xb9\\x58\\xcc\\x04\\xae\\x6b\\x83\\xbc\\x23\\xce\\xf3\\x7f\\x8b\\xc0\\x2e\\ \\x2f\\x9e\\xb4\\x38\\x5b\\x67\\xc0\\xaf\\x77\\x43\\xcb\\x3f\\x40\\x17\\xc5\\x49\\ 
\\x09\\x86\\x31\\xab\\x23\\x10\\x8a\\x80\\x51\\x8b\\x75\\x63\\x3b\\x4d\\x43\\x20\\ \\x7b\\x0f\\x24\\xaf\\x32\\xe0\\xac\\x1b\\x38\\x0d\\xb4\\x81\\xcf\\x05\\x39\\x35\\", "\\xca\\x6e\\x08\\x05\\x89\\x45\\xd7\\x53\\x8b\\x81\\x17\\x93\\x6c\\x7e\\xc0\\x74\\ \\x7d\\x4c\\x34\\x3e\\x8f\\x43\\x14\\x73\\x12\\x17\\xa9\\x40\\x42\\xfa\\x19\\x8c\\ \\x52\\x19\\xca\\x05\\xba\\x20\\x2e\\x06\\xee\\x3a\\x0c\\xfe\\x76\\x30\\xbf\\x04\\ \\x3e\\x07\\x32\\x80\\xcb\\xa0\\xf9\\x2a\\xd8\\x71\\x11\\xb4\\x87\\xa1\\xdf\\x09\\ \\x83\\x82\\xed\\x90\\xb1\\x0a\\xd8\\x9e\\x08\\xa6\\x05\\xe3\\xc3\\xfc\\xf4\\xb1\\ \\x18\\x14\\x85\\xdc\\x1b\\x80\\x03\\x06\\x74\\x26\\xa2\\xe2\\x04\\x3a\\x1f\\x69\\ \\xc7\\xec\\x1b\\xc3\\xdd\\x28\\x5c\\x8b\\x4d\\x8c\\xd5\\xbd\\xa1\\xf1\\x6a\\xa0\\ \\x18\\x68\\x05\\xb2\\x80\\x1e\\xc0\\x66\\x48\\xff\\x11\\x46\\xee\\x04\\x3f\\xf0\\ \\xdd\\xe5\\x28\\xf2\\x36\\x27\\x29\\xc1\\x02\\x0a\\x68\\x04\\xc2\\xb4\\x51\\xc0\\ \\x29\\xa0\\xbb\\x51\\x49\\x81\\xf1\\x14\\x46\\x49\\x03\\xc6\\x45\\x42\\x5d\\x81\\ \\x66\\xc0\\x04\\x6e\\x06\\xa6\\x6e\\x80\\x84\\xc7\\x9d\\xcd\\x99\\x0a\\xca\\x85\\ \\x78\\x1d\\xb8\\xd7\\x02\\x95\\x69\\xd0\\x91\\xe5\\x54\\x98\\x68\\x0b\\x27\\x89\\ \\x58\\x0f\\xfc\\xc0\\xb6\\xf9\\x21\\x2e\\x3a\\x08\\xc6\\x38\\x2f\\xd4\\x74\\x07\\", "\\x00\\x00\\x01\\x74\\x20\\x97\\x84\\xbd\\ \" qt_version = [int(v) for v in QtCore.qVersion().split('.')] if qt_version < [5,", "\\xa3\\xb1\\xd4\\x13\\x02\\xfa\\x02\\xcf\\x34\\x42\\x97\\xbb\\x80\\xad\\x29\\x30\\ \\xb9\\x9d\\xce\\xfb\\x21\\xbc\\x07\\xd2\\x3f\\x32\\xa0\\x5c\\x50\\xef\\x03\\x2b\\ \\x05\\xa2\\x3d\\xed\\x10\\x43\\x0b\\x3c\\xb7\\x8d\\xdf\\x9f\\x86\\xf4\\x3a\\xe8\\ \\x33\\xd5\\x80\\x6d\\x53\\x81\\xee\\x0e\\x36\\xdd\\x0e\\x5e\\x92\\x9d\\xf3\\x8e\\ \\xd9\\xcd\\x4f\\x9e\\xa3\\x38\\x1f\\xc3\\xa2\\xb7\\xd1\\x35\\x60\\xec\\x75\\x5a\\ \\x27\\xe4\\x6c\\x62\\x05\\x30\\x6f\\x18\\x58\\xf7\\x01\\xa3\\x09\\x90\\x88\\x89\\ \\x9f\\x1d\\x24\\x53\\x80\\x9b\\x16\\x44\\x2a\\x06\\xed\\x80\\x2b\\x6f\\x5e\\x74\\ 
\\xde\\xb9\\x58\\xcc\\x04\\xae\\x6b\\x83\\xbc\\x23\\xce\\xf3\\x7f\\x8b\\xc0\\x2e\\ \\x2f\\x9e\\xb4\\x38\\x5b\\x67\\xc0\\xaf\\x77\\x43\\xcb\\x3f\\x40\\x17\\xc5\\x49\\ \\x09\\x86\\x31\\xab\\x23\\x10\\x8a\\x80\\x51\\x8b\\x75\\x63\\x3b\\x4d\\x43\\x20\\ \\x7b\\x0f\\x24\\xaf\\x32\\xe0\\xac\\x1b\\x38\\x0d\\xb4\\x81\\xcf\\x05\\x39\\x35\\ \\x30\\xf8\\x28\\xf4\\xf9\\x12\\x9a\\x16\\x40\\xc4\\x0b\\x1a\\x0d\\x94\\xd8\\x05\\ \\x46\\x57\\x60\\xf4\\xb4\\x20\\xd5\\xd6\\x4b\\x92\\x81\\x6d\\xc0\\xd2\\x12\\x08\\", "\\x4e\\x60\\x5a\\x51\\x4c\\xd7\\x6f\\xa0\\x6e\\x50\\x6f\\x40\\xdd\\x5d\\x58\\xa7\\ \\xfb\\x62\\x5a\\x3e\\xc4\\x4c\\x0c\\xd2\\x70\\xf1\\x7f\\xd0\\x77\\x33\\x9c\\x13\\ \\xc4\\x5d\\x0f\\xfe\\x3a\\x70\\x6f\\x07\\xe3\\xb8\\xe3\\x6c\\x7a\\x54\\x91\\xbe\\ \\x25\\x9b\\x5d\\x4c\\x22\\x89\\x8d\\x24\\x45\\x0b\\x68\\x8d\\x9c\\x8f\\x72\\xf7\\ \\xd1\\xb2\\xeb\\x72\\x9a\\xae\\xf6\\xb0\\x6a\\xfd\\x9d\\x7c\\xf8\\xa4\\xc1\\x23\\ \\xef\\x1c\\xa4\\xa5\\x7d\\x0a\\xff\\x1f\\xa7\\x48\\xb3\\x27\\x67\\x17\\xe2\\x1e\\ \\x00\\x00\\x00\\x00\\x49\\x45\\x4e\\x44\\xae\\x42\\x60\\x82\\ \" qt_resource_name = b\"\\ \\x00\\x05\\ \\x00\\x73\\x5e\\x63\\", "\\x3c\\xe7\\x54\\xe6\\xc3\\xcd\\x29\\x22\\x18\\x94\\xf2\\x3d\\x09\\xf8\\x68\\x24\\ \\x15\\xe1\\xe6\\x00\\x60\\x3c\\x2e\\x34\\x06\\x98\\xb0\\xd7\\xc1\\xe9\\x61\\x60\\ \\x08\\x34\\x64\\x40\\xd6\\x63\\x60\\xec\\x02\\xfc\\x49\\x58\\x53\\x23\\xec\\xb8\\ \\x39\\xca\\xee\\x7e\\x10\\x31\\xe1\\xbc\\x6a\\x28\\xfd\\x15\\x92\\xc3\\x70\\x70\\ \\x3a\\x54\\x7b\\xa0\\x67\\x08\\x06\\x6c\\x00\\xef\\xcf\\x6e\\x62\\xbd\\x5d\\x44\\ \\x0b\\xc3\\x44\\x4a\\x21\\xad\\x19\\x8c\\x9f\\xbc\\xf0\\x4e\\x1a\\x54\\x5e\\x65\\ \\x0b\\x13\\x5d\\x9c\\x86\\xef\\x07\\xde\\xb7\\x20\\xfb\\x10\\x3c\\xbc\\x0d\\x06\\ \\xc7\\x61\\x25\\xb0\\x22\\x07\\xea\\x9e\\xb0\\x29\\xc8\\xa8\\x05\\x8d\\x27\\x48\\ \\x29\\x1e\\xca\\x70\\x73\\x14\\xa3\\xcf\\xab\\xd0\\xd2\\x0b\\xac\\x54\\x1a\\x1a\\ \\x5e\\xc2\\x68\\x8e\\xa3\\x0a\\x13\\x06\\xaf\\xb7\\xc5\\x96\\xdf\\x6d\\x24\\xd4\\ 
\\x7f\\x0c\\x1b\\x7d\\x30\\xfd\\x59\\xe0\\x95\\x3c\\x9b\\x3e\\x53\\x92\\x60\\xc8\\ \\x4e\\x8e\\x7e\\xda\\xcc\\xa6\\x02\\x1b\\x30\\xa5\\x0e\\xc1\\xa4\\x01\\xed\\x40\\ \\x7f\\xc0\\x72\\x40\\x97\\xb2\\x1d\\xdc\\x73\\xbd\\x18\\xdb\\x87\\x43\\x60\\x18\\", "\\x20\\x2e\\x93\\xad\\xd0\\x1e\\x87\\xa4\\xa3\\xf6\\x49\\xe4\\xd4\\xc1\\x67\\x57\\ \\x02\\x8f\\x03\\xc7\\x0b\\x18\\xd0\\x2f\\x9f\\xe9\\x47\\xe0\\xa5\\xde\\xf0\\xc7\\ \\x3a\\x60\\x4c\\xf8\\x2f\\xb5\\x80\\x40\\x1b\\x49\\x6b\\xa1\\xaf\\x20\\x2f\\x0a\\ \\x39\\xb7\\x02\\xfb\\x66\\x01\\xf7\\x39\\xb2\\x1c\\x85\\xe4\\x23\\xe8\\xa3\\x65\\ \\xf0\\xf0\\x4f\\xf0\\xc2\\x30\\xa8\\x9d\\x65\\xbf\\x97\\xbf\\x0e\\xd2\\x76\\x42\\ \\xf0\\x5e\\x70\\x1d\\x87\\x82\\x57\\x6c\\x80\\x06\\x93\\x20\\x2b\\x44\\x8c\\xbe\\ \\x88\\x5c\\x70\\x2f\\x24\\x9d\\x3a\\x76\\x91\\x49\\x53\\xfb\\x67\\x50\\x97\\x06\\ \\xa3\\xee\\x23\\xd5\\x5c\\x00\\xec\\xe0\\x24\\xb0\\x18\\x13\\x53\\x33\\xe1\\x50\\ \\x2a\\xc4\\xdb\\xed\\x93\\xef\\xb1\\x11\\x2a\\x3c\\xd0\\x7e\\x1d\\x30\\xa8\\x0a\\ \\x3a\\xdd\\x8c\\x98\\xe5\\x61\\x58\\x1b\\x7c\\x92\\x0d\\xdf\\x7f\\x19\\x87\\x3b\\ \\x37\\x41\\xd6\\xef\\x90\\x17\\xc1\\x2a\\xb6\\x59\\xb2\\x7b\\x3b\\x70\\xc8\\x07\\ \\xc6\\x01\\x60\\x37\\xa4\\x7d\\x41\\x7c\\xee\\xeb\\xb4\\xfc\\xb1\\x14\\xce\\xc6\\ \\x61\\x76\\x16\\xb4\\x5f\\x64\\xab\\x6c\\xea\\x7a\\x98\\xb3\\x01\\x4e\\xcc\\xb1\\", "\\x35\\x8a\\x52\\x27\\xb1\\x5c\\x16\\x6f\\x48\\x4f\\xa1\\x78\\xca\\x75\\x6a\\x4a\\ \\x78\\x49\\x35\\x74\\x68\\x8e\\x4b\\x42\\x59\\x3e\\x59\\xef\\xa1\\xda\\x00\\x8a\\ \\x4f\\x4f\\x96\\x28\\x96\\xae\\x40\\xc1\\xfd\\xf6\\x04\\x2a\\xb0\\x14\\xc9\\x3f\\ \\xde\\x59\\xe8\\x34\\x89\\x4b\\xa4\\x94\\x6e\\xaa\\x5d\\x6a\\xea\\x45\\xa1\\x2f\\ \\x2c\\x64\\x6d\\x41\\x2a\\xcd\\x75\\xf4\\xe3\\x11\\x89\\xd5\\x92\\xeb\\x7d\\xe9\\ \\xd2\\x54\\xe9\\x17\\xa4\\x61\\x45\\x12\\x1f\\x4a\\x54\\x2a\\xc6\\x46\\x29\\x6d\\ \\x8c\\xc4\\x9b\\xb2\\x58\\x2b\\xa5\\xa7\\x39\\x8b\\x3d\\x29\\x75\\x4f\\x56\\x94\\ \\xb5\\x3a\\x4c\\x4c\\x01\\x0e\\x4a\\xc6\\x33\\x8a\\xd2\\xa4\\x38\\x01\\x1d\\x24\\ 
\\xa6\\xf6\\xb1\\xcb\\xa4\\xf3\\x5d\\x8a\\xe6\\x4e\\x51\\x1d\\x3f\\xeb\\x3b\\xa4\\ \\xd5\\xc4\\xf4\\x11\\x12\\xf1\\xcf\\x91\\x1e\\x47\\x9a\\x99\\x2d\\xe5\\xf8\\xa5\\ \\x2b\\x4c\\xe9\\x5b\\x54\\x17\\x45\\xd6\\x03\\x6e\\xc9\\xdf\\x5d\\xe2\\x5c\\x7b\\ \\x66\\xe8\\xc9\\x97\\xae\\x34\\xf4\\xe9\\x09\\x7b\\x28\\xba\\x4a\\x68\\xb1\\xd0\\ \\xd9\\x16\\xd4\\xbc\\x1d\\xe9\\x2a\\xbf\\xc4\\x15\\x12\\xf3\\xd5\\x4a\\xab\\x2c\\", "\\x87\\xce\\x0c\\x78\\xe0\\x1d\\xba\\x78\\xdf\\x60\\x32\\x27\\xed\\xaf\\xea\\x2c\\ \\x7a\\x44\\xa8\\x79\\x0b\\xd2\\x6c\\xa4\\xbb\\xd1\\x67\\x31\\xb4\\x40\\x48\\xaf\\ \\x21\\xf5\\x44\\x32\\x0b\\xa5\\x9b\\x4c\\xad\\xb0\\xd0\\xb3\\x42\\x2b\\x85\\x1a\\ \\xf6\\x20\\xbd\\x88\\xda\\x8f\\xa1\\x4f\\x84\\x76\\x75\\x22\\xcd\\x44\\x1a\\x8d\\ \\x74\\x77\\x92\\x6a\\x8e\\xd8\\x01\\xcb\\x9a\\xed\\x95\\xdc\\xa5\\x12\\xb3\\x24\\ \\x5e\\xb7\\x21\\xf7\\x86\\x5b\\x9a\\xd4\\xdd\\xce\\xf4\\xec\\x95\\xa6\\xb9\\xa5\\ \\xbc\\xa1\\xb2\\x08\\x48\\x7d\\xc7\\x4b\\xf3\\x3d\\x92\\xb1\\x51\\x41\\x24\\x15\\ \\x0c\\x94\\x6e\\xf1\\xaa\\xd1\\xa8\\xd3\\x7e\\xa4\\xbd\\xc8\\x4e\\x9b\\xf7\\x64\\ \\x49\\x77\\x23\\x0d\\xef\\xa3\\x33\\x54\\xca\\x0c\\xb7\\x9b\\x04\\x01\\xcb\\xeb\\ \\x58\\x9f\\x6f\\x0d\\x4a\\xf6\\xc0\\x0a\\xa0\\x75\\xa2\\x23\\xc4\\x78\\xe1\\x93\\ \\x3c\\xa6\\xfd\\xdb\\x66\\xa9\\xe3\\xc0\\x67\\xe7\\xc2\\x2f\\x4f\\xc2\\xbe\\x12\\ \\x9b\\x1f\\x9a\\x12\\xa0\\x7a\\x11\\x34\\xac\\x87\\x23\\xef\\x74\\xe0\\xcd\\x84\\ \\x7e\\xd7\\x27\\x63\\x2c\\x98\\x06\\xb1\\x09\\xf6\\xce\\x1b\\xf5\\xf0\\xc8\\x12\\", "\\x66\\xe8\\xc9\\x97\\xae\\x34\\xf4\\xe9\\x09\\x7b\\x28\\xba\\x4a\\x68\\xb1\\xd0\\ \\xd9\\x16\\xd4\\xbc\\x1d\\xe9\\x2a\\xbf\\xc4\\x15\\x12\\xf3\\xd5\\x4a\\xab\\x2c\\ \\x8e\\x4b\\xb9\\x23\\xa5\\x67\\x90\\xae\\x2a\\x94\\x58\\xa9\\x06\\xa4\\x20\\x4b\\ \\xed\\xcb\\x0b\\xf6\\x4a\\xbc\\xab\\x38\\xfb\\x55\\x81\\x65\\x4f\\x79\\xf9\\x46\\ \\xed\\x04\\x74\\x9a\\x6a\\x1d\\x24\\xa4\\xad\\x48\\x11\\x3a\\x15\\x34\\x7f\\x94\\ \\xdc\\x53\\x15\\x49\\xdd\\xab\\x36\\xef\\xef\\xfa\\xa2\\xf0\\xa0\\x16\\x76\\x3d\\ 
\\xa2\\x08\\xeb\\x74\\x77\\x9e\\xe4\\x9a\\xdb\\x9f\\x79\\xc6\\xda\\x14\\xd8\\x38\\ \\x19\\x92\\x8f\\xc1\\xe8\\x30\\xd1\\x2b\\x0d\\x52\\x9e\\xea\\x86\\xf1\\x45\\x02\\ \\x14\\x89\\xc8\\x05\\x89\\x54\\xbd\\xb8\\x8f\\xa5\\x8f\\x07\\xf8\\xfc\\x21\\xe1\\ \\xca\\xb0\\xc5\\x32\\xd9\\x21\\xcd\\xfa\\x20\\x44\\x4a\\x81\\x1b\\xa2\\xf8\\x87\\ \\x9f\\x80\\xd0\\x16\\x12\\x2a\\x4f\\x41\\xd4\\x87\\xd1\\x31\\x02\\xca\\x2c\\x22\\ \\xb3\\x77\\x11\\x9d\\xba\\x12\\xef\\xba\\x18\\x66\\x74\\x24\\xee\\x3e\\x9f\\x41\\ \\x53\\x00\\xb8\\x80\\x28\\x25\\xf8\\x68\\xc6\\x4b\\x39\\x71\\xd2\\x30\\xc8\\xe6\\", "\\x83\\x82\\xed\\x90\\xb1\\x0a\\xd8\\x9e\\x08\\xa6\\x05\\xe3\\xc3\\xfc\\xf4\\xb1\\ \\x18\\x14\\x85\\xdc\\x1b\\x80\\x03\\x06\\x74\\x26\\xa2\\xe2\\x04\\x3a\\x1f\\x69\\ \\xc7\\xec\\x1b\\xc3\\xdd\\x28\\x5c\\x8b\\x4d\\x8c\\xd5\\xbd\\xa1\\xf1\\x6a\\xa0\\ \\x18\\x68\\x05\\xb2\\x80\\x1e\\xc0\\x66\\x48\\xff\\x11\\x46\\xee\\x04\\x3f\\xf0\\ \\xdd\\xe5\\x28\\xf2\\x36\\x27\\x29\\xc1\\x02\\x0a\\x68\\x04\\xc2\\xb4\\x51\\xc0\\ \\x29\\xa0\\xbb\\x51\\x49\\x81\\xf1\\x14\\x46\\x49\\x03\\xc6\\x45\\x42\\x5d\\x81\\ \\x66\\xc0\\x04\\x6e\\x06\\xa6\\x6e\\x80\\x84\\xc7\\x9d\\xcd\\x99\\x0a\\xca\\x85\\ \\x78\\x1d\\xb8\\xd7\\x02\\x95\\x69\\xd0\\x91\\xe5\\x54\\x98\\x68\\x0b\\x27\\x89\\ \\x58\\x0f\\xfc\\xc0\\xb6\\xf9\\x21\\x2e\\x3a\\x08\\xc6\\x38\\x2f\\xd4\\x74\\x07\\ \\x86\\x01\\x17\\x01\\x67\\xc0\\xa8\\x85\\x9e\\xab\\x88\\xdd\\xd6\\x8c\\xbb\\x05\\ \\xd8\\xe9\\x81\\x5f\\xde\\x06\\x75\\x01\\x0a\\xc1\\x58\\x05\\xd7\\x3e\\x0b\\x97\\ \\xc6\\xed\\x47\\xee\\x02\\xfe\\x04\\x36\\x4f\\x27\\xca\\x62\\x56\\x92\\x4e\\x77\\ \\x1b\\xd8\\xa4\\xb2\\x01\\x1f\\x75\\x98\\xf9\\x8f\\x42\\xcd\\x1c\\x5a\\xcc\\xe1\\", "\\x9d\\x57\\x99\\xf8\\xdf\\x4b\\x41\\x8b\\x2e\\x27\\x16\\xbe\\x06\\x4f\\xc6\\x6b\\ \\xe0\\xa9\\x85\\xba\\xc7\\x61\\xf8\\xbb\\x70\\x32\\x8c\\x96\\xb5\\x12\\x49\\x08\\ \\xe2\\x5b\\x1e\\x82\\x77\\xee\\x21\\xce\\x2d\\x1c\\xa2\\x3b\\x3d\\x88\\x62\\xb2\\ \\x95\\xdd\\x5c\\xc6\\xf9\\x9e\\x8d\\xec\\x1b\\xf8\\x15\\xc5\\x5d\\x8a\\xd9\\xfc\\ 
\\xfd\\x97\\x4c\\xce\\x3c\\x97\\xfd\\xf7\\x1f\\xa7\\xab\\xd9\\x40\\xc3\\x2b\\xb7\\ \\x61\\xac\\xfc\\x3d\\x41\\x57\\x7f\\xdf\\xdd\\x1e\\xe6\\xac\\x05\\x4a\\x4c\\xea\\ \\x17\\x59\\x64\\xee\\xcb\\xc1\\x35\\x0f\\x38\\xde\\x1d\\x54\\x04\\xf4\\x71\\xc2\\ \\xcf\\x20\\x3b\\x00\\x4d\\x3a\\x49\\xfd\\xe7\\x65\\xac\\x4f\\x86\\x1b\\xde\\x05\\ \\xe3\\x89\\x31\\xc4\\x9b\\x5f\\x76\\xac\\x6c\\x22\\x2e\\x8e\\x41\\xff\\x77\\xed\\ \\xbc\\x9f\\x93\\x83\\x36\\x8e\\xc2\\xe0\\x12\\x3a\\xb8\\x80\\x24\\xbe\\xb0\\x83\\ \\xd7\\xcd\\x4b\\x09\\x2f\\x81\\x76\\x13\\x52\\x0c\\xf0\\xec\\x01\\xf3\\x43\\xe0\\ \\xb7\\xf1\\x54\\xec\\xfb\\x37\\xf9\\xb1\\x12\\x02\\x34\\x93\\xe9\\xdf\\x0d\\xe6\\ \\xd7\\x90\\x60\\xc1\\xed\\x2b\\xa8\\x5e\\xf7\\x22\\x4b\\x0a\\x07\\x91\\x5b\\xe6\\", "\\x00\\x69\\ \\x00\\x63\\x00\\x6f\\x00\\x6e\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\ \" qt_resource_struct_v1 = b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \" qt_resource_struct_v2 = b\"\\", "\\xff\\x21\\x79\\xee\\x90\\x8c\\xcf\\x25\\xe3\\x4a\\xbb\\x7f\\x8c\\x6a\\xc9\\x78\\ \\x47\\x32\\x26\\x48\\xac\\x90\\xd8\\xa7\\xbd\\x48\\x11\\x9e\\x95\\xd8\\xa9\\x72\\ \\xa2\\xb2\\x58\\xa7\\x36\\x0e\\x4a\\x2c\\xd7\\x7a\\xb3\\x5c\\xa1\\xae\\x77\\xa9\\ \\xf9\\xfa\\xb1\\xba\\xb6\\xfc\\x2e\\x69\\xd0\\x10\\x19\\x0d\\x7c\\xab\\xe5\\x13\\ \\x5d\\xdc\\x64\\x3e\\x40\\xda\\xaf\\x03\\xc0\\xbf\\x0e\\xba\\x75\\xf2\\xf5\\xe7\\ \\xa2\\xc7\\x06\\x38\\x5a\\x6b\\x32\\xfe\\x50\\x1a\\x19\\xd5\\x51\\x68\\x4f\\x83\\ \\x84\\x28\\xd4\\xd6\\xd1\\xf9\\x3c\\x6c\\x19\\x0f\\x7d\\x2c\\xe8\\xfe\\x0a\\xb8\\ \\xde\\x99\\x02\\x6d\\x57\\x41\\xe6\\x87\\x90\\xe1\\x42\\x97\\x54\\x41\\x71\\x19\\ \\x9d\\x57\\x99\\xf8\\xdf\\x4b\\x41\\x8b\\x2e\\x27\\x16\\xbe\\x06\\x4f\\xc6\\x6b\\ \\xe0\\xa9\\x85\\xba\\xc7\\x61\\xf8\\xbb\\x70\\x32\\x8c\\x96\\xb5\\x12\\x49\\x08\\ 
\\xe2\\x5b\\x1e\\x82\\x77\\xee\\x21\\xce\\x2d\\x1c\\xa2\\x3b\\x3d\\x88\\x62\\xb2\\ \\x95\\xdd\\x5c\\xc6\\xf9\\x9e\\x8d\\xec\\x1b\\xf8\\x15\\xc5\\x5d\\x8a\\xd9\\xfc\\ \\xfd\\x97\\x4c\\xce\\x3c\\x97\\xfd\\xf7\\x1f\\xa7\\xab\\xd9\\x40\\xc3\\x2b\\xb7\\", "\\x13\\xc7\\x21\\x7b\\x01\\x70\\x36\\xcb\\x09\\x43\\x89\\xc4\\x1f\\x28\\x63\\xe1\\ \\x0a\\x9b\\xc9\\x32\\x9d\\x30\\xd5\\xd5\\xe9\\x91\\x46\\xe0\\xc4\\x18\\xd0\\x83\\ \\x16\\x78\\xea\\x1c\\x21\\x1c\\x04\\xa1\\xfb\\xc8\\x9e\\x9f\\x41\\x4d\\x1e\\xc4\\ \\x5e\\x39\\xee\\xa4\\xc2\\x5c\\xb0\\xfa\\x40\\xb5\\xcb\\xce\\x57\\xb6\\x94\\xdb\\ \\x91\\x36\\x06\\xd6\\xc2\\x4b\\x39\\xa1\\x1c\\x2a\\x00\\x11\\x81\\x0b\\xde\\x82\\ \\xad\\x49\\xc4\\xbf\\x7c\\x93\\x0e\\x6e\\x64\\x37\\x06\\xb5\\x18\\x40\\x9c\\xae\\ \\xfc\\x06\\xe1\\x2d\\x98\\x3b\\x3c\\x50\\xb2\\x1a\\x88\\x09\\x12\\x5a\\x38\\x36\\ \\x0b\\xfa\\x06\\xa0\\xe7\\x5c\\xe7\\xc6\\xa6\\x3d\\x13\\xcc\\x3f\\x61\\xc6\\x87\\ \\xbc\\x30\\x3f\\x42\\xa6\\x63\\x2e\\x52\\x9d\\x1b\\x9b\\x06\\x87\\xb5\\xf2\\x1d\\ \\xf2\\x39\\x3b\\x1b\\x98\\x71\\x04\\xdb\\x17\\xec\\x06\\x7a\\x90\\xba\\x60\\x22\\ \\x5d\\x57\\x83\\x31\\x59\\x90\\xfe\\x81\\x8d\\x41\\x2b\\x1b\\xfe\\xf4\\xdb\\x4d\\ \\x4e\\x93\\xc3\\x8b\\x95\\x50\\x0b\\x66\\xe7\\x20\\xba\\x63\\x51\\x08\\x98\\xb9\\ \\x0f\\xc3\\x40\\x37\\xd6\\xca\\x77\\xe8\\xe0\\x16\\xc2\\x24\\x72\\x35\\x01\\x7a\\", "\\x28\\x3a\\x16\\x89\\xf3\\x24\\x6e\\x92\\xb8\\x56\\x62\\x92\\x42\\x4f\\xb8\\x55\\ \\x65\\x21\\xdd\\x80\\xc4\\x1d\\x12\\x1f\\x49\\xfc\\x21\\x99\\x9f\\x4b\\x85\\x57\\ \\x4b\\x6b\\x90\\x26\\xba\\x25\\xde\\x95\\xc5\\x59\\x89\\x72\\x69\\x46\\x0f\\xa9\\ \\x70\\x8c\\xe2\\x54\\x2a\\x4a\\x48\\x3a\\xb7\\x97\\xc4\\x1a\\x95\\x63\\x29\\x98\\ \\xfb\\xb0\\xe4\\x9a\\xa5\\x83\\x58\\x6a\\x64\\x9b\\xe4\\x2f\\x52\\x83\\xf7\\x2b\\ \\xad\\xa1\\x55\\xe3\\x86\\x48\\x1f\\x12\\x50\\xa5\\x7b\\xad\\xf4\\xb8\\x21\\x2a\\ \\x6a\\xd1\\xce\\x2a\\xa4\\xb5\\x68\\xb3\\xec\\x82\\xac\\x6b\\xd1\\x9a\\x18\\xd2\\ \\x55\\x48\\xd9\\x48\\x69\\x86\\xaa\\x3f\\x44\\xff\\x16\\x3a\\xb5\\xc6\\x94\\x92\\ 
\\xd2\\x25\\x86\\x38\\x8b\\xbd\\x47\\x32\\x6e\\x56\\xe4\\xca\\x62\\x9d\\x6a\\x41\\ \\x9b\\x84\\x82\\x3f\\x20\\x25\\x96\\x38\\xef\\x7f\\x26\\xb1\\x4c\\xe2\\x5b\\x29\\ \\xfd\\x46\\x69\\x2b\\xd2\\x85\\xa5\\x12\\x6f\\x49\\x6c\\x96\\x78\\x43\\x7a\\xd8\\ \\x23\\xf1\\xaa\\xc4\\x26\\x69\\xd8\\xc5\\x12\\x1f\\xa9\\x91\\x98\\xe4\\xba\\x5d\\ \\x71\\xf6\\xa9\\x81\\xb0\\xc2\\x5c\\x2b\\xb9\\x6e\\xd1\\x7a\\xc2\\xba\\x17\\xe9\\", "\\x7f\\x00\\x5d\\x27\\xf4\\x80\\xd0\\x0f\\x42\\xaf\\x0b\\xad\\x15\\xaa\\x8d\\xa3\\ \\xaf\\x84\\xaa\\x02\\x28\\x7e\\xef\\x00\\x89\\xe7\\xd5\\x41\\x95\\x9a\\x39\\x25\\ \\x8b\\x8d\\xb2\\xcc\\x0f\\x14\\xfc\\x0d\\xc5\\x6f\\xf0\\x49\\xcc\\x96\\xd8\\xa1\\ \\x28\\x87\\xa4\\xbf\\x77\\x97\\x26\\xa3\\x76\\x36\\xa9\\x9c\\x46\\x9d\\xa0\\x42\\ \\xed\\x4f\\xa5\\xc9\\x1a\\xd8\\x4f\\x07\\x8d\\x66\\x3d\\x52\\x70\\x4c\\x4a\\xfe\\ \\x51\\x67\\xb6\\xf8\\xb4\\xe3\\xdc\\x1b\\x34\\x3f\\x7f\\xa9\\x46\\x0e\\x8d\\xeb\\ \\x92\\xfc\\x72\\x99\\x59\\xde\\x36\\x82\\x35\\xc0\\x71\\xc8\\xf8\\x16\\x26\\x07\\ \\xa2\\xb8\\x3c\\x1e\\x20\\x09\\xc6\\x9d\\x65\\x75\\x29\\xcc\\xa9\\x8f\\x30\\xe4\\ \\x66\\xa0\\x2a\\x19\\x22\\x3e\\x88\\x87\\x69\\xb9\\x5e\\xfc\\x38\\x00\\xae\\x06\\ \\x1e\\x04\\x2e\\x8c\\xdb\\xd0\\x3a\\x5f\\x90\\x6c\\xc0\\x84\\x38\\x74\\xab\\x07\\ \\x1e\\x39\\x88\\xc6\\xfe\\x17\\x3f\\x2f\\x93\\xce\\x12\\x0c\\xda\\x31\\xac\\x3c\\ \\x12\\x67\\x8f\\x20\\x3e\\x35\\x0c\\x1c\\x00\\x22\\x18\\xf8\\xb0\\x36\\xf6\\x83\\ \\xc4\\x44\\x7c\\x74\\x12\\x25\\x4e\\xb1\\xf9\\x09\\xc9\\x2b\\xf2\\xe0\\xf0\\xcd\\", "# # Created by: The Resource Compiler for PyQt5 (Qt v5.12.5) # #", "\\x57\\x82\\x87\\x65\\x56\\x0e\\x2c\\xd9\\xde\\x45\\x5a\\x81\\x74\\x27\\x6a\\xac\\ \\x43\\x17\\xcb\\xa3\\xd8\\xa3\\x48\\xd9\\xf9\\xd2\\xe0\\x2e\\xda\\x7d\\x08\\x0d\\ \\x94\\x57\\x7f\\x84\\x90\\x0a\\x3d\\x52\\x62\\xa2\\x94\\x9a\\x2c\\x0d\\x34\\x54\\ \\x7f\\x00\\x5d\\x27\\xf4\\x80\\xd0\\x0f\\x42\\xaf\\x0b\\xad\\x15\\xaa\\x8d\\xa3\\ \\xaf\\x84\\xaa\\x02\\x28\\x7e\\xef\\x00\\x89\\xe7\\xd5\\x41\\x95\\x9a\\x39\\x25\\ 
\\x8b\\x8d\\xb2\\xcc\\x0f\\x14\\xfc\\x0d\\xc5\\x6f\\xf0\\x49\\xcc\\x96\\xd8\\xa1\\ \\x28\\x87\\xa4\\xbf\\x77\\x97\\x26\\xa3\\x76\\x36\\xa9\\x9c\\x46\\x9d\\xa0\\x42\\ \\xed\\x4f\\xa5\\xc9\\x1a\\xd8\\x4f\\x07\\x8d\\x66\\x3d\\x52\\x70\\x4c\\x4a\\xfe\\ \\x51\\x67\\xb6\\xf8\\xb4\\xe3\\xdc\\x1b\\x34\\x3f\\x7f\\xa9\\x46\\x0e\\x8d\\xeb\\ \\x92\\xfc\\x72\\x99\\x59\\xde\\x36\\x82\\x35\\xc0\\x71\\xc8\\xf8\\x16\\x26\\x07\\ \\xa2\\xb8\\x3c\\x1e\\x20\\x09\\xc6\\x9d\\x65\\x75\\x29\\xcc\\xa9\\x8f\\x30\\xe4\\ \\x66\\xa0\\x2a\\x19\\x22\\x3e\\x88\\x87\\x69\\xb9\\x5e\\xfc\\x38\\x00\\xae\\x06\\ \\x1e\\x04\\x2e\\x8c\\xdb\\xd0\\x3a\\x5f\\x90\\x6c\\xc0\\x84\\x38\\x74\\xab\\x07\\", "\\x41\\x48\\x2f\\xdb\\x01\\xa9\\x7d\\x09\\xd2\\x40\\xaf\\xe4\\x2a\\x92\\xe8\\xa7\\ \\xce\\xfb\\x0d\\xbd\\x1e\\x43\\xcb\\x85\\x2a\\xcb\\x50\\xe0\\x15\\x14\\x7e\\xd3\\ \\xd0\\xd7\\x16\\xfa\\xde\\x42\\x91\\xcf\\x91\\x35\\x07\\x35\\x6f\\x40\\xe5\\x42\\ \\x2d\\x9d\\xc8\\x7a\\xd0\\x70\\x02\\xd8\\x42\\x89\\xb7\\xa5\\x82\\x0b\\x14\\xd9\\ \\x87\\x34\\x0b\\x89\\xc5\\xf6\\x08\\xe1\\xda\\x22\\xbb\\xb1\\x79\\x59\\xe2\\x4f\\ \\xc9\\x7b\\x8b\\xf4\\x3c\\xb2\\x78\\x4d\\x47\\x88\\xab\\x95\\x93\\xd2\\x2d\\x48\\ \\x09\\x97\\xc9\\xa2\\x5a\\xe2\\xa8\\xe2\\x74\\xd8\\x73\\x94\\x84\\x71\\xd2\\x84\\ \\x54\\x29\\xad\\x44\\x1a\\x93\\x22\\x1a\\xd7\\x20\\x8d\\xb0\\x67\\x22\\x95\\xb5\\ \\xa8\\x7a\\x1f\\x52\\xae\\xcb\\x59\\xc4\\x20\\xb5\\x5d\\xe3\\xd5\\x8f\\xad\\x68\\ \\x6f\\x33\\x0a\\xbf\\x66\\x48\\x05\\xc9\\x12\\x57\\x4a\\xfe\\x21\\xaa\\x2c\\x47\\ \\x3b\\x9a\\x91\\x8a\\x0d\\xc9\\xb8\\x44\\x72\\x4f\\x53\\xec\\xfe\\x02\\x55\\x34\\ \\x22\\xeb\\x30\\xd2\\xa4\\x44\\xc9\\x78\\xca\\x19\\xf6\\xcc\\x97\\x86\\x95\\x48\\ \\xfb\\x90\\x0a\\xa6\\x48\\xfc\\x20\\x79\\x2f\\x96\\x16\\xe3\\x4c\\xbc\\xbe\\x92\\", "\\xa2\\xb8\\x3c\\x1e\\x20\\x09\\xc6\\x9d\\x65\\x75\\x29\\xcc\\xa9\\x8f\\x30\\xe4\\ \\x66\\xa0\\x2a\\x19\\x22\\x3e\\x88\\x87\\x69\\xb9\\x5e\\xfc\\x38\\x00\\xae\\x06\\ \\x1e\\x04\\x2e\\x8c\\xdb\\xd0\\x3a\\x5f\\x90\\x6c\\xc0\\x84\\x38\\x74\\xab\\x07\\ 
\\x1e\\x39\\x88\\xc6\\xfe\\x17\\x3f\\x2f\\x93\\xce\\x12\\x0c\\xda\\x31\\xac\\x3c\\ \\x12\\x67\\x8f\\x20\\x3e\\x35\\x0c\\x1c\\x00\\x22\\x18\\xf8\\xb0\\x36\\xf6\\x83\\ \\xc4\\x44\\x7c\\x74\\x12\\x25\\x4e\\xb1\\xf9\\x09\\xc9\\x2b\\xf2\\xe0\\xf0\\xcd\\ \\xf4\\x30\\x3b\\x99\\x68\\xee\\x87\\x0b\\xc6\\x53\\xf0\\xa6\\x20\\xdf\\x4b\\x81\\ \\x37\\x99\\xe2\\x16\\x93\\x7b\\x3c\\x07\\x31\\x17\\x0e\\xe9\\x44\\x61\\x60\\x00\\ \\x18\\xc5\\x30\\x39\\x0e\\x74\\x58\\xd0\\xe5\\x04\\xc1\\x5b\\xa1\\xd5\\x03\\x33\\ \\x16\\x00\\x1b\\x92\\x41\\x01\\x48\\x6d\\xe5\\xf4\\xdc\\x28\\xf7\\x3c\\x0d\\x1b\\ \\x80\\x33\\x67\\xa0\\xe8\\x13\\x48\\xfe\\x10\\xfa\\x76\\x40\\xb3\\x01\\x9e\\xf5\\ \\x90\\x38\\x2a\\x17\\x96\\x65\\x63\\x5a\\x10\\xfb\\x5f\\x13\\xfc\\xed\\x23\\xa0\\ \\x02\\x38\\x06\\x04\\x60\\xdf\\x38\\x4c\\xd3\\x0b\\x7d\\x77\\x03\\xc2\\x45\\x3b\\", "\\xf4\\x30\\x3b\\x99\\x68\\xee\\x87\\x0b\\xc6\\x53\\xf0\\xa6\\x20\\xdf\\x4b\\x81\\ \\x37\\x99\\xe2\\x16\\x93\\x7b\\x3c\\x07\\x31\\x17\\x0e\\xe9\\x44\\x61\\x60\\x00\\ \\x18\\xc5\\x30\\x39\\x0e\\x74\\x58\\xd0\\xe5\\x04\\xc1\\x5b\\xa1\\xd5\\x03\\x33\\ \\x16\\x00\\x1b\\x92\\x41\\x01\\x48\\x6d\\xe5\\xf4\\xdc\\x28\\xf7\\x3c\\x0d\\x1b\\ \\x80\\x33\\x67\\xa0\\xe8\\x13\\x48\\xfe\\x10\\xfa\\x76\\x40\\xb3\\x01\\x9e\\xf5\\ \\x90\\x38\\x2a\\x17\\x96\\x65\\x63\\x5a\\x10\\xfb\\x5f\\x13\\xfc\\xed\\x23\\xa0\\ \\x02\\x38\\x06\\x04\\x60\\xdf\\x38\\x4c\\xd3\\x0b\\x7d\\x77\\x03\\xc2\\x45\\x3b\\ \\x66\\xa0\\x1f\\x84\\x3a\\xf1\\x70\\x9c\\x46\\x92\\x88\\x64\\x9c\\x02\\xab\\x15\\ \\xd1\\x04\\xf1\\x54\\x0a\\x4f\\x0f\\x85\\x13\\x82\\xba\\x22\\x16\\x1d\\x9b\\x42\\ \\x63\\xd5\\xc5\\xb4\\x9b\\x01\\xc2\\xed\\x43\\xe1\\x93\\x76\\xd4\\xb1\\x12\\xe9\\ \\x69\\x74\\xa8\\x19\\x6d\\xaf\\x40\\xba\\x18\\x59\\x43\\xd0\\xd3\\x21\\x54\\xbf\\ \\x18\\xc9\\x9f\\x2a\\xb9\\xfc\\x52\\x8a\\xa9\\x8a\\x15\\x86\\xae\\x10\\xfa\\x57\\ \\x0c\\x35\\xcd\\x46\\x56\\x4f\\xa4\\x81\\x09\\x52\\x41\\x9a\\xa2\\x8b\\xd0\\x71\\", "\\x96\\x78\\x59\\xe2\\xff\\x6c\\x07\\x3c\\x7c\\xac\\xf4\\x32\\x52\\xde\\x60\\x89\\ 
\\x47\\x25\\x5e\\x93\\xf2\\xc6\\x49\\xfe\\xcb\\x24\\x96\\x4b\\x3c\\x23\\xf1\\x93\\ \\x94\\x76\\x8f\\x64\\x0e\\x74\\x2c\\xfe\\xcb\\x12\\xdf\\xe8\\x18\\x52\\x9c\\x46\\ \\x29\\x65\\x9c\\x54\\x92\\x24\\xb1\\x54\\xad\\x9c\\x50\\x35\\x31\\xad\\x45\\x6a\\ \\x20\\x2e\\x93\\xad\\xd0\\x1e\\x87\\xa4\\xa3\\xf6\\x49\\xe4\\xd4\\xc1\\x67\\x57\\ \\x02\\x8f\\x03\\xc7\\x0b\\x18\\xd0\\x2f\\x9f\\xe9\\x47\\xe0\\xa5\\xde\\xf0\\xc7\\ \\x3a\\x60\\x4c\\xf8\\x2f\\xb5\\x80\\x40\\x1b\\x49\\x6b\\xa1\\xaf\\x20\\x2f\\x0a\\ \\x39\\xb7\\x02\\xfb\\x66\\x01\\xf7\\x39\\xb2\\x1c\\x85\\xe4\\x23\\xe8\\xa3\\x65\\ \\xf0\\xf0\\x4f\\xf0\\xc2\\x30\\xa8\\x9d\\x65\\xbf\\x97\\xbf\\x0e\\xd2\\x76\\x42\\ \\xf0\\x5e\\x70\\x1d\\x87\\x82\\x57\\x6c\\x80\\x06\\x93\\x20\\x2b\\x44\\x8c\\xbe\\ \\x88\\x5c\\x70\\x2f\\x24\\x9d\\x3a\\x76\\x91\\x49\\x53\\xfb\\x67\\x50\\x97\\x06\\ \\xa3\\xee\\x23\\xd5\\x5c\\x00\\xec\\xe0\\x24\\xb0\\x18\\x13\\x53\\x33\\xe1\\x50\\ \\x2a\\xc4\\xdb\\xed\\x93\\xef\\xb1\\x11\\x2a\\x3c\\xd0\\x7e\\x1d\\x30\\xa8\\x0a\\", "\\x2b\\x4c\\xe9\\x5b\\x54\\x17\\x45\\xd6\\x03\\x6e\\xc9\\xdf\\x5d\\xe2\\x5c\\x7b\\ \\x66\\xe8\\xc9\\x97\\xae\\x34\\xf4\\xe9\\x09\\x7b\\x28\\xba\\x4a\\x68\\xb1\\xd0\\ \\xd9\\x16\\xd4\\xbc\\x1d\\xe9\\x2a\\xbf\\xc4\\x15\\x12\\xf3\\xd5\\x4a\\xab\\x2c\\ \\x8e\\x4b\\xb9\\x23\\xa5\\x67\\x90\\xae\\x2a\\x94\\x58\\xa9\\x06\\xa4\\x20\\x4b\\ \\xed\\xcb\\x0b\\xf6\\x4a\\xbc\\xab\\x38\\xfb\\x55\\x81\\x65\\x4f\\x79\\xf9\\x46\\ \\xed\\x04\\x74\\x9a\\x6a\\x1d\\x24\\xa4\\xad\\x48\\x11\\x3a\\x15\\x34\\x7f\\x94\\ \\xdc\\x53\\x15\\x49\\xdd\\xab\\x36\\xef\\xef\\xfa\\xa2\\xf0\\xa0\\x16\\x76\\x3d\\ \\xa2\\x08\\xeb\\x74\\x77\\x9e\\xe4\\x9a\\xdb\\x9f\\x79\\xc6\\xda\\x14\\xd8\\x38\\ \\x19\\x92\\x8f\\xc1\\xe8\\x30\\xd1\\x2b\\x0d\\x52\\x9e\\xea\\x86\\xf1\\x45\\x02\\ \\x14\\x89\\xc8\\x05\\x89\\x54\\xbd\\xb8\\x8f\\xa5\\x8f\\x07\\xf8\\xfc\\x21\\xe1\\ \\xca\\xb0\\xc5\\x32\\xd9\\x21\\xcd\\xfa\\x20\\x44\\x4a\\x81\\x1b\\xa2\\xf8\\x87\\ \\x9f\\x80\\xd0\\x16\\x12\\x2a\\x4f\\x41\\xd4\\x87\\xd1\\x31\\x02\\xca\\x2c\\x22\\ 
\\xb3\\x77\\x11\\x9d\\xba\\x12\\xef\\xba\\x18\\x66\\x74\\x24\\xee\\x3e\\x9f\\x41\\", "= b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x01\\x74\\x20\\x97\\x84\\xbd\\ \" qt_version = [int(v) for", "\\x63\\xd5\\xc5\\xb4\\x9b\\x01\\xc2\\xed\\x43\\xe1\\x93\\x76\\xd4\\xb1\\x12\\xe9\\ \\x69\\x74\\xa8\\x19\\x6d\\xaf\\x40\\xba\\x18\\x59\\x43\\xd0\\xd3\\x21\\x54\\xbf\\ \\x18\\xc9\\x9f\\x2a\\xb9\\xfc\\x52\\x8a\\xa9\\x8a\\x15\\x86\\xae\\x10\\xfa\\x57\\ \\x0c\\x35\\xcd\\x46\\x56\\x4f\\xa4\\x81\\x09\\x52\\x41\\x9a\\xa2\\x8b\\xd0\\x71\\ \\xa1\\xf8\\xcd\\x3e\\x89\\xc7\\x25\\x56\\x4a\\x3d\\x67\\x4b\\xcb\\x90\\x36\\x21\\ \\xdd\\xe4\\x96\\x98\\x22\\xf1\\x9e\\xc4\\x42\\x89\\xc9\\xd2\\xa7\\x48\\x2c\\x95\\ \\x28\\x73\\xe0\\x85\\x2c\\x5e\\x56\\x05\\x96\\xda\\x78\\x45\\x4a\\x19\\xae\\x06\\ \\xa4\\x2a\\x1a\\x14\\x4b\\xb9\\x43\\xba\\x20\\x59\\x61\\xe3\\x65\\xfd\\xb3\\x58\\ \\x7a\\xce\\x1f\\xd6\\xdb\\xee\\x23\\xda\\x53\\x34\\x5f\\x9c\\xe8\\x40\\x0d\\xfb\\ \\x90\\x1e\\x42\\x7f\\xb6\\x23\\x6b\\x13\\xd2\\xf9\\xa6\\xc2\\xff\\x41\\x95\\xeb\\ \\x90\\xd2\\x3d\\x92\\xd1\\x53\\x2a\\x71\\xab\\x6a\\x13\\xfa\\x8f\\xd0\\xea\\xb3\\ \\x28\\x3a\\x16\\x89\\xf3\\x24\\x6e\\x92\\xb8\\x56\\x62\\x92\\x42\\x4f\\xb8\\x55\\ \\x65\\x21\\xdd\\x80\\xc4\\x1d\\x12\\x1f\\x49\\xfc\\x21\\x99\\x9f\\x4b\\x85\\x57\\", "\\xed\\xcb\\x0b\\xf6\\x4a\\xbc\\xab\\x38\\xfb\\x55\\x81\\x65\\x4f\\x79\\xf9\\x46\\ \\xed\\x04\\x74\\x9a\\x6a\\x1d\\x24\\xa4\\xad\\x48\\x11\\x3a\\x15\\x34\\x7f\\x94\\ \\xdc\\x53\\x15\\x49\\xdd\\xab\\x36\\xef\\xef\\xfa\\xa2\\xf0\\xa0\\x16\\x76\\x3d\\ \\xa2\\x08\\xeb\\x74\\x77\\x9e\\xe4\\x9a\\xdb\\x9f\\x79\\xc6\\xda\\x14\\xd8\\x38\\ \\x19\\x92\\x8f\\xc1\\xe8\\x30\\xd1\\x2b\\x0d\\x52\\x9e\\xea\\x86\\xf1\\x45\\x02\\ 
\\x14\\x89\\xc8\\x05\\x89\\x54\\xbd\\xb8\\x8f\\xa5\\x8f\\x07\\xf8\\xfc\\x21\\xe1\\ \\xca\\xb0\\xc5\\x32\\xd9\\x21\\xcd\\xfa\\x20\\x44\\x4a\\x81\\x1b\\xa2\\xf8\\x87\\ \\x9f\\x80\\xd0\\x16\\x12\\x2a\\x4f\\x41\\xd4\\x87\\xd1\\x31\\x02\\xca\\x2c\\x22\\ \\xb3\\x77\\x11\\x9d\\xba\\x12\\xef\\xba\\x18\\x66\\x74\\x24\\xee\\x3e\\x9f\\x41\\ \\x53\\x00\\xb8\\x80\\x28\\x25\\xf8\\x68\\xc6\\x4b\\x39\\x71\\xd2\\x30\\xc8\\xe6\\ \\x77\\xba\\x10\\x26\\xca\\x20\\x76\\xe1\\x3f\\x6f\\x01\\xee\\xd0\\x49\\xde\\xf0\\ \\xcd\\x23\\xd2\\x69\\xb1\\xc2\\x5f\\xcc\\x65\\x9d\\xd5\\x84\\x5b\\x45\\xa3\\xbf\\ \\x81\\x98\\x75\\x02\\xde\\xdf\\x79\\x95\\x46\\x6d\\x7b\\x4f\\xd7\\x1c\\x9a\\xad\\", "\\x29\\x87\\xd6\\xa3\\x23\\xd0\\xa9\\x2c\\x5c\\x8f\\x4d\\xc0\\x38\\xe4\\xa1\\xf7\\ \\xeb\\x29\\xf0\\x99\\x8b\\xe3\\xea\\x46\\x60\\xed\\x83\\x64\\x86\\xfb\\x91\\x1c\\ \\x4d\\xa7\\xa6\\xc5\\xa0\\xe0\\x64\\x22\\x93\\xdf\\x9a\\x8b\\x59\\x19\\x8c\\x92\\ \\xb8\\x0d\\xc8\\x81\\x78\\x14\\xc2\\x64\\x91\\x91\\x68\\xc2\\x56\\x17\\xb8\\xce\\ \\x62\\x8d\\x83\\x0d\\x37\\xc1\\x68\\x82\\xdc\\x70\\x77\\x02\\x2c\\xcc\\x05\\xf2\\ \\xc1\\x30\\xe0\\x9c\\xcd\\x1c\\x7b\\xb3\\x8d\\xde\\xcf\\xc2\\xae\\x3e\\xb0\\x0b\\ \\x88\\xb9\\xc1\\x9d\\x03\\x81\\x81\\xc0\\xd3\\xfb\\xc1\\x3c\\x03\\xec\\x43\\x44\\ \\x11\\xb3\\x49\\x9a\\xf3\\x24\\x7c\\x9c\\x45\\x6c\\x8c\\xa0\\xcb\\xef\\xb8\\xe8\\ \\x0a\\x94\\x63\\x1a\\x3e\\x7c\\x5d\\x93\\xd1\\xf8\\x16\\xd2\\x9a\\x9b\\x70\\x75\\ \\xf6\\x84\\x68\\x6f\\x28\\x3d\\xc3\\xd0\\x4b\\xcb\\xc9\\x8c\\x2c\\x62\\xdf\\xb2\\ \\x9d\\xdc\\xb2\\xfb\\x08\\x1e\\xef\\x11\\x16\\x56\\x26\\x71\\xdb\\xb1\\x5f\\xf8\\ \\x57\\x82\\x87\\x65\\x56\\x0e\\x2c\\xd9\\xde\\x45\\x5a\\x81\\x74\\x27\\x6a\\xac\\ \\x43\\x17\\xcb\\xa3\\xd8\\xa3\\x48\\xd9\\xf9\\xd2\\xe0\\x2e\\xda\\x7d\\x08\\x0d\\", "\\xfc\\x06\\xe1\\x2d\\x98\\x3b\\x3c\\x50\\xb2\\x1a\\x88\\x09\\x12\\x5a\\x38\\x36\\ \\x0b\\xfa\\x06\\xa0\\xe7\\x5c\\xe7\\xc6\\xa6\\x3d\\x13\\xcc\\x3f\\x61\\xc6\\x87\\ \\xbc\\x30\\x3f\\x42\\xa6\\x63\\x2e\\x52\\x9d\\x1b\\x9b\\x06\\x87\\xb5\\xf2\\x1d\\ 
\\xf2\\x39\\x3b\\x1b\\x98\\x71\\x04\\xdb\\x17\\xec\\x06\\x7a\\x90\\xba\\x60\\x22\\ \\x5d\\x57\\x83\\x31\\x59\\x90\\xfe\\x81\\x8d\\x41\\x2b\\x1b\\xfe\\xf4\\xdb\\x4d\\ \\x4e\\x93\\xc3\\x8b\\x95\\x50\\x0b\\x66\\xe7\\x20\\xba\\x63\\x51\\x08\\x98\\xb9\\ \\x0f\\xc3\\x40\\x37\\xd6\\xca\\x77\\xe8\\xe0\\x16\\xc2\\x24\\x72\\x35\\x01\\x7a\\ \\x02\\x73\\xdc\\xaf\\xe2\\x4e\\x9a\\x00\\xd9\\x41\\x38\\xf8\\x2b\\xd2\\x30\\xa4\\ \\x41\\x48\\x2f\\xdb\\x01\\xa9\\x7d\\x09\\xd2\\x40\\xaf\\xe4\\x2a\\x92\\xe8\\xa7\\ \\xce\\xfb\\x0d\\xbd\\x1e\\x43\\xcb\\x85\\x2a\\xcb\\x50\\xe0\\x15\\x14\\x7e\\xd3\\ \\xd0\\xd7\\x16\\xfa\\xde\\x42\\x91\\xcf\\x91\\x35\\x07\\x35\\x6f\\x40\\xe5\\x42\\ \\x2d\\x9d\\xc8\\x7a\\xd0\\x70\\x02\\xd8\\x42\\x89\\xb7\\xa5\\x82\\x0b\\x14\\xd9\\ \\x87\\x34\\x0b\\x89\\xc5\\xf6\\x08\\xe1\\xda\\x22\\xbb\\xb1\\x79\\x59\\xe2\\x4f\\", "\\x4e\\x93\\xc3\\x8b\\x95\\x50\\x0b\\x66\\xe7\\x20\\xba\\x63\\x51\\x08\\x98\\xb9\\ \\x0f\\xc3\\x40\\x37\\xd6\\xca\\x77\\xe8\\xe0\\x16\\xc2\\x24\\x72\\x35\\x01\\x7a\\ \\x02\\x73\\xdc\\xaf\\xe2\\x4e\\x9a\\x00\\xd9\\x41\\x38\\xf8\\x2b\\xd2\\x30\\xa4\\ \\x41\\x48\\x2f\\xdb\\x01\\xa9\\x7d\\x09\\xd2\\x40\\xaf\\xe4\\x2a\\x92\\xe8\\xa7\\ \\xce\\xfb\\x0d\\xbd\\x1e\\x43\\xcb\\x85\\x2a\\xcb\\x50\\xe0\\x15\\x14\\x7e\\xd3\\ \\xd0\\xd7\\x16\\xfa\\xde\\x42\\x91\\xcf\\x91\\x35\\x07\\x35\\x6f\\x40\\xe5\\x42\\ \\x2d\\x9d\\xc8\\x7a\\xd0\\x70\\x02\\xd8\\x42\\x89\\xb7\\xa5\\x82\\x0b\\x14\\xd9\\ \\x87\\x34\\x0b\\x89\\xc5\\xf6\\x08\\xe1\\xda\\x22\\xbb\\xb1\\x79\\x59\\xe2\\x4f\\ \\xc9\\x7b\\x8b\\xf4\\x3c\\xb2\\x78\\x4d\\x47\\x88\\xab\\x95\\x93\\xd2\\x2d\\x48\\ \\x09\\x97\\xc9\\xa2\\x5a\\xe2\\xa8\\xe2\\x74\\xd8\\x73\\x94\\x84\\x71\\xd2\\x84\\ \\x54\\x29\\xad\\x44\\x1a\\x93\\x22\\x1a\\xd7\\x20\\x8d\\xb0\\x67\\x22\\x95\\xb5\\ \\xa8\\x7a\\x1f\\x52\\xae\\xcb\\x59\\xc4\\x20\\xb5\\x5d\\xe3\\xd5\\x8f\\xad\\x68\\ \\x6f\\x33\\x0a\\xbf\\x66\\x48\\x05\\xc9\\x12\\x57\\x4a\\xfe\\x21\\xaa\\x2c\\x47\\", "\" qt_resource_name = b\"\\ \\x00\\x05\\ \\x00\\x73\\x5e\\x63\\ \\x00\\x6c\\ 
\\x00\\x6f\\x00\\x67\\x00\\x6f\\x00\\x73\\ \\x00\\x08\\ \\x0a\\x61\\x5a\\xa7\\ \\x00\\x69\\ \\x00\\x63\\x00\\x6f\\x00\\x6e\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\ \"", "\\x16\\x51\\x5b\\xbe\\x8d\\x72\\x32\\xd8\\x88\\x97\\x29\\x40\\xbf\\xba\\x7d\\x98\\ \\x0d\\x09\\x10\\x6f\\x05\\x8e\\x02\\x9e\\x10\\xc4\\x60\\x67\\xa9\\x9d\\xfd\\xaf\\ \\xde\\x0d\\xe6\\x4a\\x0f\\x58\\x3d\\x1d\\x90\\x95\\x13\\x9b\\xf6\\x2d\\x1f\\xbd\\ \\x15\\xe4\\x88\\xdf\\x2e\\xa0\\x15\\x38\\x0b\\x1c\\x2f\\x82\\xd0\\x5c\\xa0\\xf0\\ \\x57\\x60\\x9d\\xcd\\x63\\x1a\\x0a\\xcb\\x06\\xd0\\x50\\x01\\x91\\x81\\x40\\xce\\ \\x61\\xe7\\x39\\xb9\\xd0\\x39\\xc8\\xc6\\x6e\\x04\\xc0\\xc0\\xcd\\x69\\x3b\\x68\\ \\xe5\\x00\\x47\\x52\\x69\\x89\\x5e\\x4e\\x10\\x48\\x21\\x88\\xcb\\x58\\x08\\xbe\\ \\x00\\x6c\\x01\\xf6\\x3e\\x41\\x2d\\xb9\\x44\\xf1\\x60\\x81\\x9d\\x11\\x93\\xca\\ \\x31\\xb7\\xba\\xe1\\x82\\x9f\\x81\\x02\\x03\\xa2\\x99\\x30\\x14\\x0e\\xa4\\xc0\\ \\x13\\xc7\\x21\\x7b\\x01\\x70\\x36\\xcb\\x09\\x43\\x89\\xc4\\x1f\\x28\\x63\\xe1\\ \\x0a\\x9b\\xc9\\x32\\x9d\\x30\\xd5\\xd5\\xe9\\x91\\x46\\xe0\\xc4\\x18\\xd0\\x83\\ \\x16\\x78\\xea\\x1c\\x21\\x1c\\x04\\xa1\\xfb\\xc8\\x9e\\x9f\\x41\\x4d\\x1e\\xc4\\ \\x5e\\x39\\xee\\xa4\\xc2\\x5c\\xb0\\xfa\\x40\\xb5\\xcb\\xce\\x57\\xb6\\x94\\xdb\\", "in QtCore.qVersion().split('.')] if qt_version < [5, 8, 0]: rcc_version = 1 qt_resource_struct =", "\\x34\\x8c\\x04\\xa6\\xdb\\x73\\x0c\\x8e\\x42\\xd1\\x01\\xc8\\xfc\\x9e\\xc8\\xa4\\ \\xa3\\x54\\x4e\\x6f\\x64\\x76\\x29\\xdc\\x0e\\x64\\xee\\x04\\xea\\xea\\x31\\x6b\\ \\x4e\\x60\\x5a\\x51\\x4c\\xd7\\x6f\\xa0\\x6e\\x50\\x6f\\x40\\xdd\\x5d\\x58\\xa7\\ \\xfb\\x62\\x5a\\x3e\\xc4\\x4c\\x0c\\xd2\\x70\\xf1\\x7f\\xd0\\x77\\x33\\x9c\\x13\\ \\xc4\\x5d\\x0f\\xfe\\x3a\\x70\\x6f\\x07\\xe3\\xb8\\xe3\\x6c\\x7a\\x54\\x91\\xbe\\ \\x25\\x9b\\x5d\\x4c\\x22\\x89\\x8d\\x24\\x45\\x0b\\x68\\x8d\\x9c\\x8f\\x72\\xf7\\ \\xd1\\xb2\\xeb\\x72\\x9a\\xae\\xf6\\xb0\\x6a\\xfd\\x9d\\x7c\\xf8\\xa4\\xc1\\x23\\ 
\\xef\\x1c\\xa4\\xa5\\x7d\\x0a\\xff\\x1f\\xa7\\x48\\xb3\\x27\\x67\\x17\\xe2\\x1e\\ \\x00\\x00\\x00\\x00\\x49\\x45\\x4e\\x44\\xae\\x42\\x60\\x82\\ \" qt_resource_name = b\"\\", "\\x58\\x9f\\x6f\\x0d\\x4a\\xf6\\xc0\\x0a\\xa0\\x75\\xa2\\x23\\xc4\\x78\\xe1\\x93\\ \\x3c\\xa6\\xfd\\xdb\\x66\\xa9\\xe3\\xc0\\x67\\xe7\\xc2\\x2f\\x4f\\xc2\\xbe\\x12\\ \\x9b\\x1f\\x9a\\x12\\xa0\\x7a\\x11\\x34\\xac\\x87\\x23\\xef\\x74\\xe0\\xcd\\x84\\ \\x7e\\xd7\\x27\\x63\\x2c\\x98\\x06\\xb1\\x09\\xf6\\xce\\x1b\\xf5\\xf0\\xc8\\x12\\ \\x08\\xc5\\x61\\xcd\\x9d\\x0e\\xe7\\x6d\\x86\\x09\\x31\\xa8\\x1d\\x83\\x41\\x10\\ \\xda\\xf2\\x6c\\x37\\x61\\x96\\x11\\x02\\x38\\x3b\\x01\\x3c\\x11\\xd2\\x53\\xbe\\ \\xc7\\x72\\x7e\\x7b\\x3b\\x13\\x89\\xbf\\xf3\\x03\\x54\\xe6\\x41\\xef\\x32\\x0a\\ \\x86\\xf5\\xc0\\xd4\\x29\\x11\\x00\\x1a\\x8a\\x80\\x30\\xd0\\x2c\\xce\\x59\\x0a\\ \\xe7\\x00\\x5f\\xf4\\x05\\xa6\\x01\\x66\\x9d\\xed\\xa6\\xde\\x82\\x69\\x9f\\xd8\\ \\x26\\xd3\\x72\\xf4\\xb9\\xcc\\x11\\x63\\x0b\\xd8\\xef\\x82\\x3d\\x3e\\xe8\\xd2\\ \\x00\\x99\\x17\\x16\\xc1\\xf2\\xa9\\xf6\\x26\\x50\\x0f\\xac\\x84\\xe9\\x6f\\xd0\\ \\xf1\\x58\\x1b\\x7c\\x9b\\xe1\\x38\\xe7\\x3a\\xb8\\xf8\\xbf\\x50\\x93\\xe8\\x10\\ \\x76\\x13\\x24\\xc8\\x76\\x2f\\x29\\x7b\\xc8\\xa0\\x02\\xe2\\xd7\\xc3\\x0f\\x17\\", "\\x76\\x13\\x24\\xc8\\x76\\x2f\\x29\\x7b\\xc8\\xa0\\x02\\xe2\\xd7\\xc3\\x0f\\x17\\ \\x62\\x5e\\x3e\\x8b\\x62\\x3e\\xa0\\x17\\x10\\xc1\\xe4\\x20\\x43\\x09\\xad\\xdb\\ \\x08\\xcd\\x26\\x8c\\x01\\x33\\xa1\\x4c\\x8c\\x05\\x76\\x25\\x3a\\x8a\\x5e\\x0c\\ \\xbe\\xcd\\xf0\\x78\\x15\\x6c\\x04\\x0e\\xdd\\x08\\x64\\x05\\x6c\\xb5\\x68\\xea\\ \\x81\\xe7\\x49\\x17\\x33\\xd7\\xc3\\x20\\xcb\\x76\\xcc\\x3e\\xa7\\x10\\x0f\\x10\\ \\x01\\x86\\x1e\\x87\\x8c\\xcb\\xdc\\x70\\xf4\\xaf\\xe0\\xeb\\x05\\x1a\\xe1\\xf2\\ \\xfd\\xe8\\xe5\\x0e\\xfc\\x2f\\x02\\xdb\\x6f\\x03\\xae\\xb0\\xe9\\x76\\x64\\x3d\\ \\x54\\x25\\x3a\\x2a\\x6f\\xc1\\x59\\xcb\\x66\\xea\\xdc\\x1d\\x58\\x18\\x74\\xd0\\ 
\\x07\\x6a\\x27\\x41\\x69\\x8c\\x94\\x21\\x8f\\x30\\x90\\x1a\\x92\\x9c\\xdf\\x5c\\ \\x1b\\xef\\x43\\x78\\xdd\\x2a\\xa8\\xc9\\xc5\\xc4\\x65\\x92\\x07\\x18\\x09\\xb6\\ \\xa2\\x33\\xd4\\xb6\\xb1\\x05\\x6b\\xed\\x84\\x5d\\x56\\x0a\\x4c\\x32\\x00\\xc0\\ \\x3a\\x0f\\xaa\\xc6\\x90\\xfc\\x9c\\xcd\\x03\\x38\\x11\\x20\\xd7\\xb1\\x80\\x03\\ \\x42\\x90\\x71\\xbd\\x17\\xfe\\xbc\\xd0\\x71\\x6e\\x2e\\x7b\\x03\\x2e\\xdd\\x4d\\", "\\x21\\xf5\\x44\\x32\\x0b\\xa5\\x9b\\x4c\\xad\\xb0\\xd0\\xb3\\x42\\x2b\\x85\\x1a\\ \\xf6\\x20\\xbd\\x88\\xda\\x8f\\xa1\\x4f\\x84\\x76\\x75\\x22\\xcd\\x44\\x1a\\x8d\\ \\x74\\x77\\x92\\x6a\\x8e\\xd8\\x01\\xcb\\x9a\\xed\\x95\\xdc\\xa5\\x12\\xb3\\x24\\ \\x5e\\xb7\\x21\\xf7\\x86\\x5b\\x9a\\xd4\\xdd\\xce\\xf4\\xec\\x95\\xa6\\xb9\\xa5\\ \\xbc\\xa1\\xb2\\x08\\x48\\x7d\\xc7\\x4b\\xf3\\x3d\\x92\\xb1\\x51\\x41\\x24\\x15\\ \\x0c\\x94\\x6e\\xf1\\xaa\\xd1\\xa8\\xd3\\x7e\\xa4\\xbd\\xc8\\x4e\\x9b\\xf7\\x64\\ \\x49\\x77\\x23\\x0d\\xef\\xa3\\x33\\x54\\xca\\x0c\\xb7\\x9b\\x04\\x01\\xcb\\xeb\\ \\x58\\x9f\\x6f\\x0d\\x4a\\xf6\\xc0\\x0a\\xa0\\x75\\xa2\\x23\\xc4\\x78\\xe1\\x93\\ \\x3c\\xa6\\xfd\\xdb\\x66\\xa9\\xe3\\xc0\\x67\\xe7\\xc2\\x2f\\x4f\\xc2\\xbe\\x12\\ \\x9b\\x1f\\x9a\\x12\\xa0\\x7a\\x11\\x34\\xac\\x87\\x23\\xef\\x74\\xe0\\xcd\\x84\\ \\x7e\\xd7\\x27\\x63\\x2c\\x98\\x06\\xb1\\x09\\xf6\\xce\\x1b\\xf5\\xf0\\xc8\\x12\\ \\x08\\xc5\\x61\\xcd\\x9d\\x0e\\xe7\\x6d\\x86\\x09\\x31\\xa8\\x1d\\x83\\x41\\x10\\ \\xda\\xf2\\x6c\\x37\\x61\\x96\\x11\\x02\\x38\\x3b\\x01\\x3c\\x11\\xd2\\x53\\xbe\\", "= b\"\\ \\x00\\x05\\ \\x00\\x73\\x5e\\x63\\ \\x00\\x6c\\ \\x00\\x6f\\x00\\x67\\x00\\x6f\\x00\\x73\\ \\x00\\x08\\ \\x0a\\x61\\x5a\\xa7\\ \\x00\\x69\\ \\x00\\x63\\x00\\x6f\\x00\\x6e\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\ \" qt_resource_struct_v1 =", "\\x15\\xe1\\xe6\\x00\\x60\\x3c\\x2e\\x34\\x06\\x98\\xb0\\xd7\\xc1\\xe9\\x61\\x60\\ \\x08\\x34\\x64\\x40\\xd6\\x63\\x60\\xec\\x02\\xfc\\x49\\x58\\x53\\x23\\xec\\xb8\\ 
\\x39\\xca\\xee\\x7e\\x10\\x31\\xe1\\xbc\\x6a\\x28\\xfd\\x15\\x92\\xc3\\x70\\x70\\ \\x3a\\x54\\x7b\\xa0\\x67\\x08\\x06\\x6c\\x00\\xef\\xcf\\x6e\\x62\\xbd\\x5d\\x44\\ \\x0b\\xc3\\x44\\x4a\\x21\\xad\\x19\\x8c\\x9f\\xbc\\xf0\\x4e\\x1a\\x54\\x5e\\x65\\ \\x0b\\x13\\x5d\\x9c\\x86\\xef\\x07\\xde\\xb7\\x20\\xfb\\x10\\x3c\\xbc\\x0d\\x06\\ \\xc7\\x61\\x25\\xb0\\x22\\x07\\xea\\x9e\\xb0\\x29\\xc8\\xa8\\x05\\x8d\\x27\\x48\\ \\x29\\x1e\\xca\\x70\\x73\\x14\\xa3\\xcf\\xab\\xd0\\xd2\\x0b\\xac\\x54\\x1a\\x1a\\ \\x5e\\xc2\\x68\\x8e\\xa3\\x0a\\x13\\x06\\xaf\\xb7\\xc5\\x96\\xdf\\x6d\\x24\\xd4\\ \\x7f\\x0c\\x1b\\x7d\\x30\\xfd\\x59\\xe0\\x95\\x3c\\x9b\\x3e\\x53\\x92\\x60\\xc8\\ \\x4e\\x8e\\x7e\\xda\\xcc\\xa6\\x02\\x1b\\x30\\xa5\\x0e\\xc1\\xa4\\x01\\xed\\x40\\ \\x7f\\xc0\\x72\\x40\\x97\\xb2\\x1d\\xdc\\x73\\xbd\\x18\\xdb\\x87\\x43\\x60\\x18\\ \\xf6\\xf5\\x45\\xa1\\x73\\x5a\\x47\\x1c\\x85\\x1f\\x07\\xb8\\x11\\x11\\x0c\\x63\\", "\\xc2\\x19\\xd3\\x2e\\x22\\x0b\\x28\\x8c\\x83\\xab\\x0d\\x7a\\x1d\\x36\\x71\\x9d\\ \\x4e\\xa2\\xa3\\x28\\x08\\xfd\\xe2\\x18\\xc9\\xd0\\x6a\\x40\\x72\\x04\\x32\\x0f\\ \\x01\\x6d\\x2e\\xf0\\x5a\\x90\\x21\\xac\\xae\\x60\\x76\\x02\\x27\\x9c\\x23\\x6d\\ \\x72\\x6e\\x17\\x9a\\x9c\\x4b\\xfa\\x46\\x03\\x12\\x65\\x53\\xe3\\x28\\xc7\\x9d\\ \\x1e\\x73\\x12\\x60\\x93\\x09\\xcd\\x16\\x04\\x4d\\xc8\\xb3\\x60\\x53\\x1b\\x9a\\ \\x25\\xd4\\xf4\\x15\\x52\\x96\\x7d\\x4d\\x1a\\x5e\\x63\\xcf\\x3c\\x1e\\x93\\x2d\\ \\x8e\\xd6\\xbd\\x48\\x45\\x09\\x92\\xbb\\xc4\\xf6\\x59\\x29\\xdd\\xd5\\xf2\\xab\\ \\xa1\\xaf\\x85\\xde\\x77\\x66\\x23\\xed\\xfb\\x90\\x6e\\x47\\x32\\x06\\x4b\\x3c\\ \\x27\\x71\\x97\\x74\\x5e\\x9a\\xac\\x16\\xd4\\x20\\x64\\x1d\\x40\\xea\\x9f\\x2e\\ \\x31\\x47\\xe2\\x6a\\xe9\\x46\\xaf\\x62\\x6d\\xce\\xe5\\x76\\x7e\\x77\\x7b\\x4e\\ \\x92\\x3b\\xda\\xbe\\x1f\\x5e\\x89\\x74\\xa5\\x5f\\x62\\xae\\x7d\\xb5\\x6a\\x3c\\ \\x29\\xbd\\xe8\\x91\\xe6\\x23\\x3d\\x85\\xd4\\xbb\\x44\\x31\\x96\\xa9\\x99\\xcd\\ \\x8a\\xf1\\x95\\x94\\x3b\\x52\\xee\\x9e\\x31\\xbb\\xd8\\x8c\\x97\\x80\\xc6\\x5c\\", 
"\\x89\\ \\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0d\\x49\\x48\\x44\\x52\\x00\\ \\x00\\x00\\x32\\x00\\x00\\x00\\x32\\x08\\x06\\x00\\x00\\x00\\x1e\\x3f\\x88\\xb1\\ \\x00\\x00\\x00\\x06\\x62\\x4b\\x47\\x44\\x00\\xff\\x00\\xff\\x00\\xff\\xa0\\xbd\\ \\xa7\\x93\\x00\\x00\\x00\\x09\\x70\\x48\\x59\\x73\\x00\\x00\\x0b\\x13\\x00\\x00\\ \\x0b\\x13\\x01\\x00\\x9a\\x9c\\x18\\x00\\x00\\x00\\x07\\x74\\x49\\x4d\\x45\\x07\\ \\xe3\\x01\\x1e\\x09\\x34\\x07\\xfb\\x3d\\x97\\x4b\\x00\\x00\\x00\\x1d\\x69\\x54\\ \\x58\\x74\\x43\\x6f\\x6d\\x6d\\x65\\x6e\\x74\\x00\\x00\\x00\\x00\\x00\\x43\\x72\\ \\x65\\x61\\x74\\x65\\x64\\x20\\x77\\x69\\x74\\x68\\x20\\x47\\x49\\x4d\\x50\\x64\\ \\x2e\\x65\\x07\\x00\\x00\\x18\\xa1\\x49\\x44\\x41\\x54\\x68\\xde\\x35\\x99\\x67\\ \\x7c\\x55\\x55\\xfa\\xb6\\xaf\\xbd\\x4f\\x4b\\x4e\\x7a\\x25\\x09\\x25\\x09\\x09\\ \\x1d\\x44\\x91\\x26\\x8a\\xe8\\x20\\x4a\\x71\\x44\\x01\\xb1\\x2b\\x16\\xb0\\xcc\\ \\x60\\xc1\\xae\\xef\\x08\\x63\\x77\\xfe\\x3a\\xa2\\xa0\\x88\\x0c\\x16\\xac\\x88\\", "= 2 qt_resource_struct = qt_resource_struct_v2 def qInitResources(): QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data) def qCleanupResources():", "# -*- coding: utf-8 -*- # Resource object code # # Created by:", "\\x01\\x91\\xd3\\x90\\xb8\\x20\\x17\\xde\\xeb\\x0f\\xa1\\xa1\\xc0\\x40\\xe0\\x0f\\ \\x18\\xbe\\x1c\\xf6\\x0f\\x86\\xce\\x47\\x81\\x1a\\x9b\\x3a\\x69\\x02\\xb6\\x42\\ \\xca\\x6e\\x08\\x05\\x89\\x45\\xd7\\x53\\x8b\\x81\\x17\\x93\\x6c\\x7e\\xc0\\x74\\ \\x7d\\x4c\\x34\\x3e\\x8f\\x43\\x14\\x73\\x12\\x17\\xa9\\x40\\x42\\xfa\\x19\\x8c\\ \\x52\\x19\\xca\\x05\\xba\\x20\\x2e\\x06\\xee\\x3a\\x0c\\xfe\\x76\\x30\\xbf\\x04\\ \\x3e\\x07\\x32\\x80\\xcb\\xa0\\xf9\\x2a\\xd8\\x71\\x11\\xb4\\x87\\xa1\\xdf\\x09\\ \\x83\\x82\\xed\\x90\\xb1\\x0a\\xd8\\x9e\\x08\\xa6\\x05\\xe3\\xc3\\xfc\\xf4\\xb1\\ \\x18\\x14\\x85\\xdc\\x1b\\x80\\x03\\x06\\x74\\x26\\xa2\\xe2\\x04\\x3a\\x1f\\x69\\ 
\\xc7\\xec\\x1b\\xc3\\xdd\\x28\\x5c\\x8b\\x4d\\x8c\\xd5\\xbd\\xa1\\xf1\\x6a\\xa0\\ \\x18\\x68\\x05\\xb2\\x80\\x1e\\xc0\\x66\\x48\\xff\\x11\\x46\\xee\\x04\\x3f\\xf0\\ \\xdd\\xe5\\x28\\xf2\\x36\\x27\\x29\\xc1\\x02\\x0a\\x68\\x04\\xc2\\xb4\\x51\\xc0\\ \\x29\\xa0\\xbb\\x51\\x49\\x81\\xf1\\x14\\x46\\x49\\x03\\xc6\\x45\\x42\\x5d\\x81\\ \\x66\\xc0\\x04\\x6e\\x06\\xa6\\x6e\\x80\\x84\\xc7\\x9d\\xcd\\x99\\x0a\\xca\\x85\\", "qt_resource_struct_v1 = b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \" qt_resource_struct_v2 = b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\", "\\xc1\\x30\\xe0\\x9c\\xcd\\x1c\\x7b\\xb3\\x8d\\xde\\xcf\\xc2\\xae\\x3e\\xb0\\x0b\\ \\x88\\xb9\\xc1\\x9d\\x03\\x81\\x81\\xc0\\xd3\\xfb\\xc1\\x3c\\x03\\xec\\x43\\x44\\ \\x11\\xb3\\x49\\x9a\\xf3\\x24\\x7c\\x9c\\x45\\x6c\\x8c\\xa0\\xcb\\xef\\xb8\\xe8\\ \\x0a\\x94\\x63\\x1a\\x3e\\x7c\\x5d\\x93\\xd1\\xf8\\x16\\xd2\\x9a\\x9b\\x70\\x75\\ \\xf6\\x84\\x68\\x6f\\x28\\x3d\\xc3\\xd0\\x4b\\xcb\\xc9\\x8c\\x2c\\x62\\xdf\\xb2\\ \\x9d\\xdc\\xb2\\xfb\\x08\\x1e\\xef\\x11\\x16\\x56\\x26\\x71\\xdb\\xb1\\x5f\\xf8\\ \\x57\\x82\\x87\\x65\\x56\\x0e\\x2c\\xd9\\xde\\x45\\x5a\\x81\\x74\\x27\\x6a\\xac\\ \\x43\\x17\\xcb\\xa3\\xd8\\xa3\\x48\\xd9\\xf9\\xd2\\xe0\\x2e\\xda\\x7d\\x08\\x0d\\ \\x94\\x57\\x7f\\x84\\x90\\x0a\\x3d\\x52\\x62\\xa2\\x94\\x9a\\x2c\\x0d\\x34\\x54\\ \\x7f\\x00\\x5d\\x27\\xf4\\x80\\xd0\\x0f\\x42\\xaf\\x0b\\xad\\x15\\xaa\\x8d\\xa3\\ \\xaf\\x84\\xaa\\x02\\x28\\x7e\\xef\\x00\\x89\\xe7\\xd5\\x41\\x95\\x9a\\x39\\x25\\ \\x8b\\x8d\\xb2\\xcc\\x0f\\x14\\xfc\\x0d\\xc5\\x6f\\xf0\\x49\\xcc\\x96\\xd8\\xa1\\ \\x28\\x87\\xa4\\xbf\\x77\\x97\\x26\\xa3\\x76\\x36\\xa9\\x9c\\x46\\x9d\\xa0\\x42\\", "# from PyQt5 import 
QtCore from silx.gui import qt as QtCore qt_resource_data =", "\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x01\\x74\\x20\\x97\\x84\\xbd\\ \" qt_version = [int(v) for v in QtCore.qVersion().split('.')] if qt_version <", "rcc_version = 1 qt_resource_struct = qt_resource_struct_v1 else: rcc_version = 2 qt_resource_struct = qt_resource_struct_v2", "\\x7f\\xc0\\x72\\x40\\x97\\xb2\\x1d\\xdc\\x73\\xbd\\x18\\xdb\\x87\\x43\\x60\\x18\\ \\xf6\\xf5\\x45\\xa1\\x73\\x5a\\x47\\x1c\\x85\\x1f\\x07\\xb8\\x11\\x11\\x0c\\x63\\ \\x09\\x5c\\xf1\\x5f\\xd8\\x52\\x02\\xc1\\x73\\x81\\x09\\x58\\x4c\\xc4\\x34\\x1f\\ \\x02\\xb7\\x0b\\x2b\\xf2\\x10\\x15\\xf4\\x27\\x07\\x51\\xe5\\x3a\\x4b\\xbf\\xd4\\ \\x67\\x20\\xb0\\x0d\\x3c\\x60\\xe8\\x35\\xd4\\x36\\x13\\x52\\xd7\\x39\\xc7\\x9a\\ \\x0a\\xbc\\x66\\x6f\\xc4\\xe6\\x75\\xb0\\x3c\\x03\\x1e\\x5d\\x09\\xc5\\x37\\x26\\ \\x43\\xa8\\x14\\xe8\\x05\\xae\\xc3\\x30\\xb1\\x8c\\xda\\xd7\\xa2\\xfc\\xd6\\xcb\\ \\xee\\xcf\\x7c\\xa7\\x80\\x11\\x0d\\x90\\xf5\\x36\\xf0\\x41\\x3e\\x9c\\xfc\\xbb\\ \\xb3\\x78\\xb7\\xf3\\x37\\x08\\x29\\xef\\x40\\x7b\\x8a\\x53\\x72\\x7f\\xe0\\x38\\ \\xe2\\x7a\\x0c\\x42\\xb6\\xbe\\xf8\\x1f\\x86\\x60\\x0c\\xb8\\x11\\xc8\\x43\\x0c\\ \\x25\\x46\\x04\\x8f\\x71\\x2d\\xf4\\xad\\x27\\x7c\\xf8\\x67\\x5a\\xc8\\x23\\x86\\ \\xa8\\xc5\\xc0\\xed\\xde\\xce\\xe0\\x41\\xf7\\x81\\x3e\\x46\\x65\\x01\\x14\\x38\\ \\x81\\x74\\x1c\\x6d\\x17\\xea\\xfc\\x1a\\x29\\x0d\\x69\\x80\\x5b\\x07\\x4e\\xa1\\", "\\xb3\\x77\\x11\\x9d\\xba\\x12\\xef\\xba\\x18\\x66\\x74\\x24\\xee\\x3e\\x9f\\x41\\ \\x53\\x00\\xb8\\x80\\x28\\x25\\xf8\\x68\\xc6\\x4b\\x39\\x71\\xd2\\x30\\xc8\\xe6\\ \\x77\\xba\\x10\\x26\\xca\\x20\\x76\\xe1\\x3f\\x6f\\x01\\xee\\xd0\\x49\\xde\\xf0\\ \\xcd\\x23\\xd2\\x69\\xb1\\xc2\\x5f\\xcc\\x65\\x9d\\xd5\\x84\\x5b\\x45\\xa3\\xbf\\ \\x81\\x98\\x75\\x02\\xde\\xdf\\x79\\x95\\x46\\x6d\\x7b\\x4f\\xd7\\x1c\\x9a\\xad\\ \\xc0\\x2f\\x48\\x2f\\x21\\x8d\\xf4\\x48\\x5d\\x8b\\xa4\\x3b\\x0d\\x35\\x1c\\xb2\\ 
\\x73\\xfa\\x1b\\x16\\xda\\xd8\\x89\\x82\\xb5\\xa8\\xea\\x18\\x7a\\xc1\\x42\\xcf\\ \\x0b\\xad\\x13\\xfa\\x3d\\x8c\\xe2\\x9b\\x0d\\x75\\x9c\\x34\\xa4\\xa3\\xc8\\x6a\\ \\xb4\\x19\\x50\\x05\\xdd\\x25\\x63\\x92\\xc4\\x08\\x89\\x19\\xd2\\xf8\\x02\\xe9\\ \\xff\\x21\\x79\\xee\\x90\\x8c\\xcf\\x25\\xe3\\x4a\\xbb\\x7f\\x8c\\x6a\\xc9\\x78\\ \\x47\\x32\\x26\\x48\\xac\\x90\\xd8\\xa7\\xbd\\x48\\x11\\x9e\\x95\\xd8\\xa9\\x72\\ \\xa2\\xb2\\x58\\xa7\\x36\\x0e\\x4a\\x2c\\xd7\\x7a\\xb3\\x5c\\xa1\\xae\\x77\\xa9\\ \\xf9\\xfa\\xb1\\xba\\xb6\\xfc\\x2e\\x69\\xd0\\x10\\x19\\x0d\\x7c\\xab\\xe5\\x13\\", "import qt as QtCore qt_resource_data = b\"\\ \\x00\\x00\\x19\\x3d\\ \\x89\\ \\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0d\\x49\\x48\\x44\\x52\\x00\\ \\x00\\x00\\x32\\x00\\x00\\x00\\x32\\x08\\x06\\x00\\x00\\x00\\x1e\\x3f\\x88\\xb1\\ \\x00\\x00\\x00\\x06\\x62\\x4b\\x47\\x44\\x00\\xff\\x00\\xff\\x00\\xff\\xa0\\xbd\\ \\xa7\\x93\\x00\\x00\\x00\\x09\\x70\\x48\\x59\\x73\\x00\\x00\\x0b\\x13\\x00\\x00\\", "1 qt_resource_struct = qt_resource_struct_v1 else: rcc_version = 2 qt_resource_struct = qt_resource_struct_v2 def qInitResources():", "\\x04\\x47\\x56\\xed\\xa0\\x6a\\x84\\x49\\x69\\xd8\\x22\\x73\\x85\\x9b\\xb4\\xc7\\ \\xa6\\x42\\x73\\x6f\\x02\\x23\\x9b\\x49\\x78\\x66\\x09\\xee\\x6f\\xc3\\x84\\xa3\\ \\x26\\xbe\\x7d\\x97\\x62\\xb5\\xe7\\xa3\\x59\\x3f\\x62\\xae\\xf6\\xf3\\xf6\\xab\\ \\x11\\xae\\x5f\\x94\\x8f\\x7b\\xe5\\x85\\x7c\\x78\\xaf\\x9f\\xa9\\x89\\xaf\\x52\\ \\x7b\\xf6\\x3a\\x1a\\xc9\\x67\\xe0\\xb7\\x43\\xd9\\xfe\\xf7\\xcf\\x30\\xbf\\x6b\\ \\xde\\xcf\\xdf\\x6a\\x81\\xb9\\x40\\x03\\x6c\\xe9\\x7a\\x88\\x7f\\x54\\x47\\x71\\ \\x7d\\x0d\\x9c\\xee\\x0f\\x87\\xa1\\x79\\x3f\\x1c\\xc7\\xcf\\x50\\x60\\xc6\\xbf\\ \\x2c\\xf8\\x4f\\x7f\\x68\\x1e\\x00\\x1d\\xbd\\xe0\\x60\\x0a\\x08\\xb2\\x42\\x70\\ \\x3e\\xb0\\x28\\x0c\\x49\\x19\\x10\\x9e\\x0a\\x34\\x74\\x03\\x6e\\x82\\xf8\\xc3\\ \\xd0\\xf2\\x02\\x7d\\x2f\\x99\\xc1\\xa8\\x45\\x09\\x14\\xd7\\x42\\x5a\\x38\\x0e\\ 
\\x1d\\x6d\\x44\\x18\\x88\\xf7\\xb7\\x97\\x30\\x27\\x7e\\x47\\xa8\\xde\\xc0\\xb7\\ \\x29\\x87\\xd6\\xa3\\x23\\xd0\\xa9\\x2c\\x5c\\x8f\\x4d\\xc0\\x38\\xe4\\xa1\\xf7\\ \\xeb\\x29\\xf0\\x99\\x8b\\xe3\\xea\\x46\\x60\\xed\\x83\\x64\\x86\\xfb\\x91\\x1c\\", "\\x20\\x1c\\xfc\\x1d\\xac\\x13\\x70\\xd9\\x2f\\x30\\x0a\\x70\\x45\\xe1\\xfb\\x10\\ \\x1e\\x26\\x03\\x6d\\x18\\xfe\\x15\\x50\\xd0\\x08\\xa5\\x11\\x4c\\xef\\x1a\\x5c\\ \\x15\\xd3\\xa9\\x26\\x8b\\x0c\\x3c\\x64\\xf0\\x2b\\xc6\\x34\\xa1\\x98\\xd3\\xe7\\ \\x25\\x0e\\x54\\x5a\\x1c\\x93\\x5d\\xef\\xc0\\x26\\xc1\\x69\\xe6\\xae\\x4e\\x4b\\ \\xfe\\x95\\xef\\xb3\\x9d\\xbe\\xf8\\x2b\\xb8\\x02\\x0c\\x88\\x43\\x5e\\x04\\x9a\\ \\x0c\\x30\\x3d\\x90\\x64\\x42\\xd0\\x70\\xd2\\x6b\\x1c\\x5c\\x07\\xc0\\xac\\xb0\\ \\xe1\\x6b\\x0d\\x00\\xb3\\xd5\\x89\\xd4\\x01\\x87\\x92\\xba\\x3b\\x8b\\x71\\x3b\\ \\xff\\x6f\\xea\\x03\\x4d\\x1d\\x30\\xa4\\x1a\\x4e\\x5a\\xf6\\xae\\xf5\\x07\\x56\\ \\x26\\x13\\xab\\x7e\\x04\\x77\\x47\\x0d\\x78\\xb7\\x62\\xfc\\xd3\\x42\\x27\\x0d\\ \\xe8\\x16\\x87\\x87\\x4f\\x42\\x61\\x1d\\x94\\xf7\\x87\\x25\\xe9\\x7f\\x99\\x67\\ \\x18\\xdb\\x09\\x17\\x1e\\x06\\x6f\\x1d\\xb8\\x9b\\x0c\\x4e\\x8d\\x16\\x47\\xbb\\ \\xc2\\x19\\xd3\\x2e\\x22\\x0b\\x28\\x8c\\x83\\xab\\x0d\\x7a\\x1d\\x36\\x71\\x9d\\ \\x4e\\xa2\\xa3\\x28\\x08\\xfd\\xe2\\x18\\xc9\\xd0\\x6a\\x40\\x72\\x04\\x32\\x0f\\", "\\x16\\x00\\x1b\\x92\\x41\\x01\\x48\\x6d\\xe5\\xf4\\xdc\\x28\\xf7\\x3c\\x0d\\x1b\\ \\x80\\x33\\x67\\xa0\\xe8\\x13\\x48\\xfe\\x10\\xfa\\x76\\x40\\xb3\\x01\\x9e\\xf5\\ \\x90\\x38\\x2a\\x17\\x96\\x65\\x63\\x5a\\x10\\xfb\\x5f\\x13\\xfc\\xed\\x23\\xa0\\ \\x02\\x38\\x06\\x04\\x60\\xdf\\x38\\x4c\\xd3\\x0b\\x7d\\x77\\x03\\xc2\\x45\\x3b\\ \\x66\\xa0\\x1f\\x84\\x3a\\xf1\\x70\\x9c\\x46\\x92\\x88\\x64\\x9c\\x02\\xab\\x15\\ \\xd1\\x04\\xf1\\x54\\x0a\\x4f\\x0f\\x85\\x13\\x82\\xba\\x22\\x16\\x1d\\x9b\\x42\\ \\x63\\xd5\\xc5\\xb4\\x9b\\x01\\xc2\\xed\\x43\\xe1\\x93\\x76\\xd4\\xb1\\x12\\xe9\\ \\x69\\x74\\xa8\\x19\\x6d\\xaf\\x40\\xba\\x18\\x59\\x43\\xd0\\xd3\\x21\\x54\\xbf\\ 
\\x18\\xc9\\x9f\\x2a\\xb9\\xfc\\x52\\x8a\\xa9\\x8a\\x15\\x86\\xae\\x10\\xfa\\x57\\ \\x0c\\x35\\xcd\\x46\\x56\\x4f\\xa4\\x81\\x09\\x52\\x41\\x9a\\xa2\\x8b\\xd0\\x71\\ \\xa1\\xf8\\xcd\\x3e\\x89\\xc7\\x25\\x56\\x4a\\x3d\\x67\\x4b\\xcb\\x90\\x36\\x21\\ \\xdd\\xe4\\x96\\x98\\x22\\xf1\\x9e\\xc4\\x42\\x89\\xc9\\xd2\\xa7\\x48\\x2c\\x95\\ \\x28\\x73\\xe0\\x85\\x2c\\x5e\\x56\\x05\\x96\\xda\\x78\\x45\\x4a\\x19\\xae\\x06\\", "\\xed\\x4f\\xa5\\xc9\\x1a\\xd8\\x4f\\x07\\x8d\\x66\\x3d\\x52\\x70\\x4c\\x4a\\xfe\\ \\x51\\x67\\xb6\\xf8\\xb4\\xe3\\xdc\\x1b\\x34\\x3f\\x7f\\xa9\\x46\\x0e\\x8d\\xeb\\ \\x92\\xfc\\x72\\x99\\x59\\xde\\x36\\x82\\x35\\xc0\\x71\\xc8\\xf8\\x16\\x26\\x07\\ \\xa2\\xb8\\x3c\\x1e\\x20\\x09\\xc6\\x9d\\x65\\x75\\x29\\xcc\\xa9\\x8f\\x30\\xe4\\ \\x66\\xa0\\x2a\\x19\\x22\\x3e\\x88\\x87\\x69\\xb9\\x5e\\xfc\\x38\\x00\\xae\\x06\\ \\x1e\\x04\\x2e\\x8c\\xdb\\xd0\\x3a\\x5f\\x90\\x6c\\xc0\\x84\\x38\\x74\\xab\\x07\\ \\x1e\\x39\\x88\\xc6\\xfe\\x17\\x3f\\x2f\\x93\\xce\\x12\\x0c\\xda\\x31\\xac\\x3c\\ \\x12\\x67\\x8f\\x20\\x3e\\x35\\x0c\\x1c\\x00\\x22\\x18\\xf8\\xb0\\x36\\xf6\\x83\\ \\xc4\\x44\\x7c\\x74\\x12\\x25\\x4e\\xb1\\xf9\\x09\\xc9\\x2b\\xf2\\xe0\\xf0\\xcd\\ \\xf4\\x30\\x3b\\x99\\x68\\xee\\x87\\x0b\\xc6\\x53\\xf0\\xa6\\x20\\xdf\\x4b\\x81\\ \\x37\\x99\\xe2\\x16\\x93\\x7b\\x3c\\x07\\x31\\x17\\x0e\\xe9\\x44\\x61\\x60\\x00\\ \\x18\\xc5\\x30\\x39\\x0e\\x74\\x58\\xd0\\xe5\\x04\\xc1\\x5b\\xa1\\xd5\\x03\\x33\\ \\x16\\x00\\x1b\\x92\\x41\\x01\\x48\\x6d\\xe5\\xf4\\xdc\\x28\\xf7\\x3c\\x0d\\x1b\\", "\\x28\\x73\\xe0\\x85\\x2c\\x5e\\x56\\x05\\x96\\xda\\x78\\x45\\x4a\\x19\\xae\\x06\\ \\xa4\\x2a\\x1a\\x14\\x4b\\xb9\\x43\\xba\\x20\\x59\\x61\\xe3\\x65\\xfd\\xb3\\x58\\ \\x7a\\xce\\x1f\\xd6\\xdb\\xee\\x23\\xda\\x53\\x34\\x5f\\x9c\\xe8\\x40\\x0d\\xfb\\ \\x90\\x1e\\x42\\x7f\\xb6\\x23\\x6b\\x13\\xd2\\xf9\\xa6\\xc2\\xff\\x41\\x95\\xeb\\ \\x90\\xd2\\x3d\\x92\\xd1\\x53\\x2a\\x71\\xab\\x6a\\x13\\xfa\\x8f\\xd0\\xea\\xb3\\ \\x28\\x3a\\x16\\x89\\xf3\\x24\\x6e\\x92\\xb8\\x56\\x62\\x92\\x42\\x4f\\xb8\\x55\\ 
\\x65\\x21\\xdd\\x80\\xc4\\x1d\\x12\\x1f\\x49\\xfc\\x21\\x99\\x9f\\x4b\\x85\\x57\\ \\x4b\\x6b\\x90\\x26\\xba\\x25\\xde\\x95\\xc5\\x59\\x89\\x72\\x69\\x46\\x0f\\xa9\\ \\x70\\x8c\\xe2\\x54\\x2a\\x4a\\x48\\x3a\\xb7\\x97\\xc4\\x1a\\x95\\x63\\x29\\x98\\ \\xfb\\xb0\\xe4\\x9a\\xa5\\x83\\x58\\x6a\\x64\\x9b\\xe4\\x2f\\x52\\x83\\xf7\\x2b\\ \\xad\\xa1\\x55\\xe3\\x86\\x48\\x1f\\x12\\x50\\xa5\\x7b\\xad\\xf4\\xb8\\x21\\x2a\\ \\x6a\\xd1\\xce\\x2a\\xa4\\xb5\\x68\\xb3\\xec\\x82\\xac\\x6b\\xd1\\x9a\\x18\\xd2\\ \\x55\\x48\\xd9\\x48\\x69\\x86\\xaa\\x3f\\x44\\xff\\x16\\x3a\\xb5\\xc6\\x94\\x92\\", "\\x66\\xc0\\x04\\x6e\\x06\\xa6\\x6e\\x80\\x84\\xc7\\x9d\\xcd\\x99\\x0a\\xca\\x85\\ \\x78\\x1d\\xb8\\xd7\\x02\\x95\\x69\\xd0\\x91\\xe5\\x54\\x98\\x68\\x0b\\x27\\x89\\ \\x58\\x0f\\xfc\\xc0\\xb6\\xf9\\x21\\x2e\\x3a\\x08\\xc6\\x38\\x2f\\xd4\\x74\\x07\\ \\x86\\x01\\x17\\x01\\x67\\xc0\\xa8\\x85\\x9e\\xab\\x88\\xdd\\xd6\\x8c\\xbb\\x05\\ \\xd8\\xe9\\x81\\x5f\\xde\\x06\\x75\\x01\\x0a\\xc1\\x58\\x05\\xd7\\x3e\\x0b\\x97\\ \\xc6\\xed\\x47\\xee\\x02\\xfe\\x04\\x36\\x4f\\x27\\xca\\x62\\x56\\x92\\x4e\\x77\\ \\x1b\\xd8\\xa4\\xb2\\x01\\x1f\\x75\\x98\\xf9\\x8f\\x42\\xcd\\x1c\\x5a\\xcc\\xe1\\ \\xb8\\x83\\x98\\x44\\xb0\\x68\\x02\\x7c\\xc0\\x1e\\xe0\\x9a\\x74\\xa0\\x08\\xa8\\ \\x05\\x16\\x79\\x30\\x82\\x83\\x70\\xd3\\x08\\xc9\\x95\\xd0\\x91\\xe8\\x14\\x60\\ \\x02\\xe9\\xf6\\x8e\\xfa\\x0e\\x50\\x7b\\x67\\x88\\x46\\x20\\x94\\x05\\x89\\x7d\\ \\xa3\\x50\\xd3\\xe2\\x7c\\xae\\x0b\\x60\\x80\\x4a\\xe0\\xf8\\x60\\xdc\\xcf\\x54\\ \\xd9\\x4d\\x45\\x1c\\xf8\\xc2\\xfe\\x21\\xcf\\x09\\x98\\xf9\\x13\\x5c\\xe9\\x3c\\ \\x36\\xd9\\xf9\\xea\\x70\\xc0\\xb7\\x06\\xf7\\xba\\xc5\\x0c\\xe6\\x01\\xd2\\x71\\", "\\x9f\\x71\\x8e\\x7b\\xa7\\x8d\\x9d\\x8b\\x6a\\x60\\x2c\\xd0\\xe2\\xf4\\x7b\\xb2\\ \\x53\\x40\\xa7\\x43\\x83\\x56\\x04\\xa8\\xc7\\xcf\\x59\\xb2\\x30\\x38\\x8b\\x9f\\ \\x6c\\x4e\\x72\\x3e\\x71\\x92\\x8c\\xf7\\x71\\xa9\\x1b\\x85\\x0c\\xc4\\x5d\\x48\\ \\x0c\\x37\\x50\\xee\\xb4\\xdd\\x2c\\x60\\xf8\\x77\\xc0\\x07\\xb6\\x25\\x22\\x16\\ 
\\xb5\\x51\\xb0\\xb4\\x88\\x9e\\x1f\\xb6\\xd3\\xd3\\x48\\x00\\xb7\\x0f\\x92\\x5b\\ \\xe1\\xfc\\x5a\\x62\\x33\\xe1\\xf4\\x14\\xfb\\x24\\x3d\\x40\\xe9\\x72\\x70\\xcf\\ \\x4d\\x83\\x53\\xbd\\x21\\x9e\\x47\\x12\\x9d\\xe0\\x09\\x40\\x34\\x19\\x62\\xfd\\ \\x9c\\x9e\\x6a\\x06\\x32\\x81\\xc1\\x50\\x57\\x85\\x7a\\x74\\x80\\x1b\\x8c\\x6c\\ \\xe7\\xad\\x0c\\xc0\\xed\\xc0\\xab\\xdc\\x07\\x64\\xe0\\x61\\x0f\\xd9\\xe4\\x13\\ \\xe7\\x02\\xdc\\x34\\x92\\x4a\\x10\\x94\\x0f\\x74\\xe0\\x36\\x77\\x61\\xf8\\x94\\ \\xa3\\xb1\\xd4\\x13\\x02\\xfa\\x02\\xcf\\x34\\x42\\x97\\xbb\\x80\\xad\\x29\\x30\\ \\xb9\\x9d\\xce\\xfb\\x21\\xbc\\x07\\xd2\\x3f\\x32\\xa0\\x5c\\x50\\xef\\x03\\x2b\\ \\x05\\xa2\\x3d\\xed\\x10\\x43\\x0b\\x3c\\xb7\\x8d\\xdf\\x9f\\x86\\xf4\\x3a\\xe8\\", "\\x61\\xe7\\x39\\xb9\\xd0\\x39\\xc8\\xc6\\x6e\\x04\\xc0\\xc0\\xcd\\x69\\x3b\\x68\\ \\xe5\\x00\\x47\\x52\\x69\\x89\\x5e\\x4e\\x10\\x48\\x21\\x88\\xcb\\x58\\x08\\xbe\\ \\x00\\x6c\\x01\\xf6\\x3e\\x41\\x2d\\xb9\\x44\\xf1\\x60\\x81\\x9d\\x11\\x93\\xca\\ \\x31\\xb7\\xba\\xe1\\x82\\x9f\\x81\\x02\\x03\\xa2\\x99\\x30\\x14\\x0e\\xa4\\xc0\\ \\x13\\xc7\\x21\\x7b\\x01\\x70\\x36\\xcb\\x09\\x43\\x89\\xc4\\x1f\\x28\\x63\\xe1\\ \\x0a\\x9b\\xc9\\x32\\x9d\\x30\\xd5\\xd5\\xe9\\x91\\x46\\xe0\\xc4\\x18\\xd0\\x83\\ \\x16\\x78\\xea\\x1c\\x21\\x1c\\x04\\xa1\\xfb\\xc8\\x9e\\x9f\\x41\\x4d\\x1e\\xc4\\ \\x5e\\x39\\xee\\xa4\\xc2\\x5c\\xb0\\xfa\\x40\\xb5\\xcb\\xce\\x57\\xb6\\x94\\xdb\\ \\x91\\x36\\x06\\xd6\\xc2\\x4b\\x39\\xa1\\x1c\\x2a\\x00\\x11\\x81\\x0b\\xde\\x82\\ \\xad\\x49\\xc4\\xbf\\x7c\\x93\\x0e\\x6e\\x64\\x37\\x06\\xb5\\x18\\x40\\x9c\\xae\\ \\xfc\\x06\\xe1\\x2d\\x98\\x3b\\x3c\\x50\\xb2\\x1a\\x88\\x09\\x12\\x5a\\x38\\x36\\ \\x0b\\xfa\\x06\\xa0\\xe7\\x5c\\xe7\\xc6\\xa6\\x3d\\x13\\xcc\\x3f\\x61\\xc6\\x87\\ \\xbc\\x30\\x3f\\x42\\xa6\\x63\\x2e\\x52\\x9d\\x1b\\x9b\\x06\\x87\\xb5\\xf2\\x1d\\", "\\x40\\x91\\x25\\xe8\\xcb\\x38\\x52\\x11\\xd2\\x14\\xa4\\x9b\\x4c\\xc9\\x6b\\x4a\\ \\x49\\x1e\\x55\\x2c\\x43\\x5f\\x06\\x91\\xee\\x43\\x72\\x17\\x4a\\xfc\\x4d\\x62\\ 
\\x92\\x64\\x9e\\x27\\x2b\\xbb\\x50\\x81\\x95\\xe8\\x47\\x0b\\xad\\x11\\xfa\\x46\\ \\xe8\\x90\\x90\\x16\\x20\\x65\\xa4\\x49\\xdc\\xee\\x2c\\xf2\\x6b\\x89\\xef\\xa4\\ \\xec\\xcb\\xa5\\xf7\\x0d\\xa9\\xa0\\xbf\\xc4\\xff\\x24\\xf6\\x4b\\xbd\\x2f\\x95\\ \\xf2\\x87\\x4b\\xfc\\x2c\\x15\\x3d\\x28\\x31\\x47\\x61\\x02\\x12\\x0f\\xa9\\x93\\ \\x5a\\x9d\\xa6\\x46\\x32\\x27\\xa9\\x9a\\x4a\\x7d\\x8d\\xb4\\x84\\x98\\x54\\x38\\ \\x5e\\xe2\\x62\\x45\\xf8\\x42\\xaf\\x13\\x93\\xa1\\xc7\\x11\\xdd\\x40\\xa3\\x20\\ \\xbe\\x07\\xdc\\x33\\x93\\xc0\\x17\\x81\\xd0\\x00\\xf0\\x1d\\x22\\xb8\\x3c\\x82\\ \\xd5\\x08\\xc9\\xff\\x01\\xca\\x52\\x80\\x9e\\xe0\\xe9\\xc0\\xba\\xfe\\x34\\x07\\ \\x66\\x84\\x38\\x3a\\x0a\\x48\\x80\\x0e\\x5b\\x5d\\x70\\x01\\x05\\x40\\x7e\\x0c\\ \\x52\\x6a\\xc0\\xfc\\xb7\\x1b\\x3e\\x1a\\x01\\x91\\x99\\x40\\x37\\xa0\\x0e\\x92\\ \\x0e\\xc3\\x9d\\xcf\\xc3\\xfb\\xf9\\xd0\\xfe\\x36\\x10\\x83\\x5e\\xf7\\xc3\\xd1\\", "\\xe4\\x83\\x00\\x34\\x80\\xb1\\x74\\x08\\xc4\\xaf\\x43\\xe4\\x80\\x6b\\x05\\x5c\\ \\x12\\x84\\xca\\xae\\x8e\\x2f\\xe9\\x84\\x90\\xcb\\xde\\x9d\\x92\\x33\\x44\\xe9\\ \\xa0\\x85\\x24\\xb0\\x86\\x43\\x52\\x12\\xdc\\xd5\\x88\\xc1\\x6b\\x74\\xa3\\x83\\ \\x14\\x22\\x54\\x60\\x50\\x13\\xbd\\x04\\xbe\\xee\\x83\\xf9\\xdb\\x4d\\x16\\xdb\\ \\x80\\xc3\\x5e\\x68\\xbe\\xd5\\xd1\\xa1\\x69\\x10\\x1a\\x69\\xef\\xf2\\x7a\\xe0\\ \\xd8\\x0b\\x31\\x98\\x52\\x03\\xae\\x3f\\xec\\x33\\xa8\\xf0\\x91\\x7a\\xc8\\x2e\\ \\xa4\\xc5\\xd1\\x69\\x01\\x3d\\x7f\\x04\\x4e\\x7b\\x9c\\x73\\xaa\\xb2\\xd3\\xcd\\ \\xf0\\xd5\\xb4\\x7d\\xd0\\x48\\x3c\\x03\\x8c\\x4f\\x81\\xf6\\x5c\\xe0\\x24\\x06\\ \\xeb\\x60\\xfa\\xff\\xa0\\x17\\x50\\x59\\x07\\xfc\\x66\\x7b\\xf0\\xdc\\xd3\\xb6\\ \\xb5\\xca\\xe9\\xc0\\xc7\\x5e\\x52\\xc1\\x16\\xe1\\x8f\\x53\\xed\\xbd\\xc9\\x59\\ \\x4c\\x8e\\xf1\\x23\\x7e\\x5c\\x00\\x24\\xe2\\x82\\xce\\xcb\\x70\\xd5\\xbe\\xc8\\ \\xbc\\x20\\x50\\x09\\x1c\\x48\\x80\\xd6\\x12\\xd8\\x3d\\x1c\\x0e\\xe6\\xdb\\x4b\\ \\x31\\x80\\xb2\\x64\\x68\\xba\\x06\\x7a\\x8c\\x6b\\x23\\x3a\\xa0\\x82\\xf8\\x8c\\", 
"\\xbc\\x9f\\x93\\x83\\x36\\x8e\\xc2\\xe0\\x12\\x3a\\xb8\\x80\\x24\\xbe\\xb0\\x83\\ \\xd7\\xcd\\x4b\\x09\\x2f\\x81\\x76\\x13\\x52\\x0c\\xf0\\xec\\x01\\xf3\\x43\\xe0\\ \\xb7\\xf1\\x54\\xec\\xfb\\x37\\xf9\\xb1\\x12\\x02\\x34\\x93\\xe9\\xdf\\x0d\\xe6\\ \\xd7\\x90\\x60\\xc1\\xed\\x2b\\xa8\\x5e\\xf7\\x22\\x4b\\x0a\\x07\\x91\\x5b\\xe6\\ \\xa1\\x3c\\x2d\\xc6\\x4b\\x67\\x42\\xb8\\xae\\x6e\\x7e\\x73\\x5e\\x72\\xaf\\x6d\\ \\xa4\\xbf\\xdf\\x1b\\x7a\\xf9\\x61\\xdb\\x34\\x92\\x7e\\x18\\x86\\xf9\\x51\\x09\\ \\x34\\x8c\\x04\\xa6\\xdb\\x73\\x0c\\x8e\\x42\\xd1\\x01\\xc8\\xfc\\x9e\\xc8\\xa4\\ \\xa3\\x54\\x4e\\x6f\\x64\\x76\\x29\\xdc\\x0e\\x64\\xee\\x04\\xea\\xea\\x31\\x6b\\ \\x4e\\x60\\x5a\\x51\\x4c\\xd7\\x6f\\xa0\\x6e\\x50\\x6f\\x40\\xdd\\x5d\\x58\\xa7\\ \\xfb\\x62\\x5a\\x3e\\xc4\\x4c\\x0c\\xd2\\x70\\xf1\\x7f\\xd0\\x77\\x33\\x9c\\x13\\ \\xc4\\x5d\\x0f\\xfe\\x3a\\x70\\x6f\\x07\\xe3\\xb8\\xe3\\x6c\\x7a\\x54\\x91\\xbe\\ \\x25\\x9b\\x5d\\x4c\\x22\\x89\\x8d\\x24\\x45\\x0b\\x68\\x8d\\x9c\\x8f\\x72\\xf7\\ \\xd1\\xb2\\xeb\\x72\\x9a\\xae\\xf6\\xb0\\x6a\\xfd\\x9d\\x7c\\xf8\\xa4\\xc1\\x23\\", "\\x66\\x02\\x8c\\xf8\\x89\\xac\\xcc\\x7b\\x71\\x11\\x63\\x1f\\x59\\xd4\\xb4\\x6f\\ \\x87\\xce\\x0c\\x78\\xe0\\x1d\\xba\\x78\\xdf\\x60\\x32\\x27\\xed\\xaf\\xea\\x2c\\ \\x7a\\x44\\xa8\\x79\\x0b\\xd2\\x6c\\xa4\\xbb\\xd1\\x67\\x31\\xb4\\x40\\x48\\xaf\\ \\x21\\xf5\\x44\\x32\\x0b\\xa5\\x9b\\x4c\\xad\\xb0\\xd0\\xb3\\x42\\x2b\\x85\\x1a\\ \\xf6\\x20\\xbd\\x88\\xda\\x8f\\xa1\\x4f\\x84\\x76\\x75\\x22\\xcd\\x44\\x1a\\x8d\\ \\x74\\x77\\x92\\x6a\\x8e\\xd8\\x01\\xcb\\x9a\\xed\\x95\\xdc\\xa5\\x12\\xb3\\x24\\ \\x5e\\xb7\\x21\\xf7\\x86\\x5b\\x9a\\xd4\\xdd\\xce\\xf4\\xec\\x95\\xa6\\xb9\\xa5\\ \\xbc\\xa1\\xb2\\x08\\x48\\x7d\\xc7\\x4b\\xf3\\x3d\\x92\\xb1\\x51\\x41\\x24\\x15\\ \\x0c\\x94\\x6e\\xf1\\xaa\\xd1\\xa8\\xd3\\x7e\\xa4\\xbd\\xc8\\x4e\\x9b\\xf7\\x64\\ \\x49\\x77\\x23\\x0d\\xef\\xa3\\x33\\x54\\xca\\x0c\\xb7\\x9b\\x04\\x01\\xcb\\xeb\\ \\x58\\x9f\\x6f\\x0d\\x4a\\xf6\\xc0\\x0a\\xa0\\x75\\xa2\\x23\\xc4\\x78\\xe1\\x93\\ 
\\x3c\\xa6\\xfd\\xdb\\x66\\xa9\\xe3\\xc0\\x67\\xe7\\xc2\\x2f\\x4f\\xc2\\xbe\\x12\\ \\x9b\\x1f\\x9a\\x12\\xa0\\x7a\\x11\\x34\\xac\\x87\\x23\\xef\\x74\\xe0\\xcd\\x84\\", "\\x25\\x0e\\x54\\x5a\\x1c\\x93\\x5d\\xef\\xc0\\x26\\xc1\\x69\\xe6\\xae\\x4e\\x4b\\ \\xfe\\x95\\xef\\xb3\\x9d\\xbe\\xf8\\x2b\\xb8\\x02\\x0c\\x88\\x43\\x5e\\x04\\x9a\\ \\x0c\\x30\\x3d\\x90\\x64\\x42\\xd0\\x70\\xd2\\x6b\\x1c\\x5c\\x07\\xc0\\xac\\xb0\\ \\xe1\\x6b\\x0d\\x00\\xb3\\xd5\\x89\\xd4\\x01\\x87\\x92\\xba\\x3b\\x8b\\x71\\x3b\\ \\xff\\x6f\\xea\\x03\\x4d\\x1d\\x30\\xa4\\x1a\\x4e\\x5a\\xf6\\xae\\xf5\\x07\\x56\\ \\x26\\x13\\xab\\x7e\\x04\\x77\\x47\\x0d\\x78\\xb7\\x62\\xfc\\xd3\\x42\\x27\\x0d\\ \\xe8\\x16\\x87\\x87\\x4f\\x42\\x61\\x1d\\x94\\xf7\\x87\\x25\\xe9\\x7f\\x99\\x67\\ \\x18\\xdb\\x09\\x17\\x1e\\x06\\x6f\\x1d\\xb8\\x9b\\x0c\\x4e\\x8d\\x16\\x47\\xbb\\ \\xc2\\x19\\xd3\\x2e\\x22\\x0b\\x28\\x8c\\x83\\xab\\x0d\\x7a\\x1d\\x36\\x71\\x9d\\ \\x4e\\xa2\\xa3\\x28\\x08\\xfd\\xe2\\x18\\xc9\\xd0\\x6a\\x40\\x72\\x04\\x32\\x0f\\ \\x01\\x6d\\x2e\\xf0\\x5a\\x90\\x21\\xac\\xae\\x60\\x76\\x02\\x27\\x9c\\x23\\x6d\\ \\x72\\x6e\\x17\\x9a\\x9c\\x4b\\xfa\\x46\\x03\\x12\\x65\\x53\\xe3\\x28\\xc7\\x9d\\ \\x1e\\x73\\x12\\x60\\x93\\x09\\xcd\\x16\\x04\\x4d\\xc8\\xb3\\x60\\x53\\x1b\\x9a\\", "\\x39\\xca\\xee\\x7e\\x10\\x31\\xe1\\xbc\\x6a\\x28\\xfd\\x15\\x92\\xc3\\x70\\x70\\ \\x3a\\x54\\x7b\\xa0\\x67\\x08\\x06\\x6c\\x00\\xef\\xcf\\x6e\\x62\\xbd\\x5d\\x44\\ \\x0b\\xc3\\x44\\x4a\\x21\\xad\\x19\\x8c\\x9f\\xbc\\xf0\\x4e\\x1a\\x54\\x5e\\x65\\ \\x0b\\x13\\x5d\\x9c\\x86\\xef\\x07\\xde\\xb7\\x20\\xfb\\x10\\x3c\\xbc\\x0d\\x06\\ \\xc7\\x61\\x25\\xb0\\x22\\x07\\xea\\x9e\\xb0\\x29\\xc8\\xa8\\x05\\x8d\\x27\\x48\\ \\x29\\x1e\\xca\\x70\\x73\\x14\\xa3\\xcf\\xab\\xd0\\xd2\\x0b\\xac\\x54\\x1a\\x1a\\ \\x5e\\xc2\\x68\\x8e\\xa3\\x0a\\x13\\x06\\xaf\\xb7\\xc5\\x96\\xdf\\x6d\\x24\\xd4\\ \\x7f\\x0c\\x1b\\x7d\\x30\\xfd\\x59\\xe0\\x95\\x3c\\x9b\\x3e\\x53\\x92\\x60\\xc8\\ \\x4e\\x8e\\x7e\\xda\\xcc\\xa6\\x02\\x1b\\x30\\xa5\\x0e\\xc1\\xa4\\x01\\xed\\x40\\ 
\\x7f\\xc0\\x72\\x40\\x97\\xb2\\x1d\\xdc\\x73\\xbd\\x18\\xdb\\x87\\x43\\x60\\x18\\ \\xf6\\xf5\\x45\\xa1\\x73\\x5a\\x47\\x1c\\x85\\x1f\\x07\\xb8\\x11\\x11\\x0c\\x63\\ \\x09\\x5c\\xf1\\x5f\\xd8\\x52\\x02\\xc1\\x73\\x81\\x09\\x58\\x4c\\xc4\\x34\\x1f\\ \\x02\\xb7\\x0b\\x2b\\xf2\\x10\\x15\\xf4\\x27\\x07\\x51\\xe5\\x3a\\x4b\\xbf\\xd4\\", "if qt_version < [5, 8, 0]: rcc_version = 1 qt_resource_struct = qt_resource_struct_v1 else:", "\\x88\\x5c\\x70\\x2f\\x24\\x9d\\x3a\\x76\\x91\\x49\\x53\\xfb\\x67\\x50\\x97\\x06\\ \\xa3\\xee\\x23\\xd5\\x5c\\x00\\xec\\xe0\\x24\\xb0\\x18\\x13\\x53\\x33\\xe1\\x50\\ \\x2a\\xc4\\xdb\\xed\\x93\\xef\\xb1\\x11\\x2a\\x3c\\xd0\\x7e\\x1d\\x30\\xa8\\x0a\\ \\x3a\\xdd\\x8c\\x98\\xe5\\x61\\x58\\x1b\\x7c\\x92\\x0d\\xdf\\x7f\\x19\\x87\\x3b\\ \\x37\\x41\\xd6\\xef\\x90\\x17\\xc1\\x2a\\xb6\\x59\\xb2\\x7b\\x3b\\x70\\xc8\\x07\\ \\xc6\\x01\\x60\\x37\\xa4\\x7d\\x41\\x7c\\xee\\xeb\\xb4\\xfc\\xb1\\x14\\xce\\xc6\\ \\x61\\x76\\x16\\xb4\\x5f\\x64\\xab\\x6c\\xea\\x7a\\x98\\xb3\\x01\\x4e\\xcc\\xb1\\ \\xbb\\xac\\x74\\x83\\xcd\\x54\\x64\\x43\\xef\\x03\\x70\\xfe\\x59\\xe2\\xb4\\x12\\ \\x66\\x02\\x8c\\xf8\\x89\\xac\\xcc\\x7b\\x71\\x11\\x63\\x1f\\x59\\xd4\\xb4\\x6f\\ \\x87\\xce\\x0c\\x78\\xe0\\x1d\\xba\\x78\\xdf\\x60\\x32\\x27\\xed\\xaf\\xea\\x2c\\ \\x7a\\x44\\xa8\\x79\\x0b\\xd2\\x6c\\xa4\\xbb\\xd1\\x67\\x31\\xb4\\x40\\x48\\xaf\\ \\x21\\xf5\\x44\\x32\\x0b\\xa5\\x9b\\x4c\\xad\\xb0\\xd0\\xb3\\x42\\x2b\\x85\\x1a\\ \\xf6\\x20\\xbd\\x88\\xda\\x8f\\xa1\\x4f\\x84\\x76\\x75\\x22\\xcd\\x44\\x1a\\x8d\\", "\\xf6\\x84\\x68\\x6f\\x28\\x3d\\xc3\\xd0\\x4b\\xcb\\xc9\\x8c\\x2c\\x62\\xdf\\xb2\\ \\x9d\\xdc\\xb2\\xfb\\x08\\x1e\\xef\\x11\\x16\\x56\\x26\\x71\\xdb\\xb1\\x5f\\xf8\\ \\x57\\x82\\x87\\x65\\x56\\x0e\\x2c\\xd9\\xde\\x45\\x5a\\x81\\x74\\x27\\x6a\\xac\\ \\x43\\x17\\xcb\\xa3\\xd8\\xa3\\x48\\xd9\\xf9\\xd2\\xe0\\x2e\\xda\\x7d\\x08\\x0d\\ \\x94\\x57\\x7f\\x84\\x90\\x0a\\x3d\\x52\\x62\\xa2\\x94\\x9a\\x2c\\x0d\\x34\\x54\\ 
\\x7f\\x00\\x5d\\x27\\xf4\\x80\\xd0\\x0f\\x42\\xaf\\x0b\\xad\\x15\\xaa\\x8d\\xa3\\ \\xaf\\x84\\xaa\\x02\\x28\\x7e\\xef\\x00\\x89\\xe7\\xd5\\x41\\x95\\x9a\\x39\\x25\\ \\x8b\\x8d\\xb2\\xcc\\x0f\\x14\\xfc\\x0d\\xc5\\x6f\\xf0\\x49\\xcc\\x96\\xd8\\xa1\\ \\x28\\x87\\xa4\\xbf\\x77\\x97\\x26\\xa3\\x76\\x36\\xa9\\x9c\\x46\\x9d\\xa0\\x42\\ \\xed\\x4f\\xa5\\xc9\\x1a\\xd8\\x4f\\x07\\x8d\\x66\\x3d\\x52\\x70\\x4c\\x4a\\xfe\\ \\x51\\x67\\xb6\\xf8\\xb4\\xe3\\xdc\\x1b\\x34\\x3f\\x7f\\xa9\\x46\\x0e\\x8d\\xeb\\ \\x92\\xfc\\x72\\x99\\x59\\xde\\x36\\x82\\x35\\xc0\\x71\\xc8\\xf8\\x16\\x26\\x07\\ \\xa2\\xb8\\x3c\\x1e\\x20\\x09\\xc6\\x9d\\x65\\x75\\x29\\xcc\\xa9\\x8f\\x30\\xe4\\", "\\x73\\x1c\\x00\\x45\\xb0\\x6e\\x69\\xe4\\x8f\\xc1\\xc2\\x72\\xfa\\x23\\x82\\x1d\\ \\x1a\\x39\\x05\\x28\\xc5\\x01\\x5e\\x21\\xec\\xbd\\x15\\xe3\\xb1\\x77\\xf1\\x2c\\ \\x0a\\x63\\xd4\\x00\\x87\\x46\\x3b\\x1e\\x6c\\x34\\x7c\\xeb\\xc7\\xf5\\xaf\\x4f\\ \\xed\\xd3\\xa8\\x79\\x9d\\x18\\x93\\x70\\xd7\\x1d\\x83\\xdd\\x83\\xa0\\xc0\\x24\\ \\xb6\\x6e\\x31\\x01\\x6e\\xc6\\x04\\x92\\x38\\x82\\xcb\\x5c\\x08\\x66\\x14\\xd6\\ \\x16\\x51\\x5b\\xbe\\x8d\\x72\\x32\\xd8\\x88\\x97\\x29\\x40\\xbf\\xba\\x7d\\x98\\ \\x0d\\x09\\x10\\x6f\\x05\\x8e\\x02\\x9e\\x10\\xc4\\x60\\x67\\xa9\\x9d\\xfd\\xaf\\ \\xde\\x0d\\xe6\\x4a\\x0f\\x58\\x3d\\x1d\\x90\\x95\\x13\\x9b\\xf6\\x2d\\x1f\\xbd\\ \\x15\\xe4\\x88\\xdf\\x2e\\xa0\\x15\\x38\\x0b\\x1c\\x2f\\x82\\xd0\\x5c\\xa0\\xf0\\ \\x57\\x60\\x9d\\xcd\\x63\\x1a\\x0a\\xcb\\x06\\xd0\\x50\\x01\\x91\\x81\\x40\\xce\\ \\x61\\xe7\\x39\\xb9\\xd0\\x39\\xc8\\xc6\\x6e\\x04\\xc0\\xc0\\xcd\\x69\\x3b\\x68\\ \\xe5\\x00\\x47\\x52\\x69\\x89\\x5e\\x4e\\x10\\x48\\x21\\x88\\xcb\\x58\\x08\\xbe\\ \\x00\\x6c\\x01\\xf6\\x3e\\x41\\x2d\\xb9\\x44\\xf1\\x60\\x81\\x9d\\x11\\x93\\xca\\", "object code # # Created by: The Resource Compiler for PyQt5 (Qt v5.12.5)", "\\x43\\x17\\xcb\\xa3\\xd8\\xa3\\x48\\xd9\\xf9\\xd2\\xe0\\x2e\\xda\\x7d\\x08\\x0d\\ \\x94\\x57\\x7f\\x84\\x90\\x0a\\x3d\\x52\\x62\\xa2\\x94\\x9a\\x2c\\x0d\\x34\\x54\\ 
\\x7f\\x00\\x5d\\x27\\xf4\\x80\\xd0\\x0f\\x42\\xaf\\x0b\\xad\\x15\\xaa\\x8d\\xa3\\ \\xaf\\x84\\xaa\\x02\\x28\\x7e\\xef\\x00\\x89\\xe7\\xd5\\x41\\x95\\x9a\\x39\\x25\\ \\x8b\\x8d\\xb2\\xcc\\x0f\\x14\\xfc\\x0d\\xc5\\x6f\\xf0\\x49\\xcc\\x96\\xd8\\xa1\\ \\x28\\x87\\xa4\\xbf\\x77\\x97\\x26\\xa3\\x76\\x36\\xa9\\x9c\\x46\\x9d\\xa0\\x42\\ \\xed\\x4f\\xa5\\xc9\\x1a\\xd8\\x4f\\x07\\x8d\\x66\\x3d\\x52\\x70\\x4c\\x4a\\xfe\\ \\x51\\x67\\xb6\\xf8\\xb4\\xe3\\xdc\\x1b\\x34\\x3f\\x7f\\xa9\\x46\\x0e\\x8d\\xeb\\ \\x92\\xfc\\x72\\x99\\x59\\xde\\x36\\x82\\x35\\xc0\\x71\\xc8\\xf8\\x16\\x26\\x07\\ \\xa2\\xb8\\x3c\\x1e\\x20\\x09\\xc6\\x9d\\x65\\x75\\x29\\xcc\\xa9\\x8f\\x30\\xe4\\ \\x66\\xa0\\x2a\\x19\\x22\\x3e\\x88\\x87\\x69\\xb9\\x5e\\xfc\\x38\\x00\\xae\\x06\\ \\x1e\\x04\\x2e\\x8c\\xdb\\xd0\\x3a\\x5f\\x90\\x6c\\xc0\\x84\\x38\\x74\\xab\\x07\\ \\x1e\\x39\\x88\\xc6\\xfe\\x17\\x3f\\x2f\\x93\\xce\\x12\\x0c\\xda\\x31\\xac\\x3c\\", "\\x0a\\x61\\x5a\\xa7\\ \\x00\\x69\\ \\x00\\x63\\x00\\x6f\\x00\\x6e\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\ \" qt_resource_struct_v1 = b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \" qt_resource_struct_v2 =", "\\x3e\\x07\\x32\\x80\\xcb\\xa0\\xf9\\x2a\\xd8\\x71\\x11\\xb4\\x87\\xa1\\xdf\\x09\\ \\x83\\x82\\xed\\x90\\xb1\\x0a\\xd8\\x9e\\x08\\xa6\\x05\\xe3\\xc3\\xfc\\xf4\\xb1\\ \\x18\\x14\\x85\\xdc\\x1b\\x80\\x03\\x06\\x74\\x26\\xa2\\xe2\\x04\\x3a\\x1f\\x69\\ \\xc7\\xec\\x1b\\xc3\\xdd\\x28\\x5c\\x8b\\x4d\\x8c\\xd5\\xbd\\xa1\\xf1\\x6a\\xa0\\ \\x18\\x68\\x05\\xb2\\x80\\x1e\\xc0\\x66\\x48\\xff\\x11\\x46\\xee\\x04\\x3f\\xf0\\ \\xdd\\xe5\\x28\\xf2\\x36\\x27\\x29\\xc1\\x02\\x0a\\x68\\x04\\xc2\\xb4\\x51\\xc0\\ \\x29\\xa0\\xbb\\x51\\x49\\x81\\xf1\\x14\\x46\\x49\\x03\\xc6\\x45\\x42\\x5d\\x81\\ \\x66\\xc0\\x04\\x6e\\x06\\xa6\\x6e\\x80\\x84\\xc7\\x9d\\xcd\\x99\\x0a\\xca\\x85\\ 
\\x78\\x1d\\xb8\\xd7\\x02\\x95\\x69\\xd0\\x91\\xe5\\x54\\x98\\x68\\x0b\\x27\\x89\\ \\x58\\x0f\\xfc\\xc0\\xb6\\xf9\\x21\\x2e\\x3a\\x08\\xc6\\x38\\x2f\\xd4\\x74\\x07\\ \\x86\\x01\\x17\\x01\\x67\\xc0\\xa8\\x85\\x9e\\xab\\x88\\xdd\\xd6\\x8c\\xbb\\x05\\ \\xd8\\xe9\\x81\\x5f\\xde\\x06\\x75\\x01\\x0a\\xc1\\x58\\x05\\xd7\\x3e\\x0b\\x97\\ \\xc6\\xed\\x47\\xee\\x02\\xfe\\x04\\x36\\x4f\\x27\\xca\\x62\\x56\\x92\\x4e\\x77\\", "\\x3e\\xb0\\x28\\x0c\\x49\\x19\\x10\\x9e\\x0a\\x34\\x74\\x03\\x6e\\x82\\xf8\\xc3\\ \\xd0\\xf2\\x02\\x7d\\x2f\\x99\\xc1\\xa8\\x45\\x09\\x14\\xd7\\x42\\x5a\\x38\\x0e\\ \\x1d\\x6d\\x44\\x18\\x88\\xf7\\xb7\\x97\\x30\\x27\\x7e\\x47\\xa8\\xde\\xc0\\xb7\\ \\x29\\x87\\xd6\\xa3\\x23\\xd0\\xa9\\x2c\\x5c\\x8f\\x4d\\xc0\\x38\\xe4\\xa1\\xf7\\ \\xeb\\x29\\xf0\\x99\\x8b\\xe3\\xea\\x46\\x60\\xed\\x83\\x64\\x86\\xfb\\x91\\x1c\\ \\x4d\\xa7\\xa6\\xc5\\xa0\\xe0\\x64\\x22\\x93\\xdf\\x9a\\x8b\\x59\\x19\\x8c\\x92\\ \\xb8\\x0d\\xc8\\x81\\x78\\x14\\xc2\\x64\\x91\\x91\\x68\\xc2\\x56\\x17\\xb8\\xce\\ \\x62\\x8d\\x83\\x0d\\x37\\xc1\\x68\\x82\\xdc\\x70\\x77\\x02\\x2c\\xcc\\x05\\xf2\\ \\xc1\\x30\\xe0\\x9c\\xcd\\x1c\\x7b\\xb3\\x8d\\xde\\xcf\\xc2\\xae\\x3e\\xb0\\x0b\\ \\x88\\xb9\\xc1\\x9d\\x03\\x81\\x81\\xc0\\xd3\\xfb\\xc1\\x3c\\x03\\xec\\x43\\x44\\ \\x11\\xb3\\x49\\x9a\\xf3\\x24\\x7c\\x9c\\x45\\x6c\\x8c\\xa0\\xcb\\xef\\xb8\\xe8\\ \\x0a\\x94\\x63\\x1a\\x3e\\x7c\\x5d\\x93\\xd1\\xf8\\x16\\xd2\\x9a\\x9b\\x70\\x75\\ \\xf6\\x84\\x68\\x6f\\x28\\x3d\\xc3\\xd0\\x4b\\xcb\\xc9\\x8c\\x2c\\x62\\xdf\\xb2\\", "\\xc4\\x7d\\x49\\xd2\\x55\\x99\\x4e\\xbc\\x9d\\x2a\\x3d\\x89\\xc4\\x74\\x89\\xf7\\ \\xa5\\x31\\xbd\\xa5\\xae\\x97\\x49\\xac\\x52\\x88\\x36\\xa9\\x47\\x81\\xfd\\x3a\\ \\x35\\x8a\\x52\\x27\\xb1\\x5c\\x16\\x6f\\x48\\x4f\\xa1\\x78\\xca\\x75\\x6a\\x4a\\ \\x78\\x49\\x35\\x74\\x68\\x8e\\x4b\\x42\\x59\\x3e\\x59\\xef\\xa1\\xda\\x00\\x8a\\ \\x4f\\x4f\\x96\\x28\\x96\\xae\\x40\\xc1\\xfd\\xf6\\x04\\x2a\\xb0\\x14\\xc9\\x3f\\ \\xde\\x59\\xe8\\x34\\x89\\x4b\\xa4\\x94\\x6e\\xaa\\x5d\\x6a\\xea\\x45\\xa1\\x2f\\ 
\\x2c\\x64\\x6d\\x41\\x2a\\xcd\\x75\\xf4\\xe3\\x11\\x89\\xd5\\x92\\xeb\\x7d\\xe9\\ \\xd2\\x54\\xe9\\x17\\xa4\\x61\\x45\\x12\\x1f\\x4a\\x54\\x2a\\xc6\\x46\\x29\\x6d\\ \\x8c\\xc4\\x9b\\xb2\\x58\\x2b\\xa5\\xa7\\x39\\x8b\\x3d\\x29\\x75\\x4f\\x56\\x94\\ \\xb5\\x3a\\x4c\\x4c\\x01\\x0e\\x4a\\xc6\\x33\\x8a\\xd2\\xa4\\x38\\x01\\x1d\\x24\\ \\xa6\\xf6\\xb1\\xcb\\xa4\\xf3\\x5d\\x8a\\xe6\\x4e\\x51\\x1d\\x3f\\xeb\\x3b\\xa4\\ \\xd5\\xc4\\xf4\\x11\\x12\\xf1\\xcf\\x91\\x1e\\x47\\x9a\\x99\\x2d\\xe5\\xf8\\xa5\\ \\x2b\\x4c\\xe9\\x5b\\x54\\x17\\x45\\xd6\\x03\\x6e\\xc9\\xdf\\x5d\\xe2\\x5c\\x7b\\", "\\x36\\xd9\\xf9\\xea\\x70\\xc0\\xb7\\x06\\xf7\\xba\\xc5\\x0c\\xe6\\x01\\xd2\\x71\\ \\x93\\x42\\x94\\x44\\x0e\\x63\\x31\\x91\\xfa\\x9a\\x67\\x68\\xe7\\x26\\x16\\x58\\ \\xc9\\xb8\\x5d\\xce\\x77\\xe5\\x34\\xea\\x21\\x60\\x7b\\x29\\x8c\\xbd\\x0c\\xc8\\ \\x05\\xd6\\x47\\xa1\\xf2\\x28\\x14\\xc3\\xe9\\x3b\\x0c\\x62\\x45\\xb5\\xc4\\x32\\ \\x6a\\x09\\xf9\\x21\\xec\\x03\\x9f\\x0f\\x3c\\xfd\\xa0\\xc6\\x03\\x41\\xa0\\x3c\\ \\x0f\\x0a\\xbf\\x12\\xe9\\x5b\\x1a\\x61\\xc3\\x17\\xf0\\xe7\\xaf\\xd0\\x2c\\xa8\\ \\x2e\\x80\\xa6\\x49\\xc0\\x14\\xec\\x4f\\x36\\x00\\x27\\x81\\xef\\x60\\x76\\x0d\\ \\xfc\\xd3\\x81\\x45\\x14\\x38\\x88\\xcd\\x1e\\x06\\xe0\\x8b\\x62\\x10\\x26\\x8f\\ \\x18\\xb5\\x24\\xd0\\x8c\\x41\\x3a\\xb3\\x39\\x0c\\x84\\x28\\xa1\\x37\\x70\\x15\\ \\xe0\\x6e\\xc0\\xc4\\x87\\x45\\xcc\\x39\\x91\\x62\\x20\\x25\\xe8\\x3c\\x34\\x05\\ \\x88\\x79\\xc0\\xf4\\xc2\\xe8\\x36\\x22\\xb7\\x59\\x54\\x03\\x1d\\x06\\xb8\\xbd\\ \\xa0\\x64\\x68\\xf4\\xd8\\x20\\xf3\\x3b\\x48\\xf7\\x01\\x4d\\x09\\x10\\xbd\\x10\\ \\x32\\x87\\x05\\x09\\xb9\\xcb\\xf0\\x76\\x82\\xe7\\x87\\x72\\x98\\xb7\\x1b\\x6a\\", "\\x09\\x5c\\xf1\\x5f\\xd8\\x52\\x02\\xc1\\x73\\x81\\x09\\x58\\x4c\\xc4\\x34\\x1f\\ \\x02\\xb7\\x0b\\x2b\\xf2\\x10\\x15\\xf4\\x27\\x07\\x51\\xe5\\x3a\\x4b\\xbf\\xd4\\ \\x67\\x20\\xb0\\x0d\\x3c\\x60\\xe8\\x35\\xd4\\x36\\x13\\x52\\xd7\\x39\\xc7\\x9a\\ \\x0a\\xbc\\x66\\x6f\\xc4\\xe6\\x75\\xb0\\x3c\\x03\\x1e\\x5d\\x09\\xc5\\x37\\x26\\ 
\\x43\\xa8\\x14\\xe8\\x05\\xae\\xc3\\x30\\xb1\\x8c\\xda\\xd7\\xa2\\xfc\\xd6\\xcb\\ \\xee\\xcf\\x7c\\xa7\\x80\\x11\\x0d\\x90\\xf5\\x36\\xf0\\x41\\x3e\\x9c\\xfc\\xbb\\ \\xb3\\x78\\xb7\\xf3\\x37\\x08\\x29\\xef\\x40\\x7b\\x8a\\x53\\x72\\x7f\\xe0\\x38\\ \\xe2\\x7a\\x0c\\x42\\xb6\\xbe\\xf8\\x1f\\x86\\x60\\x0c\\xb8\\x11\\xc8\\x43\\x0c\\ \\x25\\x46\\x04\\x8f\\x71\\x2d\\xf4\\xad\\x27\\x7c\\xf8\\x67\\x5a\\xc8\\x23\\x86\\ \\xa8\\xc5\\xc0\\xed\\xde\\xce\\xe0\\x41\\xf7\\x81\\x3e\\x46\\x65\\x01\\x14\\x38\\ \\x81\\x74\\x1c\\x6d\\x17\\xea\\xfc\\x1a\\x29\\x0d\\x69\\x80\\x5b\\x07\\x4e\\xa1\\ \\xfb\\x85\\xaa\\xca\\x90\\xce\\xc3\\x0e\\x58\\x8c\\x95\\x98\\x29\\x65\\x75\\x51\\ \\x60\\x17\\xfa\\x40\\x68\\x95\\x50\\x5d\\x25\\xd2\\x39\\xa9\\x12\\xe3\\x24\\x1e\\", "\\x32\\x87\\x05\\x09\\xb9\\xcb\\xf0\\x76\\x82\\xe7\\x87\\x72\\x98\\xb7\\x1b\\x6a\\ \\x9f\\x71\\x8e\\x7b\\xa7\\x8d\\x9d\\x8b\\x6a\\x60\\x2c\\xd0\\xe2\\xf4\\x7b\\xb2\\ \\x53\\x40\\xa7\\x43\\x83\\x56\\x04\\xa8\\xc7\\xcf\\x59\\xb2\\x30\\x38\\x8b\\x9f\\ \\x6c\\x4e\\x72\\x3e\\x71\\x92\\x8c\\xf7\\x71\\xa9\\x1b\\x85\\x0c\\xc4\\x5d\\x48\\ \\x0c\\x37\\x50\\xee\\xb4\\xdd\\x2c\\x60\\xf8\\x77\\xc0\\x07\\xb6\\x25\\x22\\x16\\ \\xb5\\x51\\xb0\\xb4\\x88\\x9e\\x1f\\xb6\\xd3\\xd3\\x48\\x00\\xb7\\x0f\\x92\\x5b\\ \\xe1\\xfc\\x5a\\x62\\x33\\xe1\\xf4\\x14\\xfb\\x24\\x3d\\x40\\xe9\\x72\\x70\\xcf\\ \\x4d\\x83\\x53\\xbd\\x21\\x9e\\x47\\x12\\x9d\\xe0\\x09\\x40\\x34\\x19\\x62\\xfd\\ \\x9c\\x9e\\x6a\\x06\\x32\\x81\\xc1\\x50\\x57\\x85\\x7a\\x74\\x80\\x1b\\x8c\\x6c\\ \\xe7\\xad\\x0c\\xc0\\xed\\xc0\\xab\\xdc\\x07\\x64\\xe0\\x61\\x0f\\xd9\\xe4\\x13\\ \\xe7\\x02\\xdc\\x34\\x92\\x4a\\x10\\x94\\x0f\\x74\\xe0\\x36\\x77\\x61\\xf8\\x94\\ \\xa3\\xb1\\xd4\\x13\\x02\\xfa\\x02\\xcf\\x34\\x42\\x97\\xbb\\x80\\xad\\x29\\x30\\ \\xb9\\x9d\\xce\\xfb\\x21\\xbc\\x07\\xd2\\x3f\\x32\\xa0\\x5c\\x50\\xef\\x03\\x2b\\", "\\xf2\\x87\\x4b\\xfc\\x2c\\x15\\x3d\\x28\\x31\\x47\\x61\\x02\\x12\\x0f\\xa9\\x93\\ \\x5a\\x9d\\xa6\\x46\\x32\\x27\\xa9\\x9a\\x4a\\x7d\\x8d\\xb4\\x84\\x98\\x54\\x38\\ 
\\x5e\\xe2\\x62\\x45\\xf8\\x42\\xaf\\x13\\x93\\xa1\\xc7\\x11\\xdd\\x40\\xa3\\x20\\ \\xbe\\x07\\xdc\\x33\\x93\\xc0\\x17\\x81\\xd0\\x00\\xf0\\x1d\\x22\\xb8\\x3c\\x82\\ \\xd5\\x08\\xc9\\xff\\x01\\xca\\x52\\x80\\x9e\\xe0\\xe9\\xc0\\xba\\xfe\\x34\\x07\\ \\x66\\x84\\x38\\x3a\\x0a\\x48\\x80\\x0e\\x5b\\x5d\\x70\\x01\\x05\\x40\\x7e\\x0c\\ \\x52\\x6a\\xc0\\xfc\\xb7\\x1b\\x3e\\x1a\\x01\\x91\\x99\\x40\\x37\\xa0\\x0e\\x92\\ \\x0e\\xc3\\x9d\\xcf\\xc3\\xfb\\xf9\\xd0\\xfe\\x36\\x10\\x83\\x5e\\xf7\\xc3\\xd1\\ \\x6f\\x80\\x0e\\x2c\\x52\\x30\\xe9\\x0e\\xfc\\x8f\\x0a\\x9e\\x24\\x42\\x23\\x99\\ \\x74\\x52\\x46\\x37\\x06\\xf3\\x13\\x49\\x7c\\x03\\xc6\\x25\\x9c\\xd2\\x60\\xd6\\ \\x53\\x42\\x12\\x60\\x54\\xfd\\x82\\xaa\\xfa\\xc2\\xd6\\x4c\\x98\\xbd\\x15\\x12\\ \\xe7\\xb9\\xc0\\x63\\x41\\xc8\\x84\\xac\\x38\\x24\\x00\\xbf\\x00\\x5d\\x81\\x01\\ \\x06\\x8d\\x17\\x19\\x6c\\xb8\\xdc\\xe2\\x54\\x81\\xfd\\x52\\x9f\\xd3\\x90\\x17\\", "\\x0c\\x37\\x50\\xee\\xb4\\xdd\\x2c\\x60\\xf8\\x77\\xc0\\x07\\xb6\\x25\\x22\\x16\\ \\xb5\\x51\\xb0\\xb4\\x88\\x9e\\x1f\\xb6\\xd3\\xd3\\x48\\x00\\xb7\\x0f\\x92\\x5b\\ \\xe1\\xfc\\x5a\\x62\\x33\\xe1\\xf4\\x14\\xfb\\x24\\x3d\\x40\\xe9\\x72\\x70\\xcf\\ \\x4d\\x83\\x53\\xbd\\x21\\x9e\\x47\\x12\\x9d\\xe0\\x09\\x40\\x34\\x19\\x62\\xfd\\ \\x9c\\x9e\\x6a\\x06\\x32\\x81\\xc1\\x50\\x57\\x85\\x7a\\x74\\x80\\x1b\\x8c\\x6c\\ \\xe7\\xad\\x0c\\xc0\\xed\\xc0\\xab\\xdc\\x07\\x64\\xe0\\x61\\x0f\\xd9\\xe4\\x13\\ \\xe7\\x02\\xdc\\x34\\x92\\x4a\\x10\\x94\\x0f\\x74\\xe0\\x36\\x77\\x61\\xf8\\x94\\ \\xa3\\xb1\\xd4\\x13\\x02\\xfa\\x02\\xcf\\x34\\x42\\x97\\xbb\\x80\\xad\\x29\\x30\\ \\xb9\\x9d\\xce\\xfb\\x21\\xbc\\x07\\xd2\\x3f\\x32\\xa0\\x5c\\x50\\xef\\x03\\x2b\\ \\x05\\xa2\\x3d\\xed\\x10\\x43\\x0b\\x3c\\xb7\\x8d\\xdf\\x9f\\x86\\xf4\\x3a\\xe8\\ \\x33\\xd5\\x80\\x6d\\x53\\x81\\xee\\x0e\\x36\\xdd\\x0e\\x5e\\x92\\x9d\\xf3\\x8e\\ \\xd9\\xcd\\x4f\\x9e\\xa3\\x38\\x1f\\xc3\\xa2\\xb7\\xd1\\x35\\x60\\xec\\x75\\x5a\\ \\x27\\xe4\\x6c\\x62\\x05\\x30\\x6f\\x18\\x58\\xf7\\x01\\xa3\\x09\\x90\\x88\\x89\\", 
"\\xc9\\xb8\\x5d\\xce\\x77\\xe5\\x34\\xea\\x21\\x60\\x7b\\x29\\x8c\\xbd\\x0c\\xc8\\ \\x05\\xd6\\x47\\xa1\\xf2\\x28\\x14\\xc3\\xe9\\x3b\\x0c\\x62\\x45\\xb5\\xc4\\x32\\ \\x6a\\x09\\xf9\\x21\\xec\\x03\\x9f\\x0f\\x3c\\xfd\\xa0\\xc6\\x03\\x41\\xa0\\x3c\\ \\x0f\\x0a\\xbf\\x12\\xe9\\x5b\\x1a\\x61\\xc3\\x17\\xf0\\xe7\\xaf\\xd0\\x2c\\xa8\\ \\x2e\\x80\\xa6\\x49\\xc0\\x14\\xec\\x4f\\x36\\x00\\x27\\x81\\xef\\x60\\x76\\x0d\\ \\xfc\\xd3\\x81\\x45\\x14\\x38\\x88\\xcd\\x1e\\x06\\xe0\\x8b\\x62\\x10\\x26\\x8f\\ \\x18\\xb5\\x24\\xd0\\x8c\\x41\\x3a\\xb3\\x39\\x0c\\x84\\x28\\xa1\\x37\\x70\\x15\\ \\xe0\\x6e\\xc0\\xc4\\x87\\x45\\xcc\\x39\\x91\\x62\\x20\\x25\\xe8\\x3c\\x34\\x05\\ \\x88\\x79\\xc0\\xf4\\xc2\\xe8\\x36\\x22\\xb7\\x59\\x54\\x03\\x1d\\x06\\xb8\\xbd\\ \\xa0\\x64\\x68\\xf4\\xd8\\x20\\xf3\\x3b\\x48\\xf7\\x01\\x4d\\x09\\x10\\xbd\\x10\\ \\x32\\x87\\x05\\x09\\xb9\\xcb\\xf0\\x76\\x82\\xe7\\x87\\x72\\x98\\xb7\\x1b\\x6a\\ \\x9f\\x71\\x8e\\x7b\\xa7\\x8d\\x9d\\x8b\\x6a\\x60\\x2c\\xd0\\xe2\\xf4\\x7b\\xb2\\ \\x53\\x40\\xa7\\x43\\x83\\x56\\x04\\xa8\\xc7\\xcf\\x59\\xb2\\x30\\x38\\x8b\\x9f\\", "\\x77\\xba\\x10\\x26\\xca\\x20\\x76\\xe1\\x3f\\x6f\\x01\\xee\\xd0\\x49\\xde\\xf0\\ \\xcd\\x23\\xd2\\x69\\xb1\\xc2\\x5f\\xcc\\x65\\x9d\\xd5\\x84\\x5b\\x45\\xa3\\xbf\\ \\x81\\x98\\x75\\x02\\xde\\xdf\\x79\\x95\\x46\\x6d\\x7b\\x4f\\xd7\\x1c\\x9a\\xad\\ \\xc0\\x2f\\x48\\x2f\\x21\\x8d\\xf4\\x48\\x5d\\x8b\\xa4\\x3b\\x0d\\x35\\x1c\\xb2\\ \\x73\\xfa\\x1b\\x16\\xda\\xd8\\x89\\x82\\xb5\\xa8\\xea\\x18\\x7a\\xc1\\x42\\xcf\\ \\x0b\\xad\\x13\\xfa\\x3d\\x8c\\xe2\\x9b\\x0d\\x75\\x9c\\x34\\xa4\\xa3\\xc8\\x6a\\ \\xb4\\x19\\x50\\x05\\xdd\\x25\\x63\\x92\\xc4\\x08\\x89\\x19\\xd2\\xf8\\x02\\xe9\\ \\xff\\x21\\x79\\xee\\x90\\x8c\\xcf\\x25\\xe3\\x4a\\xbb\\x7f\\x8c\\x6a\\xc9\\x78\\ \\x47\\x32\\x26\\x48\\xac\\x90\\xd8\\xa7\\xbd\\x48\\x11\\x9e\\x95\\xd8\\xa9\\x72\\ \\xa2\\xb2\\x58\\xa7\\x36\\x0e\\x4a\\x2c\\xd7\\x7a\\xb3\\x5c\\xa1\\xae\\x77\\xa9\\ \\xf9\\xfa\\xb1\\xba\\xb6\\xfc\\x2e\\x69\\xd0\\x10\\x19\\x0d\\x7c\\xab\\xe5\\x13\\ 
\\x5d\\xdc\\x64\\x3e\\x40\\xda\\xaf\\x03\\xc0\\xbf\\x0e\\xba\\x75\\xf2\\xf5\\xe7\\ \\xa2\\xc7\\x06\\x38\\x5a\\x6b\\x32\\xfe\\x50\\x1a\\x19\\xd5\\x51\\x68\\x4f\\x83\\", "\\x53\\x42\\x12\\x60\\x54\\xfd\\x82\\xaa\\xfa\\xc2\\xd6\\x4c\\x98\\xbd\\x15\\x12\\ \\xe7\\xb9\\xc0\\x63\\x41\\xc8\\x84\\xac\\x38\\x24\\x00\\xbf\\x00\\x5d\\x81\\x01\\ \\x06\\x8d\\x17\\x19\\x6c\\xb8\\xdc\\xe2\\x54\\x81\\xfd\\x52\\x9f\\xd3\\x90\\x17\\ \\x86\\xf6\\x1c\\xd8\\x92\\x0a\\x85\\xc0\\xc5\\xe5\\xe0\\x8d\\x81\\x2b\\x05\\x94\\ \\x01\\x91\\xd3\\x90\\xb8\\x20\\x17\\xde\\xeb\\x0f\\xa1\\xa1\\xc0\\x40\\xe0\\x0f\\ \\x18\\xbe\\x1c\\xf6\\x0f\\x86\\xce\\x47\\x81\\x1a\\x9b\\x3a\\x69\\x02\\xb6\\x42\\ \\xca\\x6e\\x08\\x05\\x89\\x45\\xd7\\x53\\x8b\\x81\\x17\\x93\\x6c\\x7e\\xc0\\x74\\ \\x7d\\x4c\\x34\\x3e\\x8f\\x43\\x14\\x73\\x12\\x17\\xa9\\x40\\x42\\xfa\\x19\\x8c\\ \\x52\\x19\\xca\\x05\\xba\\x20\\x2e\\x06\\xee\\x3a\\x0c\\xfe\\x76\\x30\\xbf\\x04\\ \\x3e\\x07\\x32\\x80\\xcb\\xa0\\xf9\\x2a\\xd8\\x71\\x11\\xb4\\x87\\xa1\\xdf\\x09\\ \\x83\\x82\\xed\\x90\\xb1\\x0a\\xd8\\x9e\\x08\\xa6\\x05\\xe3\\xc3\\xfc\\xf4\\xb1\\ \\x18\\x14\\x85\\xdc\\x1b\\x80\\x03\\x06\\x74\\x26\\xa2\\xe2\\x04\\x3a\\x1f\\x69\\ \\xc7\\xec\\x1b\\xc3\\xdd\\x28\\x5c\\x8b\\x4d\\x8c\\xd5\\xbd\\xa1\\xf1\\x6a\\xa0\\", "PyQt5 (Qt v5.12.5) # # WARNING! 
All changes made in this file will", "\\x2e\\x80\\xa6\\x49\\xc0\\x14\\xec\\x4f\\x36\\x00\\x27\\x81\\xef\\x60\\x76\\x0d\\ \\xfc\\xd3\\x81\\x45\\x14\\x38\\x88\\xcd\\x1e\\x06\\xe0\\x8b\\x62\\x10\\x26\\x8f\\ \\x18\\xb5\\x24\\xd0\\x8c\\x41\\x3a\\xb3\\x39\\x0c\\x84\\x28\\xa1\\x37\\x70\\x15\\ \\xe0\\x6e\\xc0\\xc4\\x87\\x45\\xcc\\x39\\x91\\x62\\x20\\x25\\xe8\\x3c\\x34\\x05\\ \\x88\\x79\\xc0\\xf4\\xc2\\xe8\\x36\\x22\\xb7\\x59\\x54\\x03\\x1d\\x06\\xb8\\xbd\\ \\xa0\\x64\\x68\\xf4\\xd8\\x20\\xf3\\x3b\\x48\\xf7\\x01\\x4d\\x09\\x10\\xbd\\x10\\ \\x32\\x87\\x05\\x09\\xb9\\xcb\\xf0\\x76\\x82\\xe7\\x87\\x72\\x98\\xb7\\x1b\\x6a\\ \\x9f\\x71\\x8e\\x7b\\xa7\\x8d\\x9d\\x8b\\x6a\\x60\\x2c\\xd0\\xe2\\xf4\\x7b\\xb2\\ \\x53\\x40\\xa7\\x43\\x83\\x56\\x04\\xa8\\xc7\\xcf\\x59\\xb2\\x30\\x38\\x8b\\x9f\\ \\x6c\\x4e\\x72\\x3e\\x71\\x92\\x8c\\xf7\\x71\\xa9\\x1b\\x85\\x0c\\xc4\\x5d\\x48\\ \\x0c\\x37\\x50\\xee\\xb4\\xdd\\x2c\\x60\\xf8\\x77\\xc0\\x07\\xb6\\x25\\x22\\x16\\ \\xb5\\x51\\xb0\\xb4\\x88\\x9e\\x1f\\xb6\\xd3\\xd3\\x48\\x00\\xb7\\x0f\\x92\\x5b\\ \\xe1\\xfc\\x5a\\x62\\x33\\xe1\\xf4\\x14\\xfb\\x24\\x3d\\x40\\xe9\\x72\\x70\\xcf\\", "\\x00\\x6f\\x00\\x67\\x00\\x6f\\x00\\x73\\ \\x00\\x08\\ \\x0a\\x61\\x5a\\xa7\\ \\x00\\x69\\ \\x00\\x63\\x00\\x6f\\x00\\x6e\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\ \" qt_resource_struct_v1 = b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \"", "\\x29\\x65\\x9c\\x54\\x92\\x24\\xb1\\x54\\xad\\x9c\\x50\\x35\\x31\\xad\\x45\\x6a\\ \\x20\\x2e\\x93\\xad\\xd0\\x1e\\x87\\xa4\\xa3\\xf6\\x49\\xe4\\xd4\\xc1\\x67\\x57\\ \\x02\\x8f\\x03\\xc7\\x0b\\x18\\xd0\\x2f\\x9f\\xe9\\x47\\xe0\\xa5\\xde\\xf0\\xc7\\ \\x3a\\x60\\x4c\\xf8\\x2f\\xb5\\x80\\x40\\x1b\\x49\\x6b\\xa1\\xaf\\x20\\x2f\\x0a\\ \\x39\\xb7\\x02\\xfb\\x66\\x01\\xf7\\x39\\xb2\\x1c\\x85\\xe4\\x23\\xe8\\xa3\\x65\\ 
\\xf0\\xf0\\x4f\\xf0\\xc2\\x30\\xa8\\x9d\\x65\\xbf\\x97\\xbf\\x0e\\xd2\\x76\\x42\\ \\xf0\\x5e\\x70\\x1d\\x87\\x82\\x57\\x6c\\x80\\x06\\x93\\x20\\x2b\\x44\\x8c\\xbe\\ \\x88\\x5c\\x70\\x2f\\x24\\x9d\\x3a\\x76\\x91\\x49\\x53\\xfb\\x67\\x50\\x97\\x06\\ \\xa3\\xee\\x23\\xd5\\x5c\\x00\\xec\\xe0\\x24\\xb0\\x18\\x13\\x53\\x33\\xe1\\x50\\ \\x2a\\xc4\\xdb\\xed\\x93\\xef\\xb1\\x11\\x2a\\x3c\\xd0\\x7e\\x1d\\x30\\xa8\\x0a\\ \\x3a\\xdd\\x8c\\x98\\xe5\\x61\\x58\\x1b\\x7c\\x92\\x0d\\xdf\\x7f\\x19\\x87\\x3b\\ \\x37\\x41\\xd6\\xef\\x90\\x17\\xc1\\x2a\\xb6\\x59\\xb2\\x7b\\x3b\\x70\\xc8\\x07\\ \\xc6\\x01\\x60\\x37\\xa4\\x7d\\x41\\x7c\\xee\\xeb\\xb4\\xfc\\xb1\\x14\\xce\\xc6\\", "\\xe7\\xb9\\xc0\\x63\\x41\\xc8\\x84\\xac\\x38\\x24\\x00\\xbf\\x00\\x5d\\x81\\x01\\ \\x06\\x8d\\x17\\x19\\x6c\\xb8\\xdc\\xe2\\x54\\x81\\xfd\\x52\\x9f\\xd3\\x90\\x17\\ \\x86\\xf6\\x1c\\xd8\\x92\\x0a\\x85\\xc0\\xc5\\xe5\\xe0\\x8d\\x81\\x2b\\x05\\x94\\ \\x01\\x91\\xd3\\x90\\xb8\\x20\\x17\\xde\\xeb\\x0f\\xa1\\xa1\\xc0\\x40\\xe0\\x0f\\ \\x18\\xbe\\x1c\\xf6\\x0f\\x86\\xce\\x47\\x81\\x1a\\x9b\\x3a\\x69\\x02\\xb6\\x42\\ \\xca\\x6e\\x08\\x05\\x89\\x45\\xd7\\x53\\x8b\\x81\\x17\\x93\\x6c\\x7e\\xc0\\x74\\ \\x7d\\x4c\\x34\\x3e\\x8f\\x43\\x14\\x73\\x12\\x17\\xa9\\x40\\x42\\xfa\\x19\\x8c\\ \\x52\\x19\\xca\\x05\\xba\\x20\\x2e\\x06\\xee\\x3a\\x0c\\xfe\\x76\\x30\\xbf\\x04\\ \\x3e\\x07\\x32\\x80\\xcb\\xa0\\xf9\\x2a\\xd8\\x71\\x11\\xb4\\x87\\xa1\\xdf\\x09\\ \\x83\\x82\\xed\\x90\\xb1\\x0a\\xd8\\x9e\\x08\\xa6\\x05\\xe3\\xc3\\xfc\\xf4\\xb1\\ \\x18\\x14\\x85\\xdc\\x1b\\x80\\x03\\x06\\x74\\x26\\xa2\\xe2\\x04\\x3a\\x1f\\x69\\ \\xc7\\xec\\x1b\\xc3\\xdd\\x28\\x5c\\x8b\\x4d\\x8c\\xd5\\xbd\\xa1\\xf1\\x6a\\xa0\\ \\x18\\x68\\x05\\xb2\\x80\\x1e\\xc0\\x66\\x48\\xff\\x11\\x46\\xee\\x04\\x3f\\xf0\\", "\\x05\\xa2\\x3d\\xed\\x10\\x43\\x0b\\x3c\\xb7\\x8d\\xdf\\x9f\\x86\\xf4\\x3a\\xe8\\ \\x33\\xd5\\x80\\x6d\\x53\\x81\\xee\\x0e\\x36\\xdd\\x0e\\x5e\\x92\\x9d\\xf3\\x8e\\ \\xd9\\xcd\\x4f\\x9e\\xa3\\x38\\x1f\\xc3\\xa2\\xb7\\xd1\\x35\\x60\\xec\\x75\\x5a\\ 
\\x27\\xe4\\x6c\\x62\\x05\\x30\\x6f\\x18\\x58\\xf7\\x01\\xa3\\x09\\x90\\x88\\x89\\ \\x9f\\x1d\\x24\\x53\\x80\\x9b\\x16\\x44\\x2a\\x06\\xed\\x80\\x2b\\x6f\\x5e\\x74\\ \\xde\\xb9\\x58\\xcc\\x04\\xae\\x6b\\x83\\xbc\\x23\\xce\\xf3\\x7f\\x8b\\xc0\\x2e\\ \\x2f\\x9e\\xb4\\x38\\x5b\\x67\\xc0\\xaf\\x77\\x43\\xcb\\x3f\\x40\\x17\\xc5\\x49\\ \\x09\\x86\\x31\\xab\\x23\\x10\\x8a\\x80\\x51\\x8b\\x75\\x63\\x3b\\x4d\\x43\\x20\\ \\x7b\\x0f\\x24\\xaf\\x32\\xe0\\xac\\x1b\\x38\\x0d\\xb4\\x81\\xcf\\x05\\x39\\x35\\ \\x30\\xf8\\x28\\xf4\\xf9\\x12\\x9a\\x16\\x40\\xc4\\x0b\\x1a\\x0d\\x94\\xd8\\x05\\ \\x46\\x57\\x60\\xf4\\xb4\\x20\\xd5\\xd6\\x4b\\x92\\x81\\x6d\\xc0\\xd2\\x12\\x08\\ \\x3c\\xe7\\x54\\xe6\\xc3\\xcd\\x29\\x22\\x18\\x94\\xf2\\x3d\\x09\\xf8\\x68\\x24\\ \\x15\\xe1\\xe6\\x00\\x60\\x3c\\x2e\\x34\\x06\\x98\\xb0\\xd7\\xc1\\xe9\\x61\\x60\\", "\\x16\\x78\\xea\\x1c\\x21\\x1c\\x04\\xa1\\xfb\\xc8\\x9e\\x9f\\x41\\x4d\\x1e\\xc4\\ \\x5e\\x39\\xee\\xa4\\xc2\\x5c\\xb0\\xfa\\x40\\xb5\\xcb\\xce\\x57\\xb6\\x94\\xdb\\ \\x91\\x36\\x06\\xd6\\xc2\\x4b\\x39\\xa1\\x1c\\x2a\\x00\\x11\\x81\\x0b\\xde\\x82\\ \\xad\\x49\\xc4\\xbf\\x7c\\x93\\x0e\\x6e\\x64\\x37\\x06\\xb5\\x18\\x40\\x9c\\xae\\ \\xfc\\x06\\xe1\\x2d\\x98\\x3b\\x3c\\x50\\xb2\\x1a\\x88\\x09\\x12\\x5a\\x38\\x36\\ \\x0b\\xfa\\x06\\xa0\\xe7\\x5c\\xe7\\xc6\\xa6\\x3d\\x13\\xcc\\x3f\\x61\\xc6\\x87\\ \\xbc\\x30\\x3f\\x42\\xa6\\x63\\x2e\\x52\\x9d\\x1b\\x9b\\x06\\x87\\xb5\\xf2\\x1d\\ \\xf2\\x39\\x3b\\x1b\\x98\\x71\\x04\\xdb\\x17\\xec\\x06\\x7a\\x90\\xba\\x60\\x22\\ \\x5d\\x57\\x83\\x31\\x59\\x90\\xfe\\x81\\x8d\\x41\\x2b\\x1b\\xfe\\xf4\\xdb\\x4d\\ \\x4e\\x93\\xc3\\x8b\\x95\\x50\\x0b\\x66\\xe7\\x20\\xba\\x63\\x51\\x08\\x98\\xb9\\ \\x0f\\xc3\\x40\\x37\\xd6\\xca\\x77\\xe8\\xe0\\x16\\xc2\\x24\\x72\\x35\\x01\\x7a\\ \\x02\\x73\\xdc\\xaf\\xe2\\x4e\\x9a\\x00\\xd9\\x41\\x38\\xf8\\x2b\\xd2\\x30\\xa4\\ \\x41\\x48\\x2f\\xdb\\x01\\xa9\\x7d\\x09\\xd2\\x40\\xaf\\xe4\\x2a\\x92\\xe8\\xa7\\", "\\x7c\\xb0\\x03\\xd2\\xbe\\x06\\xf7\\x27\\x5d\\xe0\\xf4\\x28\\x70\\xf9\\x20\\xe5\\ 
\\x04\\x47\\x56\\xed\\xa0\\x6a\\x84\\x49\\x69\\xd8\\x22\\x73\\x85\\x9b\\xb4\\xc7\\ \\xa6\\x42\\x73\\x6f\\x02\\x23\\x9b\\x49\\x78\\x66\\x09\\xee\\x6f\\xc3\\x84\\xa3\\ \\x26\\xbe\\x7d\\x97\\x62\\xb5\\xe7\\xa3\\x59\\x3f\\x62\\xae\\xf6\\xf3\\xf6\\xab\\ \\x11\\xae\\x5f\\x94\\x8f\\x7b\\xe5\\x85\\x7c\\x78\\xaf\\x9f\\xa9\\x89\\xaf\\x52\\ \\x7b\\xf6\\x3a\\x1a\\xc9\\x67\\xe0\\xb7\\x43\\xd9\\xfe\\xf7\\xcf\\x30\\xbf\\x6b\\ \\xde\\xcf\\xdf\\x6a\\x81\\xb9\\x40\\x03\\x6c\\xe9\\x7a\\x88\\x7f\\x54\\x47\\x71\\ \\x7d\\x0d\\x9c\\xee\\x0f\\x87\\xa1\\x79\\x3f\\x1c\\xc7\\xcf\\x50\\x60\\xc6\\xbf\\ \\x2c\\xf8\\x4f\\x7f\\x68\\x1e\\x00\\x1d\\xbd\\xe0\\x60\\x0a\\x08\\xb2\\x42\\x70\\ \\x3e\\xb0\\x28\\x0c\\x49\\x19\\x10\\x9e\\x0a\\x34\\x74\\x03\\x6e\\x82\\xf8\\xc3\\ \\xd0\\xf2\\x02\\x7d\\x2f\\x99\\xc1\\xa8\\x45\\x09\\x14\\xd7\\x42\\x5a\\x38\\x0e\\ \\x1d\\x6d\\x44\\x18\\x88\\xf7\\xb7\\x97\\x30\\x27\\x7e\\x47\\xa8\\xde\\xc0\\xb7\\ \\x29\\x87\\xd6\\xa3\\x23\\xd0\\xa9\\x2c\\x5c\\x8f\\x4d\\xc0\\x38\\xe4\\xa1\\xf7\\", "\\x4d\\xa7\\xa6\\xc5\\xa0\\xe0\\x64\\x22\\x93\\xdf\\x9a\\x8b\\x59\\x19\\x8c\\x92\\ \\xb8\\x0d\\xc8\\x81\\x78\\x14\\xc2\\x64\\x91\\x91\\x68\\xc2\\x56\\x17\\xb8\\xce\\ \\x62\\x8d\\x83\\x0d\\x37\\xc1\\x68\\x82\\xdc\\x70\\x77\\x02\\x2c\\xcc\\x05\\xf2\\ \\xc1\\x30\\xe0\\x9c\\xcd\\x1c\\x7b\\xb3\\x8d\\xde\\xcf\\xc2\\xae\\x3e\\xb0\\x0b\\ \\x88\\xb9\\xc1\\x9d\\x03\\x81\\x81\\xc0\\xd3\\xfb\\xc1\\x3c\\x03\\xec\\x43\\x44\\ \\x11\\xb3\\x49\\x9a\\xf3\\x24\\x7c\\x9c\\x45\\x6c\\x8c\\xa0\\xcb\\xef\\xb8\\xe8\\ \\x0a\\x94\\x63\\x1a\\x3e\\x7c\\x5d\\x93\\xd1\\xf8\\x16\\xd2\\x9a\\x9b\\x70\\x75\\ \\xf6\\x84\\x68\\x6f\\x28\\x3d\\xc3\\xd0\\x4b\\xcb\\xc9\\x8c\\x2c\\x62\\xdf\\xb2\\ \\x9d\\xdc\\xb2\\xfb\\x08\\x1e\\xef\\x11\\x16\\x56\\x26\\x71\\xdb\\xb1\\x5f\\xf8\\ \\x57\\x82\\x87\\x65\\x56\\x0e\\x2c\\xd9\\xde\\x45\\x5a\\x81\\x74\\x27\\x6a\\xac\\ \\x43\\x17\\xcb\\xa3\\xd8\\xa3\\x48\\xd9\\xf9\\xd2\\xe0\\x2e\\xda\\x7d\\x08\\x0d\\ \\x94\\x57\\x7f\\x84\\x90\\x0a\\x3d\\x52\\x62\\xa2\\x94\\x9a\\x2c\\x0d\\x34\\x54\\ 
\\x7f\\x00\\x5d\\x27\\xf4\\x80\\xd0\\x0f\\x42\\xaf\\x0b\\xad\\x15\\xaa\\x8d\\xa3\\", "\\x0a\\x9b\\xc9\\x32\\x9d\\x30\\xd5\\xd5\\xe9\\x91\\x46\\xe0\\xc4\\x18\\xd0\\x83\\ \\x16\\x78\\xea\\x1c\\x21\\x1c\\x04\\xa1\\xfb\\xc8\\x9e\\x9f\\x41\\x4d\\x1e\\xc4\\ \\x5e\\x39\\xee\\xa4\\xc2\\x5c\\xb0\\xfa\\x40\\xb5\\xcb\\xce\\x57\\xb6\\x94\\xdb\\ \\x91\\x36\\x06\\xd6\\xc2\\x4b\\x39\\xa1\\x1c\\x2a\\x00\\x11\\x81\\x0b\\xde\\x82\\ \\xad\\x49\\xc4\\xbf\\x7c\\x93\\x0e\\x6e\\x64\\x37\\x06\\xb5\\x18\\x40\\x9c\\xae\\ \\xfc\\x06\\xe1\\x2d\\x98\\x3b\\x3c\\x50\\xb2\\x1a\\x88\\x09\\x12\\x5a\\x38\\x36\\ \\x0b\\xfa\\x06\\xa0\\xe7\\x5c\\xe7\\xc6\\xa6\\x3d\\x13\\xcc\\x3f\\x61\\xc6\\x87\\ \\xbc\\x30\\x3f\\x42\\xa6\\x63\\x2e\\x52\\x9d\\x1b\\x9b\\x06\\x87\\xb5\\xf2\\x1d\\ \\xf2\\x39\\x3b\\x1b\\x98\\x71\\x04\\xdb\\x17\\xec\\x06\\x7a\\x90\\xba\\x60\\x22\\ \\x5d\\x57\\x83\\x31\\x59\\x90\\xfe\\x81\\x8d\\x41\\x2b\\x1b\\xfe\\xf4\\xdb\\x4d\\ \\x4e\\x93\\xc3\\x8b\\x95\\x50\\x0b\\x66\\xe7\\x20\\xba\\x63\\x51\\x08\\x98\\xb9\\ \\x0f\\xc3\\x40\\x37\\xd6\\xca\\x77\\xe8\\xe0\\x16\\xc2\\x24\\x72\\x35\\x01\\x7a\\ \\x02\\x73\\xdc\\xaf\\xe2\\x4e\\x9a\\x00\\xd9\\x41\\x38\\xf8\\x2b\\xd2\\x30\\xa4\\", "from PyQt5 import QtCore from silx.gui import qt as QtCore qt_resource_data = b\"\\", "\\x6f\\x33\\x0a\\xbf\\x66\\x48\\x05\\xc9\\x12\\x57\\x4a\\xfe\\x21\\xaa\\x2c\\x47\\ \\x3b\\x9a\\x91\\x8a\\x0d\\xc9\\xb8\\x44\\x72\\x4f\\x53\\xec\\xfe\\x02\\x55\\x34\\ \\x22\\xeb\\x30\\xd2\\xa4\\x44\\xc9\\x78\\xca\\x19\\xf6\\xcc\\x97\\x86\\x95\\x48\\ \\xfb\\x90\\x0a\\xa6\\x48\\xfc\\x20\\x79\\x2f\\x96\\x16\\xe3\\x4c\\xbc\\xbe\\x92\\ \\xce\\x19\\x2e\\xcd\\xec\\xa9\\x38\\x47\\xd5\\x4e\\x4c\\x56\\xef\\xbb\\xa4\\xe1\\ \\xdd\\x24\\xbe\\x51\\x05\\x71\\xd5\\x10\\x57\\x98\\xbd\\xd2\\xe8\\x3e\\xd2\\xb9\\ \\x1e\\x69\\x58\\x9a\\x94\\xf0\\x90\\x4e\\x25\\xaf\\x11\\x81\\xa9\\xd9\\xd2\\x3f\\ \\x3c\\x6a\\x6d\\x41\\x3f\\xcb\\xa1\\xd0\\xc4\\x04\\xc9\\x3f\\x48\\xb1\\xaf\\xd0\\ 
\\x1c\\xa1\\xc6\\xdf\\x4c\\xa9\\xb7\\x47\\xa2\\x54\\xe2\\x42\\x89\\xfb\\xa4\\xa4\\ \\x73\\xb4\\xa6\\x02\\xad\\x6e\\x41\\xea\\xe6\\x93\\xb8\\xd5\\x99\\x0d\\xfe\\x4b\\ \\xea\\x31\\x5c\\xf1\\x5f\\x91\\x75\\x10\\xe9\\xbc\\x14\\x27\\xc6\\xbe\\x69\\x9f\\ \\xc4\\x7d\\x49\\xd2\\x55\\x99\\x4e\\xbc\\x9d\\x2a\\x3d\\x89\\xc4\\x74\\x89\\xf7\\ \\xa5\\x31\\xbd\\xa5\\xae\\x97\\x49\\xac\\x52\\x88\\x36\\xa9\\x47\\x81\\xfd\\x3a\\", "\\xa4\\x2a\\x1a\\x14\\x4b\\xb9\\x43\\xba\\x20\\x59\\x61\\xe3\\x65\\xfd\\xb3\\x58\\ \\x7a\\xce\\x1f\\xd6\\xdb\\xee\\x23\\xda\\x53\\x34\\x5f\\x9c\\xe8\\x40\\x0d\\xfb\\ \\x90\\x1e\\x42\\x7f\\xb6\\x23\\x6b\\x13\\xd2\\xf9\\xa6\\xc2\\xff\\x41\\x95\\xeb\\ \\x90\\xd2\\x3d\\x92\\xd1\\x53\\x2a\\x71\\xab\\x6a\\x13\\xfa\\x8f\\xd0\\xea\\xb3\\ \\x28\\x3a\\x16\\x89\\xf3\\x24\\x6e\\x92\\xb8\\x56\\x62\\x92\\x42\\x4f\\xb8\\x55\\ \\x65\\x21\\xdd\\x80\\xc4\\x1d\\x12\\x1f\\x49\\xfc\\x21\\x99\\x9f\\x4b\\x85\\x57\\ \\x4b\\x6b\\x90\\x26\\xba\\x25\\xde\\x95\\xc5\\x59\\x89\\x72\\x69\\x46\\x0f\\xa9\\ \\x70\\x8c\\xe2\\x54\\x2a\\x4a\\x48\\x3a\\xb7\\x97\\xc4\\x1a\\x95\\x63\\x29\\x98\\ \\xfb\\xb0\\xe4\\x9a\\xa5\\x83\\x58\\x6a\\x64\\x9b\\xe4\\x2f\\x52\\x83\\xf7\\x2b\\ \\xad\\xa1\\x55\\xe3\\x86\\x48\\x1f\\x12\\x50\\xa5\\x7b\\xad\\xf4\\xb8\\x21\\x2a\\ \\x6a\\xd1\\xce\\x2a\\xa4\\xb5\\x68\\xb3\\xec\\x82\\xac\\x6b\\xd1\\x9a\\x18\\xd2\\ \\x55\\x48\\xd9\\x48\\x69\\x86\\xaa\\x3f\\x44\\xff\\x16\\x3a\\xb5\\xc6\\x94\\x92\\ \\xd2\\x25\\x86\\x38\\x8b\\xbd\\x47\\x32\\x6e\\x56\\xe4\\xca\\x62\\x9d\\x6a\\x41\\", "\\xe5\\x00\\x47\\x52\\x69\\x89\\x5e\\x4e\\x10\\x48\\x21\\x88\\xcb\\x58\\x08\\xbe\\ \\x00\\x6c\\x01\\xf6\\x3e\\x41\\x2d\\xb9\\x44\\xf1\\x60\\x81\\x9d\\x11\\x93\\xca\\ \\x31\\xb7\\xba\\xe1\\x82\\x9f\\x81\\x02\\x03\\xa2\\x99\\x30\\x14\\x0e\\xa4\\xc0\\ \\x13\\xc7\\x21\\x7b\\x01\\x70\\x36\\xcb\\x09\\x43\\x89\\xc4\\x1f\\x28\\x63\\xe1\\ \\x0a\\x9b\\xc9\\x32\\x9d\\x30\\xd5\\xd5\\xe9\\x91\\x46\\xe0\\xc4\\x18\\xd0\\x83\\ \\x16\\x78\\xea\\x1c\\x21\\x1c\\x04\\xa1\\xfb\\xc8\\x9e\\x9f\\x41\\x4d\\x1e\\xc4\\ 
\\x5e\\x39\\xee\\xa4\\xc2\\x5c\\xb0\\xfa\\x40\\xb5\\xcb\\xce\\x57\\xb6\\x94\\xdb\\ \\x91\\x36\\x06\\xd6\\xc2\\x4b\\x39\\xa1\\x1c\\x2a\\x00\\x11\\x81\\x0b\\xde\\x82\\ \\xad\\x49\\xc4\\xbf\\x7c\\x93\\x0e\\x6e\\x64\\x37\\x06\\xb5\\x18\\x40\\x9c\\xae\\ \\xfc\\x06\\xe1\\x2d\\x98\\x3b\\x3c\\x50\\xb2\\x1a\\x88\\x09\\x12\\x5a\\x38\\x36\\ \\x0b\\xfa\\x06\\xa0\\xe7\\x5c\\xe7\\xc6\\xa6\\x3d\\x13\\xcc\\x3f\\x61\\xc6\\x87\\ \\xbc\\x30\\x3f\\x42\\xa6\\x63\\x2e\\x52\\x9d\\x1b\\x9b\\x06\\x87\\xb5\\xf2\\x1d\\ \\xf2\\x39\\x3b\\x1b\\x98\\x71\\x04\\xdb\\x17\\xec\\x06\\x7a\\x90\\xba\\x60\\x22\\", "\\xb5\\x3a\\x4c\\x4c\\x01\\x0e\\x4a\\xc6\\x33\\x8a\\xd2\\xa4\\x38\\x01\\x1d\\x24\\ \\xa6\\xf6\\xb1\\xcb\\xa4\\xf3\\x5d\\x8a\\xe6\\x4e\\x51\\x1d\\x3f\\xeb\\x3b\\xa4\\ \\xd5\\xc4\\xf4\\x11\\x12\\xf1\\xcf\\x91\\x1e\\x47\\x9a\\x99\\x2d\\xe5\\xf8\\xa5\\ \\x2b\\x4c\\xe9\\x5b\\x54\\x17\\x45\\xd6\\x03\\x6e\\xc9\\xdf\\x5d\\xe2\\x5c\\x7b\\ \\x66\\xe8\\xc9\\x97\\xae\\x34\\xf4\\xe9\\x09\\x7b\\x28\\xba\\x4a\\x68\\xb1\\xd0\\ \\xd9\\x16\\xd4\\xbc\\x1d\\xe9\\x2a\\xbf\\xc4\\x15\\x12\\xf3\\xd5\\x4a\\xab\\x2c\\ \\x8e\\x4b\\xb9\\x23\\xa5\\x67\\x90\\xae\\x2a\\x94\\x58\\xa9\\x06\\xa4\\x20\\x4b\\ \\xed\\xcb\\x0b\\xf6\\x4a\\xbc\\xab\\x38\\xfb\\x55\\x81\\x65\\x4f\\x79\\xf9\\x46\\ \\xed\\x04\\x74\\x9a\\x6a\\x1d\\x24\\xa4\\xad\\x48\\x11\\x3a\\x15\\x34\\x7f\\x94\\ \\xdc\\x53\\x15\\x49\\xdd\\xab\\x36\\xef\\xef\\xfa\\xa2\\xf0\\xa0\\x16\\x76\\x3d\\ \\xa2\\x08\\xeb\\x74\\x77\\x9e\\xe4\\x9a\\xdb\\x9f\\x79\\xc6\\xda\\x14\\xd8\\x38\\ \\x19\\x92\\x8f\\xc1\\xe8\\x30\\xd1\\x2b\\x0d\\x52\\x9e\\xea\\x86\\xf1\\x45\\x02\\ \\x14\\x89\\xc8\\x05\\x89\\x54\\xbd\\xb8\\x8f\\xa5\\x8f\\x07\\xf8\\xfc\\x21\\xe1\\", "\\x37\\x41\\xd6\\xef\\x90\\x17\\xc1\\x2a\\xb6\\x59\\xb2\\x7b\\x3b\\x70\\xc8\\x07\\ \\xc6\\x01\\x60\\x37\\xa4\\x7d\\x41\\x7c\\xee\\xeb\\xb4\\xfc\\xb1\\x14\\xce\\xc6\\ \\x61\\x76\\x16\\xb4\\x5f\\x64\\xab\\x6c\\xea\\x7a\\x98\\xb3\\x01\\x4e\\xcc\\xb1\\ \\xbb\\xac\\x74\\x83\\xcd\\x54\\x64\\x43\\xef\\x03\\x70\\xfe\\x59\\xe2\\xb4\\x12\\ 
\\x66\\x02\\x8c\\xf8\\x89\\xac\\xcc\\x7b\\x71\\x11\\x63\\x1f\\x59\\xd4\\xb4\\x6f\\ \\x87\\xce\\x0c\\x78\\xe0\\x1d\\xba\\x78\\xdf\\x60\\x32\\x27\\xed\\xaf\\xea\\x2c\\ \\x7a\\x44\\xa8\\x79\\x0b\\xd2\\x6c\\xa4\\xbb\\xd1\\x67\\x31\\xb4\\x40\\x48\\xaf\\ \\x21\\xf5\\x44\\x32\\x0b\\xa5\\x9b\\x4c\\xad\\xb0\\xd0\\xb3\\x42\\x2b\\x85\\x1a\\ \\xf6\\x20\\xbd\\x88\\xda\\x8f\\xa1\\x4f\\x84\\x76\\x75\\x22\\xcd\\x44\\x1a\\x8d\\ \\x74\\x77\\x92\\x6a\\x8e\\xd8\\x01\\xcb\\x9a\\xed\\x95\\xdc\\xa5\\x12\\xb3\\x24\\ \\x5e\\xb7\\x21\\xf7\\x86\\x5b\\x9a\\xd4\\xdd\\xce\\xf4\\xec\\x95\\xa6\\xb9\\xa5\\ \\xbc\\xa1\\xb2\\x08\\x48\\x7d\\xc7\\x4b\\xf3\\x3d\\x92\\xb1\\x51\\x41\\x24\\x15\\ \\x0c\\x94\\x6e\\xf1\\xaa\\xd1\\xa8\\xd3\\x7e\\xa4\\xbd\\xc8\\x4e\\x9b\\xf7\\x64\\", "[5, 8, 0]: rcc_version = 1 qt_resource_struct = qt_resource_struct_v1 else: rcc_version = 2", "\\xa1\\xaf\\x85\\xde\\x77\\x66\\x23\\xed\\xfb\\x90\\x6e\\x47\\x32\\x06\\x4b\\x3c\\ \\x27\\x71\\x97\\x74\\x5e\\x9a\\xac\\x16\\xd4\\x20\\x64\\x1d\\x40\\xea\\x9f\\x2e\\ \\x31\\x47\\xe2\\x6a\\xe9\\x46\\xaf\\x62\\x6d\\xce\\xe5\\x76\\x7e\\x77\\x7b\\x4e\\ \\x92\\x3b\\xda\\xbe\\x1f\\x5e\\x89\\x74\\xa5\\x5f\\x62\\xae\\x7d\\xb5\\x6a\\x3c\\ \\x29\\xbd\\xe8\\x91\\xe6\\x23\\x3d\\x85\\xd4\\xbb\\x44\\x31\\x96\\xa9\\x99\\xcd\\ \\x8a\\xf1\\x95\\x94\\x3b\\x52\\xee\\x9e\\x31\\xbb\\xd8\\x8c\\x97\\x80\\xc6\\x5c\\ \\x68\\xac\\xa3\\x35\\xdf\\x6e\\xde\\x5e\\xc0\\xf4\\x27\\xc1\\x58\\xdc\\x17\\xac\\ \\x73\\x1c\\x00\\x45\\xb0\\x6e\\x69\\xe4\\x8f\\xc1\\xc2\\x72\\xfa\\x23\\x82\\x1d\\ \\x1a\\x39\\x05\\x28\\xc5\\x01\\x5e\\x21\\xec\\xbd\\x15\\xe3\\xb1\\x77\\xf1\\x2c\\ \\x0a\\x63\\xd4\\x00\\x87\\x46\\x3b\\x1e\\x6c\\x34\\x7c\\xeb\\xc7\\xf5\\xaf\\x4f\\ \\xed\\xd3\\xa8\\x79\\x9d\\x18\\x93\\x70\\xd7\\x1d\\x83\\xdd\\x83\\xa0\\xc0\\x24\\ \\xb6\\x6e\\x31\\x01\\x6e\\xc6\\x04\\x92\\x38\\x82\\xcb\\x5c\\x08\\x66\\x14\\xd6\\ \\x16\\x51\\x5b\\xbe\\x8d\\x72\\x32\\xd8\\x88\\x97\\x29\\x40\\xbf\\xba\\x7d\\x98\\", 
"\\x26\\x13\\xab\\x7e\\x04\\x77\\x47\\x0d\\x78\\xb7\\x62\\xfc\\xd3\\x42\\x27\\x0d\\ \\xe8\\x16\\x87\\x87\\x4f\\x42\\x61\\x1d\\x94\\xf7\\x87\\x25\\xe9\\x7f\\x99\\x67\\ \\x18\\xdb\\x09\\x17\\x1e\\x06\\x6f\\x1d\\xb8\\x9b\\x0c\\x4e\\x8d\\x16\\x47\\xbb\\ \\xc2\\x19\\xd3\\x2e\\x22\\x0b\\x28\\x8c\\x83\\xab\\x0d\\x7a\\x1d\\x36\\x71\\x9d\\ \\x4e\\xa2\\xa3\\x28\\x08\\xfd\\xe2\\x18\\xc9\\xd0\\x6a\\x40\\x72\\x04\\x32\\x0f\\ \\x01\\x6d\\x2e\\xf0\\x5a\\x90\\x21\\xac\\xae\\x60\\x76\\x02\\x27\\x9c\\x23\\x6d\\ \\x72\\x6e\\x17\\x9a\\x9c\\x4b\\xfa\\x46\\x03\\x12\\x65\\x53\\xe3\\x28\\xc7\\x9d\\ \\x1e\\x73\\x12\\x60\\x93\\x09\\xcd\\x16\\x04\\x4d\\xc8\\xb3\\x60\\x53\\x1b\\x9a\\ \\x25\\xd4\\xf4\\x15\\x52\\x96\\x7d\\x4d\\x1a\\x5e\\x63\\xcf\\x3c\\x1e\\x93\\x2d\\ \\x8e\\xd6\\xbd\\x48\\x45\\x09\\x92\\xbb\\xc4\\xf6\\x59\\x29\\xdd\\xd5\\xf2\\xab\\ \\xa1\\xaf\\x85\\xde\\x77\\x66\\x23\\xed\\xfb\\x90\\x6e\\x47\\x32\\x06\\x4b\\x3c\\ \\x27\\x71\\x97\\x74\\x5e\\x9a\\xac\\x16\\xd4\\x20\\x64\\x1d\\x40\\xea\\x9f\\x2e\\ \\x31\\x47\\xe2\\x6a\\xe9\\x46\\xaf\\x62\\x6d\\xce\\xe5\\x76\\x7e\\x77\\x7b\\x4e\\", "in this file will be lost! 
# from PyQt5 import QtCore from silx.gui", "\\xad\\xa1\\x55\\xe3\\x86\\x48\\x1f\\x12\\x50\\xa5\\x7b\\xad\\xf4\\xb8\\x21\\x2a\\ \\x6a\\xd1\\xce\\x2a\\xa4\\xb5\\x68\\xb3\\xec\\x82\\xac\\x6b\\xd1\\x9a\\x18\\xd2\\ \\x55\\x48\\xd9\\x48\\x69\\x86\\xaa\\x3f\\x44\\xff\\x16\\x3a\\xb5\\xc6\\x94\\x92\\ \\xd2\\x25\\x86\\x38\\x8b\\xbd\\x47\\x32\\x6e\\x56\\xe4\\xca\\x62\\x9d\\x6a\\x41\\ \\x9b\\x84\\x82\\x3f\\x20\\x25\\x96\\x38\\xef\\x7f\\x26\\xb1\\x4c\\xe2\\x5b\\x29\\ \\xfd\\x46\\x69\\x2b\\xd2\\x85\\xa5\\x12\\x6f\\x49\\x6c\\x96\\x78\\x43\\x7a\\xd8\\ \\x23\\xf1\\xaa\\xc4\\x26\\x69\\xd8\\xc5\\x12\\x1f\\xa9\\x91\\x98\\xe4\\xba\\x5d\\ \\x71\\xf6\\xa9\\x81\\xb0\\xc2\\x5c\\x2b\\xb9\\x6e\\xd1\\x7a\\xc2\\xba\\x17\\xe9\\ \\x3b\\x62\\x6a\\xf0\\xbd\\x22\\x0d\\xf5\\xab\\x82\\x45\\x22\\xb2\\x1b\\x55\\xef\\ \\x40\\x91\\x25\\xe8\\xcb\\x38\\x52\\x11\\xd2\\x14\\xa4\\x9b\\x4c\\xc9\\x6b\\x4a\\ \\x49\\x1e\\x55\\x2c\\x43\\x5f\\x06\\x91\\xee\\x43\\x72\\x17\\x4a\\xfc\\x4d\\x62\\ \\x92\\x64\\x9e\\x27\\x2b\\xbb\\x50\\x81\\x95\\xe8\\x47\\x0b\\xad\\x11\\xfa\\x46\\ \\xe8\\x90\\x90\\x16\\x20\\x65\\xa4\\x49\\xdc\\xee\\x2c\\xf2\\x6b\\x89\\xef\\xa4\\", "\\x9f\\x1d\\x24\\x53\\x80\\x9b\\x16\\x44\\x2a\\x06\\xed\\x80\\x2b\\x6f\\x5e\\x74\\ \\xde\\xb9\\x58\\xcc\\x04\\xae\\x6b\\x83\\xbc\\x23\\xce\\xf3\\x7f\\x8b\\xc0\\x2e\\ \\x2f\\x9e\\xb4\\x38\\x5b\\x67\\xc0\\xaf\\x77\\x43\\xcb\\x3f\\x40\\x17\\xc5\\x49\\ \\x09\\x86\\x31\\xab\\x23\\x10\\x8a\\x80\\x51\\x8b\\x75\\x63\\x3b\\x4d\\x43\\x20\\ \\x7b\\x0f\\x24\\xaf\\x32\\xe0\\xac\\x1b\\x38\\x0d\\xb4\\x81\\xcf\\x05\\x39\\x35\\ \\x30\\xf8\\x28\\xf4\\xf9\\x12\\x9a\\x16\\x40\\xc4\\x0b\\x1a\\x0d\\x94\\xd8\\x05\\ \\x46\\x57\\x60\\xf4\\xb4\\x20\\xd5\\xd6\\x4b\\x92\\x81\\x6d\\xc0\\xd2\\x12\\x08\\ \\x3c\\xe7\\x54\\xe6\\xc3\\xcd\\x29\\x22\\x18\\x94\\xf2\\x3d\\x09\\xf8\\x68\\x24\\ \\x15\\xe1\\xe6\\x00\\x60\\x3c\\x2e\\x34\\x06\\x98\\xb0\\xd7\\xc1\\xe9\\x61\\x60\\ \\x08\\x34\\x64\\x40\\xd6\\x63\\x60\\xec\\x02\\xfc\\x49\\x58\\x53\\x23\\xec\\xb8\\ 
\\x39\\xca\\xee\\x7e\\x10\\x31\\xe1\\xbc\\x6a\\x28\\xfd\\x15\\x92\\xc3\\x70\\x70\\ \\x3a\\x54\\x7b\\xa0\\x67\\x08\\x06\\x6c\\x00\\xef\\xcf\\x6e\\x62\\xbd\\x5d\\x44\\ \\x0b\\xc3\\x44\\x4a\\x21\\xad\\x19\\x8c\\x9f\\xbc\\xf0\\x4e\\x1a\\x54\\x5e\\x65\\", "\\xfb\\xb0\\xe4\\x9a\\xa5\\x83\\x58\\x6a\\x64\\x9b\\xe4\\x2f\\x52\\x83\\xf7\\x2b\\ \\xad\\xa1\\x55\\xe3\\x86\\x48\\x1f\\x12\\x50\\xa5\\x7b\\xad\\xf4\\xb8\\x21\\x2a\\ \\x6a\\xd1\\xce\\x2a\\xa4\\xb5\\x68\\xb3\\xec\\x82\\xac\\x6b\\xd1\\x9a\\x18\\xd2\\ \\x55\\x48\\xd9\\x48\\x69\\x86\\xaa\\x3f\\x44\\xff\\x16\\x3a\\xb5\\xc6\\x94\\x92\\ \\xd2\\x25\\x86\\x38\\x8b\\xbd\\x47\\x32\\x6e\\x56\\xe4\\xca\\x62\\x9d\\x6a\\x41\\ \\x9b\\x84\\x82\\x3f\\x20\\x25\\x96\\x38\\xef\\x7f\\x26\\xb1\\x4c\\xe2\\x5b\\x29\\ \\xfd\\x46\\x69\\x2b\\xd2\\x85\\xa5\\x12\\x6f\\x49\\x6c\\x96\\x78\\x43\\x7a\\xd8\\ \\x23\\xf1\\xaa\\xc4\\x26\\x69\\xd8\\xc5\\x12\\x1f\\xa9\\x91\\x98\\xe4\\xba\\x5d\\ \\x71\\xf6\\xa9\\x81\\xb0\\xc2\\x5c\\x2b\\xb9\\x6e\\xd1\\x7a\\xc2\\xba\\x17\\xe9\\ \\x3b\\x62\\x6a\\xf0\\xbd\\x22\\x0d\\xf5\\xab\\x82\\x45\\x22\\xb2\\x1b\\x55\\xef\\ \\x40\\x91\\x25\\xe8\\xcb\\x38\\x52\\x11\\xd2\\x14\\xa4\\x9b\\x4c\\xc9\\x6b\\x4a\\ \\x49\\x1e\\x55\\x2c\\x43\\x5f\\x06\\x91\\xee\\x43\\x72\\x17\\x4a\\xfc\\x4d\\x62\\ \\x92\\x64\\x9e\\x27\\x2b\\xbb\\x50\\x81\\x95\\xe8\\x47\\x0b\\xad\\x11\\xfa\\x46\\", "(Qt v5.12.5) # # WARNING! 
All changes made in this file will be", "\\x15\\xe4\\x88\\xdf\\x2e\\xa0\\x15\\x38\\x0b\\x1c\\x2f\\x82\\xd0\\x5c\\xa0\\xf0\\ \\x57\\x60\\x9d\\xcd\\x63\\x1a\\x0a\\xcb\\x06\\xd0\\x50\\x01\\x91\\x81\\x40\\xce\\ \\x61\\xe7\\x39\\xb9\\xd0\\x39\\xc8\\xc6\\x6e\\x04\\xc0\\xc0\\xcd\\x69\\x3b\\x68\\ \\xe5\\x00\\x47\\x52\\x69\\x89\\x5e\\x4e\\x10\\x48\\x21\\x88\\xcb\\x58\\x08\\xbe\\ \\x00\\x6c\\x01\\xf6\\x3e\\x41\\x2d\\xb9\\x44\\xf1\\x60\\x81\\x9d\\x11\\x93\\xca\\ \\x31\\xb7\\xba\\xe1\\x82\\x9f\\x81\\x02\\x03\\xa2\\x99\\x30\\x14\\x0e\\xa4\\xc0\\ \\x13\\xc7\\x21\\x7b\\x01\\x70\\x36\\xcb\\x09\\x43\\x89\\xc4\\x1f\\x28\\x63\\xe1\\ \\x0a\\x9b\\xc9\\x32\\x9d\\x30\\xd5\\xd5\\xe9\\x91\\x46\\xe0\\xc4\\x18\\xd0\\x83\\ \\x16\\x78\\xea\\x1c\\x21\\x1c\\x04\\xa1\\xfb\\xc8\\x9e\\x9f\\x41\\x4d\\x1e\\xc4\\ \\x5e\\x39\\xee\\xa4\\xc2\\x5c\\xb0\\xfa\\x40\\xb5\\xcb\\xce\\x57\\xb6\\x94\\xdb\\ \\x91\\x36\\x06\\xd6\\xc2\\x4b\\x39\\xa1\\x1c\\x2a\\x00\\x11\\x81\\x0b\\xde\\x82\\ \\xad\\x49\\xc4\\xbf\\x7c\\x93\\x0e\\x6e\\x64\\x37\\x06\\xb5\\x18\\x40\\x9c\\xae\\ \\xfc\\x06\\xe1\\x2d\\x98\\x3b\\x3c\\x50\\xb2\\x1a\\x88\\x09\\x12\\x5a\\x38\\x36\\", "\\xc6\\xed\\x47\\xee\\x02\\xfe\\x04\\x36\\x4f\\x27\\xca\\x62\\x56\\x92\\x4e\\x77\\ \\x1b\\xd8\\xa4\\xb2\\x01\\x1f\\x75\\x98\\xf9\\x8f\\x42\\xcd\\x1c\\x5a\\xcc\\xe1\\ \\xb8\\x83\\x98\\x44\\xb0\\x68\\x02\\x7c\\xc0\\x1e\\xe0\\x9a\\x74\\xa0\\x08\\xa8\\ \\x05\\x16\\x79\\x30\\x82\\x83\\x70\\xd3\\x08\\xc9\\x95\\xd0\\x91\\xe8\\x14\\x60\\ \\x02\\xe9\\xf6\\x8e\\xfa\\x0e\\x50\\x7b\\x67\\x88\\x46\\x20\\x94\\x05\\x89\\x7d\\ \\xa3\\x50\\xd3\\xe2\\x7c\\xae\\x0b\\x60\\x80\\x4a\\xe0\\xf8\\x60\\xdc\\xcf\\x54\\ \\xd9\\x4d\\x45\\x1c\\xf8\\xc2\\xfe\\x21\\xcf\\x09\\x98\\xf9\\x13\\x5c\\xe9\\x3c\\ \\x36\\xd9\\xf9\\xea\\x70\\xc0\\xb7\\x06\\xf7\\xba\\xc5\\x0c\\xe6\\x01\\xd2\\x71\\ \\x93\\x42\\x94\\x44\\x0e\\x63\\x31\\x91\\xfa\\x9a\\x67\\x68\\xe7\\x26\\x16\\x58\\ \\xc9\\xb8\\x5d\\xce\\x77\\xe5\\x34\\xea\\x21\\x60\\x7b\\x29\\x8c\\xbd\\x0c\\xc8\\ 
\\x05\\xd6\\x47\\xa1\\xf2\\x28\\x14\\xc3\\xe9\\x3b\\x0c\\x62\\x45\\xb5\\xc4\\x32\\ \\x6a\\x09\\xf9\\x21\\xec\\x03\\x9f\\x0f\\x3c\\xfd\\xa0\\xc6\\x03\\x41\\xa0\\x3c\\ \\x0f\\x0a\\xbf\\x12\\xe9\\x5b\\x1a\\x61\\xc3\\x17\\xf0\\xe7\\xaf\\xd0\\x2c\\xa8\\", "\\x00\\x05\\ \\x00\\x73\\x5e\\x63\\ \\x00\\x6c\\ \\x00\\x6f\\x00\\x67\\x00\\x6f\\x00\\x73\\ \\x00\\x08\\ \\x0a\\x61\\x5a\\xa7\\ \\x00\\x69\\ \\x00\\x63\\x00\\x6f\\x00\\x6e\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\ \" qt_resource_struct_v1 = b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\", "\\xb8\\x83\\x98\\x44\\xb0\\x68\\x02\\x7c\\xc0\\x1e\\xe0\\x9a\\x74\\xa0\\x08\\xa8\\ \\x05\\x16\\x79\\x30\\x82\\x83\\x70\\xd3\\x08\\xc9\\x95\\xd0\\x91\\xe8\\x14\\x60\\ \\x02\\xe9\\xf6\\x8e\\xfa\\x0e\\x50\\x7b\\x67\\x88\\x46\\x20\\x94\\x05\\x89\\x7d\\ \\xa3\\x50\\xd3\\xe2\\x7c\\xae\\x0b\\x60\\x80\\x4a\\xe0\\xf8\\x60\\xdc\\xcf\\x54\\ \\xd9\\x4d\\x45\\x1c\\xf8\\xc2\\xfe\\x21\\xcf\\x09\\x98\\xf9\\x13\\x5c\\xe9\\x3c\\ \\x36\\xd9\\xf9\\xea\\x70\\xc0\\xb7\\x06\\xf7\\xba\\xc5\\x0c\\xe6\\x01\\xd2\\x71\\ \\x93\\x42\\x94\\x44\\x0e\\x63\\x31\\x91\\xfa\\x9a\\x67\\x68\\xe7\\x26\\x16\\x58\\ \\xc9\\xb8\\x5d\\xce\\x77\\xe5\\x34\\xea\\x21\\x60\\x7b\\x29\\x8c\\xbd\\x0c\\xc8\\ \\x05\\xd6\\x47\\xa1\\xf2\\x28\\x14\\xc3\\xe9\\x3b\\x0c\\x62\\x45\\xb5\\xc4\\x32\\ \\x6a\\x09\\xf9\\x21\\xec\\x03\\x9f\\x0f\\x3c\\xfd\\xa0\\xc6\\x03\\x41\\xa0\\x3c\\ \\x0f\\x0a\\xbf\\x12\\xe9\\x5b\\x1a\\x61\\xc3\\x17\\xf0\\xe7\\xaf\\xd0\\x2c\\xa8\\ \\x2e\\x80\\xa6\\x49\\xc0\\x14\\xec\\x4f\\x36\\x00\\x27\\x81\\xef\\x60\\x76\\x0d\\ \\xfc\\xd3\\x81\\x45\\x14\\x38\\x88\\xcd\\x1e\\x06\\xe0\\x8b\\x62\\x10\\x26\\x8f\\", "< [5, 8, 0]: rcc_version = 1 qt_resource_struct = qt_resource_struct_v1 else: rcc_version =", "\\x00\\x99\\x17\\x16\\xc1\\xf2\\xa9\\xf6\\x26\\x50\\x0f\\xac\\x84\\xe9\\x6f\\xd0\\ \\xf1\\x58\\x1b\\x7c\\x9b\\xe1\\x38\\xe7\\x3a\\xb8\\xf8\\xbf\\x50\\x93\\xe8\\x10\\ 
\\x76\\x13\\x24\\xc8\\x76\\x2f\\x29\\x7b\\xc8\\xa0\\x02\\xe2\\xd7\\xc3\\x0f\\x17\\ \\x62\\x5e\\x3e\\x8b\\x62\\x3e\\xa0\\x17\\x10\\xc1\\xe4\\x20\\x43\\x09\\xad\\xdb\\ \\x08\\xcd\\x26\\x8c\\x01\\x33\\xa1\\x4c\\x8c\\x05\\x76\\x25\\x3a\\x8a\\x5e\\x0c\\ \\xbe\\xcd\\xf0\\x78\\x15\\x6c\\x04\\x0e\\xdd\\x08\\x64\\x05\\x6c\\xb5\\x68\\xea\\ \\x81\\xe7\\x49\\x17\\x33\\xd7\\xc3\\x20\\xcb\\x76\\xcc\\x3e\\xa7\\x10\\x0f\\x10\\ \\x01\\x86\\x1e\\x87\\x8c\\xcb\\xdc\\x70\\xf4\\xaf\\xe0\\xeb\\x05\\x1a\\xe1\\xf2\\ \\xfd\\xe8\\xe5\\x0e\\xfc\\x2f\\x02\\xdb\\x6f\\x03\\xae\\xb0\\xe9\\x76\\x64\\x3d\\ \\x54\\x25\\x3a\\x2a\\x6f\\xc1\\x59\\xcb\\x66\\xea\\xdc\\x1d\\x58\\x18\\x74\\xd0\\ \\x07\\x6a\\x27\\x41\\x69\\x8c\\x94\\x21\\x8f\\x30\\x90\\x1a\\x92\\x9c\\xdf\\x5c\\ \\x1b\\xef\\x43\\x78\\xdd\\x2a\\xa8\\xc9\\xc5\\xc4\\x65\\x92\\x07\\x18\\x09\\xb6\\ \\xa2\\x33\\xd4\\xb6\\xb1\\x05\\x6b\\xed\\x84\\x5d\\x56\\x0a\\x4c\\x32\\x00\\xc0\\", "\\x02\\x73\\xdc\\xaf\\xe2\\x4e\\x9a\\x00\\xd9\\x41\\x38\\xf8\\x2b\\xd2\\x30\\xa4\\ \\x41\\x48\\x2f\\xdb\\x01\\xa9\\x7d\\x09\\xd2\\x40\\xaf\\xe4\\x2a\\x92\\xe8\\xa7\\ \\xce\\xfb\\x0d\\xbd\\x1e\\x43\\xcb\\x85\\x2a\\xcb\\x50\\xe0\\x15\\x14\\x7e\\xd3\\ \\xd0\\xd7\\x16\\xfa\\xde\\x42\\x91\\xcf\\x91\\x35\\x07\\x35\\x6f\\x40\\xe5\\x42\\ \\x2d\\x9d\\xc8\\x7a\\xd0\\x70\\x02\\xd8\\x42\\x89\\xb7\\xa5\\x82\\x0b\\x14\\xd9\\ \\x87\\x34\\x0b\\x89\\xc5\\xf6\\x08\\xe1\\xda\\x22\\xbb\\xb1\\x79\\x59\\xe2\\x4f\\ \\xc9\\x7b\\x8b\\xf4\\x3c\\xb2\\x78\\x4d\\x47\\x88\\xab\\x95\\x93\\xd2\\x2d\\x48\\ \\x09\\x97\\xc9\\xa2\\x5a\\xe2\\xa8\\xe2\\x74\\xd8\\x73\\x94\\x84\\x71\\xd2\\x84\\ \\x54\\x29\\xad\\x44\\x1a\\x93\\x22\\x1a\\xd7\\x20\\x8d\\xb0\\x67\\x22\\x95\\xb5\\ \\xa8\\x7a\\x1f\\x52\\xae\\xcb\\x59\\xc4\\x20\\xb5\\x5d\\xe3\\xd5\\x8f\\xad\\x68\\ \\x6f\\x33\\x0a\\xbf\\x66\\x48\\x05\\xc9\\x12\\x57\\x4a\\xfe\\x21\\xaa\\x2c\\x47\\ \\x3b\\x9a\\x91\\x8a\\x0d\\xc9\\xb8\\x44\\x72\\x4f\\x53\\xec\\xfe\\x02\\x55\\x34\\ \\x22\\xeb\\x30\\xd2\\xa4\\x44\\xc9\\x78\\xca\\x19\\xf6\\xcc\\x97\\x86\\x95\\x48\\", 
"\\x15\\xd3\\xa9\\x26\\x8b\\x0c\\x3c\\x64\\xf0\\x2b\\xc6\\x34\\xa1\\x98\\xd3\\xe7\\ \\x25\\x0e\\x54\\x5a\\x1c\\x93\\x5d\\xef\\xc0\\x26\\xc1\\x69\\xe6\\xae\\x4e\\x4b\\ \\xfe\\x95\\xef\\xb3\\x9d\\xbe\\xf8\\x2b\\xb8\\x02\\x0c\\x88\\x43\\x5e\\x04\\x9a\\ \\x0c\\x30\\x3d\\x90\\x64\\x42\\xd0\\x70\\xd2\\x6b\\x1c\\x5c\\x07\\xc0\\xac\\xb0\\ \\xe1\\x6b\\x0d\\x00\\xb3\\xd5\\x89\\xd4\\x01\\x87\\x92\\xba\\x3b\\x8b\\x71\\x3b\\ \\xff\\x6f\\xea\\x03\\x4d\\x1d\\x30\\xa4\\x1a\\x4e\\x5a\\xf6\\xae\\xf5\\x07\\x56\\ \\x26\\x13\\xab\\x7e\\x04\\x77\\x47\\x0d\\x78\\xb7\\x62\\xfc\\xd3\\x42\\x27\\x0d\\ \\xe8\\x16\\x87\\x87\\x4f\\x42\\x61\\x1d\\x94\\xf7\\x87\\x25\\xe9\\x7f\\x99\\x67\\ \\x18\\xdb\\x09\\x17\\x1e\\x06\\x6f\\x1d\\xb8\\x9b\\x0c\\x4e\\x8d\\x16\\x47\\xbb\\ \\xc2\\x19\\xd3\\x2e\\x22\\x0b\\x28\\x8c\\x83\\xab\\x0d\\x7a\\x1d\\x36\\x71\\x9d\\ \\x4e\\xa2\\xa3\\x28\\x08\\xfd\\xe2\\x18\\xc9\\xd0\\x6a\\x40\\x72\\x04\\x32\\x0f\\ \\x01\\x6d\\x2e\\xf0\\x5a\\x90\\x21\\xac\\xae\\x60\\x76\\x02\\x27\\x9c\\x23\\x6d\\ \\x72\\x6e\\x17\\x9a\\x9c\\x4b\\xfa\\x46\\x03\\x12\\x65\\x53\\xe3\\x28\\xc7\\x9d\\", "\\xa3\\x50\\xd3\\xe2\\x7c\\xae\\x0b\\x60\\x80\\x4a\\xe0\\xf8\\x60\\xdc\\xcf\\x54\\ \\xd9\\x4d\\x45\\x1c\\xf8\\xc2\\xfe\\x21\\xcf\\x09\\x98\\xf9\\x13\\x5c\\xe9\\x3c\\ \\x36\\xd9\\xf9\\xea\\x70\\xc0\\xb7\\x06\\xf7\\xba\\xc5\\x0c\\xe6\\x01\\xd2\\x71\\ \\x93\\x42\\x94\\x44\\x0e\\x63\\x31\\x91\\xfa\\x9a\\x67\\x68\\xe7\\x26\\x16\\x58\\ \\xc9\\xb8\\x5d\\xce\\x77\\xe5\\x34\\xea\\x21\\x60\\x7b\\x29\\x8c\\xbd\\x0c\\xc8\\ \\x05\\xd6\\x47\\xa1\\xf2\\x28\\x14\\xc3\\xe9\\x3b\\x0c\\x62\\x45\\xb5\\xc4\\x32\\ \\x6a\\x09\\xf9\\x21\\xec\\x03\\x9f\\x0f\\x3c\\xfd\\xa0\\xc6\\x03\\x41\\xa0\\x3c\\ \\x0f\\x0a\\xbf\\x12\\xe9\\x5b\\x1a\\x61\\xc3\\x17\\xf0\\xe7\\xaf\\xd0\\x2c\\xa8\\ \\x2e\\x80\\xa6\\x49\\xc0\\x14\\xec\\x4f\\x36\\x00\\x27\\x81\\xef\\x60\\x76\\x0d\\ \\xfc\\xd3\\x81\\x45\\x14\\x38\\x88\\xcd\\x1e\\x06\\xe0\\x8b\\x62\\x10\\x26\\x8f\\ \\x18\\xb5\\x24\\xd0\\x8c\\x41\\x3a\\xb3\\x39\\x0c\\x84\\x28\\xa1\\x37\\x70\\x15\\ 
\\xe0\\x6e\\xc0\\xc4\\x87\\x45\\xcc\\x39\\x91\\x62\\x20\\x25\\xe8\\x3c\\x34\\x05\\ \\x88\\x79\\xc0\\xf4\\xc2\\xe8\\x36\\x22\\xb7\\x59\\x54\\x03\\x1d\\x06\\xb8\\xbd\\", "\\x0f\\x0a\\xbf\\x12\\xe9\\x5b\\x1a\\x61\\xc3\\x17\\xf0\\xe7\\xaf\\xd0\\x2c\\xa8\\ \\x2e\\x80\\xa6\\x49\\xc0\\x14\\xec\\x4f\\x36\\x00\\x27\\x81\\xef\\x60\\x76\\x0d\\ \\xfc\\xd3\\x81\\x45\\x14\\x38\\x88\\xcd\\x1e\\x06\\xe0\\x8b\\x62\\x10\\x26\\x8f\\ \\x18\\xb5\\x24\\xd0\\x8c\\x41\\x3a\\xb3\\x39\\x0c\\x84\\x28\\xa1\\x37\\x70\\x15\\ \\xe0\\x6e\\xc0\\xc4\\x87\\x45\\xcc\\x39\\x91\\x62\\x20\\x25\\xe8\\x3c\\x34\\x05\\ \\x88\\x79\\xc0\\xf4\\xc2\\xe8\\x36\\x22\\xb7\\x59\\x54\\x03\\x1d\\x06\\xb8\\xbd\\ \\xa0\\x64\\x68\\xf4\\xd8\\x20\\xf3\\x3b\\x48\\xf7\\x01\\x4d\\x09\\x10\\xbd\\x10\\ \\x32\\x87\\x05\\x09\\xb9\\xcb\\xf0\\x76\\x82\\xe7\\x87\\x72\\x98\\xb7\\x1b\\x6a\\ \\x9f\\x71\\x8e\\x7b\\xa7\\x8d\\x9d\\x8b\\x6a\\x60\\x2c\\xd0\\xe2\\xf4\\x7b\\xb2\\ \\x53\\x40\\xa7\\x43\\x83\\x56\\x04\\xa8\\xc7\\xcf\\x59\\xb2\\x30\\x38\\x8b\\x9f\\ \\x6c\\x4e\\x72\\x3e\\x71\\x92\\x8c\\xf7\\x71\\xa9\\x1b\\x85\\x0c\\xc4\\x5d\\x48\\ \\x0c\\x37\\x50\\xee\\xb4\\xdd\\x2c\\x60\\xf8\\x77\\xc0\\x07\\xb6\\x25\\x22\\x16\\ \\xb5\\x51\\xb0\\xb4\\x88\\x9e\\x1f\\xb6\\xd3\\xd3\\x48\\x00\\xb7\\x0f\\x92\\x5b\\", "\\x11\\xae\\x5f\\x94\\x8f\\x7b\\xe5\\x85\\x7c\\x78\\xaf\\x9f\\xa9\\x89\\xaf\\x52\\ \\x7b\\xf6\\x3a\\x1a\\xc9\\x67\\xe0\\xb7\\x43\\xd9\\xfe\\xf7\\xcf\\x30\\xbf\\x6b\\ \\xde\\xcf\\xdf\\x6a\\x81\\xb9\\x40\\x03\\x6c\\xe9\\x7a\\x88\\x7f\\x54\\x47\\x71\\ \\x7d\\x0d\\x9c\\xee\\x0f\\x87\\xa1\\x79\\x3f\\x1c\\xc7\\xcf\\x50\\x60\\xc6\\xbf\\ \\x2c\\xf8\\x4f\\x7f\\x68\\x1e\\x00\\x1d\\xbd\\xe0\\x60\\x0a\\x08\\xb2\\x42\\x70\\ \\x3e\\xb0\\x28\\x0c\\x49\\x19\\x10\\x9e\\x0a\\x34\\x74\\x03\\x6e\\x82\\xf8\\xc3\\ \\xd0\\xf2\\x02\\x7d\\x2f\\x99\\xc1\\xa8\\x45\\x09\\x14\\xd7\\x42\\x5a\\x38\\x0e\\ \\x1d\\x6d\\x44\\x18\\x88\\xf7\\xb7\\x97\\x30\\x27\\x7e\\x47\\xa8\\xde\\xc0\\xb7\\ \\x29\\x87\\xd6\\xa3\\x23\\xd0\\xa9\\x2c\\x5c\\x8f\\x4d\\xc0\\x38\\xe4\\xa1\\xf7\\ 
\\xeb\\x29\\xf0\\x99\\x8b\\xe3\\xea\\x46\\x60\\xed\\x83\\x64\\x86\\xfb\\x91\\x1c\\ \\x4d\\xa7\\xa6\\xc5\\xa0\\xe0\\x64\\x22\\x93\\xdf\\x9a\\x8b\\x59\\x19\\x8c\\x92\\ \\xb8\\x0d\\xc8\\x81\\x78\\x14\\xc2\\x64\\x91\\x91\\x68\\xc2\\x56\\x17\\xb8\\xce\\ \\x62\\x8d\\x83\\x0d\\x37\\xc1\\x68\\x82\\xdc\\x70\\x77\\x02\\x2c\\xcc\\x05\\xf2\\", "\\x1e\\x73\\x12\\x60\\x93\\x09\\xcd\\x16\\x04\\x4d\\xc8\\xb3\\x60\\x53\\x1b\\x9a\\ \\x25\\xd4\\xf4\\x15\\x52\\x96\\x7d\\x4d\\x1a\\x5e\\x63\\xcf\\x3c\\x1e\\x93\\x2d\\ \\x8e\\xd6\\xbd\\x48\\x45\\x09\\x92\\xbb\\xc4\\xf6\\x59\\x29\\xdd\\xd5\\xf2\\xab\\ \\xa1\\xaf\\x85\\xde\\x77\\x66\\x23\\xed\\xfb\\x90\\x6e\\x47\\x32\\x06\\x4b\\x3c\\ \\x27\\x71\\x97\\x74\\x5e\\x9a\\xac\\x16\\xd4\\x20\\x64\\x1d\\x40\\xea\\x9f\\x2e\\ \\x31\\x47\\xe2\\x6a\\xe9\\x46\\xaf\\x62\\x6d\\xce\\xe5\\x76\\x7e\\x77\\x7b\\x4e\\ \\x92\\x3b\\xda\\xbe\\x1f\\x5e\\x89\\x74\\xa5\\x5f\\x62\\xae\\x7d\\xb5\\x6a\\x3c\\ \\x29\\xbd\\xe8\\x91\\xe6\\x23\\x3d\\x85\\xd4\\xbb\\x44\\x31\\x96\\xa9\\x99\\xcd\\ \\x8a\\xf1\\x95\\x94\\x3b\\x52\\xee\\x9e\\x31\\xbb\\xd8\\x8c\\x97\\x80\\xc6\\x5c\\ \\x68\\xac\\xa3\\x35\\xdf\\x6e\\xde\\x5e\\xc0\\xf4\\x27\\xc1\\x58\\xdc\\x17\\xac\\ \\x73\\x1c\\x00\\x45\\xb0\\x6e\\x69\\xe4\\x8f\\xc1\\xc2\\x72\\xfa\\x23\\x82\\x1d\\ \\x1a\\x39\\x05\\x28\\xc5\\x01\\x5e\\x21\\xec\\xbd\\x15\\xe3\\xb1\\x77\\xf1\\x2c\\ \\x0a\\x63\\xd4\\x00\\x87\\x46\\x3b\\x1e\\x6c\\x34\\x7c\\xeb\\xc7\\xf5\\xaf\\x4f\\", "\\x27\\x71\\x97\\x74\\x5e\\x9a\\xac\\x16\\xd4\\x20\\x64\\x1d\\x40\\xea\\x9f\\x2e\\ \\x31\\x47\\xe2\\x6a\\xe9\\x46\\xaf\\x62\\x6d\\xce\\xe5\\x76\\x7e\\x77\\x7b\\x4e\\ \\x92\\x3b\\xda\\xbe\\x1f\\x5e\\x89\\x74\\xa5\\x5f\\x62\\xae\\x7d\\xb5\\x6a\\x3c\\ \\x29\\xbd\\xe8\\x91\\xe6\\x23\\x3d\\x85\\xd4\\xbb\\x44\\x31\\x96\\xa9\\x99\\xcd\\ \\x8a\\xf1\\x95\\x94\\x3b\\x52\\xee\\x9e\\x31\\xbb\\xd8\\x8c\\x97\\x80\\xc6\\x5c\\ \\x68\\xac\\xa3\\x35\\xdf\\x6e\\xde\\x5e\\xc0\\xf4\\x27\\xc1\\x58\\xdc\\x17\\xac\\ \\x73\\x1c\\x00\\x45\\xb0\\x6e\\x69\\xe4\\x8f\\xc1\\xc2\\x72\\xfa\\x23\\x82\\x1d\\ 
\\x1a\\x39\\x05\\x28\\xc5\\x01\\x5e\\x21\\xec\\xbd\\x15\\xe3\\xb1\\x77\\xf1\\x2c\\ \\x0a\\x63\\xd4\\x00\\x87\\x46\\x3b\\x1e\\x6c\\x34\\x7c\\xeb\\xc7\\xf5\\xaf\\x4f\\ \\xed\\xd3\\xa8\\x79\\x9d\\x18\\x93\\x70\\xd7\\x1d\\x83\\xdd\\x83\\xa0\\xc0\\x24\\ \\xb6\\x6e\\x31\\x01\\x6e\\xc6\\x04\\x92\\x38\\x82\\xcb\\x5c\\x08\\x66\\x14\\xd6\\ \\x16\\x51\\x5b\\xbe\\x8d\\x72\\x32\\xd8\\x88\\x97\\x29\\x40\\xbf\\xba\\x7d\\x98\\ \\x0d\\x09\\x10\\x6f\\x05\\x8e\\x02\\x9e\\x10\\xc4\\x60\\x67\\xa9\\x9d\\xfd\\xaf\\", "\\x8e\\x4b\\xb9\\x23\\xa5\\x67\\x90\\xae\\x2a\\x94\\x58\\xa9\\x06\\xa4\\x20\\x4b\\ \\xed\\xcb\\x0b\\xf6\\x4a\\xbc\\xab\\x38\\xfb\\x55\\x81\\x65\\x4f\\x79\\xf9\\x46\\ \\xed\\x04\\x74\\x9a\\x6a\\x1d\\x24\\xa4\\xad\\x48\\x11\\x3a\\x15\\x34\\x7f\\x94\\ \\xdc\\x53\\x15\\x49\\xdd\\xab\\x36\\xef\\xef\\xfa\\xa2\\xf0\\xa0\\x16\\x76\\x3d\\ \\xa2\\x08\\xeb\\x74\\x77\\x9e\\xe4\\x9a\\xdb\\x9f\\x79\\xc6\\xda\\x14\\xd8\\x38\\ \\x19\\x92\\x8f\\xc1\\xe8\\x30\\xd1\\x2b\\x0d\\x52\\x9e\\xea\\x86\\xf1\\x45\\x02\\ \\x14\\x89\\xc8\\x05\\x89\\x54\\xbd\\xb8\\x8f\\xa5\\x8f\\x07\\xf8\\xfc\\x21\\xe1\\ \\xca\\xb0\\xc5\\x32\\xd9\\x21\\xcd\\xfa\\x20\\x44\\x4a\\x81\\x1b\\xa2\\xf8\\x87\\ \\x9f\\x80\\xd0\\x16\\x12\\x2a\\x4f\\x41\\xd4\\x87\\xd1\\x31\\x02\\xca\\x2c\\x22\\ \\xb3\\x77\\x11\\x9d\\xba\\x12\\xef\\xba\\x18\\x66\\x74\\x24\\xee\\x3e\\x9f\\x41\\ \\x53\\x00\\xb8\\x80\\x28\\x25\\xf8\\x68\\xc6\\x4b\\x39\\x71\\xd2\\x30\\xc8\\xe6\\ \\x77\\xba\\x10\\x26\\xca\\x20\\x76\\xe1\\x3f\\x6f\\x01\\xee\\xd0\\x49\\xde\\xf0\\ \\xcd\\x23\\xd2\\x69\\xb1\\xc2\\x5f\\xcc\\x65\\x9d\\xd5\\x84\\x5b\\x45\\xa3\\xbf\\", "[int(v) for v in QtCore.qVersion().split('.')] if qt_version < [5, 8, 0]: rcc_version =", "\\x1e\\x26\\x03\\x6d\\x18\\xfe\\x15\\x50\\xd0\\x08\\xa5\\x11\\x4c\\xef\\x1a\\x5c\\ \\x15\\xd3\\xa9\\x26\\x8b\\x0c\\x3c\\x64\\xf0\\x2b\\xc6\\x34\\xa1\\x98\\xd3\\xe7\\ \\x25\\x0e\\x54\\x5a\\x1c\\x93\\x5d\\xef\\xc0\\x26\\xc1\\x69\\xe6\\xae\\x4e\\x4b\\ 
\\xfe\\x95\\xef\\xb3\\x9d\\xbe\\xf8\\x2b\\xb8\\x02\\x0c\\x88\\x43\\x5e\\x04\\x9a\\ \\x0c\\x30\\x3d\\x90\\x64\\x42\\xd0\\x70\\xd2\\x6b\\x1c\\x5c\\x07\\xc0\\xac\\xb0\\ \\xe1\\x6b\\x0d\\x00\\xb3\\xd5\\x89\\xd4\\x01\\x87\\x92\\xba\\x3b\\x8b\\x71\\x3b\\ \\xff\\x6f\\xea\\x03\\x4d\\x1d\\x30\\xa4\\x1a\\x4e\\x5a\\xf6\\xae\\xf5\\x07\\x56\\ \\x26\\x13\\xab\\x7e\\x04\\x77\\x47\\x0d\\x78\\xb7\\x62\\xfc\\xd3\\x42\\x27\\x0d\\ \\xe8\\x16\\x87\\x87\\x4f\\x42\\x61\\x1d\\x94\\xf7\\x87\\x25\\xe9\\x7f\\x99\\x67\\ \\x18\\xdb\\x09\\x17\\x1e\\x06\\x6f\\x1d\\xb8\\x9b\\x0c\\x4e\\x8d\\x16\\x47\\xbb\\ \\xc2\\x19\\xd3\\x2e\\x22\\x0b\\x28\\x8c\\x83\\xab\\x0d\\x7a\\x1d\\x36\\x71\\x9d\\ \\x4e\\xa2\\xa3\\x28\\x08\\xfd\\xe2\\x18\\xc9\\xd0\\x6a\\x40\\x72\\x04\\x32\\x0f\\ \\x01\\x6d\\x2e\\xf0\\x5a\\x90\\x21\\xac\\xae\\x60\\x76\\x02\\x27\\x9c\\x23\\x6d\\", "\\x0f\\xc3\\x40\\x37\\xd6\\xca\\x77\\xe8\\xe0\\x16\\xc2\\x24\\x72\\x35\\x01\\x7a\\ \\x02\\x73\\xdc\\xaf\\xe2\\x4e\\x9a\\x00\\xd9\\x41\\x38\\xf8\\x2b\\xd2\\x30\\xa4\\ \\x41\\x48\\x2f\\xdb\\x01\\xa9\\x7d\\x09\\xd2\\x40\\xaf\\xe4\\x2a\\x92\\xe8\\xa7\\ \\xce\\xfb\\x0d\\xbd\\x1e\\x43\\xcb\\x85\\x2a\\xcb\\x50\\xe0\\x15\\x14\\x7e\\xd3\\ \\xd0\\xd7\\x16\\xfa\\xde\\x42\\x91\\xcf\\x91\\x35\\x07\\x35\\x6f\\x40\\xe5\\x42\\ \\x2d\\x9d\\xc8\\x7a\\xd0\\x70\\x02\\xd8\\x42\\x89\\xb7\\xa5\\x82\\x0b\\x14\\xd9\\ \\x87\\x34\\x0b\\x89\\xc5\\xf6\\x08\\xe1\\xda\\x22\\xbb\\xb1\\x79\\x59\\xe2\\x4f\\ \\xc9\\x7b\\x8b\\xf4\\x3c\\xb2\\x78\\x4d\\x47\\x88\\xab\\x95\\x93\\xd2\\x2d\\x48\\ \\x09\\x97\\xc9\\xa2\\x5a\\xe2\\xa8\\xe2\\x74\\xd8\\x73\\x94\\x84\\x71\\xd2\\x84\\ \\x54\\x29\\xad\\x44\\x1a\\x93\\x22\\x1a\\xd7\\x20\\x8d\\xb0\\x67\\x22\\x95\\xb5\\ \\xa8\\x7a\\x1f\\x52\\xae\\xcb\\x59\\xc4\\x20\\xb5\\x5d\\xe3\\xd5\\x8f\\xad\\x68\\ \\x6f\\x33\\x0a\\xbf\\x66\\x48\\x05\\xc9\\x12\\x57\\x4a\\xfe\\x21\\xaa\\x2c\\x47\\ \\x3b\\x9a\\x91\\x8a\\x0d\\xc9\\xb8\\x44\\x72\\x4f\\x53\\xec\\xfe\\x02\\x55\\x34\\", "\\x91\\x36\\x06\\xd6\\xc2\\x4b\\x39\\xa1\\x1c\\x2a\\x00\\x11\\x81\\x0b\\xde\\x82\\ 
\\xad\\x49\\xc4\\xbf\\x7c\\x93\\x0e\\x6e\\x64\\x37\\x06\\xb5\\x18\\x40\\x9c\\xae\\ \\xfc\\x06\\xe1\\x2d\\x98\\x3b\\x3c\\x50\\xb2\\x1a\\x88\\x09\\x12\\x5a\\x38\\x36\\ \\x0b\\xfa\\x06\\xa0\\xe7\\x5c\\xe7\\xc6\\xa6\\x3d\\x13\\xcc\\x3f\\x61\\xc6\\x87\\ \\xbc\\x30\\x3f\\x42\\xa6\\x63\\x2e\\x52\\x9d\\x1b\\x9b\\x06\\x87\\xb5\\xf2\\x1d\\ \\xf2\\x39\\x3b\\x1b\\x98\\x71\\x04\\xdb\\x17\\xec\\x06\\x7a\\x90\\xba\\x60\\x22\\ \\x5d\\x57\\x83\\x31\\x59\\x90\\xfe\\x81\\x8d\\x41\\x2b\\x1b\\xfe\\xf4\\xdb\\x4d\\ \\x4e\\x93\\xc3\\x8b\\x95\\x50\\x0b\\x66\\xe7\\x20\\xba\\x63\\x51\\x08\\x98\\xb9\\ \\x0f\\xc3\\x40\\x37\\xd6\\xca\\x77\\xe8\\xe0\\x16\\xc2\\x24\\x72\\x35\\x01\\x7a\\ \\x02\\x73\\xdc\\xaf\\xe2\\x4e\\x9a\\x00\\xd9\\x41\\x38\\xf8\\x2b\\xd2\\x30\\xa4\\ \\x41\\x48\\x2f\\xdb\\x01\\xa9\\x7d\\x09\\xd2\\x40\\xaf\\xe4\\x2a\\x92\\xe8\\xa7\\ \\xce\\xfb\\x0d\\xbd\\x1e\\x43\\xcb\\x85\\x2a\\xcb\\x50\\xe0\\x15\\x14\\x7e\\xd3\\ \\xd0\\xd7\\x16\\xfa\\xde\\x42\\x91\\xcf\\x91\\x35\\x07\\x35\\x6f\\x40\\xe5\\x42\\", "\\x71\\xf6\\xa9\\x81\\xb0\\xc2\\x5c\\x2b\\xb9\\x6e\\xd1\\x7a\\xc2\\xba\\x17\\xe9\\ \\x3b\\x62\\x6a\\xf0\\xbd\\x22\\x0d\\xf5\\xab\\x82\\x45\\x22\\xb2\\x1b\\x55\\xef\\ \\x40\\x91\\x25\\xe8\\xcb\\x38\\x52\\x11\\xd2\\x14\\xa4\\x9b\\x4c\\xc9\\x6b\\x4a\\ \\x49\\x1e\\x55\\x2c\\x43\\x5f\\x06\\x91\\xee\\x43\\x72\\x17\\x4a\\xfc\\x4d\\x62\\ \\x92\\x64\\x9e\\x27\\x2b\\xbb\\x50\\x81\\x95\\xe8\\x47\\x0b\\xad\\x11\\xfa\\x46\\ \\xe8\\x90\\x90\\x16\\x20\\x65\\xa4\\x49\\xdc\\xee\\x2c\\xf2\\x6b\\x89\\xef\\xa4\\ \\xec\\xcb\\xa5\\xf7\\x0d\\xa9\\xa0\\xbf\\xc4\\xff\\x24\\xf6\\x4b\\xbd\\x2f\\x95\\ \\xf2\\x87\\x4b\\xfc\\x2c\\x15\\x3d\\x28\\x31\\x47\\x61\\x02\\x12\\x0f\\xa9\\x93\\ \\x5a\\x9d\\xa6\\x46\\x32\\x27\\xa9\\x9a\\x4a\\x7d\\x8d\\xb4\\x84\\x98\\x54\\x38\\ \\x5e\\xe2\\x62\\x45\\xf8\\x42\\xaf\\x13\\x93\\xa1\\xc7\\x11\\xdd\\x40\\xa3\\x20\\ \\xbe\\x07\\xdc\\x33\\x93\\xc0\\x17\\x81\\xd0\\x00\\xf0\\x1d\\x22\\xb8\\x3c\\x82\\ \\xd5\\x08\\xc9\\xff\\x01\\xca\\x52\\x80\\x9e\\xe0\\xe9\\xc0\\xba\\xfe\\x34\\x07\\ 
\\x66\\x84\\x38\\x3a\\x0a\\x48\\x80\\x0e\\x5b\\x5d\\x70\\x01\\x05\\x40\\x7e\\x0c\\", "\" qt_version = [int(v) for v in QtCore.qVersion().split('.')] if qt_version < [5, 8,", "\\xca\\xb0\\xc5\\x32\\xd9\\x21\\xcd\\xfa\\x20\\x44\\x4a\\x81\\x1b\\xa2\\xf8\\x87\\ \\x9f\\x80\\xd0\\x16\\x12\\x2a\\x4f\\x41\\xd4\\x87\\xd1\\x31\\x02\\xca\\x2c\\x22\\ \\xb3\\x77\\x11\\x9d\\xba\\x12\\xef\\xba\\x18\\x66\\x74\\x24\\xee\\x3e\\x9f\\x41\\ \\x53\\x00\\xb8\\x80\\x28\\x25\\xf8\\x68\\xc6\\x4b\\x39\\x71\\xd2\\x30\\xc8\\xe6\\ \\x77\\xba\\x10\\x26\\xca\\x20\\x76\\xe1\\x3f\\x6f\\x01\\xee\\xd0\\x49\\xde\\xf0\\ \\xcd\\x23\\xd2\\x69\\xb1\\xc2\\x5f\\xcc\\x65\\x9d\\xd5\\x84\\x5b\\x45\\xa3\\xbf\\ \\x81\\x98\\x75\\x02\\xde\\xdf\\x79\\x95\\x46\\x6d\\x7b\\x4f\\xd7\\x1c\\x9a\\xad\\ \\xc0\\x2f\\x48\\x2f\\x21\\x8d\\xf4\\x48\\x5d\\x8b\\xa4\\x3b\\x0d\\x35\\x1c\\xb2\\ \\x73\\xfa\\x1b\\x16\\xda\\xd8\\x89\\x82\\xb5\\xa8\\xea\\x18\\x7a\\xc1\\x42\\xcf\\ \\x0b\\xad\\x13\\xfa\\x3d\\x8c\\xe2\\x9b\\x0d\\x75\\x9c\\x34\\xa4\\xa3\\xc8\\x6a\\ \\xb4\\x19\\x50\\x05\\xdd\\x25\\x63\\x92\\xc4\\x08\\x89\\x19\\xd2\\xf8\\x02\\xe9\\ \\xff\\x21\\x79\\xee\\x90\\x8c\\xcf\\x25\\xe3\\x4a\\xbb\\x7f\\x8c\\x6a\\xc9\\x78\\ \\x47\\x32\\x26\\x48\\xac\\x90\\xd8\\xa7\\xbd\\x48\\x11\\x9e\\x95\\xd8\\xa9\\x72\\", "\\xc6\\x01\\x60\\x37\\xa4\\x7d\\x41\\x7c\\xee\\xeb\\xb4\\xfc\\xb1\\x14\\xce\\xc6\\ \\x61\\x76\\x16\\xb4\\x5f\\x64\\xab\\x6c\\xea\\x7a\\x98\\xb3\\x01\\x4e\\xcc\\xb1\\ \\xbb\\xac\\x74\\x83\\xcd\\x54\\x64\\x43\\xef\\x03\\x70\\xfe\\x59\\xe2\\xb4\\x12\\ \\x66\\x02\\x8c\\xf8\\x89\\xac\\xcc\\x7b\\x71\\x11\\x63\\x1f\\x59\\xd4\\xb4\\x6f\\ \\x87\\xce\\x0c\\x78\\xe0\\x1d\\xba\\x78\\xdf\\x60\\x32\\x27\\xed\\xaf\\xea\\x2c\\ \\x7a\\x44\\xa8\\x79\\x0b\\xd2\\x6c\\xa4\\xbb\\xd1\\x67\\x31\\xb4\\x40\\x48\\xaf\\ \\x21\\xf5\\x44\\x32\\x0b\\xa5\\x9b\\x4c\\xad\\xb0\\xd0\\xb3\\x42\\x2b\\x85\\x1a\\ \\xf6\\x20\\xbd\\x88\\xda\\x8f\\xa1\\x4f\\x84\\x76\\x75\\x22\\xcd\\x44\\x1a\\x8d\\ 
\\x74\\x77\\x92\\x6a\\x8e\\xd8\\x01\\xcb\\x9a\\xed\\x95\\xdc\\xa5\\x12\\xb3\\x24\\ \\x5e\\xb7\\x21\\xf7\\x86\\x5b\\x9a\\xd4\\xdd\\xce\\xf4\\xec\\x95\\xa6\\xb9\\xa5\\ \\xbc\\xa1\\xb2\\x08\\x48\\x7d\\xc7\\x4b\\xf3\\x3d\\x92\\xb1\\x51\\x41\\x24\\x15\\ \\x0c\\x94\\x6e\\xf1\\xaa\\xd1\\xa8\\xd3\\x7e\\xa4\\xbd\\xc8\\x4e\\x9b\\xf7\\x64\\ \\x49\\x77\\x23\\x0d\\xef\\xa3\\x33\\x54\\xca\\x0c\\xb7\\x9b\\x04\\x01\\xcb\\xeb\\", "\\xbe\\xcd\\xf0\\x78\\x15\\x6c\\x04\\x0e\\xdd\\x08\\x64\\x05\\x6c\\xb5\\x68\\xea\\ \\x81\\xe7\\x49\\x17\\x33\\xd7\\xc3\\x20\\xcb\\x76\\xcc\\x3e\\xa7\\x10\\x0f\\x10\\ \\x01\\x86\\x1e\\x87\\x8c\\xcb\\xdc\\x70\\xf4\\xaf\\xe0\\xeb\\x05\\x1a\\xe1\\xf2\\ \\xfd\\xe8\\xe5\\x0e\\xfc\\x2f\\x02\\xdb\\x6f\\x03\\xae\\xb0\\xe9\\x76\\x64\\x3d\\ \\x54\\x25\\x3a\\x2a\\x6f\\xc1\\x59\\xcb\\x66\\xea\\xdc\\x1d\\x58\\x18\\x74\\xd0\\ \\x07\\x6a\\x27\\x41\\x69\\x8c\\x94\\x21\\x8f\\x30\\x90\\x1a\\x92\\x9c\\xdf\\x5c\\ \\x1b\\xef\\x43\\x78\\xdd\\x2a\\xa8\\xc9\\xc5\\xc4\\x65\\x92\\x07\\x18\\x09\\xb6\\ \\xa2\\x33\\xd4\\xb6\\xb1\\x05\\x6b\\xed\\x84\\x5d\\x56\\x0a\\x4c\\x32\\x00\\xc0\\ \\x3a\\x0f\\xaa\\xc6\\x90\\xfc\\x9c\\xcd\\x03\\x38\\x11\\x20\\xd7\\xb1\\x80\\x03\\ \\x42\\x90\\x71\\xbd\\x17\\xfe\\xbc\\xd0\\x71\\x6e\\x2e\\x7b\\x03\\x2e\\xdd\\x4d\\ \\xe4\\x83\\x00\\x34\\x80\\xb1\\x74\\x08\\xc4\\xaf\\x43\\xe4\\x80\\x6b\\x05\\x5c\\ \\x12\\x84\\xca\\xae\\x8e\\x2f\\xe9\\x84\\x90\\xcb\\xde\\x9d\\x92\\x33\\x44\\xe9\\ \\xa0\\x85\\x24\\xb0\\x86\\x43\\x52\\x12\\xdc\\xd5\\x88\\xc1\\x6b\\x74\\xa3\\x83\\", "b\"\\ \\x00\\x05\\ \\x00\\x73\\x5e\\x63\\ \\x00\\x6c\\ \\x00\\x6f\\x00\\x67\\x00\\x6f\\x00\\x73\\ \\x00\\x08\\ \\x0a\\x61\\x5a\\xa7\\ \\x00\\x69\\ \\x00\\x63\\x00\\x6f\\x00\\x6e\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\ \" qt_resource_struct_v1 = b\"\\", "\\x49\\x1e\\x55\\x2c\\x43\\x5f\\x06\\x91\\xee\\x43\\x72\\x17\\x4a\\xfc\\x4d\\x62\\ \\x92\\x64\\x9e\\x27\\x2b\\xbb\\x50\\x81\\x95\\xe8\\x47\\x0b\\xad\\x11\\xfa\\x46\\ 
\\xe8\\x90\\x90\\x16\\x20\\x65\\xa4\\x49\\xdc\\xee\\x2c\\xf2\\x6b\\x89\\xef\\xa4\\ \\xec\\xcb\\xa5\\xf7\\x0d\\xa9\\xa0\\xbf\\xc4\\xff\\x24\\xf6\\x4b\\xbd\\x2f\\x95\\ \\xf2\\x87\\x4b\\xfc\\x2c\\x15\\x3d\\x28\\x31\\x47\\x61\\x02\\x12\\x0f\\xa9\\x93\\ \\x5a\\x9d\\xa6\\x46\\x32\\x27\\xa9\\x9a\\x4a\\x7d\\x8d\\xb4\\x84\\x98\\x54\\x38\\ \\x5e\\xe2\\x62\\x45\\xf8\\x42\\xaf\\x13\\x93\\xa1\\xc7\\x11\\xdd\\x40\\xa3\\x20\\ \\xbe\\x07\\xdc\\x33\\x93\\xc0\\x17\\x81\\xd0\\x00\\xf0\\x1d\\x22\\xb8\\x3c\\x82\\ \\xd5\\x08\\xc9\\xff\\x01\\xca\\x52\\x80\\x9e\\xe0\\xe9\\xc0\\xba\\xfe\\x34\\x07\\ \\x66\\x84\\x38\\x3a\\x0a\\x48\\x80\\x0e\\x5b\\x5d\\x70\\x01\\x05\\x40\\x7e\\x0c\\ \\x52\\x6a\\xc0\\xfc\\xb7\\x1b\\x3e\\x1a\\x01\\x91\\x99\\x40\\x37\\xa0\\x0e\\x92\\ \\x0e\\xc3\\x9d\\xcf\\xc3\\xfb\\xf9\\xd0\\xfe\\x36\\x10\\x83\\x5e\\xf7\\xc3\\xd1\\ \\x6f\\x80\\x0e\\x2c\\x52\\x30\\xe9\\x0e\\xfc\\x8f\\x0a\\x9e\\x24\\x42\\x23\\x99\\", "\\x47\\x32\\x26\\x48\\xac\\x90\\xd8\\xa7\\xbd\\x48\\x11\\x9e\\x95\\xd8\\xa9\\x72\\ \\xa2\\xb2\\x58\\xa7\\x36\\x0e\\x4a\\x2c\\xd7\\x7a\\xb3\\x5c\\xa1\\xae\\x77\\xa9\\ \\xf9\\xfa\\xb1\\xba\\xb6\\xfc\\x2e\\x69\\xd0\\x10\\x19\\x0d\\x7c\\xab\\xe5\\x13\\ \\x5d\\xdc\\x64\\x3e\\x40\\xda\\xaf\\x03\\xc0\\xbf\\x0e\\xba\\x75\\xf2\\xf5\\xe7\\ \\xa2\\xc7\\x06\\x38\\x5a\\x6b\\x32\\xfe\\x50\\x1a\\x19\\xd5\\x51\\x68\\x4f\\x83\\ \\x84\\x28\\xd4\\xd6\\xd1\\xf9\\x3c\\x6c\\x19\\x0f\\x7d\\x2c\\xe8\\xfe\\x0a\\xb8\\ \\xde\\x99\\x02\\x6d\\x57\\x41\\xe6\\x87\\x90\\xe1\\x42\\x97\\x54\\x41\\x71\\x19\\ \\x9d\\x57\\x99\\xf8\\xdf\\x4b\\x41\\x8b\\x2e\\x27\\x16\\xbe\\x06\\x4f\\xc6\\x6b\\ \\xe0\\xa9\\x85\\xba\\xc7\\x61\\xf8\\xbb\\x70\\x32\\x8c\\x96\\xb5\\x12\\x49\\x08\\ \\xe2\\x5b\\x1e\\x82\\x77\\xee\\x21\\xce\\x2d\\x1c\\xa2\\x3b\\x3d\\x88\\x62\\xb2\\ \\x95\\xdd\\x5c\\xc6\\xf9\\x9e\\x8d\\xec\\x1b\\xf8\\x15\\xc5\\x5d\\x8a\\xd9\\xfc\\ \\xfd\\x97\\x4c\\xce\\x3c\\x97\\xfd\\xf7\\x1f\\xa7\\xab\\xd9\\x40\\xc3\\x2b\\xb7\\ \\x61\\xac\\xfc\\x3d\\x41\\x57\\x7f\\xdf\\xdd\\x1e\\xe6\\xac\\x05\\x4a\\x4c\\xea\\", 
"\\xb8\\x0d\\xc8\\x81\\x78\\x14\\xc2\\x64\\x91\\x91\\x68\\xc2\\x56\\x17\\xb8\\xce\\ \\x62\\x8d\\x83\\x0d\\x37\\xc1\\x68\\x82\\xdc\\x70\\x77\\x02\\x2c\\xcc\\x05\\xf2\\ \\xc1\\x30\\xe0\\x9c\\xcd\\x1c\\x7b\\xb3\\x8d\\xde\\xcf\\xc2\\xae\\x3e\\xb0\\x0b\\ \\x88\\xb9\\xc1\\x9d\\x03\\x81\\x81\\xc0\\xd3\\xfb\\xc1\\x3c\\x03\\xec\\x43\\x44\\ \\x11\\xb3\\x49\\x9a\\xf3\\x24\\x7c\\x9c\\x45\\x6c\\x8c\\xa0\\xcb\\xef\\xb8\\xe8\\ \\x0a\\x94\\x63\\x1a\\x3e\\x7c\\x5d\\x93\\xd1\\xf8\\x16\\xd2\\x9a\\x9b\\x70\\x75\\ \\xf6\\x84\\x68\\x6f\\x28\\x3d\\xc3\\xd0\\x4b\\xcb\\xc9\\x8c\\x2c\\x62\\xdf\\xb2\\ \\x9d\\xdc\\xb2\\xfb\\x08\\x1e\\xef\\x11\\x16\\x56\\x26\\x71\\xdb\\xb1\\x5f\\xf8\\ \\x57\\x82\\x87\\x65\\x56\\x0e\\x2c\\xd9\\xde\\x45\\x5a\\x81\\x74\\x27\\x6a\\xac\\ \\x43\\x17\\xcb\\xa3\\xd8\\xa3\\x48\\xd9\\xf9\\xd2\\xe0\\x2e\\xda\\x7d\\x08\\x0d\\ \\x94\\x57\\x7f\\x84\\x90\\x0a\\x3d\\x52\\x62\\xa2\\x94\\x9a\\x2c\\x0d\\x34\\x54\\ \\x7f\\x00\\x5d\\x27\\xf4\\x80\\xd0\\x0f\\x42\\xaf\\x0b\\xad\\x15\\xaa\\x8d\\xa3\\ \\xaf\\x84\\xaa\\x02\\x28\\x7e\\xef\\x00\\x89\\xe7\\xd5\\x41\\x95\\x9a\\x39\\x25\\", "\\x2e\\x65\\x07\\x00\\x00\\x18\\xa1\\x49\\x44\\x41\\x54\\x68\\xde\\x35\\x99\\x67\\ \\x7c\\x55\\x55\\xfa\\xb6\\xaf\\xbd\\x4f\\x4b\\x4e\\x7a\\x25\\x09\\x25\\x09\\x09\\ \\x1d\\x44\\x91\\x26\\x8a\\xe8\\x20\\x4a\\x71\\x44\\x01\\xb1\\x2b\\x16\\xb0\\xcc\\ \\x60\\xc1\\xae\\xef\\x08\\x63\\x77\\xfe\\x3a\\xa2\\xa0\\x88\\x0c\\x16\\xac\\x88\\ \\x0a\\xa2\\xa0\\x02\\x4a\\x13\\x51\\x40\\x40\\x7a\\x42\\x4d\\x02\\x49\\x48\\x2f\\ \\x27\\x27\\xa7\\xee\\xfb\\xfd\\xb0\\xb7\\x9f\\xf2\\xcb\\x29\\xfb\\xac\\x67\\xad\\ \\xfb\\xb9\\xcb\\x7a\\x8c\\xbc\\xe8\\x70\\xcd\\xef\\xdc\\xc1\\x75\\x07\\x80\\x10\\ \\x3c\\xd0\\x7b\\x04\\x1b\\xbb\\xee\\x66\\xcf\\xe6\\x28\\xae\\x19\\x49\\x30\\xa6\\ \\x83\\xfa\\x39\\x30\\xe3\\x1c\\xe8\\x0d\\x0c\\xee\\xf0\\x32\\xe3\\xd9\\x08\\x2c\\ \\xc9\\x83\\x48\\x36\\x0c\\x38\\x08\\xe3\\xc5\\xee\\x3b\\xe0\\x9b\\x42\\xd8\\x0d\\ \\x7c\\xb0\\x03\\xd2\\xbe\\x06\\xf7\\x27\\x5d\\xe0\\xf4\\x28\\x70\\xf9\\x20\\xe5\\ 
\\x04\\x47\\x56\\xed\\xa0\\x6a\\x84\\x49\\x69\\xd8\\x22\\x73\\x85\\x9b\\xb4\\xc7\\ \\xa6\\x42\\x73\\x6f\\x02\\x23\\x9b\\x49\\x78\\x66\\x09\\xee\\x6f\\xc3\\x84\\xa3\\", "\\x6a\\xd1\\xce\\x2a\\xa4\\xb5\\x68\\xb3\\xec\\x82\\xac\\x6b\\xd1\\x9a\\x18\\xd2\\ \\x55\\x48\\xd9\\x48\\x69\\x86\\xaa\\x3f\\x44\\xff\\x16\\x3a\\xb5\\xc6\\x94\\x92\\ \\xd2\\x25\\x86\\x38\\x8b\\xbd\\x47\\x32\\x6e\\x56\\xe4\\xca\\x62\\x9d\\x6a\\x41\\ \\x9b\\x84\\x82\\x3f\\x20\\x25\\x96\\x38\\xef\\x7f\\x26\\xb1\\x4c\\xe2\\x5b\\x29\\ \\xfd\\x46\\x69\\x2b\\xd2\\x85\\xa5\\x12\\x6f\\x49\\x6c\\x96\\x78\\x43\\x7a\\xd8\\ \\x23\\xf1\\xaa\\xc4\\x26\\x69\\xd8\\xc5\\x12\\x1f\\xa9\\x91\\x98\\xe4\\xba\\x5d\\ \\x71\\xf6\\xa9\\x81\\xb0\\xc2\\x5c\\x2b\\xb9\\x6e\\xd1\\x7a\\xc2\\xba\\x17\\xe9\\ \\x3b\\x62\\x6a\\xf0\\xbd\\x22\\x0d\\xf5\\xab\\x82\\x45\\x22\\xb2\\x1b\\x55\\xef\\ \\x40\\x91\\x25\\xe8\\xcb\\x38\\x52\\x11\\xd2\\x14\\xa4\\x9b\\x4c\\xc9\\x6b\\x4a\\ \\x49\\x1e\\x55\\x2c\\x43\\x5f\\x06\\x91\\xee\\x43\\x72\\x17\\x4a\\xfc\\x4d\\x62\\ \\x92\\x64\\x9e\\x27\\x2b\\xbb\\x50\\x81\\x95\\xe8\\x47\\x0b\\xad\\x11\\xfa\\x46\\ \\xe8\\x90\\x90\\x16\\x20\\x65\\xa4\\x49\\xdc\\xee\\x2c\\xf2\\x6b\\x89\\xef\\xa4\\ \\xec\\xcb\\xa5\\xf7\\x0d\\xa9\\xa0\\xbf\\xc4\\xff\\x24\\xf6\\x4b\\xbd\\x2f\\x95\\", "\\xd1\\xb2\\xeb\\x72\\x9a\\xae\\xf6\\xb0\\x6a\\xfd\\x9d\\x7c\\xf8\\xa4\\xc1\\x23\\ \\xef\\x1c\\xa4\\xa5\\x7d\\x0a\\xff\\x1f\\xa7\\x48\\xb3\\x27\\x67\\x17\\xe2\\x1e\\ \\x00\\x00\\x00\\x00\\x49\\x45\\x4e\\x44\\xae\\x42\\x60\\x82\\ \" qt_resource_name = b\"\\ \\x00\\x05\\ \\x00\\x73\\x5e\\x63\\ \\x00\\x6c\\ \\x00\\x6f\\x00\\x67\\x00\\x6f\\x00\\x73\\ \\x00\\x08\\ \\x0a\\x61\\x5a\\xa7\\", "v5.12.5) # # WARNING! 
All changes made in this file will be lost!", "\\xeb\\x29\\xf0\\x99\\x8b\\xe3\\xea\\x46\\x60\\xed\\x83\\x64\\x86\\xfb\\x91\\x1c\\ \\x4d\\xa7\\xa6\\xc5\\xa0\\xe0\\x64\\x22\\x93\\xdf\\x9a\\x8b\\x59\\x19\\x8c\\x92\\ \\xb8\\x0d\\xc8\\x81\\x78\\x14\\xc2\\x64\\x91\\x91\\x68\\xc2\\x56\\x17\\xb8\\xce\\ \\x62\\x8d\\x83\\x0d\\x37\\xc1\\x68\\x82\\xdc\\x70\\x77\\x02\\x2c\\xcc\\x05\\xf2\\ \\xc1\\x30\\xe0\\x9c\\xcd\\x1c\\x7b\\xb3\\x8d\\xde\\xcf\\xc2\\xae\\x3e\\xb0\\x0b\\ \\x88\\xb9\\xc1\\x9d\\x03\\x81\\x81\\xc0\\xd3\\xfb\\xc1\\x3c\\x03\\xec\\x43\\x44\\ \\x11\\xb3\\x49\\x9a\\xf3\\x24\\x7c\\x9c\\x45\\x6c\\x8c\\xa0\\xcb\\xef\\xb8\\xe8\\ \\x0a\\x94\\x63\\x1a\\x3e\\x7c\\x5d\\x93\\xd1\\xf8\\x16\\xd2\\x9a\\x9b\\x70\\x75\\ \\xf6\\x84\\x68\\x6f\\x28\\x3d\\xc3\\xd0\\x4b\\xcb\\xc9\\x8c\\x2c\\x62\\xdf\\xb2\\ \\x9d\\xdc\\xb2\\xfb\\x08\\x1e\\xef\\x11\\x16\\x56\\x26\\x71\\xdb\\xb1\\x5f\\xf8\\ \\x57\\x82\\x87\\x65\\x56\\x0e\\x2c\\xd9\\xde\\x45\\x5a\\x81\\x74\\x27\\x6a\\xac\\ \\x43\\x17\\xcb\\xa3\\xd8\\xa3\\x48\\xd9\\xf9\\xd2\\xe0\\x2e\\xda\\x7d\\x08\\x0d\\ \\x94\\x57\\x7f\\x84\\x90\\x0a\\x3d\\x52\\x62\\xa2\\x94\\x9a\\x2c\\x0d\\x34\\x54\\", "\\x47\\x25\\x5e\\x93\\xf2\\xc6\\x49\\xfe\\xcb\\x24\\x96\\x4b\\x3c\\x23\\xf1\\x93\\ \\x94\\x76\\x8f\\x64\\x0e\\x74\\x2c\\xfe\\xcb\\x12\\xdf\\xe8\\x18\\x52\\x9c\\x46\\ \\x29\\x65\\x9c\\x54\\x92\\x24\\xb1\\x54\\xad\\x9c\\x50\\x35\\x31\\xad\\x45\\x6a\\ \\x20\\x2e\\x93\\xad\\xd0\\x1e\\x87\\xa4\\xa3\\xf6\\x49\\xe4\\xd4\\xc1\\x67\\x57\\ \\x02\\x8f\\x03\\xc7\\x0b\\x18\\xd0\\x2f\\x9f\\xe9\\x47\\xe0\\xa5\\xde\\xf0\\xc7\\ \\x3a\\x60\\x4c\\xf8\\x2f\\xb5\\x80\\x40\\x1b\\x49\\x6b\\xa1\\xaf\\x20\\x2f\\x0a\\ \\x39\\xb7\\x02\\xfb\\x66\\x01\\xf7\\x39\\xb2\\x1c\\x85\\xe4\\x23\\xe8\\xa3\\x65\\ \\xf0\\xf0\\x4f\\xf0\\xc2\\x30\\xa8\\x9d\\x65\\xbf\\x97\\xbf\\x0e\\xd2\\x76\\x42\\ \\xf0\\x5e\\x70\\x1d\\x87\\x82\\x57\\x6c\\x80\\x06\\x93\\x20\\x2b\\x44\\x8c\\xbe\\ \\x88\\x5c\\x70\\x2f\\x24\\x9d\\x3a\\x76\\x91\\x49\\x53\\xfb\\x67\\x50\\x97\\x06\\ 
\\xa3\\xee\\x23\\xd5\\x5c\\x00\\xec\\xe0\\x24\\xb0\\x18\\x13\\x53\\x33\\xe1\\x50\\ \\x2a\\xc4\\xdb\\xed\\x93\\xef\\xb1\\x11\\x2a\\x3c\\xd0\\x7e\\x1d\\x30\\xa8\\x0a\\ \\x3a\\xdd\\x8c\\x98\\xe5\\x61\\x58\\x1b\\x7c\\x92\\x0d\\xdf\\x7f\\x19\\x87\\x3b\\", "\\x29\\xbd\\xe8\\x91\\xe6\\x23\\x3d\\x85\\xd4\\xbb\\x44\\x31\\x96\\xa9\\x99\\xcd\\ \\x8a\\xf1\\x95\\x94\\x3b\\x52\\xee\\x9e\\x31\\xbb\\xd8\\x8c\\x97\\x80\\xc6\\x5c\\ \\x68\\xac\\xa3\\x35\\xdf\\x6e\\xde\\x5e\\xc0\\xf4\\x27\\xc1\\x58\\xdc\\x17\\xac\\ \\x73\\x1c\\x00\\x45\\xb0\\x6e\\x69\\xe4\\x8f\\xc1\\xc2\\x72\\xfa\\x23\\x82\\x1d\\ \\x1a\\x39\\x05\\x28\\xc5\\x01\\x5e\\x21\\xec\\xbd\\x15\\xe3\\xb1\\x77\\xf1\\x2c\\ \\x0a\\x63\\xd4\\x00\\x87\\x46\\x3b\\x1e\\x6c\\x34\\x7c\\xeb\\xc7\\xf5\\xaf\\x4f\\ \\xed\\xd3\\xa8\\x79\\x9d\\x18\\x93\\x70\\xd7\\x1d\\x83\\xdd\\x83\\xa0\\xc0\\x24\\ \\xb6\\x6e\\x31\\x01\\x6e\\xc6\\x04\\x92\\x38\\x82\\xcb\\x5c\\x08\\x66\\x14\\xd6\\ \\x16\\x51\\x5b\\xbe\\x8d\\x72\\x32\\xd8\\x88\\x97\\x29\\x40\\xbf\\xba\\x7d\\x98\\ \\x0d\\x09\\x10\\x6f\\x05\\x8e\\x02\\x9e\\x10\\xc4\\x60\\x67\\xa9\\x9d\\xfd\\xaf\\ \\xde\\x0d\\xe6\\x4a\\x0f\\x58\\x3d\\x1d\\x90\\x95\\x13\\x9b\\xf6\\x2d\\x1f\\xbd\\ \\x15\\xe4\\x88\\xdf\\x2e\\xa0\\x15\\x38\\x0b\\x1c\\x2f\\x82\\xd0\\x5c\\xa0\\xf0\\ \\x57\\x60\\x9d\\xcd\\x63\\x1a\\x0a\\xcb\\x06\\xd0\\x50\\x01\\x91\\x81\\x40\\xce\\", "\\xb6\\x6e\\x31\\x01\\x6e\\xc6\\x04\\x92\\x38\\x82\\xcb\\x5c\\x08\\x66\\x14\\xd6\\ \\x16\\x51\\x5b\\xbe\\x8d\\x72\\x32\\xd8\\x88\\x97\\x29\\x40\\xbf\\xba\\x7d\\x98\\ \\x0d\\x09\\x10\\x6f\\x05\\x8e\\x02\\x9e\\x10\\xc4\\x60\\x67\\xa9\\x9d\\xfd\\xaf\\ \\xde\\x0d\\xe6\\x4a\\x0f\\x58\\x3d\\x1d\\x90\\x95\\x13\\x9b\\xf6\\x2d\\x1f\\xbd\\ \\x15\\xe4\\x88\\xdf\\x2e\\xa0\\x15\\x38\\x0b\\x1c\\x2f\\x82\\xd0\\x5c\\xa0\\xf0\\ \\x57\\x60\\x9d\\xcd\\x63\\x1a\\x0a\\xcb\\x06\\xd0\\x50\\x01\\x91\\x81\\x40\\xce\\ \\x61\\xe7\\x39\\xb9\\xd0\\x39\\xc8\\xc6\\x6e\\x04\\xc0\\xc0\\xcd\\x69\\x3b\\x68\\ \\xe5\\x00\\x47\\x52\\x69\\x89\\x5e\\x4e\\x10\\x48\\x21\\x88\\xcb\\x58\\x08\\xbe\\ 
\\x00\\x6c\\x01\\xf6\\x3e\\x41\\x2d\\xb9\\x44\\xf1\\x60\\x81\\x9d\\x11\\x93\\xca\\ \\x31\\xb7\\xba\\xe1\\x82\\x9f\\x81\\x02\\x03\\xa2\\x99\\x30\\x14\\x0e\\xa4\\xc0\\ \\x13\\xc7\\x21\\x7b\\x01\\x70\\x36\\xcb\\x09\\x43\\x89\\xc4\\x1f\\x28\\x63\\xe1\\ \\x0a\\x9b\\xc9\\x32\\x9d\\x30\\xd5\\xd5\\xe9\\x91\\x46\\xe0\\xc4\\x18\\xd0\\x83\\ \\x16\\x78\\xea\\x1c\\x21\\x1c\\x04\\xa1\\xfb\\xc8\\x9e\\x9f\\x41\\x4d\\x1e\\xc4\\", "b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \" qt_resource_struct_v2 = b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\", "\\x62\\x8d\\x83\\x0d\\x37\\xc1\\x68\\x82\\xdc\\x70\\x77\\x02\\x2c\\xcc\\x05\\xf2\\ \\xc1\\x30\\xe0\\x9c\\xcd\\x1c\\x7b\\xb3\\x8d\\xde\\xcf\\xc2\\xae\\x3e\\xb0\\x0b\\ \\x88\\xb9\\xc1\\x9d\\x03\\x81\\x81\\xc0\\xd3\\xfb\\xc1\\x3c\\x03\\xec\\x43\\x44\\ \\x11\\xb3\\x49\\x9a\\xf3\\x24\\x7c\\x9c\\x45\\x6c\\x8c\\xa0\\xcb\\xef\\xb8\\xe8\\ \\x0a\\x94\\x63\\x1a\\x3e\\x7c\\x5d\\x93\\xd1\\xf8\\x16\\xd2\\x9a\\x9b\\x70\\x75\\ \\xf6\\x84\\x68\\x6f\\x28\\x3d\\xc3\\xd0\\x4b\\xcb\\xc9\\x8c\\x2c\\x62\\xdf\\xb2\\ \\x9d\\xdc\\xb2\\xfb\\x08\\x1e\\xef\\x11\\x16\\x56\\x26\\x71\\xdb\\xb1\\x5f\\xf8\\ \\x57\\x82\\x87\\x65\\x56\\x0e\\x2c\\xd9\\xde\\x45\\x5a\\x81\\x74\\x27\\x6a\\xac\\ \\x43\\x17\\xcb\\xa3\\xd8\\xa3\\x48\\xd9\\xf9\\xd2\\xe0\\x2e\\xda\\x7d\\x08\\x0d\\ \\x94\\x57\\x7f\\x84\\x90\\x0a\\x3d\\x52\\x62\\xa2\\x94\\x9a\\x2c\\x0d\\x34\\x54\\ \\x7f\\x00\\x5d\\x27\\xf4\\x80\\xd0\\x0f\\x42\\xaf\\x0b\\xad\\x15\\xaa\\x8d\\xa3\\ \\xaf\\x84\\xaa\\x02\\x28\\x7e\\xef\\x00\\x89\\xe7\\xd5\\x41\\x95\\x9a\\x39\\x25\\ 
\\x8b\\x8d\\xb2\\xcc\\x0f\\x14\\xfc\\x0d\\xc5\\x6f\\xf0\\x49\\xcc\\x96\\xd8\\xa1\\", "\\x0b\\xad\\x13\\xfa\\x3d\\x8c\\xe2\\x9b\\x0d\\x75\\x9c\\x34\\xa4\\xa3\\xc8\\x6a\\ \\xb4\\x19\\x50\\x05\\xdd\\x25\\x63\\x92\\xc4\\x08\\x89\\x19\\xd2\\xf8\\x02\\xe9\\ \\xff\\x21\\x79\\xee\\x90\\x8c\\xcf\\x25\\xe3\\x4a\\xbb\\x7f\\x8c\\x6a\\xc9\\x78\\ \\x47\\x32\\x26\\x48\\xac\\x90\\xd8\\xa7\\xbd\\x48\\x11\\x9e\\x95\\xd8\\xa9\\x72\\ \\xa2\\xb2\\x58\\xa7\\x36\\x0e\\x4a\\x2c\\xd7\\x7a\\xb3\\x5c\\xa1\\xae\\x77\\xa9\\ \\xf9\\xfa\\xb1\\xba\\xb6\\xfc\\x2e\\x69\\xd0\\x10\\x19\\x0d\\x7c\\xab\\xe5\\x13\\ \\x5d\\xdc\\x64\\x3e\\x40\\xda\\xaf\\x03\\xc0\\xbf\\x0e\\xba\\x75\\xf2\\xf5\\xe7\\ \\xa2\\xc7\\x06\\x38\\x5a\\x6b\\x32\\xfe\\x50\\x1a\\x19\\xd5\\x51\\x68\\x4f\\x83\\ \\x84\\x28\\xd4\\xd6\\xd1\\xf9\\x3c\\x6c\\x19\\x0f\\x7d\\x2c\\xe8\\xfe\\x0a\\xb8\\ \\xde\\x99\\x02\\x6d\\x57\\x41\\xe6\\x87\\x90\\xe1\\x42\\x97\\x54\\x41\\x71\\x19\\ \\x9d\\x57\\x99\\xf8\\xdf\\x4b\\x41\\x8b\\x2e\\x27\\x16\\xbe\\x06\\x4f\\xc6\\x6b\\ \\xe0\\xa9\\x85\\xba\\xc7\\x61\\xf8\\xbb\\x70\\x32\\x8c\\x96\\xb5\\x12\\x49\\x08\\ \\xe2\\x5b\\x1e\\x82\\x77\\xee\\x21\\xce\\x2d\\x1c\\xa2\\x3b\\x3d\\x88\\x62\\xb2\\", "\\x14\\x22\\x54\\x60\\x50\\x13\\xbd\\x04\\xbe\\xee\\x83\\xf9\\xdb\\x4d\\x16\\xdb\\ \\x80\\xc3\\x5e\\x68\\xbe\\xd5\\xd1\\xa1\\x69\\x10\\x1a\\x69\\xef\\xf2\\x7a\\xe0\\ \\xd8\\x0b\\x31\\x98\\x52\\x03\\xae\\x3f\\xec\\x33\\xa8\\xf0\\x91\\x7a\\xc8\\x2e\\ \\xa4\\xc5\\xd1\\x69\\x01\\x3d\\x7f\\x04\\x4e\\x7b\\x9c\\x73\\xaa\\xb2\\xd3\\xcd\\ \\xf0\\xd5\\xb4\\x7d\\xd0\\x48\\x3c\\x03\\x8c\\x4f\\x81\\xf6\\x5c\\xe0\\x24\\x06\\ \\xeb\\x60\\xfa\\xff\\xa0\\x17\\x50\\x59\\x07\\xfc\\x66\\x7b\\xf0\\xdc\\xd3\\xb6\\ \\xb5\\xca\\xe9\\xc0\\xc7\\x5e\\x52\\xc1\\x16\\xe1\\x8f\\x53\\xed\\xbd\\xc9\\x59\\ \\x4c\\x8e\\xf1\\x23\\x7e\\x5c\\x00\\x24\\xe2\\x82\\xce\\xcb\\x70\\xd5\\xbe\\xc8\\ \\xbc\\x20\\x50\\x09\\x1c\\x48\\x80\\xd6\\x12\\xd8\\x3d\\x1c\\x0e\\xe6\\xdb\\x4b\\ \\x31\\x80\\xb2\\x64\\x68\\xba\\x06\\x7a\\x8c\\x6b\\x23\\x3a\\xa0\\x82\\xf8\\x8c\\ 
\\x08\\x47\\xc7\\x42\\xb3\\xdb\\x5e\\xb2\\xdf\\xb6\\x87\\xe4\\x1b\\xe0\\x2b\\x8b\\ \\x42\\xc3\\x29\\x48\\xaa\\x80\\xd1\\x95\\x04\\xdf\\x0b\\x53\\x57\\x0c\\x1e\\x13\\ \\x12\\xf7\\x00\\x3f\\xb9\\x81\\x1d\\x90\\xf6\\x13\\xfc\\xb7\\xda\\x86\\xf3\\x91\\", "QtCore.qVersion().split('.')] if qt_version < [5, 8, 0]: rcc_version = 1 qt_resource_struct = qt_resource_struct_v1", "\\x02\\xe9\\xf6\\x8e\\xfa\\x0e\\x50\\x7b\\x67\\x88\\x46\\x20\\x94\\x05\\x89\\x7d\\ \\xa3\\x50\\xd3\\xe2\\x7c\\xae\\x0b\\x60\\x80\\x4a\\xe0\\xf8\\x60\\xdc\\xcf\\x54\\ \\xd9\\x4d\\x45\\x1c\\xf8\\xc2\\xfe\\x21\\xcf\\x09\\x98\\xf9\\x13\\x5c\\xe9\\x3c\\ \\x36\\xd9\\xf9\\xea\\x70\\xc0\\xb7\\x06\\xf7\\xba\\xc5\\x0c\\xe6\\x01\\xd2\\x71\\ \\x93\\x42\\x94\\x44\\x0e\\x63\\x31\\x91\\xfa\\x9a\\x67\\x68\\xe7\\x26\\x16\\x58\\ \\xc9\\xb8\\x5d\\xce\\x77\\xe5\\x34\\xea\\x21\\x60\\x7b\\x29\\x8c\\xbd\\x0c\\xc8\\ \\x05\\xd6\\x47\\xa1\\xf2\\x28\\x14\\xc3\\xe9\\x3b\\x0c\\x62\\x45\\xb5\\xc4\\x32\\ \\x6a\\x09\\xf9\\x21\\xec\\x03\\x9f\\x0f\\x3c\\xfd\\xa0\\xc6\\x03\\x41\\xa0\\x3c\\ \\x0f\\x0a\\xbf\\x12\\xe9\\x5b\\x1a\\x61\\xc3\\x17\\xf0\\xe7\\xaf\\xd0\\x2c\\xa8\\ \\x2e\\x80\\xa6\\x49\\xc0\\x14\\xec\\x4f\\x36\\x00\\x27\\x81\\xef\\x60\\x76\\x0d\\ \\xfc\\xd3\\x81\\x45\\x14\\x38\\x88\\xcd\\x1e\\x06\\xe0\\x8b\\x62\\x10\\x26\\x8f\\ \\x18\\xb5\\x24\\xd0\\x8c\\x41\\x3a\\xb3\\x39\\x0c\\x84\\x28\\xa1\\x37\\x70\\x15\\ \\xe0\\x6e\\xc0\\xc4\\x87\\x45\\xcc\\x39\\x91\\x62\\x20\\x25\\xe8\\x3c\\x34\\x05\\", "\\xb9\\x9d\\xce\\xfb\\x21\\xbc\\x07\\xd2\\x3f\\x32\\xa0\\x5c\\x50\\xef\\x03\\x2b\\ \\x05\\xa2\\x3d\\xed\\x10\\x43\\x0b\\x3c\\xb7\\x8d\\xdf\\x9f\\x86\\xf4\\x3a\\xe8\\ \\x33\\xd5\\x80\\x6d\\x53\\x81\\xee\\x0e\\x36\\xdd\\x0e\\x5e\\x92\\x9d\\xf3\\x8e\\ \\xd9\\xcd\\x4f\\x9e\\xa3\\x38\\x1f\\xc3\\xa2\\xb7\\xd1\\x35\\x60\\xec\\x75\\x5a\\ \\x27\\xe4\\x6c\\x62\\x05\\x30\\x6f\\x18\\x58\\xf7\\x01\\xa3\\x09\\x90\\x88\\x89\\ \\x9f\\x1d\\x24\\x53\\x80\\x9b\\x16\\x44\\x2a\\x06\\xed\\x80\\x2b\\x6f\\x5e\\x74\\ 
\\xde\\xb9\\x58\\xcc\\x04\\xae\\x6b\\x83\\xbc\\x23\\xce\\xf3\\x7f\\x8b\\xc0\\x2e\\ \\x2f\\x9e\\xb4\\x38\\x5b\\x67\\xc0\\xaf\\x77\\x43\\xcb\\x3f\\x40\\x17\\xc5\\x49\\ \\x09\\x86\\x31\\xab\\x23\\x10\\x8a\\x80\\x51\\x8b\\x75\\x63\\x3b\\x4d\\x43\\x20\\ \\x7b\\x0f\\x24\\xaf\\x32\\xe0\\xac\\x1b\\x38\\x0d\\xb4\\x81\\xcf\\x05\\x39\\x35\\ \\x30\\xf8\\x28\\xf4\\xf9\\x12\\x9a\\x16\\x40\\xc4\\x0b\\x1a\\x0d\\x94\\xd8\\x05\\ \\x46\\x57\\x60\\xf4\\xb4\\x20\\xd5\\xd6\\x4b\\x92\\x81\\x6d\\xc0\\xd2\\x12\\x08\\ \\x3c\\xe7\\x54\\xe6\\xc3\\xcd\\x29\\x22\\x18\\x94\\xf2\\x3d\\x09\\xf8\\x68\\x24\\", "qt_resource_struct_v2 def qInitResources(): QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data) def qCleanupResources(): QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)", "-*- coding: utf-8 -*- # Resource object code # # Created by: The", "= b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \" qt_resource_struct_v2 = b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\", "\\xc4\\x44\\x7c\\x74\\x12\\x25\\x4e\\xb1\\xf9\\x09\\xc9\\x2b\\xf2\\xe0\\xf0\\xcd\\ \\xf4\\x30\\x3b\\x99\\x68\\xee\\x87\\x0b\\xc6\\x53\\xf0\\xa6\\x20\\xdf\\x4b\\x81\\ \\x37\\x99\\xe2\\x16\\x93\\x7b\\x3c\\x07\\x31\\x17\\x0e\\xe9\\x44\\x61\\x60\\x00\\ \\x18\\xc5\\x30\\x39\\x0e\\x74\\x58\\xd0\\xe5\\x04\\xc1\\x5b\\xa1\\xd5\\x03\\x33\\ \\x16\\x00\\x1b\\x92\\x41\\x01\\x48\\x6d\\xe5\\xf4\\xdc\\x28\\xf7\\x3c\\x0d\\x1b\\ \\x80\\x33\\x67\\xa0\\xe8\\x13\\x48\\xfe\\x10\\xfa\\x76\\x40\\xb3\\x01\\x9e\\xf5\\ \\x90\\x38\\x2a\\x17\\x96\\x65\\x63\\x5a\\x10\\xfb\\x5f\\x13\\xfc\\xed\\x23\\xa0\\ 
\\x02\\x38\\x06\\x04\\x60\\xdf\\x38\\x4c\\xd3\\x0b\\x7d\\x77\\x03\\xc2\\x45\\x3b\\ \\x66\\xa0\\x1f\\x84\\x3a\\xf1\\x70\\x9c\\x46\\x92\\x88\\x64\\x9c\\x02\\xab\\x15\\ \\xd1\\x04\\xf1\\x54\\x0a\\x4f\\x0f\\x85\\x13\\x82\\xba\\x22\\x16\\x1d\\x9b\\x42\\ \\x63\\xd5\\xc5\\xb4\\x9b\\x01\\xc2\\xed\\x43\\xe1\\x93\\x76\\xd4\\xb1\\x12\\xe9\\ \\x69\\x74\\xa8\\x19\\x6d\\xaf\\x40\\xba\\x18\\x59\\x43\\xd0\\xd3\\x21\\x54\\xbf\\ \\x18\\xc9\\x9f\\x2a\\xb9\\xfc\\x52\\x8a\\xa9\\x8a\\x15\\x86\\xae\\x10\\xfa\\x57\\", "\\x18\\xc9\\x9f\\x2a\\xb9\\xfc\\x52\\x8a\\xa9\\x8a\\x15\\x86\\xae\\x10\\xfa\\x57\\ \\x0c\\x35\\xcd\\x46\\x56\\x4f\\xa4\\x81\\x09\\x52\\x41\\x9a\\xa2\\x8b\\xd0\\x71\\ \\xa1\\xf8\\xcd\\x3e\\x89\\xc7\\x25\\x56\\x4a\\x3d\\x67\\x4b\\xcb\\x90\\x36\\x21\\ \\xdd\\xe4\\x96\\x98\\x22\\xf1\\x9e\\xc4\\x42\\x89\\xc9\\xd2\\xa7\\x48\\x2c\\x95\\ \\x28\\x73\\xe0\\x85\\x2c\\x5e\\x56\\x05\\x96\\xda\\x78\\x45\\x4a\\x19\\xae\\x06\\ \\xa4\\x2a\\x1a\\x14\\x4b\\xb9\\x43\\xba\\x20\\x59\\x61\\xe3\\x65\\xfd\\xb3\\x58\\ \\x7a\\xce\\x1f\\xd6\\xdb\\xee\\x23\\xda\\x53\\x34\\x5f\\x9c\\xe8\\x40\\x0d\\xfb\\ \\x90\\x1e\\x42\\x7f\\xb6\\x23\\x6b\\x13\\xd2\\xf9\\xa6\\xc2\\xff\\x41\\x95\\xeb\\ \\x90\\xd2\\x3d\\x92\\xd1\\x53\\x2a\\x71\\xab\\x6a\\x13\\xfa\\x8f\\xd0\\xea\\xb3\\ \\x28\\x3a\\x16\\x89\\xf3\\x24\\x6e\\x92\\xb8\\x56\\x62\\x92\\x42\\x4f\\xb8\\x55\\ \\x65\\x21\\xdd\\x80\\xc4\\x1d\\x12\\x1f\\x49\\xfc\\x21\\x99\\x9f\\x4b\\x85\\x57\\ \\x4b\\x6b\\x90\\x26\\xba\\x25\\xde\\x95\\xc5\\x59\\x89\\x72\\x69\\x46\\x0f\\xa9\\ \\x70\\x8c\\xe2\\x54\\x2a\\x4a\\x48\\x3a\\xb7\\x97\\xc4\\x1a\\x95\\x63\\x29\\x98\\", "will be lost! 
# from PyQt5 import QtCore from silx.gui import qt as", "\\x7b\\x0f\\x24\\xaf\\x32\\xe0\\xac\\x1b\\x38\\x0d\\xb4\\x81\\xcf\\x05\\x39\\x35\\ \\x30\\xf8\\x28\\xf4\\xf9\\x12\\x9a\\x16\\x40\\xc4\\x0b\\x1a\\x0d\\x94\\xd8\\x05\\ \\x46\\x57\\x60\\xf4\\xb4\\x20\\xd5\\xd6\\x4b\\x92\\x81\\x6d\\xc0\\xd2\\x12\\x08\\ \\x3c\\xe7\\x54\\xe6\\xc3\\xcd\\x29\\x22\\x18\\x94\\xf2\\x3d\\x09\\xf8\\x68\\x24\\ \\x15\\xe1\\xe6\\x00\\x60\\x3c\\x2e\\x34\\x06\\x98\\xb0\\xd7\\xc1\\xe9\\x61\\x60\\ \\x08\\x34\\x64\\x40\\xd6\\x63\\x60\\xec\\x02\\xfc\\x49\\x58\\x53\\x23\\xec\\xb8\\ \\x39\\xca\\xee\\x7e\\x10\\x31\\xe1\\xbc\\x6a\\x28\\xfd\\x15\\x92\\xc3\\x70\\x70\\ \\x3a\\x54\\x7b\\xa0\\x67\\x08\\x06\\x6c\\x00\\xef\\xcf\\x6e\\x62\\xbd\\x5d\\x44\\ \\x0b\\xc3\\x44\\x4a\\x21\\xad\\x19\\x8c\\x9f\\xbc\\xf0\\x4e\\x1a\\x54\\x5e\\x65\\ \\x0b\\x13\\x5d\\x9c\\x86\\xef\\x07\\xde\\xb7\\x20\\xfb\\x10\\x3c\\xbc\\x0d\\x06\\ \\xc7\\x61\\x25\\xb0\\x22\\x07\\xea\\x9e\\xb0\\x29\\xc8\\xa8\\x05\\x8d\\x27\\x48\\ \\x29\\x1e\\xca\\x70\\x73\\x14\\xa3\\xcf\\xab\\xd0\\xd2\\x0b\\xac\\x54\\x1a\\x1a\\ \\x5e\\xc2\\x68\\x8e\\xa3\\x0a\\x13\\x06\\xaf\\xb7\\xc5\\x96\\xdf\\x6d\\x24\\xd4\\", "\\x65\\x61\\x74\\x65\\x64\\x20\\x77\\x69\\x74\\x68\\x20\\x47\\x49\\x4d\\x50\\x64\\ \\x2e\\x65\\x07\\x00\\x00\\x18\\xa1\\x49\\x44\\x41\\x54\\x68\\xde\\x35\\x99\\x67\\ \\x7c\\x55\\x55\\xfa\\xb6\\xaf\\xbd\\x4f\\x4b\\x4e\\x7a\\x25\\x09\\x25\\x09\\x09\\ \\x1d\\x44\\x91\\x26\\x8a\\xe8\\x20\\x4a\\x71\\x44\\x01\\xb1\\x2b\\x16\\xb0\\xcc\\ \\x60\\xc1\\xae\\xef\\x08\\x63\\x77\\xfe\\x3a\\xa2\\xa0\\x88\\x0c\\x16\\xac\\x88\\ \\x0a\\xa2\\xa0\\x02\\x4a\\x13\\x51\\x40\\x40\\x7a\\x42\\x4d\\x02\\x49\\x48\\x2f\\ \\x27\\x27\\xa7\\xee\\xfb\\xfd\\xb0\\xb7\\x9f\\xf2\\xcb\\x29\\xfb\\xac\\x67\\xad\\ \\xfb\\xb9\\xcb\\x7a\\x8c\\xbc\\xe8\\x70\\xcd\\xef\\xdc\\xc1\\x75\\x07\\x80\\x10\\ \\x3c\\xd0\\x7b\\x04\\x1b\\xbb\\xee\\x66\\xcf\\xe6\\x28\\xae\\x19\\x49\\x30\\xa6\\ \\x83\\xfa\\x39\\x30\\xe3\\x1c\\xe8\\x0d\\x0c\\xee\\xf0\\x32\\xe3\\xd9\\x08\\x2c\\ 
\\xc9\\x83\\x48\\x36\\x0c\\x38\\x08\\xe3\\xc5\\xee\\x3b\\xe0\\x9b\\x42\\xd8\\x0d\\ \\x7c\\xb0\\x03\\xd2\\xbe\\x06\\xf7\\x27\\x5d\\xe0\\xf4\\x28\\x70\\xf9\\x20\\xe5\\ \\x04\\x47\\x56\\xed\\xa0\\x6a\\x84\\x49\\x69\\xd8\\x22\\x73\\x85\\x9b\\xb4\\xc7\\", "\\x37\\x99\\xe2\\x16\\x93\\x7b\\x3c\\x07\\x31\\x17\\x0e\\xe9\\x44\\x61\\x60\\x00\\ \\x18\\xc5\\x30\\x39\\x0e\\x74\\x58\\xd0\\xe5\\x04\\xc1\\x5b\\xa1\\xd5\\x03\\x33\\ \\x16\\x00\\x1b\\x92\\x41\\x01\\x48\\x6d\\xe5\\xf4\\xdc\\x28\\xf7\\x3c\\x0d\\x1b\\ \\x80\\x33\\x67\\xa0\\xe8\\x13\\x48\\xfe\\x10\\xfa\\x76\\x40\\xb3\\x01\\x9e\\xf5\\ \\x90\\x38\\x2a\\x17\\x96\\x65\\x63\\x5a\\x10\\xfb\\x5f\\x13\\xfc\\xed\\x23\\xa0\\ \\x02\\x38\\x06\\x04\\x60\\xdf\\x38\\x4c\\xd3\\x0b\\x7d\\x77\\x03\\xc2\\x45\\x3b\\ \\x66\\xa0\\x1f\\x84\\x3a\\xf1\\x70\\x9c\\x46\\x92\\x88\\x64\\x9c\\x02\\xab\\x15\\ \\xd1\\x04\\xf1\\x54\\x0a\\x4f\\x0f\\x85\\x13\\x82\\xba\\x22\\x16\\x1d\\x9b\\x42\\ \\x63\\xd5\\xc5\\xb4\\x9b\\x01\\xc2\\xed\\x43\\xe1\\x93\\x76\\xd4\\xb1\\x12\\xe9\\ \\x69\\x74\\xa8\\x19\\x6d\\xaf\\x40\\xba\\x18\\x59\\x43\\xd0\\xd3\\x21\\x54\\xbf\\ \\x18\\xc9\\x9f\\x2a\\xb9\\xfc\\x52\\x8a\\xa9\\x8a\\x15\\x86\\xae\\x10\\xfa\\x57\\ \\x0c\\x35\\xcd\\x46\\x56\\x4f\\xa4\\x81\\x09\\x52\\x41\\x9a\\xa2\\x8b\\xd0\\x71\\ \\xa1\\xf8\\xcd\\x3e\\x89\\xc7\\x25\\x56\\x4a\\x3d\\x67\\x4b\\xcb\\x90\\x36\\x21\\", "\\xe3\\x01\\x1e\\x09\\x34\\x07\\xfb\\x3d\\x97\\x4b\\x00\\x00\\x00\\x1d\\x69\\x54\\ \\x58\\x74\\x43\\x6f\\x6d\\x6d\\x65\\x6e\\x74\\x00\\x00\\x00\\x00\\x00\\x43\\x72\\ \\x65\\x61\\x74\\x65\\x64\\x20\\x77\\x69\\x74\\x68\\x20\\x47\\x49\\x4d\\x50\\x64\\ \\x2e\\x65\\x07\\x00\\x00\\x18\\xa1\\x49\\x44\\x41\\x54\\x68\\xde\\x35\\x99\\x67\\ \\x7c\\x55\\x55\\xfa\\xb6\\xaf\\xbd\\x4f\\x4b\\x4e\\x7a\\x25\\x09\\x25\\x09\\x09\\ \\x1d\\x44\\x91\\x26\\x8a\\xe8\\x20\\x4a\\x71\\x44\\x01\\xb1\\x2b\\x16\\xb0\\xcc\\ \\x60\\xc1\\xae\\xef\\x08\\x63\\x77\\xfe\\x3a\\xa2\\xa0\\x88\\x0c\\x16\\xac\\x88\\ \\x0a\\xa2\\xa0\\x02\\x4a\\x13\\x51\\x40\\x40\\x7a\\x42\\x4d\\x02\\x49\\x48\\x2f\\ 
\\x27\\x27\\xa7\\xee\\xfb\\xfd\\xb0\\xb7\\x9f\\xf2\\xcb\\x29\\xfb\\xac\\x67\\xad\\ \\xfb\\xb9\\xcb\\x7a\\x8c\\xbc\\xe8\\x70\\xcd\\xef\\xdc\\xc1\\x75\\x07\\x80\\x10\\ \\x3c\\xd0\\x7b\\x04\\x1b\\xbb\\xee\\x66\\xcf\\xe6\\x28\\xae\\x19\\x49\\x30\\xa6\\ \\x83\\xfa\\x39\\x30\\xe3\\x1c\\xe8\\x0d\\x0c\\xee\\xf0\\x32\\xe3\\xd9\\x08\\x2c\\ \\xc9\\x83\\x48\\x36\\x0c\\x38\\x08\\xe3\\xc5\\xee\\x3b\\xe0\\x9b\\x42\\xd8\\x0d\\", "\\xec\\xcb\\xa5\\xf7\\x0d\\xa9\\xa0\\xbf\\xc4\\xff\\x24\\xf6\\x4b\\xbd\\x2f\\x95\\ \\xf2\\x87\\x4b\\xfc\\x2c\\x15\\x3d\\x28\\x31\\x47\\x61\\x02\\x12\\x0f\\xa9\\x93\\ \\x5a\\x9d\\xa6\\x46\\x32\\x27\\xa9\\x9a\\x4a\\x7d\\x8d\\xb4\\x84\\x98\\x54\\x38\\ \\x5e\\xe2\\x62\\x45\\xf8\\x42\\xaf\\x13\\x93\\xa1\\xc7\\x11\\xdd\\x40\\xa3\\x20\\ \\xbe\\x07\\xdc\\x33\\x93\\xc0\\x17\\x81\\xd0\\x00\\xf0\\x1d\\x22\\xb8\\x3c\\x82\\ \\xd5\\x08\\xc9\\xff\\x01\\xca\\x52\\x80\\x9e\\xe0\\xe9\\xc0\\xba\\xfe\\x34\\x07\\ \\x66\\x84\\x38\\x3a\\x0a\\x48\\x80\\x0e\\x5b\\x5d\\x70\\x01\\x05\\x40\\x7e\\x0c\\ \\x52\\x6a\\xc0\\xfc\\xb7\\x1b\\x3e\\x1a\\x01\\x91\\x99\\x40\\x37\\xa0\\x0e\\x92\\ \\x0e\\xc3\\x9d\\xcf\\xc3\\xfb\\xf9\\xd0\\xfe\\x36\\x10\\x83\\x5e\\xf7\\xc3\\xd1\\ \\x6f\\x80\\x0e\\x2c\\x52\\x30\\xe9\\x0e\\xfc\\x8f\\x0a\\x9e\\x24\\x42\\x23\\x99\\ \\x74\\x52\\x46\\x37\\x06\\xf3\\x13\\x49\\x7c\\x03\\xc6\\x25\\x9c\\xd2\\x60\\xd6\\ \\x53\\x42\\x12\\x60\\x54\\xfd\\x82\\xaa\\xfa\\xc2\\xd6\\x4c\\x98\\xbd\\x15\\x12\\ \\xe7\\xb9\\xc0\\x63\\x41\\xc8\\x84\\xac\\x38\\x24\\x00\\xbf\\x00\\x5d\\x81\\x01\\", "Compiler for PyQt5 (Qt v5.12.5) # # WARNING! 
All changes made in this", "\\x66\\x84\\x38\\x3a\\x0a\\x48\\x80\\x0e\\x5b\\x5d\\x70\\x01\\x05\\x40\\x7e\\x0c\\ \\x52\\x6a\\xc0\\xfc\\xb7\\x1b\\x3e\\x1a\\x01\\x91\\x99\\x40\\x37\\xa0\\x0e\\x92\\ \\x0e\\xc3\\x9d\\xcf\\xc3\\xfb\\xf9\\xd0\\xfe\\x36\\x10\\x83\\x5e\\xf7\\xc3\\xd1\\ \\x6f\\x80\\x0e\\x2c\\x52\\x30\\xe9\\x0e\\xfc\\x8f\\x0a\\x9e\\x24\\x42\\x23\\x99\\ \\x74\\x52\\x46\\x37\\x06\\xf3\\x13\\x49\\x7c\\x03\\xc6\\x25\\x9c\\xd2\\x60\\xd6\\ \\x53\\x42\\x12\\x60\\x54\\xfd\\x82\\xaa\\xfa\\xc2\\xd6\\x4c\\x98\\xbd\\x15\\x12\\ \\xe7\\xb9\\xc0\\x63\\x41\\xc8\\x84\\xac\\x38\\x24\\x00\\xbf\\x00\\x5d\\x81\\x01\\ \\x06\\x8d\\x17\\x19\\x6c\\xb8\\xdc\\xe2\\x54\\x81\\xfd\\x52\\x9f\\xd3\\x90\\x17\\ \\x86\\xf6\\x1c\\xd8\\x92\\x0a\\x85\\xc0\\xc5\\xe5\\xe0\\x8d\\x81\\x2b\\x05\\x94\\ \\x01\\x91\\xd3\\x90\\xb8\\x20\\x17\\xde\\xeb\\x0f\\xa1\\xa1\\xc0\\x40\\xe0\\x0f\\ \\x18\\xbe\\x1c\\xf6\\x0f\\x86\\xce\\x47\\x81\\x1a\\x9b\\x3a\\x69\\x02\\xb6\\x42\\ \\xca\\x6e\\x08\\x05\\x89\\x45\\xd7\\x53\\x8b\\x81\\x17\\x93\\x6c\\x7e\\xc0\\x74\\ \\x7d\\x4c\\x34\\x3e\\x8f\\x43\\x14\\x73\\x12\\x17\\xa9\\x40\\x42\\xfa\\x19\\x8c\\", "\\x0b\\x13\\x5d\\x9c\\x86\\xef\\x07\\xde\\xb7\\x20\\xfb\\x10\\x3c\\xbc\\x0d\\x06\\ \\xc7\\x61\\x25\\xb0\\x22\\x07\\xea\\x9e\\xb0\\x29\\xc8\\xa8\\x05\\x8d\\x27\\x48\\ \\x29\\x1e\\xca\\x70\\x73\\x14\\xa3\\xcf\\xab\\xd0\\xd2\\x0b\\xac\\x54\\x1a\\x1a\\ \\x5e\\xc2\\x68\\x8e\\xa3\\x0a\\x13\\x06\\xaf\\xb7\\xc5\\x96\\xdf\\x6d\\x24\\xd4\\ \\x7f\\x0c\\x1b\\x7d\\x30\\xfd\\x59\\xe0\\x95\\x3c\\x9b\\x3e\\x53\\x92\\x60\\xc8\\ \\x4e\\x8e\\x7e\\xda\\xcc\\xa6\\x02\\x1b\\x30\\xa5\\x0e\\xc1\\xa4\\x01\\xed\\x40\\ \\x7f\\xc0\\x72\\x40\\x97\\xb2\\x1d\\xdc\\x73\\xbd\\x18\\xdb\\x87\\x43\\x60\\x18\\ \\xf6\\xf5\\x45\\xa1\\x73\\x5a\\x47\\x1c\\x85\\x1f\\x07\\xb8\\x11\\x11\\x0c\\x63\\ \\x09\\x5c\\xf1\\x5f\\xd8\\x52\\x02\\xc1\\x73\\x81\\x09\\x58\\x4c\\xc4\\x34\\x1f\\ \\x02\\xb7\\x0b\\x2b\\xf2\\x10\\x15\\xf4\\x27\\x07\\x51\\xe5\\x3a\\x4b\\xbf\\xd4\\ 
\\x67\\x20\\xb0\\x0d\\x3c\\x60\\xe8\\x35\\xd4\\x36\\x13\\x52\\xd7\\x39\\xc7\\x9a\\ \\x0a\\xbc\\x66\\x6f\\xc4\\xe6\\x75\\xb0\\x3c\\x03\\x1e\\x5d\\x09\\xc5\\x37\\x26\\ \\x43\\xa8\\x14\\xe8\\x05\\xae\\xc3\\x30\\xb1\\x8c\\xda\\xd7\\xa2\\xfc\\xd6\\xcb\\", "\\x00\\x00\\x19\\x3d\\ \\x89\\ \\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0d\\x49\\x48\\x44\\x52\\x00\\ \\x00\\x00\\x32\\x00\\x00\\x00\\x32\\x08\\x06\\x00\\x00\\x00\\x1e\\x3f\\x88\\xb1\\ \\x00\\x00\\x00\\x06\\x62\\x4b\\x47\\x44\\x00\\xff\\x00\\xff\\x00\\xff\\xa0\\xbd\\ \\xa7\\x93\\x00\\x00\\x00\\x09\\x70\\x48\\x59\\x73\\x00\\x00\\x0b\\x13\\x00\\x00\\ \\x0b\\x13\\x01\\x00\\x9a\\x9c\\x18\\x00\\x00\\x00\\x07\\x74\\x49\\x4d\\x45\\x07\\ \\xe3\\x01\\x1e\\x09\\x34\\x07\\xfb\\x3d\\x97\\x4b\\x00\\x00\\x00\\x1d\\x69\\x54\\ \\x58\\x74\\x43\\x6f\\x6d\\x6d\\x65\\x6e\\x74\\x00\\x00\\x00\\x00\\x00\\x43\\x72\\ \\x65\\x61\\x74\\x65\\x64\\x20\\x77\\x69\\x74\\x68\\x20\\x47\\x49\\x4d\\x50\\x64\\ \\x2e\\x65\\x07\\x00\\x00\\x18\\xa1\\x49\\x44\\x41\\x54\\x68\\xde\\x35\\x99\\x67\\ \\x7c\\x55\\x55\\xfa\\xb6\\xaf\\xbd\\x4f\\x4b\\x4e\\x7a\\x25\\x09\\x25\\x09\\x09\\ \\x1d\\x44\\x91\\x26\\x8a\\xe8\\x20\\x4a\\x71\\x44\\x01\\xb1\\x2b\\x16\\xb0\\xcc\\", "\\x58\\x0f\\xfc\\xc0\\xb6\\xf9\\x21\\x2e\\x3a\\x08\\xc6\\x38\\x2f\\xd4\\x74\\x07\\ \\x86\\x01\\x17\\x01\\x67\\xc0\\xa8\\x85\\x9e\\xab\\x88\\xdd\\xd6\\x8c\\xbb\\x05\\ \\xd8\\xe9\\x81\\x5f\\xde\\x06\\x75\\x01\\x0a\\xc1\\x58\\x05\\xd7\\x3e\\x0b\\x97\\ \\xc6\\xed\\x47\\xee\\x02\\xfe\\x04\\x36\\x4f\\x27\\xca\\x62\\x56\\x92\\x4e\\x77\\ \\x1b\\xd8\\xa4\\xb2\\x01\\x1f\\x75\\x98\\xf9\\x8f\\x42\\xcd\\x1c\\x5a\\xcc\\xe1\\ \\xb8\\x83\\x98\\x44\\xb0\\x68\\x02\\x7c\\xc0\\x1e\\xe0\\x9a\\x74\\xa0\\x08\\xa8\\ \\x05\\x16\\x79\\x30\\x82\\x83\\x70\\xd3\\x08\\xc9\\x95\\xd0\\x91\\xe8\\x14\\x60\\ \\x02\\xe9\\xf6\\x8e\\xfa\\x0e\\x50\\x7b\\x67\\x88\\x46\\x20\\x94\\x05\\x89\\x7d\\ \\xa3\\x50\\xd3\\xe2\\x7c\\xae\\x0b\\x60\\x80\\x4a\\xe0\\xf8\\x60\\xdc\\xcf\\x54\\ 
\\xd9\\x4d\\x45\\x1c\\xf8\\xc2\\xfe\\x21\\xcf\\x09\\x98\\xf9\\x13\\x5c\\xe9\\x3c\\ \\x36\\xd9\\xf9\\xea\\x70\\xc0\\xb7\\x06\\xf7\\xba\\xc5\\x0c\\xe6\\x01\\xd2\\x71\\ \\x93\\x42\\x94\\x44\\x0e\\x63\\x31\\x91\\xfa\\x9a\\x67\\x68\\xe7\\x26\\x16\\x58\\ \\xc9\\xb8\\x5d\\xce\\x77\\xe5\\x34\\xea\\x21\\x60\\x7b\\x29\\x8c\\xbd\\x0c\\xc8\\", "QtCore from silx.gui import qt as QtCore qt_resource_data = b\"\\ \\x00\\x00\\x19\\x3d\\ \\x89\\ \\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0d\\x49\\x48\\x44\\x52\\x00\\", "\\xa2\\x33\\xd4\\xb6\\xb1\\x05\\x6b\\xed\\x84\\x5d\\x56\\x0a\\x4c\\x32\\x00\\xc0\\ \\x3a\\x0f\\xaa\\xc6\\x90\\xfc\\x9c\\xcd\\x03\\x38\\x11\\x20\\xd7\\xb1\\x80\\x03\\ \\x42\\x90\\x71\\xbd\\x17\\xfe\\xbc\\xd0\\x71\\x6e\\x2e\\x7b\\x03\\x2e\\xdd\\x4d\\ \\xe4\\x83\\x00\\x34\\x80\\xb1\\x74\\x08\\xc4\\xaf\\x43\\xe4\\x80\\x6b\\x05\\x5c\\ \\x12\\x84\\xca\\xae\\x8e\\x2f\\xe9\\x84\\x90\\xcb\\xde\\x9d\\x92\\x33\\x44\\xe9\\ \\xa0\\x85\\x24\\xb0\\x86\\x43\\x52\\x12\\xdc\\xd5\\x88\\xc1\\x6b\\x74\\xa3\\x83\\ \\x14\\x22\\x54\\x60\\x50\\x13\\xbd\\x04\\xbe\\xee\\x83\\xf9\\xdb\\x4d\\x16\\xdb\\ \\x80\\xc3\\x5e\\x68\\xbe\\xd5\\xd1\\xa1\\x69\\x10\\x1a\\x69\\xef\\xf2\\x7a\\xe0\\ \\xd8\\x0b\\x31\\x98\\x52\\x03\\xae\\x3f\\xec\\x33\\xa8\\xf0\\x91\\x7a\\xc8\\x2e\\ \\xa4\\xc5\\xd1\\x69\\x01\\x3d\\x7f\\x04\\x4e\\x7b\\x9c\\x73\\xaa\\xb2\\xd3\\xcd\\ \\xf0\\xd5\\xb4\\x7d\\xd0\\x48\\x3c\\x03\\x8c\\x4f\\x81\\xf6\\x5c\\xe0\\x24\\x06\\ \\xeb\\x60\\xfa\\xff\\xa0\\x17\\x50\\x59\\x07\\xfc\\x66\\x7b\\xf0\\xdc\\xd3\\xb6\\ \\xb5\\xca\\xe9\\xc0\\xc7\\x5e\\x52\\xc1\\x16\\xe1\\x8f\\x53\\xed\\xbd\\xc9\\x59\\", "\" qt_resource_struct_v2 = b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x01\\x74\\x20\\x97\\x84\\xbd\\ \" qt_version =", 
"\\x88\\x79\\xc0\\xf4\\xc2\\xe8\\x36\\x22\\xb7\\x59\\x54\\x03\\x1d\\x06\\xb8\\xbd\\ \\xa0\\x64\\x68\\xf4\\xd8\\x20\\xf3\\x3b\\x48\\xf7\\x01\\x4d\\x09\\x10\\xbd\\x10\\ \\x32\\x87\\x05\\x09\\xb9\\xcb\\xf0\\x76\\x82\\xe7\\x87\\x72\\x98\\xb7\\x1b\\x6a\\ \\x9f\\x71\\x8e\\x7b\\xa7\\x8d\\x9d\\x8b\\x6a\\x60\\x2c\\xd0\\xe2\\xf4\\x7b\\xb2\\ \\x53\\x40\\xa7\\x43\\x83\\x56\\x04\\xa8\\xc7\\xcf\\x59\\xb2\\x30\\x38\\x8b\\x9f\\ \\x6c\\x4e\\x72\\x3e\\x71\\x92\\x8c\\xf7\\x71\\xa9\\x1b\\x85\\x0c\\xc4\\x5d\\x48\\ \\x0c\\x37\\x50\\xee\\xb4\\xdd\\x2c\\x60\\xf8\\x77\\xc0\\x07\\xb6\\x25\\x22\\x16\\ \\xb5\\x51\\xb0\\xb4\\x88\\x9e\\x1f\\xb6\\xd3\\xd3\\x48\\x00\\xb7\\x0f\\x92\\x5b\\ \\xe1\\xfc\\x5a\\x62\\x33\\xe1\\xf4\\x14\\xfb\\x24\\x3d\\x40\\xe9\\x72\\x70\\xcf\\ \\x4d\\x83\\x53\\xbd\\x21\\x9e\\x47\\x12\\x9d\\xe0\\x09\\x40\\x34\\x19\\x62\\xfd\\ \\x9c\\x9e\\x6a\\x06\\x32\\x81\\xc1\\x50\\x57\\x85\\x7a\\x74\\x80\\x1b\\x8c\\x6c\\ \\xe7\\xad\\x0c\\xc0\\xed\\xc0\\xab\\xdc\\x07\\x64\\xe0\\x61\\x0f\\xd9\\xe4\\x13\\ \\xe7\\x02\\xdc\\x34\\x92\\x4a\\x10\\x94\\x0f\\x74\\xe0\\x36\\x77\\x61\\xf8\\x94\\", "\\xc7\\x72\\x7e\\x7b\\x3b\\x13\\x89\\xbf\\xf3\\x03\\x54\\xe6\\x41\\xef\\x32\\x0a\\ \\x86\\xf5\\xc0\\xd4\\x29\\x11\\x00\\x1a\\x8a\\x80\\x30\\xd0\\x2c\\xce\\x59\\x0a\\ \\xe7\\x00\\x5f\\xf4\\x05\\xa6\\x01\\x66\\x9d\\xed\\xa6\\xde\\x82\\x69\\x9f\\xd8\\ \\x26\\xd3\\x72\\xf4\\xb9\\xcc\\x11\\x63\\x0b\\xd8\\xef\\x82\\x3d\\x3e\\xe8\\xd2\\ \\x00\\x99\\x17\\x16\\xc1\\xf2\\xa9\\xf6\\x26\\x50\\x0f\\xac\\x84\\xe9\\x6f\\xd0\\ \\xf1\\x58\\x1b\\x7c\\x9b\\xe1\\x38\\xe7\\x3a\\xb8\\xf8\\xbf\\x50\\x93\\xe8\\x10\\ \\x76\\x13\\x24\\xc8\\x76\\x2f\\x29\\x7b\\xc8\\xa0\\x02\\xe2\\xd7\\xc3\\x0f\\x17\\ \\x62\\x5e\\x3e\\x8b\\x62\\x3e\\xa0\\x17\\x10\\xc1\\xe4\\x20\\x43\\x09\\xad\\xdb\\ \\x08\\xcd\\x26\\x8c\\x01\\x33\\xa1\\x4c\\x8c\\x05\\x76\\x25\\x3a\\x8a\\x5e\\x0c\\ \\xbe\\xcd\\xf0\\x78\\x15\\x6c\\x04\\x0e\\xdd\\x08\\x64\\x05\\x6c\\xb5\\x68\\xea\\ \\x81\\xe7\\x49\\x17\\x33\\xd7\\xc3\\x20\\xcb\\x76\\xcc\\x3e\\xa7\\x10\\x0f\\x10\\ 
\\x01\\x86\\x1e\\x87\\x8c\\xcb\\xdc\\x70\\xf4\\xaf\\xe0\\xeb\\x05\\x1a\\xe1\\xf2\\ \\xfd\\xe8\\xe5\\x0e\\xfc\\x2f\\x02\\xdb\\x6f\\x03\\xae\\xb0\\xe9\\x76\\x64\\x3d\\", "\\xd5\\xc4\\xf4\\x11\\x12\\xf1\\xcf\\x91\\x1e\\x47\\x9a\\x99\\x2d\\xe5\\xf8\\xa5\\ \\x2b\\x4c\\xe9\\x5b\\x54\\x17\\x45\\xd6\\x03\\x6e\\xc9\\xdf\\x5d\\xe2\\x5c\\x7b\\ \\x66\\xe8\\xc9\\x97\\xae\\x34\\xf4\\xe9\\x09\\x7b\\x28\\xba\\x4a\\x68\\xb1\\xd0\\ \\xd9\\x16\\xd4\\xbc\\x1d\\xe9\\x2a\\xbf\\xc4\\x15\\x12\\xf3\\xd5\\x4a\\xab\\x2c\\ \\x8e\\x4b\\xb9\\x23\\xa5\\x67\\x90\\xae\\x2a\\x94\\x58\\xa9\\x06\\xa4\\x20\\x4b\\ \\xed\\xcb\\x0b\\xf6\\x4a\\xbc\\xab\\x38\\xfb\\x55\\x81\\x65\\x4f\\x79\\xf9\\x46\\ \\xed\\x04\\x74\\x9a\\x6a\\x1d\\x24\\xa4\\xad\\x48\\x11\\x3a\\x15\\x34\\x7f\\x94\\ \\xdc\\x53\\x15\\x49\\xdd\\xab\\x36\\xef\\xef\\xfa\\xa2\\xf0\\xa0\\x16\\x76\\x3d\\ \\xa2\\x08\\xeb\\x74\\x77\\x9e\\xe4\\x9a\\xdb\\x9f\\x79\\xc6\\xda\\x14\\xd8\\x38\\ \\x19\\x92\\x8f\\xc1\\xe8\\x30\\xd1\\x2b\\x0d\\x52\\x9e\\xea\\x86\\xf1\\x45\\x02\\ \\x14\\x89\\xc8\\x05\\x89\\x54\\xbd\\xb8\\x8f\\xa5\\x8f\\x07\\xf8\\xfc\\x21\\xe1\\ \\xca\\xb0\\xc5\\x32\\xd9\\x21\\xcd\\xfa\\x20\\x44\\x4a\\x81\\x1b\\xa2\\xf8\\x87\\ \\x9f\\x80\\xd0\\x16\\x12\\x2a\\x4f\\x41\\xd4\\x87\\xd1\\x31\\x02\\xca\\x2c\\x22\\", "\\x70\\x8c\\xe2\\x54\\x2a\\x4a\\x48\\x3a\\xb7\\x97\\xc4\\x1a\\x95\\x63\\x29\\x98\\ \\xfb\\xb0\\xe4\\x9a\\xa5\\x83\\x58\\x6a\\x64\\x9b\\xe4\\x2f\\x52\\x83\\xf7\\x2b\\ \\xad\\xa1\\x55\\xe3\\x86\\x48\\x1f\\x12\\x50\\xa5\\x7b\\xad\\xf4\\xb8\\x21\\x2a\\ \\x6a\\xd1\\xce\\x2a\\xa4\\xb5\\x68\\xb3\\xec\\x82\\xac\\x6b\\xd1\\x9a\\x18\\xd2\\ \\x55\\x48\\xd9\\x48\\x69\\x86\\xaa\\x3f\\x44\\xff\\x16\\x3a\\xb5\\xc6\\x94\\x92\\ \\xd2\\x25\\x86\\x38\\x8b\\xbd\\x47\\x32\\x6e\\x56\\xe4\\xca\\x62\\x9d\\x6a\\x41\\ \\x9b\\x84\\x82\\x3f\\x20\\x25\\x96\\x38\\xef\\x7f\\x26\\xb1\\x4c\\xe2\\x5b\\x29\\ \\xfd\\x46\\x69\\x2b\\xd2\\x85\\xa5\\x12\\x6f\\x49\\x6c\\x96\\x78\\x43\\x7a\\xd8\\ \\x23\\xf1\\xaa\\xc4\\x26\\x69\\xd8\\xc5\\x12\\x1f\\xa9\\x91\\x98\\xe4\\xba\\x5d\\ 
\\x71\\xf6\\xa9\\x81\\xb0\\xc2\\x5c\\x2b\\xb9\\x6e\\xd1\\x7a\\xc2\\xba\\x17\\xe9\\ \\x3b\\x62\\x6a\\xf0\\xbd\\x22\\x0d\\xf5\\xab\\x82\\x45\\x22\\xb2\\x1b\\x55\\xef\\ \\x40\\x91\\x25\\xe8\\xcb\\x38\\x52\\x11\\xd2\\x14\\xa4\\x9b\\x4c\\xc9\\x6b\\x4a\\ \\x49\\x1e\\x55\\x2c\\x43\\x5f\\x06\\x91\\xee\\x43\\x72\\x17\\x4a\\xfc\\x4d\\x62\\", "qt_resource_data = b\"\\ \\x00\\x00\\x19\\x3d\\ \\x89\\ \\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0d\\x49\\x48\\x44\\x52\\x00\\ \\x00\\x00\\x32\\x00\\x00\\x00\\x32\\x08\\x06\\x00\\x00\\x00\\x1e\\x3f\\x88\\xb1\\ \\x00\\x00\\x00\\x06\\x62\\x4b\\x47\\x44\\x00\\xff\\x00\\xff\\x00\\xff\\xa0\\xbd\\ \\xa7\\x93\\x00\\x00\\x00\\x09\\x70\\x48\\x59\\x73\\x00\\x00\\x0b\\x13\\x00\\x00\\ \\x0b\\x13\\x01\\x00\\x9a\\x9c\\x18\\x00\\x00\\x00\\x07\\x74\\x49\\x4d\\x45\\x07\\ \\xe3\\x01\\x1e\\x09\\x34\\x07\\xfb\\x3d\\x97\\x4b\\x00\\x00\\x00\\x1d\\x69\\x54\\ \\x58\\x74\\x43\\x6f\\x6d\\x6d\\x65\\x6e\\x74\\x00\\x00\\x00\\x00\\x00\\x43\\x72\\ \\x65\\x61\\x74\\x65\\x64\\x20\\x77\\x69\\x74\\x68\\x20\\x47\\x49\\x4d\\x50\\x64\\", "\\x06\\x8d\\x17\\x19\\x6c\\xb8\\xdc\\xe2\\x54\\x81\\xfd\\x52\\x9f\\xd3\\x90\\x17\\ \\x86\\xf6\\x1c\\xd8\\x92\\x0a\\x85\\xc0\\xc5\\xe5\\xe0\\x8d\\x81\\x2b\\x05\\x94\\ \\x01\\x91\\xd3\\x90\\xb8\\x20\\x17\\xde\\xeb\\x0f\\xa1\\xa1\\xc0\\x40\\xe0\\x0f\\ \\x18\\xbe\\x1c\\xf6\\x0f\\x86\\xce\\x47\\x81\\x1a\\x9b\\x3a\\x69\\x02\\xb6\\x42\\ \\xca\\x6e\\x08\\x05\\x89\\x45\\xd7\\x53\\x8b\\x81\\x17\\x93\\x6c\\x7e\\xc0\\x74\\ \\x7d\\x4c\\x34\\x3e\\x8f\\x43\\x14\\x73\\x12\\x17\\xa9\\x40\\x42\\xfa\\x19\\x8c\\ \\x52\\x19\\xca\\x05\\xba\\x20\\x2e\\x06\\xee\\x3a\\x0c\\xfe\\x76\\x30\\xbf\\x04\\ \\x3e\\x07\\x32\\x80\\xcb\\xa0\\xf9\\x2a\\xd8\\x71\\x11\\xb4\\x87\\xa1\\xdf\\x09\\ \\x83\\x82\\xed\\x90\\xb1\\x0a\\xd8\\x9e\\x08\\xa6\\x05\\xe3\\xc3\\xfc\\xf4\\xb1\\ \\x18\\x14\\x85\\xdc\\x1b\\x80\\x03\\x06\\x74\\x26\\xa2\\xe2\\x04\\x3a\\x1f\\x69\\ \\xc7\\xec\\x1b\\xc3\\xdd\\x28\\x5c\\x8b\\x4d\\x8c\\xd5\\xbd\\xa1\\xf1\\x6a\\xa0\\ 
\\x18\\x68\\x05\\xb2\\x80\\x1e\\xc0\\x66\\x48\\xff\\x11\\x46\\xee\\x04\\x3f\\xf0\\ \\xdd\\xe5\\x28\\xf2\\x36\\x27\\x29\\xc1\\x02\\x0a\\x68\\x04\\xc2\\xb4\\x51\\xc0\\", "\\x5d\\xdc\\x64\\x3e\\x40\\xda\\xaf\\x03\\xc0\\xbf\\x0e\\xba\\x75\\xf2\\xf5\\xe7\\ \\xa2\\xc7\\x06\\x38\\x5a\\x6b\\x32\\xfe\\x50\\x1a\\x19\\xd5\\x51\\x68\\x4f\\x83\\ \\x84\\x28\\xd4\\xd6\\xd1\\xf9\\x3c\\x6c\\x19\\x0f\\x7d\\x2c\\xe8\\xfe\\x0a\\xb8\\ \\xde\\x99\\x02\\x6d\\x57\\x41\\xe6\\x87\\x90\\xe1\\x42\\x97\\x54\\x41\\x71\\x19\\ \\x9d\\x57\\x99\\xf8\\xdf\\x4b\\x41\\x8b\\x2e\\x27\\x16\\xbe\\x06\\x4f\\xc6\\x6b\\ \\xe0\\xa9\\x85\\xba\\xc7\\x61\\xf8\\xbb\\x70\\x32\\x8c\\x96\\xb5\\x12\\x49\\x08\\ \\xe2\\x5b\\x1e\\x82\\x77\\xee\\x21\\xce\\x2d\\x1c\\xa2\\x3b\\x3d\\x88\\x62\\xb2\\ \\x95\\xdd\\x5c\\xc6\\xf9\\x9e\\x8d\\xec\\x1b\\xf8\\x15\\xc5\\x5d\\x8a\\xd9\\xfc\\ \\xfd\\x97\\x4c\\xce\\x3c\\x97\\xfd\\xf7\\x1f\\xa7\\xab\\xd9\\x40\\xc3\\x2b\\xb7\\ \\x61\\xac\\xfc\\x3d\\x41\\x57\\x7f\\xdf\\xdd\\x1e\\xe6\\xac\\x05\\x4a\\x4c\\xea\\ \\x17\\x59\\x64\\xee\\xcb\\xc1\\x35\\x0f\\x38\\xde\\x1d\\x54\\x04\\xf4\\x71\\xc2\\ \\xcf\\x20\\x3b\\x00\\x4d\\x3a\\x49\\xfd\\xe7\\x65\\xac\\x4f\\x86\\x1b\\xde\\x05\\ \\xe3\\x89\\x31\\xc4\\x9b\\x5f\\x76\\xac\\x6c\\x22\\x2e\\x8e\\x41\\xff\\x77\\xed\\", "\\x27\\xe4\\x6c\\x62\\x05\\x30\\x6f\\x18\\x58\\xf7\\x01\\xa3\\x09\\x90\\x88\\x89\\ \\x9f\\x1d\\x24\\x53\\x80\\x9b\\x16\\x44\\x2a\\x06\\xed\\x80\\x2b\\x6f\\x5e\\x74\\ \\xde\\xb9\\x58\\xcc\\x04\\xae\\x6b\\x83\\xbc\\x23\\xce\\xf3\\x7f\\x8b\\xc0\\x2e\\ \\x2f\\x9e\\xb4\\x38\\x5b\\x67\\xc0\\xaf\\x77\\x43\\xcb\\x3f\\x40\\x17\\xc5\\x49\\ \\x09\\x86\\x31\\xab\\x23\\x10\\x8a\\x80\\x51\\x8b\\x75\\x63\\x3b\\x4d\\x43\\x20\\ \\x7b\\x0f\\x24\\xaf\\x32\\xe0\\xac\\x1b\\x38\\x0d\\xb4\\x81\\xcf\\x05\\x39\\x35\\ \\x30\\xf8\\x28\\xf4\\xf9\\x12\\x9a\\x16\\x40\\xc4\\x0b\\x1a\\x0d\\x94\\xd8\\x05\\ \\x46\\x57\\x60\\xf4\\xb4\\x20\\xd5\\xd6\\x4b\\x92\\x81\\x6d\\xc0\\xd2\\x12\\x08\\ \\x3c\\xe7\\x54\\xe6\\xc3\\xcd\\x29\\x22\\x18\\x94\\xf2\\x3d\\x09\\xf8\\x68\\x24\\ 
\\x15\\xe1\\xe6\\x00\\x60\\x3c\\x2e\\x34\\x06\\x98\\xb0\\xd7\\xc1\\xe9\\x61\\x60\\ \\x08\\x34\\x64\\x40\\xd6\\x63\\x60\\xec\\x02\\xfc\\x49\\x58\\x53\\x23\\xec\\xb8\\ \\x39\\xca\\xee\\x7e\\x10\\x31\\xe1\\xbc\\x6a\\x28\\xfd\\x15\\x92\\xc3\\x70\\x70\\ \\x3a\\x54\\x7b\\xa0\\x67\\x08\\x06\\x6c\\x00\\xef\\xcf\\x6e\\x62\\xbd\\x5d\\x44\\", "WARNING! All changes made in this file will be lost! # from PyQt5", "\\xdd\\xe4\\x96\\x98\\x22\\xf1\\x9e\\xc4\\x42\\x89\\xc9\\xd2\\xa7\\x48\\x2c\\x95\\ \\x28\\x73\\xe0\\x85\\x2c\\x5e\\x56\\x05\\x96\\xda\\x78\\x45\\x4a\\x19\\xae\\x06\\ \\xa4\\x2a\\x1a\\x14\\x4b\\xb9\\x43\\xba\\x20\\x59\\x61\\xe3\\x65\\xfd\\xb3\\x58\\ \\x7a\\xce\\x1f\\xd6\\xdb\\xee\\x23\\xda\\x53\\x34\\x5f\\x9c\\xe8\\x40\\x0d\\xfb\\ \\x90\\x1e\\x42\\x7f\\xb6\\x23\\x6b\\x13\\xd2\\xf9\\xa6\\xc2\\xff\\x41\\x95\\xeb\\ \\x90\\xd2\\x3d\\x92\\xd1\\x53\\x2a\\x71\\xab\\x6a\\x13\\xfa\\x8f\\xd0\\xea\\xb3\\ \\x28\\x3a\\x16\\x89\\xf3\\x24\\x6e\\x92\\xb8\\x56\\x62\\x92\\x42\\x4f\\xb8\\x55\\ \\x65\\x21\\xdd\\x80\\xc4\\x1d\\x12\\x1f\\x49\\xfc\\x21\\x99\\x9f\\x4b\\x85\\x57\\ \\x4b\\x6b\\x90\\x26\\xba\\x25\\xde\\x95\\xc5\\x59\\x89\\x72\\x69\\x46\\x0f\\xa9\\ \\x70\\x8c\\xe2\\x54\\x2a\\x4a\\x48\\x3a\\xb7\\x97\\xc4\\x1a\\x95\\x63\\x29\\x98\\ \\xfb\\xb0\\xe4\\x9a\\xa5\\x83\\x58\\x6a\\x64\\x9b\\xe4\\x2f\\x52\\x83\\xf7\\x2b\\ \\xad\\xa1\\x55\\xe3\\x86\\x48\\x1f\\x12\\x50\\xa5\\x7b\\xad\\xf4\\xb8\\x21\\x2a\\ \\x6a\\xd1\\xce\\x2a\\xa4\\xb5\\x68\\xb3\\xec\\x82\\xac\\x6b\\xd1\\x9a\\x18\\xd2\\", "\\xd2\\x54\\xe9\\x17\\xa4\\x61\\x45\\x12\\x1f\\x4a\\x54\\x2a\\xc6\\x46\\x29\\x6d\\ \\x8c\\xc4\\x9b\\xb2\\x58\\x2b\\xa5\\xa7\\x39\\x8b\\x3d\\x29\\x75\\x4f\\x56\\x94\\ \\xb5\\x3a\\x4c\\x4c\\x01\\x0e\\x4a\\xc6\\x33\\x8a\\xd2\\xa4\\x38\\x01\\x1d\\x24\\ \\xa6\\xf6\\xb1\\xcb\\xa4\\xf3\\x5d\\x8a\\xe6\\x4e\\x51\\x1d\\x3f\\xeb\\x3b\\xa4\\ \\xd5\\xc4\\xf4\\x11\\x12\\xf1\\xcf\\x91\\x1e\\x47\\x9a\\x99\\x2d\\xe5\\xf8\\xa5\\ \\x2b\\x4c\\xe9\\x5b\\x54\\x17\\x45\\xd6\\x03\\x6e\\xc9\\xdf\\x5d\\xe2\\x5c\\x7b\\ 
\\x66\\xe8\\xc9\\x97\\xae\\x34\\xf4\\xe9\\x09\\x7b\\x28\\xba\\x4a\\x68\\xb1\\xd0\\ \\xd9\\x16\\xd4\\xbc\\x1d\\xe9\\x2a\\xbf\\xc4\\x15\\x12\\xf3\\xd5\\x4a\\xab\\x2c\\ \\x8e\\x4b\\xb9\\x23\\xa5\\x67\\x90\\xae\\x2a\\x94\\x58\\xa9\\x06\\xa4\\x20\\x4b\\ \\xed\\xcb\\x0b\\xf6\\x4a\\xbc\\xab\\x38\\xfb\\x55\\x81\\x65\\x4f\\x79\\xf9\\x46\\ \\xed\\x04\\x74\\x9a\\x6a\\x1d\\x24\\xa4\\xad\\x48\\x11\\x3a\\x15\\x34\\x7f\\x94\\ \\xdc\\x53\\x15\\x49\\xdd\\xab\\x36\\xef\\xef\\xfa\\xa2\\xf0\\xa0\\x16\\x76\\x3d\\ \\xa2\\x08\\xeb\\x74\\x77\\x9e\\xe4\\x9a\\xdb\\x9f\\x79\\xc6\\xda\\x14\\xd8\\x38\\", "\\x23\\xf1\\xaa\\xc4\\x26\\x69\\xd8\\xc5\\x12\\x1f\\xa9\\x91\\x98\\xe4\\xba\\x5d\\ \\x71\\xf6\\xa9\\x81\\xb0\\xc2\\x5c\\x2b\\xb9\\x6e\\xd1\\x7a\\xc2\\xba\\x17\\xe9\\ \\x3b\\x62\\x6a\\xf0\\xbd\\x22\\x0d\\xf5\\xab\\x82\\x45\\x22\\xb2\\x1b\\x55\\xef\\ \\x40\\x91\\x25\\xe8\\xcb\\x38\\x52\\x11\\xd2\\x14\\xa4\\x9b\\x4c\\xc9\\x6b\\x4a\\ \\x49\\x1e\\x55\\x2c\\x43\\x5f\\x06\\x91\\xee\\x43\\x72\\x17\\x4a\\xfc\\x4d\\x62\\ \\x92\\x64\\x9e\\x27\\x2b\\xbb\\x50\\x81\\x95\\xe8\\x47\\x0b\\xad\\x11\\xfa\\x46\\ \\xe8\\x90\\x90\\x16\\x20\\x65\\xa4\\x49\\xdc\\xee\\x2c\\xf2\\x6b\\x89\\xef\\xa4\\ \\xec\\xcb\\xa5\\xf7\\x0d\\xa9\\xa0\\xbf\\xc4\\xff\\x24\\xf6\\x4b\\xbd\\x2f\\x95\\ \\xf2\\x87\\x4b\\xfc\\x2c\\x15\\x3d\\x28\\x31\\x47\\x61\\x02\\x12\\x0f\\xa9\\x93\\ \\x5a\\x9d\\xa6\\x46\\x32\\x27\\xa9\\x9a\\x4a\\x7d\\x8d\\xb4\\x84\\x98\\x54\\x38\\ \\x5e\\xe2\\x62\\x45\\xf8\\x42\\xaf\\x13\\x93\\xa1\\xc7\\x11\\xdd\\x40\\xa3\\x20\\ \\xbe\\x07\\xdc\\x33\\x93\\xc0\\x17\\x81\\xd0\\x00\\xf0\\x1d\\x22\\xb8\\x3c\\x82\\ \\xd5\\x08\\xc9\\xff\\x01\\xca\\x52\\x80\\x9e\\xe0\\xe9\\xc0\\xba\\xfe\\x34\\x07\\", "\\x3c\\x6a\\x6d\\x41\\x3f\\xcb\\xa1\\xd0\\xc4\\x04\\xc9\\x3f\\x48\\xb1\\xaf\\xd0\\ \\x1c\\xa1\\xc6\\xdf\\x4c\\xa9\\xb7\\x47\\xa2\\x54\\xe2\\x42\\x89\\xfb\\xa4\\xa4\\ \\x73\\xb4\\xa6\\x02\\xad\\x6e\\x41\\xea\\xe6\\x93\\xb8\\xd5\\x99\\x0d\\xfe\\x4b\\ \\xea\\x31\\x5c\\xf1\\x5f\\x91\\x75\\x10\\xe9\\xbc\\x14\\x27\\xc6\\xbe\\x69\\x9f\\ 
\\xc4\\x7d\\x49\\xd2\\x55\\x99\\x4e\\xbc\\x9d\\x2a\\x3d\\x89\\xc4\\x74\\x89\\xf7\\ \\xa5\\x31\\xbd\\xa5\\xae\\x97\\x49\\xac\\x52\\x88\\x36\\xa9\\x47\\x81\\xfd\\x3a\\ \\x35\\x8a\\x52\\x27\\xb1\\x5c\\x16\\x6f\\x48\\x4f\\xa1\\x78\\xca\\x75\\x6a\\x4a\\ \\x78\\x49\\x35\\x74\\x68\\x8e\\x4b\\x42\\x59\\x3e\\x59\\xef\\xa1\\xda\\x00\\x8a\\ \\x4f\\x4f\\x96\\x28\\x96\\xae\\x40\\xc1\\xfd\\xf6\\x04\\x2a\\xb0\\x14\\xc9\\x3f\\ \\xde\\x59\\xe8\\x34\\x89\\x4b\\xa4\\x94\\x6e\\xaa\\x5d\\x6a\\xea\\x45\\xa1\\x2f\\ \\x2c\\x64\\x6d\\x41\\x2a\\xcd\\x75\\xf4\\xe3\\x11\\x89\\xd5\\x92\\xeb\\x7d\\xe9\\ \\xd2\\x54\\xe9\\x17\\xa4\\x61\\x45\\x12\\x1f\\x4a\\x54\\x2a\\xc6\\x46\\x29\\x6d\\ \\x8c\\xc4\\x9b\\xb2\\x58\\x2b\\xa5\\xa7\\x39\\x8b\\x3d\\x29\\x75\\x4f\\x56\\x94\\", "\\x5e\\xe2\\x62\\x45\\xf8\\x42\\xaf\\x13\\x93\\xa1\\xc7\\x11\\xdd\\x40\\xa3\\x20\\ \\xbe\\x07\\xdc\\x33\\x93\\xc0\\x17\\x81\\xd0\\x00\\xf0\\x1d\\x22\\xb8\\x3c\\x82\\ \\xd5\\x08\\xc9\\xff\\x01\\xca\\x52\\x80\\x9e\\xe0\\xe9\\xc0\\xba\\xfe\\x34\\x07\\ \\x66\\x84\\x38\\x3a\\x0a\\x48\\x80\\x0e\\x5b\\x5d\\x70\\x01\\x05\\x40\\x7e\\x0c\\ \\x52\\x6a\\xc0\\xfc\\xb7\\x1b\\x3e\\x1a\\x01\\x91\\x99\\x40\\x37\\xa0\\x0e\\x92\\ \\x0e\\xc3\\x9d\\xcf\\xc3\\xfb\\xf9\\xd0\\xfe\\x36\\x10\\x83\\x5e\\xf7\\xc3\\xd1\\ \\x6f\\x80\\x0e\\x2c\\x52\\x30\\xe9\\x0e\\xfc\\x8f\\x0a\\x9e\\x24\\x42\\x23\\x99\\ \\x74\\x52\\x46\\x37\\x06\\xf3\\x13\\x49\\x7c\\x03\\xc6\\x25\\x9c\\xd2\\x60\\xd6\\ \\x53\\x42\\x12\\x60\\x54\\xfd\\x82\\xaa\\xfa\\xc2\\xd6\\x4c\\x98\\xbd\\x15\\x12\\ \\xe7\\xb9\\xc0\\x63\\x41\\xc8\\x84\\xac\\x38\\x24\\x00\\xbf\\x00\\x5d\\x81\\x01\\ \\x06\\x8d\\x17\\x19\\x6c\\xb8\\xdc\\xe2\\x54\\x81\\xfd\\x52\\x9f\\xd3\\x90\\x17\\ \\x86\\xf6\\x1c\\xd8\\x92\\x0a\\x85\\xc0\\xc5\\xe5\\xe0\\x8d\\x81\\x2b\\x05\\x94\\ \\x01\\x91\\xd3\\x90\\xb8\\x20\\x17\\xde\\xeb\\x0f\\xa1\\xa1\\xc0\\x40\\xe0\\x0f\\", "\\xd0\\xf2\\x02\\x7d\\x2f\\x99\\xc1\\xa8\\x45\\x09\\x14\\xd7\\x42\\x5a\\x38\\x0e\\ \\x1d\\x6d\\x44\\x18\\x88\\xf7\\xb7\\x97\\x30\\x27\\x7e\\x47\\xa8\\xde\\xc0\\xb7\\ 
\\x29\\x87\\xd6\\xa3\\x23\\xd0\\xa9\\x2c\\x5c\\x8f\\x4d\\xc0\\x38\\xe4\\xa1\\xf7\\ \\xeb\\x29\\xf0\\x99\\x8b\\xe3\\xea\\x46\\x60\\xed\\x83\\x64\\x86\\xfb\\x91\\x1c\\ \\x4d\\xa7\\xa6\\xc5\\xa0\\xe0\\x64\\x22\\x93\\xdf\\x9a\\x8b\\x59\\x19\\x8c\\x92\\ \\xb8\\x0d\\xc8\\x81\\x78\\x14\\xc2\\x64\\x91\\x91\\x68\\xc2\\x56\\x17\\xb8\\xce\\ \\x62\\x8d\\x83\\x0d\\x37\\xc1\\x68\\x82\\xdc\\x70\\x77\\x02\\x2c\\xcc\\x05\\xf2\\ \\xc1\\x30\\xe0\\x9c\\xcd\\x1c\\x7b\\xb3\\x8d\\xde\\xcf\\xc2\\xae\\x3e\\xb0\\x0b\\ \\x88\\xb9\\xc1\\x9d\\x03\\x81\\x81\\xc0\\xd3\\xfb\\xc1\\x3c\\x03\\xec\\x43\\x44\\ \\x11\\xb3\\x49\\x9a\\xf3\\x24\\x7c\\x9c\\x45\\x6c\\x8c\\xa0\\xcb\\xef\\xb8\\xe8\\ \\x0a\\x94\\x63\\x1a\\x3e\\x7c\\x5d\\x93\\xd1\\xf8\\x16\\xd2\\x9a\\x9b\\x70\\x75\\ \\xf6\\x84\\x68\\x6f\\x28\\x3d\\xc3\\xd0\\x4b\\xcb\\xc9\\x8c\\x2c\\x62\\xdf\\xb2\\ \\x9d\\xdc\\xb2\\xfb\\x08\\x1e\\xef\\x11\\x16\\x56\\x26\\x71\\xdb\\xb1\\x5f\\xf8\\", "\\xd2\\x25\\x86\\x38\\x8b\\xbd\\x47\\x32\\x6e\\x56\\xe4\\xca\\x62\\x9d\\x6a\\x41\\ \\x9b\\x84\\x82\\x3f\\x20\\x25\\x96\\x38\\xef\\x7f\\x26\\xb1\\x4c\\xe2\\x5b\\x29\\ \\xfd\\x46\\x69\\x2b\\xd2\\x85\\xa5\\x12\\x6f\\x49\\x6c\\x96\\x78\\x43\\x7a\\xd8\\ \\x23\\xf1\\xaa\\xc4\\x26\\x69\\xd8\\xc5\\x12\\x1f\\xa9\\x91\\x98\\xe4\\xba\\x5d\\ \\x71\\xf6\\xa9\\x81\\xb0\\xc2\\x5c\\x2b\\xb9\\x6e\\xd1\\x7a\\xc2\\xba\\x17\\xe9\\ \\x3b\\x62\\x6a\\xf0\\xbd\\x22\\x0d\\xf5\\xab\\x82\\x45\\x22\\xb2\\x1b\\x55\\xef\\ \\x40\\x91\\x25\\xe8\\xcb\\x38\\x52\\x11\\xd2\\x14\\xa4\\x9b\\x4c\\xc9\\x6b\\x4a\\ \\x49\\x1e\\x55\\x2c\\x43\\x5f\\x06\\x91\\xee\\x43\\x72\\x17\\x4a\\xfc\\x4d\\x62\\ \\x92\\x64\\x9e\\x27\\x2b\\xbb\\x50\\x81\\x95\\xe8\\x47\\x0b\\xad\\x11\\xfa\\x46\\ \\xe8\\x90\\x90\\x16\\x20\\x65\\xa4\\x49\\xdc\\xee\\x2c\\xf2\\x6b\\x89\\xef\\xa4\\ \\xec\\xcb\\xa5\\xf7\\x0d\\xa9\\xa0\\xbf\\xc4\\xff\\x24\\xf6\\x4b\\xbd\\x2f\\x95\\ \\xf2\\x87\\x4b\\xfc\\x2c\\x15\\x3d\\x28\\x31\\x47\\x61\\x02\\x12\\x0f\\xa9\\x93\\ \\x5a\\x9d\\xa6\\x46\\x32\\x27\\xa9\\x9a\\x4a\\x7d\\x8d\\xb4\\x84\\x98\\x54\\x38\\", 
"\\x3b\\x9a\\x91\\x8a\\x0d\\xc9\\xb8\\x44\\x72\\x4f\\x53\\xec\\xfe\\x02\\x55\\x34\\ \\x22\\xeb\\x30\\xd2\\xa4\\x44\\xc9\\x78\\xca\\x19\\xf6\\xcc\\x97\\x86\\x95\\x48\\ \\xfb\\x90\\x0a\\xa6\\x48\\xfc\\x20\\x79\\x2f\\x96\\x16\\xe3\\x4c\\xbc\\xbe\\x92\\ \\xce\\x19\\x2e\\xcd\\xec\\xa9\\x38\\x47\\xd5\\x4e\\x4c\\x56\\xef\\xbb\\xa4\\xe1\\ \\xdd\\x24\\xbe\\x51\\x05\\x71\\xd5\\x10\\x57\\x98\\xbd\\xd2\\xe8\\x3e\\xd2\\xb9\\ \\x1e\\x69\\x58\\x9a\\x94\\xf0\\x90\\x4e\\x25\\xaf\\x11\\x81\\xa9\\xd9\\xd2\\x3f\\ \\x3c\\x6a\\x6d\\x41\\x3f\\xcb\\xa1\\xd0\\xc4\\x04\\xc9\\x3f\\x48\\xb1\\xaf\\xd0\\ \\x1c\\xa1\\xc6\\xdf\\x4c\\xa9\\xb7\\x47\\xa2\\x54\\xe2\\x42\\x89\\xfb\\xa4\\xa4\\ \\x73\\xb4\\xa6\\x02\\xad\\x6e\\x41\\xea\\xe6\\x93\\xb8\\xd5\\x99\\x0d\\xfe\\x4b\\ \\xea\\x31\\x5c\\xf1\\x5f\\x91\\x75\\x10\\xe9\\xbc\\x14\\x27\\xc6\\xbe\\x69\\x9f\\ \\xc4\\x7d\\x49\\xd2\\x55\\x99\\x4e\\xbc\\x9d\\x2a\\x3d\\x89\\xc4\\x74\\x89\\xf7\\ \\xa5\\x31\\xbd\\xa5\\xae\\x97\\x49\\xac\\x52\\x88\\x36\\xa9\\x47\\x81\\xfd\\x3a\\ \\x35\\x8a\\x52\\x27\\xb1\\x5c\\x16\\x6f\\x48\\x4f\\xa1\\x78\\xca\\x75\\x6a\\x4a\\", "= qt_resource_struct_v2 def qInitResources(): QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data) def qCleanupResources(): QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name,", "\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x01\\x74\\x20\\x97\\x84\\xbd\\ \" qt_version = [int(v) for v in QtCore.qVersion().split('.')] if", "\\xe8\\x90\\x90\\x16\\x20\\x65\\xa4\\x49\\xdc\\xee\\x2c\\xf2\\x6b\\x89\\xef\\xa4\\ \\xec\\xcb\\xa5\\xf7\\x0d\\xa9\\xa0\\xbf\\xc4\\xff\\x24\\xf6\\x4b\\xbd\\x2f\\x95\\ \\xf2\\x87\\x4b\\xfc\\x2c\\x15\\x3d\\x28\\x31\\x47\\x61\\x02\\x12\\x0f\\xa9\\x93\\ \\x5a\\x9d\\xa6\\x46\\x32\\x27\\xa9\\x9a\\x4a\\x7d\\x8d\\xb4\\x84\\x98\\x54\\x38\\ 
\\x5e\\xe2\\x62\\x45\\xf8\\x42\\xaf\\x13\\x93\\xa1\\xc7\\x11\\xdd\\x40\\xa3\\x20\\ \\xbe\\x07\\xdc\\x33\\x93\\xc0\\x17\\x81\\xd0\\x00\\xf0\\x1d\\x22\\xb8\\x3c\\x82\\ \\xd5\\x08\\xc9\\xff\\x01\\xca\\x52\\x80\\x9e\\xe0\\xe9\\xc0\\xba\\xfe\\x34\\x07\\ \\x66\\x84\\x38\\x3a\\x0a\\x48\\x80\\x0e\\x5b\\x5d\\x70\\x01\\x05\\x40\\x7e\\x0c\\ \\x52\\x6a\\xc0\\xfc\\xb7\\x1b\\x3e\\x1a\\x01\\x91\\x99\\x40\\x37\\xa0\\x0e\\x92\\ \\x0e\\xc3\\x9d\\xcf\\xc3\\xfb\\xf9\\xd0\\xfe\\x36\\x10\\x83\\x5e\\xf7\\xc3\\xd1\\ \\x6f\\x80\\x0e\\x2c\\x52\\x30\\xe9\\x0e\\xfc\\x8f\\x0a\\x9e\\x24\\x42\\x23\\x99\\ \\x74\\x52\\x46\\x37\\x06\\xf3\\x13\\x49\\x7c\\x03\\xc6\\x25\\x9c\\xd2\\x60\\xd6\\ \\x53\\x42\\x12\\x60\\x54\\xfd\\x82\\xaa\\xfa\\xc2\\xd6\\x4c\\x98\\xbd\\x15\\x12\\", "this file will be lost! # from PyQt5 import QtCore from silx.gui import", "\\xc9\\x7b\\x8b\\xf4\\x3c\\xb2\\x78\\x4d\\x47\\x88\\xab\\x95\\x93\\xd2\\x2d\\x48\\ \\x09\\x97\\xc9\\xa2\\x5a\\xe2\\xa8\\xe2\\x74\\xd8\\x73\\x94\\x84\\x71\\xd2\\x84\\ \\x54\\x29\\xad\\x44\\x1a\\x93\\x22\\x1a\\xd7\\x20\\x8d\\xb0\\x67\\x22\\x95\\xb5\\ \\xa8\\x7a\\x1f\\x52\\xae\\xcb\\x59\\xc4\\x20\\xb5\\x5d\\xe3\\xd5\\x8f\\xad\\x68\\ \\x6f\\x33\\x0a\\xbf\\x66\\x48\\x05\\xc9\\x12\\x57\\x4a\\xfe\\x21\\xaa\\x2c\\x47\\ \\x3b\\x9a\\x91\\x8a\\x0d\\xc9\\xb8\\x44\\x72\\x4f\\x53\\xec\\xfe\\x02\\x55\\x34\\ \\x22\\xeb\\x30\\xd2\\xa4\\x44\\xc9\\x78\\xca\\x19\\xf6\\xcc\\x97\\x86\\x95\\x48\\ \\xfb\\x90\\x0a\\xa6\\x48\\xfc\\x20\\x79\\x2f\\x96\\x16\\xe3\\x4c\\xbc\\xbe\\x92\\ \\xce\\x19\\x2e\\xcd\\xec\\xa9\\x38\\x47\\xd5\\x4e\\x4c\\x56\\xef\\xbb\\xa4\\xe1\\ \\xdd\\x24\\xbe\\x51\\x05\\x71\\xd5\\x10\\x57\\x98\\xbd\\xd2\\xe8\\x3e\\xd2\\xb9\\ \\x1e\\x69\\x58\\x9a\\x94\\xf0\\x90\\x4e\\x25\\xaf\\x11\\x81\\xa9\\xd9\\xd2\\x3f\\ \\x3c\\x6a\\x6d\\x41\\x3f\\xcb\\xa1\\xd0\\xc4\\x04\\xc9\\x3f\\x48\\xb1\\xaf\\xd0\\ \\x1c\\xa1\\xc6\\xdf\\x4c\\xa9\\xb7\\x47\\xa2\\x54\\xe2\\x42\\x89\\xfb\\xa4\\xa4\\", "\\x3a\\x0f\\xaa\\xc6\\x90\\xfc\\x9c\\xcd\\x03\\x38\\x11\\x20\\xd7\\xb1\\x80\\x03\\ 
\\x42\\x90\\x71\\xbd\\x17\\xfe\\xbc\\xd0\\x71\\x6e\\x2e\\x7b\\x03\\x2e\\xdd\\x4d\\ \\xe4\\x83\\x00\\x34\\x80\\xb1\\x74\\x08\\xc4\\xaf\\x43\\xe4\\x80\\x6b\\x05\\x5c\\ \\x12\\x84\\xca\\xae\\x8e\\x2f\\xe9\\x84\\x90\\xcb\\xde\\x9d\\x92\\x33\\x44\\xe9\\ \\xa0\\x85\\x24\\xb0\\x86\\x43\\x52\\x12\\xdc\\xd5\\x88\\xc1\\x6b\\x74\\xa3\\x83\\ \\x14\\x22\\x54\\x60\\x50\\x13\\xbd\\x04\\xbe\\xee\\x83\\xf9\\xdb\\x4d\\x16\\xdb\\ \\x80\\xc3\\x5e\\x68\\xbe\\xd5\\xd1\\xa1\\x69\\x10\\x1a\\x69\\xef\\xf2\\x7a\\xe0\\ \\xd8\\x0b\\x31\\x98\\x52\\x03\\xae\\x3f\\xec\\x33\\xa8\\xf0\\x91\\x7a\\xc8\\x2e\\ \\xa4\\xc5\\xd1\\x69\\x01\\x3d\\x7f\\x04\\x4e\\x7b\\x9c\\x73\\xaa\\xb2\\xd3\\xcd\\ \\xf0\\xd5\\xb4\\x7d\\xd0\\x48\\x3c\\x03\\x8c\\x4f\\x81\\xf6\\x5c\\xe0\\x24\\x06\\ \\xeb\\x60\\xfa\\xff\\xa0\\x17\\x50\\x59\\x07\\xfc\\x66\\x7b\\xf0\\xdc\\xd3\\xb6\\ \\xb5\\xca\\xe9\\xc0\\xc7\\x5e\\x52\\xc1\\x16\\xe1\\x8f\\x53\\xed\\xbd\\xc9\\x59\\ \\x4c\\x8e\\xf1\\x23\\x7e\\x5c\\x00\\x24\\xe2\\x82\\xce\\xcb\\x70\\xd5\\xbe\\xc8\\", "\\x2d\\x9d\\xc8\\x7a\\xd0\\x70\\x02\\xd8\\x42\\x89\\xb7\\xa5\\x82\\x0b\\x14\\xd9\\ \\x87\\x34\\x0b\\x89\\xc5\\xf6\\x08\\xe1\\xda\\x22\\xbb\\xb1\\x79\\x59\\xe2\\x4f\\ \\xc9\\x7b\\x8b\\xf4\\x3c\\xb2\\x78\\x4d\\x47\\x88\\xab\\x95\\x93\\xd2\\x2d\\x48\\ \\x09\\x97\\xc9\\xa2\\x5a\\xe2\\xa8\\xe2\\x74\\xd8\\x73\\x94\\x84\\x71\\xd2\\x84\\ \\x54\\x29\\xad\\x44\\x1a\\x93\\x22\\x1a\\xd7\\x20\\x8d\\xb0\\x67\\x22\\x95\\xb5\\ \\xa8\\x7a\\x1f\\x52\\xae\\xcb\\x59\\xc4\\x20\\xb5\\x5d\\xe3\\xd5\\x8f\\xad\\x68\\ \\x6f\\x33\\x0a\\xbf\\x66\\x48\\x05\\xc9\\x12\\x57\\x4a\\xfe\\x21\\xaa\\x2c\\x47\\ \\x3b\\x9a\\x91\\x8a\\x0d\\xc9\\xb8\\x44\\x72\\x4f\\x53\\xec\\xfe\\x02\\x55\\x34\\ \\x22\\xeb\\x30\\xd2\\xa4\\x44\\xc9\\x78\\xca\\x19\\xf6\\xcc\\x97\\x86\\x95\\x48\\ \\xfb\\x90\\x0a\\xa6\\x48\\xfc\\x20\\x79\\x2f\\x96\\x16\\xe3\\x4c\\xbc\\xbe\\x92\\ \\xce\\x19\\x2e\\xcd\\xec\\xa9\\x38\\x47\\xd5\\x4e\\x4c\\x56\\xef\\xbb\\xa4\\xe1\\ \\xdd\\x24\\xbe\\x51\\x05\\x71\\xd5\\x10\\x57\\x98\\xbd\\xd2\\xe8\\x3e\\xd2\\xb9\\ 
\\x1e\\x69\\x58\\x9a\\x94\\xf0\\x90\\x4e\\x25\\xaf\\x11\\x81\\xa9\\xd9\\xd2\\x3f\\", "qt_resource_struct_v1 else: rcc_version = 2 qt_resource_struct = qt_resource_struct_v2 def qInitResources(): QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name,", "silx.gui import qt as QtCore qt_resource_data = b\"\\ \\x00\\x00\\x19\\x3d\\ \\x89\\ \\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0d\\x49\\x48\\x44\\x52\\x00\\ \\x00\\x00\\x32\\x00\\x00\\x00\\x32\\x08\\x06\\x00\\x00\\x00\\x1e\\x3f\\x88\\xb1\\ \\x00\\x00\\x00\\x06\\x62\\x4b\\x47\\x44\\x00\\xff\\x00\\xff\\x00\\xff\\xa0\\xbd\\", "\\xcd\\x23\\xd2\\x69\\xb1\\xc2\\x5f\\xcc\\x65\\x9d\\xd5\\x84\\x5b\\x45\\xa3\\xbf\\ \\x81\\x98\\x75\\x02\\xde\\xdf\\x79\\x95\\x46\\x6d\\x7b\\x4f\\xd7\\x1c\\x9a\\xad\\ \\xc0\\x2f\\x48\\x2f\\x21\\x8d\\xf4\\x48\\x5d\\x8b\\xa4\\x3b\\x0d\\x35\\x1c\\xb2\\ \\x73\\xfa\\x1b\\x16\\xda\\xd8\\x89\\x82\\xb5\\xa8\\xea\\x18\\x7a\\xc1\\x42\\xcf\\ \\x0b\\xad\\x13\\xfa\\x3d\\x8c\\xe2\\x9b\\x0d\\x75\\x9c\\x34\\xa4\\xa3\\xc8\\x6a\\ \\xb4\\x19\\x50\\x05\\xdd\\x25\\x63\\x92\\xc4\\x08\\x89\\x19\\xd2\\xf8\\x02\\xe9\\ \\xff\\x21\\x79\\xee\\x90\\x8c\\xcf\\x25\\xe3\\x4a\\xbb\\x7f\\x8c\\x6a\\xc9\\x78\\ \\x47\\x32\\x26\\x48\\xac\\x90\\xd8\\xa7\\xbd\\x48\\x11\\x9e\\x95\\xd8\\xa9\\x72\\ \\xa2\\xb2\\x58\\xa7\\x36\\x0e\\x4a\\x2c\\xd7\\x7a\\xb3\\x5c\\xa1\\xae\\x77\\xa9\\ \\xf9\\xfa\\xb1\\xba\\xb6\\xfc\\x2e\\x69\\xd0\\x10\\x19\\x0d\\x7c\\xab\\xe5\\x13\\ \\x5d\\xdc\\x64\\x3e\\x40\\xda\\xaf\\x03\\xc0\\xbf\\x0e\\xba\\x75\\xf2\\xf5\\xe7\\ \\xa2\\xc7\\x06\\x38\\x5a\\x6b\\x32\\xfe\\x50\\x1a\\x19\\xd5\\x51\\x68\\x4f\\x83\\ \\x84\\x28\\xd4\\xd6\\xd1\\xf9\\x3c\\x6c\\x19\\x0f\\x7d\\x2c\\xe8\\xfe\\x0a\\xb8\\", "\\x61\\x76\\x16\\xb4\\x5f\\x64\\xab\\x6c\\xea\\x7a\\x98\\xb3\\x01\\x4e\\xcc\\xb1\\ \\xbb\\xac\\x74\\x83\\xcd\\x54\\x64\\x43\\xef\\x03\\x70\\xfe\\x59\\xe2\\xb4\\x12\\ \\x66\\x02\\x8c\\xf8\\x89\\xac\\xcc\\x7b\\x71\\x11\\x63\\x1f\\x59\\xd4\\xb4\\x6f\\ 
\\x87\\xce\\x0c\\x78\\xe0\\x1d\\xba\\x78\\xdf\\x60\\x32\\x27\\xed\\xaf\\xea\\x2c\\ \\x7a\\x44\\xa8\\x79\\x0b\\xd2\\x6c\\xa4\\xbb\\xd1\\x67\\x31\\xb4\\x40\\x48\\xaf\\ \\x21\\xf5\\x44\\x32\\x0b\\xa5\\x9b\\x4c\\xad\\xb0\\xd0\\xb3\\x42\\x2b\\x85\\x1a\\ \\xf6\\x20\\xbd\\x88\\xda\\x8f\\xa1\\x4f\\x84\\x76\\x75\\x22\\xcd\\x44\\x1a\\x8d\\ \\x74\\x77\\x92\\x6a\\x8e\\xd8\\x01\\xcb\\x9a\\xed\\x95\\xdc\\xa5\\x12\\xb3\\x24\\ \\x5e\\xb7\\x21\\xf7\\x86\\x5b\\x9a\\xd4\\xdd\\xce\\xf4\\xec\\x95\\xa6\\xb9\\xa5\\ \\xbc\\xa1\\xb2\\x08\\x48\\x7d\\xc7\\x4b\\xf3\\x3d\\x92\\xb1\\x51\\x41\\x24\\x15\\ \\x0c\\x94\\x6e\\xf1\\xaa\\xd1\\xa8\\xd3\\x7e\\xa4\\xbd\\xc8\\x4e\\x9b\\xf7\\x64\\ \\x49\\x77\\x23\\x0d\\xef\\xa3\\x33\\x54\\xca\\x0c\\xb7\\x9b\\x04\\x01\\xcb\\xeb\\ \\x58\\x9f\\x6f\\x0d\\x4a\\xf6\\xc0\\x0a\\xa0\\x75\\xa2\\x23\\xc4\\x78\\xe1\\x93\\", "else: rcc_version = 2 qt_resource_struct = qt_resource_struct_v2 def qInitResources(): QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)", "for PyQt5 (Qt v5.12.5) # # WARNING! 
All changes made in this file", "\\xf6\\x20\\xbd\\x88\\xda\\x8f\\xa1\\x4f\\x84\\x76\\x75\\x22\\xcd\\x44\\x1a\\x8d\\ \\x74\\x77\\x92\\x6a\\x8e\\xd8\\x01\\xcb\\x9a\\xed\\x95\\xdc\\xa5\\x12\\xb3\\x24\\ \\x5e\\xb7\\x21\\xf7\\x86\\x5b\\x9a\\xd4\\xdd\\xce\\xf4\\xec\\x95\\xa6\\xb9\\xa5\\ \\xbc\\xa1\\xb2\\x08\\x48\\x7d\\xc7\\x4b\\xf3\\x3d\\x92\\xb1\\x51\\x41\\x24\\x15\\ \\x0c\\x94\\x6e\\xf1\\xaa\\xd1\\xa8\\xd3\\x7e\\xa4\\xbd\\xc8\\x4e\\x9b\\xf7\\x64\\ \\x49\\x77\\x23\\x0d\\xef\\xa3\\x33\\x54\\xca\\x0c\\xb7\\x9b\\x04\\x01\\xcb\\xeb\\ \\x58\\x9f\\x6f\\x0d\\x4a\\xf6\\xc0\\x0a\\xa0\\x75\\xa2\\x23\\xc4\\x78\\xe1\\x93\\ \\x3c\\xa6\\xfd\\xdb\\x66\\xa9\\xe3\\xc0\\x67\\xe7\\xc2\\x2f\\x4f\\xc2\\xbe\\x12\\ \\x9b\\x1f\\x9a\\x12\\xa0\\x7a\\x11\\x34\\xac\\x87\\x23\\xef\\x74\\xe0\\xcd\\x84\\ \\x7e\\xd7\\x27\\x63\\x2c\\x98\\x06\\xb1\\x09\\xf6\\xce\\x1b\\xf5\\xf0\\xc8\\x12\\ \\x08\\xc5\\x61\\xcd\\x9d\\x0e\\xe7\\x6d\\x86\\x09\\x31\\xa8\\x1d\\x83\\x41\\x10\\ \\xda\\xf2\\x6c\\x37\\x61\\x96\\x11\\x02\\x38\\x3b\\x01\\x3c\\x11\\xd2\\x53\\xbe\\ \\xc7\\x72\\x7e\\x7b\\x3b\\x13\\x89\\xbf\\xf3\\x03\\x54\\xe6\\x41\\xef\\x32\\x0a\\", "qt_resource_struct = qt_resource_struct_v2 def qInitResources(): QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data) def qCleanupResources(): QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct,", "# Created by: The Resource Compiler for PyQt5 (Qt v5.12.5) # # WARNING!", "\\x88\\xb9\\xc1\\x9d\\x03\\x81\\x81\\xc0\\xd3\\xfb\\xc1\\x3c\\x03\\xec\\x43\\x44\\ \\x11\\xb3\\x49\\x9a\\xf3\\x24\\x7c\\x9c\\x45\\x6c\\x8c\\xa0\\xcb\\xef\\xb8\\xe8\\ \\x0a\\x94\\x63\\x1a\\x3e\\x7c\\x5d\\x93\\xd1\\xf8\\x16\\xd2\\x9a\\x9b\\x70\\x75\\ \\xf6\\x84\\x68\\x6f\\x28\\x3d\\xc3\\xd0\\x4b\\xcb\\xc9\\x8c\\x2c\\x62\\xdf\\xb2\\ \\x9d\\xdc\\xb2\\xfb\\x08\\x1e\\xef\\x11\\x16\\x56\\x26\\x71\\xdb\\xb1\\x5f\\xf8\\ \\x57\\x82\\x87\\x65\\x56\\x0e\\x2c\\xd9\\xde\\x45\\x5a\\x81\\x74\\x27\\x6a\\xac\\ 
\\x43\\x17\\xcb\\xa3\\xd8\\xa3\\x48\\xd9\\xf9\\xd2\\xe0\\x2e\\xda\\x7d\\x08\\x0d\\ \\x94\\x57\\x7f\\x84\\x90\\x0a\\x3d\\x52\\x62\\xa2\\x94\\x9a\\x2c\\x0d\\x34\\x54\\ \\x7f\\x00\\x5d\\x27\\xf4\\x80\\xd0\\x0f\\x42\\xaf\\x0b\\xad\\x15\\xaa\\x8d\\xa3\\ \\xaf\\x84\\xaa\\x02\\x28\\x7e\\xef\\x00\\x89\\xe7\\xd5\\x41\\x95\\x9a\\x39\\x25\\ \\x8b\\x8d\\xb2\\xcc\\x0f\\x14\\xfc\\x0d\\xc5\\x6f\\xf0\\x49\\xcc\\x96\\xd8\\xa1\\ \\x28\\x87\\xa4\\xbf\\x77\\x97\\x26\\xa3\\x76\\x36\\xa9\\x9c\\x46\\x9d\\xa0\\x42\\ \\xed\\x4f\\xa5\\xc9\\x1a\\xd8\\x4f\\x07\\x8d\\x66\\x3d\\x52\\x70\\x4c\\x4a\\xfe\\", "\\xfc\\xd3\\x81\\x45\\x14\\x38\\x88\\xcd\\x1e\\x06\\xe0\\x8b\\x62\\x10\\x26\\x8f\\ \\x18\\xb5\\x24\\xd0\\x8c\\x41\\x3a\\xb3\\x39\\x0c\\x84\\x28\\xa1\\x37\\x70\\x15\\ \\xe0\\x6e\\xc0\\xc4\\x87\\x45\\xcc\\x39\\x91\\x62\\x20\\x25\\xe8\\x3c\\x34\\x05\\ \\x88\\x79\\xc0\\xf4\\xc2\\xe8\\x36\\x22\\xb7\\x59\\x54\\x03\\x1d\\x06\\xb8\\xbd\\ \\xa0\\x64\\x68\\xf4\\xd8\\x20\\xf3\\x3b\\x48\\xf7\\x01\\x4d\\x09\\x10\\xbd\\x10\\ \\x32\\x87\\x05\\x09\\xb9\\xcb\\xf0\\x76\\x82\\xe7\\x87\\x72\\x98\\xb7\\x1b\\x6a\\ \\x9f\\x71\\x8e\\x7b\\xa7\\x8d\\x9d\\x8b\\x6a\\x60\\x2c\\xd0\\xe2\\xf4\\x7b\\xb2\\ \\x53\\x40\\xa7\\x43\\x83\\x56\\x04\\xa8\\xc7\\xcf\\x59\\xb2\\x30\\x38\\x8b\\x9f\\ \\x6c\\x4e\\x72\\x3e\\x71\\x92\\x8c\\xf7\\x71\\xa9\\x1b\\x85\\x0c\\xc4\\x5d\\x48\\ \\x0c\\x37\\x50\\xee\\xb4\\xdd\\x2c\\x60\\xf8\\x77\\xc0\\x07\\xb6\\x25\\x22\\x16\\ \\xb5\\x51\\xb0\\xb4\\x88\\x9e\\x1f\\xb6\\xd3\\xd3\\x48\\x00\\xb7\\x0f\\x92\\x5b\\ \\xe1\\xfc\\x5a\\x62\\x33\\xe1\\xf4\\x14\\xfb\\x24\\x3d\\x40\\xe9\\x72\\x70\\xcf\\ \\x4d\\x83\\x53\\xbd\\x21\\x9e\\x47\\x12\\x9d\\xe0\\x09\\x40\\x34\\x19\\x62\\xfd\\", "qt_resource_struct = qt_resource_struct_v1 else: rcc_version = 2 qt_resource_struct = qt_resource_struct_v2 def qInitResources(): QtCore.qRegisterResourceData(rcc_version,", "\\x1e\\x69\\x58\\x9a\\x94\\xf0\\x90\\x4e\\x25\\xaf\\x11\\x81\\xa9\\xd9\\xd2\\x3f\\ 
\\x3c\\x6a\\x6d\\x41\\x3f\\xcb\\xa1\\xd0\\xc4\\x04\\xc9\\x3f\\x48\\xb1\\xaf\\xd0\\ \\x1c\\xa1\\xc6\\xdf\\x4c\\xa9\\xb7\\x47\\xa2\\x54\\xe2\\x42\\x89\\xfb\\xa4\\xa4\\ \\x73\\xb4\\xa6\\x02\\xad\\x6e\\x41\\xea\\xe6\\x93\\xb8\\xd5\\x99\\x0d\\xfe\\x4b\\ \\xea\\x31\\x5c\\xf1\\x5f\\x91\\x75\\x10\\xe9\\xbc\\x14\\x27\\xc6\\xbe\\x69\\x9f\\ \\xc4\\x7d\\x49\\xd2\\x55\\x99\\x4e\\xbc\\x9d\\x2a\\x3d\\x89\\xc4\\x74\\x89\\xf7\\ \\xa5\\x31\\xbd\\xa5\\xae\\x97\\x49\\xac\\x52\\x88\\x36\\xa9\\x47\\x81\\xfd\\x3a\\ \\x35\\x8a\\x52\\x27\\xb1\\x5c\\x16\\x6f\\x48\\x4f\\xa1\\x78\\xca\\x75\\x6a\\x4a\\ \\x78\\x49\\x35\\x74\\x68\\x8e\\x4b\\x42\\x59\\x3e\\x59\\xef\\xa1\\xda\\x00\\x8a\\ \\x4f\\x4f\\x96\\x28\\x96\\xae\\x40\\xc1\\xfd\\xf6\\x04\\x2a\\xb0\\x14\\xc9\\x3f\\ \\xde\\x59\\xe8\\x34\\x89\\x4b\\xa4\\x94\\x6e\\xaa\\x5d\\x6a\\xea\\x45\\xa1\\x2f\\ \\x2c\\x64\\x6d\\x41\\x2a\\xcd\\x75\\xf4\\xe3\\x11\\x89\\xd5\\x92\\xeb\\x7d\\xe9\\ \\xd2\\x54\\xe9\\x17\\xa4\\x61\\x45\\x12\\x1f\\x4a\\x54\\x2a\\xc6\\x46\\x29\\x6d\\", "\\x31\\x80\\xb2\\x64\\x68\\xba\\x06\\x7a\\x8c\\x6b\\x23\\x3a\\xa0\\x82\\xf8\\x8c\\ \\x08\\x47\\xc7\\x42\\xb3\\xdb\\x5e\\xb2\\xdf\\xb6\\x87\\xe4\\x1b\\xe0\\x2b\\x8b\\ \\x42\\xc3\\x29\\x48\\xaa\\x80\\xd1\\x95\\x04\\xdf\\x0b\\x53\\x57\\x0c\\x1e\\x13\\ \\x12\\xf7\\x00\\x3f\\xb9\\x81\\x1d\\x90\\xf6\\x13\\xfc\\xb7\\xda\\x86\\xf3\\x91\\ \\x20\\x1c\\xfc\\x1d\\xac\\x13\\x70\\xd9\\x2f\\x30\\x0a\\x70\\x45\\xe1\\xfb\\x10\\ \\x1e\\x26\\x03\\x6d\\x18\\xfe\\x15\\x50\\xd0\\x08\\xa5\\x11\\x4c\\xef\\x1a\\x5c\\ \\x15\\xd3\\xa9\\x26\\x8b\\x0c\\x3c\\x64\\xf0\\x2b\\xc6\\x34\\xa1\\x98\\xd3\\xe7\\ \\x25\\x0e\\x54\\x5a\\x1c\\x93\\x5d\\xef\\xc0\\x26\\xc1\\x69\\xe6\\xae\\x4e\\x4b\\ \\xfe\\x95\\xef\\xb3\\x9d\\xbe\\xf8\\x2b\\xb8\\x02\\x0c\\x88\\x43\\x5e\\x04\\x9a\\ \\x0c\\x30\\x3d\\x90\\x64\\x42\\xd0\\x70\\xd2\\x6b\\x1c\\x5c\\x07\\xc0\\xac\\xb0\\ \\xe1\\x6b\\x0d\\x00\\xb3\\xd5\\x89\\xd4\\x01\\x87\\x92\\xba\\x3b\\x8b\\x71\\x3b\\ \\xff\\x6f\\xea\\x03\\x4d\\x1d\\x30\\xa4\\x1a\\x4e\\x5a\\xf6\\xae\\xf5\\x07\\x56\\ 
\\x26\\x13\\xab\\x7e\\x04\\x77\\x47\\x0d\\x78\\xb7\\x62\\xfc\\xd3\\x42\\x27\\x0d\\", "\\xa2\\xb2\\x58\\xa7\\x36\\x0e\\x4a\\x2c\\xd7\\x7a\\xb3\\x5c\\xa1\\xae\\x77\\xa9\\ \\xf9\\xfa\\xb1\\xba\\xb6\\xfc\\x2e\\x69\\xd0\\x10\\x19\\x0d\\x7c\\xab\\xe5\\x13\\ \\x5d\\xdc\\x64\\x3e\\x40\\xda\\xaf\\x03\\xc0\\xbf\\x0e\\xba\\x75\\xf2\\xf5\\xe7\\ \\xa2\\xc7\\x06\\x38\\x5a\\x6b\\x32\\xfe\\x50\\x1a\\x19\\xd5\\x51\\x68\\x4f\\x83\\ \\x84\\x28\\xd4\\xd6\\xd1\\xf9\\x3c\\x6c\\x19\\x0f\\x7d\\x2c\\xe8\\xfe\\x0a\\xb8\\ \\xde\\x99\\x02\\x6d\\x57\\x41\\xe6\\x87\\x90\\xe1\\x42\\x97\\x54\\x41\\x71\\x19\\ \\x9d\\x57\\x99\\xf8\\xdf\\x4b\\x41\\x8b\\x2e\\x27\\x16\\xbe\\x06\\x4f\\xc6\\x6b\\ \\xe0\\xa9\\x85\\xba\\xc7\\x61\\xf8\\xbb\\x70\\x32\\x8c\\x96\\xb5\\x12\\x49\\x08\\ \\xe2\\x5b\\x1e\\x82\\x77\\xee\\x21\\xce\\x2d\\x1c\\xa2\\x3b\\x3d\\x88\\x62\\xb2\\ \\x95\\xdd\\x5c\\xc6\\xf9\\x9e\\x8d\\xec\\x1b\\xf8\\x15\\xc5\\x5d\\x8a\\xd9\\xfc\\ \\xfd\\x97\\x4c\\xce\\x3c\\x97\\xfd\\xf7\\x1f\\xa7\\xab\\xd9\\x40\\xc3\\x2b\\xb7\\ \\x61\\xac\\xfc\\x3d\\x41\\x57\\x7f\\xdf\\xdd\\x1e\\xe6\\xac\\x05\\x4a\\x4c\\xea\\ \\x17\\x59\\x64\\xee\\xcb\\xc1\\x35\\x0f\\x38\\xde\\x1d\\x54\\x04\\xf4\\x71\\xc2\\", "\\x7d\\x4c\\x34\\x3e\\x8f\\x43\\x14\\x73\\x12\\x17\\xa9\\x40\\x42\\xfa\\x19\\x8c\\ \\x52\\x19\\xca\\x05\\xba\\x20\\x2e\\x06\\xee\\x3a\\x0c\\xfe\\x76\\x30\\xbf\\x04\\ \\x3e\\x07\\x32\\x80\\xcb\\xa0\\xf9\\x2a\\xd8\\x71\\x11\\xb4\\x87\\xa1\\xdf\\x09\\ \\x83\\x82\\xed\\x90\\xb1\\x0a\\xd8\\x9e\\x08\\xa6\\x05\\xe3\\xc3\\xfc\\xf4\\xb1\\ \\x18\\x14\\x85\\xdc\\x1b\\x80\\x03\\x06\\x74\\x26\\xa2\\xe2\\x04\\x3a\\x1f\\x69\\ \\xc7\\xec\\x1b\\xc3\\xdd\\x28\\x5c\\x8b\\x4d\\x8c\\xd5\\xbd\\xa1\\xf1\\x6a\\xa0\\ \\x18\\x68\\x05\\xb2\\x80\\x1e\\xc0\\x66\\x48\\xff\\x11\\x46\\xee\\x04\\x3f\\xf0\\ \\xdd\\xe5\\x28\\xf2\\x36\\x27\\x29\\xc1\\x02\\x0a\\x68\\x04\\xc2\\xb4\\x51\\xc0\\ \\x29\\xa0\\xbb\\x51\\x49\\x81\\xf1\\x14\\x46\\x49\\x03\\xc6\\x45\\x42\\x5d\\x81\\ \\x66\\xc0\\x04\\x6e\\x06\\xa6\\x6e\\x80\\x84\\xc7\\x9d\\xcd\\x99\\x0a\\xca\\x85\\ 
\\x78\\x1d\\xb8\\xd7\\x02\\x95\\x69\\xd0\\x91\\xe5\\x54\\x98\\x68\\x0b\\x27\\x89\\ \\x58\\x0f\\xfc\\xc0\\xb6\\xf9\\x21\\x2e\\x3a\\x08\\xc6\\x38\\x2f\\xd4\\x74\\x07\\ \\x86\\x01\\x17\\x01\\x67\\xc0\\xa8\\x85\\x9e\\xab\\x88\\xdd\\xd6\\x8c\\xbb\\x05\\", "\\x90\\xd2\\x3d\\x92\\xd1\\x53\\x2a\\x71\\xab\\x6a\\x13\\xfa\\x8f\\xd0\\xea\\xb3\\ \\x28\\x3a\\x16\\x89\\xf3\\x24\\x6e\\x92\\xb8\\x56\\x62\\x92\\x42\\x4f\\xb8\\x55\\ \\x65\\x21\\xdd\\x80\\xc4\\x1d\\x12\\x1f\\x49\\xfc\\x21\\x99\\x9f\\x4b\\x85\\x57\\ \\x4b\\x6b\\x90\\x26\\xba\\x25\\xde\\x95\\xc5\\x59\\x89\\x72\\x69\\x46\\x0f\\xa9\\ \\x70\\x8c\\xe2\\x54\\x2a\\x4a\\x48\\x3a\\xb7\\x97\\xc4\\x1a\\x95\\x63\\x29\\x98\\ \\xfb\\xb0\\xe4\\x9a\\xa5\\x83\\x58\\x6a\\x64\\x9b\\xe4\\x2f\\x52\\x83\\xf7\\x2b\\ \\xad\\xa1\\x55\\xe3\\x86\\x48\\x1f\\x12\\x50\\xa5\\x7b\\xad\\xf4\\xb8\\x21\\x2a\\ \\x6a\\xd1\\xce\\x2a\\xa4\\xb5\\x68\\xb3\\xec\\x82\\xac\\x6b\\xd1\\x9a\\x18\\xd2\\ \\x55\\x48\\xd9\\x48\\x69\\x86\\xaa\\x3f\\x44\\xff\\x16\\x3a\\xb5\\xc6\\x94\\x92\\ \\xd2\\x25\\x86\\x38\\x8b\\xbd\\x47\\x32\\x6e\\x56\\xe4\\xca\\x62\\x9d\\x6a\\x41\\ \\x9b\\x84\\x82\\x3f\\x20\\x25\\x96\\x38\\xef\\x7f\\x26\\xb1\\x4c\\xe2\\x5b\\x29\\ \\xfd\\x46\\x69\\x2b\\xd2\\x85\\xa5\\x12\\x6f\\x49\\x6c\\x96\\x78\\x43\\x7a\\xd8\\ \\x23\\xf1\\xaa\\xc4\\x26\\x69\\xd8\\xc5\\x12\\x1f\\xa9\\x91\\x98\\xe4\\xba\\x5d\\", "\\xe1\\x6b\\x0d\\x00\\xb3\\xd5\\x89\\xd4\\x01\\x87\\x92\\xba\\x3b\\x8b\\x71\\x3b\\ \\xff\\x6f\\xea\\x03\\x4d\\x1d\\x30\\xa4\\x1a\\x4e\\x5a\\xf6\\xae\\xf5\\x07\\x56\\ \\x26\\x13\\xab\\x7e\\x04\\x77\\x47\\x0d\\x78\\xb7\\x62\\xfc\\xd3\\x42\\x27\\x0d\\ \\xe8\\x16\\x87\\x87\\x4f\\x42\\x61\\x1d\\x94\\xf7\\x87\\x25\\xe9\\x7f\\x99\\x67\\ \\x18\\xdb\\x09\\x17\\x1e\\x06\\x6f\\x1d\\xb8\\x9b\\x0c\\x4e\\x8d\\x16\\x47\\xbb\\ \\xc2\\x19\\xd3\\x2e\\x22\\x0b\\x28\\x8c\\x83\\xab\\x0d\\x7a\\x1d\\x36\\x71\\x9d\\ \\x4e\\xa2\\xa3\\x28\\x08\\xfd\\xe2\\x18\\xc9\\xd0\\x6a\\x40\\x72\\x04\\x32\\x0f\\ \\x01\\x6d\\x2e\\xf0\\x5a\\x90\\x21\\xac\\xae\\x60\\x76\\x02\\x27\\x9c\\x23\\x6d\\ 
\\x72\\x6e\\x17\\x9a\\x9c\\x4b\\xfa\\x46\\x03\\x12\\x65\\x53\\xe3\\x28\\xc7\\x9d\\ \\x1e\\x73\\x12\\x60\\x93\\x09\\xcd\\x16\\x04\\x4d\\xc8\\xb3\\x60\\x53\\x1b\\x9a\\ \\x25\\xd4\\xf4\\x15\\x52\\x96\\x7d\\x4d\\x1a\\x5e\\x63\\xcf\\x3c\\x1e\\x93\\x2d\\ \\x8e\\xd6\\xbd\\x48\\x45\\x09\\x92\\xbb\\xc4\\xf6\\x59\\x29\\xdd\\xd5\\xf2\\xab\\ \\xa1\\xaf\\x85\\xde\\x77\\x66\\x23\\xed\\xfb\\x90\\x6e\\x47\\x32\\x06\\x4b\\x3c\\", "changes made in this file will be lost! # from PyQt5 import QtCore", "\\xe2\\x5b\\x1e\\x82\\x77\\xee\\x21\\xce\\x2d\\x1c\\xa2\\x3b\\x3d\\x88\\x62\\xb2\\ \\x95\\xdd\\x5c\\xc6\\xf9\\x9e\\x8d\\xec\\x1b\\xf8\\x15\\xc5\\x5d\\x8a\\xd9\\xfc\\ \\xfd\\x97\\x4c\\xce\\x3c\\x97\\xfd\\xf7\\x1f\\xa7\\xab\\xd9\\x40\\xc3\\x2b\\xb7\\ \\x61\\xac\\xfc\\x3d\\x41\\x57\\x7f\\xdf\\xdd\\x1e\\xe6\\xac\\x05\\x4a\\x4c\\xea\\ \\x17\\x59\\x64\\xee\\xcb\\xc1\\x35\\x0f\\x38\\xde\\x1d\\x54\\x04\\xf4\\x71\\xc2\\ \\xcf\\x20\\x3b\\x00\\x4d\\x3a\\x49\\xfd\\xe7\\x65\\xac\\x4f\\x86\\x1b\\xde\\x05\\ \\xe3\\x89\\x31\\xc4\\x9b\\x5f\\x76\\xac\\x6c\\x22\\x2e\\x8e\\x41\\xff\\x77\\xed\\ \\xbc\\x9f\\x93\\x83\\x36\\x8e\\xc2\\xe0\\x12\\x3a\\xb8\\x80\\x24\\xbe\\xb0\\x83\\ \\xd7\\xcd\\x4b\\x09\\x2f\\x81\\x76\\x13\\x52\\x0c\\xf0\\xec\\x01\\xf3\\x43\\xe0\\ \\xb7\\xf1\\x54\\xec\\xfb\\x37\\xf9\\xb1\\x12\\x02\\x34\\x93\\xe9\\xdf\\x0d\\xe6\\ \\xd7\\x90\\x60\\xc1\\xed\\x2b\\xa8\\x5e\\xf7\\x22\\x4b\\x0a\\x07\\x91\\x5b\\xe6\\ \\xa1\\x3c\\x2d\\xc6\\x4b\\x67\\x42\\xb8\\xae\\x6e\\x7e\\x73\\x5e\\x72\\xaf\\x6d\\ \\xa4\\xbf\\xdf\\x1b\\x7a\\xf9\\x61\\xdb\\x34\\x92\\x7e\\x18\\x86\\xf9\\x51\\x09\\", "\\x69\\x74\\xa8\\x19\\x6d\\xaf\\x40\\xba\\x18\\x59\\x43\\xd0\\xd3\\x21\\x54\\xbf\\ \\x18\\xc9\\x9f\\x2a\\xb9\\xfc\\x52\\x8a\\xa9\\x8a\\x15\\x86\\xae\\x10\\xfa\\x57\\ \\x0c\\x35\\xcd\\x46\\x56\\x4f\\xa4\\x81\\x09\\x52\\x41\\x9a\\xa2\\x8b\\xd0\\x71\\ \\xa1\\xf8\\xcd\\x3e\\x89\\xc7\\x25\\x56\\x4a\\x3d\\x67\\x4b\\xcb\\x90\\x36\\x21\\ \\xdd\\xe4\\x96\\x98\\x22\\xf1\\x9e\\xc4\\x42\\x89\\xc9\\xd2\\xa7\\x48\\x2c\\x95\\ 
\\x28\\x73\\xe0\\x85\\x2c\\x5e\\x56\\x05\\x96\\xda\\x78\\x45\\x4a\\x19\\xae\\x06\\ \\xa4\\x2a\\x1a\\x14\\x4b\\xb9\\x43\\xba\\x20\\x59\\x61\\xe3\\x65\\xfd\\xb3\\x58\\ \\x7a\\xce\\x1f\\xd6\\xdb\\xee\\x23\\xda\\x53\\x34\\x5f\\x9c\\xe8\\x40\\x0d\\xfb\\ \\x90\\x1e\\x42\\x7f\\xb6\\x23\\x6b\\x13\\xd2\\xf9\\xa6\\xc2\\xff\\x41\\x95\\xeb\\ \\x90\\xd2\\x3d\\x92\\xd1\\x53\\x2a\\x71\\xab\\x6a\\x13\\xfa\\x8f\\xd0\\xea\\xb3\\ \\x28\\x3a\\x16\\x89\\xf3\\x24\\x6e\\x92\\xb8\\x56\\x62\\x92\\x42\\x4f\\xb8\\x55\\ \\x65\\x21\\xdd\\x80\\xc4\\x1d\\x12\\x1f\\x49\\xfc\\x21\\x99\\x9f\\x4b\\x85\\x57\\ \\x4b\\x6b\\x90\\x26\\xba\\x25\\xde\\x95\\xc5\\x59\\x89\\x72\\x69\\x46\\x0f\\xa9\\", "utf-8 -*- # Resource object code # # Created by: The Resource Compiler", "\\x3a\\x54\\x7b\\xa0\\x67\\x08\\x06\\x6c\\x00\\xef\\xcf\\x6e\\x62\\xbd\\x5d\\x44\\ \\x0b\\xc3\\x44\\x4a\\x21\\xad\\x19\\x8c\\x9f\\xbc\\xf0\\x4e\\x1a\\x54\\x5e\\x65\\ \\x0b\\x13\\x5d\\x9c\\x86\\xef\\x07\\xde\\xb7\\x20\\xfb\\x10\\x3c\\xbc\\x0d\\x06\\ \\xc7\\x61\\x25\\xb0\\x22\\x07\\xea\\x9e\\xb0\\x29\\xc8\\xa8\\x05\\x8d\\x27\\x48\\ \\x29\\x1e\\xca\\x70\\x73\\x14\\xa3\\xcf\\xab\\xd0\\xd2\\x0b\\xac\\x54\\x1a\\x1a\\ \\x5e\\xc2\\x68\\x8e\\xa3\\x0a\\x13\\x06\\xaf\\xb7\\xc5\\x96\\xdf\\x6d\\x24\\xd4\\ \\x7f\\x0c\\x1b\\x7d\\x30\\xfd\\x59\\xe0\\x95\\x3c\\x9b\\x3e\\x53\\x92\\x60\\xc8\\ \\x4e\\x8e\\x7e\\xda\\xcc\\xa6\\x02\\x1b\\x30\\xa5\\x0e\\xc1\\xa4\\x01\\xed\\x40\\ \\x7f\\xc0\\x72\\x40\\x97\\xb2\\x1d\\xdc\\x73\\xbd\\x18\\xdb\\x87\\x43\\x60\\x18\\ \\xf6\\xf5\\x45\\xa1\\x73\\x5a\\x47\\x1c\\x85\\x1f\\x07\\xb8\\x11\\x11\\x0c\\x63\\ \\x09\\x5c\\xf1\\x5f\\xd8\\x52\\x02\\xc1\\x73\\x81\\x09\\x58\\x4c\\xc4\\x34\\x1f\\ \\x02\\xb7\\x0b\\x2b\\xf2\\x10\\x15\\xf4\\x27\\x07\\x51\\xe5\\x3a\\x4b\\xbf\\xd4\\ \\x67\\x20\\xb0\\x0d\\x3c\\x60\\xe8\\x35\\xd4\\x36\\x13\\x52\\xd7\\x39\\xc7\\x9a\\", "\\x4c\\x8e\\xf1\\x23\\x7e\\x5c\\x00\\x24\\xe2\\x82\\xce\\xcb\\x70\\xd5\\xbe\\xc8\\ \\xbc\\x20\\x50\\x09\\x1c\\x48\\x80\\xd6\\x12\\xd8\\x3d\\x1c\\x0e\\xe6\\xdb\\x4b\\ 
\\x31\\x80\\xb2\\x64\\x68\\xba\\x06\\x7a\\x8c\\x6b\\x23\\x3a\\xa0\\x82\\xf8\\x8c\\ \\x08\\x47\\xc7\\x42\\xb3\\xdb\\x5e\\xb2\\xdf\\xb6\\x87\\xe4\\x1b\\xe0\\x2b\\x8b\\ \\x42\\xc3\\x29\\x48\\xaa\\x80\\xd1\\x95\\x04\\xdf\\x0b\\x53\\x57\\x0c\\x1e\\x13\\ \\x12\\xf7\\x00\\x3f\\xb9\\x81\\x1d\\x90\\xf6\\x13\\xfc\\xb7\\xda\\x86\\xf3\\x91\\ \\x20\\x1c\\xfc\\x1d\\xac\\x13\\x70\\xd9\\x2f\\x30\\x0a\\x70\\x45\\xe1\\xfb\\x10\\ \\x1e\\x26\\x03\\x6d\\x18\\xfe\\x15\\x50\\xd0\\x08\\xa5\\x11\\x4c\\xef\\x1a\\x5c\\ \\x15\\xd3\\xa9\\x26\\x8b\\x0c\\x3c\\x64\\xf0\\x2b\\xc6\\x34\\xa1\\x98\\xd3\\xe7\\ \\x25\\x0e\\x54\\x5a\\x1c\\x93\\x5d\\xef\\xc0\\x26\\xc1\\x69\\xe6\\xae\\x4e\\x4b\\ \\xfe\\x95\\xef\\xb3\\x9d\\xbe\\xf8\\x2b\\xb8\\x02\\x0c\\x88\\x43\\x5e\\x04\\x9a\\ \\x0c\\x30\\x3d\\x90\\x64\\x42\\xd0\\x70\\xd2\\x6b\\x1c\\x5c\\x07\\xc0\\xac\\xb0\\ \\xe1\\x6b\\x0d\\x00\\xb3\\xd5\\x89\\xd4\\x01\\x87\\x92\\xba\\x3b\\x8b\\x71\\x3b\\", "\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \" qt_resource_struct_v2 = b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x01\\x74\\x20\\x97\\x84\\xbd\\ \"", "\\x78\\x49\\x35\\x74\\x68\\x8e\\x4b\\x42\\x59\\x3e\\x59\\xef\\xa1\\xda\\x00\\x8a\\ \\x4f\\x4f\\x96\\x28\\x96\\xae\\x40\\xc1\\xfd\\xf6\\x04\\x2a\\xb0\\x14\\xc9\\x3f\\ \\xde\\x59\\xe8\\x34\\x89\\x4b\\xa4\\x94\\x6e\\xaa\\x5d\\x6a\\xea\\x45\\xa1\\x2f\\ \\x2c\\x64\\x6d\\x41\\x2a\\xcd\\x75\\xf4\\xe3\\x11\\x89\\xd5\\x92\\xeb\\x7d\\xe9\\ \\xd2\\x54\\xe9\\x17\\xa4\\x61\\x45\\x12\\x1f\\x4a\\x54\\x2a\\xc6\\x46\\x29\\x6d\\ \\x8c\\xc4\\x9b\\xb2\\x58\\x2b\\xa5\\xa7\\x39\\x8b\\x3d\\x29\\x75\\x4f\\x56\\x94\\ 
\\xb5\\x3a\\x4c\\x4c\\x01\\x0e\\x4a\\xc6\\x33\\x8a\\xd2\\xa4\\x38\\x01\\x1d\\x24\\ \\xa6\\xf6\\xb1\\xcb\\xa4\\xf3\\x5d\\x8a\\xe6\\x4e\\x51\\x1d\\x3f\\xeb\\x3b\\xa4\\ \\xd5\\xc4\\xf4\\x11\\x12\\xf1\\xcf\\x91\\x1e\\x47\\x9a\\x99\\x2d\\xe5\\xf8\\xa5\\ \\x2b\\x4c\\xe9\\x5b\\x54\\x17\\x45\\xd6\\x03\\x6e\\xc9\\xdf\\x5d\\xe2\\x5c\\x7b\\ \\x66\\xe8\\xc9\\x97\\xae\\x34\\xf4\\xe9\\x09\\x7b\\x28\\xba\\x4a\\x68\\xb1\\xd0\\ \\xd9\\x16\\xd4\\xbc\\x1d\\xe9\\x2a\\xbf\\xc4\\x15\\x12\\xf3\\xd5\\x4a\\xab\\x2c\\ \\x8e\\x4b\\xb9\\x23\\xa5\\x67\\x90\\xae\\x2a\\x94\\x58\\xa9\\x06\\xa4\\x20\\x4b\\", "made in this file will be lost! # from PyQt5 import QtCore from", "\\x0c\\x94\\x6e\\xf1\\xaa\\xd1\\xa8\\xd3\\x7e\\xa4\\xbd\\xc8\\x4e\\x9b\\xf7\\x64\\ \\x49\\x77\\x23\\x0d\\xef\\xa3\\x33\\x54\\xca\\x0c\\xb7\\x9b\\x04\\x01\\xcb\\xeb\\ \\x58\\x9f\\x6f\\x0d\\x4a\\xf6\\xc0\\x0a\\xa0\\x75\\xa2\\x23\\xc4\\x78\\xe1\\x93\\ \\x3c\\xa6\\xfd\\xdb\\x66\\xa9\\xe3\\xc0\\x67\\xe7\\xc2\\x2f\\x4f\\xc2\\xbe\\x12\\ \\x9b\\x1f\\x9a\\x12\\xa0\\x7a\\x11\\x34\\xac\\x87\\x23\\xef\\x74\\xe0\\xcd\\x84\\ \\x7e\\xd7\\x27\\x63\\x2c\\x98\\x06\\xb1\\x09\\xf6\\xce\\x1b\\xf5\\xf0\\xc8\\x12\\ \\x08\\xc5\\x61\\xcd\\x9d\\x0e\\xe7\\x6d\\x86\\x09\\x31\\xa8\\x1d\\x83\\x41\\x10\\ \\xda\\xf2\\x6c\\x37\\x61\\x96\\x11\\x02\\x38\\x3b\\x01\\x3c\\x11\\xd2\\x53\\xbe\\ \\xc7\\x72\\x7e\\x7b\\x3b\\x13\\x89\\xbf\\xf3\\x03\\x54\\xe6\\x41\\xef\\x32\\x0a\\ \\x86\\xf5\\xc0\\xd4\\x29\\x11\\x00\\x1a\\x8a\\x80\\x30\\xd0\\x2c\\xce\\x59\\x0a\\ \\xe7\\x00\\x5f\\xf4\\x05\\xa6\\x01\\x66\\x9d\\xed\\xa6\\xde\\x82\\x69\\x9f\\xd8\\ \\x26\\xd3\\x72\\xf4\\xb9\\xcc\\x11\\x63\\x0b\\xd8\\xef\\x82\\x3d\\x3e\\xe8\\xd2\\ \\x00\\x99\\x17\\x16\\xc1\\xf2\\xa9\\xf6\\x26\\x50\\x0f\\xac\\x84\\xe9\\x6f\\xd0\\", "\\x00\\x08\\ \\x0a\\x61\\x5a\\xa7\\ \\x00\\x69\\ \\x00\\x63\\x00\\x6f\\x00\\x6e\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\ \" qt_resource_struct_v1 = b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ 
\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \" qt_resource_struct_v2", "\\x73\\xfa\\x1b\\x16\\xda\\xd8\\x89\\x82\\xb5\\xa8\\xea\\x18\\x7a\\xc1\\x42\\xcf\\ \\x0b\\xad\\x13\\xfa\\x3d\\x8c\\xe2\\x9b\\x0d\\x75\\x9c\\x34\\xa4\\xa3\\xc8\\x6a\\ \\xb4\\x19\\x50\\x05\\xdd\\x25\\x63\\x92\\xc4\\x08\\x89\\x19\\xd2\\xf8\\x02\\xe9\\ \\xff\\x21\\x79\\xee\\x90\\x8c\\xcf\\x25\\xe3\\x4a\\xbb\\x7f\\x8c\\x6a\\xc9\\x78\\ \\x47\\x32\\x26\\x48\\xac\\x90\\xd8\\xa7\\xbd\\x48\\x11\\x9e\\x95\\xd8\\xa9\\x72\\ \\xa2\\xb2\\x58\\xa7\\x36\\x0e\\x4a\\x2c\\xd7\\x7a\\xb3\\x5c\\xa1\\xae\\x77\\xa9\\ \\xf9\\xfa\\xb1\\xba\\xb6\\xfc\\x2e\\x69\\xd0\\x10\\x19\\x0d\\x7c\\xab\\xe5\\x13\\ \\x5d\\xdc\\x64\\x3e\\x40\\xda\\xaf\\x03\\xc0\\xbf\\x0e\\xba\\x75\\xf2\\xf5\\xe7\\ \\xa2\\xc7\\x06\\x38\\x5a\\x6b\\x32\\xfe\\x50\\x1a\\x19\\xd5\\x51\\x68\\x4f\\x83\\ \\x84\\x28\\xd4\\xd6\\xd1\\xf9\\x3c\\x6c\\x19\\x0f\\x7d\\x2c\\xe8\\xfe\\x0a\\xb8\\ \\xde\\x99\\x02\\x6d\\x57\\x41\\xe6\\x87\\x90\\xe1\\x42\\x97\\x54\\x41\\x71\\x19\\ \\x9d\\x57\\x99\\xf8\\xdf\\x4b\\x41\\x8b\\x2e\\x27\\x16\\xbe\\x06\\x4f\\xc6\\x6b\\ \\xe0\\xa9\\x85\\xba\\xc7\\x61\\xf8\\xbb\\x70\\x32\\x8c\\x96\\xb5\\x12\\x49\\x08\\", "\\xb4\\x19\\x50\\x05\\xdd\\x25\\x63\\x92\\xc4\\x08\\x89\\x19\\xd2\\xf8\\x02\\xe9\\ \\xff\\x21\\x79\\xee\\x90\\x8c\\xcf\\x25\\xe3\\x4a\\xbb\\x7f\\x8c\\x6a\\xc9\\x78\\ \\x47\\x32\\x26\\x48\\xac\\x90\\xd8\\xa7\\xbd\\x48\\x11\\x9e\\x95\\xd8\\xa9\\x72\\ \\xa2\\xb2\\x58\\xa7\\x36\\x0e\\x4a\\x2c\\xd7\\x7a\\xb3\\x5c\\xa1\\xae\\x77\\xa9\\ \\xf9\\xfa\\xb1\\xba\\xb6\\xfc\\x2e\\x69\\xd0\\x10\\x19\\x0d\\x7c\\xab\\xe5\\x13\\ \\x5d\\xdc\\x64\\x3e\\x40\\xda\\xaf\\x03\\xc0\\xbf\\x0e\\xba\\x75\\xf2\\xf5\\xe7\\ \\xa2\\xc7\\x06\\x38\\x5a\\x6b\\x32\\xfe\\x50\\x1a\\x19\\xd5\\x51\\x68\\x4f\\x83\\ \\x84\\x28\\xd4\\xd6\\xd1\\xf9\\x3c\\x6c\\x19\\x0f\\x7d\\x2c\\xe8\\xfe\\x0a\\xb8\\ 
\\xde\\x99\\x02\\x6d\\x57\\x41\\xe6\\x87\\x90\\xe1\\x42\\x97\\x54\\x41\\x71\\x19\\ \\x9d\\x57\\x99\\xf8\\xdf\\x4b\\x41\\x8b\\x2e\\x27\\x16\\xbe\\x06\\x4f\\xc6\\x6b\\ \\xe0\\xa9\\x85\\xba\\xc7\\x61\\xf8\\xbb\\x70\\x32\\x8c\\x96\\xb5\\x12\\x49\\x08\\ \\xe2\\x5b\\x1e\\x82\\x77\\xee\\x21\\xce\\x2d\\x1c\\xa2\\x3b\\x3d\\x88\\x62\\xb2\\ \\x95\\xdd\\x5c\\xc6\\xf9\\x9e\\x8d\\xec\\x1b\\xf8\\x15\\xc5\\x5d\\x8a\\xd9\\xfc\\", "\\x08\\xc5\\x61\\xcd\\x9d\\x0e\\xe7\\x6d\\x86\\x09\\x31\\xa8\\x1d\\x83\\x41\\x10\\ \\xda\\xf2\\x6c\\x37\\x61\\x96\\x11\\x02\\x38\\x3b\\x01\\x3c\\x11\\xd2\\x53\\xbe\\ \\xc7\\x72\\x7e\\x7b\\x3b\\x13\\x89\\xbf\\xf3\\x03\\x54\\xe6\\x41\\xef\\x32\\x0a\\ \\x86\\xf5\\xc0\\xd4\\x29\\x11\\x00\\x1a\\x8a\\x80\\x30\\xd0\\x2c\\xce\\x59\\x0a\\ \\xe7\\x00\\x5f\\xf4\\x05\\xa6\\x01\\x66\\x9d\\xed\\xa6\\xde\\x82\\x69\\x9f\\xd8\\ \\x26\\xd3\\x72\\xf4\\xb9\\xcc\\x11\\x63\\x0b\\xd8\\xef\\x82\\x3d\\x3e\\xe8\\xd2\\ \\x00\\x99\\x17\\x16\\xc1\\xf2\\xa9\\xf6\\x26\\x50\\x0f\\xac\\x84\\xe9\\x6f\\xd0\\ \\xf1\\x58\\x1b\\x7c\\x9b\\xe1\\x38\\xe7\\x3a\\xb8\\xf8\\xbf\\x50\\x93\\xe8\\x10\\ \\x76\\x13\\x24\\xc8\\x76\\x2f\\x29\\x7b\\xc8\\xa0\\x02\\xe2\\xd7\\xc3\\x0f\\x17\\ \\x62\\x5e\\x3e\\x8b\\x62\\x3e\\xa0\\x17\\x10\\xc1\\xe4\\x20\\x43\\x09\\xad\\xdb\\ \\x08\\xcd\\x26\\x8c\\x01\\x33\\xa1\\x4c\\x8c\\x05\\x76\\x25\\x3a\\x8a\\x5e\\x0c\\ \\xbe\\xcd\\xf0\\x78\\x15\\x6c\\x04\\x0e\\xdd\\x08\\x64\\x05\\x6c\\xb5\\x68\\xea\\ \\x81\\xe7\\x49\\x17\\x33\\xd7\\xc3\\x20\\xcb\\x76\\xcc\\x3e\\xa7\\x10\\x0f\\x10\\", "\\x7f\\x0c\\x1b\\x7d\\x30\\xfd\\x59\\xe0\\x95\\x3c\\x9b\\x3e\\x53\\x92\\x60\\xc8\\ \\x4e\\x8e\\x7e\\xda\\xcc\\xa6\\x02\\x1b\\x30\\xa5\\x0e\\xc1\\xa4\\x01\\xed\\x40\\ \\x7f\\xc0\\x72\\x40\\x97\\xb2\\x1d\\xdc\\x73\\xbd\\x18\\xdb\\x87\\x43\\x60\\x18\\ \\xf6\\xf5\\x45\\xa1\\x73\\x5a\\x47\\x1c\\x85\\x1f\\x07\\xb8\\x11\\x11\\x0c\\x63\\ \\x09\\x5c\\xf1\\x5f\\xd8\\x52\\x02\\xc1\\x73\\x81\\x09\\x58\\x4c\\xc4\\x34\\x1f\\ \\x02\\xb7\\x0b\\x2b\\xf2\\x10\\x15\\xf4\\x27\\x07\\x51\\xe5\\x3a\\x4b\\xbf\\xd4\\ 
\\x67\\x20\\xb0\\x0d\\x3c\\x60\\xe8\\x35\\xd4\\x36\\x13\\x52\\xd7\\x39\\xc7\\x9a\\ \\x0a\\xbc\\x66\\x6f\\xc4\\xe6\\x75\\xb0\\x3c\\x03\\x1e\\x5d\\x09\\xc5\\x37\\x26\\ \\x43\\xa8\\x14\\xe8\\x05\\xae\\xc3\\x30\\xb1\\x8c\\xda\\xd7\\xa2\\xfc\\xd6\\xcb\\ \\xee\\xcf\\x7c\\xa7\\x80\\x11\\x0d\\x90\\xf5\\x36\\xf0\\x41\\x3e\\x9c\\xfc\\xbb\\ \\xb3\\x78\\xb7\\xf3\\x37\\x08\\x29\\xef\\x40\\x7b\\x8a\\x53\\x72\\x7f\\xe0\\x38\\ \\xe2\\x7a\\x0c\\x42\\xb6\\xbe\\xf8\\x1f\\x86\\x60\\x0c\\xb8\\x11\\xc8\\x43\\x0c\\ \\x25\\x46\\x04\\x8f\\x71\\x2d\\xf4\\xad\\x27\\x7c\\xf8\\x67\\x5a\\xc8\\x23\\x86\\", "\\xe8\\x16\\x87\\x87\\x4f\\x42\\x61\\x1d\\x94\\xf7\\x87\\x25\\xe9\\x7f\\x99\\x67\\ \\x18\\xdb\\x09\\x17\\x1e\\x06\\x6f\\x1d\\xb8\\x9b\\x0c\\x4e\\x8d\\x16\\x47\\xbb\\ \\xc2\\x19\\xd3\\x2e\\x22\\x0b\\x28\\x8c\\x83\\xab\\x0d\\x7a\\x1d\\x36\\x71\\x9d\\ \\x4e\\xa2\\xa3\\x28\\x08\\xfd\\xe2\\x18\\xc9\\xd0\\x6a\\x40\\x72\\x04\\x32\\x0f\\ \\x01\\x6d\\x2e\\xf0\\x5a\\x90\\x21\\xac\\xae\\x60\\x76\\x02\\x27\\x9c\\x23\\x6d\\ \\x72\\x6e\\x17\\x9a\\x9c\\x4b\\xfa\\x46\\x03\\x12\\x65\\x53\\xe3\\x28\\xc7\\x9d\\ \\x1e\\x73\\x12\\x60\\x93\\x09\\xcd\\x16\\x04\\x4d\\xc8\\xb3\\x60\\x53\\x1b\\x9a\\ \\x25\\xd4\\xf4\\x15\\x52\\x96\\x7d\\x4d\\x1a\\x5e\\x63\\xcf\\x3c\\x1e\\x93\\x2d\\ \\x8e\\xd6\\xbd\\x48\\x45\\x09\\x92\\xbb\\xc4\\xf6\\x59\\x29\\xdd\\xd5\\xf2\\xab\\ \\xa1\\xaf\\x85\\xde\\x77\\x66\\x23\\xed\\xfb\\x90\\x6e\\x47\\x32\\x06\\x4b\\x3c\\ \\x27\\x71\\x97\\x74\\x5e\\x9a\\xac\\x16\\xd4\\x20\\x64\\x1d\\x40\\xea\\x9f\\x2e\\ \\x31\\x47\\xe2\\x6a\\xe9\\x46\\xaf\\x62\\x6d\\xce\\xe5\\x76\\x7e\\x77\\x7b\\x4e\\ \\x92\\x3b\\xda\\xbe\\x1f\\x5e\\x89\\x74\\xa5\\x5f\\x62\\xae\\x7d\\xb5\\x6a\\x3c\\", "\\x68\\xac\\xa3\\x35\\xdf\\x6e\\xde\\x5e\\xc0\\xf4\\x27\\xc1\\x58\\xdc\\x17\\xac\\ \\x73\\x1c\\x00\\x45\\xb0\\x6e\\x69\\xe4\\x8f\\xc1\\xc2\\x72\\xfa\\x23\\x82\\x1d\\ \\x1a\\x39\\x05\\x28\\xc5\\x01\\x5e\\x21\\xec\\xbd\\x15\\xe3\\xb1\\x77\\xf1\\x2c\\ \\x0a\\x63\\xd4\\x00\\x87\\x46\\x3b\\x1e\\x6c\\x34\\x7c\\xeb\\xc7\\xf5\\xaf\\x4f\\ 
\\xed\\xd3\\xa8\\x79\\x9d\\x18\\x93\\x70\\xd7\\x1d\\x83\\xdd\\x83\\xa0\\xc0\\x24\\ \\xb6\\x6e\\x31\\x01\\x6e\\xc6\\x04\\x92\\x38\\x82\\xcb\\x5c\\x08\\x66\\x14\\xd6\\ \\x16\\x51\\x5b\\xbe\\x8d\\x72\\x32\\xd8\\x88\\x97\\x29\\x40\\xbf\\xba\\x7d\\x98\\ \\x0d\\x09\\x10\\x6f\\x05\\x8e\\x02\\x9e\\x10\\xc4\\x60\\x67\\xa9\\x9d\\xfd\\xaf\\ \\xde\\x0d\\xe6\\x4a\\x0f\\x58\\x3d\\x1d\\x90\\x95\\x13\\x9b\\xf6\\x2d\\x1f\\xbd\\ \\x15\\xe4\\x88\\xdf\\x2e\\xa0\\x15\\x38\\x0b\\x1c\\x2f\\x82\\xd0\\x5c\\xa0\\xf0\\ \\x57\\x60\\x9d\\xcd\\x63\\x1a\\x0a\\xcb\\x06\\xd0\\x50\\x01\\x91\\x81\\x40\\xce\\ \\x61\\xe7\\x39\\xb9\\xd0\\x39\\xc8\\xc6\\x6e\\x04\\xc0\\xc0\\xcd\\x69\\x3b\\x68\\ \\xe5\\x00\\x47\\x52\\x69\\x89\\x5e\\x4e\\x10\\x48\\x21\\x88\\xcb\\x58\\x08\\xbe\\", "\\x33\\xd5\\x80\\x6d\\x53\\x81\\xee\\x0e\\x36\\xdd\\x0e\\x5e\\x92\\x9d\\xf3\\x8e\\ \\xd9\\xcd\\x4f\\x9e\\xa3\\x38\\x1f\\xc3\\xa2\\xb7\\xd1\\x35\\x60\\xec\\x75\\x5a\\ \\x27\\xe4\\x6c\\x62\\x05\\x30\\x6f\\x18\\x58\\xf7\\x01\\xa3\\x09\\x90\\x88\\x89\\ \\x9f\\x1d\\x24\\x53\\x80\\x9b\\x16\\x44\\x2a\\x06\\xed\\x80\\x2b\\x6f\\x5e\\x74\\ \\xde\\xb9\\x58\\xcc\\x04\\xae\\x6b\\x83\\xbc\\x23\\xce\\xf3\\x7f\\x8b\\xc0\\x2e\\ \\x2f\\x9e\\xb4\\x38\\x5b\\x67\\xc0\\xaf\\x77\\x43\\xcb\\x3f\\x40\\x17\\xc5\\x49\\ \\x09\\x86\\x31\\xab\\x23\\x10\\x8a\\x80\\x51\\x8b\\x75\\x63\\x3b\\x4d\\x43\\x20\\ \\x7b\\x0f\\x24\\xaf\\x32\\xe0\\xac\\x1b\\x38\\x0d\\xb4\\x81\\xcf\\x05\\x39\\x35\\ \\x30\\xf8\\x28\\xf4\\xf9\\x12\\x9a\\x16\\x40\\xc4\\x0b\\x1a\\x0d\\x94\\xd8\\x05\\ \\x46\\x57\\x60\\xf4\\xb4\\x20\\xd5\\xd6\\x4b\\x92\\x81\\x6d\\xc0\\xd2\\x12\\x08\\ \\x3c\\xe7\\x54\\xe6\\xc3\\xcd\\x29\\x22\\x18\\x94\\xf2\\x3d\\x09\\xf8\\x68\\x24\\ \\x15\\xe1\\xe6\\x00\\x60\\x3c\\x2e\\x34\\x06\\x98\\xb0\\xd7\\xc1\\xe9\\x61\\x60\\ \\x08\\x34\\x64\\x40\\xd6\\x63\\x60\\xec\\x02\\xfc\\x49\\x58\\x53\\x23\\xec\\xb8\\", "\\xdd\\x24\\xbe\\x51\\x05\\x71\\xd5\\x10\\x57\\x98\\xbd\\xd2\\xe8\\x3e\\xd2\\xb9\\ \\x1e\\x69\\x58\\x9a\\x94\\xf0\\x90\\x4e\\x25\\xaf\\x11\\x81\\xa9\\xd9\\xd2\\x3f\\ 
\\x3c\\x6a\\x6d\\x41\\x3f\\xcb\\xa1\\xd0\\xc4\\x04\\xc9\\x3f\\x48\\xb1\\xaf\\xd0\\ \\x1c\\xa1\\xc6\\xdf\\x4c\\xa9\\xb7\\x47\\xa2\\x54\\xe2\\x42\\x89\\xfb\\xa4\\xa4\\ \\x73\\xb4\\xa6\\x02\\xad\\x6e\\x41\\xea\\xe6\\x93\\xb8\\xd5\\x99\\x0d\\xfe\\x4b\\ \\xea\\x31\\x5c\\xf1\\x5f\\x91\\x75\\x10\\xe9\\xbc\\x14\\x27\\xc6\\xbe\\x69\\x9f\\ \\xc4\\x7d\\x49\\xd2\\x55\\x99\\x4e\\xbc\\x9d\\x2a\\x3d\\x89\\xc4\\x74\\x89\\xf7\\ \\xa5\\x31\\xbd\\xa5\\xae\\x97\\x49\\xac\\x52\\x88\\x36\\xa9\\x47\\x81\\xfd\\x3a\\ \\x35\\x8a\\x52\\x27\\xb1\\x5c\\x16\\x6f\\x48\\x4f\\xa1\\x78\\xca\\x75\\x6a\\x4a\\ \\x78\\x49\\x35\\x74\\x68\\x8e\\x4b\\x42\\x59\\x3e\\x59\\xef\\xa1\\xda\\x00\\x8a\\ \\x4f\\x4f\\x96\\x28\\x96\\xae\\x40\\xc1\\xfd\\xf6\\x04\\x2a\\xb0\\x14\\xc9\\x3f\\ \\xde\\x59\\xe8\\x34\\x89\\x4b\\xa4\\x94\\x6e\\xaa\\x5d\\x6a\\xea\\x45\\xa1\\x2f\\ \\x2c\\x64\\x6d\\x41\\x2a\\xcd\\x75\\xf4\\xe3\\x11\\x89\\xd5\\x92\\xeb\\x7d\\xe9\\", "\\xa7\\x93\\x00\\x00\\x00\\x09\\x70\\x48\\x59\\x73\\x00\\x00\\x0b\\x13\\x00\\x00\\ \\x0b\\x13\\x01\\x00\\x9a\\x9c\\x18\\x00\\x00\\x00\\x07\\x74\\x49\\x4d\\x45\\x07\\ \\xe3\\x01\\x1e\\x09\\x34\\x07\\xfb\\x3d\\x97\\x4b\\x00\\x00\\x00\\x1d\\x69\\x54\\ \\x58\\x74\\x43\\x6f\\x6d\\x6d\\x65\\x6e\\x74\\x00\\x00\\x00\\x00\\x00\\x43\\x72\\ \\x65\\x61\\x74\\x65\\x64\\x20\\x77\\x69\\x74\\x68\\x20\\x47\\x49\\x4d\\x50\\x64\\ \\x2e\\x65\\x07\\x00\\x00\\x18\\xa1\\x49\\x44\\x41\\x54\\x68\\xde\\x35\\x99\\x67\\ \\x7c\\x55\\x55\\xfa\\xb6\\xaf\\xbd\\x4f\\x4b\\x4e\\x7a\\x25\\x09\\x25\\x09\\x09\\ \\x1d\\x44\\x91\\x26\\x8a\\xe8\\x20\\x4a\\x71\\x44\\x01\\xb1\\x2b\\x16\\xb0\\xcc\\ \\x60\\xc1\\xae\\xef\\x08\\x63\\x77\\xfe\\x3a\\xa2\\xa0\\x88\\x0c\\x16\\xac\\x88\\ \\x0a\\xa2\\xa0\\x02\\x4a\\x13\\x51\\x40\\x40\\x7a\\x42\\x4d\\x02\\x49\\x48\\x2f\\ \\x27\\x27\\xa7\\xee\\xfb\\xfd\\xb0\\xb7\\x9f\\xf2\\xcb\\x29\\xfb\\xac\\x67\\xad\\ \\xfb\\xb9\\xcb\\x7a\\x8c\\xbc\\xe8\\x70\\xcd\\xef\\xdc\\xc1\\x75\\x07\\x80\\x10\\ \\x3c\\xd0\\x7b\\x04\\x1b\\xbb\\xee\\x66\\xcf\\xe6\\x28\\xae\\x19\\x49\\x30\\xa6\\", 
"\\x27\\x27\\xa7\\xee\\xfb\\xfd\\xb0\\xb7\\x9f\\xf2\\xcb\\x29\\xfb\\xac\\x67\\xad\\ \\xfb\\xb9\\xcb\\x7a\\x8c\\xbc\\xe8\\x70\\xcd\\xef\\xdc\\xc1\\x75\\x07\\x80\\x10\\ \\x3c\\xd0\\x7b\\x04\\x1b\\xbb\\xee\\x66\\xcf\\xe6\\x28\\xae\\x19\\x49\\x30\\xa6\\ \\x83\\xfa\\x39\\x30\\xe3\\x1c\\xe8\\x0d\\x0c\\xee\\xf0\\x32\\xe3\\xd9\\x08\\x2c\\ \\xc9\\x83\\x48\\x36\\x0c\\x38\\x08\\xe3\\xc5\\xee\\x3b\\xe0\\x9b\\x42\\xd8\\x0d\\ \\x7c\\xb0\\x03\\xd2\\xbe\\x06\\xf7\\x27\\x5d\\xe0\\xf4\\x28\\x70\\xf9\\x20\\xe5\\ \\x04\\x47\\x56\\xed\\xa0\\x6a\\x84\\x49\\x69\\xd8\\x22\\x73\\x85\\x9b\\xb4\\xc7\\ \\xa6\\x42\\x73\\x6f\\x02\\x23\\x9b\\x49\\x78\\x66\\x09\\xee\\x6f\\xc3\\x84\\xa3\\ \\x26\\xbe\\x7d\\x97\\x62\\xb5\\xe7\\xa3\\x59\\x3f\\x62\\xae\\xf6\\xf3\\xf6\\xab\\ \\x11\\xae\\x5f\\x94\\x8f\\x7b\\xe5\\x85\\x7c\\x78\\xaf\\x9f\\xa9\\x89\\xaf\\x52\\ \\x7b\\xf6\\x3a\\x1a\\xc9\\x67\\xe0\\xb7\\x43\\xd9\\xfe\\xf7\\xcf\\x30\\xbf\\x6b\\ \\xde\\xcf\\xdf\\x6a\\x81\\xb9\\x40\\x03\\x6c\\xe9\\x7a\\x88\\x7f\\x54\\x47\\x71\\ \\x7d\\x0d\\x9c\\xee\\x0f\\x87\\xa1\\x79\\x3f\\x1c\\xc7\\xcf\\x50\\x60\\xc6\\xbf\\", "\\x94\\x57\\x7f\\x84\\x90\\x0a\\x3d\\x52\\x62\\xa2\\x94\\x9a\\x2c\\x0d\\x34\\x54\\ \\x7f\\x00\\x5d\\x27\\xf4\\x80\\xd0\\x0f\\x42\\xaf\\x0b\\xad\\x15\\xaa\\x8d\\xa3\\ \\xaf\\x84\\xaa\\x02\\x28\\x7e\\xef\\x00\\x89\\xe7\\xd5\\x41\\x95\\x9a\\x39\\x25\\ \\x8b\\x8d\\xb2\\xcc\\x0f\\x14\\xfc\\x0d\\xc5\\x6f\\xf0\\x49\\xcc\\x96\\xd8\\xa1\\ \\x28\\x87\\xa4\\xbf\\x77\\x97\\x26\\xa3\\x76\\x36\\xa9\\x9c\\x46\\x9d\\xa0\\x42\\ \\xed\\x4f\\xa5\\xc9\\x1a\\xd8\\x4f\\x07\\x8d\\x66\\x3d\\x52\\x70\\x4c\\x4a\\xfe\\ \\x51\\x67\\xb6\\xf8\\xb4\\xe3\\xdc\\x1b\\x34\\x3f\\x7f\\xa9\\x46\\x0e\\x8d\\xeb\\ \\x92\\xfc\\x72\\x99\\x59\\xde\\x36\\x82\\x35\\xc0\\x71\\xc8\\xf8\\x16\\x26\\x07\\ \\xa2\\xb8\\x3c\\x1e\\x20\\x09\\xc6\\x9d\\x65\\x75\\x29\\xcc\\xa9\\x8f\\x30\\xe4\\ \\x66\\xa0\\x2a\\x19\\x22\\x3e\\x88\\x87\\x69\\xb9\\x5e\\xfc\\x38\\x00\\xae\\x06\\ \\x1e\\x04\\x2e\\x8c\\xdb\\xd0\\x3a\\x5f\\x90\\x6c\\xc0\\x84\\x38\\x74\\xab\\x07\\ 
\\x1e\\x39\\x88\\xc6\\xfe\\x17\\x3f\\x2f\\x93\\xce\\x12\\x0c\\xda\\x31\\xac\\x3c\\ \\x12\\x67\\x8f\\x20\\x3e\\x35\\x0c\\x1c\\x00\\x22\\x18\\xf8\\xb0\\x36\\xf6\\x83\\", "by: The Resource Compiler for PyQt5 (Qt v5.12.5) # # WARNING! All changes", "\\xc4\\x5d\\x0f\\xfe\\x3a\\x70\\x6f\\x07\\xe3\\xb8\\xe3\\x6c\\x7a\\x54\\x91\\xbe\\ \\x25\\x9b\\x5d\\x4c\\x22\\x89\\x8d\\x24\\x45\\x0b\\x68\\x8d\\x9c\\x8f\\x72\\xf7\\ \\xd1\\xb2\\xeb\\x72\\x9a\\xae\\xf6\\xb0\\x6a\\xfd\\x9d\\x7c\\xf8\\xa4\\xc1\\x23\\ \\xef\\x1c\\xa4\\xa5\\x7d\\x0a\\xff\\x1f\\xa7\\x48\\xb3\\x27\\x67\\x17\\xe2\\x1e\\ \\x00\\x00\\x00\\x00\\x49\\x45\\x4e\\x44\\xae\\x42\\x60\\x82\\ \" qt_resource_name = b\"\\ \\x00\\x05\\ \\x00\\x73\\x5e\\x63\\ \\x00\\x6c\\ \\x00\\x6f\\x00\\x67\\x00\\x6f\\x00\\x73\\", "\\x58\\x74\\x43\\x6f\\x6d\\x6d\\x65\\x6e\\x74\\x00\\x00\\x00\\x00\\x00\\x43\\x72\\ \\x65\\x61\\x74\\x65\\x64\\x20\\x77\\x69\\x74\\x68\\x20\\x47\\x49\\x4d\\x50\\x64\\ \\x2e\\x65\\x07\\x00\\x00\\x18\\xa1\\x49\\x44\\x41\\x54\\x68\\xde\\x35\\x99\\x67\\ \\x7c\\x55\\x55\\xfa\\xb6\\xaf\\xbd\\x4f\\x4b\\x4e\\x7a\\x25\\x09\\x25\\x09\\x09\\ \\x1d\\x44\\x91\\x26\\x8a\\xe8\\x20\\x4a\\x71\\x44\\x01\\xb1\\x2b\\x16\\xb0\\xcc\\ \\x60\\xc1\\xae\\xef\\x08\\x63\\x77\\xfe\\x3a\\xa2\\xa0\\x88\\x0c\\x16\\xac\\x88\\ \\x0a\\xa2\\xa0\\x02\\x4a\\x13\\x51\\x40\\x40\\x7a\\x42\\x4d\\x02\\x49\\x48\\x2f\\ \\x27\\x27\\xa7\\xee\\xfb\\xfd\\xb0\\xb7\\x9f\\xf2\\xcb\\x29\\xfb\\xac\\x67\\xad\\ \\xfb\\xb9\\xcb\\x7a\\x8c\\xbc\\xe8\\x70\\xcd\\xef\\xdc\\xc1\\x75\\x07\\x80\\x10\\ \\x3c\\xd0\\x7b\\x04\\x1b\\xbb\\xee\\x66\\xcf\\xe6\\x28\\xae\\x19\\x49\\x30\\xa6\\ \\x83\\xfa\\x39\\x30\\xe3\\x1c\\xe8\\x0d\\x0c\\xee\\xf0\\x32\\xe3\\xd9\\x08\\x2c\\ \\xc9\\x83\\x48\\x36\\x0c\\x38\\x08\\xe3\\xc5\\xee\\x3b\\xe0\\x9b\\x42\\xd8\\x0d\\ \\x7c\\xb0\\x03\\xd2\\xbe\\x06\\xf7\\x27\\x5d\\xe0\\xf4\\x28\\x70\\xf9\\x20\\xe5\\", "\\x52\\x19\\xca\\x05\\xba\\x20\\x2e\\x06\\xee\\x3a\\x0c\\xfe\\x76\\x30\\xbf\\x04\\ 
\\x3e\\x07\\x32\\x80\\xcb\\xa0\\xf9\\x2a\\xd8\\x71\\x11\\xb4\\x87\\xa1\\xdf\\x09\\ \\x83\\x82\\xed\\x90\\xb1\\x0a\\xd8\\x9e\\x08\\xa6\\x05\\xe3\\xc3\\xfc\\xf4\\xb1\\ \\x18\\x14\\x85\\xdc\\x1b\\x80\\x03\\x06\\x74\\x26\\xa2\\xe2\\x04\\x3a\\x1f\\x69\\ \\xc7\\xec\\x1b\\xc3\\xdd\\x28\\x5c\\x8b\\x4d\\x8c\\xd5\\xbd\\xa1\\xf1\\x6a\\xa0\\ \\x18\\x68\\x05\\xb2\\x80\\x1e\\xc0\\x66\\x48\\xff\\x11\\x46\\xee\\x04\\x3f\\xf0\\ \\xdd\\xe5\\x28\\xf2\\x36\\x27\\x29\\xc1\\x02\\x0a\\x68\\x04\\xc2\\xb4\\x51\\xc0\\ \\x29\\xa0\\xbb\\x51\\x49\\x81\\xf1\\x14\\x46\\x49\\x03\\xc6\\x45\\x42\\x5d\\x81\\ \\x66\\xc0\\x04\\x6e\\x06\\xa6\\x6e\\x80\\x84\\xc7\\x9d\\xcd\\x99\\x0a\\xca\\x85\\ \\x78\\x1d\\xb8\\xd7\\x02\\x95\\x69\\xd0\\x91\\xe5\\x54\\x98\\x68\\x0b\\x27\\x89\\ \\x58\\x0f\\xfc\\xc0\\xb6\\xf9\\x21\\x2e\\x3a\\x08\\xc6\\x38\\x2f\\xd4\\x74\\x07\\ \\x86\\x01\\x17\\x01\\x67\\xc0\\xa8\\x85\\x9e\\xab\\x88\\xdd\\xd6\\x8c\\xbb\\x05\\ \\xd8\\xe9\\x81\\x5f\\xde\\x06\\x75\\x01\\x0a\\xc1\\x58\\x05\\xd7\\x3e\\x0b\\x97\\", "\\xde\\xcf\\xdf\\x6a\\x81\\xb9\\x40\\x03\\x6c\\xe9\\x7a\\x88\\x7f\\x54\\x47\\x71\\ \\x7d\\x0d\\x9c\\xee\\x0f\\x87\\xa1\\x79\\x3f\\x1c\\xc7\\xcf\\x50\\x60\\xc6\\xbf\\ \\x2c\\xf8\\x4f\\x7f\\x68\\x1e\\x00\\x1d\\xbd\\xe0\\x60\\x0a\\x08\\xb2\\x42\\x70\\ \\x3e\\xb0\\x28\\x0c\\x49\\x19\\x10\\x9e\\x0a\\x34\\x74\\x03\\x6e\\x82\\xf8\\xc3\\ \\xd0\\xf2\\x02\\x7d\\x2f\\x99\\xc1\\xa8\\x45\\x09\\x14\\xd7\\x42\\x5a\\x38\\x0e\\ \\x1d\\x6d\\x44\\x18\\x88\\xf7\\xb7\\x97\\x30\\x27\\x7e\\x47\\xa8\\xde\\xc0\\xb7\\ \\x29\\x87\\xd6\\xa3\\x23\\xd0\\xa9\\x2c\\x5c\\x8f\\x4d\\xc0\\x38\\xe4\\xa1\\xf7\\ \\xeb\\x29\\xf0\\x99\\x8b\\xe3\\xea\\x46\\x60\\xed\\x83\\x64\\x86\\xfb\\x91\\x1c\\ \\x4d\\xa7\\xa6\\xc5\\xa0\\xe0\\x64\\x22\\x93\\xdf\\x9a\\x8b\\x59\\x19\\x8c\\x92\\ \\xb8\\x0d\\xc8\\x81\\x78\\x14\\xc2\\x64\\x91\\x91\\x68\\xc2\\x56\\x17\\xb8\\xce\\ \\x62\\x8d\\x83\\x0d\\x37\\xc1\\x68\\x82\\xdc\\x70\\x77\\x02\\x2c\\xcc\\x05\\xf2\\ \\xc1\\x30\\xe0\\x9c\\xcd\\x1c\\x7b\\xb3\\x8d\\xde\\xcf\\xc2\\xae\\x3e\\xb0\\x0b\\ 
\\x88\\xb9\\xc1\\x9d\\x03\\x81\\x81\\xc0\\xd3\\xfb\\xc1\\x3c\\x03\\xec\\x43\\x44\\", "\\xaf\\x84\\xaa\\x02\\x28\\x7e\\xef\\x00\\x89\\xe7\\xd5\\x41\\x95\\x9a\\x39\\x25\\ \\x8b\\x8d\\xb2\\xcc\\x0f\\x14\\xfc\\x0d\\xc5\\x6f\\xf0\\x49\\xcc\\x96\\xd8\\xa1\\ \\x28\\x87\\xa4\\xbf\\x77\\x97\\x26\\xa3\\x76\\x36\\xa9\\x9c\\x46\\x9d\\xa0\\x42\\ \\xed\\x4f\\xa5\\xc9\\x1a\\xd8\\x4f\\x07\\x8d\\x66\\x3d\\x52\\x70\\x4c\\x4a\\xfe\\ \\x51\\x67\\xb6\\xf8\\xb4\\xe3\\xdc\\x1b\\x34\\x3f\\x7f\\xa9\\x46\\x0e\\x8d\\xeb\\ \\x92\\xfc\\x72\\x99\\x59\\xde\\x36\\x82\\x35\\xc0\\x71\\xc8\\xf8\\x16\\x26\\x07\\ \\xa2\\xb8\\x3c\\x1e\\x20\\x09\\xc6\\x9d\\x65\\x75\\x29\\xcc\\xa9\\x8f\\x30\\xe4\\ \\x66\\xa0\\x2a\\x19\\x22\\x3e\\x88\\x87\\x69\\xb9\\x5e\\xfc\\x38\\x00\\xae\\x06\\ \\x1e\\x04\\x2e\\x8c\\xdb\\xd0\\x3a\\x5f\\x90\\x6c\\xc0\\x84\\x38\\x74\\xab\\x07\\ \\x1e\\x39\\x88\\xc6\\xfe\\x17\\x3f\\x2f\\x93\\xce\\x12\\x0c\\xda\\x31\\xac\\x3c\\ \\x12\\x67\\x8f\\x20\\x3e\\x35\\x0c\\x1c\\x00\\x22\\x18\\xf8\\xb0\\x36\\xf6\\x83\\ \\xc4\\x44\\x7c\\x74\\x12\\x25\\x4e\\xb1\\xf9\\x09\\xc9\\x2b\\xf2\\xe0\\xf0\\xcd\\ \\xf4\\x30\\x3b\\x99\\x68\\xee\\x87\\x0b\\xc6\\x53\\xf0\\xa6\\x20\\xdf\\x4b\\x81\\", "8, 0]: rcc_version = 1 qt_resource_struct = qt_resource_struct_v1 else: rcc_version = 2 qt_resource_struct", "\\x7c\\x55\\x55\\xfa\\xb6\\xaf\\xbd\\x4f\\x4b\\x4e\\x7a\\x25\\x09\\x25\\x09\\x09\\ \\x1d\\x44\\x91\\x26\\x8a\\xe8\\x20\\x4a\\x71\\x44\\x01\\xb1\\x2b\\x16\\xb0\\xcc\\ \\x60\\xc1\\xae\\xef\\x08\\x63\\x77\\xfe\\x3a\\xa2\\xa0\\x88\\x0c\\x16\\xac\\x88\\ \\x0a\\xa2\\xa0\\x02\\x4a\\x13\\x51\\x40\\x40\\x7a\\x42\\x4d\\x02\\x49\\x48\\x2f\\ \\x27\\x27\\xa7\\xee\\xfb\\xfd\\xb0\\xb7\\x9f\\xf2\\xcb\\x29\\xfb\\xac\\x67\\xad\\ \\xfb\\xb9\\xcb\\x7a\\x8c\\xbc\\xe8\\x70\\xcd\\xef\\xdc\\xc1\\x75\\x07\\x80\\x10\\ \\x3c\\xd0\\x7b\\x04\\x1b\\xbb\\xee\\x66\\xcf\\xe6\\x28\\xae\\x19\\x49\\x30\\xa6\\ \\x83\\xfa\\x39\\x30\\xe3\\x1c\\xe8\\x0d\\x0c\\xee\\xf0\\x32\\xe3\\xd9\\x08\\x2c\\ 
\\xc9\\x83\\x48\\x36\\x0c\\x38\\x08\\xe3\\xc5\\xee\\x3b\\xe0\\x9b\\x42\\xd8\\x0d\\ \\x7c\\xb0\\x03\\xd2\\xbe\\x06\\xf7\\x27\\x5d\\xe0\\xf4\\x28\\x70\\xf9\\x20\\xe5\\ \\x04\\x47\\x56\\xed\\xa0\\x6a\\x84\\x49\\x69\\xd8\\x22\\x73\\x85\\x9b\\xb4\\xc7\\ \\xa6\\x42\\x73\\x6f\\x02\\x23\\x9b\\x49\\x78\\x66\\x09\\xee\\x6f\\xc3\\x84\\xa3\\ \\x26\\xbe\\x7d\\x97\\x62\\xb5\\xe7\\xa3\\x59\\x3f\\x62\\xae\\xf6\\xf3\\xf6\\xab\\", "\\xfd\\x97\\x4c\\xce\\x3c\\x97\\xfd\\xf7\\x1f\\xa7\\xab\\xd9\\x40\\xc3\\x2b\\xb7\\ \\x61\\xac\\xfc\\x3d\\x41\\x57\\x7f\\xdf\\xdd\\x1e\\xe6\\xac\\x05\\x4a\\x4c\\xea\\ \\x17\\x59\\x64\\xee\\xcb\\xc1\\x35\\x0f\\x38\\xde\\x1d\\x54\\x04\\xf4\\x71\\xc2\\ \\xcf\\x20\\x3b\\x00\\x4d\\x3a\\x49\\xfd\\xe7\\x65\\xac\\x4f\\x86\\x1b\\xde\\x05\\ \\xe3\\x89\\x31\\xc4\\x9b\\x5f\\x76\\xac\\x6c\\x22\\x2e\\x8e\\x41\\xff\\x77\\xed\\ \\xbc\\x9f\\x93\\x83\\x36\\x8e\\xc2\\xe0\\x12\\x3a\\xb8\\x80\\x24\\xbe\\xb0\\x83\\ \\xd7\\xcd\\x4b\\x09\\x2f\\x81\\x76\\x13\\x52\\x0c\\xf0\\xec\\x01\\xf3\\x43\\xe0\\ \\xb7\\xf1\\x54\\xec\\xfb\\x37\\xf9\\xb1\\x12\\x02\\x34\\x93\\xe9\\xdf\\x0d\\xe6\\ \\xd7\\x90\\x60\\xc1\\xed\\x2b\\xa8\\x5e\\xf7\\x22\\x4b\\x0a\\x07\\x91\\x5b\\xe6\\ \\xa1\\x3c\\x2d\\xc6\\x4b\\x67\\x42\\xb8\\xae\\x6e\\x7e\\x73\\x5e\\x72\\xaf\\x6d\\ \\xa4\\xbf\\xdf\\x1b\\x7a\\xf9\\x61\\xdb\\x34\\x92\\x7e\\x18\\x86\\xf9\\x51\\x09\\ \\x34\\x8c\\x04\\xa6\\xdb\\x73\\x0c\\x8e\\x42\\xd1\\x01\\xc8\\xfc\\x9e\\xc8\\xa4\\ \\xa3\\x54\\x4e\\x6f\\x64\\x76\\x29\\xdc\\x0e\\x64\\xee\\x04\\xea\\xea\\x31\\x6b\\", "\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \" qt_resource_struct_v2 = b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ 
\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x01\\x74\\x20\\x97\\x84\\xbd\\", "\\x1d\\x6d\\x44\\x18\\x88\\xf7\\xb7\\x97\\x30\\x27\\x7e\\x47\\xa8\\xde\\xc0\\xb7\\ \\x29\\x87\\xd6\\xa3\\x23\\xd0\\xa9\\x2c\\x5c\\x8f\\x4d\\xc0\\x38\\xe4\\xa1\\xf7\\ \\xeb\\x29\\xf0\\x99\\x8b\\xe3\\xea\\x46\\x60\\xed\\x83\\x64\\x86\\xfb\\x91\\x1c\\ \\x4d\\xa7\\xa6\\xc5\\xa0\\xe0\\x64\\x22\\x93\\xdf\\x9a\\x8b\\x59\\x19\\x8c\\x92\\ \\xb8\\x0d\\xc8\\x81\\x78\\x14\\xc2\\x64\\x91\\x91\\x68\\xc2\\x56\\x17\\xb8\\xce\\ \\x62\\x8d\\x83\\x0d\\x37\\xc1\\x68\\x82\\xdc\\x70\\x77\\x02\\x2c\\xcc\\x05\\xf2\\ \\xc1\\x30\\xe0\\x9c\\xcd\\x1c\\x7b\\xb3\\x8d\\xde\\xcf\\xc2\\xae\\x3e\\xb0\\x0b\\ \\x88\\xb9\\xc1\\x9d\\x03\\x81\\x81\\xc0\\xd3\\xfb\\xc1\\x3c\\x03\\xec\\x43\\x44\\ \\x11\\xb3\\x49\\x9a\\xf3\\x24\\x7c\\x9c\\x45\\x6c\\x8c\\xa0\\xcb\\xef\\xb8\\xe8\\ \\x0a\\x94\\x63\\x1a\\x3e\\x7c\\x5d\\x93\\xd1\\xf8\\x16\\xd2\\x9a\\x9b\\x70\\x75\\ \\xf6\\x84\\x68\\x6f\\x28\\x3d\\xc3\\xd0\\x4b\\xcb\\xc9\\x8c\\x2c\\x62\\xdf\\xb2\\ \\x9d\\xdc\\xb2\\xfb\\x08\\x1e\\xef\\x11\\x16\\x56\\x26\\x71\\xdb\\xb1\\x5f\\xf8\\ \\x57\\x82\\x87\\x65\\x56\\x0e\\x2c\\xd9\\xde\\x45\\x5a\\x81\\x74\\x27\\x6a\\xac\\", "\\x81\\x98\\x75\\x02\\xde\\xdf\\x79\\x95\\x46\\x6d\\x7b\\x4f\\xd7\\x1c\\x9a\\xad\\ \\xc0\\x2f\\x48\\x2f\\x21\\x8d\\xf4\\x48\\x5d\\x8b\\xa4\\x3b\\x0d\\x35\\x1c\\xb2\\ \\x73\\xfa\\x1b\\x16\\xda\\xd8\\x89\\x82\\xb5\\xa8\\xea\\x18\\x7a\\xc1\\x42\\xcf\\ \\x0b\\xad\\x13\\xfa\\x3d\\x8c\\xe2\\x9b\\x0d\\x75\\x9c\\x34\\xa4\\xa3\\xc8\\x6a\\ \\xb4\\x19\\x50\\x05\\xdd\\x25\\x63\\x92\\xc4\\x08\\x89\\x19\\xd2\\xf8\\x02\\xe9\\ \\xff\\x21\\x79\\xee\\x90\\x8c\\xcf\\x25\\xe3\\x4a\\xbb\\x7f\\x8c\\x6a\\xc9\\x78\\ \\x47\\x32\\x26\\x48\\xac\\x90\\xd8\\xa7\\xbd\\x48\\x11\\x9e\\x95\\xd8\\xa9\\x72\\ \\xa2\\xb2\\x58\\xa7\\x36\\x0e\\x4a\\x2c\\xd7\\x7a\\xb3\\x5c\\xa1\\xae\\x77\\xa9\\ \\xf9\\xfa\\xb1\\xba\\xb6\\xfc\\x2e\\x69\\xd0\\x10\\x19\\x0d\\x7c\\xab\\xe5\\x13\\ 
\\x5d\\xdc\\x64\\x3e\\x40\\xda\\xaf\\x03\\xc0\\xbf\\x0e\\xba\\x75\\xf2\\xf5\\xe7\\ \\xa2\\xc7\\x06\\x38\\x5a\\x6b\\x32\\xfe\\x50\\x1a\\x19\\xd5\\x51\\x68\\x4f\\x83\\ \\x84\\x28\\xd4\\xd6\\xd1\\xf9\\x3c\\x6c\\x19\\x0f\\x7d\\x2c\\xe8\\xfe\\x0a\\xb8\\ \\xde\\x99\\x02\\x6d\\x57\\x41\\xe6\\x87\\x90\\xe1\\x42\\x97\\x54\\x41\\x71\\x19\\", "\\x28\\x87\\xa4\\xbf\\x77\\x97\\x26\\xa3\\x76\\x36\\xa9\\x9c\\x46\\x9d\\xa0\\x42\\ \\xed\\x4f\\xa5\\xc9\\x1a\\xd8\\x4f\\x07\\x8d\\x66\\x3d\\x52\\x70\\x4c\\x4a\\xfe\\ \\x51\\x67\\xb6\\xf8\\xb4\\xe3\\xdc\\x1b\\x34\\x3f\\x7f\\xa9\\x46\\x0e\\x8d\\xeb\\ \\x92\\xfc\\x72\\x99\\x59\\xde\\x36\\x82\\x35\\xc0\\x71\\xc8\\xf8\\x16\\x26\\x07\\ \\xa2\\xb8\\x3c\\x1e\\x20\\x09\\xc6\\x9d\\x65\\x75\\x29\\xcc\\xa9\\x8f\\x30\\xe4\\ \\x66\\xa0\\x2a\\x19\\x22\\x3e\\x88\\x87\\x69\\xb9\\x5e\\xfc\\x38\\x00\\xae\\x06\\ \\x1e\\x04\\x2e\\x8c\\xdb\\xd0\\x3a\\x5f\\x90\\x6c\\xc0\\x84\\x38\\x74\\xab\\x07\\ \\x1e\\x39\\x88\\xc6\\xfe\\x17\\x3f\\x2f\\x93\\xce\\x12\\x0c\\xda\\x31\\xac\\x3c\\ \\x12\\x67\\x8f\\x20\\x3e\\x35\\x0c\\x1c\\x00\\x22\\x18\\xf8\\xb0\\x36\\xf6\\x83\\ \\xc4\\x44\\x7c\\x74\\x12\\x25\\x4e\\xb1\\xf9\\x09\\xc9\\x2b\\xf2\\xe0\\xf0\\xcd\\ \\xf4\\x30\\x3b\\x99\\x68\\xee\\x87\\x0b\\xc6\\x53\\xf0\\xa6\\x20\\xdf\\x4b\\x81\\ \\x37\\x99\\xe2\\x16\\x93\\x7b\\x3c\\x07\\x31\\x17\\x0e\\xe9\\x44\\x61\\x60\\x00\\ \\x18\\xc5\\x30\\x39\\x0e\\x74\\x58\\xd0\\xe5\\x04\\xc1\\x5b\\xa1\\xd5\\x03\\x33\\", "qt as QtCore qt_resource_data = b\"\\ \\x00\\x00\\x19\\x3d\\ \\x89\\ \\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0d\\x49\\x48\\x44\\x52\\x00\\ \\x00\\x00\\x32\\x00\\x00\\x00\\x32\\x08\\x06\\x00\\x00\\x00\\x1e\\x3f\\x88\\xb1\\ \\x00\\x00\\x00\\x06\\x62\\x4b\\x47\\x44\\x00\\xff\\x00\\xff\\x00\\xff\\xa0\\xbd\\ \\xa7\\x93\\x00\\x00\\x00\\x09\\x70\\x48\\x59\\x73\\x00\\x00\\x0b\\x13\\x00\\x00\\ \\x0b\\x13\\x01\\x00\\x9a\\x9c\\x18\\x00\\x00\\x00\\x07\\x74\\x49\\x4d\\x45\\x07\\", "\\x39\\xb7\\x02\\xfb\\x66\\x01\\xf7\\x39\\xb2\\x1c\\x85\\xe4\\x23\\xe8\\xa3\\x65\\ 
\\xf0\\xf0\\x4f\\xf0\\xc2\\x30\\xa8\\x9d\\x65\\xbf\\x97\\xbf\\x0e\\xd2\\x76\\x42\\ \\xf0\\x5e\\x70\\x1d\\x87\\x82\\x57\\x6c\\x80\\x06\\x93\\x20\\x2b\\x44\\x8c\\xbe\\ \\x88\\x5c\\x70\\x2f\\x24\\x9d\\x3a\\x76\\x91\\x49\\x53\\xfb\\x67\\x50\\x97\\x06\\ \\xa3\\xee\\x23\\xd5\\x5c\\x00\\xec\\xe0\\x24\\xb0\\x18\\x13\\x53\\x33\\xe1\\x50\\ \\x2a\\xc4\\xdb\\xed\\x93\\xef\\xb1\\x11\\x2a\\x3c\\xd0\\x7e\\x1d\\x30\\xa8\\x0a\\ \\x3a\\xdd\\x8c\\x98\\xe5\\x61\\x58\\x1b\\x7c\\x92\\x0d\\xdf\\x7f\\x19\\x87\\x3b\\ \\x37\\x41\\xd6\\xef\\x90\\x17\\xc1\\x2a\\xb6\\x59\\xb2\\x7b\\x3b\\x70\\xc8\\x07\\ \\xc6\\x01\\x60\\x37\\xa4\\x7d\\x41\\x7c\\xee\\xeb\\xb4\\xfc\\xb1\\x14\\xce\\xc6\\ \\x61\\x76\\x16\\xb4\\x5f\\x64\\xab\\x6c\\xea\\x7a\\x98\\xb3\\x01\\x4e\\xcc\\xb1\\ \\xbb\\xac\\x74\\x83\\xcd\\x54\\x64\\x43\\xef\\x03\\x70\\xfe\\x59\\xe2\\xb4\\x12\\ \\x66\\x02\\x8c\\xf8\\x89\\xac\\xcc\\x7b\\x71\\x11\\x63\\x1f\\x59\\xd4\\xb4\\x6f\\ \\x87\\xce\\x0c\\x78\\xe0\\x1d\\xba\\x78\\xdf\\x60\\x32\\x27\\xed\\xaf\\xea\\x2c\\", "\\x57\\x60\\x9d\\xcd\\x63\\x1a\\x0a\\xcb\\x06\\xd0\\x50\\x01\\x91\\x81\\x40\\xce\\ \\x61\\xe7\\x39\\xb9\\xd0\\x39\\xc8\\xc6\\x6e\\x04\\xc0\\xc0\\xcd\\x69\\x3b\\x68\\ \\xe5\\x00\\x47\\x52\\x69\\x89\\x5e\\x4e\\x10\\x48\\x21\\x88\\xcb\\x58\\x08\\xbe\\ \\x00\\x6c\\x01\\xf6\\x3e\\x41\\x2d\\xb9\\x44\\xf1\\x60\\x81\\x9d\\x11\\x93\\xca\\ \\x31\\xb7\\xba\\xe1\\x82\\x9f\\x81\\x02\\x03\\xa2\\x99\\x30\\x14\\x0e\\xa4\\xc0\\ \\x13\\xc7\\x21\\x7b\\x01\\x70\\x36\\xcb\\x09\\x43\\x89\\xc4\\x1f\\x28\\x63\\xe1\\ \\x0a\\x9b\\xc9\\x32\\x9d\\x30\\xd5\\xd5\\xe9\\x91\\x46\\xe0\\xc4\\x18\\xd0\\x83\\ \\x16\\x78\\xea\\x1c\\x21\\x1c\\x04\\xa1\\xfb\\xc8\\x9e\\x9f\\x41\\x4d\\x1e\\xc4\\ \\x5e\\x39\\xee\\xa4\\xc2\\x5c\\xb0\\xfa\\x40\\xb5\\xcb\\xce\\x57\\xb6\\x94\\xdb\\ \\x91\\x36\\x06\\xd6\\xc2\\x4b\\x39\\xa1\\x1c\\x2a\\x00\\x11\\x81\\x0b\\xde\\x82\\ \\xad\\x49\\xc4\\xbf\\x7c\\x93\\x0e\\x6e\\x64\\x37\\x06\\xb5\\x18\\x40\\x9c\\xae\\ \\xfc\\x06\\xe1\\x2d\\x98\\x3b\\x3c\\x50\\xb2\\x1a\\x88\\x09\\x12\\x5a\\x38\\x36\\ 
\\x0b\\xfa\\x06\\xa0\\xe7\\x5c\\xe7\\xc6\\xa6\\x3d\\x13\\xcc\\x3f\\x61\\xc6\\x87\\", "code # # Created by: The Resource Compiler for PyQt5 (Qt v5.12.5) #", "\\x4d\\x83\\x53\\xbd\\x21\\x9e\\x47\\x12\\x9d\\xe0\\x09\\x40\\x34\\x19\\x62\\xfd\\ \\x9c\\x9e\\x6a\\x06\\x32\\x81\\xc1\\x50\\x57\\x85\\x7a\\x74\\x80\\x1b\\x8c\\x6c\\ \\xe7\\xad\\x0c\\xc0\\xed\\xc0\\xab\\xdc\\x07\\x64\\xe0\\x61\\x0f\\xd9\\xe4\\x13\\ \\xe7\\x02\\xdc\\x34\\x92\\x4a\\x10\\x94\\x0f\\x74\\xe0\\x36\\x77\\x61\\xf8\\x94\\ \\xa3\\xb1\\xd4\\x13\\x02\\xfa\\x02\\xcf\\x34\\x42\\x97\\xbb\\x80\\xad\\x29\\x30\\ \\xb9\\x9d\\xce\\xfb\\x21\\xbc\\x07\\xd2\\x3f\\x32\\xa0\\x5c\\x50\\xef\\x03\\x2b\\ \\x05\\xa2\\x3d\\xed\\x10\\x43\\x0b\\x3c\\xb7\\x8d\\xdf\\x9f\\x86\\xf4\\x3a\\xe8\\ \\x33\\xd5\\x80\\x6d\\x53\\x81\\xee\\x0e\\x36\\xdd\\x0e\\x5e\\x92\\x9d\\xf3\\x8e\\ \\xd9\\xcd\\x4f\\x9e\\xa3\\x38\\x1f\\xc3\\xa2\\xb7\\xd1\\x35\\x60\\xec\\x75\\x5a\\ \\x27\\xe4\\x6c\\x62\\x05\\x30\\x6f\\x18\\x58\\xf7\\x01\\xa3\\x09\\x90\\x88\\x89\\ \\x9f\\x1d\\x24\\x53\\x80\\x9b\\x16\\x44\\x2a\\x06\\xed\\x80\\x2b\\x6f\\x5e\\x74\\ \\xde\\xb9\\x58\\xcc\\x04\\xae\\x6b\\x83\\xbc\\x23\\xce\\xf3\\x7f\\x8b\\xc0\\x2e\\ \\x2f\\x9e\\xb4\\x38\\x5b\\x67\\xc0\\xaf\\x77\\x43\\xcb\\x3f\\x40\\x17\\xc5\\x49\\", "\\x55\\x48\\xd9\\x48\\x69\\x86\\xaa\\x3f\\x44\\xff\\x16\\x3a\\xb5\\xc6\\x94\\x92\\ \\xd2\\x25\\x86\\x38\\x8b\\xbd\\x47\\x32\\x6e\\x56\\xe4\\xca\\x62\\x9d\\x6a\\x41\\ \\x9b\\x84\\x82\\x3f\\x20\\x25\\x96\\x38\\xef\\x7f\\x26\\xb1\\x4c\\xe2\\x5b\\x29\\ \\xfd\\x46\\x69\\x2b\\xd2\\x85\\xa5\\x12\\x6f\\x49\\x6c\\x96\\x78\\x43\\x7a\\xd8\\ \\x23\\xf1\\xaa\\xc4\\x26\\x69\\xd8\\xc5\\x12\\x1f\\xa9\\x91\\x98\\xe4\\xba\\x5d\\ \\x71\\xf6\\xa9\\x81\\xb0\\xc2\\x5c\\x2b\\xb9\\x6e\\xd1\\x7a\\xc2\\xba\\x17\\xe9\\ \\x3b\\x62\\x6a\\xf0\\xbd\\x22\\x0d\\xf5\\xab\\x82\\x45\\x22\\xb2\\x1b\\x55\\xef\\ \\x40\\x91\\x25\\xe8\\xcb\\x38\\x52\\x11\\xd2\\x14\\xa4\\x9b\\x4c\\xc9\\x6b\\x4a\\ \\x49\\x1e\\x55\\x2c\\x43\\x5f\\x06\\x91\\xee\\x43\\x72\\x17\\x4a\\xfc\\x4d\\x62\\ 
\\x92\\x64\\x9e\\x27\\x2b\\xbb\\x50\\x81\\x95\\xe8\\x47\\x0b\\xad\\x11\\xfa\\x46\\ \\xe8\\x90\\x90\\x16\\x20\\x65\\xa4\\x49\\xdc\\xee\\x2c\\xf2\\x6b\\x89\\xef\\xa4\\ \\xec\\xcb\\xa5\\xf7\\x0d\\xa9\\xa0\\xbf\\xc4\\xff\\x24\\xf6\\x4b\\xbd\\x2f\\x95\\ \\xf2\\x87\\x4b\\xfc\\x2c\\x15\\x3d\\x28\\x31\\x47\\x61\\x02\\x12\\x0f\\xa9\\x93\\", "\\xb7\\xf1\\x54\\xec\\xfb\\x37\\xf9\\xb1\\x12\\x02\\x34\\x93\\xe9\\xdf\\x0d\\xe6\\ \\xd7\\x90\\x60\\xc1\\xed\\x2b\\xa8\\x5e\\xf7\\x22\\x4b\\x0a\\x07\\x91\\x5b\\xe6\\ \\xa1\\x3c\\x2d\\xc6\\x4b\\x67\\x42\\xb8\\xae\\x6e\\x7e\\x73\\x5e\\x72\\xaf\\x6d\\ \\xa4\\xbf\\xdf\\x1b\\x7a\\xf9\\x61\\xdb\\x34\\x92\\x7e\\x18\\x86\\xf9\\x51\\x09\\ \\x34\\x8c\\x04\\xa6\\xdb\\x73\\x0c\\x8e\\x42\\xd1\\x01\\xc8\\xfc\\x9e\\xc8\\xa4\\ \\xa3\\x54\\x4e\\x6f\\x64\\x76\\x29\\xdc\\x0e\\x64\\xee\\x04\\xea\\xea\\x31\\x6b\\ \\x4e\\x60\\x5a\\x51\\x4c\\xd7\\x6f\\xa0\\x6e\\x50\\x6f\\x40\\xdd\\x5d\\x58\\xa7\\ \\xfb\\x62\\x5a\\x3e\\xc4\\x4c\\x0c\\xd2\\x70\\xf1\\x7f\\xd0\\x77\\x33\\x9c\\x13\\ \\xc4\\x5d\\x0f\\xfe\\x3a\\x70\\x6f\\x07\\xe3\\xb8\\xe3\\x6c\\x7a\\x54\\x91\\xbe\\ \\x25\\x9b\\x5d\\x4c\\x22\\x89\\x8d\\x24\\x45\\x0b\\x68\\x8d\\x9c\\x8f\\x72\\xf7\\ \\xd1\\xb2\\xeb\\x72\\x9a\\xae\\xf6\\xb0\\x6a\\xfd\\x9d\\x7c\\xf8\\xa4\\xc1\\x23\\ \\xef\\x1c\\xa4\\xa5\\x7d\\x0a\\xff\\x1f\\xa7\\x48\\xb3\\x27\\x67\\x17\\xe2\\x1e\\ \\x00\\x00\\x00\\x00\\x49\\x45\\x4e\\x44\\xae\\x42\\x60\\x82\\", "# # WARNING! All changes made in this file will be lost! 
#", "\\x7d\\x0d\\x9c\\xee\\x0f\\x87\\xa1\\x79\\x3f\\x1c\\xc7\\xcf\\x50\\x60\\xc6\\xbf\\ \\x2c\\xf8\\x4f\\x7f\\x68\\x1e\\x00\\x1d\\xbd\\xe0\\x60\\x0a\\x08\\xb2\\x42\\x70\\ \\x3e\\xb0\\x28\\x0c\\x49\\x19\\x10\\x9e\\x0a\\x34\\x74\\x03\\x6e\\x82\\xf8\\xc3\\ \\xd0\\xf2\\x02\\x7d\\x2f\\x99\\xc1\\xa8\\x45\\x09\\x14\\xd7\\x42\\x5a\\x38\\x0e\\ \\x1d\\x6d\\x44\\x18\\x88\\xf7\\xb7\\x97\\x30\\x27\\x7e\\x47\\xa8\\xde\\xc0\\xb7\\ \\x29\\x87\\xd6\\xa3\\x23\\xd0\\xa9\\x2c\\x5c\\x8f\\x4d\\xc0\\x38\\xe4\\xa1\\xf7\\ \\xeb\\x29\\xf0\\x99\\x8b\\xe3\\xea\\x46\\x60\\xed\\x83\\x64\\x86\\xfb\\x91\\x1c\\ \\x4d\\xa7\\xa6\\xc5\\xa0\\xe0\\x64\\x22\\x93\\xdf\\x9a\\x8b\\x59\\x19\\x8c\\x92\\ \\xb8\\x0d\\xc8\\x81\\x78\\x14\\xc2\\x64\\x91\\x91\\x68\\xc2\\x56\\x17\\xb8\\xce\\ \\x62\\x8d\\x83\\x0d\\x37\\xc1\\x68\\x82\\xdc\\x70\\x77\\x02\\x2c\\xcc\\x05\\xf2\\ \\xc1\\x30\\xe0\\x9c\\xcd\\x1c\\x7b\\xb3\\x8d\\xde\\xcf\\xc2\\xae\\x3e\\xb0\\x0b\\ \\x88\\xb9\\xc1\\x9d\\x03\\x81\\x81\\xc0\\xd3\\xfb\\xc1\\x3c\\x03\\xec\\x43\\x44\\ \\x11\\xb3\\x49\\x9a\\xf3\\x24\\x7c\\x9c\\x45\\x6c\\x8c\\xa0\\xcb\\xef\\xb8\\xe8\\", "\\x5e\\x39\\xee\\xa4\\xc2\\x5c\\xb0\\xfa\\x40\\xb5\\xcb\\xce\\x57\\xb6\\x94\\xdb\\ \\x91\\x36\\x06\\xd6\\xc2\\x4b\\x39\\xa1\\x1c\\x2a\\x00\\x11\\x81\\x0b\\xde\\x82\\ \\xad\\x49\\xc4\\xbf\\x7c\\x93\\x0e\\x6e\\x64\\x37\\x06\\xb5\\x18\\x40\\x9c\\xae\\ \\xfc\\x06\\xe1\\x2d\\x98\\x3b\\x3c\\x50\\xb2\\x1a\\x88\\x09\\x12\\x5a\\x38\\x36\\ \\x0b\\xfa\\x06\\xa0\\xe7\\x5c\\xe7\\xc6\\xa6\\x3d\\x13\\xcc\\x3f\\x61\\xc6\\x87\\ \\xbc\\x30\\x3f\\x42\\xa6\\x63\\x2e\\x52\\x9d\\x1b\\x9b\\x06\\x87\\xb5\\xf2\\x1d\\ \\xf2\\x39\\x3b\\x1b\\x98\\x71\\x04\\xdb\\x17\\xec\\x06\\x7a\\x90\\xba\\x60\\x22\\ \\x5d\\x57\\x83\\x31\\x59\\x90\\xfe\\x81\\x8d\\x41\\x2b\\x1b\\xfe\\xf4\\xdb\\x4d\\ \\x4e\\x93\\xc3\\x8b\\x95\\x50\\x0b\\x66\\xe7\\x20\\xba\\x63\\x51\\x08\\x98\\xb9\\ \\x0f\\xc3\\x40\\x37\\xd6\\xca\\x77\\xe8\\xe0\\x16\\xc2\\x24\\x72\\x35\\x01\\x7a\\ \\x02\\x73\\xdc\\xaf\\xe2\\x4e\\x9a\\x00\\xd9\\x41\\x38\\xf8\\x2b\\xd2\\x30\\xa4\\ 
\\x41\\x48\\x2f\\xdb\\x01\\xa9\\x7d\\x09\\xd2\\x40\\xaf\\xe4\\x2a\\x92\\xe8\\xa7\\ \\xce\\xfb\\x0d\\xbd\\x1e\\x43\\xcb\\x85\\x2a\\xcb\\x50\\xe0\\x15\\x14\\x7e\\xd3\\", "-*- # Resource object code # # Created by: The Resource Compiler for", "\\xfb\\x85\\xaa\\xca\\x90\\xce\\xc3\\x0e\\x58\\x8c\\x95\\x98\\x29\\x65\\x75\\x51\\ \\x60\\x17\\xfa\\x40\\x68\\x95\\x50\\x5d\\x25\\xd2\\x39\\xa9\\x12\\xe3\\x24\\x1e\\ \\x96\\x78\\x59\\xe2\\xff\\x6c\\x07\\x3c\\x7c\\xac\\xf4\\x32\\x52\\xde\\x60\\x89\\ \\x47\\x25\\x5e\\x93\\xf2\\xc6\\x49\\xfe\\xcb\\x24\\x96\\x4b\\x3c\\x23\\xf1\\x93\\ \\x94\\x76\\x8f\\x64\\x0e\\x74\\x2c\\xfe\\xcb\\x12\\xdf\\xe8\\x18\\x52\\x9c\\x46\\ \\x29\\x65\\x9c\\x54\\x92\\x24\\xb1\\x54\\xad\\x9c\\x50\\x35\\x31\\xad\\x45\\x6a\\ \\x20\\x2e\\x93\\xad\\xd0\\x1e\\x87\\xa4\\xa3\\xf6\\x49\\xe4\\xd4\\xc1\\x67\\x57\\ \\x02\\x8f\\x03\\xc7\\x0b\\x18\\xd0\\x2f\\x9f\\xe9\\x47\\xe0\\xa5\\xde\\xf0\\xc7\\ \\x3a\\x60\\x4c\\xf8\\x2f\\xb5\\x80\\x40\\x1b\\x49\\x6b\\xa1\\xaf\\x20\\x2f\\x0a\\ \\x39\\xb7\\x02\\xfb\\x66\\x01\\xf7\\x39\\xb2\\x1c\\x85\\xe4\\x23\\xe8\\xa3\\x65\\ \\xf0\\xf0\\x4f\\xf0\\xc2\\x30\\xa8\\x9d\\x65\\xbf\\x97\\xbf\\x0e\\xd2\\x76\\x42\\ \\xf0\\x5e\\x70\\x1d\\x87\\x82\\x57\\x6c\\x80\\x06\\x93\\x20\\x2b\\x44\\x8c\\xbe\\ \\x88\\x5c\\x70\\x2f\\x24\\x9d\\x3a\\x76\\x91\\x49\\x53\\xfb\\x67\\x50\\x97\\x06\\", "\" qt_resource_struct_v1 = b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \" qt_resource_struct_v2 = b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\", "qt_resource_name = b\"\\ \\x00\\x05\\ \\x00\\x73\\x5e\\x63\\ \\x00\\x6c\\ \\x00\\x6f\\x00\\x67\\x00\\x6f\\x00\\x73\\ \\x00\\x08\\ \\x0a\\x61\\x5a\\xa7\\ \\x00\\x69\\ \\x00\\x63\\x00\\x6f\\x00\\x6e\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\ \" qt_resource_struct_v1", 
"\\x4f\\x4f\\x96\\x28\\x96\\xae\\x40\\xc1\\xfd\\xf6\\x04\\x2a\\xb0\\x14\\xc9\\x3f\\ \\xde\\x59\\xe8\\x34\\x89\\x4b\\xa4\\x94\\x6e\\xaa\\x5d\\x6a\\xea\\x45\\xa1\\x2f\\ \\x2c\\x64\\x6d\\x41\\x2a\\xcd\\x75\\xf4\\xe3\\x11\\x89\\xd5\\x92\\xeb\\x7d\\xe9\\ \\xd2\\x54\\xe9\\x17\\xa4\\x61\\x45\\x12\\x1f\\x4a\\x54\\x2a\\xc6\\x46\\x29\\x6d\\ \\x8c\\xc4\\x9b\\xb2\\x58\\x2b\\xa5\\xa7\\x39\\x8b\\x3d\\x29\\x75\\x4f\\x56\\x94\\ \\xb5\\x3a\\x4c\\x4c\\x01\\x0e\\x4a\\xc6\\x33\\x8a\\xd2\\xa4\\x38\\x01\\x1d\\x24\\ \\xa6\\xf6\\xb1\\xcb\\xa4\\xf3\\x5d\\x8a\\xe6\\x4e\\x51\\x1d\\x3f\\xeb\\x3b\\xa4\\ \\xd5\\xc4\\xf4\\x11\\x12\\xf1\\xcf\\x91\\x1e\\x47\\x9a\\x99\\x2d\\xe5\\xf8\\xa5\\ \\x2b\\x4c\\xe9\\x5b\\x54\\x17\\x45\\xd6\\x03\\x6e\\xc9\\xdf\\x5d\\xe2\\x5c\\x7b\\ \\x66\\xe8\\xc9\\x97\\xae\\x34\\xf4\\xe9\\x09\\x7b\\x28\\xba\\x4a\\x68\\xb1\\xd0\\ \\xd9\\x16\\xd4\\xbc\\x1d\\xe9\\x2a\\xbf\\xc4\\x15\\x12\\xf3\\xd5\\x4a\\xab\\x2c\\ \\x8e\\x4b\\xb9\\x23\\xa5\\x67\\x90\\xae\\x2a\\x94\\x58\\xa9\\x06\\xa4\\x20\\x4b\\ \\xed\\xcb\\x0b\\xf6\\x4a\\xbc\\xab\\x38\\xfb\\x55\\x81\\x65\\x4f\\x79\\xf9\\x46\\", "\\x3a\\xdd\\x8c\\x98\\xe5\\x61\\x58\\x1b\\x7c\\x92\\x0d\\xdf\\x7f\\x19\\x87\\x3b\\ \\x37\\x41\\xd6\\xef\\x90\\x17\\xc1\\x2a\\xb6\\x59\\xb2\\x7b\\x3b\\x70\\xc8\\x07\\ \\xc6\\x01\\x60\\x37\\xa4\\x7d\\x41\\x7c\\xee\\xeb\\xb4\\xfc\\xb1\\x14\\xce\\xc6\\ \\x61\\x76\\x16\\xb4\\x5f\\x64\\xab\\x6c\\xea\\x7a\\x98\\xb3\\x01\\x4e\\xcc\\xb1\\ \\xbb\\xac\\x74\\x83\\xcd\\x54\\x64\\x43\\xef\\x03\\x70\\xfe\\x59\\xe2\\xb4\\x12\\ \\x66\\x02\\x8c\\xf8\\x89\\xac\\xcc\\x7b\\x71\\x11\\x63\\x1f\\x59\\xd4\\xb4\\x6f\\ \\x87\\xce\\x0c\\x78\\xe0\\x1d\\xba\\x78\\xdf\\x60\\x32\\x27\\xed\\xaf\\xea\\x2c\\ \\x7a\\x44\\xa8\\x79\\x0b\\xd2\\x6c\\xa4\\xbb\\xd1\\x67\\x31\\xb4\\x40\\x48\\xaf\\ \\x21\\xf5\\x44\\x32\\x0b\\xa5\\x9b\\x4c\\xad\\xb0\\xd0\\xb3\\x42\\x2b\\x85\\x1a\\ \\xf6\\x20\\xbd\\x88\\xda\\x8f\\xa1\\x4f\\x84\\x76\\x75\\x22\\xcd\\x44\\x1a\\x8d\\ \\x74\\x77\\x92\\x6a\\x8e\\xd8\\x01\\xcb\\x9a\\xed\\x95\\xdc\\xa5\\x12\\xb3\\x24\\ 
\\x5e\\xb7\\x21\\xf7\\x86\\x5b\\x9a\\xd4\\xdd\\xce\\xf4\\xec\\x95\\xa6\\xb9\\xa5\\ \\xbc\\xa1\\xb2\\x08\\x48\\x7d\\xc7\\x4b\\xf3\\x3d\\x92\\xb1\\x51\\x41\\x24\\x15\\", "\\x0b\\x13\\x01\\x00\\x9a\\x9c\\x18\\x00\\x00\\x00\\x07\\x74\\x49\\x4d\\x45\\x07\\ \\xe3\\x01\\x1e\\x09\\x34\\x07\\xfb\\x3d\\x97\\x4b\\x00\\x00\\x00\\x1d\\x69\\x54\\ \\x58\\x74\\x43\\x6f\\x6d\\x6d\\x65\\x6e\\x74\\x00\\x00\\x00\\x00\\x00\\x43\\x72\\ \\x65\\x61\\x74\\x65\\x64\\x20\\x77\\x69\\x74\\x68\\x20\\x47\\x49\\x4d\\x50\\x64\\ \\x2e\\x65\\x07\\x00\\x00\\x18\\xa1\\x49\\x44\\x41\\x54\\x68\\xde\\x35\\x99\\x67\\ \\x7c\\x55\\x55\\xfa\\xb6\\xaf\\xbd\\x4f\\x4b\\x4e\\x7a\\x25\\x09\\x25\\x09\\x09\\ \\x1d\\x44\\x91\\x26\\x8a\\xe8\\x20\\x4a\\x71\\x44\\x01\\xb1\\x2b\\x16\\xb0\\xcc\\ \\x60\\xc1\\xae\\xef\\x08\\x63\\x77\\xfe\\x3a\\xa2\\xa0\\x88\\x0c\\x16\\xac\\x88\\ \\x0a\\xa2\\xa0\\x02\\x4a\\x13\\x51\\x40\\x40\\x7a\\x42\\x4d\\x02\\x49\\x48\\x2f\\ \\x27\\x27\\xa7\\xee\\xfb\\xfd\\xb0\\xb7\\x9f\\xf2\\xcb\\x29\\xfb\\xac\\x67\\xad\\ \\xfb\\xb9\\xcb\\x7a\\x8c\\xbc\\xe8\\x70\\xcd\\xef\\xdc\\xc1\\x75\\x07\\x80\\x10\\ \\x3c\\xd0\\x7b\\x04\\x1b\\xbb\\xee\\x66\\xcf\\xe6\\x28\\xae\\x19\\x49\\x30\\xa6\\ \\x83\\xfa\\x39\\x30\\xe3\\x1c\\xe8\\x0d\\x0c\\xee\\xf0\\x32\\xe3\\xd9\\x08\\x2c\\", "b\"\\ \\x00\\x00\\x19\\x3d\\ \\x89\\ \\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0d\\x49\\x48\\x44\\x52\\x00\\ \\x00\\x00\\x32\\x00\\x00\\x00\\x32\\x08\\x06\\x00\\x00\\x00\\x1e\\x3f\\x88\\xb1\\ \\x00\\x00\\x00\\x06\\x62\\x4b\\x47\\x44\\x00\\xff\\x00\\xff\\x00\\xff\\xa0\\xbd\\ \\xa7\\x93\\x00\\x00\\x00\\x09\\x70\\x48\\x59\\x73\\x00\\x00\\x0b\\x13\\x00\\x00\\ \\x0b\\x13\\x01\\x00\\x9a\\x9c\\x18\\x00\\x00\\x00\\x07\\x74\\x49\\x4d\\x45\\x07\\ \\xe3\\x01\\x1e\\x09\\x34\\x07\\xfb\\x3d\\x97\\x4b\\x00\\x00\\x00\\x1d\\x69\\x54\\ \\x58\\x74\\x43\\x6f\\x6d\\x6d\\x65\\x6e\\x74\\x00\\x00\\x00\\x00\\x00\\x43\\x72\\ \\x65\\x61\\x74\\x65\\x64\\x20\\x77\\x69\\x74\\x68\\x20\\x47\\x49\\x4d\\x50\\x64\\ 
\\x2e\\x65\\x07\\x00\\x00\\x18\\xa1\\x49\\x44\\x41\\x54\\x68\\xde\\x35\\x99\\x67\\ \\x7c\\x55\\x55\\xfa\\xb6\\xaf\\xbd\\x4f\\x4b\\x4e\\x7a\\x25\\x09\\x25\\x09\\x09\\", "\\x01\\x86\\x1e\\x87\\x8c\\xcb\\xdc\\x70\\xf4\\xaf\\xe0\\xeb\\x05\\x1a\\xe1\\xf2\\ \\xfd\\xe8\\xe5\\x0e\\xfc\\x2f\\x02\\xdb\\x6f\\x03\\xae\\xb0\\xe9\\x76\\x64\\x3d\\ \\x54\\x25\\x3a\\x2a\\x6f\\xc1\\x59\\xcb\\x66\\xea\\xdc\\x1d\\x58\\x18\\x74\\xd0\\ \\x07\\x6a\\x27\\x41\\x69\\x8c\\x94\\x21\\x8f\\x30\\x90\\x1a\\x92\\x9c\\xdf\\x5c\\ \\x1b\\xef\\x43\\x78\\xdd\\x2a\\xa8\\xc9\\xc5\\xc4\\x65\\x92\\x07\\x18\\x09\\xb6\\ \\xa2\\x33\\xd4\\xb6\\xb1\\x05\\x6b\\xed\\x84\\x5d\\x56\\x0a\\x4c\\x32\\x00\\xc0\\ \\x3a\\x0f\\xaa\\xc6\\x90\\xfc\\x9c\\xcd\\x03\\x38\\x11\\x20\\xd7\\xb1\\x80\\x03\\ \\x42\\x90\\x71\\xbd\\x17\\xfe\\xbc\\xd0\\x71\\x6e\\x2e\\x7b\\x03\\x2e\\xdd\\x4d\\ \\xe4\\x83\\x00\\x34\\x80\\xb1\\x74\\x08\\xc4\\xaf\\x43\\xe4\\x80\\x6b\\x05\\x5c\\ \\x12\\x84\\xca\\xae\\x8e\\x2f\\xe9\\x84\\x90\\xcb\\xde\\x9d\\x92\\x33\\x44\\xe9\\ \\xa0\\x85\\x24\\xb0\\x86\\x43\\x52\\x12\\xdc\\xd5\\x88\\xc1\\x6b\\x74\\xa3\\x83\\ \\x14\\x22\\x54\\x60\\x50\\x13\\xbd\\x04\\xbe\\xee\\x83\\xf9\\xdb\\x4d\\x16\\xdb\\ \\x80\\xc3\\x5e\\x68\\xbe\\xd5\\xd1\\xa1\\x69\\x10\\x1a\\x69\\xef\\xf2\\x7a\\xe0\\", "\\xfd\\x46\\x69\\x2b\\xd2\\x85\\xa5\\x12\\x6f\\x49\\x6c\\x96\\x78\\x43\\x7a\\xd8\\ \\x23\\xf1\\xaa\\xc4\\x26\\x69\\xd8\\xc5\\x12\\x1f\\xa9\\x91\\x98\\xe4\\xba\\x5d\\ \\x71\\xf6\\xa9\\x81\\xb0\\xc2\\x5c\\x2b\\xb9\\x6e\\xd1\\x7a\\xc2\\xba\\x17\\xe9\\ \\x3b\\x62\\x6a\\xf0\\xbd\\x22\\x0d\\xf5\\xab\\x82\\x45\\x22\\xb2\\x1b\\x55\\xef\\ \\x40\\x91\\x25\\xe8\\xcb\\x38\\x52\\x11\\xd2\\x14\\xa4\\x9b\\x4c\\xc9\\x6b\\x4a\\ \\x49\\x1e\\x55\\x2c\\x43\\x5f\\x06\\x91\\xee\\x43\\x72\\x17\\x4a\\xfc\\x4d\\x62\\ \\x92\\x64\\x9e\\x27\\x2b\\xbb\\x50\\x81\\x95\\xe8\\x47\\x0b\\xad\\x11\\xfa\\x46\\ \\xe8\\x90\\x90\\x16\\x20\\x65\\xa4\\x49\\xdc\\xee\\x2c\\xf2\\x6b\\x89\\xef\\xa4\\ \\xec\\xcb\\xa5\\xf7\\x0d\\xa9\\xa0\\xbf\\xc4\\xff\\x24\\xf6\\x4b\\xbd\\x2f\\x95\\ 
\\xf2\\x87\\x4b\\xfc\\x2c\\x15\\x3d\\x28\\x31\\x47\\x61\\x02\\x12\\x0f\\xa9\\x93\\ \\x5a\\x9d\\xa6\\x46\\x32\\x27\\xa9\\x9a\\x4a\\x7d\\x8d\\xb4\\x84\\x98\\x54\\x38\\ \\x5e\\xe2\\x62\\x45\\xf8\\x42\\xaf\\x13\\x93\\xa1\\xc7\\x11\\xdd\\x40\\xa3\\x20\\ \\xbe\\x07\\xdc\\x33\\x93\\xc0\\x17\\x81\\xd0\\x00\\xf0\\x1d\\x22\\xb8\\x3c\\x82\\", "Resource Compiler for PyQt5 (Qt v5.12.5) # # WARNING! All changes made in", "\\x4e\\x8e\\x7e\\xda\\xcc\\xa6\\x02\\x1b\\x30\\xa5\\x0e\\xc1\\xa4\\x01\\xed\\x40\\ \\x7f\\xc0\\x72\\x40\\x97\\xb2\\x1d\\xdc\\x73\\xbd\\x18\\xdb\\x87\\x43\\x60\\x18\\ \\xf6\\xf5\\x45\\xa1\\x73\\x5a\\x47\\x1c\\x85\\x1f\\x07\\xb8\\x11\\x11\\x0c\\x63\\ \\x09\\x5c\\xf1\\x5f\\xd8\\x52\\x02\\xc1\\x73\\x81\\x09\\x58\\x4c\\xc4\\x34\\x1f\\ \\x02\\xb7\\x0b\\x2b\\xf2\\x10\\x15\\xf4\\x27\\x07\\x51\\xe5\\x3a\\x4b\\xbf\\xd4\\ \\x67\\x20\\xb0\\x0d\\x3c\\x60\\xe8\\x35\\xd4\\x36\\x13\\x52\\xd7\\x39\\xc7\\x9a\\ \\x0a\\xbc\\x66\\x6f\\xc4\\xe6\\x75\\xb0\\x3c\\x03\\x1e\\x5d\\x09\\xc5\\x37\\x26\\ \\x43\\xa8\\x14\\xe8\\x05\\xae\\xc3\\x30\\xb1\\x8c\\xda\\xd7\\xa2\\xfc\\xd6\\xcb\\ \\xee\\xcf\\x7c\\xa7\\x80\\x11\\x0d\\x90\\xf5\\x36\\xf0\\x41\\x3e\\x9c\\xfc\\xbb\\ \\xb3\\x78\\xb7\\xf3\\x37\\x08\\x29\\xef\\x40\\x7b\\x8a\\x53\\x72\\x7f\\xe0\\x38\\ \\xe2\\x7a\\x0c\\x42\\xb6\\xbe\\xf8\\x1f\\x86\\x60\\x0c\\xb8\\x11\\xc8\\x43\\x0c\\ \\x25\\x46\\x04\\x8f\\x71\\x2d\\xf4\\xad\\x27\\x7c\\xf8\\x67\\x5a\\xc8\\x23\\x86\\ \\xa8\\xc5\\xc0\\xed\\xde\\xce\\xe0\\x41\\xf7\\x81\\x3e\\x46\\x65\\x01\\x14\\x38\\", "\\x92\\x3b\\xda\\xbe\\x1f\\x5e\\x89\\x74\\xa5\\x5f\\x62\\xae\\x7d\\xb5\\x6a\\x3c\\ \\x29\\xbd\\xe8\\x91\\xe6\\x23\\x3d\\x85\\xd4\\xbb\\x44\\x31\\x96\\xa9\\x99\\xcd\\ \\x8a\\xf1\\x95\\x94\\x3b\\x52\\xee\\x9e\\x31\\xbb\\xd8\\x8c\\x97\\x80\\xc6\\x5c\\ \\x68\\xac\\xa3\\x35\\xdf\\x6e\\xde\\x5e\\xc0\\xf4\\x27\\xc1\\x58\\xdc\\x17\\xac\\ \\x73\\x1c\\x00\\x45\\xb0\\x6e\\x69\\xe4\\x8f\\xc1\\xc2\\x72\\xfa\\x23\\x82\\x1d\\ \\x1a\\x39\\x05\\x28\\xc5\\x01\\x5e\\x21\\xec\\xbd\\x15\\xe3\\xb1\\x77\\xf1\\x2c\\ 
\\x0a\\x63\\xd4\\x00\\x87\\x46\\x3b\\x1e\\x6c\\x34\\x7c\\xeb\\xc7\\xf5\\xaf\\x4f\\ \\xed\\xd3\\xa8\\x79\\x9d\\x18\\x93\\x70\\xd7\\x1d\\x83\\xdd\\x83\\xa0\\xc0\\x24\\ \\xb6\\x6e\\x31\\x01\\x6e\\xc6\\x04\\x92\\x38\\x82\\xcb\\x5c\\x08\\x66\\x14\\xd6\\ \\x16\\x51\\x5b\\xbe\\x8d\\x72\\x32\\xd8\\x88\\x97\\x29\\x40\\xbf\\xba\\x7d\\x98\\ \\x0d\\x09\\x10\\x6f\\x05\\x8e\\x02\\x9e\\x10\\xc4\\x60\\x67\\xa9\\x9d\\xfd\\xaf\\ \\xde\\x0d\\xe6\\x4a\\x0f\\x58\\x3d\\x1d\\x90\\x95\\x13\\x9b\\xf6\\x2d\\x1f\\xbd\\ \\x15\\xe4\\x88\\xdf\\x2e\\xa0\\x15\\x38\\x0b\\x1c\\x2f\\x82\\xd0\\x5c\\xa0\\xf0\\", "\\x31\\xb7\\xba\\xe1\\x82\\x9f\\x81\\x02\\x03\\xa2\\x99\\x30\\x14\\x0e\\xa4\\xc0\\ \\x13\\xc7\\x21\\x7b\\x01\\x70\\x36\\xcb\\x09\\x43\\x89\\xc4\\x1f\\x28\\x63\\xe1\\ \\x0a\\x9b\\xc9\\x32\\x9d\\x30\\xd5\\xd5\\xe9\\x91\\x46\\xe0\\xc4\\x18\\xd0\\x83\\ \\x16\\x78\\xea\\x1c\\x21\\x1c\\x04\\xa1\\xfb\\xc8\\x9e\\x9f\\x41\\x4d\\x1e\\xc4\\ \\x5e\\x39\\xee\\xa4\\xc2\\x5c\\xb0\\xfa\\x40\\xb5\\xcb\\xce\\x57\\xb6\\x94\\xdb\\ \\x91\\x36\\x06\\xd6\\xc2\\x4b\\x39\\xa1\\x1c\\x2a\\x00\\x11\\x81\\x0b\\xde\\x82\\ \\xad\\x49\\xc4\\xbf\\x7c\\x93\\x0e\\x6e\\x64\\x37\\x06\\xb5\\x18\\x40\\x9c\\xae\\ \\xfc\\x06\\xe1\\x2d\\x98\\x3b\\x3c\\x50\\xb2\\x1a\\x88\\x09\\x12\\x5a\\x38\\x36\\ \\x0b\\xfa\\x06\\xa0\\xe7\\x5c\\xe7\\xc6\\xa6\\x3d\\x13\\xcc\\x3f\\x61\\xc6\\x87\\ \\xbc\\x30\\x3f\\x42\\xa6\\x63\\x2e\\x52\\x9d\\x1b\\x9b\\x06\\x87\\xb5\\xf2\\x1d\\ \\xf2\\x39\\x3b\\x1b\\x98\\x71\\x04\\xdb\\x17\\xec\\x06\\x7a\\x90\\xba\\x60\\x22\\ \\x5d\\x57\\x83\\x31\\x59\\x90\\xfe\\x81\\x8d\\x41\\x2b\\x1b\\xfe\\xf4\\xdb\\x4d\\ \\x4e\\x93\\xc3\\x8b\\x95\\x50\\x0b\\x66\\xe7\\x20\\xba\\x63\\x51\\x08\\x98\\xb9\\", "\\xff\\x6f\\xea\\x03\\x4d\\x1d\\x30\\xa4\\x1a\\x4e\\x5a\\xf6\\xae\\xf5\\x07\\x56\\ \\x26\\x13\\xab\\x7e\\x04\\x77\\x47\\x0d\\x78\\xb7\\x62\\xfc\\xd3\\x42\\x27\\x0d\\ \\xe8\\x16\\x87\\x87\\x4f\\x42\\x61\\x1d\\x94\\xf7\\x87\\x25\\xe9\\x7f\\x99\\x67\\ \\x18\\xdb\\x09\\x17\\x1e\\x06\\x6f\\x1d\\xb8\\x9b\\x0c\\x4e\\x8d\\x16\\x47\\xbb\\ 
\\xc2\\x19\\xd3\\x2e\\x22\\x0b\\x28\\x8c\\x83\\xab\\x0d\\x7a\\x1d\\x36\\x71\\x9d\\ \\x4e\\xa2\\xa3\\x28\\x08\\xfd\\xe2\\x18\\xc9\\xd0\\x6a\\x40\\x72\\x04\\x32\\x0f\\ \\x01\\x6d\\x2e\\xf0\\x5a\\x90\\x21\\xac\\xae\\x60\\x76\\x02\\x27\\x9c\\x23\\x6d\\ \\x72\\x6e\\x17\\x9a\\x9c\\x4b\\xfa\\x46\\x03\\x12\\x65\\x53\\xe3\\x28\\xc7\\x9d\\ \\x1e\\x73\\x12\\x60\\x93\\x09\\xcd\\x16\\x04\\x4d\\xc8\\xb3\\x60\\x53\\x1b\\x9a\\ \\x25\\xd4\\xf4\\x15\\x52\\x96\\x7d\\x4d\\x1a\\x5e\\x63\\xcf\\x3c\\x1e\\x93\\x2d\\ \\x8e\\xd6\\xbd\\x48\\x45\\x09\\x92\\xbb\\xc4\\xf6\\x59\\x29\\xdd\\xd5\\xf2\\xab\\ \\xa1\\xaf\\x85\\xde\\x77\\x66\\x23\\xed\\xfb\\x90\\x6e\\x47\\x32\\x06\\x4b\\x3c\\ \\x27\\x71\\x97\\x74\\x5e\\x9a\\xac\\x16\\xd4\\x20\\x64\\x1d\\x40\\xea\\x9f\\x2e\\", "\\x1b\\xd8\\xa4\\xb2\\x01\\x1f\\x75\\x98\\xf9\\x8f\\x42\\xcd\\x1c\\x5a\\xcc\\xe1\\ \\xb8\\x83\\x98\\x44\\xb0\\x68\\x02\\x7c\\xc0\\x1e\\xe0\\x9a\\x74\\xa0\\x08\\xa8\\ \\x05\\x16\\x79\\x30\\x82\\x83\\x70\\xd3\\x08\\xc9\\x95\\xd0\\x91\\xe8\\x14\\x60\\ \\x02\\xe9\\xf6\\x8e\\xfa\\x0e\\x50\\x7b\\x67\\x88\\x46\\x20\\x94\\x05\\x89\\x7d\\ \\xa3\\x50\\xd3\\xe2\\x7c\\xae\\x0b\\x60\\x80\\x4a\\xe0\\xf8\\x60\\xdc\\xcf\\x54\\ \\xd9\\x4d\\x45\\x1c\\xf8\\xc2\\xfe\\x21\\xcf\\x09\\x98\\xf9\\x13\\x5c\\xe9\\x3c\\ \\x36\\xd9\\xf9\\xea\\x70\\xc0\\xb7\\x06\\xf7\\xba\\xc5\\x0c\\xe6\\x01\\xd2\\x71\\ \\x93\\x42\\x94\\x44\\x0e\\x63\\x31\\x91\\xfa\\x9a\\x67\\x68\\xe7\\x26\\x16\\x58\\ \\xc9\\xb8\\x5d\\xce\\x77\\xe5\\x34\\xea\\x21\\x60\\x7b\\x29\\x8c\\xbd\\x0c\\xc8\\ \\x05\\xd6\\x47\\xa1\\xf2\\x28\\x14\\xc3\\xe9\\x3b\\x0c\\x62\\x45\\xb5\\xc4\\x32\\ \\x6a\\x09\\xf9\\x21\\xec\\x03\\x9f\\x0f\\x3c\\xfd\\xa0\\xc6\\x03\\x41\\xa0\\x3c\\ \\x0f\\x0a\\xbf\\x12\\xe9\\x5b\\x1a\\x61\\xc3\\x17\\xf0\\xe7\\xaf\\xd0\\x2c\\xa8\\ \\x2e\\x80\\xa6\\x49\\xc0\\x14\\xec\\x4f\\x36\\x00\\x27\\x81\\xef\\x60\\x76\\x0d\\", "\\xcf\\x20\\x3b\\x00\\x4d\\x3a\\x49\\xfd\\xe7\\x65\\xac\\x4f\\x86\\x1b\\xde\\x05\\ \\xe3\\x89\\x31\\xc4\\x9b\\x5f\\x76\\xac\\x6c\\x22\\x2e\\x8e\\x41\\xff\\x77\\xed\\ 
\\xbc\\x9f\\x93\\x83\\x36\\x8e\\xc2\\xe0\\x12\\x3a\\xb8\\x80\\x24\\xbe\\xb0\\x83\\ \\xd7\\xcd\\x4b\\x09\\x2f\\x81\\x76\\x13\\x52\\x0c\\xf0\\xec\\x01\\xf3\\x43\\xe0\\ \\xb7\\xf1\\x54\\xec\\xfb\\x37\\xf9\\xb1\\x12\\x02\\x34\\x93\\xe9\\xdf\\x0d\\xe6\\ \\xd7\\x90\\x60\\xc1\\xed\\x2b\\xa8\\x5e\\xf7\\x22\\x4b\\x0a\\x07\\x91\\x5b\\xe6\\ \\xa1\\x3c\\x2d\\xc6\\x4b\\x67\\x42\\xb8\\xae\\x6e\\x7e\\x73\\x5e\\x72\\xaf\\x6d\\ \\xa4\\xbf\\xdf\\x1b\\x7a\\xf9\\x61\\xdb\\x34\\x92\\x7e\\x18\\x86\\xf9\\x51\\x09\\ \\x34\\x8c\\x04\\xa6\\xdb\\x73\\x0c\\x8e\\x42\\xd1\\x01\\xc8\\xfc\\x9e\\xc8\\xa4\\ \\xa3\\x54\\x4e\\x6f\\x64\\x76\\x29\\xdc\\x0e\\x64\\xee\\x04\\xea\\xea\\x31\\x6b\\ \\x4e\\x60\\x5a\\x51\\x4c\\xd7\\x6f\\xa0\\x6e\\x50\\x6f\\x40\\xdd\\x5d\\x58\\xa7\\ \\xfb\\x62\\x5a\\x3e\\xc4\\x4c\\x0c\\xd2\\x70\\xf1\\x7f\\xd0\\x77\\x33\\x9c\\x13\\ \\xc4\\x5d\\x0f\\xfe\\x3a\\x70\\x6f\\x07\\xe3\\xb8\\xe3\\x6c\\x7a\\x54\\x91\\xbe\\", "\\x12\\x84\\xca\\xae\\x8e\\x2f\\xe9\\x84\\x90\\xcb\\xde\\x9d\\x92\\x33\\x44\\xe9\\ \\xa0\\x85\\x24\\xb0\\x86\\x43\\x52\\x12\\xdc\\xd5\\x88\\xc1\\x6b\\x74\\xa3\\x83\\ \\x14\\x22\\x54\\x60\\x50\\x13\\xbd\\x04\\xbe\\xee\\x83\\xf9\\xdb\\x4d\\x16\\xdb\\ \\x80\\xc3\\x5e\\x68\\xbe\\xd5\\xd1\\xa1\\x69\\x10\\x1a\\x69\\xef\\xf2\\x7a\\xe0\\ \\xd8\\x0b\\x31\\x98\\x52\\x03\\xae\\x3f\\xec\\x33\\xa8\\xf0\\x91\\x7a\\xc8\\x2e\\ \\xa4\\xc5\\xd1\\x69\\x01\\x3d\\x7f\\x04\\x4e\\x7b\\x9c\\x73\\xaa\\xb2\\xd3\\xcd\\ \\xf0\\xd5\\xb4\\x7d\\xd0\\x48\\x3c\\x03\\x8c\\x4f\\x81\\xf6\\x5c\\xe0\\x24\\x06\\ \\xeb\\x60\\xfa\\xff\\xa0\\x17\\x50\\x59\\x07\\xfc\\x66\\x7b\\xf0\\xdc\\xd3\\xb6\\ \\xb5\\xca\\xe9\\xc0\\xc7\\x5e\\x52\\xc1\\x16\\xe1\\x8f\\x53\\xed\\xbd\\xc9\\x59\\ \\x4c\\x8e\\xf1\\x23\\x7e\\x5c\\x00\\x24\\xe2\\x82\\xce\\xcb\\x70\\xd5\\xbe\\xc8\\ \\xbc\\x20\\x50\\x09\\x1c\\x48\\x80\\xd6\\x12\\xd8\\x3d\\x1c\\x0e\\xe6\\xdb\\x4b\\ \\x31\\x80\\xb2\\x64\\x68\\xba\\x06\\x7a\\x8c\\x6b\\x23\\x3a\\xa0\\x82\\xf8\\x8c\\ \\x08\\x47\\xc7\\x42\\xb3\\xdb\\x5e\\xb2\\xdf\\xb6\\x87\\xe4\\x1b\\xe0\\x2b\\x8b\\", 
"\\xf9\\xfa\\xb1\\xba\\xb6\\xfc\\x2e\\x69\\xd0\\x10\\x19\\x0d\\x7c\\xab\\xe5\\x13\\ \\x5d\\xdc\\x64\\x3e\\x40\\xda\\xaf\\x03\\xc0\\xbf\\x0e\\xba\\x75\\xf2\\xf5\\xe7\\ \\xa2\\xc7\\x06\\x38\\x5a\\x6b\\x32\\xfe\\x50\\x1a\\x19\\xd5\\x51\\x68\\x4f\\x83\\ \\x84\\x28\\xd4\\xd6\\xd1\\xf9\\x3c\\x6c\\x19\\x0f\\x7d\\x2c\\xe8\\xfe\\x0a\\xb8\\ \\xde\\x99\\x02\\x6d\\x57\\x41\\xe6\\x87\\x90\\xe1\\x42\\x97\\x54\\x41\\x71\\x19\\ \\x9d\\x57\\x99\\xf8\\xdf\\x4b\\x41\\x8b\\x2e\\x27\\x16\\xbe\\x06\\x4f\\xc6\\x6b\\ \\xe0\\xa9\\x85\\xba\\xc7\\x61\\xf8\\xbb\\x70\\x32\\x8c\\x96\\xb5\\x12\\x49\\x08\\ \\xe2\\x5b\\x1e\\x82\\x77\\xee\\x21\\xce\\x2d\\x1c\\xa2\\x3b\\x3d\\x88\\x62\\xb2\\ \\x95\\xdd\\x5c\\xc6\\xf9\\x9e\\x8d\\xec\\x1b\\xf8\\x15\\xc5\\x5d\\x8a\\xd9\\xfc\\ \\xfd\\x97\\x4c\\xce\\x3c\\x97\\xfd\\xf7\\x1f\\xa7\\xab\\xd9\\x40\\xc3\\x2b\\xb7\\ \\x61\\xac\\xfc\\x3d\\x41\\x57\\x7f\\xdf\\xdd\\x1e\\xe6\\xac\\x05\\x4a\\x4c\\xea\\ \\x17\\x59\\x64\\xee\\xcb\\xc1\\x35\\x0f\\x38\\xde\\x1d\\x54\\x04\\xf4\\x71\\xc2\\ \\xcf\\x20\\x3b\\x00\\x4d\\x3a\\x49\\xfd\\xe7\\x65\\xac\\x4f\\x86\\x1b\\xde\\x05\\", "\\xa3\\x54\\x4e\\x6f\\x64\\x76\\x29\\xdc\\x0e\\x64\\xee\\x04\\xea\\xea\\x31\\x6b\\ \\x4e\\x60\\x5a\\x51\\x4c\\xd7\\x6f\\xa0\\x6e\\x50\\x6f\\x40\\xdd\\x5d\\x58\\xa7\\ \\xfb\\x62\\x5a\\x3e\\xc4\\x4c\\x0c\\xd2\\x70\\xf1\\x7f\\xd0\\x77\\x33\\x9c\\x13\\ \\xc4\\x5d\\x0f\\xfe\\x3a\\x70\\x6f\\x07\\xe3\\xb8\\xe3\\x6c\\x7a\\x54\\x91\\xbe\\ \\x25\\x9b\\x5d\\x4c\\x22\\x89\\x8d\\x24\\x45\\x0b\\x68\\x8d\\x9c\\x8f\\x72\\xf7\\ \\xd1\\xb2\\xeb\\x72\\x9a\\xae\\xf6\\xb0\\x6a\\xfd\\x9d\\x7c\\xf8\\xa4\\xc1\\x23\\ \\xef\\x1c\\xa4\\xa5\\x7d\\x0a\\xff\\x1f\\xa7\\x48\\xb3\\x27\\x67\\x17\\xe2\\x1e\\ \\x00\\x00\\x00\\x00\\x49\\x45\\x4e\\x44\\xae\\x42\\x60\\x82\\ \" qt_resource_name = b\"\\ \\x00\\x05\\", "\\xe2\\x7a\\x0c\\x42\\xb6\\xbe\\xf8\\x1f\\x86\\x60\\x0c\\xb8\\x11\\xc8\\x43\\x0c\\ \\x25\\x46\\x04\\x8f\\x71\\x2d\\xf4\\xad\\x27\\x7c\\xf8\\x67\\x5a\\xc8\\x23\\x86\\ 
\\xa8\\xc5\\xc0\\xed\\xde\\xce\\xe0\\x41\\xf7\\x81\\x3e\\x46\\x65\\x01\\x14\\x38\\ \\x81\\x74\\x1c\\x6d\\x17\\xea\\xfc\\x1a\\x29\\x0d\\x69\\x80\\x5b\\x07\\x4e\\xa1\\ \\xfb\\x85\\xaa\\xca\\x90\\xce\\xc3\\x0e\\x58\\x8c\\x95\\x98\\x29\\x65\\x75\\x51\\ \\x60\\x17\\xfa\\x40\\x68\\x95\\x50\\x5d\\x25\\xd2\\x39\\xa9\\x12\\xe3\\x24\\x1e\\ \\x96\\x78\\x59\\xe2\\xff\\x6c\\x07\\x3c\\x7c\\xac\\xf4\\x32\\x52\\xde\\x60\\x89\\ \\x47\\x25\\x5e\\x93\\xf2\\xc6\\x49\\xfe\\xcb\\x24\\x96\\x4b\\x3c\\x23\\xf1\\x93\\ \\x94\\x76\\x8f\\x64\\x0e\\x74\\x2c\\xfe\\xcb\\x12\\xdf\\xe8\\x18\\x52\\x9c\\x46\\ \\x29\\x65\\x9c\\x54\\x92\\x24\\xb1\\x54\\xad\\x9c\\x50\\x35\\x31\\xad\\x45\\x6a\\ \\x20\\x2e\\x93\\xad\\xd0\\x1e\\x87\\xa4\\xa3\\xf6\\x49\\xe4\\xd4\\xc1\\x67\\x57\\ \\x02\\x8f\\x03\\xc7\\x0b\\x18\\xd0\\x2f\\x9f\\xe9\\x47\\xe0\\xa5\\xde\\xf0\\xc7\\ \\x3a\\x60\\x4c\\xf8\\x2f\\xb5\\x80\\x40\\x1b\\x49\\x6b\\xa1\\xaf\\x20\\x2f\\x0a\\", "\\x72\\x6e\\x17\\x9a\\x9c\\x4b\\xfa\\x46\\x03\\x12\\x65\\x53\\xe3\\x28\\xc7\\x9d\\ \\x1e\\x73\\x12\\x60\\x93\\x09\\xcd\\x16\\x04\\x4d\\xc8\\xb3\\x60\\x53\\x1b\\x9a\\ \\x25\\xd4\\xf4\\x15\\x52\\x96\\x7d\\x4d\\x1a\\x5e\\x63\\xcf\\x3c\\x1e\\x93\\x2d\\ \\x8e\\xd6\\xbd\\x48\\x45\\x09\\x92\\xbb\\xc4\\xf6\\x59\\x29\\xdd\\xd5\\xf2\\xab\\ \\xa1\\xaf\\x85\\xde\\x77\\x66\\x23\\xed\\xfb\\x90\\x6e\\x47\\x32\\x06\\x4b\\x3c\\ \\x27\\x71\\x97\\x74\\x5e\\x9a\\xac\\x16\\xd4\\x20\\x64\\x1d\\x40\\xea\\x9f\\x2e\\ \\x31\\x47\\xe2\\x6a\\xe9\\x46\\xaf\\x62\\x6d\\xce\\xe5\\x76\\x7e\\x77\\x7b\\x4e\\ \\x92\\x3b\\xda\\xbe\\x1f\\x5e\\x89\\x74\\xa5\\x5f\\x62\\xae\\x7d\\xb5\\x6a\\x3c\\ \\x29\\xbd\\xe8\\x91\\xe6\\x23\\x3d\\x85\\xd4\\xbb\\x44\\x31\\x96\\xa9\\x99\\xcd\\ \\x8a\\xf1\\x95\\x94\\x3b\\x52\\xee\\x9e\\x31\\xbb\\xd8\\x8c\\x97\\x80\\xc6\\x5c\\ \\x68\\xac\\xa3\\x35\\xdf\\x6e\\xde\\x5e\\xc0\\xf4\\x27\\xc1\\x58\\xdc\\x17\\xac\\ \\x73\\x1c\\x00\\x45\\xb0\\x6e\\x69\\xe4\\x8f\\xc1\\xc2\\x72\\xfa\\x23\\x82\\x1d\\ \\x1a\\x39\\x05\\x28\\xc5\\x01\\x5e\\x21\\xec\\xbd\\x15\\xe3\\xb1\\x77\\xf1\\x2c\\", 
"\\x25\\x46\\x04\\x8f\\x71\\x2d\\xf4\\xad\\x27\\x7c\\xf8\\x67\\x5a\\xc8\\x23\\x86\\ \\xa8\\xc5\\xc0\\xed\\xde\\xce\\xe0\\x41\\xf7\\x81\\x3e\\x46\\x65\\x01\\x14\\x38\\ \\x81\\x74\\x1c\\x6d\\x17\\xea\\xfc\\x1a\\x29\\x0d\\x69\\x80\\x5b\\x07\\x4e\\xa1\\ \\xfb\\x85\\xaa\\xca\\x90\\xce\\xc3\\x0e\\x58\\x8c\\x95\\x98\\x29\\x65\\x75\\x51\\ \\x60\\x17\\xfa\\x40\\x68\\x95\\x50\\x5d\\x25\\xd2\\x39\\xa9\\x12\\xe3\\x24\\x1e\\ \\x96\\x78\\x59\\xe2\\xff\\x6c\\x07\\x3c\\x7c\\xac\\xf4\\x32\\x52\\xde\\x60\\x89\\ \\x47\\x25\\x5e\\x93\\xf2\\xc6\\x49\\xfe\\xcb\\x24\\x96\\x4b\\x3c\\x23\\xf1\\x93\\ \\x94\\x76\\x8f\\x64\\x0e\\x74\\x2c\\xfe\\xcb\\x12\\xdf\\xe8\\x18\\x52\\x9c\\x46\\ \\x29\\x65\\x9c\\x54\\x92\\x24\\xb1\\x54\\xad\\x9c\\x50\\x35\\x31\\xad\\x45\\x6a\\ \\x20\\x2e\\x93\\xad\\xd0\\x1e\\x87\\xa4\\xa3\\xf6\\x49\\xe4\\xd4\\xc1\\x67\\x57\\ \\x02\\x8f\\x03\\xc7\\x0b\\x18\\xd0\\x2f\\x9f\\xe9\\x47\\xe0\\xa5\\xde\\xf0\\xc7\\ \\x3a\\x60\\x4c\\xf8\\x2f\\xb5\\x80\\x40\\x1b\\x49\\x6b\\xa1\\xaf\\x20\\x2f\\x0a\\ \\x39\\xb7\\x02\\xfb\\x66\\x01\\xf7\\x39\\xb2\\x1c\\x85\\xe4\\x23\\xe8\\xa3\\x65\\", "\\x7a\\x44\\xa8\\x79\\x0b\\xd2\\x6c\\xa4\\xbb\\xd1\\x67\\x31\\xb4\\x40\\x48\\xaf\\ \\x21\\xf5\\x44\\x32\\x0b\\xa5\\x9b\\x4c\\xad\\xb0\\xd0\\xb3\\x42\\x2b\\x85\\x1a\\ \\xf6\\x20\\xbd\\x88\\xda\\x8f\\xa1\\x4f\\x84\\x76\\x75\\x22\\xcd\\x44\\x1a\\x8d\\ \\x74\\x77\\x92\\x6a\\x8e\\xd8\\x01\\xcb\\x9a\\xed\\x95\\xdc\\xa5\\x12\\xb3\\x24\\ \\x5e\\xb7\\x21\\xf7\\x86\\x5b\\x9a\\xd4\\xdd\\xce\\xf4\\xec\\x95\\xa6\\xb9\\xa5\\ \\xbc\\xa1\\xb2\\x08\\x48\\x7d\\xc7\\x4b\\xf3\\x3d\\x92\\xb1\\x51\\x41\\x24\\x15\\ \\x0c\\x94\\x6e\\xf1\\xaa\\xd1\\xa8\\xd3\\x7e\\xa4\\xbd\\xc8\\x4e\\x9b\\xf7\\x64\\ \\x49\\x77\\x23\\x0d\\xef\\xa3\\x33\\x54\\xca\\x0c\\xb7\\x9b\\x04\\x01\\xcb\\xeb\\ \\x58\\x9f\\x6f\\x0d\\x4a\\xf6\\xc0\\x0a\\xa0\\x75\\xa2\\x23\\xc4\\x78\\xe1\\x93\\ \\x3c\\xa6\\xfd\\xdb\\x66\\xa9\\xe3\\xc0\\x67\\xe7\\xc2\\x2f\\x4f\\xc2\\xbe\\x12\\ \\x9b\\x1f\\x9a\\x12\\xa0\\x7a\\x11\\x34\\xac\\x87\\x23\\xef\\x74\\xe0\\xcd\\x84\\ 
\\x7e\\xd7\\x27\\x63\\x2c\\x98\\x06\\xb1\\x09\\xf6\\xce\\x1b\\xf5\\xf0\\xc8\\x12\\ \\x08\\xc5\\x61\\xcd\\x9d\\x0e\\xe7\\x6d\\x86\\x09\\x31\\xa8\\x1d\\x83\\x41\\x10\\", "\\x6a\\x09\\xf9\\x21\\xec\\x03\\x9f\\x0f\\x3c\\xfd\\xa0\\xc6\\x03\\x41\\xa0\\x3c\\ \\x0f\\x0a\\xbf\\x12\\xe9\\x5b\\x1a\\x61\\xc3\\x17\\xf0\\xe7\\xaf\\xd0\\x2c\\xa8\\ \\x2e\\x80\\xa6\\x49\\xc0\\x14\\xec\\x4f\\x36\\x00\\x27\\x81\\xef\\x60\\x76\\x0d\\ \\xfc\\xd3\\x81\\x45\\x14\\x38\\x88\\xcd\\x1e\\x06\\xe0\\x8b\\x62\\x10\\x26\\x8f\\ \\x18\\xb5\\x24\\xd0\\x8c\\x41\\x3a\\xb3\\x39\\x0c\\x84\\x28\\xa1\\x37\\x70\\x15\\ \\xe0\\x6e\\xc0\\xc4\\x87\\x45\\xcc\\x39\\x91\\x62\\x20\\x25\\xe8\\x3c\\x34\\x05\\ \\x88\\x79\\xc0\\xf4\\xc2\\xe8\\x36\\x22\\xb7\\x59\\x54\\x03\\x1d\\x06\\xb8\\xbd\\ \\xa0\\x64\\x68\\xf4\\xd8\\x20\\xf3\\x3b\\x48\\xf7\\x01\\x4d\\x09\\x10\\xbd\\x10\\ \\x32\\x87\\x05\\x09\\xb9\\xcb\\xf0\\x76\\x82\\xe7\\x87\\x72\\x98\\xb7\\x1b\\x6a\\ \\x9f\\x71\\x8e\\x7b\\xa7\\x8d\\x9d\\x8b\\x6a\\x60\\x2c\\xd0\\xe2\\xf4\\x7b\\xb2\\ \\x53\\x40\\xa7\\x43\\x83\\x56\\x04\\xa8\\xc7\\xcf\\x59\\xb2\\x30\\x38\\x8b\\x9f\\ \\x6c\\x4e\\x72\\x3e\\x71\\x92\\x8c\\xf7\\x71\\xa9\\x1b\\x85\\x0c\\xc4\\x5d\\x48\\ \\x0c\\x37\\x50\\xee\\xb4\\xdd\\x2c\\x60\\xf8\\x77\\xc0\\x07\\xb6\\x25\\x22\\x16\\", "\\xee\\xcf\\x7c\\xa7\\x80\\x11\\x0d\\x90\\xf5\\x36\\xf0\\x41\\x3e\\x9c\\xfc\\xbb\\ \\xb3\\x78\\xb7\\xf3\\x37\\x08\\x29\\xef\\x40\\x7b\\x8a\\x53\\x72\\x7f\\xe0\\x38\\ \\xe2\\x7a\\x0c\\x42\\xb6\\xbe\\xf8\\x1f\\x86\\x60\\x0c\\xb8\\x11\\xc8\\x43\\x0c\\ \\x25\\x46\\x04\\x8f\\x71\\x2d\\xf4\\xad\\x27\\x7c\\xf8\\x67\\x5a\\xc8\\x23\\x86\\ \\xa8\\xc5\\xc0\\xed\\xde\\xce\\xe0\\x41\\xf7\\x81\\x3e\\x46\\x65\\x01\\x14\\x38\\ \\x81\\x74\\x1c\\x6d\\x17\\xea\\xfc\\x1a\\x29\\x0d\\x69\\x80\\x5b\\x07\\x4e\\xa1\\ \\xfb\\x85\\xaa\\xca\\x90\\xce\\xc3\\x0e\\x58\\x8c\\x95\\x98\\x29\\x65\\x75\\x51\\ \\x60\\x17\\xfa\\x40\\x68\\x95\\x50\\x5d\\x25\\xd2\\x39\\xa9\\x12\\xe3\\x24\\x1e\\ \\x96\\x78\\x59\\xe2\\xff\\x6c\\x07\\x3c\\x7c\\xac\\xf4\\x32\\x52\\xde\\x60\\x89\\ 
\\x47\\x25\\x5e\\x93\\xf2\\xc6\\x49\\xfe\\xcb\\x24\\x96\\x4b\\x3c\\x23\\xf1\\x93\\ \\x94\\x76\\x8f\\x64\\x0e\\x74\\x2c\\xfe\\xcb\\x12\\xdf\\xe8\\x18\\x52\\x9c\\x46\\ \\x29\\x65\\x9c\\x54\\x92\\x24\\xb1\\x54\\xad\\x9c\\x50\\x35\\x31\\xad\\x45\\x6a\\ \\x20\\x2e\\x93\\xad\\xd0\\x1e\\x87\\xa4\\xa3\\xf6\\x49\\xe4\\xd4\\xc1\\x67\\x57\\", "\\x86\\x01\\x17\\x01\\x67\\xc0\\xa8\\x85\\x9e\\xab\\x88\\xdd\\xd6\\x8c\\xbb\\x05\\ \\xd8\\xe9\\x81\\x5f\\xde\\x06\\x75\\x01\\x0a\\xc1\\x58\\x05\\xd7\\x3e\\x0b\\x97\\ \\xc6\\xed\\x47\\xee\\x02\\xfe\\x04\\x36\\x4f\\x27\\xca\\x62\\x56\\x92\\x4e\\x77\\ \\x1b\\xd8\\xa4\\xb2\\x01\\x1f\\x75\\x98\\xf9\\x8f\\x42\\xcd\\x1c\\x5a\\xcc\\xe1\\ \\xb8\\x83\\x98\\x44\\xb0\\x68\\x02\\x7c\\xc0\\x1e\\xe0\\x9a\\x74\\xa0\\x08\\xa8\\ \\x05\\x16\\x79\\x30\\x82\\x83\\x70\\xd3\\x08\\xc9\\x95\\xd0\\x91\\xe8\\x14\\x60\\ \\x02\\xe9\\xf6\\x8e\\xfa\\x0e\\x50\\x7b\\x67\\x88\\x46\\x20\\x94\\x05\\x89\\x7d\\ \\xa3\\x50\\xd3\\xe2\\x7c\\xae\\x0b\\x60\\x80\\x4a\\xe0\\xf8\\x60\\xdc\\xcf\\x54\\ \\xd9\\x4d\\x45\\x1c\\xf8\\xc2\\xfe\\x21\\xcf\\x09\\x98\\xf9\\x13\\x5c\\xe9\\x3c\\ \\x36\\xd9\\xf9\\xea\\x70\\xc0\\xb7\\x06\\xf7\\xba\\xc5\\x0c\\xe6\\x01\\xd2\\x71\\ \\x93\\x42\\x94\\x44\\x0e\\x63\\x31\\x91\\xfa\\x9a\\x67\\x68\\xe7\\x26\\x16\\x58\\ \\xc9\\xb8\\x5d\\xce\\x77\\xe5\\x34\\xea\\x21\\x60\\x7b\\x29\\x8c\\xbd\\x0c\\xc8\\ \\x05\\xd6\\x47\\xa1\\xf2\\x28\\x14\\xc3\\xe9\\x3b\\x0c\\x62\\x45\\xb5\\xc4\\x32\\", "\\xde\\x0d\\xe6\\x4a\\x0f\\x58\\x3d\\x1d\\x90\\x95\\x13\\x9b\\xf6\\x2d\\x1f\\xbd\\ \\x15\\xe4\\x88\\xdf\\x2e\\xa0\\x15\\x38\\x0b\\x1c\\x2f\\x82\\xd0\\x5c\\xa0\\xf0\\ \\x57\\x60\\x9d\\xcd\\x63\\x1a\\x0a\\xcb\\x06\\xd0\\x50\\x01\\x91\\x81\\x40\\xce\\ \\x61\\xe7\\x39\\xb9\\xd0\\x39\\xc8\\xc6\\x6e\\x04\\xc0\\xc0\\xcd\\x69\\x3b\\x68\\ \\xe5\\x00\\x47\\x52\\x69\\x89\\x5e\\x4e\\x10\\x48\\x21\\x88\\xcb\\x58\\x08\\xbe\\ \\x00\\x6c\\x01\\xf6\\x3e\\x41\\x2d\\xb9\\x44\\xf1\\x60\\x81\\x9d\\x11\\x93\\xca\\ \\x31\\xb7\\xba\\xe1\\x82\\x9f\\x81\\x02\\x03\\xa2\\x99\\x30\\x14\\x0e\\xa4\\xc0\\ 
\\x13\\xc7\\x21\\x7b\\x01\\x70\\x36\\xcb\\x09\\x43\\x89\\xc4\\x1f\\x28\\x63\\xe1\\ \\x0a\\x9b\\xc9\\x32\\x9d\\x30\\xd5\\xd5\\xe9\\x91\\x46\\xe0\\xc4\\x18\\xd0\\x83\\ \\x16\\x78\\xea\\x1c\\x21\\x1c\\x04\\xa1\\xfb\\xc8\\x9e\\x9f\\x41\\x4d\\x1e\\xc4\\ \\x5e\\x39\\xee\\xa4\\xc2\\x5c\\xb0\\xfa\\x40\\xb5\\xcb\\xce\\x57\\xb6\\x94\\xdb\\ \\x91\\x36\\x06\\xd6\\xc2\\x4b\\x39\\xa1\\x1c\\x2a\\x00\\x11\\x81\\x0b\\xde\\x82\\ \\xad\\x49\\xc4\\xbf\\x7c\\x93\\x0e\\x6e\\x64\\x37\\x06\\xb5\\x18\\x40\\x9c\\xae\\", "\\x78\\x1d\\xb8\\xd7\\x02\\x95\\x69\\xd0\\x91\\xe5\\x54\\x98\\x68\\x0b\\x27\\x89\\ \\x58\\x0f\\xfc\\xc0\\xb6\\xf9\\x21\\x2e\\x3a\\x08\\xc6\\x38\\x2f\\xd4\\x74\\x07\\ \\x86\\x01\\x17\\x01\\x67\\xc0\\xa8\\x85\\x9e\\xab\\x88\\xdd\\xd6\\x8c\\xbb\\x05\\ \\xd8\\xe9\\x81\\x5f\\xde\\x06\\x75\\x01\\x0a\\xc1\\x58\\x05\\xd7\\x3e\\x0b\\x97\\ \\xc6\\xed\\x47\\xee\\x02\\xfe\\x04\\x36\\x4f\\x27\\xca\\x62\\x56\\x92\\x4e\\x77\\ \\x1b\\xd8\\xa4\\xb2\\x01\\x1f\\x75\\x98\\xf9\\x8f\\x42\\xcd\\x1c\\x5a\\xcc\\xe1\\ \\xb8\\x83\\x98\\x44\\xb0\\x68\\x02\\x7c\\xc0\\x1e\\xe0\\x9a\\x74\\xa0\\x08\\xa8\\ \\x05\\x16\\x79\\x30\\x82\\x83\\x70\\xd3\\x08\\xc9\\x95\\xd0\\x91\\xe8\\x14\\x60\\ \\x02\\xe9\\xf6\\x8e\\xfa\\x0e\\x50\\x7b\\x67\\x88\\x46\\x20\\x94\\x05\\x89\\x7d\\ \\xa3\\x50\\xd3\\xe2\\x7c\\xae\\x0b\\x60\\x80\\x4a\\xe0\\xf8\\x60\\xdc\\xcf\\x54\\ \\xd9\\x4d\\x45\\x1c\\xf8\\xc2\\xfe\\x21\\xcf\\x09\\x98\\xf9\\x13\\x5c\\xe9\\x3c\\ \\x36\\xd9\\xf9\\xea\\x70\\xc0\\xb7\\x06\\xf7\\xba\\xc5\\x0c\\xe6\\x01\\xd2\\x71\\ \\x93\\x42\\x94\\x44\\x0e\\x63\\x31\\x91\\xfa\\x9a\\x67\\x68\\xe7\\x26\\x16\\x58\\", "# Resource object code # # Created by: The Resource Compiler for PyQt5", "\\x8e\\xd6\\xbd\\x48\\x45\\x09\\x92\\xbb\\xc4\\xf6\\x59\\x29\\xdd\\xd5\\xf2\\xab\\ \\xa1\\xaf\\x85\\xde\\x77\\x66\\x23\\xed\\xfb\\x90\\x6e\\x47\\x32\\x06\\x4b\\x3c\\ \\x27\\x71\\x97\\x74\\x5e\\x9a\\xac\\x16\\xd4\\x20\\x64\\x1d\\x40\\xea\\x9f\\x2e\\ \\x31\\x47\\xe2\\x6a\\xe9\\x46\\xaf\\x62\\x6d\\xce\\xe5\\x76\\x7e\\x77\\x7b\\x4e\\ 
\\x92\\x3b\\xda\\xbe\\x1f\\x5e\\x89\\x74\\xa5\\x5f\\x62\\xae\\x7d\\xb5\\x6a\\x3c\\ \\x29\\xbd\\xe8\\x91\\xe6\\x23\\x3d\\x85\\xd4\\xbb\\x44\\x31\\x96\\xa9\\x99\\xcd\\ \\x8a\\xf1\\x95\\x94\\x3b\\x52\\xee\\x9e\\x31\\xbb\\xd8\\x8c\\x97\\x80\\xc6\\x5c\\ \\x68\\xac\\xa3\\x35\\xdf\\x6e\\xde\\x5e\\xc0\\xf4\\x27\\xc1\\x58\\xdc\\x17\\xac\\ \\x73\\x1c\\x00\\x45\\xb0\\x6e\\x69\\xe4\\x8f\\xc1\\xc2\\x72\\xfa\\x23\\x82\\x1d\\ \\x1a\\x39\\x05\\x28\\xc5\\x01\\x5e\\x21\\xec\\xbd\\x15\\xe3\\xb1\\x77\\xf1\\x2c\\ \\x0a\\x63\\xd4\\x00\\x87\\x46\\x3b\\x1e\\x6c\\x34\\x7c\\xeb\\xc7\\xf5\\xaf\\x4f\\ \\xed\\xd3\\xa8\\x79\\x9d\\x18\\x93\\x70\\xd7\\x1d\\x83\\xdd\\x83\\xa0\\xc0\\x24\\ \\xb6\\x6e\\x31\\x01\\x6e\\xc6\\x04\\x92\\x38\\x82\\xcb\\x5c\\x08\\x66\\x14\\xd6\\", "\\x0b\\xc3\\x44\\x4a\\x21\\xad\\x19\\x8c\\x9f\\xbc\\xf0\\x4e\\x1a\\x54\\x5e\\x65\\ \\x0b\\x13\\x5d\\x9c\\x86\\xef\\x07\\xde\\xb7\\x20\\xfb\\x10\\x3c\\xbc\\x0d\\x06\\ \\xc7\\x61\\x25\\xb0\\x22\\x07\\xea\\x9e\\xb0\\x29\\xc8\\xa8\\x05\\x8d\\x27\\x48\\ \\x29\\x1e\\xca\\x70\\x73\\x14\\xa3\\xcf\\xab\\xd0\\xd2\\x0b\\xac\\x54\\x1a\\x1a\\ \\x5e\\xc2\\x68\\x8e\\xa3\\x0a\\x13\\x06\\xaf\\xb7\\xc5\\x96\\xdf\\x6d\\x24\\xd4\\ \\x7f\\x0c\\x1b\\x7d\\x30\\xfd\\x59\\xe0\\x95\\x3c\\x9b\\x3e\\x53\\x92\\x60\\xc8\\ \\x4e\\x8e\\x7e\\xda\\xcc\\xa6\\x02\\x1b\\x30\\xa5\\x0e\\xc1\\xa4\\x01\\xed\\x40\\ \\x7f\\xc0\\x72\\x40\\x97\\xb2\\x1d\\xdc\\x73\\xbd\\x18\\xdb\\x87\\x43\\x60\\x18\\ \\xf6\\xf5\\x45\\xa1\\x73\\x5a\\x47\\x1c\\x85\\x1f\\x07\\xb8\\x11\\x11\\x0c\\x63\\ \\x09\\x5c\\xf1\\x5f\\xd8\\x52\\x02\\xc1\\x73\\x81\\x09\\x58\\x4c\\xc4\\x34\\x1f\\ \\x02\\xb7\\x0b\\x2b\\xf2\\x10\\x15\\xf4\\x27\\x07\\x51\\xe5\\x3a\\x4b\\xbf\\xd4\\ \\x67\\x20\\xb0\\x0d\\x3c\\x60\\xe8\\x35\\xd4\\x36\\x13\\x52\\xd7\\x39\\xc7\\x9a\\ \\x0a\\xbc\\x66\\x6f\\xc4\\xe6\\x75\\xb0\\x3c\\x03\\x1e\\x5d\\x09\\xc5\\x37\\x26\\", "\\xfe\\x95\\xef\\xb3\\x9d\\xbe\\xf8\\x2b\\xb8\\x02\\x0c\\x88\\x43\\x5e\\x04\\x9a\\ \\x0c\\x30\\x3d\\x90\\x64\\x42\\xd0\\x70\\xd2\\x6b\\x1c\\x5c\\x07\\xc0\\xac\\xb0\\ 
\\xe1\\x6b\\x0d\\x00\\xb3\\xd5\\x89\\xd4\\x01\\x87\\x92\\xba\\x3b\\x8b\\x71\\x3b\\ \\xff\\x6f\\xea\\x03\\x4d\\x1d\\x30\\xa4\\x1a\\x4e\\x5a\\xf6\\xae\\xf5\\x07\\x56\\ \\x26\\x13\\xab\\x7e\\x04\\x77\\x47\\x0d\\x78\\xb7\\x62\\xfc\\xd3\\x42\\x27\\x0d\\ \\xe8\\x16\\x87\\x87\\x4f\\x42\\x61\\x1d\\x94\\xf7\\x87\\x25\\xe9\\x7f\\x99\\x67\\ \\x18\\xdb\\x09\\x17\\x1e\\x06\\x6f\\x1d\\xb8\\x9b\\x0c\\x4e\\x8d\\x16\\x47\\xbb\\ \\xc2\\x19\\xd3\\x2e\\x22\\x0b\\x28\\x8c\\x83\\xab\\x0d\\x7a\\x1d\\x36\\x71\\x9d\\ \\x4e\\xa2\\xa3\\x28\\x08\\xfd\\xe2\\x18\\xc9\\xd0\\x6a\\x40\\x72\\x04\\x32\\x0f\\ \\x01\\x6d\\x2e\\xf0\\x5a\\x90\\x21\\xac\\xae\\x60\\x76\\x02\\x27\\x9c\\x23\\x6d\\ \\x72\\x6e\\x17\\x9a\\x9c\\x4b\\xfa\\x46\\x03\\x12\\x65\\x53\\xe3\\x28\\xc7\\x9d\\ \\x1e\\x73\\x12\\x60\\x93\\x09\\xcd\\x16\\x04\\x4d\\xc8\\xb3\\x60\\x53\\x1b\\x9a\\ \\x25\\xd4\\xf4\\x15\\x52\\x96\\x7d\\x4d\\x1a\\x5e\\x63\\xcf\\x3c\\x1e\\x93\\x2d\\", "\\xde\\xb9\\x58\\xcc\\x04\\xae\\x6b\\x83\\xbc\\x23\\xce\\xf3\\x7f\\x8b\\xc0\\x2e\\ \\x2f\\x9e\\xb4\\x38\\x5b\\x67\\xc0\\xaf\\x77\\x43\\xcb\\x3f\\x40\\x17\\xc5\\x49\\ \\x09\\x86\\x31\\xab\\x23\\x10\\x8a\\x80\\x51\\x8b\\x75\\x63\\x3b\\x4d\\x43\\x20\\ \\x7b\\x0f\\x24\\xaf\\x32\\xe0\\xac\\x1b\\x38\\x0d\\xb4\\x81\\xcf\\x05\\x39\\x35\\ \\x30\\xf8\\x28\\xf4\\xf9\\x12\\x9a\\x16\\x40\\xc4\\x0b\\x1a\\x0d\\x94\\xd8\\x05\\ \\x46\\x57\\x60\\xf4\\xb4\\x20\\xd5\\xd6\\x4b\\x92\\x81\\x6d\\xc0\\xd2\\x12\\x08\\ \\x3c\\xe7\\x54\\xe6\\xc3\\xcd\\x29\\x22\\x18\\x94\\xf2\\x3d\\x09\\xf8\\x68\\x24\\ \\x15\\xe1\\xe6\\x00\\x60\\x3c\\x2e\\x34\\x06\\x98\\xb0\\xd7\\xc1\\xe9\\x61\\x60\\ \\x08\\x34\\x64\\x40\\xd6\\x63\\x60\\xec\\x02\\xfc\\x49\\x58\\x53\\x23\\xec\\xb8\\ \\x39\\xca\\xee\\x7e\\x10\\x31\\xe1\\xbc\\x6a\\x28\\xfd\\x15\\x92\\xc3\\x70\\x70\\ \\x3a\\x54\\x7b\\xa0\\x67\\x08\\x06\\x6c\\x00\\xef\\xcf\\x6e\\x62\\xbd\\x5d\\x44\\ \\x0b\\xc3\\x44\\x4a\\x21\\xad\\x19\\x8c\\x9f\\xbc\\xf0\\x4e\\x1a\\x54\\x5e\\x65\\ \\x0b\\x13\\x5d\\x9c\\x86\\xef\\x07\\xde\\xb7\\x20\\xfb\\x10\\x3c\\xbc\\x0d\\x06\\", 
"\\x18\\xbe\\x1c\\xf6\\x0f\\x86\\xce\\x47\\x81\\x1a\\x9b\\x3a\\x69\\x02\\xb6\\x42\\ \\xca\\x6e\\x08\\x05\\x89\\x45\\xd7\\x53\\x8b\\x81\\x17\\x93\\x6c\\x7e\\xc0\\x74\\ \\x7d\\x4c\\x34\\x3e\\x8f\\x43\\x14\\x73\\x12\\x17\\xa9\\x40\\x42\\xfa\\x19\\x8c\\ \\x52\\x19\\xca\\x05\\xba\\x20\\x2e\\x06\\xee\\x3a\\x0c\\xfe\\x76\\x30\\xbf\\x04\\ \\x3e\\x07\\x32\\x80\\xcb\\xa0\\xf9\\x2a\\xd8\\x71\\x11\\xb4\\x87\\xa1\\xdf\\x09\\ \\x83\\x82\\xed\\x90\\xb1\\x0a\\xd8\\x9e\\x08\\xa6\\x05\\xe3\\xc3\\xfc\\xf4\\xb1\\ \\x18\\x14\\x85\\xdc\\x1b\\x80\\x03\\x06\\x74\\x26\\xa2\\xe2\\x04\\x3a\\x1f\\x69\\ \\xc7\\xec\\x1b\\xc3\\xdd\\x28\\x5c\\x8b\\x4d\\x8c\\xd5\\xbd\\xa1\\xf1\\x6a\\xa0\\ \\x18\\x68\\x05\\xb2\\x80\\x1e\\xc0\\x66\\x48\\xff\\x11\\x46\\xee\\x04\\x3f\\xf0\\ \\xdd\\xe5\\x28\\xf2\\x36\\x27\\x29\\xc1\\x02\\x0a\\x68\\x04\\xc2\\xb4\\x51\\xc0\\ \\x29\\xa0\\xbb\\x51\\x49\\x81\\xf1\\x14\\x46\\x49\\x03\\xc6\\x45\\x42\\x5d\\x81\\ \\x66\\xc0\\x04\\x6e\\x06\\xa6\\x6e\\x80\\x84\\xc7\\x9d\\xcd\\x99\\x0a\\xca\\x85\\ \\x78\\x1d\\xb8\\xd7\\x02\\x95\\x69\\xd0\\x91\\xe5\\x54\\x98\\x68\\x0b\\x27\\x89\\", "\\x08\\xcd\\x26\\x8c\\x01\\x33\\xa1\\x4c\\x8c\\x05\\x76\\x25\\x3a\\x8a\\x5e\\x0c\\ \\xbe\\xcd\\xf0\\x78\\x15\\x6c\\x04\\x0e\\xdd\\x08\\x64\\x05\\x6c\\xb5\\x68\\xea\\ \\x81\\xe7\\x49\\x17\\x33\\xd7\\xc3\\x20\\xcb\\x76\\xcc\\x3e\\xa7\\x10\\x0f\\x10\\ \\x01\\x86\\x1e\\x87\\x8c\\xcb\\xdc\\x70\\xf4\\xaf\\xe0\\xeb\\x05\\x1a\\xe1\\xf2\\ \\xfd\\xe8\\xe5\\x0e\\xfc\\x2f\\x02\\xdb\\x6f\\x03\\xae\\xb0\\xe9\\x76\\x64\\x3d\\ \\x54\\x25\\x3a\\x2a\\x6f\\xc1\\x59\\xcb\\x66\\xea\\xdc\\x1d\\x58\\x18\\x74\\xd0\\ \\x07\\x6a\\x27\\x41\\x69\\x8c\\x94\\x21\\x8f\\x30\\x90\\x1a\\x92\\x9c\\xdf\\x5c\\ \\x1b\\xef\\x43\\x78\\xdd\\x2a\\xa8\\xc9\\xc5\\xc4\\x65\\x92\\x07\\x18\\x09\\xb6\\ \\xa2\\x33\\xd4\\xb6\\xb1\\x05\\x6b\\xed\\x84\\x5d\\x56\\x0a\\x4c\\x32\\x00\\xc0\\ \\x3a\\x0f\\xaa\\xc6\\x90\\xfc\\x9c\\xcd\\x03\\x38\\x11\\x20\\xd7\\xb1\\x80\\x03\\ \\x42\\x90\\x71\\xbd\\x17\\xfe\\xbc\\xd0\\x71\\x6e\\x2e\\x7b\\x03\\x2e\\xdd\\x4d\\ 
\\xe4\\x83\\x00\\x34\\x80\\xb1\\x74\\x08\\xc4\\xaf\\x43\\xe4\\x80\\x6b\\x05\\x5c\\ \\x12\\x84\\xca\\xae\\x8e\\x2f\\xe9\\x84\\x90\\xcb\\xde\\x9d\\x92\\x33\\x44\\xe9\\", "\\x74\\x52\\x46\\x37\\x06\\xf3\\x13\\x49\\x7c\\x03\\xc6\\x25\\x9c\\xd2\\x60\\xd6\\ \\x53\\x42\\x12\\x60\\x54\\xfd\\x82\\xaa\\xfa\\xc2\\xd6\\x4c\\x98\\xbd\\x15\\x12\\ \\xe7\\xb9\\xc0\\x63\\x41\\xc8\\x84\\xac\\x38\\x24\\x00\\xbf\\x00\\x5d\\x81\\x01\\ \\x06\\x8d\\x17\\x19\\x6c\\xb8\\xdc\\xe2\\x54\\x81\\xfd\\x52\\x9f\\xd3\\x90\\x17\\ \\x86\\xf6\\x1c\\xd8\\x92\\x0a\\x85\\xc0\\xc5\\xe5\\xe0\\x8d\\x81\\x2b\\x05\\x94\\ \\x01\\x91\\xd3\\x90\\xb8\\x20\\x17\\xde\\xeb\\x0f\\xa1\\xa1\\xc0\\x40\\xe0\\x0f\\ \\x18\\xbe\\x1c\\xf6\\x0f\\x86\\xce\\x47\\x81\\x1a\\x9b\\x3a\\x69\\x02\\xb6\\x42\\ \\xca\\x6e\\x08\\x05\\x89\\x45\\xd7\\x53\\x8b\\x81\\x17\\x93\\x6c\\x7e\\xc0\\x74\\ \\x7d\\x4c\\x34\\x3e\\x8f\\x43\\x14\\x73\\x12\\x17\\xa9\\x40\\x42\\xfa\\x19\\x8c\\ \\x52\\x19\\xca\\x05\\xba\\x20\\x2e\\x06\\xee\\x3a\\x0c\\xfe\\x76\\x30\\xbf\\x04\\ \\x3e\\x07\\x32\\x80\\xcb\\xa0\\xf9\\x2a\\xd8\\x71\\x11\\xb4\\x87\\xa1\\xdf\\x09\\ \\x83\\x82\\xed\\x90\\xb1\\x0a\\xd8\\x9e\\x08\\xa6\\x05\\xe3\\xc3\\xfc\\xf4\\xb1\\ \\x18\\x14\\x85\\xdc\\x1b\\x80\\x03\\x06\\x74\\x26\\xa2\\xe2\\x04\\x3a\\x1f\\x69\\", "\\x4e\\xa2\\xa3\\x28\\x08\\xfd\\xe2\\x18\\xc9\\xd0\\x6a\\x40\\x72\\x04\\x32\\x0f\\ \\x01\\x6d\\x2e\\xf0\\x5a\\x90\\x21\\xac\\xae\\x60\\x76\\x02\\x27\\x9c\\x23\\x6d\\ \\x72\\x6e\\x17\\x9a\\x9c\\x4b\\xfa\\x46\\x03\\x12\\x65\\x53\\xe3\\x28\\xc7\\x9d\\ \\x1e\\x73\\x12\\x60\\x93\\x09\\xcd\\x16\\x04\\x4d\\xc8\\xb3\\x60\\x53\\x1b\\x9a\\ \\x25\\xd4\\xf4\\x15\\x52\\x96\\x7d\\x4d\\x1a\\x5e\\x63\\xcf\\x3c\\x1e\\x93\\x2d\\ \\x8e\\xd6\\xbd\\x48\\x45\\x09\\x92\\xbb\\xc4\\xf6\\x59\\x29\\xdd\\xd5\\xf2\\xab\\ \\xa1\\xaf\\x85\\xde\\x77\\x66\\x23\\xed\\xfb\\x90\\x6e\\x47\\x32\\x06\\x4b\\x3c\\ \\x27\\x71\\x97\\x74\\x5e\\x9a\\xac\\x16\\xd4\\x20\\x64\\x1d\\x40\\xea\\x9f\\x2e\\ \\x31\\x47\\xe2\\x6a\\xe9\\x46\\xaf\\x62\\x6d\\xce\\xe5\\x76\\x7e\\x77\\x7b\\x4e\\ 
\\x92\\x3b\\xda\\xbe\\x1f\\x5e\\x89\\x74\\xa5\\x5f\\x62\\xae\\x7d\\xb5\\x6a\\x3c\\ \\x29\\xbd\\xe8\\x91\\xe6\\x23\\x3d\\x85\\xd4\\xbb\\x44\\x31\\x96\\xa9\\x99\\xcd\\ \\x8a\\xf1\\x95\\x94\\x3b\\x52\\xee\\x9e\\x31\\xbb\\xd8\\x8c\\x97\\x80\\xc6\\x5c\\ \\x68\\xac\\xa3\\x35\\xdf\\x6e\\xde\\x5e\\xc0\\xf4\\x27\\xc1\\x58\\xdc\\x17\\xac\\", "\\xa8\\x7a\\x1f\\x52\\xae\\xcb\\x59\\xc4\\x20\\xb5\\x5d\\xe3\\xd5\\x8f\\xad\\x68\\ \\x6f\\x33\\x0a\\xbf\\x66\\x48\\x05\\xc9\\x12\\x57\\x4a\\xfe\\x21\\xaa\\x2c\\x47\\ \\x3b\\x9a\\x91\\x8a\\x0d\\xc9\\xb8\\x44\\x72\\x4f\\x53\\xec\\xfe\\x02\\x55\\x34\\ \\x22\\xeb\\x30\\xd2\\xa4\\x44\\xc9\\x78\\xca\\x19\\xf6\\xcc\\x97\\x86\\x95\\x48\\ \\xfb\\x90\\x0a\\xa6\\x48\\xfc\\x20\\x79\\x2f\\x96\\x16\\xe3\\x4c\\xbc\\xbe\\x92\\ \\xce\\x19\\x2e\\xcd\\xec\\xa9\\x38\\x47\\xd5\\x4e\\x4c\\x56\\xef\\xbb\\xa4\\xe1\\ \\xdd\\x24\\xbe\\x51\\x05\\x71\\xd5\\x10\\x57\\x98\\xbd\\xd2\\xe8\\x3e\\xd2\\xb9\\ \\x1e\\x69\\x58\\x9a\\x94\\xf0\\x90\\x4e\\x25\\xaf\\x11\\x81\\xa9\\xd9\\xd2\\x3f\\ \\x3c\\x6a\\x6d\\x41\\x3f\\xcb\\xa1\\xd0\\xc4\\x04\\xc9\\x3f\\x48\\xb1\\xaf\\xd0\\ \\x1c\\xa1\\xc6\\xdf\\x4c\\xa9\\xb7\\x47\\xa2\\x54\\xe2\\x42\\x89\\xfb\\xa4\\xa4\\ \\x73\\xb4\\xa6\\x02\\xad\\x6e\\x41\\xea\\xe6\\x93\\xb8\\xd5\\x99\\x0d\\xfe\\x4b\\ \\xea\\x31\\x5c\\xf1\\x5f\\x91\\x75\\x10\\xe9\\xbc\\x14\\x27\\xc6\\xbe\\x69\\x9f\\ \\xc4\\x7d\\x49\\xd2\\x55\\x99\\x4e\\xbc\\x9d\\x2a\\x3d\\x89\\xc4\\x74\\x89\\xf7\\", "\\x92\\x64\\x9e\\x27\\x2b\\xbb\\x50\\x81\\x95\\xe8\\x47\\x0b\\xad\\x11\\xfa\\x46\\ \\xe8\\x90\\x90\\x16\\x20\\x65\\xa4\\x49\\xdc\\xee\\x2c\\xf2\\x6b\\x89\\xef\\xa4\\ \\xec\\xcb\\xa5\\xf7\\x0d\\xa9\\xa0\\xbf\\xc4\\xff\\x24\\xf6\\x4b\\xbd\\x2f\\x95\\ \\xf2\\x87\\x4b\\xfc\\x2c\\x15\\x3d\\x28\\x31\\x47\\x61\\x02\\x12\\x0f\\xa9\\x93\\ \\x5a\\x9d\\xa6\\x46\\x32\\x27\\xa9\\x9a\\x4a\\x7d\\x8d\\xb4\\x84\\x98\\x54\\x38\\ \\x5e\\xe2\\x62\\x45\\xf8\\x42\\xaf\\x13\\x93\\xa1\\xc7\\x11\\xdd\\x40\\xa3\\x20\\ \\xbe\\x07\\xdc\\x33\\x93\\xc0\\x17\\x81\\xd0\\x00\\xf0\\x1d\\x22\\xb8\\x3c\\x82\\ 
\\xd5\\x08\\xc9\\xff\\x01\\xca\\x52\\x80\\x9e\\xe0\\xe9\\xc0\\xba\\xfe\\x34\\x07\\ \\x66\\x84\\x38\\x3a\\x0a\\x48\\x80\\x0e\\x5b\\x5d\\x70\\x01\\x05\\x40\\x7e\\x0c\\ \\x52\\x6a\\xc0\\xfc\\xb7\\x1b\\x3e\\x1a\\x01\\x91\\x99\\x40\\x37\\xa0\\x0e\\x92\\ \\x0e\\xc3\\x9d\\xcf\\xc3\\xfb\\xf9\\xd0\\xfe\\x36\\x10\\x83\\x5e\\xf7\\xc3\\xd1\\ \\x6f\\x80\\x0e\\x2c\\x52\\x30\\xe9\\x0e\\xfc\\x8f\\x0a\\x9e\\x24\\x42\\x23\\x99\\ \\x74\\x52\\x46\\x37\\x06\\xf3\\x13\\x49\\x7c\\x03\\xc6\\x25\\x9c\\xd2\\x60\\xd6\\", "\\x00\\x6c\\ \\x00\\x6f\\x00\\x67\\x00\\x6f\\x00\\x73\\ \\x00\\x08\\ \\x0a\\x61\\x5a\\xa7\\ \\x00\\x69\\ \\x00\\x63\\x00\\x6f\\x00\\x6e\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\ \" qt_resource_struct_v1 = b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\", "\\x29\\x1e\\xca\\x70\\x73\\x14\\xa3\\xcf\\xab\\xd0\\xd2\\x0b\\xac\\x54\\x1a\\x1a\\ \\x5e\\xc2\\x68\\x8e\\xa3\\x0a\\x13\\x06\\xaf\\xb7\\xc5\\x96\\xdf\\x6d\\x24\\xd4\\ \\x7f\\x0c\\x1b\\x7d\\x30\\xfd\\x59\\xe0\\x95\\x3c\\x9b\\x3e\\x53\\x92\\x60\\xc8\\ \\x4e\\x8e\\x7e\\xda\\xcc\\xa6\\x02\\x1b\\x30\\xa5\\x0e\\xc1\\xa4\\x01\\xed\\x40\\ \\x7f\\xc0\\x72\\x40\\x97\\xb2\\x1d\\xdc\\x73\\xbd\\x18\\xdb\\x87\\x43\\x60\\x18\\ \\xf6\\xf5\\x45\\xa1\\x73\\x5a\\x47\\x1c\\x85\\x1f\\x07\\xb8\\x11\\x11\\x0c\\x63\\ \\x09\\x5c\\xf1\\x5f\\xd8\\x52\\x02\\xc1\\x73\\x81\\x09\\x58\\x4c\\xc4\\x34\\x1f\\ \\x02\\xb7\\x0b\\x2b\\xf2\\x10\\x15\\xf4\\x27\\x07\\x51\\xe5\\x3a\\x4b\\xbf\\xd4\\ \\x67\\x20\\xb0\\x0d\\x3c\\x60\\xe8\\x35\\xd4\\x36\\x13\\x52\\xd7\\x39\\xc7\\x9a\\ \\x0a\\xbc\\x66\\x6f\\xc4\\xe6\\x75\\xb0\\x3c\\x03\\x1e\\x5d\\x09\\xc5\\x37\\x26\\ \\x43\\xa8\\x14\\xe8\\x05\\xae\\xc3\\x30\\xb1\\x8c\\xda\\xd7\\xa2\\xfc\\xd6\\xcb\\ \\xee\\xcf\\x7c\\xa7\\x80\\x11\\x0d\\x90\\xf5\\x36\\xf0\\x41\\x3e\\x9c\\xfc\\xbb\\ 
\\xb3\\x78\\xb7\\xf3\\x37\\x08\\x29\\xef\\x40\\x7b\\x8a\\x53\\x72\\x7f\\xe0\\x38\\", "\\xbc\\x30\\x3f\\x42\\xa6\\x63\\x2e\\x52\\x9d\\x1b\\x9b\\x06\\x87\\xb5\\xf2\\x1d\\ \\xf2\\x39\\x3b\\x1b\\x98\\x71\\x04\\xdb\\x17\\xec\\x06\\x7a\\x90\\xba\\x60\\x22\\ \\x5d\\x57\\x83\\x31\\x59\\x90\\xfe\\x81\\x8d\\x41\\x2b\\x1b\\xfe\\xf4\\xdb\\x4d\\ \\x4e\\x93\\xc3\\x8b\\x95\\x50\\x0b\\x66\\xe7\\x20\\xba\\x63\\x51\\x08\\x98\\xb9\\ \\x0f\\xc3\\x40\\x37\\xd6\\xca\\x77\\xe8\\xe0\\x16\\xc2\\x24\\x72\\x35\\x01\\x7a\\ \\x02\\x73\\xdc\\xaf\\xe2\\x4e\\x9a\\x00\\xd9\\x41\\x38\\xf8\\x2b\\xd2\\x30\\xa4\\ \\x41\\x48\\x2f\\xdb\\x01\\xa9\\x7d\\x09\\xd2\\x40\\xaf\\xe4\\x2a\\x92\\xe8\\xa7\\ \\xce\\xfb\\x0d\\xbd\\x1e\\x43\\xcb\\x85\\x2a\\xcb\\x50\\xe0\\x15\\x14\\x7e\\xd3\\ \\xd0\\xd7\\x16\\xfa\\xde\\x42\\x91\\xcf\\x91\\x35\\x07\\x35\\x6f\\x40\\xe5\\x42\\ \\x2d\\x9d\\xc8\\x7a\\xd0\\x70\\x02\\xd8\\x42\\x89\\xb7\\xa5\\x82\\x0b\\x14\\xd9\\ \\x87\\x34\\x0b\\x89\\xc5\\xf6\\x08\\xe1\\xda\\x22\\xbb\\xb1\\x79\\x59\\xe2\\x4f\\ \\xc9\\x7b\\x8b\\xf4\\x3c\\xb2\\x78\\x4d\\x47\\x88\\xab\\x95\\x93\\xd2\\x2d\\x48\\ \\x09\\x97\\xc9\\xa2\\x5a\\xe2\\xa8\\xe2\\x74\\xd8\\x73\\x94\\x84\\x71\\xd2\\x84\\", "\\x0a\\x63\\xd4\\x00\\x87\\x46\\x3b\\x1e\\x6c\\x34\\x7c\\xeb\\xc7\\xf5\\xaf\\x4f\\ \\xed\\xd3\\xa8\\x79\\x9d\\x18\\x93\\x70\\xd7\\x1d\\x83\\xdd\\x83\\xa0\\xc0\\x24\\ \\xb6\\x6e\\x31\\x01\\x6e\\xc6\\x04\\x92\\x38\\x82\\xcb\\x5c\\x08\\x66\\x14\\xd6\\ \\x16\\x51\\x5b\\xbe\\x8d\\x72\\x32\\xd8\\x88\\x97\\x29\\x40\\xbf\\xba\\x7d\\x98\\ \\x0d\\x09\\x10\\x6f\\x05\\x8e\\x02\\x9e\\x10\\xc4\\x60\\x67\\xa9\\x9d\\xfd\\xaf\\ \\xde\\x0d\\xe6\\x4a\\x0f\\x58\\x3d\\x1d\\x90\\x95\\x13\\x9b\\xf6\\x2d\\x1f\\xbd\\ \\x15\\xe4\\x88\\xdf\\x2e\\xa0\\x15\\x38\\x0b\\x1c\\x2f\\x82\\xd0\\x5c\\xa0\\xf0\\ \\x57\\x60\\x9d\\xcd\\x63\\x1a\\x0a\\xcb\\x06\\xd0\\x50\\x01\\x91\\x81\\x40\\xce\\ \\x61\\xe7\\x39\\xb9\\xd0\\x39\\xc8\\xc6\\x6e\\x04\\xc0\\xc0\\xcd\\x69\\x3b\\x68\\ \\xe5\\x00\\x47\\x52\\x69\\x89\\x5e\\x4e\\x10\\x48\\x21\\x88\\xcb\\x58\\x08\\xbe\\ 
\\x00\\x6c\\x01\\xf6\\x3e\\x41\\x2d\\xb9\\x44\\xf1\\x60\\x81\\x9d\\x11\\x93\\xca\\ \\x31\\xb7\\xba\\xe1\\x82\\x9f\\x81\\x02\\x03\\xa2\\x99\\x30\\x14\\x0e\\xa4\\xc0\\ \\x13\\xc7\\x21\\x7b\\x01\\x70\\x36\\xcb\\x09\\x43\\x89\\xc4\\x1f\\x28\\x63\\xe1\\", "\\x3c\\xd0\\x7b\\x04\\x1b\\xbb\\xee\\x66\\xcf\\xe6\\x28\\xae\\x19\\x49\\x30\\xa6\\ \\x83\\xfa\\x39\\x30\\xe3\\x1c\\xe8\\x0d\\x0c\\xee\\xf0\\x32\\xe3\\xd9\\x08\\x2c\\ \\xc9\\x83\\x48\\x36\\x0c\\x38\\x08\\xe3\\xc5\\xee\\x3b\\xe0\\x9b\\x42\\xd8\\x0d\\ \\x7c\\xb0\\x03\\xd2\\xbe\\x06\\xf7\\x27\\x5d\\xe0\\xf4\\x28\\x70\\xf9\\x20\\xe5\\ \\x04\\x47\\x56\\xed\\xa0\\x6a\\x84\\x49\\x69\\xd8\\x22\\x73\\x85\\x9b\\xb4\\xc7\\ \\xa6\\x42\\x73\\x6f\\x02\\x23\\x9b\\x49\\x78\\x66\\x09\\xee\\x6f\\xc3\\x84\\xa3\\ \\x26\\xbe\\x7d\\x97\\x62\\xb5\\xe7\\xa3\\x59\\x3f\\x62\\xae\\xf6\\xf3\\xf6\\xab\\ \\x11\\xae\\x5f\\x94\\x8f\\x7b\\xe5\\x85\\x7c\\x78\\xaf\\x9f\\xa9\\x89\\xaf\\x52\\ \\x7b\\xf6\\x3a\\x1a\\xc9\\x67\\xe0\\xb7\\x43\\xd9\\xfe\\xf7\\xcf\\x30\\xbf\\x6b\\ \\xde\\xcf\\xdf\\x6a\\x81\\xb9\\x40\\x03\\x6c\\xe9\\x7a\\x88\\x7f\\x54\\x47\\x71\\ \\x7d\\x0d\\x9c\\xee\\x0f\\x87\\xa1\\x79\\x3f\\x1c\\xc7\\xcf\\x50\\x60\\xc6\\xbf\\ \\x2c\\xf8\\x4f\\x7f\\x68\\x1e\\x00\\x1d\\xbd\\xe0\\x60\\x0a\\x08\\xb2\\x42\\x70\\ \\x3e\\xb0\\x28\\x0c\\x49\\x19\\x10\\x9e\\x0a\\x34\\x74\\x03\\x6e\\x82\\xf8\\xc3\\", "\\x19\\x92\\x8f\\xc1\\xe8\\x30\\xd1\\x2b\\x0d\\x52\\x9e\\xea\\x86\\xf1\\x45\\x02\\ \\x14\\x89\\xc8\\x05\\x89\\x54\\xbd\\xb8\\x8f\\xa5\\x8f\\x07\\xf8\\xfc\\x21\\xe1\\ \\xca\\xb0\\xc5\\x32\\xd9\\x21\\xcd\\xfa\\x20\\x44\\x4a\\x81\\x1b\\xa2\\xf8\\x87\\ \\x9f\\x80\\xd0\\x16\\x12\\x2a\\x4f\\x41\\xd4\\x87\\xd1\\x31\\x02\\xca\\x2c\\x22\\ \\xb3\\x77\\x11\\x9d\\xba\\x12\\xef\\xba\\x18\\x66\\x74\\x24\\xee\\x3e\\x9f\\x41\\ \\x53\\x00\\xb8\\x80\\x28\\x25\\xf8\\x68\\xc6\\x4b\\x39\\x71\\xd2\\x30\\xc8\\xe6\\ \\x77\\xba\\x10\\x26\\xca\\x20\\x76\\xe1\\x3f\\x6f\\x01\\xee\\xd0\\x49\\xde\\xf0\\ \\xcd\\x23\\xd2\\x69\\xb1\\xc2\\x5f\\xcc\\x65\\x9d\\xd5\\x84\\x5b\\x45\\xa3\\xbf\\ 
\\x81\\x98\\x75\\x02\\xde\\xdf\\x79\\x95\\x46\\x6d\\x7b\\x4f\\xd7\\x1c\\x9a\\xad\\ \\xc0\\x2f\\x48\\x2f\\x21\\x8d\\xf4\\x48\\x5d\\x8b\\xa4\\x3b\\x0d\\x35\\x1c\\xb2\\ \\x73\\xfa\\x1b\\x16\\xda\\xd8\\x89\\x82\\xb5\\xa8\\xea\\x18\\x7a\\xc1\\x42\\xcf\\ \\x0b\\xad\\x13\\xfa\\x3d\\x8c\\xe2\\x9b\\x0d\\x75\\x9c\\x34\\xa4\\xa3\\xc8\\x6a\\ \\xb4\\x19\\x50\\x05\\xdd\\x25\\x63\\x92\\xc4\\x08\\x89\\x19\\xd2\\xf8\\x02\\xe9\\", "\\x51\\x67\\xb6\\xf8\\xb4\\xe3\\xdc\\x1b\\x34\\x3f\\x7f\\xa9\\x46\\x0e\\x8d\\xeb\\ \\x92\\xfc\\x72\\x99\\x59\\xde\\x36\\x82\\x35\\xc0\\x71\\xc8\\xf8\\x16\\x26\\x07\\ \\xa2\\xb8\\x3c\\x1e\\x20\\x09\\xc6\\x9d\\x65\\x75\\x29\\xcc\\xa9\\x8f\\x30\\xe4\\ \\x66\\xa0\\x2a\\x19\\x22\\x3e\\x88\\x87\\x69\\xb9\\x5e\\xfc\\x38\\x00\\xae\\x06\\ \\x1e\\x04\\x2e\\x8c\\xdb\\xd0\\x3a\\x5f\\x90\\x6c\\xc0\\x84\\x38\\x74\\xab\\x07\\ \\x1e\\x39\\x88\\xc6\\xfe\\x17\\x3f\\x2f\\x93\\xce\\x12\\x0c\\xda\\x31\\xac\\x3c\\ \\x12\\x67\\x8f\\x20\\x3e\\x35\\x0c\\x1c\\x00\\x22\\x18\\xf8\\xb0\\x36\\xf6\\x83\\ \\xc4\\x44\\x7c\\x74\\x12\\x25\\x4e\\xb1\\xf9\\x09\\xc9\\x2b\\xf2\\xe0\\xf0\\xcd\\ \\xf4\\x30\\x3b\\x99\\x68\\xee\\x87\\x0b\\xc6\\x53\\xf0\\xa6\\x20\\xdf\\x4b\\x81\\ \\x37\\x99\\xe2\\x16\\x93\\x7b\\x3c\\x07\\x31\\x17\\x0e\\xe9\\x44\\x61\\x60\\x00\\ \\x18\\xc5\\x30\\x39\\x0e\\x74\\x58\\xd0\\xe5\\x04\\xc1\\x5b\\xa1\\xd5\\x03\\x33\\ \\x16\\x00\\x1b\\x92\\x41\\x01\\x48\\x6d\\xe5\\xf4\\xdc\\x28\\xf7\\x3c\\x0d\\x1b\\ \\x80\\x33\\x67\\xa0\\xe8\\x13\\x48\\xfe\\x10\\xfa\\x76\\x40\\xb3\\x01\\x9e\\xf5\\", "\\x1e\\x04\\x2e\\x8c\\xdb\\xd0\\x3a\\x5f\\x90\\x6c\\xc0\\x84\\x38\\x74\\xab\\x07\\ \\x1e\\x39\\x88\\xc6\\xfe\\x17\\x3f\\x2f\\x93\\xce\\x12\\x0c\\xda\\x31\\xac\\x3c\\ \\x12\\x67\\x8f\\x20\\x3e\\x35\\x0c\\x1c\\x00\\x22\\x18\\xf8\\xb0\\x36\\xf6\\x83\\ \\xc4\\x44\\x7c\\x74\\x12\\x25\\x4e\\xb1\\xf9\\x09\\xc9\\x2b\\xf2\\xe0\\xf0\\xcd\\ \\xf4\\x30\\x3b\\x99\\x68\\xee\\x87\\x0b\\xc6\\x53\\xf0\\xa6\\x20\\xdf\\x4b\\x81\\ \\x37\\x99\\xe2\\x16\\x93\\x7b\\x3c\\x07\\x31\\x17\\x0e\\xe9\\x44\\x61\\x60\\x00\\ 
\\x18\\xc5\\x30\\x39\\x0e\\x74\\x58\\xd0\\xe5\\x04\\xc1\\x5b\\xa1\\xd5\\x03\\x33\\ \\x16\\x00\\x1b\\x92\\x41\\x01\\x48\\x6d\\xe5\\xf4\\xdc\\x28\\xf7\\x3c\\x0d\\x1b\\ \\x80\\x33\\x67\\xa0\\xe8\\x13\\x48\\xfe\\x10\\xfa\\x76\\x40\\xb3\\x01\\x9e\\xf5\\ \\x90\\x38\\x2a\\x17\\x96\\x65\\x63\\x5a\\x10\\xfb\\x5f\\x13\\xfc\\xed\\x23\\xa0\\ \\x02\\x38\\x06\\x04\\x60\\xdf\\x38\\x4c\\xd3\\x0b\\x7d\\x77\\x03\\xc2\\x45\\x3b\\ \\x66\\xa0\\x1f\\x84\\x3a\\xf1\\x70\\x9c\\x46\\x92\\x88\\x64\\x9c\\x02\\xab\\x15\\ \\xd1\\x04\\xf1\\x54\\x0a\\x4f\\x0f\\x85\\x13\\x82\\xba\\x22\\x16\\x1d\\x9b\\x42\\", "\\x2f\\x9e\\xb4\\x38\\x5b\\x67\\xc0\\xaf\\x77\\x43\\xcb\\x3f\\x40\\x17\\xc5\\x49\\ \\x09\\x86\\x31\\xab\\x23\\x10\\x8a\\x80\\x51\\x8b\\x75\\x63\\x3b\\x4d\\x43\\x20\\ \\x7b\\x0f\\x24\\xaf\\x32\\xe0\\xac\\x1b\\x38\\x0d\\xb4\\x81\\xcf\\x05\\x39\\x35\\ \\x30\\xf8\\x28\\xf4\\xf9\\x12\\x9a\\x16\\x40\\xc4\\x0b\\x1a\\x0d\\x94\\xd8\\x05\\ \\x46\\x57\\x60\\xf4\\xb4\\x20\\xd5\\xd6\\x4b\\x92\\x81\\x6d\\xc0\\xd2\\x12\\x08\\ \\x3c\\xe7\\x54\\xe6\\xc3\\xcd\\x29\\x22\\x18\\x94\\xf2\\x3d\\x09\\xf8\\x68\\x24\\ \\x15\\xe1\\xe6\\x00\\x60\\x3c\\x2e\\x34\\x06\\x98\\xb0\\xd7\\xc1\\xe9\\x61\\x60\\ \\x08\\x34\\x64\\x40\\xd6\\x63\\x60\\xec\\x02\\xfc\\x49\\x58\\x53\\x23\\xec\\xb8\\ \\x39\\xca\\xee\\x7e\\x10\\x31\\xe1\\xbc\\x6a\\x28\\xfd\\x15\\x92\\xc3\\x70\\x70\\ \\x3a\\x54\\x7b\\xa0\\x67\\x08\\x06\\x6c\\x00\\xef\\xcf\\x6e\\x62\\xbd\\x5d\\x44\\ \\x0b\\xc3\\x44\\x4a\\x21\\xad\\x19\\x8c\\x9f\\xbc\\xf0\\x4e\\x1a\\x54\\x5e\\x65\\ \\x0b\\x13\\x5d\\x9c\\x86\\xef\\x07\\xde\\xb7\\x20\\xfb\\x10\\x3c\\xbc\\x0d\\x06\\ \\xc7\\x61\\x25\\xb0\\x22\\x07\\xea\\x9e\\xb0\\x29\\xc8\\xa8\\x05\\x8d\\x27\\x48\\", "\\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0d\\x49\\x48\\x44\\x52\\x00\\ \\x00\\x00\\x32\\x00\\x00\\x00\\x32\\x08\\x06\\x00\\x00\\x00\\x1e\\x3f\\x88\\xb1\\ \\x00\\x00\\x00\\x06\\x62\\x4b\\x47\\x44\\x00\\xff\\x00\\xff\\x00\\xff\\xa0\\xbd\\ \\xa7\\x93\\x00\\x00\\x00\\x09\\x70\\x48\\x59\\x73\\x00\\x00\\x0b\\x13\\x00\\x00\\ 
\\x0b\\x13\\x01\\x00\\x9a\\x9c\\x18\\x00\\x00\\x00\\x07\\x74\\x49\\x4d\\x45\\x07\\ \\xe3\\x01\\x1e\\x09\\x34\\x07\\xfb\\x3d\\x97\\x4b\\x00\\x00\\x00\\x1d\\x69\\x54\\ \\x58\\x74\\x43\\x6f\\x6d\\x6d\\x65\\x6e\\x74\\x00\\x00\\x00\\x00\\x00\\x43\\x72\\ \\x65\\x61\\x74\\x65\\x64\\x20\\x77\\x69\\x74\\x68\\x20\\x47\\x49\\x4d\\x50\\x64\\ \\x2e\\x65\\x07\\x00\\x00\\x18\\xa1\\x49\\x44\\x41\\x54\\x68\\xde\\x35\\x99\\x67\\ \\x7c\\x55\\x55\\xfa\\xb6\\xaf\\xbd\\x4f\\x4b\\x4e\\x7a\\x25\\x09\\x25\\x09\\x09\\ \\x1d\\x44\\x91\\x26\\x8a\\xe8\\x20\\x4a\\x71\\x44\\x01\\xb1\\x2b\\x16\\xb0\\xcc\\ \\x60\\xc1\\xae\\xef\\x08\\x63\\x77\\xfe\\x3a\\xa2\\xa0\\x88\\x0c\\x16\\xac\\x88\\ \\x0a\\xa2\\xa0\\x02\\x4a\\x13\\x51\\x40\\x40\\x7a\\x42\\x4d\\x02\\x49\\x48\\x2f\\", "\\xc7\\xec\\x1b\\xc3\\xdd\\x28\\x5c\\x8b\\x4d\\x8c\\xd5\\xbd\\xa1\\xf1\\x6a\\xa0\\ \\x18\\x68\\x05\\xb2\\x80\\x1e\\xc0\\x66\\x48\\xff\\x11\\x46\\xee\\x04\\x3f\\xf0\\ \\xdd\\xe5\\x28\\xf2\\x36\\x27\\x29\\xc1\\x02\\x0a\\x68\\x04\\xc2\\xb4\\x51\\xc0\\ \\x29\\xa0\\xbb\\x51\\x49\\x81\\xf1\\x14\\x46\\x49\\x03\\xc6\\x45\\x42\\x5d\\x81\\ \\x66\\xc0\\x04\\x6e\\x06\\xa6\\x6e\\x80\\x84\\xc7\\x9d\\xcd\\x99\\x0a\\xca\\x85\\ \\x78\\x1d\\xb8\\xd7\\x02\\x95\\x69\\xd0\\x91\\xe5\\x54\\x98\\x68\\x0b\\x27\\x89\\ \\x58\\x0f\\xfc\\xc0\\xb6\\xf9\\x21\\x2e\\x3a\\x08\\xc6\\x38\\x2f\\xd4\\x74\\x07\\ \\x86\\x01\\x17\\x01\\x67\\xc0\\xa8\\x85\\x9e\\xab\\x88\\xdd\\xd6\\x8c\\xbb\\x05\\ \\xd8\\xe9\\x81\\x5f\\xde\\x06\\x75\\x01\\x0a\\xc1\\x58\\x05\\xd7\\x3e\\x0b\\x97\\ \\xc6\\xed\\x47\\xee\\x02\\xfe\\x04\\x36\\x4f\\x27\\xca\\x62\\x56\\x92\\x4e\\x77\\ \\x1b\\xd8\\xa4\\xb2\\x01\\x1f\\x75\\x98\\xf9\\x8f\\x42\\xcd\\x1c\\x5a\\xcc\\xe1\\ \\xb8\\x83\\x98\\x44\\xb0\\x68\\x02\\x7c\\xc0\\x1e\\xe0\\x9a\\x74\\xa0\\x08\\xa8\\ \\x05\\x16\\x79\\x30\\x82\\x83\\x70\\xd3\\x08\\xc9\\x95\\xd0\\x91\\xe8\\x14\\x60\\", "\\xde\\x59\\xe8\\x34\\x89\\x4b\\xa4\\x94\\x6e\\xaa\\x5d\\x6a\\xea\\x45\\xa1\\x2f\\ \\x2c\\x64\\x6d\\x41\\x2a\\xcd\\x75\\xf4\\xe3\\x11\\x89\\xd5\\x92\\xeb\\x7d\\xe9\\ 
\\xd2\\x54\\xe9\\x17\\xa4\\x61\\x45\\x12\\x1f\\x4a\\x54\\x2a\\xc6\\x46\\x29\\x6d\\ \\x8c\\xc4\\x9b\\xb2\\x58\\x2b\\xa5\\xa7\\x39\\x8b\\x3d\\x29\\x75\\x4f\\x56\\x94\\ \\xb5\\x3a\\x4c\\x4c\\x01\\x0e\\x4a\\xc6\\x33\\x8a\\xd2\\xa4\\x38\\x01\\x1d\\x24\\ \\xa6\\xf6\\xb1\\xcb\\xa4\\xf3\\x5d\\x8a\\xe6\\x4e\\x51\\x1d\\x3f\\xeb\\x3b\\xa4\\ \\xd5\\xc4\\xf4\\x11\\x12\\xf1\\xcf\\x91\\x1e\\x47\\x9a\\x99\\x2d\\xe5\\xf8\\xa5\\ \\x2b\\x4c\\xe9\\x5b\\x54\\x17\\x45\\xd6\\x03\\x6e\\xc9\\xdf\\x5d\\xe2\\x5c\\x7b\\ \\x66\\xe8\\xc9\\x97\\xae\\x34\\xf4\\xe9\\x09\\x7b\\x28\\xba\\x4a\\x68\\xb1\\xd0\\ \\xd9\\x16\\xd4\\xbc\\x1d\\xe9\\x2a\\xbf\\xc4\\x15\\x12\\xf3\\xd5\\x4a\\xab\\x2c\\ \\x8e\\x4b\\xb9\\x23\\xa5\\x67\\x90\\xae\\x2a\\x94\\x58\\xa9\\x06\\xa4\\x20\\x4b\\ \\xed\\xcb\\x0b\\xf6\\x4a\\xbc\\xab\\x38\\xfb\\x55\\x81\\x65\\x4f\\x79\\xf9\\x46\\ \\xed\\x04\\x74\\x9a\\x6a\\x1d\\x24\\xa4\\xad\\x48\\x11\\x3a\\x15\\x34\\x7f\\x94\\", "\\x5e\\xb7\\x21\\xf7\\x86\\x5b\\x9a\\xd4\\xdd\\xce\\xf4\\xec\\x95\\xa6\\xb9\\xa5\\ \\xbc\\xa1\\xb2\\x08\\x48\\x7d\\xc7\\x4b\\xf3\\x3d\\x92\\xb1\\x51\\x41\\x24\\x15\\ \\x0c\\x94\\x6e\\xf1\\xaa\\xd1\\xa8\\xd3\\x7e\\xa4\\xbd\\xc8\\x4e\\x9b\\xf7\\x64\\ \\x49\\x77\\x23\\x0d\\xef\\xa3\\x33\\x54\\xca\\x0c\\xb7\\x9b\\x04\\x01\\xcb\\xeb\\ \\x58\\x9f\\x6f\\x0d\\x4a\\xf6\\xc0\\x0a\\xa0\\x75\\xa2\\x23\\xc4\\x78\\xe1\\x93\\ \\x3c\\xa6\\xfd\\xdb\\x66\\xa9\\xe3\\xc0\\x67\\xe7\\xc2\\x2f\\x4f\\xc2\\xbe\\x12\\ \\x9b\\x1f\\x9a\\x12\\xa0\\x7a\\x11\\x34\\xac\\x87\\x23\\xef\\x74\\xe0\\xcd\\x84\\ \\x7e\\xd7\\x27\\x63\\x2c\\x98\\x06\\xb1\\x09\\xf6\\xce\\x1b\\xf5\\xf0\\xc8\\x12\\ \\x08\\xc5\\x61\\xcd\\x9d\\x0e\\xe7\\x6d\\x86\\x09\\x31\\xa8\\x1d\\x83\\x41\\x10\\ \\xda\\xf2\\x6c\\x37\\x61\\x96\\x11\\x02\\x38\\x3b\\x01\\x3c\\x11\\xd2\\x53\\xbe\\ \\xc7\\x72\\x7e\\x7b\\x3b\\x13\\x89\\xbf\\xf3\\x03\\x54\\xe6\\x41\\xef\\x32\\x0a\\ \\x86\\xf5\\xc0\\xd4\\x29\\x11\\x00\\x1a\\x8a\\x80\\x30\\xd0\\x2c\\xce\\x59\\x0a\\ \\xe7\\x00\\x5f\\xf4\\x05\\xa6\\x01\\x66\\x9d\\xed\\xa6\\xde\\x82\\x69\\x9f\\xd8\\", 
"\\x7a\\xce\\x1f\\xd6\\xdb\\xee\\x23\\xda\\x53\\x34\\x5f\\x9c\\xe8\\x40\\x0d\\xfb\\ \\x90\\x1e\\x42\\x7f\\xb6\\x23\\x6b\\x13\\xd2\\xf9\\xa6\\xc2\\xff\\x41\\x95\\xeb\\ \\x90\\xd2\\x3d\\x92\\xd1\\x53\\x2a\\x71\\xab\\x6a\\x13\\xfa\\x8f\\xd0\\xea\\xb3\\ \\x28\\x3a\\x16\\x89\\xf3\\x24\\x6e\\x92\\xb8\\x56\\x62\\x92\\x42\\x4f\\xb8\\x55\\ \\x65\\x21\\xdd\\x80\\xc4\\x1d\\x12\\x1f\\x49\\xfc\\x21\\x99\\x9f\\x4b\\x85\\x57\\ \\x4b\\x6b\\x90\\x26\\xba\\x25\\xde\\x95\\xc5\\x59\\x89\\x72\\x69\\x46\\x0f\\xa9\\ \\x70\\x8c\\xe2\\x54\\x2a\\x4a\\x48\\x3a\\xb7\\x97\\xc4\\x1a\\x95\\x63\\x29\\x98\\ \\xfb\\xb0\\xe4\\x9a\\xa5\\x83\\x58\\x6a\\x64\\x9b\\xe4\\x2f\\x52\\x83\\xf7\\x2b\\ \\xad\\xa1\\x55\\xe3\\x86\\x48\\x1f\\x12\\x50\\xa5\\x7b\\xad\\xf4\\xb8\\x21\\x2a\\ \\x6a\\xd1\\xce\\x2a\\xa4\\xb5\\x68\\xb3\\xec\\x82\\xac\\x6b\\xd1\\x9a\\x18\\xd2\\ \\x55\\x48\\xd9\\x48\\x69\\x86\\xaa\\x3f\\x44\\xff\\x16\\x3a\\xb5\\xc6\\x94\\x92\\ \\xd2\\x25\\x86\\x38\\x8b\\xbd\\x47\\x32\\x6e\\x56\\xe4\\xca\\x62\\x9d\\x6a\\x41\\ \\x9b\\x84\\x82\\x3f\\x20\\x25\\x96\\x38\\xef\\x7f\\x26\\xb1\\x4c\\xe2\\x5b\\x29\\", "qt_version = [int(v) for v in QtCore.qVersion().split('.')] if qt_version < [5, 8, 0]:", "\\x0d\\x09\\x10\\x6f\\x05\\x8e\\x02\\x9e\\x10\\xc4\\x60\\x67\\xa9\\x9d\\xfd\\xaf\\ \\xde\\x0d\\xe6\\x4a\\x0f\\x58\\x3d\\x1d\\x90\\x95\\x13\\x9b\\xf6\\x2d\\x1f\\xbd\\ \\x15\\xe4\\x88\\xdf\\x2e\\xa0\\x15\\x38\\x0b\\x1c\\x2f\\x82\\xd0\\x5c\\xa0\\xf0\\ \\x57\\x60\\x9d\\xcd\\x63\\x1a\\x0a\\xcb\\x06\\xd0\\x50\\x01\\x91\\x81\\x40\\xce\\ \\x61\\xe7\\x39\\xb9\\xd0\\x39\\xc8\\xc6\\x6e\\x04\\xc0\\xc0\\xcd\\x69\\x3b\\x68\\ \\xe5\\x00\\x47\\x52\\x69\\x89\\x5e\\x4e\\x10\\x48\\x21\\x88\\xcb\\x58\\x08\\xbe\\ \\x00\\x6c\\x01\\xf6\\x3e\\x41\\x2d\\xb9\\x44\\xf1\\x60\\x81\\x9d\\x11\\x93\\xca\\ \\x31\\xb7\\xba\\xe1\\x82\\x9f\\x81\\x02\\x03\\xa2\\x99\\x30\\x14\\x0e\\xa4\\xc0\\ \\x13\\xc7\\x21\\x7b\\x01\\x70\\x36\\xcb\\x09\\x43\\x89\\xc4\\x1f\\x28\\x63\\xe1\\ 
\\x0a\\x9b\\xc9\\x32\\x9d\\x30\\xd5\\xd5\\xe9\\x91\\x46\\xe0\\xc4\\x18\\xd0\\x83\\ \\x16\\x78\\xea\\x1c\\x21\\x1c\\x04\\xa1\\xfb\\xc8\\x9e\\x9f\\x41\\x4d\\x1e\\xc4\\ \\x5e\\x39\\xee\\xa4\\xc2\\x5c\\xb0\\xfa\\x40\\xb5\\xcb\\xce\\x57\\xb6\\x94\\xdb\\ \\x91\\x36\\x06\\xd6\\xc2\\x4b\\x39\\xa1\\x1c\\x2a\\x00\\x11\\x81\\x0b\\xde\\x82\\", "\\x9b\\x1f\\x9a\\x12\\xa0\\x7a\\x11\\x34\\xac\\x87\\x23\\xef\\x74\\xe0\\xcd\\x84\\ \\x7e\\xd7\\x27\\x63\\x2c\\x98\\x06\\xb1\\x09\\xf6\\xce\\x1b\\xf5\\xf0\\xc8\\x12\\ \\x08\\xc5\\x61\\xcd\\x9d\\x0e\\xe7\\x6d\\x86\\x09\\x31\\xa8\\x1d\\x83\\x41\\x10\\ \\xda\\xf2\\x6c\\x37\\x61\\x96\\x11\\x02\\x38\\x3b\\x01\\x3c\\x11\\xd2\\x53\\xbe\\ \\xc7\\x72\\x7e\\x7b\\x3b\\x13\\x89\\xbf\\xf3\\x03\\x54\\xe6\\x41\\xef\\x32\\x0a\\ \\x86\\xf5\\xc0\\xd4\\x29\\x11\\x00\\x1a\\x8a\\x80\\x30\\xd0\\x2c\\xce\\x59\\x0a\\ \\xe7\\x00\\x5f\\xf4\\x05\\xa6\\x01\\x66\\x9d\\xed\\xa6\\xde\\x82\\x69\\x9f\\xd8\\ \\x26\\xd3\\x72\\xf4\\xb9\\xcc\\x11\\x63\\x0b\\xd8\\xef\\x82\\x3d\\x3e\\xe8\\xd2\\ \\x00\\x99\\x17\\x16\\xc1\\xf2\\xa9\\xf6\\x26\\x50\\x0f\\xac\\x84\\xe9\\x6f\\xd0\\ \\xf1\\x58\\x1b\\x7c\\x9b\\xe1\\x38\\xe7\\x3a\\xb8\\xf8\\xbf\\x50\\x93\\xe8\\x10\\ \\x76\\x13\\x24\\xc8\\x76\\x2f\\x29\\x7b\\xc8\\xa0\\x02\\xe2\\xd7\\xc3\\x0f\\x17\\ \\x62\\x5e\\x3e\\x8b\\x62\\x3e\\xa0\\x17\\x10\\xc1\\xe4\\x20\\x43\\x09\\xad\\xdb\\ \\x08\\xcd\\x26\\x8c\\x01\\x33\\xa1\\x4c\\x8c\\x05\\x76\\x25\\x3a\\x8a\\x5e\\x0c\\", "\\x25\\x9b\\x5d\\x4c\\x22\\x89\\x8d\\x24\\x45\\x0b\\x68\\x8d\\x9c\\x8f\\x72\\xf7\\ \\xd1\\xb2\\xeb\\x72\\x9a\\xae\\xf6\\xb0\\x6a\\xfd\\x9d\\x7c\\xf8\\xa4\\xc1\\x23\\ \\xef\\x1c\\xa4\\xa5\\x7d\\x0a\\xff\\x1f\\xa7\\x48\\xb3\\x27\\x67\\x17\\xe2\\x1e\\ \\x00\\x00\\x00\\x00\\x49\\x45\\x4e\\x44\\xae\\x42\\x60\\x82\\ \" qt_resource_name = b\"\\ \\x00\\x05\\ \\x00\\x73\\x5e\\x63\\ \\x00\\x6c\\ \\x00\\x6f\\x00\\x67\\x00\\x6f\\x00\\x73\\ \\x00\\x08\\", "\\x30\\xf8\\x28\\xf4\\xf9\\x12\\x9a\\x16\\x40\\xc4\\x0b\\x1a\\x0d\\x94\\xd8\\x05\\ 
\\x46\\x57\\x60\\xf4\\xb4\\x20\\xd5\\xd6\\x4b\\x92\\x81\\x6d\\xc0\\xd2\\x12\\x08\\ \\x3c\\xe7\\x54\\xe6\\xc3\\xcd\\x29\\x22\\x18\\x94\\xf2\\x3d\\x09\\xf8\\x68\\x24\\ \\x15\\xe1\\xe6\\x00\\x60\\x3c\\x2e\\x34\\x06\\x98\\xb0\\xd7\\xc1\\xe9\\x61\\x60\\ \\x08\\x34\\x64\\x40\\xd6\\x63\\x60\\xec\\x02\\xfc\\x49\\x58\\x53\\x23\\xec\\xb8\\ \\x39\\xca\\xee\\x7e\\x10\\x31\\xe1\\xbc\\x6a\\x28\\xfd\\x15\\x92\\xc3\\x70\\x70\\ \\x3a\\x54\\x7b\\xa0\\x67\\x08\\x06\\x6c\\x00\\xef\\xcf\\x6e\\x62\\xbd\\x5d\\x44\\ \\x0b\\xc3\\x44\\x4a\\x21\\xad\\x19\\x8c\\x9f\\xbc\\xf0\\x4e\\x1a\\x54\\x5e\\x65\\ \\x0b\\x13\\x5d\\x9c\\x86\\xef\\x07\\xde\\xb7\\x20\\xfb\\x10\\x3c\\xbc\\x0d\\x06\\ \\xc7\\x61\\x25\\xb0\\x22\\x07\\xea\\x9e\\xb0\\x29\\xc8\\xa8\\x05\\x8d\\x27\\x48\\ \\x29\\x1e\\xca\\x70\\x73\\x14\\xa3\\xcf\\xab\\xd0\\xd2\\x0b\\xac\\x54\\x1a\\x1a\\ \\x5e\\xc2\\x68\\x8e\\xa3\\x0a\\x13\\x06\\xaf\\xb7\\xc5\\x96\\xdf\\x6d\\x24\\xd4\\ \\x7f\\x0c\\x1b\\x7d\\x30\\xfd\\x59\\xe0\\x95\\x3c\\x9b\\x3e\\x53\\x92\\x60\\xc8\\", "\\x8b\\x8d\\xb2\\xcc\\x0f\\x14\\xfc\\x0d\\xc5\\x6f\\xf0\\x49\\xcc\\x96\\xd8\\xa1\\ \\x28\\x87\\xa4\\xbf\\x77\\x97\\x26\\xa3\\x76\\x36\\xa9\\x9c\\x46\\x9d\\xa0\\x42\\ \\xed\\x4f\\xa5\\xc9\\x1a\\xd8\\x4f\\x07\\x8d\\x66\\x3d\\x52\\x70\\x4c\\x4a\\xfe\\ \\x51\\x67\\xb6\\xf8\\xb4\\xe3\\xdc\\x1b\\x34\\x3f\\x7f\\xa9\\x46\\x0e\\x8d\\xeb\\ \\x92\\xfc\\x72\\x99\\x59\\xde\\x36\\x82\\x35\\xc0\\x71\\xc8\\xf8\\x16\\x26\\x07\\ \\xa2\\xb8\\x3c\\x1e\\x20\\x09\\xc6\\x9d\\x65\\x75\\x29\\xcc\\xa9\\x8f\\x30\\xe4\\ \\x66\\xa0\\x2a\\x19\\x22\\x3e\\x88\\x87\\x69\\xb9\\x5e\\xfc\\x38\\x00\\xae\\x06\\ \\x1e\\x04\\x2e\\x8c\\xdb\\xd0\\x3a\\x5f\\x90\\x6c\\xc0\\x84\\x38\\x74\\xab\\x07\\ \\x1e\\x39\\x88\\xc6\\xfe\\x17\\x3f\\x2f\\x93\\xce\\x12\\x0c\\xda\\x31\\xac\\x3c\\ \\x12\\x67\\x8f\\x20\\x3e\\x35\\x0c\\x1c\\x00\\x22\\x18\\xf8\\xb0\\x36\\xf6\\x83\\ \\xc4\\x44\\x7c\\x74\\x12\\x25\\x4e\\xb1\\xf9\\x09\\xc9\\x2b\\xf2\\xe0\\xf0\\xcd\\ \\xf4\\x30\\x3b\\x99\\x68\\xee\\x87\\x0b\\xc6\\x53\\xf0\\xa6\\x20\\xdf\\x4b\\x81\\ 
\\x37\\x99\\xe2\\x16\\x93\\x7b\\x3c\\x07\\x31\\x17\\x0e\\xe9\\x44\\x61\\x60\\x00\\", "\\xbc\\xa1\\xb2\\x08\\x48\\x7d\\xc7\\x4b\\xf3\\x3d\\x92\\xb1\\x51\\x41\\x24\\x15\\ \\x0c\\x94\\x6e\\xf1\\xaa\\xd1\\xa8\\xd3\\x7e\\xa4\\xbd\\xc8\\x4e\\x9b\\xf7\\x64\\ \\x49\\x77\\x23\\x0d\\xef\\xa3\\x33\\x54\\xca\\x0c\\xb7\\x9b\\x04\\x01\\xcb\\xeb\\ \\x58\\x9f\\x6f\\x0d\\x4a\\xf6\\xc0\\x0a\\xa0\\x75\\xa2\\x23\\xc4\\x78\\xe1\\x93\\ \\x3c\\xa6\\xfd\\xdb\\x66\\xa9\\xe3\\xc0\\x67\\xe7\\xc2\\x2f\\x4f\\xc2\\xbe\\x12\\ \\x9b\\x1f\\x9a\\x12\\xa0\\x7a\\x11\\x34\\xac\\x87\\x23\\xef\\x74\\xe0\\xcd\\x84\\ \\x7e\\xd7\\x27\\x63\\x2c\\x98\\x06\\xb1\\x09\\xf6\\xce\\x1b\\xf5\\xf0\\xc8\\x12\\ \\x08\\xc5\\x61\\xcd\\x9d\\x0e\\xe7\\x6d\\x86\\x09\\x31\\xa8\\x1d\\x83\\x41\\x10\\ \\xda\\xf2\\x6c\\x37\\x61\\x96\\x11\\x02\\x38\\x3b\\x01\\x3c\\x11\\xd2\\x53\\xbe\\ \\xc7\\x72\\x7e\\x7b\\x3b\\x13\\x89\\xbf\\xf3\\x03\\x54\\xe6\\x41\\xef\\x32\\x0a\\ \\x86\\xf5\\xc0\\xd4\\x29\\x11\\x00\\x1a\\x8a\\x80\\x30\\xd0\\x2c\\xce\\x59\\x0a\\ \\xe7\\x00\\x5f\\xf4\\x05\\xa6\\x01\\x66\\x9d\\xed\\xa6\\xde\\x82\\x69\\x9f\\xd8\\ \\x26\\xd3\\x72\\xf4\\xb9\\xcc\\x11\\x63\\x0b\\xd8\\xef\\x82\\x3d\\x3e\\xe8\\xd2\\", "\\xb3\\x78\\xb7\\xf3\\x37\\x08\\x29\\xef\\x40\\x7b\\x8a\\x53\\x72\\x7f\\xe0\\x38\\ \\xe2\\x7a\\x0c\\x42\\xb6\\xbe\\xf8\\x1f\\x86\\x60\\x0c\\xb8\\x11\\xc8\\x43\\x0c\\ \\x25\\x46\\x04\\x8f\\x71\\x2d\\xf4\\xad\\x27\\x7c\\xf8\\x67\\x5a\\xc8\\x23\\x86\\ \\xa8\\xc5\\xc0\\xed\\xde\\xce\\xe0\\x41\\xf7\\x81\\x3e\\x46\\x65\\x01\\x14\\x38\\ \\x81\\x74\\x1c\\x6d\\x17\\xea\\xfc\\x1a\\x29\\x0d\\x69\\x80\\x5b\\x07\\x4e\\xa1\\ \\xfb\\x85\\xaa\\xca\\x90\\xce\\xc3\\x0e\\x58\\x8c\\x95\\x98\\x29\\x65\\x75\\x51\\ \\x60\\x17\\xfa\\x40\\x68\\x95\\x50\\x5d\\x25\\xd2\\x39\\xa9\\x12\\xe3\\x24\\x1e\\ \\x96\\x78\\x59\\xe2\\xff\\x6c\\x07\\x3c\\x7c\\xac\\xf4\\x32\\x52\\xde\\x60\\x89\\ \\x47\\x25\\x5e\\x93\\xf2\\xc6\\x49\\xfe\\xcb\\x24\\x96\\x4b\\x3c\\x23\\xf1\\x93\\ \\x94\\x76\\x8f\\x64\\x0e\\x74\\x2c\\xfe\\xcb\\x12\\xdf\\xe8\\x18\\x52\\x9c\\x46\\ 
\\x29\\x65\\x9c\\x54\\x92\\x24\\xb1\\x54\\xad\\x9c\\x50\\x35\\x31\\xad\\x45\\x6a\\ \\x20\\x2e\\x93\\xad\\xd0\\x1e\\x87\\xa4\\xa3\\xf6\\x49\\xe4\\xd4\\xc1\\x67\\x57\\ \\x02\\x8f\\x03\\xc7\\x0b\\x18\\xd0\\x2f\\x9f\\xe9\\x47\\xe0\\xa5\\xde\\xf0\\xc7\\", "\\x0c\\x30\\x3d\\x90\\x64\\x42\\xd0\\x70\\xd2\\x6b\\x1c\\x5c\\x07\\xc0\\xac\\xb0\\ \\xe1\\x6b\\x0d\\x00\\xb3\\xd5\\x89\\xd4\\x01\\x87\\x92\\xba\\x3b\\x8b\\x71\\x3b\\ \\xff\\x6f\\xea\\x03\\x4d\\x1d\\x30\\xa4\\x1a\\x4e\\x5a\\xf6\\xae\\xf5\\x07\\x56\\ \\x26\\x13\\xab\\x7e\\x04\\x77\\x47\\x0d\\x78\\xb7\\x62\\xfc\\xd3\\x42\\x27\\x0d\\ \\xe8\\x16\\x87\\x87\\x4f\\x42\\x61\\x1d\\x94\\xf7\\x87\\x25\\xe9\\x7f\\x99\\x67\\ \\x18\\xdb\\x09\\x17\\x1e\\x06\\x6f\\x1d\\xb8\\x9b\\x0c\\x4e\\x8d\\x16\\x47\\xbb\\ \\xc2\\x19\\xd3\\x2e\\x22\\x0b\\x28\\x8c\\x83\\xab\\x0d\\x7a\\x1d\\x36\\x71\\x9d\\ \\x4e\\xa2\\xa3\\x28\\x08\\xfd\\xe2\\x18\\xc9\\xd0\\x6a\\x40\\x72\\x04\\x32\\x0f\\ \\x01\\x6d\\x2e\\xf0\\x5a\\x90\\x21\\xac\\xae\\x60\\x76\\x02\\x27\\x9c\\x23\\x6d\\ \\x72\\x6e\\x17\\x9a\\x9c\\x4b\\xfa\\x46\\x03\\x12\\x65\\x53\\xe3\\x28\\xc7\\x9d\\ \\x1e\\x73\\x12\\x60\\x93\\x09\\xcd\\x16\\x04\\x4d\\xc8\\xb3\\x60\\x53\\x1b\\x9a\\ \\x25\\xd4\\xf4\\x15\\x52\\x96\\x7d\\x4d\\x1a\\x5e\\x63\\xcf\\x3c\\x1e\\x93\\x2d\\ \\x8e\\xd6\\xbd\\x48\\x45\\x09\\x92\\xbb\\xc4\\xf6\\x59\\x29\\xdd\\xd5\\xf2\\xab\\", "\\x53\\x40\\xa7\\x43\\x83\\x56\\x04\\xa8\\xc7\\xcf\\x59\\xb2\\x30\\x38\\x8b\\x9f\\ \\x6c\\x4e\\x72\\x3e\\x71\\x92\\x8c\\xf7\\x71\\xa9\\x1b\\x85\\x0c\\xc4\\x5d\\x48\\ \\x0c\\x37\\x50\\xee\\xb4\\xdd\\x2c\\x60\\xf8\\x77\\xc0\\x07\\xb6\\x25\\x22\\x16\\ \\xb5\\x51\\xb0\\xb4\\x88\\x9e\\x1f\\xb6\\xd3\\xd3\\x48\\x00\\xb7\\x0f\\x92\\x5b\\ \\xe1\\xfc\\x5a\\x62\\x33\\xe1\\xf4\\x14\\xfb\\x24\\x3d\\x40\\xe9\\x72\\x70\\xcf\\ \\x4d\\x83\\x53\\xbd\\x21\\x9e\\x47\\x12\\x9d\\xe0\\x09\\x40\\x34\\x19\\x62\\xfd\\ \\x9c\\x9e\\x6a\\x06\\x32\\x81\\xc1\\x50\\x57\\x85\\x7a\\x74\\x80\\x1b\\x8c\\x6c\\ \\xe7\\xad\\x0c\\xc0\\xed\\xc0\\xab\\xdc\\x07\\x64\\xe0\\x61\\x0f\\xd9\\xe4\\x13\\ 
\\xe7\\x02\\xdc\\x34\\x92\\x4a\\x10\\x94\\x0f\\x74\\xe0\\x36\\x77\\x61\\xf8\\x94\\ \\xa3\\xb1\\xd4\\x13\\x02\\xfa\\x02\\xcf\\x34\\x42\\x97\\xbb\\x80\\xad\\x29\\x30\\ \\xb9\\x9d\\xce\\xfb\\x21\\xbc\\x07\\xd2\\x3f\\x32\\xa0\\x5c\\x50\\xef\\x03\\x2b\\ \\x05\\xa2\\x3d\\xed\\x10\\x43\\x0b\\x3c\\xb7\\x8d\\xdf\\x9f\\x86\\xf4\\x3a\\xe8\\ \\x33\\xd5\\x80\\x6d\\x53\\x81\\xee\\x0e\\x36\\xdd\\x0e\\x5e\\x92\\x9d\\xf3\\x8e\\", "\\x8a\\xf1\\x95\\x94\\x3b\\x52\\xee\\x9e\\x31\\xbb\\xd8\\x8c\\x97\\x80\\xc6\\x5c\\ \\x68\\xac\\xa3\\x35\\xdf\\x6e\\xde\\x5e\\xc0\\xf4\\x27\\xc1\\x58\\xdc\\x17\\xac\\ \\x73\\x1c\\x00\\x45\\xb0\\x6e\\x69\\xe4\\x8f\\xc1\\xc2\\x72\\xfa\\x23\\x82\\x1d\\ \\x1a\\x39\\x05\\x28\\xc5\\x01\\x5e\\x21\\xec\\xbd\\x15\\xe3\\xb1\\x77\\xf1\\x2c\\ \\x0a\\x63\\xd4\\x00\\x87\\x46\\x3b\\x1e\\x6c\\x34\\x7c\\xeb\\xc7\\xf5\\xaf\\x4f\\ \\xed\\xd3\\xa8\\x79\\x9d\\x18\\x93\\x70\\xd7\\x1d\\x83\\xdd\\x83\\xa0\\xc0\\x24\\ \\xb6\\x6e\\x31\\x01\\x6e\\xc6\\x04\\x92\\x38\\x82\\xcb\\x5c\\x08\\x66\\x14\\xd6\\ \\x16\\x51\\x5b\\xbe\\x8d\\x72\\x32\\xd8\\x88\\x97\\x29\\x40\\xbf\\xba\\x7d\\x98\\ \\x0d\\x09\\x10\\x6f\\x05\\x8e\\x02\\x9e\\x10\\xc4\\x60\\x67\\xa9\\x9d\\xfd\\xaf\\ \\xde\\x0d\\xe6\\x4a\\x0f\\x58\\x3d\\x1d\\x90\\x95\\x13\\x9b\\xf6\\x2d\\x1f\\xbd\\ \\x15\\xe4\\x88\\xdf\\x2e\\xa0\\x15\\x38\\x0b\\x1c\\x2f\\x82\\xd0\\x5c\\xa0\\xf0\\ \\x57\\x60\\x9d\\xcd\\x63\\x1a\\x0a\\xcb\\x06\\xd0\\x50\\x01\\x91\\x81\\x40\\xce\\ \\x61\\xe7\\x39\\xb9\\xd0\\x39\\xc8\\xc6\\x6e\\x04\\xc0\\xc0\\xcd\\x69\\x3b\\x68\\", "\\x92\\xfc\\x72\\x99\\x59\\xde\\x36\\x82\\x35\\xc0\\x71\\xc8\\xf8\\x16\\x26\\x07\\ \\xa2\\xb8\\x3c\\x1e\\x20\\x09\\xc6\\x9d\\x65\\x75\\x29\\xcc\\xa9\\x8f\\x30\\xe4\\ \\x66\\xa0\\x2a\\x19\\x22\\x3e\\x88\\x87\\x69\\xb9\\x5e\\xfc\\x38\\x00\\xae\\x06\\ \\x1e\\x04\\x2e\\x8c\\xdb\\xd0\\x3a\\x5f\\x90\\x6c\\xc0\\x84\\x38\\x74\\xab\\x07\\ \\x1e\\x39\\x88\\xc6\\xfe\\x17\\x3f\\x2f\\x93\\xce\\x12\\x0c\\xda\\x31\\xac\\x3c\\ \\x12\\x67\\x8f\\x20\\x3e\\x35\\x0c\\x1c\\x00\\x22\\x18\\xf8\\xb0\\x36\\xf6\\x83\\ 
\\xc4\\x44\\x7c\\x74\\x12\\x25\\x4e\\xb1\\xf9\\x09\\xc9\\x2b\\xf2\\xe0\\xf0\\xcd\\ \\xf4\\x30\\x3b\\x99\\x68\\xee\\x87\\x0b\\xc6\\x53\\xf0\\xa6\\x20\\xdf\\x4b\\x81\\ \\x37\\x99\\xe2\\x16\\x93\\x7b\\x3c\\x07\\x31\\x17\\x0e\\xe9\\x44\\x61\\x60\\x00\\ \\x18\\xc5\\x30\\x39\\x0e\\x74\\x58\\xd0\\xe5\\x04\\xc1\\x5b\\xa1\\xd5\\x03\\x33\\ \\x16\\x00\\x1b\\x92\\x41\\x01\\x48\\x6d\\xe5\\xf4\\xdc\\x28\\xf7\\x3c\\x0d\\x1b\\ \\x80\\x33\\x67\\xa0\\xe8\\x13\\x48\\xfe\\x10\\xfa\\x76\\x40\\xb3\\x01\\x9e\\xf5\\ \\x90\\x38\\x2a\\x17\\x96\\x65\\x63\\x5a\\x10\\xfb\\x5f\\x13\\xfc\\xed\\x23\\xa0\\", "\\xa8\\xc5\\xc0\\xed\\xde\\xce\\xe0\\x41\\xf7\\x81\\x3e\\x46\\x65\\x01\\x14\\x38\\ \\x81\\x74\\x1c\\x6d\\x17\\xea\\xfc\\x1a\\x29\\x0d\\x69\\x80\\x5b\\x07\\x4e\\xa1\\ \\xfb\\x85\\xaa\\xca\\x90\\xce\\xc3\\x0e\\x58\\x8c\\x95\\x98\\x29\\x65\\x75\\x51\\ \\x60\\x17\\xfa\\x40\\x68\\x95\\x50\\x5d\\x25\\xd2\\x39\\xa9\\x12\\xe3\\x24\\x1e\\ \\x96\\x78\\x59\\xe2\\xff\\x6c\\x07\\x3c\\x7c\\xac\\xf4\\x32\\x52\\xde\\x60\\x89\\ \\x47\\x25\\x5e\\x93\\xf2\\xc6\\x49\\xfe\\xcb\\x24\\x96\\x4b\\x3c\\x23\\xf1\\x93\\ \\x94\\x76\\x8f\\x64\\x0e\\x74\\x2c\\xfe\\xcb\\x12\\xdf\\xe8\\x18\\x52\\x9c\\x46\\ \\x29\\x65\\x9c\\x54\\x92\\x24\\xb1\\x54\\xad\\x9c\\x50\\x35\\x31\\xad\\x45\\x6a\\ \\x20\\x2e\\x93\\xad\\xd0\\x1e\\x87\\xa4\\xa3\\xf6\\x49\\xe4\\xd4\\xc1\\x67\\x57\\ \\x02\\x8f\\x03\\xc7\\x0b\\x18\\xd0\\x2f\\x9f\\xe9\\x47\\xe0\\xa5\\xde\\xf0\\xc7\\ \\x3a\\x60\\x4c\\xf8\\x2f\\xb5\\x80\\x40\\x1b\\x49\\x6b\\xa1\\xaf\\x20\\x2f\\x0a\\ \\x39\\xb7\\x02\\xfb\\x66\\x01\\xf7\\x39\\xb2\\x1c\\x85\\xe4\\x23\\xe8\\xa3\\x65\\ \\xf0\\xf0\\x4f\\xf0\\xc2\\x30\\xa8\\x9d\\x65\\xbf\\x97\\xbf\\x0e\\xd2\\x76\\x42\\", "\\x02\\x8f\\x03\\xc7\\x0b\\x18\\xd0\\x2f\\x9f\\xe9\\x47\\xe0\\xa5\\xde\\xf0\\xc7\\ \\x3a\\x60\\x4c\\xf8\\x2f\\xb5\\x80\\x40\\x1b\\x49\\x6b\\xa1\\xaf\\x20\\x2f\\x0a\\ \\x39\\xb7\\x02\\xfb\\x66\\x01\\xf7\\x39\\xb2\\x1c\\x85\\xe4\\x23\\xe8\\xa3\\x65\\ \\xf0\\xf0\\x4f\\xf0\\xc2\\x30\\xa8\\x9d\\x65\\xbf\\x97\\xbf\\x0e\\xd2\\x76\\x42\\ 
\\xf0\\x5e\\x70\\x1d\\x87\\x82\\x57\\x6c\\x80\\x06\\x93\\x20\\x2b\\x44\\x8c\\xbe\\ \\x88\\x5c\\x70\\x2f\\x24\\x9d\\x3a\\x76\\x91\\x49\\x53\\xfb\\x67\\x50\\x97\\x06\\ \\xa3\\xee\\x23\\xd5\\x5c\\x00\\xec\\xe0\\x24\\xb0\\x18\\x13\\x53\\x33\\xe1\\x50\\ \\x2a\\xc4\\xdb\\xed\\x93\\xef\\xb1\\x11\\x2a\\x3c\\xd0\\x7e\\x1d\\x30\\xa8\\x0a\\ \\x3a\\xdd\\x8c\\x98\\xe5\\x61\\x58\\x1b\\x7c\\x92\\x0d\\xdf\\x7f\\x19\\x87\\x3b\\ \\x37\\x41\\xd6\\xef\\x90\\x17\\xc1\\x2a\\xb6\\x59\\xb2\\x7b\\x3b\\x70\\xc8\\x07\\ \\xc6\\x01\\x60\\x37\\xa4\\x7d\\x41\\x7c\\xee\\xeb\\xb4\\xfc\\xb1\\x14\\xce\\xc6\\ \\x61\\x76\\x16\\xb4\\x5f\\x64\\xab\\x6c\\xea\\x7a\\x98\\xb3\\x01\\x4e\\xcc\\xb1\\ \\xbb\\xac\\x74\\x83\\xcd\\x54\\x64\\x43\\xef\\x03\\x70\\xfe\\x59\\xe2\\xb4\\x12\\", "\\xa1\\xf8\\xcd\\x3e\\x89\\xc7\\x25\\x56\\x4a\\x3d\\x67\\x4b\\xcb\\x90\\x36\\x21\\ \\xdd\\xe4\\x96\\x98\\x22\\xf1\\x9e\\xc4\\x42\\x89\\xc9\\xd2\\xa7\\x48\\x2c\\x95\\ \\x28\\x73\\xe0\\x85\\x2c\\x5e\\x56\\x05\\x96\\xda\\x78\\x45\\x4a\\x19\\xae\\x06\\ \\xa4\\x2a\\x1a\\x14\\x4b\\xb9\\x43\\xba\\x20\\x59\\x61\\xe3\\x65\\xfd\\xb3\\x58\\ \\x7a\\xce\\x1f\\xd6\\xdb\\xee\\x23\\xda\\x53\\x34\\x5f\\x9c\\xe8\\x40\\x0d\\xfb\\ \\x90\\x1e\\x42\\x7f\\xb6\\x23\\x6b\\x13\\xd2\\xf9\\xa6\\xc2\\xff\\x41\\x95\\xeb\\ \\x90\\xd2\\x3d\\x92\\xd1\\x53\\x2a\\x71\\xab\\x6a\\x13\\xfa\\x8f\\xd0\\xea\\xb3\\ \\x28\\x3a\\x16\\x89\\xf3\\x24\\x6e\\x92\\xb8\\x56\\x62\\x92\\x42\\x4f\\xb8\\x55\\ \\x65\\x21\\xdd\\x80\\xc4\\x1d\\x12\\x1f\\x49\\xfc\\x21\\x99\\x9f\\x4b\\x85\\x57\\ \\x4b\\x6b\\x90\\x26\\xba\\x25\\xde\\x95\\xc5\\x59\\x89\\x72\\x69\\x46\\x0f\\xa9\\ \\x70\\x8c\\xe2\\x54\\x2a\\x4a\\x48\\x3a\\xb7\\x97\\xc4\\x1a\\x95\\x63\\x29\\x98\\ \\xfb\\xb0\\xe4\\x9a\\xa5\\x83\\x58\\x6a\\x64\\x9b\\xe4\\x2f\\x52\\x83\\xf7\\x2b\\ \\xad\\xa1\\x55\\xe3\\x86\\x48\\x1f\\x12\\x50\\xa5\\x7b\\xad\\xf4\\xb8\\x21\\x2a\\", "\\xc0\\x2f\\x48\\x2f\\x21\\x8d\\xf4\\x48\\x5d\\x8b\\xa4\\x3b\\x0d\\x35\\x1c\\xb2\\ \\x73\\xfa\\x1b\\x16\\xda\\xd8\\x89\\x82\\xb5\\xa8\\xea\\x18\\x7a\\xc1\\x42\\xcf\\ 
\\x0b\\xad\\x13\\xfa\\x3d\\x8c\\xe2\\x9b\\x0d\\x75\\x9c\\x34\\xa4\\xa3\\xc8\\x6a\\ \\xb4\\x19\\x50\\x05\\xdd\\x25\\x63\\x92\\xc4\\x08\\x89\\x19\\xd2\\xf8\\x02\\xe9\\ \\xff\\x21\\x79\\xee\\x90\\x8c\\xcf\\x25\\xe3\\x4a\\xbb\\x7f\\x8c\\x6a\\xc9\\x78\\ \\x47\\x32\\x26\\x48\\xac\\x90\\xd8\\xa7\\xbd\\x48\\x11\\x9e\\x95\\xd8\\xa9\\x72\\ \\xa2\\xb2\\x58\\xa7\\x36\\x0e\\x4a\\x2c\\xd7\\x7a\\xb3\\x5c\\xa1\\xae\\x77\\xa9\\ \\xf9\\xfa\\xb1\\xba\\xb6\\xfc\\x2e\\x69\\xd0\\x10\\x19\\x0d\\x7c\\xab\\xe5\\x13\\ \\x5d\\xdc\\x64\\x3e\\x40\\xda\\xaf\\x03\\xc0\\xbf\\x0e\\xba\\x75\\xf2\\xf5\\xe7\\ \\xa2\\xc7\\x06\\x38\\x5a\\x6b\\x32\\xfe\\x50\\x1a\\x19\\xd5\\x51\\x68\\x4f\\x83\\ \\x84\\x28\\xd4\\xd6\\xd1\\xf9\\x3c\\x6c\\x19\\x0f\\x7d\\x2c\\xe8\\xfe\\x0a\\xb8\\ \\xde\\x99\\x02\\x6d\\x57\\x41\\xe6\\x87\\x90\\xe1\\x42\\x97\\x54\\x41\\x71\\x19\\ \\x9d\\x57\\x99\\xf8\\xdf\\x4b\\x41\\x8b\\x2e\\x27\\x16\\xbe\\x06\\x4f\\xc6\\x6b\\", "\\xa4\\xbf\\xdf\\x1b\\x7a\\xf9\\x61\\xdb\\x34\\x92\\x7e\\x18\\x86\\xf9\\x51\\x09\\ \\x34\\x8c\\x04\\xa6\\xdb\\x73\\x0c\\x8e\\x42\\xd1\\x01\\xc8\\xfc\\x9e\\xc8\\xa4\\ \\xa3\\x54\\x4e\\x6f\\x64\\x76\\x29\\xdc\\x0e\\x64\\xee\\x04\\xea\\xea\\x31\\x6b\\ \\x4e\\x60\\x5a\\x51\\x4c\\xd7\\x6f\\xa0\\x6e\\x50\\x6f\\x40\\xdd\\x5d\\x58\\xa7\\ \\xfb\\x62\\x5a\\x3e\\xc4\\x4c\\x0c\\xd2\\x70\\xf1\\x7f\\xd0\\x77\\x33\\x9c\\x13\\ \\xc4\\x5d\\x0f\\xfe\\x3a\\x70\\x6f\\x07\\xe3\\xb8\\xe3\\x6c\\x7a\\x54\\x91\\xbe\\ \\x25\\x9b\\x5d\\x4c\\x22\\x89\\x8d\\x24\\x45\\x0b\\x68\\x8d\\x9c\\x8f\\x72\\xf7\\ \\xd1\\xb2\\xeb\\x72\\x9a\\xae\\xf6\\xb0\\x6a\\xfd\\x9d\\x7c\\xf8\\xa4\\xc1\\x23\\ \\xef\\x1c\\xa4\\xa5\\x7d\\x0a\\xff\\x1f\\xa7\\x48\\xb3\\x27\\x67\\x17\\xe2\\x1e\\ \\x00\\x00\\x00\\x00\\x49\\x45\\x4e\\x44\\xae\\x42\\x60\\x82\\ \" qt_resource_name =", "\\x86\\xf5\\xc0\\xd4\\x29\\x11\\x00\\x1a\\x8a\\x80\\x30\\xd0\\x2c\\xce\\x59\\x0a\\ \\xe7\\x00\\x5f\\xf4\\x05\\xa6\\x01\\x66\\x9d\\xed\\xa6\\xde\\x82\\x69\\x9f\\xd8\\ \\x26\\xd3\\x72\\xf4\\xb9\\xcc\\x11\\x63\\x0b\\xd8\\xef\\x82\\x3d\\x3e\\xe8\\xd2\\ 
\\x00\\x99\\x17\\x16\\xc1\\xf2\\xa9\\xf6\\x26\\x50\\x0f\\xac\\x84\\xe9\\x6f\\xd0\\ \\xf1\\x58\\x1b\\x7c\\x9b\\xe1\\x38\\xe7\\x3a\\xb8\\xf8\\xbf\\x50\\x93\\xe8\\x10\\ \\x76\\x13\\x24\\xc8\\x76\\x2f\\x29\\x7b\\xc8\\xa0\\x02\\xe2\\xd7\\xc3\\x0f\\x17\\ \\x62\\x5e\\x3e\\x8b\\x62\\x3e\\xa0\\x17\\x10\\xc1\\xe4\\x20\\x43\\x09\\xad\\xdb\\ \\x08\\xcd\\x26\\x8c\\x01\\x33\\xa1\\x4c\\x8c\\x05\\x76\\x25\\x3a\\x8a\\x5e\\x0c\\ \\xbe\\xcd\\xf0\\x78\\x15\\x6c\\x04\\x0e\\xdd\\x08\\x64\\x05\\x6c\\xb5\\x68\\xea\\ \\x81\\xe7\\x49\\x17\\x33\\xd7\\xc3\\x20\\xcb\\x76\\xcc\\x3e\\xa7\\x10\\x0f\\x10\\ \\x01\\x86\\x1e\\x87\\x8c\\xcb\\xdc\\x70\\xf4\\xaf\\xe0\\xeb\\x05\\x1a\\xe1\\xf2\\ \\xfd\\xe8\\xe5\\x0e\\xfc\\x2f\\x02\\xdb\\x6f\\x03\\xae\\xb0\\xe9\\x76\\x64\\x3d\\ \\x54\\x25\\x3a\\x2a\\x6f\\xc1\\x59\\xcb\\x66\\xea\\xdc\\x1d\\x58\\x18\\x74\\xd0\\", "\\xe3\\x89\\x31\\xc4\\x9b\\x5f\\x76\\xac\\x6c\\x22\\x2e\\x8e\\x41\\xff\\x77\\xed\\ \\xbc\\x9f\\x93\\x83\\x36\\x8e\\xc2\\xe0\\x12\\x3a\\xb8\\x80\\x24\\xbe\\xb0\\x83\\ \\xd7\\xcd\\x4b\\x09\\x2f\\x81\\x76\\x13\\x52\\x0c\\xf0\\xec\\x01\\xf3\\x43\\xe0\\ \\xb7\\xf1\\x54\\xec\\xfb\\x37\\xf9\\xb1\\x12\\x02\\x34\\x93\\xe9\\xdf\\x0d\\xe6\\ \\xd7\\x90\\x60\\xc1\\xed\\x2b\\xa8\\x5e\\xf7\\x22\\x4b\\x0a\\x07\\x91\\x5b\\xe6\\ \\xa1\\x3c\\x2d\\xc6\\x4b\\x67\\x42\\xb8\\xae\\x6e\\x7e\\x73\\x5e\\x72\\xaf\\x6d\\ \\xa4\\xbf\\xdf\\x1b\\x7a\\xf9\\x61\\xdb\\x34\\x92\\x7e\\x18\\x86\\xf9\\x51\\x09\\ \\x34\\x8c\\x04\\xa6\\xdb\\x73\\x0c\\x8e\\x42\\xd1\\x01\\xc8\\xfc\\x9e\\xc8\\xa4\\ \\xa3\\x54\\x4e\\x6f\\x64\\x76\\x29\\xdc\\x0e\\x64\\xee\\x04\\xea\\xea\\x31\\x6b\\ \\x4e\\x60\\x5a\\x51\\x4c\\xd7\\x6f\\xa0\\x6e\\x50\\x6f\\x40\\xdd\\x5d\\x58\\xa7\\ \\xfb\\x62\\x5a\\x3e\\xc4\\x4c\\x0c\\xd2\\x70\\xf1\\x7f\\xd0\\x77\\x33\\x9c\\x13\\ \\xc4\\x5d\\x0f\\xfe\\x3a\\x70\\x6f\\x07\\xe3\\xb8\\xe3\\x6c\\x7a\\x54\\x91\\xbe\\ \\x25\\x9b\\x5d\\x4c\\x22\\x89\\x8d\\x24\\x45\\x0b\\x68\\x8d\\x9c\\x8f\\x72\\xf7\\", "\\x93\\x42\\x94\\x44\\x0e\\x63\\x31\\x91\\xfa\\x9a\\x67\\x68\\xe7\\x26\\x16\\x58\\ 
\\xc9\\xb8\\x5d\\xce\\x77\\xe5\\x34\\xea\\x21\\x60\\x7b\\x29\\x8c\\xbd\\x0c\\xc8\\ \\x05\\xd6\\x47\\xa1\\xf2\\x28\\x14\\xc3\\xe9\\x3b\\x0c\\x62\\x45\\xb5\\xc4\\x32\\ \\x6a\\x09\\xf9\\x21\\xec\\x03\\x9f\\x0f\\x3c\\xfd\\xa0\\xc6\\x03\\x41\\xa0\\x3c\\ \\x0f\\x0a\\xbf\\x12\\xe9\\x5b\\x1a\\x61\\xc3\\x17\\xf0\\xe7\\xaf\\xd0\\x2c\\xa8\\ \\x2e\\x80\\xa6\\x49\\xc0\\x14\\xec\\x4f\\x36\\x00\\x27\\x81\\xef\\x60\\x76\\x0d\\ \\xfc\\xd3\\x81\\x45\\x14\\x38\\x88\\xcd\\x1e\\x06\\xe0\\x8b\\x62\\x10\\x26\\x8f\\ \\x18\\xb5\\x24\\xd0\\x8c\\x41\\x3a\\xb3\\x39\\x0c\\x84\\x28\\xa1\\x37\\x70\\x15\\ \\xe0\\x6e\\xc0\\xc4\\x87\\x45\\xcc\\x39\\x91\\x62\\x20\\x25\\xe8\\x3c\\x34\\x05\\ \\x88\\x79\\xc0\\xf4\\xc2\\xe8\\x36\\x22\\xb7\\x59\\x54\\x03\\x1d\\x06\\xb8\\xbd\\ \\xa0\\x64\\x68\\xf4\\xd8\\x20\\xf3\\x3b\\x48\\xf7\\x01\\x4d\\x09\\x10\\xbd\\x10\\ \\x32\\x87\\x05\\x09\\xb9\\xcb\\xf0\\x76\\x82\\xe7\\x87\\x72\\x98\\xb7\\x1b\\x6a\\ \\x9f\\x71\\x8e\\x7b\\xa7\\x8d\\x9d\\x8b\\x6a\\x60\\x2c\\xd0\\xe2\\xf4\\x7b\\xb2\\", "\\xa2\\x08\\xeb\\x74\\x77\\x9e\\xe4\\x9a\\xdb\\x9f\\x79\\xc6\\xda\\x14\\xd8\\x38\\ \\x19\\x92\\x8f\\xc1\\xe8\\x30\\xd1\\x2b\\x0d\\x52\\x9e\\xea\\x86\\xf1\\x45\\x02\\ \\x14\\x89\\xc8\\x05\\x89\\x54\\xbd\\xb8\\x8f\\xa5\\x8f\\x07\\xf8\\xfc\\x21\\xe1\\ \\xca\\xb0\\xc5\\x32\\xd9\\x21\\xcd\\xfa\\x20\\x44\\x4a\\x81\\x1b\\xa2\\xf8\\x87\\ \\x9f\\x80\\xd0\\x16\\x12\\x2a\\x4f\\x41\\xd4\\x87\\xd1\\x31\\x02\\xca\\x2c\\x22\\ \\xb3\\x77\\x11\\x9d\\xba\\x12\\xef\\xba\\x18\\x66\\x74\\x24\\xee\\x3e\\x9f\\x41\\ \\x53\\x00\\xb8\\x80\\x28\\x25\\xf8\\x68\\xc6\\x4b\\x39\\x71\\xd2\\x30\\xc8\\xe6\\ \\x77\\xba\\x10\\x26\\xca\\x20\\x76\\xe1\\x3f\\x6f\\x01\\xee\\xd0\\x49\\xde\\xf0\\ \\xcd\\x23\\xd2\\x69\\xb1\\xc2\\x5f\\xcc\\x65\\x9d\\xd5\\x84\\x5b\\x45\\xa3\\xbf\\ \\x81\\x98\\x75\\x02\\xde\\xdf\\x79\\x95\\x46\\x6d\\x7b\\x4f\\xd7\\x1c\\x9a\\xad\\ \\xc0\\x2f\\x48\\x2f\\x21\\x8d\\xf4\\x48\\x5d\\x8b\\xa4\\x3b\\x0d\\x35\\x1c\\xb2\\ \\x73\\xfa\\x1b\\x16\\xda\\xd8\\x89\\x82\\xb5\\xa8\\xea\\x18\\x7a\\xc1\\x42\\xcf\\ 
\\x0b\\xad\\x13\\xfa\\x3d\\x8c\\xe2\\x9b\\x0d\\x75\\x9c\\x34\\xa4\\xa3\\xc8\\x6a\\", "\\x90\\x38\\x2a\\x17\\x96\\x65\\x63\\x5a\\x10\\xfb\\x5f\\x13\\xfc\\xed\\x23\\xa0\\ \\x02\\x38\\x06\\x04\\x60\\xdf\\x38\\x4c\\xd3\\x0b\\x7d\\x77\\x03\\xc2\\x45\\x3b\\ \\x66\\xa0\\x1f\\x84\\x3a\\xf1\\x70\\x9c\\x46\\x92\\x88\\x64\\x9c\\x02\\xab\\x15\\ \\xd1\\x04\\xf1\\x54\\x0a\\x4f\\x0f\\x85\\x13\\x82\\xba\\x22\\x16\\x1d\\x9b\\x42\\ \\x63\\xd5\\xc5\\xb4\\x9b\\x01\\xc2\\xed\\x43\\xe1\\x93\\x76\\xd4\\xb1\\x12\\xe9\\ \\x69\\x74\\xa8\\x19\\x6d\\xaf\\x40\\xba\\x18\\x59\\x43\\xd0\\xd3\\x21\\x54\\xbf\\ \\x18\\xc9\\x9f\\x2a\\xb9\\xfc\\x52\\x8a\\xa9\\x8a\\x15\\x86\\xae\\x10\\xfa\\x57\\ \\x0c\\x35\\xcd\\x46\\x56\\x4f\\xa4\\x81\\x09\\x52\\x41\\x9a\\xa2\\x8b\\xd0\\x71\\ \\xa1\\xf8\\xcd\\x3e\\x89\\xc7\\x25\\x56\\x4a\\x3d\\x67\\x4b\\xcb\\x90\\x36\\x21\\ \\xdd\\xe4\\x96\\x98\\x22\\xf1\\x9e\\xc4\\x42\\x89\\xc9\\xd2\\xa7\\x48\\x2c\\x95\\ \\x28\\x73\\xe0\\x85\\x2c\\x5e\\x56\\x05\\x96\\xda\\x78\\x45\\x4a\\x19\\xae\\x06\\ \\xa4\\x2a\\x1a\\x14\\x4b\\xb9\\x43\\xba\\x20\\x59\\x61\\xe3\\x65\\xfd\\xb3\\x58\\ \\x7a\\xce\\x1f\\xd6\\xdb\\xee\\x23\\xda\\x53\\x34\\x5f\\x9c\\xe8\\x40\\x0d\\xfb\\", "\\x6c\\x4e\\x72\\x3e\\x71\\x92\\x8c\\xf7\\x71\\xa9\\x1b\\x85\\x0c\\xc4\\x5d\\x48\\ \\x0c\\x37\\x50\\xee\\xb4\\xdd\\x2c\\x60\\xf8\\x77\\xc0\\x07\\xb6\\x25\\x22\\x16\\ \\xb5\\x51\\xb0\\xb4\\x88\\x9e\\x1f\\xb6\\xd3\\xd3\\x48\\x00\\xb7\\x0f\\x92\\x5b\\ \\xe1\\xfc\\x5a\\x62\\x33\\xe1\\xf4\\x14\\xfb\\x24\\x3d\\x40\\xe9\\x72\\x70\\xcf\\ \\x4d\\x83\\x53\\xbd\\x21\\x9e\\x47\\x12\\x9d\\xe0\\x09\\x40\\x34\\x19\\x62\\xfd\\ \\x9c\\x9e\\x6a\\x06\\x32\\x81\\xc1\\x50\\x57\\x85\\x7a\\x74\\x80\\x1b\\x8c\\x6c\\ \\xe7\\xad\\x0c\\xc0\\xed\\xc0\\xab\\xdc\\x07\\x64\\xe0\\x61\\x0f\\xd9\\xe4\\x13\\ \\xe7\\x02\\xdc\\x34\\x92\\x4a\\x10\\x94\\x0f\\x74\\xe0\\x36\\x77\\x61\\xf8\\x94\\ \\xa3\\xb1\\xd4\\x13\\x02\\xfa\\x02\\xcf\\x34\\x42\\x97\\xbb\\x80\\xad\\x29\\x30\\ \\xb9\\x9d\\xce\\xfb\\x21\\xbc\\x07\\xd2\\x3f\\x32\\xa0\\x5c\\x50\\xef\\x03\\x2b\\ 
\\x05\\xa2\\x3d\\xed\\x10\\x43\\x0b\\x3c\\xb7\\x8d\\xdf\\x9f\\x86\\xf4\\x3a\\xe8\\ \\x33\\xd5\\x80\\x6d\\x53\\x81\\xee\\x0e\\x36\\xdd\\x0e\\x5e\\x92\\x9d\\xf3\\x8e\\ \\xd9\\xcd\\x4f\\x9e\\xa3\\x38\\x1f\\xc3\\xa2\\xb7\\xd1\\x35\\x60\\xec\\x75\\x5a\\", "Created by: The Resource Compiler for PyQt5 (Qt v5.12.5) # # WARNING! All", "\\xe7\\x00\\x5f\\xf4\\x05\\xa6\\x01\\x66\\x9d\\xed\\xa6\\xde\\x82\\x69\\x9f\\xd8\\ \\x26\\xd3\\x72\\xf4\\xb9\\xcc\\x11\\x63\\x0b\\xd8\\xef\\x82\\x3d\\x3e\\xe8\\xd2\\ \\x00\\x99\\x17\\x16\\xc1\\xf2\\xa9\\xf6\\x26\\x50\\x0f\\xac\\x84\\xe9\\x6f\\xd0\\ \\xf1\\x58\\x1b\\x7c\\x9b\\xe1\\x38\\xe7\\x3a\\xb8\\xf8\\xbf\\x50\\x93\\xe8\\x10\\ \\x76\\x13\\x24\\xc8\\x76\\x2f\\x29\\x7b\\xc8\\xa0\\x02\\xe2\\xd7\\xc3\\x0f\\x17\\ \\x62\\x5e\\x3e\\x8b\\x62\\x3e\\xa0\\x17\\x10\\xc1\\xe4\\x20\\x43\\x09\\xad\\xdb\\ \\x08\\xcd\\x26\\x8c\\x01\\x33\\xa1\\x4c\\x8c\\x05\\x76\\x25\\x3a\\x8a\\x5e\\x0c\\ \\xbe\\xcd\\xf0\\x78\\x15\\x6c\\x04\\x0e\\xdd\\x08\\x64\\x05\\x6c\\xb5\\x68\\xea\\ \\x81\\xe7\\x49\\x17\\x33\\xd7\\xc3\\x20\\xcb\\x76\\xcc\\x3e\\xa7\\x10\\x0f\\x10\\ \\x01\\x86\\x1e\\x87\\x8c\\xcb\\xdc\\x70\\xf4\\xaf\\xe0\\xeb\\x05\\x1a\\xe1\\xf2\\ \\xfd\\xe8\\xe5\\x0e\\xfc\\x2f\\x02\\xdb\\x6f\\x03\\xae\\xb0\\xe9\\x76\\x64\\x3d\\ \\x54\\x25\\x3a\\x2a\\x6f\\xc1\\x59\\xcb\\x66\\xea\\xdc\\x1d\\x58\\x18\\x74\\xd0\\ \\x07\\x6a\\x27\\x41\\x69\\x8c\\x94\\x21\\x8f\\x30\\x90\\x1a\\x92\\x9c\\xdf\\x5c\\", "\\xea\\x31\\x5c\\xf1\\x5f\\x91\\x75\\x10\\xe9\\xbc\\x14\\x27\\xc6\\xbe\\x69\\x9f\\ \\xc4\\x7d\\x49\\xd2\\x55\\x99\\x4e\\xbc\\x9d\\x2a\\x3d\\x89\\xc4\\x74\\x89\\xf7\\ \\xa5\\x31\\xbd\\xa5\\xae\\x97\\x49\\xac\\x52\\x88\\x36\\xa9\\x47\\x81\\xfd\\x3a\\ \\x35\\x8a\\x52\\x27\\xb1\\x5c\\x16\\x6f\\x48\\x4f\\xa1\\x78\\xca\\x75\\x6a\\x4a\\ \\x78\\x49\\x35\\x74\\x68\\x8e\\x4b\\x42\\x59\\x3e\\x59\\xef\\xa1\\xda\\x00\\x8a\\ \\x4f\\x4f\\x96\\x28\\x96\\xae\\x40\\xc1\\xfd\\xf6\\x04\\x2a\\xb0\\x14\\xc9\\x3f\\ \\xde\\x59\\xe8\\x34\\x89\\x4b\\xa4\\x94\\x6e\\xaa\\x5d\\x6a\\xea\\x45\\xa1\\x2f\\ 
\\x2c\\x64\\x6d\\x41\\x2a\\xcd\\x75\\xf4\\xe3\\x11\\x89\\xd5\\x92\\xeb\\x7d\\xe9\\ \\xd2\\x54\\xe9\\x17\\xa4\\x61\\x45\\x12\\x1f\\x4a\\x54\\x2a\\xc6\\x46\\x29\\x6d\\ \\x8c\\xc4\\x9b\\xb2\\x58\\x2b\\xa5\\xa7\\x39\\x8b\\x3d\\x29\\x75\\x4f\\x56\\x94\\ \\xb5\\x3a\\x4c\\x4c\\x01\\x0e\\x4a\\xc6\\x33\\x8a\\xd2\\xa4\\x38\\x01\\x1d\\x24\\ \\xa6\\xf6\\xb1\\xcb\\xa4\\xf3\\x5d\\x8a\\xe6\\x4e\\x51\\x1d\\x3f\\xeb\\x3b\\xa4\\ \\xd5\\xc4\\xf4\\x11\\x12\\xf1\\xcf\\x91\\x1e\\x47\\x9a\\x99\\x2d\\xe5\\xf8\\xa5\\", "\\xd9\\x4d\\x45\\x1c\\xf8\\xc2\\xfe\\x21\\xcf\\x09\\x98\\xf9\\x13\\x5c\\xe9\\x3c\\ \\x36\\xd9\\xf9\\xea\\x70\\xc0\\xb7\\x06\\xf7\\xba\\xc5\\x0c\\xe6\\x01\\xd2\\x71\\ \\x93\\x42\\x94\\x44\\x0e\\x63\\x31\\x91\\xfa\\x9a\\x67\\x68\\xe7\\x26\\x16\\x58\\ \\xc9\\xb8\\x5d\\xce\\x77\\xe5\\x34\\xea\\x21\\x60\\x7b\\x29\\x8c\\xbd\\x0c\\xc8\\ \\x05\\xd6\\x47\\xa1\\xf2\\x28\\x14\\xc3\\xe9\\x3b\\x0c\\x62\\x45\\xb5\\xc4\\x32\\ \\x6a\\x09\\xf9\\x21\\xec\\x03\\x9f\\x0f\\x3c\\xfd\\xa0\\xc6\\x03\\x41\\xa0\\x3c\\ \\x0f\\x0a\\xbf\\x12\\xe9\\x5b\\x1a\\x61\\xc3\\x17\\xf0\\xe7\\xaf\\xd0\\x2c\\xa8\\ \\x2e\\x80\\xa6\\x49\\xc0\\x14\\xec\\x4f\\x36\\x00\\x27\\x81\\xef\\x60\\x76\\x0d\\ \\xfc\\xd3\\x81\\x45\\x14\\x38\\x88\\xcd\\x1e\\x06\\xe0\\x8b\\x62\\x10\\x26\\x8f\\ \\x18\\xb5\\x24\\xd0\\x8c\\x41\\x3a\\xb3\\x39\\x0c\\x84\\x28\\xa1\\x37\\x70\\x15\\ \\xe0\\x6e\\xc0\\xc4\\x87\\x45\\xcc\\x39\\x91\\x62\\x20\\x25\\xe8\\x3c\\x34\\x05\\ \\x88\\x79\\xc0\\xf4\\xc2\\xe8\\x36\\x22\\xb7\\x59\\x54\\x03\\x1d\\x06\\xb8\\xbd\\ \\xa0\\x64\\x68\\xf4\\xd8\\x20\\xf3\\x3b\\x48\\xf7\\x01\\x4d\\x09\\x10\\xbd\\x10\\", "\\xa4\\xc5\\xd1\\x69\\x01\\x3d\\x7f\\x04\\x4e\\x7b\\x9c\\x73\\xaa\\xb2\\xd3\\xcd\\ \\xf0\\xd5\\xb4\\x7d\\xd0\\x48\\x3c\\x03\\x8c\\x4f\\x81\\xf6\\x5c\\xe0\\x24\\x06\\ \\xeb\\x60\\xfa\\xff\\xa0\\x17\\x50\\x59\\x07\\xfc\\x66\\x7b\\xf0\\xdc\\xd3\\xb6\\ \\xb5\\xca\\xe9\\xc0\\xc7\\x5e\\x52\\xc1\\x16\\xe1\\x8f\\x53\\xed\\xbd\\xc9\\x59\\ \\x4c\\x8e\\xf1\\x23\\x7e\\x5c\\x00\\x24\\xe2\\x82\\xce\\xcb\\x70\\xd5\\xbe\\xc8\\ 
\\xbc\\x20\\x50\\x09\\x1c\\x48\\x80\\xd6\\x12\\xd8\\x3d\\x1c\\x0e\\xe6\\xdb\\x4b\\ \\x31\\x80\\xb2\\x64\\x68\\xba\\x06\\x7a\\x8c\\x6b\\x23\\x3a\\xa0\\x82\\xf8\\x8c\\ \\x08\\x47\\xc7\\x42\\xb3\\xdb\\x5e\\xb2\\xdf\\xb6\\x87\\xe4\\x1b\\xe0\\x2b\\x8b\\ \\x42\\xc3\\x29\\x48\\xaa\\x80\\xd1\\x95\\x04\\xdf\\x0b\\x53\\x57\\x0c\\x1e\\x13\\ \\x12\\xf7\\x00\\x3f\\xb9\\x81\\x1d\\x90\\xf6\\x13\\xfc\\xb7\\xda\\x86\\xf3\\x91\\ \\x20\\x1c\\xfc\\x1d\\xac\\x13\\x70\\xd9\\x2f\\x30\\x0a\\x70\\x45\\xe1\\xfb\\x10\\ \\x1e\\x26\\x03\\x6d\\x18\\xfe\\x15\\x50\\xd0\\x08\\xa5\\x11\\x4c\\xef\\x1a\\x5c\\ \\x15\\xd3\\xa9\\x26\\x8b\\x0c\\x3c\\x64\\xf0\\x2b\\xc6\\x34\\xa1\\x98\\xd3\\xe7\\", "\\xbc\\x20\\x50\\x09\\x1c\\x48\\x80\\xd6\\x12\\xd8\\x3d\\x1c\\x0e\\xe6\\xdb\\x4b\\ \\x31\\x80\\xb2\\x64\\x68\\xba\\x06\\x7a\\x8c\\x6b\\x23\\x3a\\xa0\\x82\\xf8\\x8c\\ \\x08\\x47\\xc7\\x42\\xb3\\xdb\\x5e\\xb2\\xdf\\xb6\\x87\\xe4\\x1b\\xe0\\x2b\\x8b\\ \\x42\\xc3\\x29\\x48\\xaa\\x80\\xd1\\x95\\x04\\xdf\\x0b\\x53\\x57\\x0c\\x1e\\x13\\ \\x12\\xf7\\x00\\x3f\\xb9\\x81\\x1d\\x90\\xf6\\x13\\xfc\\xb7\\xda\\x86\\xf3\\x91\\ \\x20\\x1c\\xfc\\x1d\\xac\\x13\\x70\\xd9\\x2f\\x30\\x0a\\x70\\x45\\xe1\\xfb\\x10\\ \\x1e\\x26\\x03\\x6d\\x18\\xfe\\x15\\x50\\xd0\\x08\\xa5\\x11\\x4c\\xef\\x1a\\x5c\\ \\x15\\xd3\\xa9\\x26\\x8b\\x0c\\x3c\\x64\\xf0\\x2b\\xc6\\x34\\xa1\\x98\\xd3\\xe7\\ \\x25\\x0e\\x54\\x5a\\x1c\\x93\\x5d\\xef\\xc0\\x26\\xc1\\x69\\xe6\\xae\\x4e\\x4b\\ \\xfe\\x95\\xef\\xb3\\x9d\\xbe\\xf8\\x2b\\xb8\\x02\\x0c\\x88\\x43\\x5e\\x04\\x9a\\ \\x0c\\x30\\x3d\\x90\\x64\\x42\\xd0\\x70\\xd2\\x6b\\x1c\\x5c\\x07\\xc0\\xac\\xb0\\ \\xe1\\x6b\\x0d\\x00\\xb3\\xd5\\x89\\xd4\\x01\\x87\\x92\\xba\\x3b\\x8b\\x71\\x3b\\ \\xff\\x6f\\xea\\x03\\x4d\\x1d\\x30\\xa4\\x1a\\x4e\\x5a\\xf6\\xae\\xf5\\x07\\x56\\", "def qInitResources(): QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data) def qCleanupResources(): QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data) 
qInitResources()", "\\x87\\x34\\x0b\\x89\\xc5\\xf6\\x08\\xe1\\xda\\x22\\xbb\\xb1\\x79\\x59\\xe2\\x4f\\ \\xc9\\x7b\\x8b\\xf4\\x3c\\xb2\\x78\\x4d\\x47\\x88\\xab\\x95\\x93\\xd2\\x2d\\x48\\ \\x09\\x97\\xc9\\xa2\\x5a\\xe2\\xa8\\xe2\\x74\\xd8\\x73\\x94\\x84\\x71\\xd2\\x84\\ \\x54\\x29\\xad\\x44\\x1a\\x93\\x22\\x1a\\xd7\\x20\\x8d\\xb0\\x67\\x22\\x95\\xb5\\ \\xa8\\x7a\\x1f\\x52\\xae\\xcb\\x59\\xc4\\x20\\xb5\\x5d\\xe3\\xd5\\x8f\\xad\\x68\\ \\x6f\\x33\\x0a\\xbf\\x66\\x48\\x05\\xc9\\x12\\x57\\x4a\\xfe\\x21\\xaa\\x2c\\x47\\ \\x3b\\x9a\\x91\\x8a\\x0d\\xc9\\xb8\\x44\\x72\\x4f\\x53\\xec\\xfe\\x02\\x55\\x34\\ \\x22\\xeb\\x30\\xd2\\xa4\\x44\\xc9\\x78\\xca\\x19\\xf6\\xcc\\x97\\x86\\x95\\x48\\ \\xfb\\x90\\x0a\\xa6\\x48\\xfc\\x20\\x79\\x2f\\x96\\x16\\xe3\\x4c\\xbc\\xbe\\x92\\ \\xce\\x19\\x2e\\xcd\\xec\\xa9\\x38\\x47\\xd5\\x4e\\x4c\\x56\\xef\\xbb\\xa4\\xe1\\ \\xdd\\x24\\xbe\\x51\\x05\\x71\\xd5\\x10\\x57\\x98\\xbd\\xd2\\xe8\\x3e\\xd2\\xb9\\ \\x1e\\x69\\x58\\x9a\\x94\\xf0\\x90\\x4e\\x25\\xaf\\x11\\x81\\xa9\\xd9\\xd2\\x3f\\ \\x3c\\x6a\\x6d\\x41\\x3f\\xcb\\xa1\\xd0\\xc4\\x04\\xc9\\x3f\\x48\\xb1\\xaf\\xd0\\", "= qt_resource_struct_v1 else: rcc_version = 2 qt_resource_struct = qt_resource_struct_v2 def qInitResources(): QtCore.qRegisterResourceData(rcc_version, qt_resource_struct,", "\\x0e\\xc3\\x9d\\xcf\\xc3\\xfb\\xf9\\xd0\\xfe\\x36\\x10\\x83\\x5e\\xf7\\xc3\\xd1\\ \\x6f\\x80\\x0e\\x2c\\x52\\x30\\xe9\\x0e\\xfc\\x8f\\x0a\\x9e\\x24\\x42\\x23\\x99\\ \\x74\\x52\\x46\\x37\\x06\\xf3\\x13\\x49\\x7c\\x03\\xc6\\x25\\x9c\\xd2\\x60\\xd6\\ \\x53\\x42\\x12\\x60\\x54\\xfd\\x82\\xaa\\xfa\\xc2\\xd6\\x4c\\x98\\xbd\\x15\\x12\\ \\xe7\\xb9\\xc0\\x63\\x41\\xc8\\x84\\xac\\x38\\x24\\x00\\xbf\\x00\\x5d\\x81\\x01\\ \\x06\\x8d\\x17\\x19\\x6c\\xb8\\xdc\\xe2\\x54\\x81\\xfd\\x52\\x9f\\xd3\\x90\\x17\\ \\x86\\xf6\\x1c\\xd8\\x92\\x0a\\x85\\xc0\\xc5\\xe5\\xe0\\x8d\\x81\\x2b\\x05\\x94\\ \\x01\\x91\\xd3\\x90\\xb8\\x20\\x17\\xde\\xeb\\x0f\\xa1\\xa1\\xc0\\x40\\xe0\\x0f\\ 
\\x18\\xbe\\x1c\\xf6\\x0f\\x86\\xce\\x47\\x81\\x1a\\x9b\\x3a\\x69\\x02\\xb6\\x42\\ \\xca\\x6e\\x08\\x05\\x89\\x45\\xd7\\x53\\x8b\\x81\\x17\\x93\\x6c\\x7e\\xc0\\x74\\ \\x7d\\x4c\\x34\\x3e\\x8f\\x43\\x14\\x73\\x12\\x17\\xa9\\x40\\x42\\xfa\\x19\\x8c\\ \\x52\\x19\\xca\\x05\\xba\\x20\\x2e\\x06\\xee\\x3a\\x0c\\xfe\\x76\\x30\\xbf\\x04\\ \\x3e\\x07\\x32\\x80\\xcb\\xa0\\xf9\\x2a\\xd8\\x71\\x11\\xb4\\x87\\xa1\\xdf\\x09\\", "\\xd9\\xcd\\x4f\\x9e\\xa3\\x38\\x1f\\xc3\\xa2\\xb7\\xd1\\x35\\x60\\xec\\x75\\x5a\\ \\x27\\xe4\\x6c\\x62\\x05\\x30\\x6f\\x18\\x58\\xf7\\x01\\xa3\\x09\\x90\\x88\\x89\\ \\x9f\\x1d\\x24\\x53\\x80\\x9b\\x16\\x44\\x2a\\x06\\xed\\x80\\x2b\\x6f\\x5e\\x74\\ \\xde\\xb9\\x58\\xcc\\x04\\xae\\x6b\\x83\\xbc\\x23\\xce\\xf3\\x7f\\x8b\\xc0\\x2e\\ \\x2f\\x9e\\xb4\\x38\\x5b\\x67\\xc0\\xaf\\x77\\x43\\xcb\\x3f\\x40\\x17\\xc5\\x49\\ \\x09\\x86\\x31\\xab\\x23\\x10\\x8a\\x80\\x51\\x8b\\x75\\x63\\x3b\\x4d\\x43\\x20\\ \\x7b\\x0f\\x24\\xaf\\x32\\xe0\\xac\\x1b\\x38\\x0d\\xb4\\x81\\xcf\\x05\\x39\\x35\\ \\x30\\xf8\\x28\\xf4\\xf9\\x12\\x9a\\x16\\x40\\xc4\\x0b\\x1a\\x0d\\x94\\xd8\\x05\\ \\x46\\x57\\x60\\xf4\\xb4\\x20\\xd5\\xd6\\x4b\\x92\\x81\\x6d\\xc0\\xd2\\x12\\x08\\ \\x3c\\xe7\\x54\\xe6\\xc3\\xcd\\x29\\x22\\x18\\x94\\xf2\\x3d\\x09\\xf8\\x68\\x24\\ \\x15\\xe1\\xe6\\x00\\x60\\x3c\\x2e\\x34\\x06\\x98\\xb0\\xd7\\xc1\\xe9\\x61\\x60\\ \\x08\\x34\\x64\\x40\\xd6\\x63\\x60\\xec\\x02\\xfc\\x49\\x58\\x53\\x23\\xec\\xb8\\ \\x39\\xca\\xee\\x7e\\x10\\x31\\xe1\\xbc\\x6a\\x28\\xfd\\x15\\x92\\xc3\\x70\\x70\\", "\\x00\\x73\\x5e\\x63\\ \\x00\\x6c\\ \\x00\\x6f\\x00\\x67\\x00\\x6f\\x00\\x73\\ \\x00\\x08\\ \\x0a\\x61\\x5a\\xa7\\ \\x00\\x69\\ \\x00\\x63\\x00\\x6f\\x00\\x6e\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\ \" qt_resource_struct_v1 = b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\", "2 qt_resource_struct = qt_resource_struct_v2 def qInitResources(): QtCore.qRegisterResourceData(rcc_version, 
qt_resource_struct, qt_resource_name, qt_resource_data) def qCleanupResources(): QtCore.qUnregisterResourceData(rcc_version,", "\\xd0\\xd7\\x16\\xfa\\xde\\x42\\x91\\xcf\\x91\\x35\\x07\\x35\\x6f\\x40\\xe5\\x42\\ \\x2d\\x9d\\xc8\\x7a\\xd0\\x70\\x02\\xd8\\x42\\x89\\xb7\\xa5\\x82\\x0b\\x14\\xd9\\ \\x87\\x34\\x0b\\x89\\xc5\\xf6\\x08\\xe1\\xda\\x22\\xbb\\xb1\\x79\\x59\\xe2\\x4f\\ \\xc9\\x7b\\x8b\\xf4\\x3c\\xb2\\x78\\x4d\\x47\\x88\\xab\\x95\\x93\\xd2\\x2d\\x48\\ \\x09\\x97\\xc9\\xa2\\x5a\\xe2\\xa8\\xe2\\x74\\xd8\\x73\\x94\\x84\\x71\\xd2\\x84\\ \\x54\\x29\\xad\\x44\\x1a\\x93\\x22\\x1a\\xd7\\x20\\x8d\\xb0\\x67\\x22\\x95\\xb5\\ \\xa8\\x7a\\x1f\\x52\\xae\\xcb\\x59\\xc4\\x20\\xb5\\x5d\\xe3\\xd5\\x8f\\xad\\x68\\ \\x6f\\x33\\x0a\\xbf\\x66\\x48\\x05\\xc9\\x12\\x57\\x4a\\xfe\\x21\\xaa\\x2c\\x47\\ \\x3b\\x9a\\x91\\x8a\\x0d\\xc9\\xb8\\x44\\x72\\x4f\\x53\\xec\\xfe\\x02\\x55\\x34\\ \\x22\\xeb\\x30\\xd2\\xa4\\x44\\xc9\\x78\\xca\\x19\\xf6\\xcc\\x97\\x86\\x95\\x48\\ \\xfb\\x90\\x0a\\xa6\\x48\\xfc\\x20\\x79\\x2f\\x96\\x16\\xe3\\x4c\\xbc\\xbe\\x92\\ \\xce\\x19\\x2e\\xcd\\xec\\xa9\\x38\\x47\\xd5\\x4e\\x4c\\x56\\xef\\xbb\\xa4\\xe1\\ \\xdd\\x24\\xbe\\x51\\x05\\x71\\xd5\\x10\\x57\\x98\\xbd\\xd2\\xe8\\x3e\\xd2\\xb9\\", "Resource object code # # Created by: The Resource Compiler for PyQt5 (Qt", "\\xf0\\x5e\\x70\\x1d\\x87\\x82\\x57\\x6c\\x80\\x06\\x93\\x20\\x2b\\x44\\x8c\\xbe\\ \\x88\\x5c\\x70\\x2f\\x24\\x9d\\x3a\\x76\\x91\\x49\\x53\\xfb\\x67\\x50\\x97\\x06\\ \\xa3\\xee\\x23\\xd5\\x5c\\x00\\xec\\xe0\\x24\\xb0\\x18\\x13\\x53\\x33\\xe1\\x50\\ \\x2a\\xc4\\xdb\\xed\\x93\\xef\\xb1\\x11\\x2a\\x3c\\xd0\\x7e\\x1d\\x30\\xa8\\x0a\\ \\x3a\\xdd\\x8c\\x98\\xe5\\x61\\x58\\x1b\\x7c\\x92\\x0d\\xdf\\x7f\\x19\\x87\\x3b\\ \\x37\\x41\\xd6\\xef\\x90\\x17\\xc1\\x2a\\xb6\\x59\\xb2\\x7b\\x3b\\x70\\xc8\\x07\\ \\xc6\\x01\\x60\\x37\\xa4\\x7d\\x41\\x7c\\xee\\xeb\\xb4\\xfc\\xb1\\x14\\xce\\xc6\\ \\x61\\x76\\x16\\xb4\\x5f\\x64\\xab\\x6c\\xea\\x7a\\x98\\xb3\\x01\\x4e\\xcc\\xb1\\ 
\\xbb\\xac\\x74\\x83\\xcd\\x54\\x64\\x43\\xef\\x03\\x70\\xfe\\x59\\xe2\\xb4\\x12\\ \\x66\\x02\\x8c\\xf8\\x89\\xac\\xcc\\x7b\\x71\\x11\\x63\\x1f\\x59\\xd4\\xb4\\x6f\\ \\x87\\xce\\x0c\\x78\\xe0\\x1d\\xba\\x78\\xdf\\x60\\x32\\x27\\xed\\xaf\\xea\\x2c\\ \\x7a\\x44\\xa8\\x79\\x0b\\xd2\\x6c\\xa4\\xbb\\xd1\\x67\\x31\\xb4\\x40\\x48\\xaf\\ \\x21\\xf5\\x44\\x32\\x0b\\xa5\\x9b\\x4c\\xad\\xb0\\xd0\\xb3\\x42\\x2b\\x85\\x1a\\", "\\xe1\\xfc\\x5a\\x62\\x33\\xe1\\xf4\\x14\\xfb\\x24\\x3d\\x40\\xe9\\x72\\x70\\xcf\\ \\x4d\\x83\\x53\\xbd\\x21\\x9e\\x47\\x12\\x9d\\xe0\\x09\\x40\\x34\\x19\\x62\\xfd\\ \\x9c\\x9e\\x6a\\x06\\x32\\x81\\xc1\\x50\\x57\\x85\\x7a\\x74\\x80\\x1b\\x8c\\x6c\\ \\xe7\\xad\\x0c\\xc0\\xed\\xc0\\xab\\xdc\\x07\\x64\\xe0\\x61\\x0f\\xd9\\xe4\\x13\\ \\xe7\\x02\\xdc\\x34\\x92\\x4a\\x10\\x94\\x0f\\x74\\xe0\\x36\\x77\\x61\\xf8\\x94\\ \\xa3\\xb1\\xd4\\x13\\x02\\xfa\\x02\\xcf\\x34\\x42\\x97\\xbb\\x80\\xad\\x29\\x30\\ \\xb9\\x9d\\xce\\xfb\\x21\\xbc\\x07\\xd2\\x3f\\x32\\xa0\\x5c\\x50\\xef\\x03\\x2b\\ \\x05\\xa2\\x3d\\xed\\x10\\x43\\x0b\\x3c\\xb7\\x8d\\xdf\\x9f\\x86\\xf4\\x3a\\xe8\\ \\x33\\xd5\\x80\\x6d\\x53\\x81\\xee\\x0e\\x36\\xdd\\x0e\\x5e\\x92\\x9d\\xf3\\x8e\\ \\xd9\\xcd\\x4f\\x9e\\xa3\\x38\\x1f\\xc3\\xa2\\xb7\\xd1\\x35\\x60\\xec\\x75\\x5a\\ \\x27\\xe4\\x6c\\x62\\x05\\x30\\x6f\\x18\\x58\\xf7\\x01\\xa3\\x09\\x90\\x88\\x89\\ \\x9f\\x1d\\x24\\x53\\x80\\x9b\\x16\\x44\\x2a\\x06\\xed\\x80\\x2b\\x6f\\x5e\\x74\\ \\xde\\xb9\\x58\\xcc\\x04\\xae\\x6b\\x83\\xbc\\x23\\xce\\xf3\\x7f\\x8b\\xc0\\x2e\\", "\\x53\\x00\\xb8\\x80\\x28\\x25\\xf8\\x68\\xc6\\x4b\\x39\\x71\\xd2\\x30\\xc8\\xe6\\ \\x77\\xba\\x10\\x26\\xca\\x20\\x76\\xe1\\x3f\\x6f\\x01\\xee\\xd0\\x49\\xde\\xf0\\ \\xcd\\x23\\xd2\\x69\\xb1\\xc2\\x5f\\xcc\\x65\\x9d\\xd5\\x84\\x5b\\x45\\xa3\\xbf\\ \\x81\\x98\\x75\\x02\\xde\\xdf\\x79\\x95\\x46\\x6d\\x7b\\x4f\\xd7\\x1c\\x9a\\xad\\ \\xc0\\x2f\\x48\\x2f\\x21\\x8d\\xf4\\x48\\x5d\\x8b\\xa4\\x3b\\x0d\\x35\\x1c\\xb2\\ \\x73\\xfa\\x1b\\x16\\xda\\xd8\\x89\\x82\\xb5\\xa8\\xea\\x18\\x7a\\xc1\\x42\\xcf\\ 
\\x0b\\xad\\x13\\xfa\\x3d\\x8c\\xe2\\x9b\\x0d\\x75\\x9c\\x34\\xa4\\xa3\\xc8\\x6a\\ \\xb4\\x19\\x50\\x05\\xdd\\x25\\x63\\x92\\xc4\\x08\\x89\\x19\\xd2\\xf8\\x02\\xe9\\ \\xff\\x21\\x79\\xee\\x90\\x8c\\xcf\\x25\\xe3\\x4a\\xbb\\x7f\\x8c\\x6a\\xc9\\x78\\ \\x47\\x32\\x26\\x48\\xac\\x90\\xd8\\xa7\\xbd\\x48\\x11\\x9e\\x95\\xd8\\xa9\\x72\\ \\xa2\\xb2\\x58\\xa7\\x36\\x0e\\x4a\\x2c\\xd7\\x7a\\xb3\\x5c\\xa1\\xae\\x77\\xa9\\ \\xf9\\xfa\\xb1\\xba\\xb6\\xfc\\x2e\\x69\\xd0\\x10\\x19\\x0d\\x7c\\xab\\xe5\\x13\\ \\x5d\\xdc\\x64\\x3e\\x40\\xda\\xaf\\x03\\xc0\\xbf\\x0e\\xba\\x75\\xf2\\xf5\\xe7\\", "\\x2a\\xc4\\xdb\\xed\\x93\\xef\\xb1\\x11\\x2a\\x3c\\xd0\\x7e\\x1d\\x30\\xa8\\x0a\\ \\x3a\\xdd\\x8c\\x98\\xe5\\x61\\x58\\x1b\\x7c\\x92\\x0d\\xdf\\x7f\\x19\\x87\\x3b\\ \\x37\\x41\\xd6\\xef\\x90\\x17\\xc1\\x2a\\xb6\\x59\\xb2\\x7b\\x3b\\x70\\xc8\\x07\\ \\xc6\\x01\\x60\\x37\\xa4\\x7d\\x41\\x7c\\xee\\xeb\\xb4\\xfc\\xb1\\x14\\xce\\xc6\\ \\x61\\x76\\x16\\xb4\\x5f\\x64\\xab\\x6c\\xea\\x7a\\x98\\xb3\\x01\\x4e\\xcc\\xb1\\ \\xbb\\xac\\x74\\x83\\xcd\\x54\\x64\\x43\\xef\\x03\\x70\\xfe\\x59\\xe2\\xb4\\x12\\ \\x66\\x02\\x8c\\xf8\\x89\\xac\\xcc\\x7b\\x71\\x11\\x63\\x1f\\x59\\xd4\\xb4\\x6f\\ \\x87\\xce\\x0c\\x78\\xe0\\x1d\\xba\\x78\\xdf\\x60\\x32\\x27\\xed\\xaf\\xea\\x2c\\ \\x7a\\x44\\xa8\\x79\\x0b\\xd2\\x6c\\xa4\\xbb\\xd1\\x67\\x31\\xb4\\x40\\x48\\xaf\\ \\x21\\xf5\\x44\\x32\\x0b\\xa5\\x9b\\x4c\\xad\\xb0\\xd0\\xb3\\x42\\x2b\\x85\\x1a\\ \\xf6\\x20\\xbd\\x88\\xda\\x8f\\xa1\\x4f\\x84\\x76\\x75\\x22\\xcd\\x44\\x1a\\x8d\\ \\x74\\x77\\x92\\x6a\\x8e\\xd8\\x01\\xcb\\x9a\\xed\\x95\\xdc\\xa5\\x12\\xb3\\x24\\ \\x5e\\xb7\\x21\\xf7\\x86\\x5b\\x9a\\xd4\\xdd\\xce\\xf4\\xec\\x95\\xa6\\xb9\\xa5\\", "\\xda\\xf2\\x6c\\x37\\x61\\x96\\x11\\x02\\x38\\x3b\\x01\\x3c\\x11\\xd2\\x53\\xbe\\ \\xc7\\x72\\x7e\\x7b\\x3b\\x13\\x89\\xbf\\xf3\\x03\\x54\\xe6\\x41\\xef\\x32\\x0a\\ \\x86\\xf5\\xc0\\xd4\\x29\\x11\\x00\\x1a\\x8a\\x80\\x30\\xd0\\x2c\\xce\\x59\\x0a\\ \\xe7\\x00\\x5f\\xf4\\x05\\xa6\\x01\\x66\\x9d\\xed\\xa6\\xde\\x82\\x69\\x9f\\xd8\\ 
\\x26\\xd3\\x72\\xf4\\xb9\\xcc\\x11\\x63\\x0b\\xd8\\xef\\x82\\x3d\\x3e\\xe8\\xd2\\ \\x00\\x99\\x17\\x16\\xc1\\xf2\\xa9\\xf6\\x26\\x50\\x0f\\xac\\x84\\xe9\\x6f\\xd0\\ \\xf1\\x58\\x1b\\x7c\\x9b\\xe1\\x38\\xe7\\x3a\\xb8\\xf8\\xbf\\x50\\x93\\xe8\\x10\\ \\x76\\x13\\x24\\xc8\\x76\\x2f\\x29\\x7b\\xc8\\xa0\\x02\\xe2\\xd7\\xc3\\x0f\\x17\\ \\x62\\x5e\\x3e\\x8b\\x62\\x3e\\xa0\\x17\\x10\\xc1\\xe4\\x20\\x43\\x09\\xad\\xdb\\ \\x08\\xcd\\x26\\x8c\\x01\\x33\\xa1\\x4c\\x8c\\x05\\x76\\x25\\x3a\\x8a\\x5e\\x0c\\ \\xbe\\xcd\\xf0\\x78\\x15\\x6c\\x04\\x0e\\xdd\\x08\\x64\\x05\\x6c\\xb5\\x68\\xea\\ \\x81\\xe7\\x49\\x17\\x33\\xd7\\xc3\\x20\\xcb\\x76\\xcc\\x3e\\xa7\\x10\\x0f\\x10\\ \\x01\\x86\\x1e\\x87\\x8c\\xcb\\xdc\\x70\\xf4\\xaf\\xe0\\xeb\\x05\\x1a\\xe1\\xf2\\", "qt_version < [5, 8, 0]: rcc_version = 1 qt_resource_struct = qt_resource_struct_v1 else: rcc_version", "\\xde\\x99\\x02\\x6d\\x57\\x41\\xe6\\x87\\x90\\xe1\\x42\\x97\\x54\\x41\\x71\\x19\\ \\x9d\\x57\\x99\\xf8\\xdf\\x4b\\x41\\x8b\\x2e\\x27\\x16\\xbe\\x06\\x4f\\xc6\\x6b\\ \\xe0\\xa9\\x85\\xba\\xc7\\x61\\xf8\\xbb\\x70\\x32\\x8c\\x96\\xb5\\x12\\x49\\x08\\ \\xe2\\x5b\\x1e\\x82\\x77\\xee\\x21\\xce\\x2d\\x1c\\xa2\\x3b\\x3d\\x88\\x62\\xb2\\ \\x95\\xdd\\x5c\\xc6\\xf9\\x9e\\x8d\\xec\\x1b\\xf8\\x15\\xc5\\x5d\\x8a\\xd9\\xfc\\ \\xfd\\x97\\x4c\\xce\\x3c\\x97\\xfd\\xf7\\x1f\\xa7\\xab\\xd9\\x40\\xc3\\x2b\\xb7\\ \\x61\\xac\\xfc\\x3d\\x41\\x57\\x7f\\xdf\\xdd\\x1e\\xe6\\xac\\x05\\x4a\\x4c\\xea\\ \\x17\\x59\\x64\\xee\\xcb\\xc1\\x35\\x0f\\x38\\xde\\x1d\\x54\\x04\\xf4\\x71\\xc2\\ \\xcf\\x20\\x3b\\x00\\x4d\\x3a\\x49\\xfd\\xe7\\x65\\xac\\x4f\\x86\\x1b\\xde\\x05\\ \\xe3\\x89\\x31\\xc4\\x9b\\x5f\\x76\\xac\\x6c\\x22\\x2e\\x8e\\x41\\xff\\x77\\xed\\ \\xbc\\x9f\\x93\\x83\\x36\\x8e\\xc2\\xe0\\x12\\x3a\\xb8\\x80\\x24\\xbe\\xb0\\x83\\ \\xd7\\xcd\\x4b\\x09\\x2f\\x81\\x76\\x13\\x52\\x0c\\xf0\\xec\\x01\\xf3\\x43\\xe0\\ \\xb7\\xf1\\x54\\xec\\xfb\\x37\\xf9\\xb1\\x12\\x02\\x34\\x93\\xe9\\xdf\\x0d\\xe6\\", 
"\\x9b\\x84\\x82\\x3f\\x20\\x25\\x96\\x38\\xef\\x7f\\x26\\xb1\\x4c\\xe2\\x5b\\x29\\ \\xfd\\x46\\x69\\x2b\\xd2\\x85\\xa5\\x12\\x6f\\x49\\x6c\\x96\\x78\\x43\\x7a\\xd8\\ \\x23\\xf1\\xaa\\xc4\\x26\\x69\\xd8\\xc5\\x12\\x1f\\xa9\\x91\\x98\\xe4\\xba\\x5d\\ \\x71\\xf6\\xa9\\x81\\xb0\\xc2\\x5c\\x2b\\xb9\\x6e\\xd1\\x7a\\xc2\\xba\\x17\\xe9\\ \\x3b\\x62\\x6a\\xf0\\xbd\\x22\\x0d\\xf5\\xab\\x82\\x45\\x22\\xb2\\x1b\\x55\\xef\\ \\x40\\x91\\x25\\xe8\\xcb\\x38\\x52\\x11\\xd2\\x14\\xa4\\x9b\\x4c\\xc9\\x6b\\x4a\\ \\x49\\x1e\\x55\\x2c\\x43\\x5f\\x06\\x91\\xee\\x43\\x72\\x17\\x4a\\xfc\\x4d\\x62\\ \\x92\\x64\\x9e\\x27\\x2b\\xbb\\x50\\x81\\x95\\xe8\\x47\\x0b\\xad\\x11\\xfa\\x46\\ \\xe8\\x90\\x90\\x16\\x20\\x65\\xa4\\x49\\xdc\\xee\\x2c\\xf2\\x6b\\x89\\xef\\xa4\\ \\xec\\xcb\\xa5\\xf7\\x0d\\xa9\\xa0\\xbf\\xc4\\xff\\x24\\xf6\\x4b\\xbd\\x2f\\x95\\ \\xf2\\x87\\x4b\\xfc\\x2c\\x15\\x3d\\x28\\x31\\x47\\x61\\x02\\x12\\x0f\\xa9\\x93\\ \\x5a\\x9d\\xa6\\x46\\x32\\x27\\xa9\\x9a\\x4a\\x7d\\x8d\\xb4\\x84\\x98\\x54\\x38\\ \\x5e\\xe2\\x62\\x45\\xf8\\x42\\xaf\\x13\\x93\\xa1\\xc7\\x11\\xdd\\x40\\xa3\\x20\\", "QtCore qt_resource_data = b\"\\ \\x00\\x00\\x19\\x3d\\ \\x89\\ \\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0d\\x49\\x48\\x44\\x52\\x00\\ \\x00\\x00\\x32\\x00\\x00\\x00\\x32\\x08\\x06\\x00\\x00\\x00\\x1e\\x3f\\x88\\xb1\\ \\x00\\x00\\x00\\x06\\x62\\x4b\\x47\\x44\\x00\\xff\\x00\\xff\\x00\\xff\\xa0\\xbd\\ \\xa7\\x93\\x00\\x00\\x00\\x09\\x70\\x48\\x59\\x73\\x00\\x00\\x0b\\x13\\x00\\x00\\ \\x0b\\x13\\x01\\x00\\x9a\\x9c\\x18\\x00\\x00\\x00\\x07\\x74\\x49\\x4d\\x45\\x07\\ \\xe3\\x01\\x1e\\x09\\x34\\x07\\xfb\\x3d\\x97\\x4b\\x00\\x00\\x00\\x1d\\x69\\x54\\ \\x58\\x74\\x43\\x6f\\x6d\\x6d\\x65\\x6e\\x74\\x00\\x00\\x00\\x00\\x00\\x43\\x72\\", "\\xe0\\x6e\\xc0\\xc4\\x87\\x45\\xcc\\x39\\x91\\x62\\x20\\x25\\xe8\\x3c\\x34\\x05\\ \\x88\\x79\\xc0\\xf4\\xc2\\xe8\\x36\\x22\\xb7\\x59\\x54\\x03\\x1d\\x06\\xb8\\xbd\\ \\xa0\\x64\\x68\\xf4\\xd8\\x20\\xf3\\x3b\\x48\\xf7\\x01\\x4d\\x09\\x10\\xbd\\x10\\ 
\\x32\\x87\\x05\\x09\\xb9\\xcb\\xf0\\x76\\x82\\xe7\\x87\\x72\\x98\\xb7\\x1b\\x6a\\ \\x9f\\x71\\x8e\\x7b\\xa7\\x8d\\x9d\\x8b\\x6a\\x60\\x2c\\xd0\\xe2\\xf4\\x7b\\xb2\\ \\x53\\x40\\xa7\\x43\\x83\\x56\\x04\\xa8\\xc7\\xcf\\x59\\xb2\\x30\\x38\\x8b\\x9f\\ \\x6c\\x4e\\x72\\x3e\\x71\\x92\\x8c\\xf7\\x71\\xa9\\x1b\\x85\\x0c\\xc4\\x5d\\x48\\ \\x0c\\x37\\x50\\xee\\xb4\\xdd\\x2c\\x60\\xf8\\x77\\xc0\\x07\\xb6\\x25\\x22\\x16\\ \\xb5\\x51\\xb0\\xb4\\x88\\x9e\\x1f\\xb6\\xd3\\xd3\\x48\\x00\\xb7\\x0f\\x92\\x5b\\ \\xe1\\xfc\\x5a\\x62\\x33\\xe1\\xf4\\x14\\xfb\\x24\\x3d\\x40\\xe9\\x72\\x70\\xcf\\ \\x4d\\x83\\x53\\xbd\\x21\\x9e\\x47\\x12\\x9d\\xe0\\x09\\x40\\x34\\x19\\x62\\xfd\\ \\x9c\\x9e\\x6a\\x06\\x32\\x81\\xc1\\x50\\x57\\x85\\x7a\\x74\\x80\\x1b\\x8c\\x6c\\ \\xe7\\xad\\x0c\\xc0\\xed\\xc0\\xab\\xdc\\x07\\x64\\xe0\\x61\\x0f\\xd9\\xe4\\x13\\", "\\x0c\\x35\\xcd\\x46\\x56\\x4f\\xa4\\x81\\x09\\x52\\x41\\x9a\\xa2\\x8b\\xd0\\x71\\ \\xa1\\xf8\\xcd\\x3e\\x89\\xc7\\x25\\x56\\x4a\\x3d\\x67\\x4b\\xcb\\x90\\x36\\x21\\ \\xdd\\xe4\\x96\\x98\\x22\\xf1\\x9e\\xc4\\x42\\x89\\xc9\\xd2\\xa7\\x48\\x2c\\x95\\ \\x28\\x73\\xe0\\x85\\x2c\\x5e\\x56\\x05\\x96\\xda\\x78\\x45\\x4a\\x19\\xae\\x06\\ \\xa4\\x2a\\x1a\\x14\\x4b\\xb9\\x43\\xba\\x20\\x59\\x61\\xe3\\x65\\xfd\\xb3\\x58\\ \\x7a\\xce\\x1f\\xd6\\xdb\\xee\\x23\\xda\\x53\\x34\\x5f\\x9c\\xe8\\x40\\x0d\\xfb\\ \\x90\\x1e\\x42\\x7f\\xb6\\x23\\x6b\\x13\\xd2\\xf9\\xa6\\xc2\\xff\\x41\\x95\\xeb\\ \\x90\\xd2\\x3d\\x92\\xd1\\x53\\x2a\\x71\\xab\\x6a\\x13\\xfa\\x8f\\xd0\\xea\\xb3\\ \\x28\\x3a\\x16\\x89\\xf3\\x24\\x6e\\x92\\xb8\\x56\\x62\\x92\\x42\\x4f\\xb8\\x55\\ \\x65\\x21\\xdd\\x80\\xc4\\x1d\\x12\\x1f\\x49\\xfc\\x21\\x99\\x9f\\x4b\\x85\\x57\\ \\x4b\\x6b\\x90\\x26\\xba\\x25\\xde\\x95\\xc5\\x59\\x89\\x72\\x69\\x46\\x0f\\xa9\\ \\x70\\x8c\\xe2\\x54\\x2a\\x4a\\x48\\x3a\\xb7\\x97\\xc4\\x1a\\x95\\x63\\x29\\x98\\ \\xfb\\xb0\\xe4\\x9a\\xa5\\x83\\x58\\x6a\\x64\\x9b\\xe4\\x2f\\x52\\x83\\xf7\\x2b\\", "PyQt5 import QtCore from silx.gui import qt as QtCore qt_resource_data = b\"\\ 
\\x00\\x00\\x19\\x3d\\", "\\xef\\x1c\\xa4\\xa5\\x7d\\x0a\\xff\\x1f\\xa7\\x48\\xb3\\x27\\x67\\x17\\xe2\\x1e\\ \\x00\\x00\\x00\\x00\\x49\\x45\\x4e\\x44\\xae\\x42\\x60\\x82\\ \" qt_resource_name = b\"\\ \\x00\\x05\\ \\x00\\x73\\x5e\\x63\\ \\x00\\x6c\\ \\x00\\x6f\\x00\\x67\\x00\\x6f\\x00\\x73\\ \\x00\\x08\\ \\x0a\\x61\\x5a\\xa7\\ \\x00\\x69\\", "\\x25\\xd4\\xf4\\x15\\x52\\x96\\x7d\\x4d\\x1a\\x5e\\x63\\xcf\\x3c\\x1e\\x93\\x2d\\ \\x8e\\xd6\\xbd\\x48\\x45\\x09\\x92\\xbb\\xc4\\xf6\\x59\\x29\\xdd\\xd5\\xf2\\xab\\ \\xa1\\xaf\\x85\\xde\\x77\\x66\\x23\\xed\\xfb\\x90\\x6e\\x47\\x32\\x06\\x4b\\x3c\\ \\x27\\x71\\x97\\x74\\x5e\\x9a\\xac\\x16\\xd4\\x20\\x64\\x1d\\x40\\xea\\x9f\\x2e\\ \\x31\\x47\\xe2\\x6a\\xe9\\x46\\xaf\\x62\\x6d\\xce\\xe5\\x76\\x7e\\x77\\x7b\\x4e\\ \\x92\\x3b\\xda\\xbe\\x1f\\x5e\\x89\\x74\\xa5\\x5f\\x62\\xae\\x7d\\xb5\\x6a\\x3c\\ \\x29\\xbd\\xe8\\x91\\xe6\\x23\\x3d\\x85\\xd4\\xbb\\x44\\x31\\x96\\xa9\\x99\\xcd\\ \\x8a\\xf1\\x95\\x94\\x3b\\x52\\xee\\x9e\\x31\\xbb\\xd8\\x8c\\x97\\x80\\xc6\\x5c\\ \\x68\\xac\\xa3\\x35\\xdf\\x6e\\xde\\x5e\\xc0\\xf4\\x27\\xc1\\x58\\xdc\\x17\\xac\\ \\x73\\x1c\\x00\\x45\\xb0\\x6e\\x69\\xe4\\x8f\\xc1\\xc2\\x72\\xfa\\x23\\x82\\x1d\\ \\x1a\\x39\\x05\\x28\\xc5\\x01\\x5e\\x21\\xec\\xbd\\x15\\xe3\\xb1\\x77\\xf1\\x2c\\ \\x0a\\x63\\xd4\\x00\\x87\\x46\\x3b\\x1e\\x6c\\x34\\x7c\\xeb\\xc7\\xf5\\xaf\\x4f\\ \\xed\\xd3\\xa8\\x79\\x9d\\x18\\x93\\x70\\xd7\\x1d\\x83\\xdd\\x83\\xa0\\xc0\\x24\\", "\\x7b\\xf6\\x3a\\x1a\\xc9\\x67\\xe0\\xb7\\x43\\xd9\\xfe\\xf7\\xcf\\x30\\xbf\\x6b\\ \\xde\\xcf\\xdf\\x6a\\x81\\xb9\\x40\\x03\\x6c\\xe9\\x7a\\x88\\x7f\\x54\\x47\\x71\\ \\x7d\\x0d\\x9c\\xee\\x0f\\x87\\xa1\\x79\\x3f\\x1c\\xc7\\xcf\\x50\\x60\\xc6\\xbf\\ \\x2c\\xf8\\x4f\\x7f\\x68\\x1e\\x00\\x1d\\xbd\\xe0\\x60\\x0a\\x08\\xb2\\x42\\x70\\ \\x3e\\xb0\\x28\\x0c\\x49\\x19\\x10\\x9e\\x0a\\x34\\x74\\x03\\x6e\\x82\\xf8\\xc3\\ \\xd0\\xf2\\x02\\x7d\\x2f\\x99\\xc1\\xa8\\x45\\x09\\x14\\xd7\\x42\\x5a\\x38\\x0e\\ 
\\x1d\\x6d\\x44\\x18\\x88\\xf7\\xb7\\x97\\x30\\x27\\x7e\\x47\\xa8\\xde\\xc0\\xb7\\ \\x29\\x87\\xd6\\xa3\\x23\\xd0\\xa9\\x2c\\x5c\\x8f\\x4d\\xc0\\x38\\xe4\\xa1\\xf7\\ \\xeb\\x29\\xf0\\x99\\x8b\\xe3\\xea\\x46\\x60\\xed\\x83\\x64\\x86\\xfb\\x91\\x1c\\ \\x4d\\xa7\\xa6\\xc5\\xa0\\xe0\\x64\\x22\\x93\\xdf\\x9a\\x8b\\x59\\x19\\x8c\\x92\\ \\xb8\\x0d\\xc8\\x81\\x78\\x14\\xc2\\x64\\x91\\x91\\x68\\xc2\\x56\\x17\\xb8\\xce\\ \\x62\\x8d\\x83\\x0d\\x37\\xc1\\x68\\x82\\xdc\\x70\\x77\\x02\\x2c\\xcc\\x05\\xf2\\ \\xc1\\x30\\xe0\\x9c\\xcd\\x1c\\x7b\\xb3\\x8d\\xde\\xcf\\xc2\\xae\\x3e\\xb0\\x0b\\", "\\x1e\\x39\\x88\\xc6\\xfe\\x17\\x3f\\x2f\\x93\\xce\\x12\\x0c\\xda\\x31\\xac\\x3c\\ \\x12\\x67\\x8f\\x20\\x3e\\x35\\x0c\\x1c\\x00\\x22\\x18\\xf8\\xb0\\x36\\xf6\\x83\\ \\xc4\\x44\\x7c\\x74\\x12\\x25\\x4e\\xb1\\xf9\\x09\\xc9\\x2b\\xf2\\xe0\\xf0\\xcd\\ \\xf4\\x30\\x3b\\x99\\x68\\xee\\x87\\x0b\\xc6\\x53\\xf0\\xa6\\x20\\xdf\\x4b\\x81\\ \\x37\\x99\\xe2\\x16\\x93\\x7b\\x3c\\x07\\x31\\x17\\x0e\\xe9\\x44\\x61\\x60\\x00\\ \\x18\\xc5\\x30\\x39\\x0e\\x74\\x58\\xd0\\xe5\\x04\\xc1\\x5b\\xa1\\xd5\\x03\\x33\\ \\x16\\x00\\x1b\\x92\\x41\\x01\\x48\\x6d\\xe5\\xf4\\xdc\\x28\\xf7\\x3c\\x0d\\x1b\\ \\x80\\x33\\x67\\xa0\\xe8\\x13\\x48\\xfe\\x10\\xfa\\x76\\x40\\xb3\\x01\\x9e\\xf5\\ \\x90\\x38\\x2a\\x17\\x96\\x65\\x63\\x5a\\x10\\xfb\\x5f\\x13\\xfc\\xed\\x23\\xa0\\ \\x02\\x38\\x06\\x04\\x60\\xdf\\x38\\x4c\\xd3\\x0b\\x7d\\x77\\x03\\xc2\\x45\\x3b\\ \\x66\\xa0\\x1f\\x84\\x3a\\xf1\\x70\\x9c\\x46\\x92\\x88\\x64\\x9c\\x02\\xab\\x15\\ \\xd1\\x04\\xf1\\x54\\x0a\\x4f\\x0f\\x85\\x13\\x82\\xba\\x22\\x16\\x1d\\x9b\\x42\\ \\x63\\xd5\\xc5\\xb4\\x9b\\x01\\xc2\\xed\\x43\\xe1\\x93\\x76\\xd4\\xb1\\x12\\xe9\\", "\\xfb\\x62\\x5a\\x3e\\xc4\\x4c\\x0c\\xd2\\x70\\xf1\\x7f\\xd0\\x77\\x33\\x9c\\x13\\ \\xc4\\x5d\\x0f\\xfe\\x3a\\x70\\x6f\\x07\\xe3\\xb8\\xe3\\x6c\\x7a\\x54\\x91\\xbe\\ \\x25\\x9b\\x5d\\x4c\\x22\\x89\\x8d\\x24\\x45\\x0b\\x68\\x8d\\x9c\\x8f\\x72\\xf7\\ \\xd1\\xb2\\xeb\\x72\\x9a\\xae\\xf6\\xb0\\x6a\\xfd\\x9d\\x7c\\xf8\\xa4\\xc1\\x23\\ 
\\xef\\x1c\\xa4\\xa5\\x7d\\x0a\\xff\\x1f\\xa7\\x48\\xb3\\x27\\x67\\x17\\xe2\\x1e\\ \\x00\\x00\\x00\\x00\\x49\\x45\\x4e\\x44\\xae\\x42\\x60\\x82\\ \" qt_resource_name = b\"\\ \\x00\\x05\\ \\x00\\x73\\x5e\\x63\\ \\x00\\x6c\\", "\\x94\\x76\\x8f\\x64\\x0e\\x74\\x2c\\xfe\\xcb\\x12\\xdf\\xe8\\x18\\x52\\x9c\\x46\\ \\x29\\x65\\x9c\\x54\\x92\\x24\\xb1\\x54\\xad\\x9c\\x50\\x35\\x31\\xad\\x45\\x6a\\ \\x20\\x2e\\x93\\xad\\xd0\\x1e\\x87\\xa4\\xa3\\xf6\\x49\\xe4\\xd4\\xc1\\x67\\x57\\ \\x02\\x8f\\x03\\xc7\\x0b\\x18\\xd0\\x2f\\x9f\\xe9\\x47\\xe0\\xa5\\xde\\xf0\\xc7\\ \\x3a\\x60\\x4c\\xf8\\x2f\\xb5\\x80\\x40\\x1b\\x49\\x6b\\xa1\\xaf\\x20\\x2f\\x0a\\ \\x39\\xb7\\x02\\xfb\\x66\\x01\\xf7\\x39\\xb2\\x1c\\x85\\xe4\\x23\\xe8\\xa3\\x65\\ \\xf0\\xf0\\x4f\\xf0\\xc2\\x30\\xa8\\x9d\\x65\\xbf\\x97\\xbf\\x0e\\xd2\\x76\\x42\\ \\xf0\\x5e\\x70\\x1d\\x87\\x82\\x57\\x6c\\x80\\x06\\x93\\x20\\x2b\\x44\\x8c\\xbe\\ \\x88\\x5c\\x70\\x2f\\x24\\x9d\\x3a\\x76\\x91\\x49\\x53\\xfb\\x67\\x50\\x97\\x06\\ \\xa3\\xee\\x23\\xd5\\x5c\\x00\\xec\\xe0\\x24\\xb0\\x18\\x13\\x53\\x33\\xe1\\x50\\ \\x2a\\xc4\\xdb\\xed\\x93\\xef\\xb1\\x11\\x2a\\x3c\\xd0\\x7e\\x1d\\x30\\xa8\\x0a\\ \\x3a\\xdd\\x8c\\x98\\xe5\\x61\\x58\\x1b\\x7c\\x92\\x0d\\xdf\\x7f\\x19\\x87\\x3b\\ \\x37\\x41\\xd6\\xef\\x90\\x17\\xc1\\x2a\\xb6\\x59\\xb2\\x7b\\x3b\\x70\\xc8\\x07\\", "\\xa2\\xc7\\x06\\x38\\x5a\\x6b\\x32\\xfe\\x50\\x1a\\x19\\xd5\\x51\\x68\\x4f\\x83\\ \\x84\\x28\\xd4\\xd6\\xd1\\xf9\\x3c\\x6c\\x19\\x0f\\x7d\\x2c\\xe8\\xfe\\x0a\\xb8\\ \\xde\\x99\\x02\\x6d\\x57\\x41\\xe6\\x87\\x90\\xe1\\x42\\x97\\x54\\x41\\x71\\x19\\ \\x9d\\x57\\x99\\xf8\\xdf\\x4b\\x41\\x8b\\x2e\\x27\\x16\\xbe\\x06\\x4f\\xc6\\x6b\\ \\xe0\\xa9\\x85\\xba\\xc7\\x61\\xf8\\xbb\\x70\\x32\\x8c\\x96\\xb5\\x12\\x49\\x08\\ \\xe2\\x5b\\x1e\\x82\\x77\\xee\\x21\\xce\\x2d\\x1c\\xa2\\x3b\\x3d\\x88\\x62\\xb2\\ \\x95\\xdd\\x5c\\xc6\\xf9\\x9e\\x8d\\xec\\x1b\\xf8\\x15\\xc5\\x5d\\x8a\\xd9\\xfc\\ \\xfd\\x97\\x4c\\xce\\x3c\\x97\\xfd\\xf7\\x1f\\xa7\\xab\\xd9\\x40\\xc3\\x2b\\xb7\\ 
\\x61\\xac\\xfc\\x3d\\x41\\x57\\x7f\\xdf\\xdd\\x1e\\xe6\\xac\\x05\\x4a\\x4c\\xea\\ \\x17\\x59\\x64\\xee\\xcb\\xc1\\x35\\x0f\\x38\\xde\\x1d\\x54\\x04\\xf4\\x71\\xc2\\ \\xcf\\x20\\x3b\\x00\\x4d\\x3a\\x49\\xfd\\xe7\\x65\\xac\\x4f\\x86\\x1b\\xde\\x05\\ \\xe3\\x89\\x31\\xc4\\x9b\\x5f\\x76\\xac\\x6c\\x22\\x2e\\x8e\\x41\\xff\\x77\\xed\\ \\xbc\\x9f\\x93\\x83\\x36\\x8e\\xc2\\xe0\\x12\\x3a\\xb8\\x80\\x24\\xbe\\xb0\\x83\\", "\\xa1\\x3c\\x2d\\xc6\\x4b\\x67\\x42\\xb8\\xae\\x6e\\x7e\\x73\\x5e\\x72\\xaf\\x6d\\ \\xa4\\xbf\\xdf\\x1b\\x7a\\xf9\\x61\\xdb\\x34\\x92\\x7e\\x18\\x86\\xf9\\x51\\x09\\ \\x34\\x8c\\x04\\xa6\\xdb\\x73\\x0c\\x8e\\x42\\xd1\\x01\\xc8\\xfc\\x9e\\xc8\\xa4\\ \\xa3\\x54\\x4e\\x6f\\x64\\x76\\x29\\xdc\\x0e\\x64\\xee\\x04\\xea\\xea\\x31\\x6b\\ \\x4e\\x60\\x5a\\x51\\x4c\\xd7\\x6f\\xa0\\x6e\\x50\\x6f\\x40\\xdd\\x5d\\x58\\xa7\\ \\xfb\\x62\\x5a\\x3e\\xc4\\x4c\\x0c\\xd2\\x70\\xf1\\x7f\\xd0\\x77\\x33\\x9c\\x13\\ \\xc4\\x5d\\x0f\\xfe\\x3a\\x70\\x6f\\x07\\xe3\\xb8\\xe3\\x6c\\x7a\\x54\\x91\\xbe\\ \\x25\\x9b\\x5d\\x4c\\x22\\x89\\x8d\\x24\\x45\\x0b\\x68\\x8d\\x9c\\x8f\\x72\\xf7\\ \\xd1\\xb2\\xeb\\x72\\x9a\\xae\\xf6\\xb0\\x6a\\xfd\\x9d\\x7c\\xf8\\xa4\\xc1\\x23\\ \\xef\\x1c\\xa4\\xa5\\x7d\\x0a\\xff\\x1f\\xa7\\x48\\xb3\\x27\\x67\\x17\\xe2\\x1e\\ \\x00\\x00\\x00\\x00\\x49\\x45\\x4e\\x44\\xae\\x42\\x60\\x82\\ \" qt_resource_name", "\\xb5\\xca\\xe9\\xc0\\xc7\\x5e\\x52\\xc1\\x16\\xe1\\x8f\\x53\\xed\\xbd\\xc9\\x59\\ \\x4c\\x8e\\xf1\\x23\\x7e\\x5c\\x00\\x24\\xe2\\x82\\xce\\xcb\\x70\\xd5\\xbe\\xc8\\ \\xbc\\x20\\x50\\x09\\x1c\\x48\\x80\\xd6\\x12\\xd8\\x3d\\x1c\\x0e\\xe6\\xdb\\x4b\\ \\x31\\x80\\xb2\\x64\\x68\\xba\\x06\\x7a\\x8c\\x6b\\x23\\x3a\\xa0\\x82\\xf8\\x8c\\ \\x08\\x47\\xc7\\x42\\xb3\\xdb\\x5e\\xb2\\xdf\\xb6\\x87\\xe4\\x1b\\xe0\\x2b\\x8b\\ \\x42\\xc3\\x29\\x48\\xaa\\x80\\xd1\\x95\\x04\\xdf\\x0b\\x53\\x57\\x0c\\x1e\\x13\\ \\x12\\xf7\\x00\\x3f\\xb9\\x81\\x1d\\x90\\xf6\\x13\\xfc\\xb7\\xda\\x86\\xf3\\x91\\ \\x20\\x1c\\xfc\\x1d\\xac\\x13\\x70\\xd9\\x2f\\x30\\x0a\\x70\\x45\\xe1\\xfb\\x10\\ 
\\x1e\\x26\\x03\\x6d\\x18\\xfe\\x15\\x50\\xd0\\x08\\xa5\\x11\\x4c\\xef\\x1a\\x5c\\ \\x15\\xd3\\xa9\\x26\\x8b\\x0c\\x3c\\x64\\xf0\\x2b\\xc6\\x34\\xa1\\x98\\xd3\\xe7\\ \\x25\\x0e\\x54\\x5a\\x1c\\x93\\x5d\\xef\\xc0\\x26\\xc1\\x69\\xe6\\xae\\x4e\\x4b\\ \\xfe\\x95\\xef\\xb3\\x9d\\xbe\\xf8\\x2b\\xb8\\x02\\x0c\\x88\\x43\\x5e\\x04\\x9a\\ \\x0c\\x30\\x3d\\x90\\x64\\x42\\xd0\\x70\\xd2\\x6b\\x1c\\x5c\\x07\\xc0\\xac\\xb0\\", "\\xa5\\x31\\xbd\\xa5\\xae\\x97\\x49\\xac\\x52\\x88\\x36\\xa9\\x47\\x81\\xfd\\x3a\\ \\x35\\x8a\\x52\\x27\\xb1\\x5c\\x16\\x6f\\x48\\x4f\\xa1\\x78\\xca\\x75\\x6a\\x4a\\ \\x78\\x49\\x35\\x74\\x68\\x8e\\x4b\\x42\\x59\\x3e\\x59\\xef\\xa1\\xda\\x00\\x8a\\ \\x4f\\x4f\\x96\\x28\\x96\\xae\\x40\\xc1\\xfd\\xf6\\x04\\x2a\\xb0\\x14\\xc9\\x3f\\ \\xde\\x59\\xe8\\x34\\x89\\x4b\\xa4\\x94\\x6e\\xaa\\x5d\\x6a\\xea\\x45\\xa1\\x2f\\ \\x2c\\x64\\x6d\\x41\\x2a\\xcd\\x75\\xf4\\xe3\\x11\\x89\\xd5\\x92\\xeb\\x7d\\xe9\\ \\xd2\\x54\\xe9\\x17\\xa4\\x61\\x45\\x12\\x1f\\x4a\\x54\\x2a\\xc6\\x46\\x29\\x6d\\ \\x8c\\xc4\\x9b\\xb2\\x58\\x2b\\xa5\\xa7\\x39\\x8b\\x3d\\x29\\x75\\x4f\\x56\\x94\\ \\xb5\\x3a\\x4c\\x4c\\x01\\x0e\\x4a\\xc6\\x33\\x8a\\xd2\\xa4\\x38\\x01\\x1d\\x24\\ \\xa6\\xf6\\xb1\\xcb\\xa4\\xf3\\x5d\\x8a\\xe6\\x4e\\x51\\x1d\\x3f\\xeb\\x3b\\xa4\\ \\xd5\\xc4\\xf4\\x11\\x12\\xf1\\xcf\\x91\\x1e\\x47\\x9a\\x99\\x2d\\xe5\\xf8\\xa5\\ \\x2b\\x4c\\xe9\\x5b\\x54\\x17\\x45\\xd6\\x03\\x6e\\xc9\\xdf\\x5d\\xe2\\x5c\\x7b\\ \\x66\\xe8\\xc9\\x97\\xae\\x34\\xf4\\xe9\\x09\\x7b\\x28\\xba\\x4a\\x68\\xb1\\xd0\\", "\\x60\\xc1\\xae\\xef\\x08\\x63\\x77\\xfe\\x3a\\xa2\\xa0\\x88\\x0c\\x16\\xac\\x88\\ \\x0a\\xa2\\xa0\\x02\\x4a\\x13\\x51\\x40\\x40\\x7a\\x42\\x4d\\x02\\x49\\x48\\x2f\\ \\x27\\x27\\xa7\\xee\\xfb\\xfd\\xb0\\xb7\\x9f\\xf2\\xcb\\x29\\xfb\\xac\\x67\\xad\\ \\xfb\\xb9\\xcb\\x7a\\x8c\\xbc\\xe8\\x70\\xcd\\xef\\xdc\\xc1\\x75\\x07\\x80\\x10\\ \\x3c\\xd0\\x7b\\x04\\x1b\\xbb\\xee\\x66\\xcf\\xe6\\x28\\xae\\x19\\x49\\x30\\xa6\\ \\x83\\xfa\\x39\\x30\\xe3\\x1c\\xe8\\x0d\\x0c\\xee\\xf0\\x32\\xe3\\xd9\\x08\\x2c\\ 
\\xc9\\x83\\x48\\x36\\x0c\\x38\\x08\\xe3\\xc5\\xee\\x3b\\xe0\\x9b\\x42\\xd8\\x0d\\ \\x7c\\xb0\\x03\\xd2\\xbe\\x06\\xf7\\x27\\x5d\\xe0\\xf4\\x28\\x70\\xf9\\x20\\xe5\\ \\x04\\x47\\x56\\xed\\xa0\\x6a\\x84\\x49\\x69\\xd8\\x22\\x73\\x85\\x9b\\xb4\\xc7\\ \\xa6\\x42\\x73\\x6f\\x02\\x23\\x9b\\x49\\x78\\x66\\x09\\xee\\x6f\\xc3\\x84\\xa3\\ \\x26\\xbe\\x7d\\x97\\x62\\xb5\\xe7\\xa3\\x59\\x3f\\x62\\xae\\xf6\\xf3\\xf6\\xab\\ \\x11\\xae\\x5f\\x94\\x8f\\x7b\\xe5\\x85\\x7c\\x78\\xaf\\x9f\\xa9\\x89\\xaf\\x52\\ \\x7b\\xf6\\x3a\\x1a\\xc9\\x67\\xe0\\xb7\\x43\\xd9\\xfe\\xf7\\xcf\\x30\\xbf\\x6b\\", "\\x7e\\xd7\\x27\\x63\\x2c\\x98\\x06\\xb1\\x09\\xf6\\xce\\x1b\\xf5\\xf0\\xc8\\x12\\ \\x08\\xc5\\x61\\xcd\\x9d\\x0e\\xe7\\x6d\\x86\\x09\\x31\\xa8\\x1d\\x83\\x41\\x10\\ \\xda\\xf2\\x6c\\x37\\x61\\x96\\x11\\x02\\x38\\x3b\\x01\\x3c\\x11\\xd2\\x53\\xbe\\ \\xc7\\x72\\x7e\\x7b\\x3b\\x13\\x89\\xbf\\xf3\\x03\\x54\\xe6\\x41\\xef\\x32\\x0a\\ \\x86\\xf5\\xc0\\xd4\\x29\\x11\\x00\\x1a\\x8a\\x80\\x30\\xd0\\x2c\\xce\\x59\\x0a\\ \\xe7\\x00\\x5f\\xf4\\x05\\xa6\\x01\\x66\\x9d\\xed\\xa6\\xde\\x82\\x69\\x9f\\xd8\\ \\x26\\xd3\\x72\\xf4\\xb9\\xcc\\x11\\x63\\x0b\\xd8\\xef\\x82\\x3d\\x3e\\xe8\\xd2\\ \\x00\\x99\\x17\\x16\\xc1\\xf2\\xa9\\xf6\\x26\\x50\\x0f\\xac\\x84\\xe9\\x6f\\xd0\\ \\xf1\\x58\\x1b\\x7c\\x9b\\xe1\\x38\\xe7\\x3a\\xb8\\xf8\\xbf\\x50\\x93\\xe8\\x10\\ \\x76\\x13\\x24\\xc8\\x76\\x2f\\x29\\x7b\\xc8\\xa0\\x02\\xe2\\xd7\\xc3\\x0f\\x17\\ \\x62\\x5e\\x3e\\x8b\\x62\\x3e\\xa0\\x17\\x10\\xc1\\xe4\\x20\\x43\\x09\\xad\\xdb\\ \\x08\\xcd\\x26\\x8c\\x01\\x33\\xa1\\x4c\\x8c\\x05\\x76\\x25\\x3a\\x8a\\x5e\\x0c\\ \\xbe\\xcd\\xf0\\x78\\x15\\x6c\\x04\\x0e\\xdd\\x08\\x64\\x05\\x6c\\xb5\\x68\\xea\\", "\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x01\\x74\\x20\\x97\\x84\\xbd\\ \" qt_version = [int(v) for v in QtCore.qVersion().split('.')] if qt_version", "\\x9f\\x80\\xd0\\x16\\x12\\x2a\\x4f\\x41\\xd4\\x87\\xd1\\x31\\x02\\xca\\x2c\\x22\\ 
\\xb3\\x77\\x11\\x9d\\xba\\x12\\xef\\xba\\x18\\x66\\x74\\x24\\xee\\x3e\\x9f\\x41\\ \\x53\\x00\\xb8\\x80\\x28\\x25\\xf8\\x68\\xc6\\x4b\\x39\\x71\\xd2\\x30\\xc8\\xe6\\ \\x77\\xba\\x10\\x26\\xca\\x20\\x76\\xe1\\x3f\\x6f\\x01\\xee\\xd0\\x49\\xde\\xf0\\ \\xcd\\x23\\xd2\\x69\\xb1\\xc2\\x5f\\xcc\\x65\\x9d\\xd5\\x84\\x5b\\x45\\xa3\\xbf\\ \\x81\\x98\\x75\\x02\\xde\\xdf\\x79\\x95\\x46\\x6d\\x7b\\x4f\\xd7\\x1c\\x9a\\xad\\ \\xc0\\x2f\\x48\\x2f\\x21\\x8d\\xf4\\x48\\x5d\\x8b\\xa4\\x3b\\x0d\\x35\\x1c\\xb2\\ \\x73\\xfa\\x1b\\x16\\xda\\xd8\\x89\\x82\\xb5\\xa8\\xea\\x18\\x7a\\xc1\\x42\\xcf\\ \\x0b\\xad\\x13\\xfa\\x3d\\x8c\\xe2\\x9b\\x0d\\x75\\x9c\\x34\\xa4\\xa3\\xc8\\x6a\\ \\xb4\\x19\\x50\\x05\\xdd\\x25\\x63\\x92\\xc4\\x08\\x89\\x19\\xd2\\xf8\\x02\\xe9\\ \\xff\\x21\\x79\\xee\\x90\\x8c\\xcf\\x25\\xe3\\x4a\\xbb\\x7f\\x8c\\x6a\\xc9\\x78\\ \\x47\\x32\\x26\\x48\\xac\\x90\\xd8\\xa7\\xbd\\x48\\x11\\x9e\\x95\\xd8\\xa9\\x72\\ \\xa2\\xb2\\x58\\xa7\\x36\\x0e\\x4a\\x2c\\xd7\\x7a\\xb3\\x5c\\xa1\\xae\\x77\\xa9\\", "\\x26\\xd3\\x72\\xf4\\xb9\\xcc\\x11\\x63\\x0b\\xd8\\xef\\x82\\x3d\\x3e\\xe8\\xd2\\ \\x00\\x99\\x17\\x16\\xc1\\xf2\\xa9\\xf6\\x26\\x50\\x0f\\xac\\x84\\xe9\\x6f\\xd0\\ \\xf1\\x58\\x1b\\x7c\\x9b\\xe1\\x38\\xe7\\x3a\\xb8\\xf8\\xbf\\x50\\x93\\xe8\\x10\\ \\x76\\x13\\x24\\xc8\\x76\\x2f\\x29\\x7b\\xc8\\xa0\\x02\\xe2\\xd7\\xc3\\x0f\\x17\\ \\x62\\x5e\\x3e\\x8b\\x62\\x3e\\xa0\\x17\\x10\\xc1\\xe4\\x20\\x43\\x09\\xad\\xdb\\ \\x08\\xcd\\x26\\x8c\\x01\\x33\\xa1\\x4c\\x8c\\x05\\x76\\x25\\x3a\\x8a\\x5e\\x0c\\ \\xbe\\xcd\\xf0\\x78\\x15\\x6c\\x04\\x0e\\xdd\\x08\\x64\\x05\\x6c\\xb5\\x68\\xea\\ \\x81\\xe7\\x49\\x17\\x33\\xd7\\xc3\\x20\\xcb\\x76\\xcc\\x3e\\xa7\\x10\\x0f\\x10\\ \\x01\\x86\\x1e\\x87\\x8c\\xcb\\xdc\\x70\\xf4\\xaf\\xe0\\xeb\\x05\\x1a\\xe1\\xf2\\ \\xfd\\xe8\\xe5\\x0e\\xfc\\x2f\\x02\\xdb\\x6f\\x03\\xae\\xb0\\xe9\\x76\\x64\\x3d\\ \\x54\\x25\\x3a\\x2a\\x6f\\xc1\\x59\\xcb\\x66\\xea\\xdc\\x1d\\x58\\x18\\x74\\xd0\\ \\x07\\x6a\\x27\\x41\\x69\\x8c\\x94\\x21\\x8f\\x30\\x90\\x1a\\x92\\x9c\\xdf\\x5c\\ 
\\x1b\\xef\\x43\\x78\\xdd\\x2a\\xa8\\xc9\\xc5\\xc4\\x65\\x92\\x07\\x18\\x09\\xb6\\" ]
[ "2: x = x.flatten(2) lin_output = T.dot(x, self.get_W()) + self.get_b().dimshuffle('x', 0) # noinspection", ": int or tuple[] Dimension of input. n_output : int or tuple[] Dimension", "\\\\times n_{input}}` and :math:`b \\\\in \\\\mathbb{R}^{n_{output}}`. Parameters ---------- n_input : int or tuple[]", "Returns the output layers according to above equation: .. math:: Layer(x) = activation(Wx", "input. n_output : int or tuple[] Dimension of output. activation : callback Activation", "def output(self, x, prob=True): \"\"\" Return output of layers Parameters ---------- x :", "n_input=None, n_output=None, activation=None): input_shape = n_input if isinstance(n_input, tuple) else (None, n_input) output_shape", "MLP. .. math:: Layer(x) = activation(Wx + b) where :math:`x \\\\in \\\\mathbb{R}^{n_{output}}`, :math:`W", "Dimension of input. n_output : int or tuple[] Dimension of output. activation :", "n_{input}}` and :math:`b \\\\in \\\\mathbb{R}^{n_{output}}`. Parameters ---------- n_input : int or tuple[] Dimension", "sample prob : bool Flag for changing behavior of some layers. Returns -------", "\\\\in \\\\mathbb{R}^{n_{output}}`, :math:`W \\\\in \\\\mathbb{R}^{n_{output} \\\\times n_{input}}` and :math:`b \\\\in \\\\mathbb{R}^{n_{output}}`. Parameters ----------", "if isinstance(n_input, tuple) else (None, n_input) output_shape = n_output if isinstance(n_output, tuple) else", ".. math:: Layer(x) = activation(Wx + b) \"\"\" if x.ndim > 2: x", "or tuple[] Dimension of input. n_output : int or tuple[] Dimension of output.", "activation(Wx + b) \"\"\" if x.ndim > 2: x = x.flatten(2) lin_output =", ":math:`W \\\\in \\\\mathbb{R}^{n_{output} \\\\times n_{input}}` and :math:`b \\\\in \\\\mathbb{R}^{n_{output}}`. Parameters ---------- n_input :", "\\\\mathbb{R}^{n_{output}}`, :math:`W \\\\in \\\\mathbb{R}^{n_{output} \\\\times n_{input}}` and :math:`b \\\\in \\\\mathbb{R}^{n_{output}}`. Parameters ---------- n_input", "class Dense(Layer): \"\"\" Typical Layer of MLP. .. 
math:: Layer(x) = activation(Wx +", "activation(Wx + b) where :math:`x \\\\in \\\\mathbb{R}^{n_{output}}`, :math:`W \\\\in \\\\mathbb{R}^{n_{output} \\\\times n_{input}}` and", "the output layers according to above equation: .. math:: Layer(x) = activation(Wx +", "of output. activation : callback Activation function. \"\"\" def __init__(self, n_input=None, n_output=None, activation=None):", "math:: Layer(x) = activation(Wx + b) where :math:`x \\\\in \\\\mathbb{R}^{n_{output}}`, :math:`W \\\\in \\\\mathbb{R}^{n_{output}", "output of layers Parameters ---------- x : theano.tensor.matrix Input sample prob : bool", "lin_output = T.dot(x, self.get_W()) + self.get_b().dimshuffle('x', 0) # noinspection PyCallingNonCallable return ( lin_output", "else (None, n_output) super(Dense, self).__init__(input_shape=input_shape, output_shape=output_shape, non_linearity=activation) def output(self, x, prob=True): \"\"\" Return", "\\\\mathbb{R}^{n_{output}}`. Parameters ---------- n_input : int or tuple[] Dimension of input. n_output :", "(None, n_input) output_shape = n_output if isinstance(n_output, tuple) else (None, n_output) super(Dense, self).__init__(input_shape=input_shape,", "= T.dot(x, self.get_W()) + self.get_b().dimshuffle('x', 0) # noinspection PyCallingNonCallable return ( lin_output if", "+ self.get_b().dimshuffle('x', 0) # noinspection PyCallingNonCallable return ( lin_output if self._non_linearity is None", "changing behavior of some layers. Returns ------- theano.tensor.matrix Returns the output layers according", "T.dot(x, self.get_W()) + self.get_b().dimshuffle('x', 0) # noinspection PyCallingNonCallable return ( lin_output if self._non_linearity", "\"\"\" Return output of layers Parameters ---------- x : theano.tensor.matrix Input sample prob", "import Layer __all__ = ['Dense'] class Dense(Layer): \"\"\" Typical Layer of MLP. ..", "\\\\in \\\\mathbb{R}^{n_{output} \\\\times n_{input}}` and :math:`b \\\\in \\\\mathbb{R}^{n_{output}}`. 
Parameters ---------- n_input : int", ": theano.tensor.matrix Input sample prob : bool Flag for changing behavior of some", "int or tuple[] Dimension of input. n_output : int or tuple[] Dimension of", "n_output if isinstance(n_output, tuple) else (None, n_output) super(Dense, self).__init__(input_shape=input_shape, output_shape=output_shape, non_linearity=activation) def output(self,", "Layer(x) = activation(Wx + b) where :math:`x \\\\in \\\\mathbb{R}^{n_{output}}`, :math:`W \\\\in \\\\mathbb{R}^{n_{output} \\\\times", "Flag for changing behavior of some layers. Returns ------- theano.tensor.matrix Returns the output", "= activation(Wx + b) \"\"\" if x.ndim > 2: x = x.flatten(2) lin_output", ": bool Flag for changing behavior of some layers. Returns ------- theano.tensor.matrix Returns", "+ b) where :math:`x \\\\in \\\\mathbb{R}^{n_{output}}`, :math:`W \\\\in \\\\mathbb{R}^{n_{output} \\\\times n_{input}}` and :math:`b", "n_output=None, activation=None): input_shape = n_input if isinstance(n_input, tuple) else (None, n_input) output_shape =", "---------- n_input : int or tuple[] Dimension of input. n_output : int or", "theano.tensor as T from .layer import Layer __all__ = ['Dense'] class Dense(Layer): \"\"\"", "output. activation : callback Activation function. \"\"\" def __init__(self, n_input=None, n_output=None, activation=None): input_shape", "x.flatten(2) lin_output = T.dot(x, self.get_W()) + self.get_b().dimshuffle('x', 0) # noinspection PyCallingNonCallable return (", "= activation(Wx + b) where :math:`x \\\\in \\\\mathbb{R}^{n_{output}}`, :math:`W \\\\in \\\\mathbb{R}^{n_{output} \\\\times n_{input}}`", "behavior of some layers. Returns ------- theano.tensor.matrix Returns the output layers according to", ".layer import Layer __all__ = ['Dense'] class Dense(Layer): \"\"\" Typical Layer of MLP.", "and :math:`b \\\\in \\\\mathbb{R}^{n_{output}}`. 
Parameters ---------- n_input : int or tuple[] Dimension of", "n_input) output_shape = n_output if isinstance(n_output, tuple) else (None, n_output) super(Dense, self).__init__(input_shape=input_shape, output_shape=output_shape,", "+ b) \"\"\" if x.ndim > 2: x = x.flatten(2) lin_output = T.dot(x,", "\\\\mathbb{R}^{n_{output} \\\\times n_{input}}` and :math:`b \\\\in \\\\mathbb{R}^{n_{output}}`. Parameters ---------- n_input : int or", "layers Parameters ---------- x : theano.tensor.matrix Input sample prob : bool Flag for", "output layers according to above equation: .. math:: Layer(x) = activation(Wx + b)", "int or tuple[] Dimension of output. activation : callback Activation function. \"\"\" def", "to above equation: .. math:: Layer(x) = activation(Wx + b) \"\"\" if x.ndim", "tuple[] Dimension of input. n_output : int or tuple[] Dimension of output. activation", "# noinspection PyCallingNonCallable return ( lin_output if self._non_linearity is None else self._non_linearity(lin_output) )", "self.get_b().dimshuffle('x', 0) # noinspection PyCallingNonCallable return ( lin_output if self._non_linearity is None else", "of MLP. .. math:: Layer(x) = activation(Wx + b) where :math:`x \\\\in \\\\mathbb{R}^{n_{output}}`,", "layers according to above equation: .. math:: Layer(x) = activation(Wx + b) \"\"\"", "0) # noinspection PyCallingNonCallable return ( lin_output if self._non_linearity is None else self._non_linearity(lin_output)", "super(Dense, self).__init__(input_shape=input_shape, output_shape=output_shape, non_linearity=activation) def output(self, x, prob=True): \"\"\" Return output of layers", "---------- x : theano.tensor.matrix Input sample prob : bool Flag for changing behavior", "Returns ------- theano.tensor.matrix Returns the output layers according to above equation: .. math::", "------- theano.tensor.matrix Returns the output layers according to above equation: .. 
math:: Layer(x)", "Layer(x) = activation(Wx + b) \"\"\" if x.ndim > 2: x = x.flatten(2)", "self.get_W()) + self.get_b().dimshuffle('x', 0) # noinspection PyCallingNonCallable return ( lin_output if self._non_linearity is", "of layers Parameters ---------- x : theano.tensor.matrix Input sample prob : bool Flag", "n_input : int or tuple[] Dimension of input. n_output : int or tuple[]", "where :math:`x \\\\in \\\\mathbb{R}^{n_{output}}`, :math:`W \\\\in \\\\mathbb{R}^{n_{output} \\\\times n_{input}}` and :math:`b \\\\in \\\\mathbb{R}^{n_{output}}`.", "isinstance(n_input, tuple) else (None, n_input) output_shape = n_output if isinstance(n_output, tuple) else (None,", ":math:`x \\\\in \\\\mathbb{R}^{n_{output}}`, :math:`W \\\\in \\\\mathbb{R}^{n_{output} \\\\times n_{input}}` and :math:`b \\\\in \\\\mathbb{R}^{n_{output}}`. Parameters", "(None, n_output) super(Dense, self).__init__(input_shape=input_shape, output_shape=output_shape, non_linearity=activation) def output(self, x, prob=True): \"\"\" Return output", "layers. Returns ------- theano.tensor.matrix Returns the output layers according to above equation: ..", "callback Activation function. \"\"\" def __init__(self, n_input=None, n_output=None, activation=None): input_shape = n_input if", "Return output of layers Parameters ---------- x : theano.tensor.matrix Input sample prob :", "theano.tensor.matrix Returns the output layers according to above equation: .. math:: Layer(x) =", "Dense(Layer): \"\"\" Typical Layer of MLP. .. math:: Layer(x) = activation(Wx + b)", "\"\"\" def __init__(self, n_input=None, n_output=None, activation=None): input_shape = n_input if isinstance(n_input, tuple) else", "if x.ndim > 2: x = x.flatten(2) lin_output = T.dot(x, self.get_W()) + self.get_b().dimshuffle('x',", "Activation function. 
\"\"\" def __init__(self, n_input=None, n_output=None, activation=None): input_shape = n_input if isinstance(n_input,", "\"\"\" if x.ndim > 2: x = x.flatten(2) lin_output = T.dot(x, self.get_W()) +", "T from .layer import Layer __all__ = ['Dense'] class Dense(Layer): \"\"\" Typical Layer", "Layer of MLP. .. math:: Layer(x) = activation(Wx + b) where :math:`x \\\\in", "n_output : int or tuple[] Dimension of output. activation : callback Activation function.", "equation: .. math:: Layer(x) = activation(Wx + b) \"\"\" if x.ndim > 2:", "> 2: x = x.flatten(2) lin_output = T.dot(x, self.get_W()) + self.get_b().dimshuffle('x', 0) #", "self).__init__(input_shape=input_shape, output_shape=output_shape, non_linearity=activation) def output(self, x, prob=True): \"\"\" Return output of layers Parameters", "input_shape = n_input if isinstance(n_input, tuple) else (None, n_input) output_shape = n_output if", "x = x.flatten(2) lin_output = T.dot(x, self.get_W()) + self.get_b().dimshuffle('x', 0) # noinspection PyCallingNonCallable", "import theano.tensor as T from .layer import Layer __all__ = ['Dense'] class Dense(Layer):", "above equation: .. math:: Layer(x) = activation(Wx + b) \"\"\" if x.ndim >", "if isinstance(n_output, tuple) else (None, n_output) super(Dense, self).__init__(input_shape=input_shape, output_shape=output_shape, non_linearity=activation) def output(self, x,", "x.ndim > 2: x = x.flatten(2) lin_output = T.dot(x, self.get_W()) + self.get_b().dimshuffle('x', 0)", "= ['Dense'] class Dense(Layer): \"\"\" Typical Layer of MLP. .. math:: Layer(x) =", "or tuple[] Dimension of output. activation : callback Activation function. 
\"\"\" def __init__(self,", "non_linearity=activation) def output(self, x, prob=True): \"\"\" Return output of layers Parameters ---------- x", "theano.tensor.matrix Input sample prob : bool Flag for changing behavior of some layers.", "n_input if isinstance(n_input, tuple) else (None, n_input) output_shape = n_output if isinstance(n_output, tuple)", "\"\"\" Typical Layer of MLP. .. math:: Layer(x) = activation(Wx + b) where", "some layers. Returns ------- theano.tensor.matrix Returns the output layers according to above equation:", "Dimension of output. activation : callback Activation function. \"\"\" def __init__(self, n_input=None, n_output=None,", "b) where :math:`x \\\\in \\\\mathbb{R}^{n_{output}}`, :math:`W \\\\in \\\\mathbb{R}^{n_{output} \\\\times n_{input}}` and :math:`b \\\\in", "n_output) super(Dense, self).__init__(input_shape=input_shape, output_shape=output_shape, non_linearity=activation) def output(self, x, prob=True): \"\"\" Return output of", "x, prob=True): \"\"\" Return output of layers Parameters ---------- x : theano.tensor.matrix Input", "= n_input if isinstance(n_input, tuple) else (None, n_input) output_shape = n_output if isinstance(n_output,", "isinstance(n_output, tuple) else (None, n_output) super(Dense, self).__init__(input_shape=input_shape, output_shape=output_shape, non_linearity=activation) def output(self, x, prob=True):", "output_shape = n_output if isinstance(n_output, tuple) else (None, n_output) super(Dense, self).__init__(input_shape=input_shape, output_shape=output_shape, non_linearity=activation)", "Typical Layer of MLP. .. math:: Layer(x) = activation(Wx + b) where :math:`x", "bool Flag for changing behavior of some layers. Returns ------- theano.tensor.matrix Returns the", ":math:`b \\\\in \\\\mathbb{R}^{n_{output}}`. 
Parameters ---------- n_input : int or tuple[] Dimension of input.", "__init__(self, n_input=None, n_output=None, activation=None): input_shape = n_input if isinstance(n_input, tuple) else (None, n_input)", "Layer __all__ = ['Dense'] class Dense(Layer): \"\"\" Typical Layer of MLP. .. math::", "output(self, x, prob=True): \"\"\" Return output of layers Parameters ---------- x : theano.tensor.matrix", "def __init__(self, n_input=None, n_output=None, activation=None): input_shape = n_input if isinstance(n_input, tuple) else (None,", "output_shape=output_shape, non_linearity=activation) def output(self, x, prob=True): \"\"\" Return output of layers Parameters ----------", "else (None, n_input) output_shape = n_output if isinstance(n_output, tuple) else (None, n_output) super(Dense,", "= n_output if isinstance(n_output, tuple) else (None, n_output) super(Dense, self).__init__(input_shape=input_shape, output_shape=output_shape, non_linearity=activation) def", "prob=True): \"\"\" Return output of layers Parameters ---------- x : theano.tensor.matrix Input sample", "math:: Layer(x) = activation(Wx + b) \"\"\" if x.ndim > 2: x =", "from .layer import Layer __all__ = ['Dense'] class Dense(Layer): \"\"\" Typical Layer of", "of input. n_output : int or tuple[] Dimension of output. activation : callback", "tuple) else (None, n_input) output_shape = n_output if isinstance(n_output, tuple) else (None, n_output)", "for changing behavior of some layers. Returns ------- theano.tensor.matrix Returns the output layers", "tuple) else (None, n_output) super(Dense, self).__init__(input_shape=input_shape, output_shape=output_shape, non_linearity=activation) def output(self, x, prob=True): \"\"\"", "Parameters ---------- x : theano.tensor.matrix Input sample prob : bool Flag for changing", "b) \"\"\" if x.ndim > 2: x = x.flatten(2) lin_output = T.dot(x, self.get_W())", ": callback Activation function. 
\"\"\" def __init__(self, n_input=None, n_output=None, activation=None): input_shape = n_input", "Parameters ---------- n_input : int or tuple[] Dimension of input. n_output : int", "x : theano.tensor.matrix Input sample prob : bool Flag for changing behavior of", "of some layers. Returns ------- theano.tensor.matrix Returns the output layers according to above", "['Dense'] class Dense(Layer): \"\"\" Typical Layer of MLP. .. math:: Layer(x) = activation(Wx", "= x.flatten(2) lin_output = T.dot(x, self.get_W()) + self.get_b().dimshuffle('x', 0) # noinspection PyCallingNonCallable return", "activation=None): input_shape = n_input if isinstance(n_input, tuple) else (None, n_input) output_shape = n_output", ": int or tuple[] Dimension of output. activation : callback Activation function. \"\"\"", "__all__ = ['Dense'] class Dense(Layer): \"\"\" Typical Layer of MLP. .. math:: Layer(x)", "\\\\in \\\\mathbb{R}^{n_{output}}`. Parameters ---------- n_input : int or tuple[] Dimension of input. n_output", "according to above equation: .. math:: Layer(x) = activation(Wx + b) \"\"\" if", "activation : callback Activation function. \"\"\" def __init__(self, n_input=None, n_output=None, activation=None): input_shape =", "Input sample prob : bool Flag for changing behavior of some layers. Returns", "prob : bool Flag for changing behavior of some layers. Returns ------- theano.tensor.matrix", "tuple[] Dimension of output. activation : callback Activation function. \"\"\" def __init__(self, n_input=None,", "as T from .layer import Layer __all__ = ['Dense'] class Dense(Layer): \"\"\" Typical", ".. math:: Layer(x) = activation(Wx + b) where :math:`x \\\\in \\\\mathbb{R}^{n_{output}}`, :math:`W \\\\in", "function. \"\"\" def __init__(self, n_input=None, n_output=None, activation=None): input_shape = n_input if isinstance(n_input, tuple)" ]
[ "state] return max(minDists) + (len(state) // 12) def hUnOpt(state): minDists = [targetsD[s] for", "range(len(board[i])): tup = (i, j) if tup in state and tup in targets:", "if nextState in visited: continue visited.add(nextState) if nextState in targetsD: targetsD[nextState] = min(steps", "res = [] # for i in range(len(board)): # for j in range(len(board[i])):", "getMoves() if not isWall(combineMove(state, _dirs[move]))] for nextState in aviableSteps: if nextState in visited:", "'#' def getInitialState(countPaths=True): # state => tuple(boxesTuple, keeperPos) global targets, board board =", "for move in getMoves()]: # vN = tuple(map(sum, zip(v, move))) # if not", "in targetsD: targetsD[nextState] = min(steps + 1, targetsD[nextState]) else: targetsD[nextState] = steps +", "return ['U', 'D', 'L', 'R'] def isWall(pos): (i, j) = pos return board[i][j]", "= set() for x in state: pos = combineMove(x, _dirs[move]) if isWall(pos): newState.add(x)", "in state: if s not in targets: return False return True def apllyMove(state,", "+ (len(state) // 12) def hUnOpt(state): minDists = [targetsD[s] for s in state]", "set(getIndexs(board, ['G', 'B'])) state = set(getIndexs(board, ['B', 'S'])) if countPaths: for t in", "for nextState in aviableSteps: if nextState in visited: continue visited.add(nextState) if nextState in", "range(len(board)): for j in range(len(board[i])): tup = (i, j) if tup in state", "0), 'D': (1, 0), 'R': (0, 1), 'L': (0, -1)} d = {}", "(i, j) if tup in state and tup in targets: print('B', end='') elif", "G: # for move in [_dirs[move] for move in getMoves()]: # vN =", "= 1 # for u in G: # for v1 in G: #", "v = (i, j) # if not isWall(v): # res.append(v) # return res", "# res = [] # for t in targets: # res.append(d[(v, t)]) #", "def warshalFloyd(G): # for v1 in G: # for v2 in G: #", "t)]) # targetsD[v] = min(res) def h(state): minDists = [] minDists = [targetsD[s]", "+ 1) ) ** 2 return res # def takeV(): # res =", "set() board = [] _dirs = {'U': (-1, 0), 'D': 
(1, 0), 'R':", "# state => tuple(boxesTuple, keeperPos) global targets, board board = readBoard() targets =", "max(minDists) + (len(state) // 12) def hUnOpt(state): minDists = [targetsD[s] for s in", "s not in targets: return False return True def apllyMove(state, move): newState =", "isWall(vN): # d[(v, vN)] = 1 # for u in G: # for", "= [combineMove(state, _dirs[move]) for move in getMoves() if not isWall(combineMove(state, _dirs[move]))] for nextState", "queue: (state, steps) = queue.popleft() aviableSteps = [combineMove(state, _dirs[move]) for move in getMoves()", "print() print() def getMoves(): return ['U', 'D', 'L', 'R'] def isWall(pos): (i, j)", "shortestPathToGoal(t) return tuple(sorted(state)) def isWin(state): for s in state: if s not in", "# return res # def warshalFloyd(G): # for v1 in G: # for", "tuple(map(sum, zip(v, move))) # if not isWall(vN): # d[(v, vN)] = 1 #", "(i, j) = pos return board[i][j] == '#' def getInitialState(countPaths=True): # state =>", "in getMoves()]: # vN = tuple(map(sum, zip(v, move))) # if not isWall(vN): #", "targetsD[v] = min(res) def h(state): minDists = [] minDists = [targetsD[s] for s", "for t in targets: # res.append(d[(v, t)]) # targetsD[v] = min(res) def h(state):", "= set([pos]) queue = deque([(pos, 0)]) while queue: (state, steps) = queue.popleft() aviableSteps", "# for i in range(len(board)): # for j in range(len(board[i])): # v =", "res # def warshalFloyd(G): # for v1 in G: # for v2 in", "res += minDists[i] * ((i + 1) ) ** 2 return res #", "for i in range(len(board)): # for j in range(len(board[i])): # v = (i,", "= set() board = [] _dirs = {'U': (-1, 0), 'D': (1, 0),", "{} def shortestPathToGoal(pos): targetsD[pos] = 0 visited = set([pos]) queue = deque([(pos, 0)])", "print('G', end='') elif board[i][j] != '#': print(' ', end='') else: print('#', end='') print()", "in G: # for v2 in G: # d[(v1, v2)] = float('inf') #", "# res = [] # for i in range(len(board)): # for j in", "move in [_dirs[move] for move in 
getMoves()]: # vN = tuple(map(sum, zip(v, move)))", "readBoard() targets = set(getIndexs(board, ['G', 'B'])) state = set(getIndexs(board, ['B', 'S'])) if countPaths:", "in state: pos = combineMove(x, _dirs[move]) if isWall(pos): newState.add(x) else: newState.add(pos) return tuple(sorted(newState))", "move in getMoves(): states.append((apllyMove(state, move), move)) return states targetsD = {} def shortestPathToGoal(pos):", "targets: print('G', end='') elif board[i][j] != '#': print(' ', end='') else: print('#', end='')", "res.append(d[(v, t)]) # targetsD[v] = min(res) def h(state): minDists = [] minDists =", "d[(v1, v2)] > d[(v1, u)] + d[(u, v2)]: # d[(v1, v2)] = d[(v1,", "in state] minDists = sorted(minDists) res = 0 for i in range(len(minDists)): res", "move in getMoves() if not isWall(combineMove(state, _dirs[move]))] for nextState in aviableSteps: if nextState", "* from sokoban import * targets = set() board = [] _dirs =", "!= '#': print(' ', end='') else: print('#', end='') print() print() def getMoves(): return", "from sokobanASTAR import taxiDistance from IO_SI import * from sokoban import * targets", "for x in state: pos = combineMove(x, _dirs[move]) if isWall(pos): newState.add(x) else: newState.add(pos)", "move)) return states targetsD = {} def shortestPathToGoal(pos): targetsD[pos] = 0 visited =", "2 return res # def takeV(): # res = [] # for i", "# def countShortestPathToTarget(G): # for v in G: # res = [] #", "'D': (1, 0), 'R': (0, 1), 'L': (0, -1)} d = {} def", "# if not isWall(v): # res.append(v) # return res # def warshalFloyd(G): #", "+ 1 queue.append((nextState, steps + 1)) return # def countShortestPathToTarget(G): # for v", "targetsD[nextState] = steps + 1 queue.append((nextState, steps + 1)) return # def countShortestPathToTarget(G):", "v2 in G: # d[(v1, v2)] = float('inf') # for v in G:", "visited: continue visited.add(nextState) if nextState in targetsD: targetsD[nextState] = min(steps + 1, targetsD[nextState])", "isWin(state): for s in 
state: if s not in targets: return False return", "# for move in [_dirs[move] for move in getMoves()]: # vN = tuple(map(sum,", "visited.add(nextState) if nextState in targetsD: targetsD[nextState] = min(steps + 1, targetsD[nextState]) else: targetsD[nextState]", "1 # for u in G: # for v1 in G: # for", "newState.add(pos) return tuple(sorted(newState)) def genNewStates(state): states = [] for move in getMoves(): states.append((apllyMove(state,", "minDists = [targetsD[s] for s in state] return max(minDists) + (len(state) // 12)", "import taxiDistance from IO_SI import * from sokoban import * targets = set()", "+ 1, targetsD[nextState]) else: targetsD[nextState] = steps + 1 queue.append((nextState, steps + 1))", "in range(len(board)): for j in range(len(board[i])): tup = (i, j) if tup in", "else: newState.add(pos) return tuple(sorted(newState)) def genNewStates(state): states = [] for move in getMoves():", "= [] # for t in targets: # res.append(d[(v, t)]) # targetsD[v] =", "in getMoves(): states.append((apllyMove(state, move), move)) return states targetsD = {} def shortestPathToGoal(pos): targetsD[pos]", "_dirs[move]))] for nextState in aviableSteps: if nextState in visited: continue visited.add(nextState) if nextState", "zip(v, move))) # if not isWall(vN): # d[(v, vN)] = 1 # for", "takeV(): # res = [] # for i in range(len(board)): # for j", "i in range(len(board)): for j in range(len(board[i])): tup = (i, j) if tup", "range(len(board)): # for j in range(len(board[i])): # v = (i, j) # if", "deque([(pos, 0)]) while queue: (state, steps) = queue.popleft() aviableSteps = [combineMove(state, _dirs[move]) for", "= [] minDists = [targetsD[s] for s in state] return max(minDists) + (len(state)", "in G: # if d[(v1, v2)] > d[(v1, u)] + d[(u, v2)]: #", "for u in G: # for v1 in G: # for v2 in", "tup in state and tup in targets: print('B', end='') elif tup in state:", "s in state: if s not in targets: return False return True def", "s in state] return max(minDists) + (len(state) 
// 12) def hUnOpt(state): minDists =", "+= minDists[i] * ((i + 1) ) ** 2 return res # def", "targets = set(getIndexs(board, ['G', 'B'])) state = set(getIndexs(board, ['B', 'S'])) if countPaths: for", "h(state): minDists = [] minDists = [targetsD[s] for s in state] return max(minDists)", "apllyMove(state, move): newState = set() for x in state: pos = combineMove(x, _dirs[move])", "taxiDistance from IO_SI import * from sokoban import * targets = set() board", "set([pos]) queue = deque([(pos, 0)]) while queue: (state, steps) = queue.popleft() aviableSteps =", "targets: return False return True def apllyMove(state, move): newState = set() for x", "* targets = set() board = [] _dirs = {'U': (-1, 0), 'D':", "['U', 'D', 'L', 'R'] def isWall(pos): (i, j) = pos return board[i][j] ==", "for t in targets: shortestPathToGoal(t) return tuple(sorted(state)) def isWin(state): for s in state:", "for v1 in G: # for v2 in G: # d[(v1, v2)] =", "1), 'L': (0, -1)} d = {} def showBoard(state): for i in range(len(board)):", "aviableSteps = [combineMove(state, _dirs[move]) for move in getMoves() if not isWall(combineMove(state, _dirs[move]))] for", "= [] _dirs = {'U': (-1, 0), 'D': (1, 0), 'R': (0, 1),", "j) # if not isWall(v): # res.append(v) # return res # def warshalFloyd(G):", "v in G: # for move in [_dirs[move] for move in getMoves()]: #", "v2)] = float('inf') # for v in G: # for move in [_dirs[move]", "def takeV(): # res = [] # for i in range(len(board)): # for", "hUnOpt(state): minDists = [targetsD[s] for s in state] minDists = sorted(minDists) res =", "return max(minDists) + (len(state) // 12) def hUnOpt(state): minDists = [targetsD[s] for s", "queue.append((nextState, steps + 1)) return # def countShortestPathToTarget(G): # for v in G:", "not isWall(vN): # d[(v, vN)] = 1 # for u in G: #", "state: pos = combineMove(x, _dirs[move]) if isWall(pos): newState.add(x) else: newState.add(pos) return tuple(sorted(newState)) def", "isWall(pos): (i, j) = pos return board[i][j] == '#' def 
getInitialState(countPaths=True): # state", "x in state: pos = combineMove(x, _dirs[move]) if isWall(pos): newState.add(x) else: newState.add(pos) return", "nextState in aviableSteps: if nextState in visited: continue visited.add(nextState) if nextState in targetsD:", "minDists = [targetsD[s] for s in state] minDists = sorted(minDists) res = 0", "combineMove(x, _dirs[move]) if isWall(pos): newState.add(x) else: newState.add(pos) return tuple(sorted(newState)) def genNewStates(state): states =", "# res.append(v) # return res # def warshalFloyd(G): # for v1 in G:", "in [_dirs[move] for move in getMoves()]: # vN = tuple(map(sum, zip(v, move))) #", "in range(len(board[i])): tup = (i, j) if tup in state and tup in", "board[i][j] != '#': print(' ', end='') else: print('#', end='') print() print() def getMoves():", "move in getMoves()]: # vN = tuple(map(sum, zip(v, move))) # if not isWall(vN):", "# for v2 in G: # if d[(v1, v2)] > d[(v1, u)] +", "= min(res) def h(state): minDists = [] minDists = [targetsD[s] for s in", "# for v in G: # for move in [_dirs[move] for move in", "_dirs = {'U': (-1, 0), 'D': (1, 0), 'R': (0, 1), 'L': (0,", "# d[(v, vN)] = 1 # for u in G: # for v1", "[targetsD[s] for s in state] return max(minDists) + (len(state) // 12) def hUnOpt(state):", "=> tuple(boxesTuple, keeperPos) global targets, board board = readBoard() targets = set(getIndexs(board, ['G',", "= sorted(minDists) res = 0 for i in range(len(minDists)): res += minDists[i] *", "steps) = queue.popleft() aviableSteps = [combineMove(state, _dirs[move]) for move in getMoves() if not", "pos = combineMove(x, _dirs[move]) if isWall(pos): newState.add(x) else: newState.add(pos) return tuple(sorted(newState)) def genNewStates(state):", "return True def apllyMove(state, move): newState = set() for x in state: pos", "= [] for move in getMoves(): states.append((apllyMove(state, move), move)) return states targetsD =", "return res # def takeV(): # res = [] # for i in", "nextState in targetsD: 
targetsD[nextState] = min(steps + 1, targetsD[nextState]) else: targetsD[nextState] = steps", "= {} def showBoard(state): for i in range(len(board)): for j in range(len(board[i])): tup", "v in G: # res = [] # for t in targets: #", "v1 in G: # for v2 in G: # d[(v1, v2)] = float('inf')", "minDists = [] minDists = [targetsD[s] for s in state] return max(minDists) +", "vN = tuple(map(sum, zip(v, move))) # if not isWall(vN): # d[(v, vN)] =", "(0, -1)} d = {} def showBoard(state): for i in range(len(board)): for j", "** 2 return res # def takeV(): # res = [] # for", "for i in range(len(minDists)): res += minDists[i] * ((i + 1) ) **", "tup in targets: print('G', end='') elif board[i][j] != '#': print(' ', end='') else:", "end='') elif tup in state: print('S', end='') elif tup in targets: print('G', end='')", "tuple(sorted(newState)) def genNewStates(state): states = [] for move in getMoves(): states.append((apllyMove(state, move), move))", "in G: # for move in [_dirs[move] for move in getMoves()]: # vN", "keeperPos) global targets, board board = readBoard() targets = set(getIndexs(board, ['G', 'B'])) state", "for j in range(len(board[i])): tup = (i, j) if tup in state and", "def genNewStates(state): states = [] for move in getMoves(): states.append((apllyMove(state, move), move)) return", "G: # if d[(v1, v2)] > d[(v1, u)] + d[(u, v2)]: # d[(v1,", "[] # for i in range(len(board)): # for j in range(len(board[i])): # v", "['B', 'S'])) if countPaths: for t in targets: shortestPathToGoal(t) return tuple(sorted(state)) def isWin(state):", "= {'U': (-1, 0), 'D': (1, 0), 'R': (0, 1), 'L': (0, -1)}", "= pos return board[i][j] == '#' def getInitialState(countPaths=True): # state => tuple(boxesTuple, keeperPos)", "nextState in visited: continue visited.add(nextState) if nextState in targetsD: targetsD[nextState] = min(steps +", "end='') print() print() def getMoves(): return ['U', 'D', 'L', 'R'] def isWall(pos): (i,", "12) def hUnOpt(state): minDists = [targetsD[s] for s in state] 
minDists = sorted(minDists)", "return states targetsD = {} def shortestPathToGoal(pos): targetsD[pos] = 0 visited = set([pos])", "in targets: # res.append(d[(v, t)]) # targetsD[v] = min(res) def h(state): minDists =", "import * targets = set() board = [] _dirs = {'U': (-1, 0),", "return # def countShortestPathToTarget(G): # for v in G: # res = []", "# for t in targets: # res.append(d[(v, t)]) # targetsD[v] = min(res) def", "elif tup in state: print('S', end='') elif tup in targets: print('G', end='') elif", "'#': print(' ', end='') else: print('#', end='') print() print() def getMoves(): return ['U',", "if countPaths: for t in targets: shortestPathToGoal(t) return tuple(sorted(state)) def isWin(state): for s", "(-1, 0), 'D': (1, 0), 'R': (0, 1), 'L': (0, -1)} d =", "return res # def warshalFloyd(G): # for v1 in G: # for v2", "isWall(v): # res.append(v) # return res # def warshalFloyd(G): # for v1 in", "min(res) def h(state): minDists = [] minDists = [targetsD[s] for s in state]", "# for v in G: # res = [] # for t in", "res = 0 for i in range(len(minDists)): res += minDists[i] * ((i +", "return tuple(sorted(newState)) def genNewStates(state): states = [] for move in getMoves(): states.append((apllyMove(state, move),", "'D', 'L', 'R'] def isWall(pos): (i, j) = pos return board[i][j] == '#'", "return False return True def apllyMove(state, move): newState = set() for x in", "for j in range(len(board[i])): # v = (i, j) # if not isWall(v):", "0), 'R': (0, 1), 'L': (0, -1)} d = {} def showBoard(state): for", "# for u in G: # for v1 in G: # for v2", "in state: print('S', end='') elif tup in targets: print('G', end='') elif board[i][j] !=", "[targetsD[s] for s in state] minDists = sorted(minDists) res = 0 for i", "targetsD[pos] = 0 visited = set([pos]) queue = deque([(pos, 0)]) while queue: (state,", "print() def getMoves(): return ['U', 'D', 'L', 'R'] def isWall(pos): (i, j) =", "for s in state: if s not in targets: return False return True", "G: # for v2 in G: # if d[(v1, 
v2)] > d[(v1, u)]", "end='') else: print('#', end='') print() print() def getMoves(): return ['U', 'D', 'L', 'R']", "def getInitialState(countPaths=True): # state => tuple(boxesTuple, keeperPos) global targets, board board = readBoard()", "print('#', end='') print() print() def getMoves(): return ['U', 'D', 'L', 'R'] def isWall(pos):", "{} def showBoard(state): for i in range(len(board)): for j in range(len(board[i])): tup =", "tup in state: print('S', end='') elif tup in targets: print('G', end='') elif board[i][j]", "in range(len(board[i])): # v = (i, j) # if not isWall(v): # res.append(v)", "[] _dirs = {'U': (-1, 0), 'D': (1, 0), 'R': (0, 1), 'L':", "= 0 visited = set([pos]) queue = deque([(pos, 0)]) while queue: (state, steps)", "sokobanASTAR import taxiDistance from IO_SI import * from sokoban import * targets =", "getMoves(): return ['U', 'D', 'L', 'R'] def isWall(pos): (i, j) = pos return", "= min(steps + 1, targetsD[nextState]) else: targetsD[nextState] = steps + 1 queue.append((nextState, steps", "pos return board[i][j] == '#' def getInitialState(countPaths=True): # state => tuple(boxesTuple, keeperPos) global", "== '#' def getInitialState(countPaths=True): # state => tuple(boxesTuple, keeperPos) global targets, board board", "in G: # res = [] # for t in targets: # res.append(d[(v,", "d = {} def showBoard(state): for i in range(len(board)): for j in range(len(board[i])):", "= 0 for i in range(len(minDists)): res += minDists[i] * ((i + 1)", "False return True def apllyMove(state, move): newState = set() for x in state:", "if isWall(pos): newState.add(x) else: newState.add(pos) return tuple(sorted(newState)) def genNewStates(state): states = [] for", "# for j in range(len(board[i])): # v = (i, j) # if not", "newState.add(x) else: newState.add(pos) return tuple(sorted(newState)) def genNewStates(state): states = [] for move in", "getMoves(): states.append((apllyMove(state, move), move)) return states targetsD = {} def shortestPathToGoal(pos): targetsD[pos] 
=", "G: # for v2 in G: # d[(v1, v2)] = float('inf') # for", "['G', 'B'])) state = set(getIndexs(board, ['B', 'S'])) if countPaths: for t in targets:", "in targets: return False return True def apllyMove(state, move): newState = set() for", "tuple(sorted(state)) def isWin(state): for s in state: if s not in targets: return", "def hUnOpt(state): minDists = [targetsD[s] for s in state] minDists = sorted(minDists) res", "continue visited.add(nextState) if nextState in targetsD: targetsD[nextState] = min(steps + 1, targetsD[nextState]) else:", "# d[(v1, v2)] = float('inf') # for v in G: # for move", "showBoard(state): for i in range(len(board)): for j in range(len(board[i])): tup = (i, j)", "IO_SI import * from sokoban import * targets = set() board = []", "state => tuple(boxesTuple, keeperPos) global targets, board board = readBoard() targets = set(getIndexs(board,", "in targets: print('B', end='') elif tup in state: print('S', end='') elif tup in", "states.append((apllyMove(state, move), move)) return states targetsD = {} def shortestPathToGoal(pos): targetsD[pos] = 0", "steps + 1)) return # def countShortestPathToTarget(G): # for v in G: #", "shortestPathToGoal(pos): targetsD[pos] = 0 visited = set([pos]) queue = deque([(pos, 0)]) while queue:", "[] minDists = [targetsD[s] for s in state] return max(minDists) + (len(state) //", "end='') elif tup in targets: print('G', end='') elif board[i][j] != '#': print(' ',", "j in range(len(board[i])): # v = (i, j) # if not isWall(v): #", "minDists[i] * ((i + 1) ) ** 2 return res # def takeV():", "set(getIndexs(board, ['B', 'S'])) if countPaths: for t in targets: shortestPathToGoal(t) return tuple(sorted(state)) def", "range(len(minDists)): res += minDists[i] * ((i + 1) ) ** 2 return res", "else: print('#', end='') print() print() def getMoves(): return ['U', 'D', 'L', 'R'] def", "for v2 in G: # if d[(v1, v2)] > d[(v1, u)] + d[(u,", "= set(getIndexs(board, ['G', 'B'])) state = set(getIndexs(board, ['B', 'S'])) if countPaths: for 
t", "print('S', end='') elif tup in targets: print('G', end='') elif board[i][j] != '#': print('", "[] for move in getMoves(): states.append((apllyMove(state, move), move)) return states targetsD = {}", "= (i, j) # if not isWall(v): # res.append(v) # return res #", "else: targetsD[nextState] = steps + 1 queue.append((nextState, steps + 1)) return # def", "j) = pos return board[i][j] == '#' def getInitialState(countPaths=True): # state => tuple(boxesTuple,", "elif tup in targets: print('G', end='') elif board[i][j] != '#': print(' ', end='')", "0 for i in range(len(minDists)): res += minDists[i] * ((i + 1) )", "elif board[i][j] != '#': print(' ', end='') else: print('#', end='') print() print() def", "in visited: continue visited.add(nextState) if nextState in targetsD: targetsD[nextState] = min(steps + 1,", "# def warshalFloyd(G): # for v1 in G: # for v2 in G:", "for v2 in G: # d[(v1, v2)] = float('inf') # for v in", "move), move)) return states targetsD = {} def shortestPathToGoal(pos): targetsD[pos] = 0 visited", "targets, board board = readBoard() targets = set(getIndexs(board, ['G', 'B'])) state = set(getIndexs(board,", "in getMoves() if not isWall(combineMove(state, _dirs[move]))] for nextState in aviableSteps: if nextState in", "if not isWall(vN): # d[(v, vN)] = 1 # for u in G:", "warshalFloyd(G): # for v1 in G: # for v2 in G: # d[(v1,", "state: print('S', end='') elif tup in targets: print('G', end='') elif board[i][j] != '#':", "= [] # for i in range(len(board)): # for j in range(len(board[i])): #", "queue.popleft() aviableSteps = [combineMove(state, _dirs[move]) for move in getMoves() if not isWall(combineMove(state, _dirs[move]))]", "minDists = sorted(minDists) res = 0 for i in range(len(minDists)): res += minDists[i]", "* ((i + 1) ) ** 2 return res # def takeV(): #", "for move in [_dirs[move] for move in getMoves()]: # vN = tuple(map(sum, zip(v,", "= [targetsD[s] for s in state] return max(minDists) + (len(state) // 12) def", "', end='') else: print('#', 
end='') print() print() def getMoves(): return ['U', 'D', 'L',", "state and tup in targets: print('B', end='') elif tup in state: print('S', end='')", "def h(state): minDists = [] minDists = [targetsD[s] for s in state] return", "in range(len(minDists)): res += minDists[i] * ((i + 1) ) ** 2 return", "# for v1 in G: # for v2 in G: # d[(v1, v2)]", "print('B', end='') elif tup in state: print('S', end='') elif tup in targets: print('G',", "if not isWall(combineMove(state, _dirs[move]))] for nextState in aviableSteps: if nextState in visited: continue", "in state] return max(minDists) + (len(state) // 12) def hUnOpt(state): minDists = [targetsD[s]", "float('inf') # for v in G: # for move in [_dirs[move] for move", "set() for x in state: pos = combineMove(x, _dirs[move]) if isWall(pos): newState.add(x) else:", "getMoves()]: # vN = tuple(map(sum, zip(v, move))) # if not isWall(vN): # d[(v,", "targetsD[nextState] = min(steps + 1, targetsD[nextState]) else: targetsD[nextState] = steps + 1 queue.append((nextState,", "v1 in G: # for v2 in G: # if d[(v1, v2)] >", "while queue: (state, steps) = queue.popleft() aviableSteps = [combineMove(state, _dirs[move]) for move in", "> d[(v1, u)] + d[(u, v2)]: # d[(v1, v2)] = d[(v1, u)] +", "res.append(v) # return res # def warshalFloyd(G): # for v1 in G: #", "d[(v, vN)] = 1 # for u in G: # for v1 in", "if nextState in targetsD: targetsD[nextState] = min(steps + 1, targetsD[nextState]) else: targetsD[nextState] =", "# vN = tuple(map(sum, zip(v, move))) # if not isWall(vN): # d[(v, vN)]", "def isWin(state): for s in state: if s not in targets: return False", "targetsD[nextState]) else: targetsD[nextState] = steps + 1 queue.append((nextState, steps + 1)) return #", "sorted(minDists) res = 0 for i in range(len(minDists)): res += minDists[i] * ((i", "not isWall(v): # res.append(v) # return res # def warshalFloyd(G): # for v1", "= (i, j) if tup in state and tup in targets: print('B', end='')", "not isWall(combineMove(state, _dirs[move]))] for 
nextState in aviableSteps: if nextState in visited: continue visited.add(nextState)", "(i, j) # if not isWall(v): # res.append(v) # return res # def", "0 visited = set([pos]) queue = deque([(pos, 0)]) while queue: (state, steps) =", "def countShortestPathToTarget(G): # for v in G: # res = [] # for", "def getMoves(): return ['U', 'D', 'L', 'R'] def isWall(pos): (i, j) = pos", "s in state] minDists = sorted(minDists) res = 0 for i in range(len(minDists)):", "board board = readBoard() targets = set(getIndexs(board, ['G', 'B'])) state = set(getIndexs(board, ['B',", "= deque([(pos, 0)]) while queue: (state, steps) = queue.popleft() aviableSteps = [combineMove(state, _dirs[move])", "global targets, board board = readBoard() targets = set(getIndexs(board, ['G', 'B'])) state =", "states = [] for move in getMoves(): states.append((apllyMove(state, move), move)) return states targetsD", "((i + 1) ) ** 2 return res # def takeV(): # res", "= set(getIndexs(board, ['B', 'S'])) if countPaths: for t in targets: shortestPathToGoal(t) return tuple(sorted(state))", "t in targets: # res.append(d[(v, t)]) # targetsD[v] = min(res) def h(state): minDists", "G: # res = [] # for t in targets: # res.append(d[(v, t)])", "in range(len(board)): # for j in range(len(board[i])): # v = (i, j) #", "d[(v1, u)] + d[(u, v2)]: # d[(v1, v2)] = d[(v1, u)] + d[(u,", "def apllyMove(state, move): newState = set() for x in state: pos = combineMove(x,", "t in targets: shortestPathToGoal(t) return tuple(sorted(state)) def isWin(state): for s in state: if", "board[i][j] == '#' def getInitialState(countPaths=True): # state => tuple(boxesTuple, keeperPos) global targets, board", "in G: # for v2 in G: # if d[(v1, v2)] > d[(v1,", "getInitialState(countPaths=True): # state => tuple(boxesTuple, keeperPos) global targets, board board = readBoard() targets", "-1)} d = {} def showBoard(state): for i in range(len(board)): for j in", "u in G: # for v1 in G: # for v2 in G:", "# if d[(v1, v2)] > d[(v1, u)] + d[(u, v2)]: # 
d[(v1, v2)]", "i in range(len(board)): # for j in range(len(board[i])): # v = (i, j)", "(len(state) // 12) def hUnOpt(state): minDists = [targetsD[s] for s in state] minDists", "targets: # res.append(d[(v, t)]) # targetsD[v] = min(res) def h(state): minDists = []", "return board[i][j] == '#' def getInitialState(countPaths=True): # state => tuple(boxesTuple, keeperPos) global targets,", "state] minDists = sorted(minDists) res = 0 for i in range(len(minDists)): res +=", "'R': (0, 1), 'L': (0, -1)} d = {} def showBoard(state): for i", "from sokoban import * targets = set() board = [] _dirs = {'U':", "'R'] def isWall(pos): (i, j) = pos return board[i][j] == '#' def getInitialState(countPaths=True):", "board = [] _dirs = {'U': (-1, 0), 'D': (1, 0), 'R': (0,", "targets = set() board = [] _dirs = {'U': (-1, 0), 'D': (1,", "vN)] = 1 # for u in G: # for v1 in G:", "_dirs[move]) for move in getMoves() if not isWall(combineMove(state, _dirs[move]))] for nextState in aviableSteps:", "for s in state] return max(minDists) + (len(state) // 12) def hUnOpt(state): minDists", "for v in G: # res = [] # for t in targets:", "for i in range(len(board)): for j in range(len(board[i])): tup = (i, j) if", "and tup in targets: print('B', end='') elif tup in state: print('S', end='') elif", "# for v1 in G: # for v2 in G: # if d[(v1,", "from IO_SI import * from sokoban import * targets = set() board =", "_dirs[move]) if isWall(pos): newState.add(x) else: newState.add(pos) return tuple(sorted(newState)) def genNewStates(state): states = []", "queue = deque([(pos, 0)]) while queue: (state, steps) = queue.popleft() aviableSteps = [combineMove(state,", "1) ) ** 2 return res # def takeV(): # res = []", "{'U': (-1, 0), 'D': (1, 0), 'R': (0, 1), 'L': (0, -1)} d", "if d[(v1, v2)] > d[(v1, u)] + d[(u, v2)]: # d[(v1, v2)] =", "in state and tup in targets: print('B', end='') elif tup in state: print('S',", "def isWall(pos): (i, j) = pos return board[i][j] == '#' def getInitialState(countPaths=True): #", 
"tup in targets: print('B', end='') elif tup in state: print('S', end='') elif tup", "[combineMove(state, _dirs[move]) for move in getMoves() if not isWall(combineMove(state, _dirs[move]))] for nextState in", "targetsD = {} def shortestPathToGoal(pos): targetsD[pos] = 0 visited = set([pos]) queue =", "tuple(boxesTuple, keeperPos) global targets, board board = readBoard() targets = set(getIndexs(board, ['G', 'B']))", "= readBoard() targets = set(getIndexs(board, ['G', 'B'])) state = set(getIndexs(board, ['B', 'S'])) if", "if s not in targets: return False return True def apllyMove(state, move): newState", "= float('inf') # for v in G: # for move in [_dirs[move] for", "def showBoard(state): for i in range(len(board)): for j in range(len(board[i])): tup = (i,", "for move in getMoves() if not isWall(combineMove(state, _dirs[move]))] for nextState in aviableSteps: if", "# def takeV(): # res = [] # for i in range(len(board)): #", "min(steps + 1, targetsD[nextState]) else: targetsD[nextState] = steps + 1 queue.append((nextState, steps +", "isWall(combineMove(state, _dirs[move]))] for nextState in aviableSteps: if nextState in visited: continue visited.add(nextState) if", "= tuple(map(sum, zip(v, move))) # if not isWall(vN): # d[(v, vN)] = 1", "for s in state] minDists = sorted(minDists) res = 0 for i in", "end='') elif board[i][j] != '#': print(' ', end='') else: print('#', end='') print() print()", "sokoban import * targets = set() board = [] _dirs = {'U': (-1,", "'L': (0, -1)} d = {} def showBoard(state): for i in range(len(board)): for", "targets: shortestPathToGoal(t) return tuple(sorted(state)) def isWin(state): for s in state: if s not", "[_dirs[move] for move in getMoves()]: # vN = tuple(map(sum, zip(v, move))) # if", "for v in G: # for move in [_dirs[move] for move in getMoves()]:", "(0, 1), 'L': (0, -1)} d = {} def showBoard(state): for i in", "u)] + d[(u, v2)]: # d[(v1, v2)] = d[(v1, u)] + d[(u, v2)]", "countPaths: for t in targets: shortestPathToGoal(t) return 
tuple(sorted(state)) def isWin(state): for s in", "steps + 1 queue.append((nextState, steps + 1)) return # def countShortestPathToTarget(G): # for", "in G: # for v1 in G: # for v2 in G: #", "j in range(len(board[i])): tup = (i, j) if tup in state and tup", ") ** 2 return res # def takeV(): # res = [] #", "= {} def shortestPathToGoal(pos): targetsD[pos] = 0 visited = set([pos]) queue = deque([(pos,", "j) if tup in state and tup in targets: print('B', end='') elif tup", "// 12) def hUnOpt(state): minDists = [targetsD[s] for s in state] minDists =", "(state, steps) = queue.popleft() aviableSteps = [combineMove(state, _dirs[move]) for move in getMoves() if", "1 queue.append((nextState, steps + 1)) return # def countShortestPathToTarget(G): # for v in", "= combineMove(x, _dirs[move]) if isWall(pos): newState.add(x) else: newState.add(pos) return tuple(sorted(newState)) def genNewStates(state): states", "# res.append(d[(v, t)]) # targetsD[v] = min(res) def h(state): minDists = [] minDists", "'B'])) state = set(getIndexs(board, ['B', 'S'])) if countPaths: for t in targets: shortestPathToGoal(t)", "for v1 in G: # for v2 in G: # if d[(v1, v2)]", "states targetsD = {} def shortestPathToGoal(pos): targetsD[pos] = 0 visited = set([pos]) queue", "G: # for v1 in G: # for v2 in G: # if", "in targets: print('G', end='') elif board[i][j] != '#': print(' ', end='') else: print('#',", "'S'])) if countPaths: for t in targets: shortestPathToGoal(t) return tuple(sorted(state)) def isWin(state): for", "1)) return # def countShortestPathToTarget(G): # for v in G: # res =", "if not isWall(v): # res.append(v) # return res # def warshalFloyd(G): # for", "state = set(getIndexs(board, ['B', 'S'])) if countPaths: for t in targets: shortestPathToGoal(t) return", "range(len(board[i])): # v = (i, j) # if not isWall(v): # res.append(v) #", "countShortestPathToTarget(G): # for v in G: # res = [] # for t", "def shortestPathToGoal(pos): targetsD[pos] = 0 visited = set([pos]) queue = deque([(pos, 0)]) 
while", "True def apllyMove(state, move): newState = set() for x in state: pos =", "aviableSteps: if nextState in visited: continue visited.add(nextState) if nextState in targetsD: targetsD[nextState] =", "if tup in state and tup in targets: print('B', end='') elif tup in", "i in range(len(minDists)): res += minDists[i] * ((i + 1) ) ** 2", "board = readBoard() targets = set(getIndexs(board, ['G', 'B'])) state = set(getIndexs(board, ['B', 'S']))", "[] # for t in targets: # res.append(d[(v, t)]) # targetsD[v] = min(res)", "v2 in G: # if d[(v1, v2)] > d[(v1, u)] + d[(u, v2)]:", "not in targets: return False return True def apllyMove(state, move): newState = set()", "res = [] # for t in targets: # res.append(d[(v, t)]) # targetsD[v]", "tup = (i, j) if tup in state and tup in targets: print('B',", "for move in getMoves(): states.append((apllyMove(state, move), move)) return states targetsD = {} def", "# if not isWall(vN): # d[(v, vN)] = 1 # for u in", "move))) # if not isWall(vN): # d[(v, vN)] = 1 # for u", "+ 1)) return # def countShortestPathToTarget(G): # for v in G: # res", "# targetsD[v] = min(res) def h(state): minDists = [] minDists = [targetsD[s] for", "v2)] > d[(v1, u)] + d[(u, v2)]: # d[(v1, v2)] = d[(v1, u)]", "newState = set() for x in state: pos = combineMove(x, _dirs[move]) if isWall(pos):", "# v = (i, j) # if not isWall(v): # res.append(v) # return", "= [targetsD[s] for s in state] minDists = sorted(minDists) res = 0 for", "res # def takeV(): # res = [] # for i in range(len(board)):", "print(' ', end='') else: print('#', end='') print() print() def getMoves(): return ['U', 'D',", "visited = set([pos]) queue = deque([(pos, 0)]) while queue: (state, steps) = queue.popleft()", "return tuple(sorted(state)) def isWin(state): for s in state: if s not in targets:", "= queue.popleft() aviableSteps = [combineMove(state, _dirs[move]) for move in getMoves() if not isWall(combineMove(state,", "(1, 0), 'R': (0, 1), 'L': (0, -1)} d = {} def showBoard(state):", "move): 
newState = set() for x in state: pos = combineMove(x, _dirs[move]) if", "genNewStates(state): states = [] for move in getMoves(): states.append((apllyMove(state, move), move)) return states", "in aviableSteps: if nextState in visited: continue visited.add(nextState) if nextState in targetsD: targetsD[nextState]", "targetsD: targetsD[nextState] = min(steps + 1, targetsD[nextState]) else: targetsD[nextState] = steps + 1", "state: if s not in targets: return False return True def apllyMove(state, move):", "= steps + 1 queue.append((nextState, steps + 1)) return # def countShortestPathToTarget(G): #", "'L', 'R'] def isWall(pos): (i, j) = pos return board[i][j] == '#' def", "import * from sokoban import * targets = set() board = [] _dirs", "in targets: shortestPathToGoal(t) return tuple(sorted(state)) def isWin(state): for s in state: if s", "# for v2 in G: # d[(v1, v2)] = float('inf') # for v", "0)]) while queue: (state, steps) = queue.popleft() aviableSteps = [combineMove(state, _dirs[move]) for move", "isWall(pos): newState.add(x) else: newState.add(pos) return tuple(sorted(newState)) def genNewStates(state): states = [] for move", "G: # d[(v1, v2)] = float('inf') # for v in G: # for", "in G: # d[(v1, v2)] = float('inf') # for v in G: #", "targets: print('B', end='') elif tup in state: print('S', end='') elif tup in targets:", "1, targetsD[nextState]) else: targetsD[nextState] = steps + 1 queue.append((nextState, steps + 1)) return", "d[(v1, v2)] = float('inf') # for v in G: # for move in" ]
[ "PREV_MONTH = (Q(sessions__start__month=MONTH_NOW - 1 if MONTH_NOW > 1 else 12) & Q(sessions__start__year=YEAR_NOW", "YEAR_NOW - 1)) return query.annotate( curr_hours=Coalesce(Sum('sessions__duration', filter=CURR_MONTH), Cast(timedelta(), DurationField())), prev_hours=Coalesce(Sum('sessions__duration', filter=PREV_MONTH), Cast(timedelta(), DurationField())),", "MONTH_NOW > 2 else YEAR_NOW - 1)) return query.annotate( curr_hours=Coalesce(Sum('sessions__duration', filter=CURR_MONTH), Cast(timedelta(), DurationField())),", "Cast from django.utils import timezone from .models import ControllerSession from ..users.models import User,", "Q(sessions__start__year=timezone.now().year) users = User.objects.exclude(status=Status.NON_MEMBER) users = users.annotate(hours=Sum('sessions__duration', filter=SAME_MONTH & SAME_YEAR)) return users.exclude(hours__isnull=True).order_by('-hours') def", "for session in sessions: position = session.facility + '_' + session.level if position", "Q(sessions__start__year=YEAR_NOW if MONTH_NOW > 1 else YEAR_NOW - 1)) PREV_PREV_MONTH = (Q(sessions__start__month=MONTH_NOW -", "get_top_controllers(): \"\"\" Returns query set of active users annotated with controlling hour sums", "filter=CURR_MONTH), Cast(timedelta(), DurationField())), prev_hours=Coalesce(Sum('sessions__duration', filter=PREV_MONTH), Cast(timedelta(), DurationField())), prev_prev_hours=Coalesce(Sum('sessions__duration', filter=PREV_PREV_MONTH), Cast(timedelta(), DurationField())), ) def", "MONTH_NOW > 1 else YEAR_NOW - 1)) PREV_PREV_MONTH = (Q(sessions__start__month=MONTH_NOW - 2 if", "= session.duration sorted_positions = sorted(position_durations, key=position_durations.get, reverse=True) return [{'position': position, 'hours': position_durations[position]} for", "penultimate (prev_prev_hours) months. 
\"\"\" MONTH_NOW = timezone.now().month YEAR_NOW = timezone.now().year CURR_MONTH = (Q(sessions__start__month=MONTH_NOW)", "if MONTH_NOW > 1 else 12) & Q(sessions__start__year=YEAR_NOW if MONTH_NOW > 1 else", "sessions = ControllerSession.objects.filter(SAME_MONTH & SAME_YEAR) position_durations = {} for session in sessions: position", "of the given year annotated with the controlling hours for that day. \"\"\"", "else 11) & Q(sessions__start__year=YEAR_NOW if MONTH_NOW > 2 else YEAR_NOW - 1)) return", "the given year annotated with the controlling hours for that day. \"\"\" sessions", "from django.utils import timezone from .models import ControllerSession from ..users.models import User, Status", "with no hours are not included). \"\"\" SAME_MONTH = Q(sessions__start__month=timezone.now().month) SAME_YEAR = Q(sessions__start__year=timezone.now().year)", "in sorted_positions] def get_daily_statistics(year, user=None): \"\"\" Returns a query dictionary of every day", "DurationField from django.db.models.functions import Coalesce, Cast from django.utils import timezone from .models import", "hours (controllers with no hours are not included). \"\"\" SAME_MONTH = Q(sessions__start__month=timezone.now().month) SAME_YEAR", "of every day of the given year annotated with the controlling hours for", "set of active users annotated with controlling hours for the current (curr_hours), previous", "controlling hours for that day. \"\"\" sessions = ControllerSession.objects.filter(start__year=year) if user: sessions =", "filter=SAME_MONTH & SAME_YEAR)) return users.exclude(hours__isnull=True).order_by('-hours') def get_top_positions(): SAME_MONTH = Q(start__month=timezone.now().month) SAME_YEAR = Q(start__year=timezone.now().year)", "(prev_prev_hours) months. 
\"\"\" return annotate_hours(User.objects.exclude(status=Status.NON_MEMBER)) def get_top_controllers(): \"\"\" Returns query set of active", "PREV_PREV_MONTH = (Q(sessions__start__month=MONTH_NOW - 2 if MONTH_NOW > 2 else 12 if MONTH_NOW", "= Q(start__month=timezone.now().month) SAME_YEAR = Q(start__year=timezone.now().year) sessions = ControllerSession.objects.filter(SAME_MONTH & SAME_YEAR) position_durations = {}", "Returns query set of active users annotated with controlling hours for the current", "timezone.now().year CURR_MONTH = (Q(sessions__start__month=MONTH_NOW) & Q(sessions__start__year=YEAR_NOW)) PREV_MONTH = (Q(sessions__start__month=MONTH_NOW - 1 if MONTH_NOW", "(hours) sorted by most controlling hours (controllers with no hours are not included).", "annotate_hours(User.objects.exclude(status=Status.NON_MEMBER)) def get_top_controllers(): \"\"\" Returns query set of active users annotated with controlling", "not included). \"\"\" SAME_MONTH = Q(sessions__start__month=timezone.now().month) SAME_YEAR = Q(sessions__start__year=timezone.now().year) users = User.objects.exclude(status=Status.NON_MEMBER) users", "session.duration else: position_durations[position] = session.duration sorted_positions = sorted(position_durations, key=position_durations.get, reverse=True) return [{'position': position,", "DurationField())), ) def get_user_hours(): \"\"\" Returns query set of active users annotated with", "import User, Status def annotate_hours(query): \"\"\" Annotates given QuerySet with controlling hours for", "previous (prev_hours), and penultimate (prev_prev_hours) months. 
\"\"\" MONTH_NOW = timezone.now().month YEAR_NOW = timezone.now().year", "of active users annotated with controlling hour sums for the current month (hours)", "[{'position': position, 'hours': position_durations[position]} for position in sorted_positions] def get_daily_statistics(year, user=None): \"\"\" Returns", "& Q(sessions__start__year=YEAR_NOW if MONTH_NOW > 2 else YEAR_NOW - 1)) return query.annotate( curr_hours=Coalesce(Sum('sessions__duration',", "= {} for session in sessions: position = session.facility + '_' + session.level", "DurationField())), prev_hours=Coalesce(Sum('sessions__duration', filter=PREV_MONTH), Cast(timedelta(), DurationField())), prev_prev_hours=Coalesce(Sum('sessions__duration', filter=PREV_PREV_MONTH), Cast(timedelta(), DurationField())), ) def get_user_hours(): \"\"\"", "(controllers with no hours are not included). \"\"\" SAME_MONTH = Q(sessions__start__month=timezone.now().month) SAME_YEAR =", "if MONTH_NOW > 1 else YEAR_NOW - 1)) PREV_PREV_MONTH = (Q(sessions__start__month=MONTH_NOW - 2", "penultimate (prev_prev_hours) months. 
\"\"\" return annotate_hours(User.objects.exclude(status=Status.NON_MEMBER)) def get_top_controllers(): \"\"\" Returns query set of", "in sessions: position = session.facility + '_' + session.level if position in position_durations:", "key=position_durations.get, reverse=True) return [{'position': position, 'hours': position_durations[position]} for position in sorted_positions] def get_daily_statistics(year,", "'hours': position_durations[position]} for position in sorted_positions] def get_daily_statistics(year, user=None): \"\"\" Returns a query", "with controlling hour sums for the current month (hours) sorted by most controlling", "- 2 if MONTH_NOW > 2 else 12 if MONTH_NOW > 1 else", "users annotated with controlling hour sums for the current month (hours) sorted by", "YEAR_NOW - 1)) PREV_PREV_MONTH = (Q(sessions__start__month=MONTH_NOW - 2 if MONTH_NOW > 2 else", "filter=PREV_MONTH), Cast(timedelta(), DurationField())), prev_prev_hours=Coalesce(Sum('sessions__duration', filter=PREV_PREV_MONTH), Cast(timedelta(), DurationField())), ) def get_user_hours(): \"\"\" Returns query", "active users annotated with controlling hours for the current (curr_hours), previous (prev_hours), and", "timezone from .models import ControllerSession from ..users.models import User, Status def annotate_hours(query): \"\"\"", "hours for the current (curr_hours), previous (prev_hours), and penultimate (prev_prev_hours) months. 
\"\"\" MONTH_NOW", "current month (hours) sorted by most controlling hours (controllers with no hours are", "2 else 12 if MONTH_NOW > 1 else 11) & Q(sessions__start__year=YEAR_NOW if MONTH_NOW", "for position in sorted_positions] def get_daily_statistics(year, user=None): \"\"\" Returns a query dictionary of", "users annotated with controlling hours for the current (curr_hours), previous (prev_hours), and penultimate", "\"\"\" Returns query set of active users annotated with controlling hour sums for", "= Q(sessions__start__month=timezone.now().month) SAME_YEAR = Q(sessions__start__year=timezone.now().year) users = User.objects.exclude(status=Status.NON_MEMBER) users = users.annotate(hours=Sum('sessions__duration', filter=SAME_MONTH &", "and penultimate (prev_prev_hours) months. \"\"\" return annotate_hours(User.objects.exclude(status=Status.NON_MEMBER)) def get_top_controllers(): \"\"\" Returns query set", "11) & Q(sessions__start__year=YEAR_NOW if MONTH_NOW > 2 else YEAR_NOW - 1)) return query.annotate(", "def annotate_hours(query): \"\"\" Annotates given QuerySet with controlling hours for the current (curr_hours),", "SAME_YEAR = Q(start__year=timezone.now().year) sessions = ControllerSession.objects.filter(SAME_MONTH & SAME_YEAR) position_durations = {} for session", "reverse=True) return [{'position': position, 'hours': position_durations[position]} for position in sorted_positions] def get_daily_statistics(year, user=None):", "SAME_MONTH = Q(sessions__start__month=timezone.now().month) SAME_YEAR = Q(sessions__start__year=timezone.now().year) users = User.objects.exclude(status=Status.NON_MEMBER) users = users.annotate(hours=Sum('sessions__duration', filter=SAME_MONTH", "every day of the given year annotated with the controlling hours for that", "(Q(sessions__start__month=MONTH_NOW - 1 if MONTH_NOW > 1 else 12) & Q(sessions__start__year=YEAR_NOW if MONTH_NOW", "> 2 else 12 if MONTH_NOW > 1 else 11) & Q(sessions__start__year=YEAR_NOW if", "def 
get_top_controllers(): \"\"\" Returns query set of active users annotated with controlling hour", "Returns query set of active users annotated with controlling hour sums for the", "Q, DurationField from django.db.models.functions import Coalesce, Cast from django.utils import timezone from .models", "previous (prev_hours), and penultimate (prev_prev_hours) months. \"\"\" return annotate_hours(User.objects.exclude(status=Status.NON_MEMBER)) def get_top_controllers(): \"\"\" Returns", "from ..users.models import User, Status def annotate_hours(query): \"\"\" Annotates given QuerySet with controlling", "'_' + session.level if position in position_durations: position_durations[position] += session.duration else: position_durations[position] =", "with controlling hours for the current (curr_hours), previous (prev_hours), and penultimate (prev_prev_hours) months.", "current (curr_hours), previous (prev_hours), and penultimate (prev_prev_hours) months. \"\"\" return annotate_hours(User.objects.exclude(status=Status.NON_MEMBER)) def get_top_controllers():", "position_durations[position] += session.duration else: position_durations[position] = session.duration sorted_positions = sorted(position_durations, key=position_durations.get, reverse=True) return", "session.duration sorted_positions = sorted(position_durations, key=position_durations.get, reverse=True) return [{'position': position, 'hours': position_durations[position]} for position", "= sorted(position_durations, key=position_durations.get, reverse=True) return [{'position': position, 'hours': position_durations[position]} for position in sorted_positions]", "of active users annotated with controlling hours for the current (curr_hours), previous (prev_hours),", "active users annotated with controlling hour sums for the current month (hours) sorted", "\"\"\" Returns a query dictionary of every day of the given year annotated", "timezone.now().month YEAR_NOW = timezone.now().year CURR_MONTH = 
(Q(sessions__start__month=MONTH_NOW) & Q(sessions__start__year=YEAR_NOW)) PREV_MONTH = (Q(sessions__start__month=MONTH_NOW -", "& SAME_YEAR) position_durations = {} for session in sessions: position = session.facility +", "Status def annotate_hours(query): \"\"\" Annotates given QuerySet with controlling hours for the current", "by most controlling hours (controllers with no hours are not included). \"\"\" SAME_MONTH", "prev_hours=Coalesce(Sum('sessions__duration', filter=PREV_MONTH), Cast(timedelta(), DurationField())), prev_prev_hours=Coalesce(Sum('sessions__duration', filter=PREV_PREV_MONTH), Cast(timedelta(), DurationField())), ) def get_user_hours(): \"\"\" Returns", "& Q(sessions__start__year=YEAR_NOW if MONTH_NOW > 1 else YEAR_NOW - 1)) PREV_PREV_MONTH = (Q(sessions__start__month=MONTH_NOW", "import timezone from .models import ControllerSession from ..users.models import User, Status def annotate_hours(query):", "ControllerSession from ..users.models import User, Status def annotate_hours(query): \"\"\" Annotates given QuerySet with", "Annotates given QuerySet with controlling hours for the current (curr_hours), previous (prev_hours), and", "SAME_YEAR = Q(sessions__start__year=timezone.now().year) users = User.objects.exclude(status=Status.NON_MEMBER) users = users.annotate(hours=Sum('sessions__duration', filter=SAME_MONTH & SAME_YEAR)) return", "query dictionary of every day of the given year annotated with the controlling", "and penultimate (prev_prev_hours) months. \"\"\" MONTH_NOW = timezone.now().month YEAR_NOW = timezone.now().year CURR_MONTH =", "= timezone.now().year CURR_MONTH = (Q(sessions__start__month=MONTH_NOW) & Q(sessions__start__year=YEAR_NOW)) PREV_MONTH = (Q(sessions__start__month=MONTH_NOW - 1 if", "that day. 
\"\"\" sessions = ControllerSession.objects.filter(start__year=year) if user: sessions = sessions.filter(user=user) return sessions.extra({'day':", "query set of active users annotated with controlling hour sums for the current", "position_durations[position]} for position in sorted_positions] def get_daily_statistics(year, user=None): \"\"\" Returns a query dictionary", "session.facility + '_' + session.level if position in position_durations: position_durations[position] += session.duration else:", "position in position_durations: position_durations[position] += session.duration else: position_durations[position] = session.duration sorted_positions = sorted(position_durations,", "- 1)) PREV_PREV_MONTH = (Q(sessions__start__month=MONTH_NOW - 2 if MONTH_NOW > 2 else 12", "SAME_YEAR) position_durations = {} for session in sessions: position = session.facility + '_'", "sorted_positions = sorted(position_durations, key=position_durations.get, reverse=True) return [{'position': position, 'hours': position_durations[position]} for position in", "from django.db.models.functions import Coalesce, Cast from django.utils import timezone from .models import ControllerSession", "> 1 else 12) & Q(sessions__start__year=YEAR_NOW if MONTH_NOW > 1 else YEAR_NOW -", "Cast(timedelta(), DurationField())), prev_hours=Coalesce(Sum('sessions__duration', filter=PREV_MONTH), Cast(timedelta(), DurationField())), prev_prev_hours=Coalesce(Sum('sessions__duration', filter=PREV_PREV_MONTH), Cast(timedelta(), DurationField())), ) def get_user_hours():", "most controlling hours (controllers with no hours are not included). \"\"\" SAME_MONTH =", "are not included). 
\"\"\" SAME_MONTH = Q(sessions__start__month=timezone.now().month) SAME_YEAR = Q(sessions__start__year=timezone.now().year) users = User.objects.exclude(status=Status.NON_MEMBER)", "1 else 12) & Q(sessions__start__year=YEAR_NOW if MONTH_NOW > 1 else YEAR_NOW - 1))", "get_user_hours(): \"\"\" Returns query set of active users annotated with controlling hours for", "- 1)) return query.annotate( curr_hours=Coalesce(Sum('sessions__duration', filter=CURR_MONTH), Cast(timedelta(), DurationField())), prev_hours=Coalesce(Sum('sessions__duration', filter=PREV_MONTH), Cast(timedelta(), DurationField())), prev_prev_hours=Coalesce(Sum('sessions__duration',", "django.utils import timezone from .models import ControllerSession from ..users.models import User, Status def", "given year annotated with the controlling hours for that day. \"\"\" sessions =", "query.annotate( curr_hours=Coalesce(Sum('sessions__duration', filter=CURR_MONTH), Cast(timedelta(), DurationField())), prev_hours=Coalesce(Sum('sessions__duration', filter=PREV_MONTH), Cast(timedelta(), DurationField())), prev_prev_hours=Coalesce(Sum('sessions__duration', filter=PREV_PREV_MONTH), Cast(timedelta(), DurationField())),", "MONTH_NOW = timezone.now().month YEAR_NOW = timezone.now().year CURR_MONTH = (Q(sessions__start__month=MONTH_NOW) & Q(sessions__start__year=YEAR_NOW)) PREV_MONTH =", "\"\"\" Returns query set of active users annotated with controlling hours for the", "\"\"\" SAME_MONTH = Q(sessions__start__month=timezone.now().month) SAME_YEAR = Q(sessions__start__year=timezone.now().year) users = User.objects.exclude(status=Status.NON_MEMBER) users = users.annotate(hours=Sum('sessions__duration',", "2 else YEAR_NOW - 1)) return query.annotate( curr_hours=Coalesce(Sum('sessions__duration', filter=CURR_MONTH), Cast(timedelta(), DurationField())), prev_hours=Coalesce(Sum('sessions__duration', filter=PREV_MONTH),", "> 1 else 11) & Q(sessions__start__year=YEAR_NOW if MONTH_NOW > 2 else YEAR_NOW -", "if MONTH_NOW > 1 
else 11) & Q(sessions__start__year=YEAR_NOW if MONTH_NOW > 2 else", "annotated with controlling hours for the current (curr_hours), previous (prev_hours), and penultimate (prev_prev_hours)", "return [{'position': position, 'hours': position_durations[position]} for position in sorted_positions] def get_daily_statistics(year, user=None): \"\"\"", "sorted_positions] def get_daily_statistics(year, user=None): \"\"\" Returns a query dictionary of every day of", "- 1 if MONTH_NOW > 1 else 12) & Q(sessions__start__year=YEAR_NOW if MONTH_NOW >", "day. \"\"\" sessions = ControllerSession.objects.filter(start__year=year) if user: sessions = sessions.filter(user=user) return sessions.extra({'day': 'date(start)'}).values('day').annotate(value=Sum('duration'))", "from django.db.models import Sum, Q, DurationField from django.db.models.functions import Coalesce, Cast from django.utils", "(prev_prev_hours) months. \"\"\" MONTH_NOW = timezone.now().month YEAR_NOW = timezone.now().year CURR_MONTH = (Q(sessions__start__month=MONTH_NOW) &", "QuerySet with controlling hours for the current (curr_hours), previous (prev_hours), and penultimate (prev_prev_hours)", "+ session.level if position in position_durations: position_durations[position] += session.duration else: position_durations[position] = session.duration", "for the current (curr_hours), previous (prev_hours), and penultimate (prev_prev_hours) months. 
\"\"\" MONTH_NOW =", "datetime import timedelta from django.db.models import Sum, Q, DurationField from django.db.models.functions import Coalesce,", "(Q(sessions__start__month=MONTH_NOW - 2 if MONTH_NOW > 2 else 12 if MONTH_NOW > 1", "SAME_YEAR)) return users.exclude(hours__isnull=True).order_by('-hours') def get_top_positions(): SAME_MONTH = Q(start__month=timezone.now().month) SAME_YEAR = Q(start__year=timezone.now().year) sessions =", "position_durations = {} for session in sessions: position = session.facility + '_' +", "if MONTH_NOW > 2 else YEAR_NOW - 1)) return query.annotate( curr_hours=Coalesce(Sum('sessions__duration', filter=CURR_MONTH), Cast(timedelta(),", "sorted(position_durations, key=position_durations.get, reverse=True) return [{'position': position, 'hours': position_durations[position]} for position in sorted_positions] def", "1)) PREV_PREV_MONTH = (Q(sessions__start__month=MONTH_NOW - 2 if MONTH_NOW > 2 else 12 if", "hours are not included). \"\"\" SAME_MONTH = Q(sessions__start__month=timezone.now().month) SAME_YEAR = Q(sessions__start__year=timezone.now().year) users =", "(prev_hours), and penultimate (prev_prev_hours) months. \"\"\" MONTH_NOW = timezone.now().month YEAR_NOW = timezone.now().year CURR_MONTH", "year annotated with the controlling hours for that day. \"\"\" sessions = ControllerSession.objects.filter(start__year=year)", "DurationField())), prev_prev_hours=Coalesce(Sum('sessions__duration', filter=PREV_PREV_MONTH), Cast(timedelta(), DurationField())), ) def get_user_hours(): \"\"\" Returns query set of", "1)) return query.annotate( curr_hours=Coalesce(Sum('sessions__duration', filter=CURR_MONTH), Cast(timedelta(), DurationField())), prev_hours=Coalesce(Sum('sessions__duration', filter=PREV_MONTH), Cast(timedelta(), DurationField())), prev_prev_hours=Coalesce(Sum('sessions__duration', filter=PREV_PREV_MONTH),", "with the controlling hours for that day. 
\"\"\" sessions = ControllerSession.objects.filter(start__year=year) if user:", "position in sorted_positions] def get_daily_statistics(year, user=None): \"\"\" Returns a query dictionary of every", "users = User.objects.exclude(status=Status.NON_MEMBER) users = users.annotate(hours=Sum('sessions__duration', filter=SAME_MONTH & SAME_YEAR)) return users.exclude(hours__isnull=True).order_by('-hours') def get_top_positions():", "User, Status def annotate_hours(query): \"\"\" Annotates given QuerySet with controlling hours for the", "= session.facility + '_' + session.level if position in position_durations: position_durations[position] += session.duration", "+= session.duration else: position_durations[position] = session.duration sorted_positions = sorted(position_durations, key=position_durations.get, reverse=True) return [{'position':", "= (Q(sessions__start__month=MONTH_NOW - 2 if MONTH_NOW > 2 else 12 if MONTH_NOW >", "& Q(sessions__start__year=YEAR_NOW)) PREV_MONTH = (Q(sessions__start__month=MONTH_NOW - 1 if MONTH_NOW > 1 else 12)", "from datetime import timedelta from django.db.models import Sum, Q, DurationField from django.db.models.functions import", "if position in position_durations: position_durations[position] += session.duration else: position_durations[position] = session.duration sorted_positions =", "SAME_MONTH = Q(start__month=timezone.now().month) SAME_YEAR = Q(start__year=timezone.now().year) sessions = ControllerSession.objects.filter(SAME_MONTH & SAME_YEAR) position_durations =", "a query dictionary of every day of the given year annotated with the", "12) & Q(sessions__start__year=YEAR_NOW if MONTH_NOW > 1 else YEAR_NOW - 1)) PREV_PREV_MONTH =", "else 12) & Q(sessions__start__year=YEAR_NOW if MONTH_NOW > 1 else YEAR_NOW - 1)) PREV_PREV_MONTH", "included). 
\"\"\" SAME_MONTH = Q(sessions__start__month=timezone.now().month) SAME_YEAR = Q(sessions__start__year=timezone.now().year) users = User.objects.exclude(status=Status.NON_MEMBER) users =", "+ '_' + session.level if position in position_durations: position_durations[position] += session.duration else: position_durations[position]", "months. \"\"\" return annotate_hours(User.objects.exclude(status=Status.NON_MEMBER)) def get_top_controllers(): \"\"\" Returns query set of active users", "\"\"\" return annotate_hours(User.objects.exclude(status=Status.NON_MEMBER)) def get_top_controllers(): \"\"\" Returns query set of active users annotated", "query set of active users annotated with controlling hours for the current (curr_hours),", "sessions: position = session.facility + '_' + session.level if position in position_durations: position_durations[position]", "else YEAR_NOW - 1)) PREV_PREV_MONTH = (Q(sessions__start__month=MONTH_NOW - 2 if MONTH_NOW > 2", "import Sum, Q, DurationField from django.db.models.functions import Coalesce, Cast from django.utils import timezone", "filter=PREV_PREV_MONTH), Cast(timedelta(), DurationField())), ) def get_user_hours(): \"\"\" Returns query set of active users", "users = users.annotate(hours=Sum('sessions__duration', filter=SAME_MONTH & SAME_YEAR)) return users.exclude(hours__isnull=True).order_by('-hours') def get_top_positions(): SAME_MONTH = Q(start__month=timezone.now().month)", "return users.exclude(hours__isnull=True).order_by('-hours') def get_top_positions(): SAME_MONTH = Q(start__month=timezone.now().month) SAME_YEAR = Q(start__year=timezone.now().year) sessions = ControllerSession.objects.filter(SAME_MONTH", "hour sums for the current month (hours) sorted by most controlling hours (controllers", "import Coalesce, Cast from django.utils import timezone from .models import ControllerSession from ..users.models", "(curr_hours), previous (prev_hours), and penultimate (prev_prev_hours) months. 
\"\"\" return annotate_hours(User.objects.exclude(status=Status.NON_MEMBER)) def get_top_controllers(): \"\"\"", "users.annotate(hours=Sum('sessions__duration', filter=SAME_MONTH & SAME_YEAR)) return users.exclude(hours__isnull=True).order_by('-hours') def get_top_positions(): SAME_MONTH = Q(start__month=timezone.now().month) SAME_YEAR =", "controlling hour sums for the current month (hours) sorted by most controlling hours", "Q(sessions__start__year=YEAR_NOW)) PREV_MONTH = (Q(sessions__start__month=MONTH_NOW - 1 if MONTH_NOW > 1 else 12) &", "Coalesce, Cast from django.utils import timezone from .models import ControllerSession from ..users.models import", "the current (curr_hours), previous (prev_hours), and penultimate (prev_prev_hours) months. \"\"\" return annotate_hours(User.objects.exclude(status=Status.NON_MEMBER)) def", "= (Q(sessions__start__month=MONTH_NOW) & Q(sessions__start__year=YEAR_NOW)) PREV_MONTH = (Q(sessions__start__month=MONTH_NOW - 1 if MONTH_NOW > 1", "django.db.models.functions import Coalesce, Cast from django.utils import timezone from .models import ControllerSession from", "current (curr_hours), previous (prev_hours), and penultimate (prev_prev_hours) months. 
\"\"\" MONTH_NOW = timezone.now().month YEAR_NOW", "\"\"\" MONTH_NOW = timezone.now().month YEAR_NOW = timezone.now().year CURR_MONTH = (Q(sessions__start__month=MONTH_NOW) & Q(sessions__start__year=YEAR_NOW)) PREV_MONTH", "> 1 else YEAR_NOW - 1)) PREV_PREV_MONTH = (Q(sessions__start__month=MONTH_NOW - 2 if MONTH_NOW", "annotate_hours(query): \"\"\" Annotates given QuerySet with controlling hours for the current (curr_hours), previous", "MONTH_NOW > 1 else 11) & Q(sessions__start__year=YEAR_NOW if MONTH_NOW > 2 else YEAR_NOW", "> 2 else YEAR_NOW - 1)) return query.annotate( curr_hours=Coalesce(Sum('sessions__duration', filter=CURR_MONTH), Cast(timedelta(), DurationField())), prev_hours=Coalesce(Sum('sessions__duration',", "return query.annotate( curr_hours=Coalesce(Sum('sessions__duration', filter=CURR_MONTH), Cast(timedelta(), DurationField())), prev_hours=Coalesce(Sum('sessions__duration', filter=PREV_MONTH), Cast(timedelta(), DurationField())), prev_prev_hours=Coalesce(Sum('sessions__duration', filter=PREV_PREV_MONTH), Cast(timedelta(),", "Cast(timedelta(), DurationField())), prev_prev_hours=Coalesce(Sum('sessions__duration', filter=PREV_PREV_MONTH), Cast(timedelta(), DurationField())), ) def get_user_hours(): \"\"\" Returns query set", "Returns a query dictionary of every day of the given year annotated with", "position_durations: position_durations[position] += session.duration else: position_durations[position] = session.duration sorted_positions = sorted(position_durations, key=position_durations.get, reverse=True)", "dictionary of every day of the given year annotated with the controlling hours", "def get_user_hours(): \"\"\" Returns query set of active users annotated with controlling hours", "1 if MONTH_NOW > 1 else 12) & Q(sessions__start__year=YEAR_NOW if MONTH_NOW > 1", "Sum, Q, DurationField from django.db.models.functions import Coalesce, Cast from django.utils import timezone from", "= (Q(sessions__start__month=MONTH_NOW - 1 if MONTH_NOW > 1 else 
12) & Q(sessions__start__year=YEAR_NOW if", "def get_top_positions(): SAME_MONTH = Q(start__month=timezone.now().month) SAME_YEAR = Q(start__year=timezone.now().year) sessions = ControllerSession.objects.filter(SAME_MONTH & SAME_YEAR)", "for the current month (hours) sorted by most controlling hours (controllers with no", "position, 'hours': position_durations[position]} for position in sorted_positions] def get_daily_statistics(year, user=None): \"\"\" Returns a", "get_top_positions(): SAME_MONTH = Q(start__month=timezone.now().month) SAME_YEAR = Q(start__year=timezone.now().year) sessions = ControllerSession.objects.filter(SAME_MONTH & SAME_YEAR) position_durations", "def get_daily_statistics(year, user=None): \"\"\" Returns a query dictionary of every day of the", "annotated with the controlling hours for that day. \"\"\" sessions = ControllerSession.objects.filter(start__year=year) if", "else YEAR_NOW - 1)) return query.annotate( curr_hours=Coalesce(Sum('sessions__duration', filter=CURR_MONTH), Cast(timedelta(), DurationField())), prev_hours=Coalesce(Sum('sessions__duration', filter=PREV_MONTH), Cast(timedelta(),", "annotated with controlling hour sums for the current month (hours) sorted by most", "position = session.facility + '_' + session.level if position in position_durations: position_durations[position] +=", "in position_durations: position_durations[position] += session.duration else: position_durations[position] = session.duration sorted_positions = sorted(position_durations, key=position_durations.get,", "controlling hours (controllers with no hours are not included). \"\"\" SAME_MONTH = Q(sessions__start__month=timezone.now().month)", "1 else 11) & Q(sessions__start__year=YEAR_NOW if MONTH_NOW > 2 else YEAR_NOW - 1))", "controlling hours for the current (curr_hours), previous (prev_hours), and penultimate (prev_prev_hours) months. \"\"\"", "(curr_hours), previous (prev_hours), and penultimate (prev_prev_hours) months. 
\"\"\" MONTH_NOW = timezone.now().month YEAR_NOW =", "day of the given year annotated with the controlling hours for that day.", "no hours are not included). \"\"\" SAME_MONTH = Q(sessions__start__month=timezone.now().month) SAME_YEAR = Q(sessions__start__year=timezone.now().year) users", "User.objects.exclude(status=Status.NON_MEMBER) users = users.annotate(hours=Sum('sessions__duration', filter=SAME_MONTH & SAME_YEAR)) return users.exclude(hours__isnull=True).order_by('-hours') def get_top_positions(): SAME_MONTH =", "(prev_hours), and penultimate (prev_prev_hours) months. \"\"\" return annotate_hours(User.objects.exclude(status=Status.NON_MEMBER)) def get_top_controllers(): \"\"\" Returns query", "the current (curr_hours), previous (prev_hours), and penultimate (prev_prev_hours) months. \"\"\" MONTH_NOW = timezone.now().month", "Q(sessions__start__year=YEAR_NOW if MONTH_NOW > 2 else YEAR_NOW - 1)) return query.annotate( curr_hours=Coalesce(Sum('sessions__duration', filter=CURR_MONTH),", "get_daily_statistics(year, user=None): \"\"\" Returns a query dictionary of every day of the given", "MONTH_NOW > 2 else 12 if MONTH_NOW > 1 else 11) & Q(sessions__start__year=YEAR_NOW", ") def get_user_hours(): \"\"\" Returns query set of active users annotated with controlling", ".models import ControllerSession from ..users.models import User, Status def annotate_hours(query): \"\"\" Annotates given", "hours for the current (curr_hours), previous (prev_hours), and penultimate (prev_prev_hours) months. \"\"\" return", "set of active users annotated with controlling hour sums for the current month", "hours for that day. 
\"\"\" sessions = ControllerSession.objects.filter(start__year=year) if user: sessions = sessions.filter(user=user)", "= timezone.now().month YEAR_NOW = timezone.now().year CURR_MONTH = (Q(sessions__start__month=MONTH_NOW) & Q(sessions__start__year=YEAR_NOW)) PREV_MONTH = (Q(sessions__start__month=MONTH_NOW", "12 if MONTH_NOW > 1 else 11) & Q(sessions__start__year=YEAR_NOW if MONTH_NOW > 2", "the controlling hours for that day. \"\"\" sessions = ControllerSession.objects.filter(start__year=year) if user: sessions", "from .models import ControllerSession from ..users.models import User, Status def annotate_hours(query): \"\"\" Annotates", "= users.annotate(hours=Sum('sessions__duration', filter=SAME_MONTH & SAME_YEAR)) return users.exclude(hours__isnull=True).order_by('-hours') def get_top_positions(): SAME_MONTH = Q(start__month=timezone.now().month) SAME_YEAR", "ControllerSession.objects.filter(SAME_MONTH & SAME_YEAR) position_durations = {} for session in sessions: position = session.facility", "timedelta from django.db.models import Sum, Q, DurationField from django.db.models.functions import Coalesce, Cast from", "CURR_MONTH = (Q(sessions__start__month=MONTH_NOW) & Q(sessions__start__year=YEAR_NOW)) PREV_MONTH = (Q(sessions__start__month=MONTH_NOW - 1 if MONTH_NOW >", "user=None): \"\"\" Returns a query dictionary of every day of the given year", "2 if MONTH_NOW > 2 else 12 if MONTH_NOW > 1 else 11)", "the current month (hours) sorted by most controlling hours (controllers with no hours", "sorted by most controlling hours (controllers with no hours are not included). 
\"\"\"", "session.level if position in position_durations: position_durations[position] += session.duration else: position_durations[position] = session.duration sorted_positions", "MONTH_NOW > 1 else 12) & Q(sessions__start__year=YEAR_NOW if MONTH_NOW > 1 else YEAR_NOW", "position_durations[position] = session.duration sorted_positions = sorted(position_durations, key=position_durations.get, reverse=True) return [{'position': position, 'hours': position_durations[position]}", "session in sessions: position = session.facility + '_' + session.level if position in", "\"\"\" Annotates given QuerySet with controlling hours for the current (curr_hours), previous (prev_hours),", "sums for the current month (hours) sorted by most controlling hours (controllers with", "Cast(timedelta(), DurationField())), ) def get_user_hours(): \"\"\" Returns query set of active users annotated", "Q(sessions__start__month=timezone.now().month) SAME_YEAR = Q(sessions__start__year=timezone.now().year) users = User.objects.exclude(status=Status.NON_MEMBER) users = users.annotate(hours=Sum('sessions__duration', filter=SAME_MONTH & SAME_YEAR))", "django.db.models import Sum, Q, DurationField from django.db.models.functions import Coalesce, Cast from django.utils import", "import timedelta from django.db.models import Sum, Q, DurationField from django.db.models.functions import Coalesce, Cast", "months. 
\"\"\" MONTH_NOW = timezone.now().month YEAR_NOW = timezone.now().year CURR_MONTH = (Q(sessions__start__month=MONTH_NOW) & Q(sessions__start__year=YEAR_NOW))", "users.exclude(hours__isnull=True).order_by('-hours') def get_top_positions(): SAME_MONTH = Q(start__month=timezone.now().month) SAME_YEAR = Q(start__year=timezone.now().year) sessions = ControllerSession.objects.filter(SAME_MONTH &", "= Q(start__year=timezone.now().year) sessions = ControllerSession.objects.filter(SAME_MONTH & SAME_YEAR) position_durations = {} for session in", "prev_prev_hours=Coalesce(Sum('sessions__duration', filter=PREV_PREV_MONTH), Cast(timedelta(), DurationField())), ) def get_user_hours(): \"\"\" Returns query set of active", "YEAR_NOW = timezone.now().year CURR_MONTH = (Q(sessions__start__month=MONTH_NOW) & Q(sessions__start__year=YEAR_NOW)) PREV_MONTH = (Q(sessions__start__month=MONTH_NOW - 1", "for the current (curr_hours), previous (prev_hours), and penultimate (prev_prev_hours) months. \"\"\" return annotate_hours(User.objects.exclude(status=Status.NON_MEMBER))", "= ControllerSession.objects.filter(SAME_MONTH & SAME_YEAR) position_durations = {} for session in sessions: position =", "= User.objects.exclude(status=Status.NON_MEMBER) users = users.annotate(hours=Sum('sessions__duration', filter=SAME_MONTH & SAME_YEAR)) return users.exclude(hours__isnull=True).order_by('-hours') def get_top_positions(): SAME_MONTH", "..users.models import User, Status def annotate_hours(query): \"\"\" Annotates given QuerySet with controlling hours", "(Q(sessions__start__month=MONTH_NOW) & Q(sessions__start__year=YEAR_NOW)) PREV_MONTH = (Q(sessions__start__month=MONTH_NOW - 1 if MONTH_NOW > 1 else", "Q(start__month=timezone.now().month) SAME_YEAR = Q(start__year=timezone.now().year) sessions = ControllerSession.objects.filter(SAME_MONTH & SAME_YEAR) position_durations = {} for", "else 12 if MONTH_NOW > 1 else 11) & Q(sessions__start__year=YEAR_NOW if MONTH_NOW >", 
"curr_hours=Coalesce(Sum('sessions__duration', filter=CURR_MONTH), Cast(timedelta(), DurationField())), prev_hours=Coalesce(Sum('sessions__duration', filter=PREV_MONTH), Cast(timedelta(), DurationField())), prev_prev_hours=Coalesce(Sum('sessions__duration', filter=PREV_PREV_MONTH), Cast(timedelta(), DurationField())), )", "= Q(sessions__start__year=timezone.now().year) users = User.objects.exclude(status=Status.NON_MEMBER) users = users.annotate(hours=Sum('sessions__duration', filter=SAME_MONTH & SAME_YEAR)) return users.exclude(hours__isnull=True).order_by('-hours')", "& SAME_YEAR)) return users.exclude(hours__isnull=True).order_by('-hours') def get_top_positions(): SAME_MONTH = Q(start__month=timezone.now().month) SAME_YEAR = Q(start__year=timezone.now().year) sessions", "1 else YEAR_NOW - 1)) PREV_PREV_MONTH = (Q(sessions__start__month=MONTH_NOW - 2 if MONTH_NOW >", "for that day. \"\"\" sessions = ControllerSession.objects.filter(start__year=year) if user: sessions = sessions.filter(user=user) return", "return annotate_hours(User.objects.exclude(status=Status.NON_MEMBER)) def get_top_controllers(): \"\"\" Returns query set of active users annotated with", "given QuerySet with controlling hours for the current (curr_hours), previous (prev_hours), and penultimate", "if MONTH_NOW > 2 else 12 if MONTH_NOW > 1 else 11) &", "Q(start__year=timezone.now().year) sessions = ControllerSession.objects.filter(SAME_MONTH & SAME_YEAR) position_durations = {} for session in sessions:", "import ControllerSession from ..users.models import User, Status def annotate_hours(query): \"\"\" Annotates given QuerySet", "month (hours) sorted by most controlling hours (controllers with no hours are not", "else: position_durations[position] = session.duration sorted_positions = sorted(position_durations, key=position_durations.get, reverse=True) return [{'position': position, 'hours':", "{} for session in sessions: position = session.facility + '_' + session.level if" ]
[ "listdir(folder_in) all_info = [] for f in files: if \"json\" not in f", "attributes = list(all_info[0].keys()) attributes.remove(\"room_types\") df_dict = {} for attr in attributes: df_dict[attr] =", "f, \"r\") as f: all_info += json.load(f) with open(folder_out + \"INFO.json\", \"w\") as", "pd.DataFrame(df_dict).to_csv(folder_out + \"/INFO.csv\", index=False) def save_raw_avalibility(folder_in, folder_out, date_in, date_out): all_b = [folder_in +", "date_in, date_out): all_b = [folder_in + e for e in listdir(folder_in) if date_in", "in f: continue with open(folder_in + f, \"r\") as f: all_info += json.load(f)", "json from os import listdir import pandas as pd def save_raw_listing_info(folder_in, folder_out): files", "\"/INFO.csv\", index=False) def save_raw_avalibility(folder_in, folder_out, date_in, date_out): all_b = [folder_in + e for", "as f: all_info += json.load(f) with open(folder_out + \"INFO.json\", \"w\") as f: json.dump(all_info,", "in all_b: f = open(file, \"r\") collect += json.load(f) with open(folder_out + \"AVALIBILITY\"+date_in.replace(\"-\",\"\")+\"_\"+date_out.replace(\"-\",\"\")+\".json\",", "pandas as pd def save_raw_listing_info(folder_in, folder_out): files = listdir(folder_in) all_info = [] for", "import listdir import pandas as pd def save_raw_listing_info(folder_in, folder_out): files = listdir(folder_in) all_info", "+ \"/INFO.csv\", index=False) def save_raw_avalibility(folder_in, folder_out, date_in, date_out): all_b = [folder_in + e", "\"r\") collect += json.load(f) with open(folder_out + \"AVALIBILITY\"+date_in.replace(\"-\",\"\")+\"_\"+date_out.replace(\"-\",\"\")+\".json\", \"w\") as f: json.dump(collect, f,", "all_b: f = open(file, \"r\") collect += json.load(f) with open(folder_out + \"AVALIBILITY\"+date_in.replace(\"-\",\"\")+\"_\"+date_out.replace(\"-\",\"\")+\".json\", \"w\")", "list(all_info[0].keys()) attributes.remove(\"room_types\") df_dict = {} for attr in attributes: df_dict[attr] = [] for", "\"info\" not in f: 
continue with open(folder_in + f, \"r\") as f: all_info", "open(file, \"r\") collect += json.load(f) with open(folder_out + \"AVALIBILITY\"+date_in.replace(\"-\",\"\")+\"_\"+date_out.replace(\"-\",\"\")+\".json\", \"w\") as f: json.dump(collect,", "date_out): all_b = [folder_in + e for e in listdir(folder_in) if date_in in", "open(folder_out + \"INFO.json\", \"w\") as f: json.dump(all_info, f, indent=2) attributes = list(all_info[0].keys()) attributes.remove(\"room_types\")", "indent=2) attributes = list(all_info[0].keys()) attributes.remove(\"room_types\") df_dict = {} for attr in attributes: df_dict[attr]", "attributes: df_dict[attr] = [] for info in all_info: for attr in attributes: df_dict[attr].append(info[attr])", "\"w\") as f: json.dump(all_info, f, indent=2) attributes = list(all_info[0].keys()) attributes.remove(\"room_types\") df_dict = {}", "e in listdir(folder_in) if date_in in e] collect = [] for file in", "f: continue with open(folder_in + f, \"r\") as f: all_info += json.load(f) with", "for file in all_b: f = open(file, \"r\") collect += json.load(f) with open(folder_out", "import pandas as pd def save_raw_listing_info(folder_in, folder_out): files = listdir(folder_in) all_info = []", "as pd def save_raw_listing_info(folder_in, folder_out): files = listdir(folder_in) all_info = [] for f", "for info in all_info: for attr in attributes: df_dict[attr].append(info[attr]) pd.DataFrame(df_dict).to_csv(folder_out + \"/INFO.csv\", index=False)", "save_raw_listing_info(folder_in, folder_out): files = listdir(folder_in) all_info = [] for f in files: if", "as f: json.dump(all_info, f, indent=2) attributes = list(all_info[0].keys()) attributes.remove(\"room_types\") df_dict = {} for", "listdir import pandas as pd def save_raw_listing_info(folder_in, folder_out): files = listdir(folder_in) all_info =", "{} for attr in attributes: df_dict[attr] = [] for info in all_info: for", "json.load(f) with open(folder_out + \"INFO.json\", \"w\") as f: json.dump(all_info, f, 
indent=2) attributes =", "json.dump(all_info, f, indent=2) attributes = list(all_info[0].keys()) attributes.remove(\"room_types\") df_dict = {} for attr in", "<reponame>lihd1003/Booking.com-Scraper from ast import literal_eval import json from os import listdir import pandas", "with open(folder_out + \"INFO.json\", \"w\") as f: json.dump(all_info, f, indent=2) attributes = list(all_info[0].keys())", "\"INFO.json\", \"w\") as f: json.dump(all_info, f, indent=2) attributes = list(all_info[0].keys()) attributes.remove(\"room_types\") df_dict =", "save_raw_avalibility(folder_in, folder_out, date_in, date_out): all_b = [folder_in + e for e in listdir(folder_in)", "f = open(file, \"r\") collect += json.load(f) with open(folder_out + \"AVALIBILITY\"+date_in.replace(\"-\",\"\")+\"_\"+date_out.replace(\"-\",\"\")+\".json\", \"w\") as", "= [] for info in all_info: for attr in attributes: df_dict[attr].append(info[attr]) pd.DataFrame(df_dict).to_csv(folder_out +", "or \"info\" not in f: continue with open(folder_in + f, \"r\") as f:", "[] for f in files: if \"json\" not in f or \"info\" not", "f or \"info\" not in f: continue with open(folder_in + f, \"r\") as", "attributes.remove(\"room_types\") df_dict = {} for attr in attributes: df_dict[attr] = [] for info", "all_info: for attr in attributes: df_dict[attr].append(info[attr]) pd.DataFrame(df_dict).to_csv(folder_out + \"/INFO.csv\", index=False) def save_raw_avalibility(folder_in, folder_out,", "[] for file in all_b: f = open(file, \"r\") collect += json.load(f) with", "pd def save_raw_listing_info(folder_in, folder_out): files = listdir(folder_in) all_info = [] for f in", "e] collect = [] for file in all_b: f = open(file, \"r\") collect", "continue with open(folder_in + f, \"r\") as f: all_info += json.load(f) with open(folder_out", "f: all_info += json.load(f) with open(folder_out + \"INFO.json\", \"w\") as f: json.dump(all_info, f,", "file in all_b: f = open(file, \"r\") collect += json.load(f) with open(folder_out +", 
"date_in in e] collect = [] for file in all_b: f = open(file,", "import json from os import listdir import pandas as pd def save_raw_listing_info(folder_in, folder_out):", "all_info += json.load(f) with open(folder_out + \"INFO.json\", \"w\") as f: json.dump(all_info, f, indent=2)", "with open(folder_in + f, \"r\") as f: all_info += json.load(f) with open(folder_out +", "open(folder_in + f, \"r\") as f: all_info += json.load(f) with open(folder_out + \"INFO.json\",", "[] for info in all_info: for attr in attributes: df_dict[attr].append(info[attr]) pd.DataFrame(df_dict).to_csv(folder_out + \"/INFO.csv\",", "index=False) def save_raw_avalibility(folder_in, folder_out, date_in, date_out): all_b = [folder_in + e for e", "all_info = [] for f in files: if \"json\" not in f or", "from os import listdir import pandas as pd def save_raw_listing_info(folder_in, folder_out): files =", "df_dict = {} for attr in attributes: df_dict[attr] = [] for info in", "def save_raw_avalibility(folder_in, folder_out, date_in, date_out): all_b = [folder_in + e for e in", "= open(file, \"r\") collect += json.load(f) with open(folder_out + \"AVALIBILITY\"+date_in.replace(\"-\",\"\")+\"_\"+date_out.replace(\"-\",\"\")+\".json\", \"w\") as f:", "folder_out, date_in, date_out): all_b = [folder_in + e for e in listdir(folder_in) if", "ast import literal_eval import json from os import listdir import pandas as pd", "for attr in attributes: df_dict[attr] = [] for info in all_info: for attr", "= [] for file in all_b: f = open(file, \"r\") collect += json.load(f)", "+= json.load(f) with open(folder_out + \"INFO.json\", \"w\") as f: json.dump(all_info, f, indent=2) attributes", "+ e for e in listdir(folder_in) if date_in in e] collect = []", "+ \"INFO.json\", \"w\") as f: json.dump(all_info, f, indent=2) attributes = list(all_info[0].keys()) attributes.remove(\"room_types\") df_dict", "all_b = [folder_in + e for e in listdir(folder_in) if date_in in e]", "collect = [] for file in all_b: f = open(file, 
\"r\") collect +=", "= list(all_info[0].keys()) attributes.remove(\"room_types\") df_dict = {} for attr in attributes: df_dict[attr] = []", "files = listdir(folder_in) all_info = [] for f in files: if \"json\" not", "in attributes: df_dict[attr] = [] for info in all_info: for attr in attributes:", "f in files: if \"json\" not in f or \"info\" not in f:", "e for e in listdir(folder_in) if date_in in e] collect = [] for", "in files: if \"json\" not in f or \"info\" not in f: continue", "collect += json.load(f) with open(folder_out + \"AVALIBILITY\"+date_in.replace(\"-\",\"\")+\"_\"+date_out.replace(\"-\",\"\")+\".json\", \"w\") as f: json.dump(collect, f, indent=2)", "in attributes: df_dict[attr].append(info[attr]) pd.DataFrame(df_dict).to_csv(folder_out + \"/INFO.csv\", index=False) def save_raw_avalibility(folder_in, folder_out, date_in, date_out): all_b", "for e in listdir(folder_in) if date_in in e] collect = [] for file", "for f in files: if \"json\" not in f or \"info\" not in", "attributes: df_dict[attr].append(info[attr]) pd.DataFrame(df_dict).to_csv(folder_out + \"/INFO.csv\", index=False) def save_raw_avalibility(folder_in, folder_out, date_in, date_out): all_b =", "df_dict[attr] = [] for info in all_info: for attr in attributes: df_dict[attr].append(info[attr]) pd.DataFrame(df_dict).to_csv(folder_out", "not in f: continue with open(folder_in + f, \"r\") as f: all_info +=", "in e] collect = [] for file in all_b: f = open(file, \"r\")", "= [] for f in files: if \"json\" not in f or \"info\"", "+ f, \"r\") as f: all_info += json.load(f) with open(folder_out + \"INFO.json\", \"w\")", "df_dict[attr].append(info[attr]) pd.DataFrame(df_dict).to_csv(folder_out + \"/INFO.csv\", index=False) def save_raw_avalibility(folder_in, folder_out, date_in, date_out): all_b = [folder_in", "info in all_info: for attr in attributes: df_dict[attr].append(info[attr]) pd.DataFrame(df_dict).to_csv(folder_out + \"/INFO.csv\", index=False) def", "literal_eval import json from os 
import listdir import pandas as pd def save_raw_listing_info(folder_in,", "= listdir(folder_in) all_info = [] for f in files: if \"json\" not in", "os import listdir import pandas as pd def save_raw_listing_info(folder_in, folder_out): files = listdir(folder_in)", "\"json\" not in f or \"info\" not in f: continue with open(folder_in +", "files: if \"json\" not in f or \"info\" not in f: continue with", "in f or \"info\" not in f: continue with open(folder_in + f, \"r\")", "import literal_eval import json from os import listdir import pandas as pd def", "from ast import literal_eval import json from os import listdir import pandas as", "folder_out): files = listdir(folder_in) all_info = [] for f in files: if \"json\"", "[folder_in + e for e in listdir(folder_in) if date_in in e] collect =", "def save_raw_listing_info(folder_in, folder_out): files = listdir(folder_in) all_info = [] for f in files:", "in all_info: for attr in attributes: df_dict[attr].append(info[attr]) pd.DataFrame(df_dict).to_csv(folder_out + \"/INFO.csv\", index=False) def save_raw_avalibility(folder_in,", "f: json.dump(all_info, f, indent=2) attributes = list(all_info[0].keys()) attributes.remove(\"room_types\") df_dict = {} for attr", "if \"json\" not in f or \"info\" not in f: continue with open(folder_in", "for attr in attributes: df_dict[attr].append(info[attr]) pd.DataFrame(df_dict).to_csv(folder_out + \"/INFO.csv\", index=False) def save_raw_avalibility(folder_in, folder_out, date_in,", "\"r\") as f: all_info += json.load(f) with open(folder_out + \"INFO.json\", \"w\") as f:", "= [folder_in + e for e in listdir(folder_in) if date_in in e] collect", "attr in attributes: df_dict[attr] = [] for info in all_info: for attr in", "= {} for attr in attributes: df_dict[attr] = [] for info in all_info:", "f, indent=2) attributes = list(all_info[0].keys()) attributes.remove(\"room_types\") df_dict = {} for attr in attributes:", "listdir(folder_in) if date_in in e] collect = [] for file in all_b: f", 
"in listdir(folder_in) if date_in in e] collect = [] for file in all_b:", "if date_in in e] collect = [] for file in all_b: f =", "not in f or \"info\" not in f: continue with open(folder_in + f,", "attr in attributes: df_dict[attr].append(info[attr]) pd.DataFrame(df_dict).to_csv(folder_out + \"/INFO.csv\", index=False) def save_raw_avalibility(folder_in, folder_out, date_in, date_out):" ]
[ "-1.0], [0.0, 0.0])), (range(5), np.stack([range(5), range(5, 0, -1)], 1), ([1.0, -1.0], [0.0, 0.0])),", "p = stats.permtest_metric(x, y) assert np.allclose([r, p], [0.0345815411043023, 0.7192807192807192]) r, p = stats.permtest_metric(np.c_[x,", "np.stack([range(5), range(5, 0, -1)], 1), ([1.0, -1.0], [0.0, 0.0])), # correlation between matching", "]) def test_efficient_pearsonr(x, y, expected): assert np.allclose(stats.efficient_pearsonr(x, y), expected) def test_efficient_pearsonr_errors(): with pytest.raises(ValueError):", "1), ([1.0, 1.0], [0.0, 0.0])) ]) def test_efficient_pearsonr(x, y, expected): assert np.allclose(stats.efficient_pearsonr(x, y),", "coding: utf-8 -*- \"\"\" For testing neuromaps.stats functionality \"\"\" import numpy as np", "[0.7192807192807192, 0.7472527472527473]) @pytest.mark.parametrize('x, y, expected', [ # basic one-dimensional input (range(5), range(5), (1.0,", "@pytest.mark.parametrize('x, y, expected', [ # basic one-dimensional input (range(5), range(5), (1.0, 0.0)), #", "correlation between matching columns (np.stack([range(5), range(5, 0, -1)], 1), np.stack([range(5), range(5, 0, -1)],", "between matching columns (np.stack([range(5), range(5, 0, -1)], 1), np.stack([range(5), range(5, 0, -1)], 1),", "neuromaps import stats @pytest.mark.xfail def test_compare_images(): assert False def test_permtest_metric(): rs = np.random.default_rng(12345678)", "assert False def test_permtest_metric(): rs = np.random.default_rng(12345678) x, y = rs.random(size=(2, 100)) r,", "100)) r, p = stats.permtest_metric(x, y) assert np.allclose([r, p], [0.0345815411043023, 0.7192807192807192]) r, p", "rs.random(size=(2, 100)) r, p = stats.permtest_metric(x, y) assert np.allclose([r, p], [0.0345815411043023, 0.7192807192807192]) r,", "-1)], 1), range(5), ([1.0, -1.0], [0.0, 0.0])), (range(5), np.stack([range(5), range(5, 0, -1)], 1),", "pytest from neuromaps import stats @pytest.mark.xfail def test_compare_images(): assert False def 
test_permtest_metric(): rs", "rs = np.random.default_rng(12345678) x, y = rs.random(size=(2, 100)) r, p = stats.permtest_metric(x, y)", "0.0)), # broadcasting occurs regardless of input order (np.stack([range(5), range(5, 0, -1)], 1),", "For testing neuromaps.stats functionality \"\"\" import numpy as np import pytest from neuromaps", "r, p = stats.permtest_metric(x, y) assert np.allclose([r, p], [0.0345815411043023, 0.7192807192807192]) r, p =", "expected): assert np.allclose(stats.efficient_pearsonr(x, y), expected) def test_efficient_pearsonr_errors(): with pytest.raises(ValueError): stats.efficient_pearsonr(range(4), range(5)) assert all(np.isnan(a)", "@pytest.mark.xfail def test_compare_images(): assert False def test_permtest_metric(): rs = np.random.default_rng(12345678) x, y =", "= stats.permtest_metric(x, y) assert np.allclose([r, p], [0.0345815411043023, 0.7192807192807192]) r, p = stats.permtest_metric(np.c_[x, x[::-1]],", "test_compare_images(): assert False def test_permtest_metric(): rs = np.random.default_rng(12345678) x, y = rs.random(size=(2, 100))", "range(5), (1.0, 0.0)), # broadcasting occurs regardless of input order (np.stack([range(5), range(5, 0,", "as np import pytest from neuromaps import stats @pytest.mark.xfail def test_compare_images(): assert False", "(range(5), np.stack([range(5), range(5, 0, -1)], 1), ([1.0, -1.0], [0.0, 0.0])), # correlation between", "test_permtest_metric(): rs = np.random.default_rng(12345678) x, y = rs.random(size=(2, 100)) r, p = stats.permtest_metric(x,", "import pytest from neuromaps import stats @pytest.mark.xfail def test_compare_images(): assert False def test_permtest_metric():", "testing neuromaps.stats functionality \"\"\" import numpy as np import pytest from neuromaps import", "-1.0], [0.0, 0.0])), # correlation between matching columns (np.stack([range(5), range(5, 0, -1)], 1),", "np.allclose(p, [0.7192807192807192, 0.7472527472527473]) @pytest.mark.parametrize('x, y, expected', [ # basic 
one-dimensional input (range(5), range(5),", "np.c_[y, y]) assert np.allclose(r, [0.0345815411043023, 0.03338608427980476]) assert np.allclose(p, [0.7192807192807192, 0.7472527472527473]) @pytest.mark.parametrize('x, y, expected',", "basic one-dimensional input (range(5), range(5), (1.0, 0.0)), # broadcasting occurs regardless of input", "y, expected): assert np.allclose(stats.efficient_pearsonr(x, y), expected) def test_efficient_pearsonr_errors(): with pytest.raises(ValueError): stats.efficient_pearsonr(range(4), range(5)) assert", "assert np.allclose(stats.efficient_pearsonr(x, y), expected) def test_efficient_pearsonr_errors(): with pytest.raises(ValueError): stats.efficient_pearsonr(range(4), range(5)) assert all(np.isnan(a) for", "1), range(5), ([1.0, -1.0], [0.0, 0.0])), (range(5), np.stack([range(5), range(5, 0, -1)], 1), ([1.0,", "functionality \"\"\" import numpy as np import pytest from neuromaps import stats @pytest.mark.xfail", "y) assert np.allclose([r, p], [0.0345815411043023, 0.7192807192807192]) r, p = stats.permtest_metric(np.c_[x, x[::-1]], np.c_[y, y])", "range(5), ([1.0, -1.0], [0.0, 0.0])), (range(5), np.stack([range(5), range(5, 0, -1)], 1), ([1.0, -1.0],", "np.random.default_rng(12345678) x, y = rs.random(size=(2, 100)) r, p = stats.permtest_metric(x, y) assert np.allclose([r,", "utf-8 -*- \"\"\" For testing neuromaps.stats functionality \"\"\" import numpy as np import", "np.stack([range(5), range(5, 0, -1)], 1), ([1.0, 1.0], [0.0, 0.0])) ]) def test_efficient_pearsonr(x, y,", "= stats.permtest_metric(np.c_[x, x[::-1]], np.c_[y, y]) assert np.allclose(r, [0.0345815411043023, 0.03338608427980476]) assert np.allclose(p, [0.7192807192807192, 0.7472527472527473])", "input (range(5), range(5), (1.0, 0.0)), # broadcasting occurs regardless of input order (np.stack([range(5),", "test_efficient_pearsonr(x, y, expected): assert np.allclose(stats.efficient_pearsonr(x, y), expected) def test_efficient_pearsonr_errors(): with pytest.raises(ValueError): 
stats.efficient_pearsonr(range(4), range(5))", "= np.random.default_rng(12345678) x, y = rs.random(size=(2, 100)) r, p = stats.permtest_metric(x, y) assert", "def test_compare_images(): assert False def test_permtest_metric(): rs = np.random.default_rng(12345678) x, y = rs.random(size=(2,", "\"\"\" import numpy as np import pytest from neuromaps import stats @pytest.mark.xfail def", "numpy as np import pytest from neuromaps import stats @pytest.mark.xfail def test_compare_images(): assert", "0.7192807192807192]) r, p = stats.permtest_metric(np.c_[x, x[::-1]], np.c_[y, y]) assert np.allclose(r, [0.0345815411043023, 0.03338608427980476]) assert", "-*- \"\"\" For testing neuromaps.stats functionality \"\"\" import numpy as np import pytest", "import numpy as np import pytest from neuromaps import stats @pytest.mark.xfail def test_compare_images():", "y]) assert np.allclose(r, [0.0345815411043023, 0.03338608427980476]) assert np.allclose(p, [0.7192807192807192, 0.7472527472527473]) @pytest.mark.parametrize('x, y, expected', [", "y), expected) def test_efficient_pearsonr_errors(): with pytest.raises(ValueError): stats.efficient_pearsonr(range(4), range(5)) assert all(np.isnan(a) for a in", "range(5, 0, -1)], 1), ([1.0, -1.0], [0.0, 0.0])), # correlation between matching columns", "matching columns (np.stack([range(5), range(5, 0, -1)], 1), np.stack([range(5), range(5, 0, -1)], 1), ([1.0,", "[0.0345815411043023, 0.7192807192807192]) r, p = stats.permtest_metric(np.c_[x, x[::-1]], np.c_[y, y]) assert np.allclose(r, [0.0345815411043023, 0.03338608427980476])", "occurs regardless of input order (np.stack([range(5), range(5, 0, -1)], 1), range(5), ([1.0, -1.0],", "-1)], 1), np.stack([range(5), range(5, 0, -1)], 1), ([1.0, 1.0], [0.0, 0.0])) ]) def", "np.allclose([r, p], [0.0345815411043023, 0.7192807192807192]) r, p = stats.permtest_metric(np.c_[x, x[::-1]], np.c_[y, y]) assert np.allclose(r,", "expected) def test_efficient_pearsonr_errors(): with pytest.raises(ValueError): 
stats.efficient_pearsonr(range(4), range(5)) assert all(np.isnan(a) for a in stats.efficient_pearsonr([],", "(np.stack([range(5), range(5, 0, -1)], 1), np.stack([range(5), range(5, 0, -1)], 1), ([1.0, 1.0], [0.0,", "1), ([1.0, -1.0], [0.0, 0.0])), # correlation between matching columns (np.stack([range(5), range(5, 0,", "import stats @pytest.mark.xfail def test_compare_images(): assert False def test_permtest_metric(): rs = np.random.default_rng(12345678) x,", "0.03338608427980476]) assert np.allclose(p, [0.7192807192807192, 0.7472527472527473]) @pytest.mark.parametrize('x, y, expected', [ # basic one-dimensional input", "False def test_permtest_metric(): rs = np.random.default_rng(12345678) x, y = rs.random(size=(2, 100)) r, p", "stats.permtest_metric(x, y) assert np.allclose([r, p], [0.0345815411043023, 0.7192807192807192]) r, p = stats.permtest_metric(np.c_[x, x[::-1]], np.c_[y,", "np.allclose(stats.efficient_pearsonr(x, y), expected) def test_efficient_pearsonr_errors(): with pytest.raises(ValueError): stats.efficient_pearsonr(range(4), range(5)) assert all(np.isnan(a) for a", "0.0])), (range(5), np.stack([range(5), range(5, 0, -1)], 1), ([1.0, -1.0], [0.0, 0.0])), # correlation", "-*- coding: utf-8 -*- \"\"\" For testing neuromaps.stats functionality \"\"\" import numpy as", "range(5, 0, -1)], 1), ([1.0, 1.0], [0.0, 0.0])) ]) def test_efficient_pearsonr(x, y, expected):", "range(5, 0, -1)], 1), np.stack([range(5), range(5, 0, -1)], 1), ([1.0, 1.0], [0.0, 0.0]))", "order (np.stack([range(5), range(5, 0, -1)], 1), range(5), ([1.0, -1.0], [0.0, 0.0])), (range(5), np.stack([range(5),", "np import pytest from neuromaps import stats @pytest.mark.xfail def test_compare_images(): assert False def", "y = rs.random(size=(2, 100)) r, p = stats.permtest_metric(x, y) assert np.allclose([r, p], [0.0345815411043023,", "stats.permtest_metric(np.c_[x, x[::-1]], np.c_[y, y]) assert np.allclose(r, [0.0345815411043023, 0.03338608427980476]) assert np.allclose(p, 
[0.7192807192807192, 0.7472527472527473]) @pytest.mark.parametrize('x,", "x[::-1]], np.c_[y, y]) assert np.allclose(r, [0.0345815411043023, 0.03338608427980476]) assert np.allclose(p, [0.7192807192807192, 0.7472527472527473]) @pytest.mark.parametrize('x, y,", "range(5, 0, -1)], 1), range(5), ([1.0, -1.0], [0.0, 0.0])), (range(5), np.stack([range(5), range(5, 0,", "0, -1)], 1), ([1.0, 1.0], [0.0, 0.0])) ]) def test_efficient_pearsonr(x, y, expected): assert", "\"\"\" For testing neuromaps.stats functionality \"\"\" import numpy as np import pytest from", "1), np.stack([range(5), range(5, 0, -1)], 1), ([1.0, 1.0], [0.0, 0.0])) ]) def test_efficient_pearsonr(x,", "(range(5), range(5), (1.0, 0.0)), # broadcasting occurs regardless of input order (np.stack([range(5), range(5,", "of input order (np.stack([range(5), range(5, 0, -1)], 1), range(5), ([1.0, -1.0], [0.0, 0.0])),", "# correlation between matching columns (np.stack([range(5), range(5, 0, -1)], 1), np.stack([range(5), range(5, 0,", "np.allclose(r, [0.0345815411043023, 0.03338608427980476]) assert np.allclose(p, [0.7192807192807192, 0.7472527472527473]) @pytest.mark.parametrize('x, y, expected', [ # basic", "[0.0, 0.0])) ]) def test_efficient_pearsonr(x, y, expected): assert np.allclose(stats.efficient_pearsonr(x, y), expected) def test_efficient_pearsonr_errors():", "-1)], 1), ([1.0, 1.0], [0.0, 0.0])) ]) def test_efficient_pearsonr(x, y, expected): assert np.allclose(stats.efficient_pearsonr(x,", "0, -1)], 1), ([1.0, -1.0], [0.0, 0.0])), # correlation between matching columns (np.stack([range(5),", "[0.0345815411043023, 0.03338608427980476]) assert np.allclose(p, [0.7192807192807192, 0.7472527472527473]) @pytest.mark.parametrize('x, y, expected', [ # basic one-dimensional", "0, -1)], 1), range(5), ([1.0, -1.0], [0.0, 0.0])), (range(5), np.stack([range(5), range(5, 0, -1)],", "y, expected', [ # basic one-dimensional input (range(5), range(5), (1.0, 0.0)), # broadcasting", "p], [0.0345815411043023, 
0.7192807192807192]) r, p = stats.permtest_metric(np.c_[x, x[::-1]], np.c_[y, y]) assert np.allclose(r, [0.0345815411043023,", "# basic one-dimensional input (range(5), range(5), (1.0, 0.0)), # broadcasting occurs regardless of", "one-dimensional input (range(5), range(5), (1.0, 0.0)), # broadcasting occurs regardless of input order", "[0.0, 0.0])), # correlation between matching columns (np.stack([range(5), range(5, 0, -1)], 1), np.stack([range(5),", "broadcasting occurs regardless of input order (np.stack([range(5), range(5, 0, -1)], 1), range(5), ([1.0,", "def test_efficient_pearsonr(x, y, expected): assert np.allclose(stats.efficient_pearsonr(x, y), expected) def test_efficient_pearsonr_errors(): with pytest.raises(ValueError): stats.efficient_pearsonr(range(4),", "assert np.allclose(p, [0.7192807192807192, 0.7472527472527473]) @pytest.mark.parametrize('x, y, expected', [ # basic one-dimensional input (range(5),", "(1.0, 0.0)), # broadcasting occurs regardless of input order (np.stack([range(5), range(5, 0, -1)],", "[ # basic one-dimensional input (range(5), range(5), (1.0, 0.0)), # broadcasting occurs regardless", "neuromaps.stats functionality \"\"\" import numpy as np import pytest from neuromaps import stats", "0.0])), # correlation between matching columns (np.stack([range(5), range(5, 0, -1)], 1), np.stack([range(5), range(5,", "0, -1)], 1), np.stack([range(5), range(5, 0, -1)], 1), ([1.0, 1.0], [0.0, 0.0])) ])", "([1.0, -1.0], [0.0, 0.0])), (range(5), np.stack([range(5), range(5, 0, -1)], 1), ([1.0, -1.0], [0.0,", "assert np.allclose(r, [0.0345815411043023, 0.03338608427980476]) assert np.allclose(p, [0.7192807192807192, 0.7472527472527473]) @pytest.mark.parametrize('x, y, expected', [ #", "0.7472527472527473]) @pytest.mark.parametrize('x, y, expected', [ # basic one-dimensional input (range(5), range(5), (1.0, 0.0)),", "(np.stack([range(5), range(5, 0, -1)], 1), range(5), ([1.0, -1.0], [0.0, 0.0])), (range(5), np.stack([range(5), range(5,", "columns 
(np.stack([range(5), range(5, 0, -1)], 1), np.stack([range(5), range(5, 0, -1)], 1), ([1.0, 1.0],", "stats @pytest.mark.xfail def test_compare_images(): assert False def test_permtest_metric(): rs = np.random.default_rng(12345678) x, y", "x, y = rs.random(size=(2, 100)) r, p = stats.permtest_metric(x, y) assert np.allclose([r, p],", "regardless of input order (np.stack([range(5), range(5, 0, -1)], 1), range(5), ([1.0, -1.0], [0.0,", "input order (np.stack([range(5), range(5, 0, -1)], 1), range(5), ([1.0, -1.0], [0.0, 0.0])), (range(5),", "[0.0, 0.0])), (range(5), np.stack([range(5), range(5, 0, -1)], 1), ([1.0, -1.0], [0.0, 0.0])), #", "0.0])) ]) def test_efficient_pearsonr(x, y, expected): assert np.allclose(stats.efficient_pearsonr(x, y), expected) def test_efficient_pearsonr_errors(): with", "def test_permtest_metric(): rs = np.random.default_rng(12345678) x, y = rs.random(size=(2, 100)) r, p =", "# -*- coding: utf-8 -*- \"\"\" For testing neuromaps.stats functionality \"\"\" import numpy", "= rs.random(size=(2, 100)) r, p = stats.permtest_metric(x, y) assert np.allclose([r, p], [0.0345815411043023, 0.7192807192807192])", "from neuromaps import stats @pytest.mark.xfail def test_compare_images(): assert False def test_permtest_metric(): rs =", "expected', [ # basic one-dimensional input (range(5), range(5), (1.0, 0.0)), # broadcasting occurs", "def test_efficient_pearsonr_errors(): with pytest.raises(ValueError): stats.efficient_pearsonr(range(4), range(5)) assert all(np.isnan(a) for a in stats.efficient_pearsonr([], []))", "-1)], 1), ([1.0, -1.0], [0.0, 0.0])), # correlation between matching columns (np.stack([range(5), range(5,", "([1.0, 1.0], [0.0, 0.0])) ]) def test_efficient_pearsonr(x, y, expected): assert np.allclose(stats.efficient_pearsonr(x, y), expected)", "assert np.allclose([r, p], [0.0345815411043023, 0.7192807192807192]) r, p = stats.permtest_metric(np.c_[x, x[::-1]], np.c_[y, y]) assert", "# broadcasting occurs regardless of input order 
(np.stack([range(5), range(5, 0, -1)], 1), range(5),", "r, p = stats.permtest_metric(np.c_[x, x[::-1]], np.c_[y, y]) assert np.allclose(r, [0.0345815411043023, 0.03338608427980476]) assert np.allclose(p,", "p = stats.permtest_metric(np.c_[x, x[::-1]], np.c_[y, y]) assert np.allclose(r, [0.0345815411043023, 0.03338608427980476]) assert np.allclose(p, [0.7192807192807192,", "([1.0, -1.0], [0.0, 0.0])), # correlation between matching columns (np.stack([range(5), range(5, 0, -1)],", "1.0], [0.0, 0.0])) ]) def test_efficient_pearsonr(x, y, expected): assert np.allclose(stats.efficient_pearsonr(x, y), expected) def" ]
[ "= lasagne.updates.nesterov_momentum( loss.mean(), params, learning_rate=0.01, momentum=0.9) else: updates = updates_fn(loss.mean(), params) # EVALUATION", "performing a training step on a mini-batch (by giving # the updates dictionary)", "it smooths the resulting probabilities. \"\"\" def __init__(self, temperature=1.0): self._temperature = theano.shared(lasagne.utils.floatX(temperature), 'temperature')", "(disable # dropout layers) eval_prediction = lasagne.layers.get_output(network, deterministic=True) # Create evaluation loss expression", "the parameters to update using an optimisation technique e.g. Nesterov Momentum: `lambda cost,", "= lasagne.layers.get_output(network) # Create a loss expression for training, i.e., a scalar objective", "import print_function # os.environ['THEANO_FLAGS'] = 'device=gpu1' import numpy as np import theano import", "for training, i.e., a scalar objective we want # to minimize (for our", "items.append('val loss={:.6f}, val err={:.2%}'.format(val_results[0], val_results[1])) if test_results is not None: items.append('test err={:.2%}'.format(test_results[1])) return", "a function performing a training step on a mini-batch (by giving # the", "= temperature for batch in self.trainer.batch_iterator(X, batchsize=batchsize, shuffle=False): if batch_xform_fn is not None:", "well here, see lasagne.regularization. # Create update expressions for training, i.e., how to", "updates_fn: [optional] a function of the form `fn(cost, params) -> updates` that generates", "and the parameters to update using an optimisation technique e.g. 
Nesterov Momentum: `lambda", "variable :param final_layer: final layer, a Lasagne layer :param updates_fn: [optional] a function", "self.trainer.retain_best_scoring_state_of_updates(updates) self.trainer.retain_best_scoring_state_of_network(network) def _check_train_epoch_results(self, epoch, train_epoch_results): if np.isnan(train_epoch_results).any(): return 'Training loss of NaN'", "self._temperature = theano.shared(lasagne.utils.floatX(temperature), 'temperature') @property def temperature(self): return 1.0 / self._temperature.get_value() @temperature.setter def", "path from which to load parameters. :param network_build_fn: network builder function of the", "None def _epoch_log(self, epoch_number, delta_time, train_results, val_results, test_results): \"\"\" Epoch logging callback, passed", "Here, we'll use Stochastic Gradient # Descent (SGD) with Nesterov momentum, but Lasagne", "lasagne.regularization. # Create update expressions for training, i.e., how to modify the #", "= lasagne.layers.get_output(network, deterministic=True) # Create evaluation loss expression eval_loss = lasagne.objectives.categorical_crossentropy(eval_prediction, target_var) #", "parameters at each training step. 
Here, we'll use Stochastic Gradient # Descent (SGD)", "trainer self.trainer = trainer.Trainer() # Provide with training function self.trainer.train_with(train_batch_fn=self._train_fn, train_epoch_results_check_fn=self._check_train_epoch_results) # Evaluate", "Tell the trainer to store parameters when the validation score (error rate) is", "len(self.upper_layers) def predict_prob(self, X, batchsize=500, temperature=None, batch_xform_fn=None): \"\"\" Predict probabilities for input samples", "class AbstractClassifier (object): @classmethod def for_model(cls, network_build_fn, params_path=None, *args, **kwargs): \"\"\" Construct a", "# to minimize (for our multi-class problem, it is the cross-entropy loss): loss", "scalar objective we want # to minimize (for our multi-class problem, it is", "returning the corresponding training loss: self._train_fn = theano.function([input_var, target_var], loss.sum(), updates=updates) # Compile", "an optional path from which to load parameters. :param network_build_fn: network builder function", "def _check_train_epoch_results(self, epoch, train_epoch_results): if np.isnan(train_epoch_results).any(): return 'Training loss of NaN' else: return", "dictionary) and returning the corresponding training loss: self._train_fn = theano.function([input_var, target_var], loss.sum(), updates=updates)", "training step on a mini-batch (by giving # the updates dictionary) and returning", "= theano.function([input_var, target_var], [eval_loss.sum(), test_err]) # Compile a function computing the predicted probability", "an expression representing the network's output prediction = lasagne.layers.get_output(network) # Create a loss", "instance, given a pre-built network. 
:param network: pre-built network, in the form of", "and targets input_var = T.tensor4('inputs') target_var = T.ivector('targets') # Build the network print(\"Building", "functions...\") network = network_build_fn(input_var=input_var, **kwargs) # If a parameters path is provided, load", "# Construct input_var = utils.get_network_input_var(network) target_var = T.ivector('targets') return cls(input_var, target_var, network, *args,", "**kwargs): \"\"\" Construct a classifier instance, given a pre-built network. :param network: pre-built", "test_results): \"\"\" Epoch logging callback, passed to the `self.trainer.report()` \"\"\" items = []", "function self.trainer.report(epoch_log_fn=self._epoch_log) # Tell the trainer to store parameters when the validation score", "\"\"\" Construct a classifier, given a network building function and an optional path", "not None: batch = batch_xform_fn(batch) y_batch = self._predict_prob_fn(batch[0]) y.append(y_batch) y = np.concatenate(y, axis=0)", "step. 
Here, we'll use Stochastic Gradient # Descent (SGD) with Nesterov momentum, but", "1.0 return y def predict_cls(self, X, batchsize=500, batch_xform_fn=None): prob = self.predict_prob(X, batchsize=batchsize, batch_xform_fn=batch_xform_fn)", "self._val_fn = theano.function([input_var, target_var], [eval_loss.sum(), test_err]) # Compile a function computing the predicted", "EVALUATION - VALIDATION, TEST, PREDICTION # Create prediction expressions; use deterministic forward pass", "self._temperature.set_value(lasagne.utils.floatX(1.0 / value)) def __call__(self, x): return lasagne.nonlinearities.softmax(x * self._temperature) class AbstractClassifier (object):", "target_var, final_layer, updates_fn=None): \"\"\" Constructor - construct an `ImageClassifier` instance given variables for", "TRAINING # Get an expression representing the network's output prediction = lasagne.layers.get_output(network) #", "# Tell the trainer to store parameters when the validation score (error rate)", "builder function of the form `fn(input_var, **kwargs) -> lasagne_layer` that constructs a network", "@property def temperature(self): return 1.0 / self._temperature.get_value() @temperature.setter def temperature(self, value): self._temperature.set_value(lasagne.utils.floatX(1.0 /", "T.ivector('targets') # Build the network print(\"Building model and compiling functions...\") network = network_build_fn(input_var=input_var,", "T.tensor4('inputs') target_var = T.ivector('targets') # Build the network print(\"Building model and compiling functions...\")", "evaluation loss expression eval_loss = lasagne.objectives.categorical_crossentropy(eval_prediction, target_var) # Create an expression for error", "(object): \"\"\" A softmax function with a temperature setting; increasing it smooths the", "NaN' else: return None def _epoch_log(self, epoch_number, delta_time, train_results, val_results, test_results): \"\"\" Epoch", "the predicted probability self._predict_prob_fn = 
theano.function([input_var], eval_prediction) # Construct a trainer self.trainer =", "= network # TRAINING # Get an expression representing the network's output prediction", "eval_loss = lasagne.objectives.categorical_crossentropy(eval_prediction, target_var) # Create an expression for error count test_err =", "os.environ['THEANO_FLAGS'] = 'device=gpu1' import numpy as np import theano import theano.tensor as T", "# If a parameters path is provided, load them if params_path is not", "provided, load them if params_path is not None: utils.load_model(params_path, network) return cls(input_var, target_var,", "(a Lasagne layer) :param input_var: input variable, a Theano variable :param target_var: target", "the `self.trainer.report()` \"\"\" items = [] items.append('Epoch {}/{} took {:.2f}s'.format(epoch_number + 1, self.trainer.num_epochs,", "cls(input_var, target_var, network, *args, **kwargs) @classmethod def for_network(cls, network, *args, **kwargs): \"\"\" Construct", "form of a Lasagne layer :param args: :param kwargs: :return: \"\"\" # Construct", "test_results is not None: items.append('test err={:.2%}'.format(test_results[1])) return ', '.join(items) @property def n_upper_layers(self): return", "None: items.append('test err={:.2%}'.format(test_results[1])) return ', '.join(items) @property def n_upper_layers(self): return len(self.upper_layers) def predict_prob(self,", "network builder function of the form `fn(input_var, **kwargs) -> lasagne_layer` that constructs a", "evaluation function, the second output value - error rate - is used for", "we'll use Stochastic Gradient # Descent (SGD) with Nesterov momentum, but Lasagne offers", ":param kwargs: :return: \"\"\" # Construct input_var = utils.get_network_input_var(network) target_var = T.ivector('targets') return", "target_var) # Create an expression for error count test_err = T.sum(T.neq(T.argmax(eval_prediction, axis=1), target_var),", "target_var], loss.sum(), updates=updates) # Compile a function computing 
the validation loss and error:", "a Lasagne layer, given an input variable (a Theano variable) :param params_path: [optional]", "self._temperature) class AbstractClassifier (object): @classmethod def for_model(cls, network_build_fn, params_path=None, *args, **kwargs): \"\"\" Construct", "input, target and a final layer (a Lasagne layer) :param input_var: input variable,", "update expressions for training, i.e., how to modify the # parameters at each", "used for scoring self.trainer.evaluate_with(eval_batch_fn=self._val_fn, validation_score_fn=1) # Set the epoch logging function self.trainer.report(epoch_log_fn=self._epoch_log) #", "prediction expressions; use deterministic forward pass (disable # dropout layers) eval_prediction = lasagne.layers.get_output(network,", "the validation score (error rate) is best # self.trainer.retain_best_scoring_state_of_updates(updates) self.trainer.retain_best_scoring_state_of_network(network) def _check_train_epoch_results(self, epoch,", "default=500 :param temperature: [optional] softmax temperature :return: \"\"\" y = [] if temperature", "the corresponding training loss: self._train_fn = theano.function([input_var, target_var], loss.sum(), updates=updates) # Compile a", "if batch_xform_fn is not None: batch = batch_xform_fn(batch) y_batch = self._predict_prob_fn(batch[0]) y.append(y_batch) y", "import numpy as np import theano import theano.tensor as T import lasagne import", "# Compile a function computing the predicted probability self._predict_prob_fn = theano.function([input_var], eval_prediction) #", "loss={:.6f}'.format(train_results[0])) if val_results is not None: items.append('val loss={:.6f}, val err={:.2%}'.format(val_results[0], val_results[1])) if test_results", "**kwargs) @classmethod def for_network(cls, network, *args, **kwargs): \"\"\" Construct a classifier instance, given", "np import theano import theano.tensor as T import lasagne import trainer, utils class", "momentum, but Lasagne offers plenty more. 
params = lasagne.layers.get_all_params(network, trainable=True) if updates_fn is", "updates_fn(loss.mean(), params) # EVALUATION - VALIDATION, TEST, PREDICTION # Create prediction expressions; use", "= T.tensor4('inputs') target_var = T.ivector('targets') # Build the network print(\"Building model and compiling", "@property def n_upper_layers(self): return len(self.upper_layers) def predict_prob(self, X, batchsize=500, temperature=None, batch_xform_fn=None): \"\"\" Predict", "temperature=1.0): self._temperature = theano.shared(lasagne.utils.floatX(temperature), 'temperature') @property def temperature(self): return 1.0 / self._temperature.get_value() @temperature.setter", "training, i.e., how to modify the # parameters at each training step. Here,", "network, *args, **kwargs): \"\"\" Construct a classifier instance, given a pre-built network. :param", "if temperature is not None: self.softmax.temperature = 1.0 return y def predict_cls(self, X,", "numpy as np import theano import theano.tensor as T import lasagne import trainer,", "expression eval_loss = lasagne.objectives.categorical_crossentropy(eval_prediction, target_var) # Create an expression for error count test_err", "= batch_xform_fn(batch) y_batch = self._predict_prob_fn(batch[0]) y.append(y_batch) y = np.concatenate(y, axis=0) if temperature is", "given a pre-built network. :param network: pre-built network, in the form of a", "count test_err = T.sum(T.neq(T.argmax(eval_prediction, axis=1), target_var), dtype=theano.config.floatX) # Compile a function performing a", "function of the form `fn(input_var, **kwargs) -> lasagne_layer` that constructs a network in", "of the form `fn(input_var, **kwargs) -> lasagne_layer` that constructs a network in the", "pre-built network. 
:param network: pre-built network, in the form of a Lasagne layer", "If a parameters path is provided, load them if params_path is not None:", "final layer, a Lasagne layer :param updates_fn: [optional] a function of the form", "Compile a function computing the predicted probability self._predict_prob_fn = theano.function([input_var], eval_prediction) # Construct", "batchsize=batchsize, shuffle=False): if batch_xform_fn is not None: batch = batch_xform_fn(batch) y_batch = self._predict_prob_fn(batch[0])", "theano.function([input_var, target_var], [eval_loss.sum(), test_err]) # Compile a function computing the predicted probability self._predict_prob_fn", "= network_build_fn(input_var=input_var, **kwargs) # If a parameters path is provided, load them if", "Momentum: `lambda cost, params: lasagne.updates.nesterov_momentum(cost, params, learning_rate=0.002, momentum=0.9)` \"\"\" self.input_var = input_var self.target_var", "= T.sum(T.neq(T.argmax(eval_prediction, axis=1), target_var), dtype=theano.config.floatX) # Compile a function performing a training step", "predicted probability self._predict_prob_fn = theano.function([input_var], eval_prediction) # Construct a trainer self.trainer = trainer.Trainer()", "error rate - is used for scoring self.trainer.evaluate_with(eval_batch_fn=self._val_fn, validation_score_fn=1) # Set the epoch", "{}/{} took {:.2f}s'.format(epoch_number + 1, self.trainer.num_epochs, delta_time)) items.append('train loss={:.6f}'.format(train_results[0])) if val_results is not", "AbstractClassifier (object): @classmethod def for_model(cls, network_build_fn, params_path=None, *args, **kwargs): \"\"\" Construct a classifier,", "batch in self.trainer.batch_iterator(X, batchsize=batchsize, shuffle=False): if batch_xform_fn is not None: batch = batch_xform_fn(batch)", "Lasagne layer) :param input_var: input variable, a Theano variable :param target_var: target variable,", "our multi-class problem, it is the cross-entropy loss): loss = 
lasagne.objectives.categorical_crossentropy(prediction, target_var) #", "variable, a Theano variable :param target_var: target variable, a Theano variable :param final_layer:", "test_err]) # Compile a function computing the predicted probability self._predict_prob_fn = theano.function([input_var], eval_prediction)", "= theano.shared(lasagne.utils.floatX(temperature), 'temperature') @property def temperature(self): return 1.0 / self._temperature.get_value() @temperature.setter def temperature(self,", "is best # self.trainer.retain_best_scoring_state_of_updates(updates) self.trainer.retain_best_scoring_state_of_network(network) def _check_train_epoch_results(self, epoch, train_epoch_results): if np.isnan(train_epoch_results).any(): return 'Training", "test_err = T.sum(T.neq(T.argmax(eval_prediction, axis=1), target_var), dtype=theano.config.floatX) # Compile a function performing a training", "variables for inputs and targets input_var = T.tensor4('inputs') target_var = T.ivector('targets') # Build", "Gradient # Descent (SGD) with Nesterov momentum, but Lasagne offers plenty more. params", "val err={:.2%}'.format(val_results[0], val_results[1])) if test_results is not None: items.append('test err={:.2%}'.format(test_results[1])) return ', '.join(items)", "layer (a Lasagne layer) :param input_var: input variable, a Theano variable :param target_var:", "given variables for input, target and a final layer (a Lasagne layer) :param", "network = lasagne.layers.NonlinearityLayer(final_layer, self.softmax) self.network = network # TRAINING # Get an expression", "add some weight decay as well here, see lasagne.regularization. 
# Create update expressions", "# self.trainer.retain_best_scoring_state_of_updates(updates) self.trainer.retain_best_scoring_state_of_network(network) def _check_train_epoch_results(self, epoch, train_epoch_results): if np.isnan(train_epoch_results).any(): return 'Training loss of", "'temperature') @property def temperature(self): return 1.0 / self._temperature.get_value() @temperature.setter def temperature(self, value): self._temperature.set_value(lasagne.utils.floatX(1.0", "params_path=None, *args, **kwargs): \"\"\" Construct a classifier, given a network building function and", "optimisation technique e.g. Nesterov Momentum: `lambda cost, params: lasagne.updates.nesterov_momentum(cost, params, learning_rate=0.002, momentum=0.9)` \"\"\"", "\"\"\" Epoch logging callback, passed to the `self.trainer.report()` \"\"\" items = [] items.append('Epoch", "final_layer, updates_fn=None): \"\"\" Constructor - construct an `ImageClassifier` instance given variables for input,", "Compile a function computing the validation loss and error: self._val_fn = theano.function([input_var, target_var],", "dtype=theano.config.floatX) # Compile a function performing a training step on a mini-batch (by", "PREDICTION # Create prediction expressions; use deterministic forward pass (disable # dropout layers)", "e.g. Nesterov Momentum: `lambda cost, params: lasagne.updates.nesterov_momentum(cost, params, learning_rate=0.002, momentum=0.9)` \"\"\" self.input_var =", "on a mini-batch (by giving # the updates dictionary) and returning the corresponding", "def for_model(cls, network_build_fn, params_path=None, *args, **kwargs): \"\"\" Construct a classifier, given a network", "the resulting probabilities. 
\"\"\" def __init__(self, temperature=1.0): self._temperature = theano.shared(lasagne.utils.floatX(temperature), 'temperature') @property def", "# Create an expression for error count test_err = T.sum(T.neq(T.argmax(eval_prediction, axis=1), target_var), dtype=theano.config.floatX)", "setting; increasing it smooths the resulting probabilities. \"\"\" def __init__(self, temperature=1.0): self._temperature =", "output value - error rate - is used for scoring self.trainer.evaluate_with(eval_batch_fn=self._val_fn, validation_score_fn=1) #", "= 1.0 return y def predict_cls(self, X, batchsize=500, batch_xform_fn=None): prob = self.predict_prob(X, batchsize=batchsize,", "the form of a Lasagne layer, given an input variable (a Theano variable)", "= theano.function([input_var], eval_prediction) # Construct a trainer self.trainer = trainer.Trainer() # Provide with", "to minimize (for our multi-class problem, it is the cross-entropy loss): loss =", "want # to minimize (for our multi-class problem, it is the cross-entropy loss):", "of a Lasagne layer, given an input variable (a Theano variable) :param params_path:", "not None: self.softmax.temperature = 1.0 return y def predict_cls(self, X, batchsize=500, batch_xform_fn=None): prob", "form of a Lasagne layer, given an input variable (a Theano variable) :param", "not None: items.append('test err={:.2%}'.format(test_results[1])) return ', '.join(items) @property def n_upper_layers(self): return len(self.upper_layers) def", "class TemperatureSoftmax (object): \"\"\" A softmax function with a temperature setting; increasing it", "= theano.function([input_var, target_var], loss.sum(), updates=updates) # Compile a function computing the validation loss", "minimize (for our multi-class problem, it is the cross-entropy loss): loss = lasagne.objectives.categorical_crossentropy(prediction,", "to store parameters when the validation score (error rate) is best # self.trainer.retain_best_scoring_state_of_updates(updates)", "from which to 
load network parameters :return: a classifier instance \"\"\" # Prepare", "X: input samples :param batchsize: [optional] mini-batch size default=500 :param temperature: [optional] softmax", "with a temperature setting; increasing it smooths the resulting probabilities. \"\"\" def __init__(self,", "self.trainer.retain_best_scoring_state_of_network(network) def _check_train_epoch_results(self, epoch, train_epoch_results): if np.isnan(train_epoch_results).any(): return 'Training loss of NaN' else:", "@classmethod def for_model(cls, network_build_fn, params_path=None, *args, **kwargs): \"\"\" Construct a classifier, given a", "that constructs a network in the form of a Lasagne layer, given an", "Theano variable :param final_layer: final layer, a Lasagne layer :param updates_fn: [optional] a", "for scoring self.trainer.evaluate_with(eval_batch_fn=self._val_fn, validation_score_fn=1) # Set the epoch logging function self.trainer.report(epoch_log_fn=self._epoch_log) # Tell", "None: batch = batch_xform_fn(batch) y_batch = self._predict_prob_fn(batch[0]) y.append(y_batch) y = np.concatenate(y, axis=0) if", "load them if params_path is not None: utils.load_model(params_path, network) return cls(input_var, target_var, network,", "self.input_var = input_var self.target_var = target_var self.final_layer = final_layer self.softmax = TemperatureSoftmax() network", "epoch logging function self.trainer.report(epoch_log_fn=self._epoch_log) # Tell the trainer to store parameters when the", "* self._temperature) class AbstractClassifier (object): @classmethod def for_model(cls, network_build_fn, params_path=None, *args, **kwargs): \"\"\"", "**kwargs) class ImageClassifier (AbstractClassifier): def __init__(self, input_var, target_var, final_layer, updates_fn=None): \"\"\" Constructor -", "params, learning_rate=0.01, momentum=0.9) else: updates = updates_fn(loss.mean(), params) # EVALUATION - VALIDATION, TEST,", "given an input variable (a Theano variable) :param params_path: 
[optional] path from which", "validation loss and error: self._val_fn = theano.function([input_var, target_var], [eval_loss.sum(), test_err]) # Compile a", "import trainer, utils class TemperatureSoftmax (object): \"\"\" A softmax function with a temperature", "a scalar objective we want # to minimize (for our multi-class problem, it", "input samples :param batchsize: [optional] mini-batch size default=500 :param temperature: [optional] softmax temperature", "\"\"\" y = [] if temperature is not None: self.softmax.temperature = temperature for", "it is the cross-entropy loss): loss = lasagne.objectives.categorical_crossentropy(prediction, target_var) # We could add", "# Create evaluation loss expression eval_loss = lasagne.objectives.categorical_crossentropy(eval_prediction, target_var) # Create an expression", "batch_xform_fn(batch) y_batch = self._predict_prob_fn(batch[0]) y.append(y_batch) y = np.concatenate(y, axis=0) if temperature is not", "_check_train_epoch_results(self, epoch, train_epoch_results): if np.isnan(train_epoch_results).any(): return 'Training loss of NaN' else: return None", "def __init__(self, temperature=1.0): self._temperature = theano.shared(lasagne.utils.floatX(temperature), 'temperature') @property def temperature(self): return 1.0 /", "eval_prediction) # Construct a trainer self.trainer = trainer.Trainer() # Provide with training function", "problem, it is the cross-entropy loss): loss = lasagne.objectives.categorical_crossentropy(prediction, target_var) # We could", "not None: items.append('val loss={:.6f}, val err={:.2%}'.format(val_results[0], val_results[1])) if test_results is not None: items.append('test", "loss): loss = lasagne.objectives.categorical_crossentropy(prediction, target_var) # We could add some weight decay as", "updates` that generates update expressions given the cost and the parameters to update", "error count test_err = T.sum(T.neq(T.argmax(eval_prediction, axis=1), target_var), dtype=theano.config.floatX) # Compile 
a function performing", "a pre-built network. :param network: pre-built network, in the form of a Lasagne", "self.trainer.batch_iterator(X, batchsize=batchsize, shuffle=False): if batch_xform_fn is not None: batch = batch_xform_fn(batch) y_batch =", "parameters :return: a classifier instance \"\"\" # Prepare Theano variables for inputs and", "delta_time, train_results, val_results, test_results): \"\"\" Epoch logging callback, passed to the `self.trainer.report()` \"\"\"", ":param updates_fn: [optional] a function of the form `fn(cost, params) -> updates` that", "= lasagne.layers.get_all_params(network, trainable=True) if updates_fn is None: updates = lasagne.updates.nesterov_momentum( loss.mean(), params, learning_rate=0.01,", "= self._predict_prob_fn(batch[0]) y.append(y_batch) y = np.concatenate(y, axis=0) if temperature is not None: self.softmax.temperature", "Lasagne offers plenty more. params = lasagne.layers.get_all_params(network, trainable=True) if updates_fn is None: updates", ":param network: pre-built network, in the form of a Lasagne layer :param args:", "layer) :param input_var: input variable, a Theano variable :param target_var: target variable, a", "@classmethod def for_network(cls, network, *args, **kwargs): \"\"\" Construct a classifier instance, given a", "# Construct a trainer self.trainer = trainer.Trainer() # Provide with training function self.trainer.train_with(train_batch_fn=self._train_fn,", "is not None: items.append('test err={:.2%}'.format(test_results[1])) return ', '.join(items) @property def n_upper_layers(self): return len(self.upper_layers)", "learning_rate=0.01, momentum=0.9) else: updates = updates_fn(loss.mean(), params) # EVALUATION - VALIDATION, TEST, PREDICTION", "probabilities for input samples :param X: input samples :param batchsize: [optional] mini-batch size", "[optional] a function of the form `fn(cost, params) -> updates` that generates update", "of a Lasagne layer :param args: :param kwargs: :return: \"\"\" # 
Construct input_var", "utils.get_network_input_var(network) target_var = T.ivector('targets') return cls(input_var, target_var, network, *args, **kwargs) class ImageClassifier (AbstractClassifier):", "input_var self.target_var = target_var self.final_layer = final_layer self.softmax = TemperatureSoftmax() network = lasagne.layers.NonlinearityLayer(final_layer,", "return 'Training loss of NaN' else: return None def _epoch_log(self, epoch_number, delta_time, train_results,", "the network's output prediction = lasagne.layers.get_output(network) # Create a loss expression for training,", "function, the second output value - error rate - is used for scoring", "return ', '.join(items) @property def n_upper_layers(self): return len(self.upper_layers) def predict_prob(self, X, batchsize=500, temperature=None,", "/ self._temperature.get_value() @temperature.setter def temperature(self, value): self._temperature.set_value(lasagne.utils.floatX(1.0 / value)) def __call__(self, x): return", "[optional] mini-batch size default=500 :param temperature: [optional] softmax temperature :return: \"\"\" y =", "if val_results is not None: items.append('val loss={:.6f}, val err={:.2%}'.format(val_results[0], val_results[1])) if test_results is", "offers plenty more. 
params = lasagne.layers.get_all_params(network, trainable=True) if updates_fn is None: updates =", "= updates_fn(loss.mean(), params) # EVALUATION - VALIDATION, TEST, PREDICTION # Create prediction expressions;", "def n_upper_layers(self): return len(self.upper_layers) def predict_prob(self, X, batchsize=500, temperature=None, batch_xform_fn=None): \"\"\" Predict probabilities", "loss.sum(), updates=updates) # Compile a function computing the validation loss and error: self._val_fn", "samples :param batchsize: [optional] mini-batch size default=500 :param temperature: [optional] softmax temperature :return:", "(a Theano variable) :param params_path: [optional] path from which to load network parameters", "{:.2f}s'.format(epoch_number + 1, self.trainer.num_epochs, delta_time)) items.append('train loss={:.6f}'.format(train_results[0])) if val_results is not None: items.append('val", "giving # the updates dictionary) and returning the corresponding training loss: self._train_fn =", "temperature is not None: self.softmax.temperature = temperature for batch in self.trainer.batch_iterator(X, batchsize=batchsize, shuffle=False):", "import theano.tensor as T import lasagne import trainer, utils class TemperatureSoftmax (object): \"\"\"", "+ 1, self.trainer.num_epochs, delta_time)) items.append('train loss={:.6f}'.format(train_results[0])) if val_results is not None: items.append('val loss={:.6f},", ":param target_var: target variable, a Theano variable :param final_layer: final layer, a Lasagne", "import theano import theano.tensor as T import lasagne import trainer, utils class TemperatureSoftmax", "training loss: self._train_fn = theano.function([input_var, target_var], loss.sum(), updates=updates) # Compile a function computing", "return cls(input_var, target_var, network, *args, **kwargs) @classmethod def for_network(cls, network, *args, **kwargs): \"\"\"", "Set the epoch logging function self.trainer.report(epoch_log_fn=self._epoch_log) # Tell the trainer to store 
parameters", "[optional] softmax temperature :return: \"\"\" y = [] if temperature is not None:", "-> updates` that generates update expressions given the cost and the parameters to", "technique e.g. Nesterov Momentum: `lambda cost, params: lasagne.updates.nesterov_momentum(cost, params, learning_rate=0.002, momentum=0.9)` \"\"\" self.input_var", "load network parameters :return: a classifier instance \"\"\" # Prepare Theano variables for", "def temperature(self): return 1.0 / self._temperature.get_value() @temperature.setter def temperature(self, value): self._temperature.set_value(lasagne.utils.floatX(1.0 / value))", "return y def predict_cls(self, X, batchsize=500, batch_xform_fn=None): prob = self.predict_prob(X, batchsize=batchsize, batch_xform_fn=batch_xform_fn) return", "training function self.trainer.train_with(train_batch_fn=self._train_fn, train_epoch_results_check_fn=self._check_train_epoch_results) # Evaluate with evaluation function, the second output value", "- VALIDATION, TEST, PREDICTION # Create prediction expressions; use deterministic forward pass (disable", "which to load parameters. :param network_build_fn: network builder function of the form `fn(input_var,", "function performing a training step on a mini-batch (by giving # the updates", "\"\"\" # Prepare Theano variables for inputs and targets input_var = T.tensor4('inputs') target_var", "with Nesterov momentum, but Lasagne offers plenty more. 
params = lasagne.layers.get_all_params(network, trainable=True) if", "network_build_fn, params_path=None, *args, **kwargs): \"\"\" Construct a classifier, given a network building function", "params) -> updates` that generates update expressions given the cost and the parameters", "scoring self.trainer.evaluate_with(eval_batch_fn=self._val_fn, validation_score_fn=1) # Set the epoch logging function self.trainer.report(epoch_log_fn=self._epoch_log) # Tell the", "epoch_number, delta_time, train_results, val_results, test_results): \"\"\" Epoch logging callback, passed to the `self.trainer.report()`", "= T.ivector('targets') # Build the network print(\"Building model and compiling functions...\") network =", "__future__ import print_function # os.environ['THEANO_FLAGS'] = 'device=gpu1' import numpy as np import theano", "with training function self.trainer.train_with(train_batch_fn=self._train_fn, train_epoch_results_check_fn=self._check_train_epoch_results) # Evaluate with evaluation function, the second output", "and compiling functions...\") network = network_build_fn(input_var=input_var, **kwargs) # If a parameters path is", "TEST, PREDICTION # Create prediction expressions; use deterministic forward pass (disable # dropout", "and an optional path from which to load parameters. 
:param network_build_fn: network builder", "class ImageClassifier (AbstractClassifier): def __init__(self, input_var, target_var, final_layer, updates_fn=None): \"\"\" Constructor - construct", "n_upper_layers(self): return len(self.upper_layers) def predict_prob(self, X, batchsize=500, temperature=None, batch_xform_fn=None): \"\"\" Predict probabilities for", "network_build_fn(input_var=input_var, **kwargs) # If a parameters path is provided, load them if params_path", "`ImageClassifier` instance given variables for input, target and a final layer (a Lasagne", "# os.environ['THEANO_FLAGS'] = 'device=gpu1' import numpy as np import theano import theano.tensor as", "self._train_fn = theano.function([input_var, target_var], loss.sum(), updates=updates) # Compile a function computing the validation", ":param network_build_fn: network builder function of the form `fn(input_var, **kwargs) -> lasagne_layer` that", "target_var = T.ivector('targets') return cls(input_var, target_var, network, *args, **kwargs) class ImageClassifier (AbstractClassifier): def", "= utils.get_network_input_var(network) target_var = T.ivector('targets') return cls(input_var, target_var, network, *args, **kwargs) class ImageClassifier", "for batch in self.trainer.batch_iterator(X, batchsize=batchsize, shuffle=False): if batch_xform_fn is not None: batch =", "path from which to load network parameters :return: a classifier instance \"\"\" #", "network parameters :return: a classifier instance \"\"\" # Prepare Theano variables for inputs", "updates=updates) # Compile a function computing the validation loss and error: self._val_fn =", "computing the validation loss and error: self._val_fn = theano.function([input_var, target_var], [eval_loss.sum(), test_err]) #", "= [] items.append('Epoch {}/{} took {:.2f}s'.format(epoch_number + 1, self.trainer.num_epochs, delta_time)) items.append('train loss={:.6f}'.format(train_results[0])) if", "self.trainer = trainer.Trainer() # Provide with training function 
self.trainer.train_with(train_batch_fn=self._train_fn, train_epoch_results_check_fn=self._check_train_epoch_results) # Evaluate with", "kwargs: :return: \"\"\" # Construct input_var = utils.get_network_input_var(network) target_var = T.ivector('targets') return cls(input_var,", "args: :param kwargs: :return: \"\"\" # Construct input_var = utils.get_network_input_var(network) target_var = T.ivector('targets')", "which to load network parameters :return: a classifier instance \"\"\" # Prepare Theano", "with evaluation function, the second output value - error rate - is used", "# Build the network print(\"Building model and compiling functions...\") network = network_build_fn(input_var=input_var, **kwargs)", "np.isnan(train_epoch_results).any(): return 'Training loss of NaN' else: return None def _epoch_log(self, epoch_number, delta_time,", "building function and an optional path from which to load parameters. :param network_build_fn:", "from which to load parameters. :param network_build_fn: network builder function of the form", "utils.load_model(params_path, network) return cls(input_var, target_var, network, *args, **kwargs) @classmethod def for_network(cls, network, *args,", "variables for input, target and a final layer (a Lasagne layer) :param input_var:", "for input, target and a final layer (a Lasagne layer) :param input_var: input", "the validation loss and error: self._val_fn = theano.function([input_var, target_var], [eval_loss.sum(), test_err]) # Compile", "err={:.2%}'.format(val_results[0], val_results[1])) if test_results is not None: items.append('test err={:.2%}'.format(test_results[1])) return ', '.join(items) @property", "learning_rate=0.002, momentum=0.9)` \"\"\" self.input_var = input_var self.target_var = target_var self.final_layer = final_layer self.softmax", "temperature is not None: self.softmax.temperature = 1.0 return y def predict_cls(self, X, batchsize=500,", "Create an expression for error count test_err = 
T.sum(T.neq(T.argmax(eval_prediction, axis=1), target_var), dtype=theano.config.floatX) #", "second output value - error rate - is used for scoring self.trainer.evaluate_with(eval_batch_fn=self._val_fn, validation_score_fn=1)", "updates = updates_fn(loss.mean(), params) # EVALUATION - VALIDATION, TEST, PREDICTION # Create prediction", "None: self.softmax.temperature = 1.0 return y def predict_cls(self, X, batchsize=500, batch_xform_fn=None): prob =", "expressions for training, i.e., how to modify the # parameters at each training", "size default=500 :param temperature: [optional] softmax temperature :return: \"\"\" y = [] if", "self._predict_prob_fn(batch[0]) y.append(y_batch) y = np.concatenate(y, axis=0) if temperature is not None: self.softmax.temperature =", "lasagne import trainer, utils class TemperatureSoftmax (object): \"\"\" A softmax function with a", "expressions given the cost and the parameters to update using an optimisation technique", "train_results, val_results, test_results): \"\"\" Epoch logging callback, passed to the `self.trainer.report()` \"\"\" items", "if temperature is not None: self.softmax.temperature = temperature for batch in self.trainer.batch_iterator(X, batchsize=batchsize,", ":param params_path: [optional] path from which to load network parameters :return: a classifier", "print_function # os.environ['THEANO_FLAGS'] = 'device=gpu1' import numpy as np import theano import theano.tensor", "corresponding training loss: self._train_fn = theano.function([input_var, target_var], loss.sum(), updates=updates) # Compile a function", "how to modify the # parameters at each training step. Here, we'll use", "the # parameters at each training step. Here, we'll use Stochastic Gradient #", "trainer to store parameters when the validation score (error rate) is best #", "at each training step. 
Here, we'll use Stochastic Gradient # Descent (SGD) with", "mini-batch size default=500 :param temperature: [optional] softmax temperature :return: \"\"\" y = []", "= lasagne.layers.NonlinearityLayer(final_layer, self.softmax) self.network = network # TRAINING # Get an expression representing", "target_var = T.ivector('targets') # Build the network print(\"Building model and compiling functions...\") network", "is not None: self.softmax.temperature = temperature for batch in self.trainer.batch_iterator(X, batchsize=batchsize, shuffle=False): if", "ImageClassifier (AbstractClassifier): def __init__(self, input_var, target_var, final_layer, updates_fn=None): \"\"\" Constructor - construct an", "lasagne.nonlinearities.softmax(x * self._temperature) class AbstractClassifier (object): @classmethod def for_model(cls, network_build_fn, params_path=None, *args, **kwargs):", "function computing the predicted probability self._predict_prob_fn = theano.function([input_var], eval_prediction) # Construct a trainer", "softmax function with a temperature setting; increasing it smooths the resulting probabilities. 
\"\"\"", "**kwargs) # If a parameters path is provided, load them if params_path is", "network) return cls(input_var, target_var, network, *args, **kwargs) @classmethod def for_network(cls, network, *args, **kwargs):", "batch_xform_fn=None): \"\"\" Predict probabilities for input samples :param X: input samples :param batchsize:", "theano import theano.tensor as T import lasagne import trainer, utils class TemperatureSoftmax (object):", ":return: \"\"\" # Construct input_var = utils.get_network_input_var(network) target_var = T.ivector('targets') return cls(input_var, target_var,", "TemperatureSoftmax (object): \"\"\" A softmax function with a temperature setting; increasing it smooths", "# Compile a function performing a training step on a mini-batch (by giving", "lasagne.layers.NonlinearityLayer(final_layer, self.softmax) self.network = network # TRAINING # Get an expression representing the", "is not None: items.append('val loss={:.6f}, val err={:.2%}'.format(val_results[0], val_results[1])) if test_results is not None:", "self.trainer.train_with(train_batch_fn=self._train_fn, train_epoch_results_check_fn=self._check_train_epoch_results) # Evaluate with evaluation function, the second output value - error", "given a network building function and an optional path from which to load", "'device=gpu1' import numpy as np import theano import theano.tensor as T import lasagne", "modify the # parameters at each training step. 
Here, we'll use Stochastic Gradient", "# dropout layers) eval_prediction = lasagne.layers.get_output(network, deterministic=True) # Create evaluation loss expression eval_loss", "- error rate - is used for scoring self.trainer.evaluate_with(eval_batch_fn=self._val_fn, validation_score_fn=1) # Set the", "the epoch logging function self.trainer.report(epoch_log_fn=self._epoch_log) # Tell the trainer to store parameters when", "Predict probabilities for input samples :param X: input samples :param batchsize: [optional] mini-batch", "an expression for error count test_err = T.sum(T.neq(T.argmax(eval_prediction, axis=1), target_var), dtype=theano.config.floatX) # Compile", "\"\"\" # Construct input_var = utils.get_network_input_var(network) target_var = T.ivector('targets') return cls(input_var, target_var, network,", "None: items.append('val loss={:.6f}, val err={:.2%}'.format(val_results[0], val_results[1])) if test_results is not None: items.append('test err={:.2%}'.format(test_results[1]))", "(error rate) is best # self.trainer.retain_best_scoring_state_of_updates(updates) self.trainer.retain_best_scoring_state_of_network(network) def _check_train_epoch_results(self, epoch, train_epoch_results): if np.isnan(train_epoch_results).any():", "input_var = T.tensor4('inputs') target_var = T.ivector('targets') # Build the network print(\"Building model and", "1.0 / self._temperature.get_value() @temperature.setter def temperature(self, value): self._temperature.set_value(lasagne.utils.floatX(1.0 / value)) def __call__(self, x):", "error: self._val_fn = theano.function([input_var, target_var], [eval_loss.sum(), test_err]) # Compile a function computing the", "self.target_var = target_var self.final_layer = final_layer self.softmax = TemperatureSoftmax() network = lasagne.layers.NonlinearityLayer(final_layer, self.softmax)", "None: updates = lasagne.updates.nesterov_momentum( loss.mean(), params, learning_rate=0.01, momentum=0.9) else: updates = updates_fn(loss.mean(), 
params)", "**kwargs): \"\"\" Construct a classifier, given a network building function and an optional", "in the form of a Lasagne layer, given an input variable (a Theano", "probability self._predict_prob_fn = theano.function([input_var], eval_prediction) # Construct a trainer self.trainer = trainer.Trainer() #", ":param X: input samples :param batchsize: [optional] mini-batch size default=500 :param temperature: [optional]", "Create a loss expression for training, i.e., a scalar objective we want #", "variable) :param params_path: [optional] path from which to load network parameters :return: a", "path is provided, load them if params_path is not None: utils.load_model(params_path, network) return", "`lambda cost, params: lasagne.updates.nesterov_momentum(cost, params, learning_rate=0.002, momentum=0.9)` \"\"\" self.input_var = input_var self.target_var =", "some weight decay as well here, see lasagne.regularization. # Create update expressions for", "not None: self.softmax.temperature = temperature for batch in self.trainer.batch_iterator(X, batchsize=batchsize, shuffle=False): if batch_xform_fn", "train_epoch_results_check_fn=self._check_train_epoch_results) # Evaluate with evaluation function, the second output value - error rate", "is used for scoring self.trainer.evaluate_with(eval_batch_fn=self._val_fn, validation_score_fn=1) # Set the epoch logging function self.trainer.report(epoch_log_fn=self._epoch_log)", "objective we want # to minimize (for our multi-class problem, it is the", "layer, a Lasagne layer :param updates_fn: [optional] a function of the form `fn(cost,", "representing the network's output prediction = lasagne.layers.get_output(network) # Create a loss expression for", "logging callback, passed to the `self.trainer.report()` \"\"\" items = [] items.append('Epoch {}/{} took", "items.append('test err={:.2%}'.format(test_results[1])) return ', '.join(items) @property def n_upper_layers(self): return len(self.upper_layers) def predict_prob(self, 
X,", "given the cost and the parameters to update using an optimisation technique e.g.", "batchsize=500, temperature=None, batch_xform_fn=None): \"\"\" Predict probabilities for input samples :param X: input samples", "classifier instance, given a pre-built network. :param network: pre-built network, in the form", "resulting probabilities. \"\"\" def __init__(self, temperature=1.0): self._temperature = theano.shared(lasagne.utils.floatX(temperature), 'temperature') @property def temperature(self):", "to the `self.trainer.report()` \"\"\" items = [] items.append('Epoch {}/{} took {:.2f}s'.format(epoch_number + 1,", "= TemperatureSoftmax() network = lasagne.layers.NonlinearityLayer(final_layer, self.softmax) self.network = network # TRAINING # Get", "function self.trainer.train_with(train_batch_fn=self._train_fn, train_epoch_results_check_fn=self._check_train_epoch_results) # Evaluate with evaluation function, the second output value -", "a Theano variable :param target_var: target variable, a Theano variable :param final_layer: final", "rate - is used for scoring self.trainer.evaluate_with(eval_batch_fn=self._val_fn, validation_score_fn=1) # Set the epoch logging", "target variable, a Theano variable :param final_layer: final layer, a Lasagne layer :param", "of NaN' else: return None def _epoch_log(self, epoch_number, delta_time, train_results, val_results, test_results): \"\"\"", "x): return lasagne.nonlinearities.softmax(x * self._temperature) class AbstractClassifier (object): @classmethod def for_model(cls, network_build_fn, params_path=None,", "X, batchsize=500, temperature=None, batch_xform_fn=None): \"\"\" Predict probabilities for input samples :param X: input", "loss expression eval_loss = lasagne.objectives.categorical_crossentropy(eval_prediction, target_var) # Create an expression for error count", "Lasagne layer, given an input variable (a Theano variable) :param params_path: [optional] path", 
"<reponame>springcoil/deep-learning-tutorial-pydata2016<gh_stars>0 from __future__ import print_function # os.environ['THEANO_FLAGS'] = 'device=gpu1' import numpy as np", "Build the network print(\"Building model and compiling functions...\") network = network_build_fn(input_var=input_var, **kwargs) #", "a function computing the predicted probability self._predict_prob_fn = theano.function([input_var], eval_prediction) # Construct a", "np.concatenate(y, axis=0) if temperature is not None: self.softmax.temperature = 1.0 return y def", "if updates_fn is None: updates = lasagne.updates.nesterov_momentum( loss.mean(), params, learning_rate=0.01, momentum=0.9) else: updates", "function computing the validation loss and error: self._val_fn = theano.function([input_var, target_var], [eval_loss.sum(), test_err])", "Epoch logging callback, passed to the `self.trainer.report()` \"\"\" items = [] items.append('Epoch {}/{}", "the form `fn(input_var, **kwargs) -> lasagne_layer` that constructs a network in the form", "and returning the corresponding training loss: self._train_fn = theano.function([input_var, target_var], loss.sum(), updates=updates) #", "*args, **kwargs) @classmethod def for_network(cls, network, *args, **kwargs): \"\"\" Construct a classifier instance,", "return len(self.upper_layers) def predict_prob(self, X, batchsize=500, temperature=None, batch_xform_fn=None): \"\"\" Predict probabilities for input", "temperature :return: \"\"\" y = [] if temperature is not None: self.softmax.temperature =", "Prepare Theano variables for inputs and targets input_var = T.tensor4('inputs') target_var = T.ivector('targets')", "# Evaluate with evaluation function, the second output value - error rate -", "see lasagne.regularization. 
# Create update expressions for training, i.e., how to modify the", "theano.tensor as T import lasagne import trainer, utils class TemperatureSoftmax (object): \"\"\" A", "a network building function and an optional path from which to load parameters.", "parameters path is provided, load them if params_path is not None: utils.load_model(params_path, network)", "layer, given an input variable (a Theano variable) :param params_path: [optional] path from", "compiling functions...\") network = network_build_fn(input_var=input_var, **kwargs) # If a parameters path is provided,", "Create evaluation loss expression eval_loss = lasagne.objectives.categorical_crossentropy(eval_prediction, target_var) # Create an expression for", "Lasagne layer :param args: :param kwargs: :return: \"\"\" # Construct input_var = utils.get_network_input_var(network)", "them if params_path is not None: utils.load_model(params_path, network) return cls(input_var, target_var, network, *args,", ":param final_layer: final layer, a Lasagne layer :param updates_fn: [optional] a function of", "parameters when the validation score (error rate) is best # self.trainer.retain_best_scoring_state_of_updates(updates) self.trainer.retain_best_scoring_state_of_network(network) def", "y_batch = self._predict_prob_fn(batch[0]) y.append(y_batch) y = np.concatenate(y, axis=0) if temperature is not None:", "target_var, network, *args, **kwargs) @classmethod def for_network(cls, network, *args, **kwargs): \"\"\" Construct a", "# the updates dictionary) and returning the corresponding training loss: self._train_fn = theano.function([input_var,", "= 'device=gpu1' import numpy as np import theano import theano.tensor as T import", "loss: self._train_fn = theano.function([input_var, target_var], loss.sum(), updates=updates) # Compile a function computing the", "lasagne.updates.nesterov_momentum(cost, params, learning_rate=0.002, momentum=0.9)` \"\"\" self.input_var = input_var self.target_var = target_var 
self.final_layer =", "= input_var self.target_var = target_var self.final_layer = final_layer self.softmax = TemperatureSoftmax() network =", "\"\"\" def __init__(self, temperature=1.0): self._temperature = theano.shared(lasagne.utils.floatX(temperature), 'temperature') @property def temperature(self): return 1.0", "return lasagne.nonlinearities.softmax(x * self._temperature) class AbstractClassifier (object): @classmethod def for_model(cls, network_build_fn, params_path=None, *args,", "Constructor - construct an `ImageClassifier` instance given variables for input, target and a", "1, self.trainer.num_epochs, delta_time)) items.append('train loss={:.6f}'.format(train_results[0])) if val_results is not None: items.append('val loss={:.6f}, val", "\"\"\" self.input_var = input_var self.target_var = target_var self.final_layer = final_layer self.softmax = TemperatureSoftmax()", "a mini-batch (by giving # the updates dictionary) and returning the corresponding training", "store parameters when the validation score (error rate) is best # self.trainer.retain_best_scoring_state_of_updates(updates) self.trainer.retain_best_scoring_state_of_network(network)", "samples :param X: input samples :param batchsize: [optional] mini-batch size default=500 :param temperature:", "for inputs and targets input_var = T.tensor4('inputs') target_var = T.ivector('targets') # Build the", "inputs and targets input_var = T.tensor4('inputs') target_var = T.ivector('targets') # Build the network", "= final_layer self.softmax = TemperatureSoftmax() network = lasagne.layers.NonlinearityLayer(final_layer, self.softmax) self.network = network #", "we want # to minimize (for our multi-class problem, it is the cross-entropy", "expression for training, i.e., a scalar objective we want # to minimize (for", "epoch, train_epoch_results): if np.isnan(train_epoch_results).any(): return 'Training loss of NaN' else: return None def", "trainable=True) if updates_fn is None: updates = 
lasagne.updates.nesterov_momentum( loss.mean(), params, learning_rate=0.01, momentum=0.9) else:", "dropout layers) eval_prediction = lasagne.layers.get_output(network, deterministic=True) # Create evaluation loss expression eval_loss =", "def predict_prob(self, X, batchsize=500, temperature=None, batch_xform_fn=None): \"\"\" Predict probabilities for input samples :param", "lasagne_layer` that constructs a network in the form of a Lasagne layer, given", "callback, passed to the `self.trainer.report()` \"\"\" items = [] items.append('Epoch {}/{} took {:.2f}s'.format(epoch_number", "update expressions given the cost and the parameters to update using an optimisation", "as np import theano import theano.tensor as T import lasagne import trainer, utils", "temperature: [optional] softmax temperature :return: \"\"\" y = [] if temperature is not", "as T import lasagne import trainer, utils class TemperatureSoftmax (object): \"\"\" A softmax", "**kwargs) -> lasagne_layer` that constructs a network in the form of a Lasagne", "the network print(\"Building model and compiling functions...\") network = network_build_fn(input_var=input_var, **kwargs) # If", "self.network = network # TRAINING # Get an expression representing the network's output", "network. :param network: pre-built network, in the form of a Lasagne layer :param", "a Lasagne layer :param updates_fn: [optional] a function of the form `fn(cost, params)", "optional path from which to load parameters. 
:param network_build_fn: network builder function of", "a final layer (a Lasagne layer) :param input_var: input variable, a Theano variable", "a function computing the validation loss and error: self._val_fn = theano.function([input_var, target_var], [eval_loss.sum(),", "for error count test_err = T.sum(T.neq(T.argmax(eval_prediction, axis=1), target_var), dtype=theano.config.floatX) # Compile a function", "is not None: batch = batch_xform_fn(batch) y_batch = self._predict_prob_fn(batch[0]) y.append(y_batch) y = np.concatenate(y,", "expressions; use deterministic forward pass (disable # dropout layers) eval_prediction = lasagne.layers.get_output(network, deterministic=True)", "y def predict_cls(self, X, batchsize=500, batch_xform_fn=None): prob = self.predict_prob(X, batchsize=batchsize, batch_xform_fn=batch_xform_fn) return np.argmax(prob,", "T import lasagne import trainer, utils class TemperatureSoftmax (object): \"\"\" A softmax function", "cost and the parameters to update using an optimisation technique e.g. Nesterov Momentum:", "instance given variables for input, target and a final layer (a Lasagne layer)", "a loss expression for training, i.e., a scalar objective we want # to", "[] items.append('Epoch {}/{} took {:.2f}s'.format(epoch_number + 1, self.trainer.num_epochs, delta_time)) items.append('train loss={:.6f}'.format(train_results[0])) if val_results", ":param batchsize: [optional] mini-batch size default=500 :param temperature: [optional] softmax temperature :return: \"\"\"", "# We could add some weight decay as well here, see lasagne.regularization. 
#", "Lasagne layer :param updates_fn: [optional] a function of the form `fn(cost, params) ->", "Construct a trainer self.trainer = trainer.Trainer() # Provide with training function self.trainer.train_with(train_batch_fn=self._train_fn, train_epoch_results_check_fn=self._check_train_epoch_results)", "input_var = utils.get_network_input_var(network) target_var = T.ivector('targets') return cls(input_var, target_var, network, *args, **kwargs) class", "VALIDATION, TEST, PREDICTION # Create prediction expressions; use deterministic forward pass (disable #", "- is used for scoring self.trainer.evaluate_with(eval_batch_fn=self._val_fn, validation_score_fn=1) # Set the epoch logging function", "updates dictionary) and returning the corresponding training loss: self._train_fn = theano.function([input_var, target_var], loss.sum(),", "items = [] items.append('Epoch {}/{} took {:.2f}s'.format(epoch_number + 1, self.trainer.num_epochs, delta_time)) items.append('train loss={:.6f}'.format(train_results[0]))", "self.softmax = TemperatureSoftmax() network = lasagne.layers.NonlinearityLayer(final_layer, self.softmax) self.network = network # TRAINING #", "[eval_loss.sum(), test_err]) # Compile a function computing the predicted probability self._predict_prob_fn = theano.function([input_var],", "and a final layer (a Lasagne layer) :param input_var: input variable, a Theano", "output prediction = lasagne.layers.get_output(network) # Create a loss expression for training, i.e., a", "eval_prediction = lasagne.layers.get_output(network, deterministic=True) # Create evaluation loss expression eval_loss = lasagne.objectives.categorical_crossentropy(eval_prediction, target_var)", "the updates dictionary) and returning the corresponding training loss: self._train_fn = theano.function([input_var, target_var],", "# Set the epoch logging function self.trainer.report(epoch_log_fn=self._epoch_log) # Tell the trainer to store", "logging function self.trainer.report(epoch_log_fn=self._epoch_log) 
# Tell the trainer to store parameters when the validation", "the form `fn(cost, params) -> updates` that generates update expressions given the cost", "@temperature.setter def temperature(self, value): self._temperature.set_value(lasagne.utils.floatX(1.0 / value)) def __call__(self, x): return lasagne.nonlinearities.softmax(x *", "params = lasagne.layers.get_all_params(network, trainable=True) if updates_fn is None: updates = lasagne.updates.nesterov_momentum( loss.mean(), params,", "lasagne.layers.get_output(network, deterministic=True) # Create evaluation loss expression eval_loss = lasagne.objectives.categorical_crossentropy(eval_prediction, target_var) # Create", "utils class TemperatureSoftmax (object): \"\"\" A softmax function with a temperature setting; increasing", "TemperatureSoftmax() network = lasagne.layers.NonlinearityLayer(final_layer, self.softmax) self.network = network # TRAINING # Get an", "\"\"\" A softmax function with a temperature setting; increasing it smooths the resulting", "`self.trainer.report()` \"\"\" items = [] items.append('Epoch {}/{} took {:.2f}s'.format(epoch_number + 1, self.trainer.num_epochs, delta_time))", "lasagne.layers.get_all_params(network, trainable=True) if updates_fn is None: updates = lasagne.updates.nesterov_momentum( loss.mean(), params, learning_rate=0.01, momentum=0.9)", "to load parameters. :param network_build_fn: network builder function of the form `fn(input_var, **kwargs)", "decay as well here, see lasagne.regularization. 
# Create update expressions for training, i.e.,", "a training step on a mini-batch (by giving # the updates dictionary) and", "loss={:.6f}, val err={:.2%}'.format(val_results[0], val_results[1])) if test_results is not None: items.append('test err={:.2%}'.format(test_results[1])) return ',", "theano.shared(lasagne.utils.floatX(temperature), 'temperature') @property def temperature(self): return 1.0 / self._temperature.get_value() @temperature.setter def temperature(self, value):", "return None def _epoch_log(self, epoch_number, delta_time, train_results, val_results, test_results): \"\"\" Epoch logging callback,", "form `fn(cost, params) -> updates` that generates update expressions given the cost and", "params: lasagne.updates.nesterov_momentum(cost, params, learning_rate=0.002, momentum=0.9)` \"\"\" self.input_var = input_var self.target_var = target_var self.final_layer", "batch = batch_xform_fn(batch) y_batch = self._predict_prob_fn(batch[0]) y.append(y_batch) y = np.concatenate(y, axis=0) if temperature", "if params_path is not None: utils.load_model(params_path, network) return cls(input_var, target_var, network, *args, **kwargs)", "import lasagne import trainer, utils class TemperatureSoftmax (object): \"\"\" A softmax function with", "def temperature(self, value): self._temperature.set_value(lasagne.utils.floatX(1.0 / value)) def __call__(self, x): return lasagne.nonlinearities.softmax(x * self._temperature)", "use Stochastic Gradient # Descent (SGD) with Nesterov momentum, but Lasagne offers plenty", "loss and error: self._val_fn = theano.function([input_var, target_var], [eval_loss.sum(), test_err]) # Compile a function", "'.join(items) @property def n_upper_layers(self): return len(self.upper_layers) def predict_prob(self, X, batchsize=500, temperature=None, batch_xform_fn=None): \"\"\"", ":return: a classifier instance \"\"\" # Prepare Theano variables for inputs and targets", "instance \"\"\" # Prepare Theano variables for inputs and targets input_var 
= T.tensor4('inputs')", "network_build_fn: network builder function of the form `fn(input_var, **kwargs) -> lasagne_layer` that constructs", "function with a temperature setting; increasing it smooths the resulting probabilities. \"\"\" def", "increasing it smooths the resulting probabilities. \"\"\" def __init__(self, temperature=1.0): self._temperature = theano.shared(lasagne.utils.floatX(temperature),", "in the form of a Lasagne layer :param args: :param kwargs: :return: \"\"\"", "computing the predicted probability self._predict_prob_fn = theano.function([input_var], eval_prediction) # Construct a trainer self.trainer", "function of the form `fn(cost, params) -> updates` that generates update expressions given", "items.append('train loss={:.6f}'.format(train_results[0])) if val_results is not None: items.append('val loss={:.6f}, val err={:.2%}'.format(val_results[0], val_results[1])) if", "for input samples :param X: input samples :param batchsize: [optional] mini-batch size default=500", "update using an optimisation technique e.g. Nesterov Momentum: `lambda cost, params: lasagne.updates.nesterov_momentum(cost, params,", "(SGD) with Nesterov momentum, but Lasagne offers plenty more. params = lasagne.layers.get_all_params(network, trainable=True)", "is not None: utils.load_model(params_path, network) return cls(input_var, target_var, network, *args, **kwargs) @classmethod def", "pass (disable # dropout layers) eval_prediction = lasagne.layers.get_output(network, deterministic=True) # Create evaluation loss", "We could add some weight decay as well here, see lasagne.regularization. 
# Create", "target_var: target variable, a Theano variable :param final_layer: final layer, a Lasagne layer", "final_layer: final layer, a Lasagne layer :param updates_fn: [optional] a function of the", "validation score (error rate) is best # self.trainer.retain_best_scoring_state_of_updates(updates) self.trainer.retain_best_scoring_state_of_network(network) def _check_train_epoch_results(self, epoch, train_epoch_results):", "# Descent (SGD) with Nesterov momentum, but Lasagne offers plenty more. params =", "loss of NaN' else: return None def _epoch_log(self, epoch_number, delta_time, train_results, val_results, test_results):", "params) # EVALUATION - VALIDATION, TEST, PREDICTION # Create prediction expressions; use deterministic", "', '.join(items) @property def n_upper_layers(self): return len(self.upper_layers) def predict_prob(self, X, batchsize=500, temperature=None, batch_xform_fn=None):", "\"\"\" Constructor - construct an `ImageClassifier` instance given variables for input, target and", "else: updates = updates_fn(loss.mean(), params) # EVALUATION - VALIDATION, TEST, PREDICTION # Create", "classifier, given a network building function and an optional path from which to", "the form of a Lasagne layer :param args: :param kwargs: :return: \"\"\" #", "classifier instance \"\"\" # Prepare Theano variables for inputs and targets input_var =", "params, learning_rate=0.002, momentum=0.9)` \"\"\" self.input_var = input_var self.target_var = target_var self.final_layer = final_layer", "weight decay as well here, see lasagne.regularization. # Create update expressions for training,", "Nesterov momentum, but Lasagne offers plenty more. 
params = lasagne.layers.get_all_params(network, trainable=True) if updates_fn", "self._temperature.get_value() @temperature.setter def temperature(self, value): self._temperature.set_value(lasagne.utils.floatX(1.0 / value)) def __call__(self, x): return lasagne.nonlinearities.softmax(x", "network in the form of a Lasagne layer, given an input variable (a", "return 1.0 / self._temperature.get_value() @temperature.setter def temperature(self, value): self._temperature.set_value(lasagne.utils.floatX(1.0 / value)) def __call__(self,", "def __call__(self, x): return lasagne.nonlinearities.softmax(x * self._temperature) class AbstractClassifier (object): @classmethod def for_model(cls,", "[optional] path from which to load network parameters :return: a classifier instance \"\"\"", "network = network_build_fn(input_var=input_var, **kwargs) # If a parameters path is provided, load them", "target_var) # We could add some weight decay as well here, see lasagne.regularization.", "batch_xform_fn is not None: batch = batch_xform_fn(batch) y_batch = self._predict_prob_fn(batch[0]) y.append(y_batch) y =", "a network in the form of a Lasagne layer, given an input variable", "parameters. :param network_build_fn: network builder function of the form `fn(input_var, **kwargs) -> lasagne_layer`", "*args, **kwargs): \"\"\" Construct a classifier, given a network building function and an", "self._predict_prob_fn = theano.function([input_var], eval_prediction) # Construct a trainer self.trainer = trainer.Trainer() # Provide", "network # TRAINING # Get an expression representing the network's output prediction =", "value): self._temperature.set_value(lasagne.utils.floatX(1.0 / value)) def __call__(self, x): return lasagne.nonlinearities.softmax(x * self._temperature) class AbstractClassifier", "an optimisation technique e.g. 
Nesterov Momentum: `lambda cost, params: lasagne.updates.nesterov_momentum(cost, params, learning_rate=0.002, momentum=0.9)`", "if np.isnan(train_epoch_results).any(): return 'Training loss of NaN' else: return None def _epoch_log(self, epoch_number,", "# Create update expressions for training, i.e., how to modify the # parameters", "parameters to update using an optimisation technique e.g. Nesterov Momentum: `lambda cost, params:", "return cls(input_var, target_var, network, *args, **kwargs) class ImageClassifier (AbstractClassifier): def __init__(self, input_var, target_var,", "# Prepare Theano variables for inputs and targets input_var = T.tensor4('inputs') target_var =", "expression for error count test_err = T.sum(T.neq(T.argmax(eval_prediction, axis=1), target_var), dtype=theano.config.floatX) # Compile a", "but Lasagne offers plenty more. params = lasagne.layers.get_all_params(network, trainable=True) if updates_fn is None:", "Provide with training function self.trainer.train_with(train_batch_fn=self._train_fn, train_epoch_results_check_fn=self._check_train_epoch_results) # Evaluate with evaluation function, the second", "input samples :param X: input samples :param batchsize: [optional] mini-batch size default=500 :param", "smooths the resulting probabilities. 
\"\"\" def __init__(self, temperature=1.0): self._temperature = theano.shared(lasagne.utils.floatX(temperature), 'temperature') @property", "updates = lasagne.updates.nesterov_momentum( loss.mean(), params, learning_rate=0.01, momentum=0.9) else: updates = updates_fn(loss.mean(), params) #", "# Create prediction expressions; use deterministic forward pass (disable # dropout layers) eval_prediction", "in self.trainer.batch_iterator(X, batchsize=batchsize, shuffle=False): if batch_xform_fn is not None: batch = batch_xform_fn(batch) y_batch", "Compile a function performing a training step on a mini-batch (by giving #", "for training, i.e., how to modify the # parameters at each training step.", "temperature(self): return 1.0 / self._temperature.get_value() @temperature.setter def temperature(self, value): self._temperature.set_value(lasagne.utils.floatX(1.0 / value)) def", "Theano variable :param target_var: target variable, a Theano variable :param final_layer: final layer,", "network's output prediction = lasagne.layers.get_output(network) # Create a loss expression for training, i.e.,", "target_var), dtype=theano.config.floatX) # Compile a function performing a training step on a mini-batch", "= np.concatenate(y, axis=0) if temperature is not None: self.softmax.temperature = 1.0 return y", "the second output value - error rate - is used for scoring self.trainer.evaluate_with(eval_batch_fn=self._val_fn,", "temperature=None, batch_xform_fn=None): \"\"\" Predict probabilities for input samples :param X: input samples :param", "multi-class problem, it is the cross-entropy loss): loss = lasagne.objectives.categorical_crossentropy(prediction, target_var) # We", "(for our multi-class problem, it is the cross-entropy loss): loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)", "is the cross-entropy loss): loss = lasagne.objectives.categorical_crossentropy(prediction, target_var) # We could add some", "self.trainer.report(epoch_log_fn=self._epoch_log) 
# Tell the trainer to store parameters when the validation score (error", "that generates update expressions given the cost and the parameters to update using", "print(\"Building model and compiling functions...\") network = network_build_fn(input_var=input_var, **kwargs) # If a parameters", ":param input_var: input variable, a Theano variable :param target_var: target variable, a Theano", "= lasagne.objectives.categorical_crossentropy(eval_prediction, target_var) # Create an expression for error count test_err = T.sum(T.neq(T.argmax(eval_prediction,", "val_results is not None: items.append('val loss={:.6f}, val err={:.2%}'.format(val_results[0], val_results[1])) if test_results is not", "def _epoch_log(self, epoch_number, delta_time, train_results, val_results, test_results): \"\"\" Epoch logging callback, passed to", "a trainer self.trainer = trainer.Trainer() # Provide with training function self.trainer.train_with(train_batch_fn=self._train_fn, train_epoch_results_check_fn=self._check_train_epoch_results) #", "when the validation score (error rate) is best # self.trainer.retain_best_scoring_state_of_updates(updates) self.trainer.retain_best_scoring_state_of_network(network) def _check_train_epoch_results(self,", "training step. Here, we'll use Stochastic Gradient # Descent (SGD) with Nesterov momentum,", "a function of the form `fn(cost, params) -> updates` that generates update expressions", "Construct a classifier instance, given a pre-built network. :param network: pre-built network, in", "to update using an optimisation technique e.g. 
Nesterov Momentum: `lambda cost, params: lasagne.updates.nesterov_momentum(cost,", "Nesterov Momentum: `lambda cost, params: lasagne.updates.nesterov_momentum(cost, params, learning_rate=0.002, momentum=0.9)` \"\"\" self.input_var = input_var", "__init__(self, temperature=1.0): self._temperature = theano.shared(lasagne.utils.floatX(temperature), 'temperature') @property def temperature(self): return 1.0 / self._temperature.get_value()", "-> lasagne_layer` that constructs a network in the form of a Lasagne layer,", "each training step. Here, we'll use Stochastic Gradient # Descent (SGD) with Nesterov", "and error: self._val_fn = theano.function([input_var, target_var], [eval_loss.sum(), test_err]) # Compile a function computing", "'Training loss of NaN' else: return None def _epoch_log(self, epoch_number, delta_time, train_results, val_results,", "i.e., how to modify the # parameters at each training step. Here, we'll", "theano.function([input_var, target_var], loss.sum(), updates=updates) # Compile a function computing the validation loss and", "could add some weight decay as well here, see lasagne.regularization. 
# Create update", "value)) def __call__(self, x): return lasagne.nonlinearities.softmax(x * self._temperature) class AbstractClassifier (object): @classmethod def", "(AbstractClassifier): def __init__(self, input_var, target_var, final_layer, updates_fn=None): \"\"\" Constructor - construct an `ImageClassifier`", "a Lasagne layer :param args: :param kwargs: :return: \"\"\" # Construct input_var =", "lasagne.layers.get_output(network) # Create a loss expression for training, i.e., a scalar objective we", "input_var: input variable, a Theano variable :param target_var: target variable, a Theano variable", "network, *args, **kwargs) @classmethod def for_network(cls, network, *args, **kwargs): \"\"\" Construct a classifier", "= trainer.Trainer() # Provide with training function self.trainer.train_with(train_batch_fn=self._train_fn, train_epoch_results_check_fn=self._check_train_epoch_results) # Evaluate with evaluation", "network, in the form of a Lasagne layer :param args: :param kwargs: :return:", "def for_network(cls, network, *args, **kwargs): \"\"\" Construct a classifier instance, given a pre-built", "self.trainer.evaluate_with(eval_batch_fn=self._val_fn, validation_score_fn=1) # Set the epoch logging function self.trainer.report(epoch_log_fn=self._epoch_log) # Tell the trainer", "network print(\"Building model and compiling functions...\") network = network_build_fn(input_var=input_var, **kwargs) # If a", "the trainer to store parameters when the validation score (error rate) is best", "train_epoch_results): if np.isnan(train_epoch_results).any(): return 'Training loss of NaN' else: return None def _epoch_log(self,", "shuffle=False): if batch_xform_fn is not None: batch = batch_xform_fn(batch) y_batch = self._predict_prob_fn(batch[0]) y.append(y_batch)", "expression representing the network's output prediction = lasagne.layers.get_output(network) # Create a loss expression", "# Create a loss expression for training, i.e., a scalar objective we want", 
"__call__(self, x): return lasagne.nonlinearities.softmax(x * self._temperature) class AbstractClassifier (object): @classmethod def for_model(cls, network_build_fn,", "more. params = lasagne.layers.get_all_params(network, trainable=True) if updates_fn is None: updates = lasagne.updates.nesterov_momentum( loss.mean(),", "Theano variables for inputs and targets input_var = T.tensor4('inputs') target_var = T.ivector('targets') #", "cross-entropy loss): loss = lasagne.objectives.categorical_crossentropy(prediction, target_var) # We could add some weight decay", "loss.mean(), params, learning_rate=0.01, momentum=0.9) else: updates = updates_fn(loss.mean(), params) # EVALUATION - VALIDATION,", "\"\"\" items = [] items.append('Epoch {}/{} took {:.2f}s'.format(epoch_number + 1, self.trainer.num_epochs, delta_time)) items.append('train", "params_path: [optional] path from which to load network parameters :return: a classifier instance", "mini-batch (by giving # the updates dictionary) and returning the corresponding training loss:", "targets input_var = T.tensor4('inputs') target_var = T.ivector('targets') # Build the network print(\"Building model", "network: pre-built network, in the form of a Lasagne layer :param args: :param", "target and a final layer (a Lasagne layer) :param input_var: input variable, a", "a Theano variable :param final_layer: final layer, a Lasagne layer :param updates_fn: [optional]", "Create prediction expressions; use deterministic forward pass (disable # dropout layers) eval_prediction =", "an input variable (a Theano variable) :param params_path: [optional] path from which to", "val_results, test_results): \"\"\" Epoch logging callback, passed to the `self.trainer.report()` \"\"\" items =", "Theano variable) :param params_path: [optional] path from which to load network parameters :return:", "None: utils.load_model(params_path, network) return cls(input_var, target_var, network, *args, **kwargs) @classmethod def for_network(cls, network,", 
"input variable, a Theano variable :param target_var: target variable, a Theano variable :param", "as well here, see lasagne.regularization. # Create update expressions for training, i.e., how", "deterministic=True) # Create evaluation loss expression eval_loss = lasagne.objectives.categorical_crossentropy(eval_prediction, target_var) # Create an", "step on a mini-batch (by giving # the updates dictionary) and returning the", "form `fn(input_var, **kwargs) -> lasagne_layer` that constructs a network in the form of", "trainer, utils class TemperatureSoftmax (object): \"\"\" A softmax function with a temperature setting;", "lasagne.objectives.categorical_crossentropy(prediction, target_var) # We could add some weight decay as well here, see", "(object): @classmethod def for_model(cls, network_build_fn, params_path=None, *args, **kwargs): \"\"\" Construct a classifier, given", "to load network parameters :return: a classifier instance \"\"\" # Prepare Theano variables", "*args, **kwargs) class ImageClassifier (AbstractClassifier): def __init__(self, input_var, target_var, final_layer, updates_fn=None): \"\"\" Constructor", "y = [] if temperature is not None: self.softmax.temperature = temperature for batch", "deterministic forward pass (disable # dropout layers) eval_prediction = lasagne.layers.get_output(network, deterministic=True) # Create", "T.ivector('targets') return cls(input_var, target_var, network, *args, **kwargs) class ImageClassifier (AbstractClassifier): def __init__(self, input_var,", "loss expression for training, i.e., a scalar objective we want # to minimize", "model and compiling functions...\") network = network_build_fn(input_var=input_var, **kwargs) # If a parameters path", "__init__(self, input_var, target_var, final_layer, updates_fn=None): \"\"\" Constructor - construct an `ImageClassifier` instance given", "is provided, load them if params_path is not None: utils.load_model(params_path, network) return cls(input_var,", "probabilities. 
\"\"\" def __init__(self, temperature=1.0): self._temperature = theano.shared(lasagne.utils.floatX(temperature), 'temperature') @property def temperature(self): return", "updates_fn=None): \"\"\" Constructor - construct an `ImageClassifier` instance given variables for input, target", "= [] if temperature is not None: self.softmax.temperature = temperature for batch in", "a classifier instance \"\"\" # Prepare Theano variables for inputs and targets input_var", "T.sum(T.neq(T.argmax(eval_prediction, axis=1), target_var), dtype=theano.config.floatX) # Compile a function performing a training step on", "delta_time)) items.append('train loss={:.6f}'.format(train_results[0])) if val_results is not None: items.append('val loss={:.6f}, val err={:.2%}'.format(val_results[0], val_results[1]))", "validation_score_fn=1) # Set the epoch logging function self.trainer.report(epoch_log_fn=self._epoch_log) # Tell the trainer to", "prediction = lasagne.layers.get_output(network) # Create a loss expression for training, i.e., a scalar", "layer :param updates_fn: [optional] a function of the form `fn(cost, params) -> updates`", "- construct an `ImageClassifier` instance given variables for input, target and a final", "layer :param args: :param kwargs: :return: \"\"\" # Construct input_var = utils.get_network_input_var(network) target_var", "err={:.2%}'.format(test_results[1])) return ', '.join(items) @property def n_upper_layers(self): return len(self.upper_layers) def predict_prob(self, X, batchsize=500,", "variable (a Theano variable) :param params_path: [optional] path from which to load network", "axis=1), target_var), dtype=theano.config.floatX) # Compile a function performing a training step on a", "= T.ivector('targets') return cls(input_var, target_var, network, *args, **kwargs) class ImageClassifier (AbstractClassifier): def __init__(self,", "y = np.concatenate(y, axis=0) if temperature is not None: self.softmax.temperature = 1.0 return", "\"\"\" Predict probabilities for 
input samples :param X: input samples :param batchsize: [optional]", "a classifier, given a network building function and an optional path from which", "best # self.trainer.retain_best_scoring_state_of_updates(updates) self.trainer.retain_best_scoring_state_of_network(network) def _check_train_epoch_results(self, epoch, train_epoch_results): if np.isnan(train_epoch_results).any(): return 'Training loss", "temperature for batch in self.trainer.batch_iterator(X, batchsize=batchsize, shuffle=False): if batch_xform_fn is not None: batch", "variable :param target_var: target variable, a Theano variable :param final_layer: final layer, a", "a classifier instance, given a pre-built network. :param network: pre-built network, in the", "= lasagne.objectives.categorical_crossentropy(prediction, target_var) # We could add some weight decay as well here,", "is not None: self.softmax.temperature = 1.0 return y def predict_cls(self, X, batchsize=500, batch_xform_fn=None):", "input_var, target_var, final_layer, updates_fn=None): \"\"\" Constructor - construct an `ImageClassifier` instance given variables", "loss = lasagne.objectives.categorical_crossentropy(prediction, target_var) # We could add some weight decay as well", "cost, params: lasagne.updates.nesterov_momentum(cost, params, learning_rate=0.002, momentum=0.9)` \"\"\" self.input_var = input_var self.target_var = target_var", "took {:.2f}s'.format(epoch_number + 1, self.trainer.num_epochs, delta_time)) items.append('train loss={:.6f}'.format(train_results[0])) if val_results is not None:", "axis=0) if temperature is not None: self.softmax.temperature = 1.0 return y def predict_cls(self,", "from __future__ import print_function # os.environ['THEANO_FLAGS'] = 'device=gpu1' import numpy as np import", "# parameters at each training step. 
Here, we'll use Stochastic Gradient # Descent", "_epoch_log(self, epoch_number, delta_time, train_results, val_results, test_results): \"\"\" Epoch logging callback, passed to the", "self.softmax.temperature = temperature for batch in self.trainer.batch_iterator(X, batchsize=batchsize, shuffle=False): if batch_xform_fn is not", "(by giving # the updates dictionary) and returning the corresponding training loss: self._train_fn", "score (error rate) is best # self.trainer.retain_best_scoring_state_of_updates(updates) self.trainer.retain_best_scoring_state_of_network(network) def _check_train_epoch_results(self, epoch, train_epoch_results): if", "predict_prob(self, X, batchsize=500, temperature=None, batch_xform_fn=None): \"\"\" Predict probabilities for input samples :param X:", "softmax temperature :return: \"\"\" y = [] if temperature is not None: self.softmax.temperature", "= target_var self.final_layer = final_layer self.softmax = TemperatureSoftmax() network = lasagne.layers.NonlinearityLayer(final_layer, self.softmax) self.network", "using an optimisation technique e.g. Nesterov Momentum: `lambda cost, params: lasagne.updates.nesterov_momentum(cost, params, learning_rate=0.002,", "# TRAINING # Get an expression representing the network's output prediction = lasagne.layers.get_output(network)", "the cross-entropy loss): loss = lasagne.objectives.categorical_crossentropy(prediction, target_var) # We could add some weight", ":param args: :param kwargs: :return: \"\"\" # Construct input_var = utils.get_network_input_var(network) target_var =", "the cost and the parameters to update using an optimisation technique e.g. 
Nesterov", "for_network(cls, network, *args, **kwargs): \"\"\" Construct a classifier instance, given a pre-built network.", "/ value)) def __call__(self, x): return lasagne.nonlinearities.softmax(x * self._temperature) class AbstractClassifier (object): @classmethod", "final layer (a Lasagne layer) :param input_var: input variable, a Theano variable :param", "constructs a network in the form of a Lasagne layer, given an input", "not None: utils.load_model(params_path, network) return cls(input_var, target_var, network, *args, **kwargs) @classmethod def for_network(cls,", "network, *args, **kwargs) class ImageClassifier (AbstractClassifier): def __init__(self, input_var, target_var, final_layer, updates_fn=None): \"\"\"", "if test_results is not None: items.append('test err={:.2%}'.format(test_results[1])) return ', '.join(items) @property def n_upper_layers(self):", "rate) is best # self.trainer.retain_best_scoring_state_of_updates(updates) self.trainer.retain_best_scoring_state_of_network(network) def _check_train_epoch_results(self, epoch, train_epoch_results): if np.isnan(train_epoch_results).any(): return", "forward pass (disable # dropout layers) eval_prediction = lasagne.layers.get_output(network, deterministic=True) # Create evaluation", "plenty more. params = lasagne.layers.get_all_params(network, trainable=True) if updates_fn is None: updates = lasagne.updates.nesterov_momentum(", "self.trainer.num_epochs, delta_time)) items.append('train loss={:.6f}'.format(train_results[0])) if val_results is not None: items.append('val loss={:.6f}, val err={:.2%}'.format(val_results[0],", "*args, **kwargs): \"\"\" Construct a classifier instance, given a pre-built network. 
:param network:", "Evaluate with evaluation function, the second output value - error rate - is", "batchsize: [optional] mini-batch size default=500 :param temperature: [optional] softmax temperature :return: \"\"\" y", "training, i.e., a scalar objective we want # to minimize (for our multi-class", "network building function and an optional path from which to load parameters. :param", "# Compile a function computing the validation loss and error: self._val_fn = theano.function([input_var,", "a parameters path is provided, load them if params_path is not None: utils.load_model(params_path,", "construct an `ImageClassifier` instance given variables for input, target and a final layer", "Descent (SGD) with Nesterov momentum, but Lasagne offers plenty more. params = lasagne.layers.get_all_params(network,", "cls(input_var, target_var, network, *args, **kwargs) class ImageClassifier (AbstractClassifier): def __init__(self, input_var, target_var, final_layer,", "def predict_cls(self, X, batchsize=500, batch_xform_fn=None): prob = self.predict_prob(X, batchsize=batchsize, batch_xform_fn=batch_xform_fn) return np.argmax(prob, axis=1)", "y.append(y_batch) y = np.concatenate(y, axis=0) if temperature is not None: self.softmax.temperature = 1.0", "is None: updates = lasagne.updates.nesterov_momentum( loss.mean(), params, learning_rate=0.01, momentum=0.9) else: updates = updates_fn(loss.mean(),", ":param temperature: [optional] softmax temperature :return: \"\"\" y = [] if temperature is", "layers) eval_prediction = lasagne.layers.get_output(network, deterministic=True) # Create evaluation loss expression eval_loss = lasagne.objectives.categorical_crossentropy(eval_prediction,", "of the form `fn(cost, params) -> updates` that generates update expressions given the", "lasagne.objectives.categorical_crossentropy(eval_prediction, target_var) # Create an expression for error count test_err = T.sum(T.neq(T.argmax(eval_prediction, axis=1),", "momentum=0.9)` \"\"\" self.input_var = 
input_var self.target_var = target_var self.final_layer = final_layer self.softmax =", "for_model(cls, network_build_fn, params_path=None, *args, **kwargs): \"\"\" Construct a classifier, given a network building", "here, see lasagne.regularization. # Create update expressions for training, i.e., how to modify", "# Get an expression representing the network's output prediction = lasagne.layers.get_output(network) # Create", "None: self.softmax.temperature = temperature for batch in self.trainer.batch_iterator(X, batchsize=batchsize, shuffle=False): if batch_xform_fn is", "a temperature setting; increasing it smooths the resulting probabilities. \"\"\" def __init__(self, temperature=1.0):", "theano.function([input_var], eval_prediction) # Construct a trainer self.trainer = trainer.Trainer() # Provide with training", "Create update expressions for training, i.e., how to modify the # parameters at", "an `ImageClassifier` instance given variables for input, target and a final layer (a", "use deterministic forward pass (disable # dropout layers) eval_prediction = lasagne.layers.get_output(network, deterministic=True) #", "value - error rate - is used for scoring self.trainer.evaluate_with(eval_batch_fn=self._val_fn, validation_score_fn=1) # Set", "variable, a Theano variable :param final_layer: final layer, a Lasagne layer :param updates_fn:", "target_var], [eval_loss.sum(), test_err]) # Compile a function computing the predicted probability self._predict_prob_fn =", "trainer.Trainer() # Provide with training function self.trainer.train_with(train_batch_fn=self._train_fn, train_epoch_results_check_fn=self._check_train_epoch_results) # Evaluate with evaluation function,", "to modify the # parameters at each training step. Here, we'll use Stochastic", "generates update expressions given the cost and the parameters to update using an", "temperature setting; increasing it smooths the resulting probabilities. 
\"\"\" def __init__(self, temperature=1.0): self._temperature", "function and an optional path from which to load parameters. :param network_build_fn: network", "self.final_layer = final_layer self.softmax = TemperatureSoftmax() network = lasagne.layers.NonlinearityLayer(final_layer, self.softmax) self.network = network", "final_layer self.softmax = TemperatureSoftmax() network = lasagne.layers.NonlinearityLayer(final_layer, self.softmax) self.network = network # TRAINING", "# EVALUATION - VALIDATION, TEST, PREDICTION # Create prediction expressions; use deterministic forward", "`fn(input_var, **kwargs) -> lasagne_layer` that constructs a network in the form of a", "Construct input_var = utils.get_network_input_var(network) target_var = T.ivector('targets') return cls(input_var, target_var, network, *args, **kwargs)", "lasagne.updates.nesterov_momentum( loss.mean(), params, learning_rate=0.01, momentum=0.9) else: updates = updates_fn(loss.mean(), params) # EVALUATION -", "pre-built network, in the form of a Lasagne layer :param args: :param kwargs:", "# Provide with training function self.trainer.train_with(train_batch_fn=self._train_fn, train_epoch_results_check_fn=self._check_train_epoch_results) # Evaluate with evaluation function, the", "items.append('Epoch {}/{} took {:.2f}s'.format(epoch_number + 1, self.trainer.num_epochs, delta_time)) items.append('train loss={:.6f}'.format(train_results[0])) if val_results is", "momentum=0.9) else: updates = updates_fn(loss.mean(), params) # EVALUATION - VALIDATION, TEST, PREDICTION #", "updates_fn is None: updates = lasagne.updates.nesterov_momentum( loss.mean(), params, learning_rate=0.01, momentum=0.9) else: updates =", ":return: \"\"\" y = [] if temperature is not None: self.softmax.temperature = temperature", "passed to the `self.trainer.report()` \"\"\" items = [] items.append('Epoch {}/{} took {:.2f}s'.format(epoch_number +", "else: return None def _epoch_log(self, epoch_number, delta_time, train_results, 
val_results, test_results): \"\"\" Epoch logging", "input variable (a Theano variable) :param params_path: [optional] path from which to load", "target_var self.final_layer = final_layer self.softmax = TemperatureSoftmax() network = lasagne.layers.NonlinearityLayer(final_layer, self.softmax) self.network =", "Construct a classifier, given a network building function and an optional path from", "target_var, network, *args, **kwargs) class ImageClassifier (AbstractClassifier): def __init__(self, input_var, target_var, final_layer, updates_fn=None):", "[] if temperature is not None: self.softmax.temperature = temperature for batch in self.trainer.batch_iterator(X,", "A softmax function with a temperature setting; increasing it smooths the resulting probabilities.", "load parameters. :param network_build_fn: network builder function of the form `fn(input_var, **kwargs) ->", "params_path is not None: utils.load_model(params_path, network) return cls(input_var, target_var, network, *args, **kwargs) @classmethod", "val_results[1])) if test_results is not None: items.append('test err={:.2%}'.format(test_results[1])) return ', '.join(items) @property def", "self.softmax.temperature = 1.0 return y def predict_cls(self, X, batchsize=500, batch_xform_fn=None): prob = self.predict_prob(X,", "self.softmax) self.network = network # TRAINING # Get an expression representing the network's", "\"\"\" Construct a classifier instance, given a pre-built network. 
:param network: pre-built network,", "`fn(cost, params) -> updates` that generates update expressions given the cost and the", "i.e., a scalar objective we want # to minimize (for our multi-class problem,", "temperature(self, value): self._temperature.set_value(lasagne.utils.floatX(1.0 / value)) def __call__(self, x): return lasagne.nonlinearities.softmax(x * self._temperature) class", "Stochastic Gradient # Descent (SGD) with Nesterov momentum, but Lasagne offers plenty more.", "Get an expression representing the network's output prediction = lasagne.layers.get_output(network) # Create a", "def __init__(self, input_var, target_var, final_layer, updates_fn=None): \"\"\" Constructor - construct an `ImageClassifier` instance" ]
[ "as Root from mugimugi_client_api_entity.enum import ElementPrefix, ItemType from .abstract import SearchItem @dataclass class", "ClassVar from mugimugi_client_api_entity import Parody from mugimugi_client_api_entity import SearchParody as Root from mugimugi_client_api_entity.enum", "mugimugi_client_api_entity.enum import ElementPrefix, ItemType from .abstract import SearchItem @dataclass class SearchParody(SearchItem): ROOT: ClassVar[type]", "import ElementPrefix, ItemType from .abstract import SearchItem @dataclass class SearchParody(SearchItem): ROOT: ClassVar[type] =", "import Parody from mugimugi_client_api_entity import SearchParody as Root from mugimugi_client_api_entity.enum import ElementPrefix, ItemType", "mugimugi_client_api_entity import SearchParody as Root from mugimugi_client_api_entity.enum import ElementPrefix, ItemType from .abstract import", "ItemType from .abstract import SearchItem @dataclass class SearchParody(SearchItem): ROOT: ClassVar[type] = Root TYPE:", "ElementPrefix, ItemType from .abstract import SearchItem @dataclass class SearchParody(SearchItem): ROOT: ClassVar[type] = Root", "from mugimugi_client_api_entity import SearchParody as Root from mugimugi_client_api_entity.enum import ElementPrefix, ItemType from .abstract", "SearchItem @dataclass class SearchParody(SearchItem): ROOT: ClassVar[type] = Root TYPE: ClassVar[ItemType] = ItemType.PARODY PREFIX:", "import ClassVar from mugimugi_client_api_entity import Parody from mugimugi_client_api_entity import SearchParody as Root from", "typing import ClassVar from mugimugi_client_api_entity import Parody from mugimugi_client_api_entity import SearchParody as Root", "import SearchParody as Root from mugimugi_client_api_entity.enum import ElementPrefix, ItemType from .abstract import SearchItem", "Root from mugimugi_client_api_entity.enum import ElementPrefix, ItemType from .abstract import SearchItem @dataclass class SearchParody(SearchItem):", "from typing import ClassVar from 
mugimugi_client_api_entity import Parody from mugimugi_client_api_entity import SearchParody as", "dataclass from typing import ClassVar from mugimugi_client_api_entity import Parody from mugimugi_client_api_entity import SearchParody", "from mugimugi_client_api_entity import Parody from mugimugi_client_api_entity import SearchParody as Root from mugimugi_client_api_entity.enum import", "@dataclass class SearchParody(SearchItem): ROOT: ClassVar[type] = Root TYPE: ClassVar[ItemType] = ItemType.PARODY PREFIX: ClassVar[ElementPrefix]", "SearchParody(SearchItem): ROOT: ClassVar[type] = Root TYPE: ClassVar[ItemType] = ItemType.PARODY PREFIX: ClassVar[ElementPrefix] = Parody.PREFIX", "Parody from mugimugi_client_api_entity import SearchParody as Root from mugimugi_client_api_entity.enum import ElementPrefix, ItemType from", ".abstract import SearchItem @dataclass class SearchParody(SearchItem): ROOT: ClassVar[type] = Root TYPE: ClassVar[ItemType] =", "dataclasses import dataclass from typing import ClassVar from mugimugi_client_api_entity import Parody from mugimugi_client_api_entity", "mugimugi_client_api_entity import Parody from mugimugi_client_api_entity import SearchParody as Root from mugimugi_client_api_entity.enum import ElementPrefix,", "from mugimugi_client_api_entity.enum import ElementPrefix, ItemType from .abstract import SearchItem @dataclass class SearchParody(SearchItem): ROOT:", "from dataclasses import dataclass from typing import ClassVar from mugimugi_client_api_entity import Parody from", "import dataclass from typing import ClassVar from mugimugi_client_api_entity import Parody from mugimugi_client_api_entity import", "<gh_stars>0 from dataclasses import dataclass from typing import ClassVar from mugimugi_client_api_entity import Parody", "class SearchParody(SearchItem): ROOT: ClassVar[type] = Root TYPE: ClassVar[ItemType] = ItemType.PARODY PREFIX: ClassVar[ElementPrefix] =", "from .abstract import SearchItem @dataclass class 
SearchParody(SearchItem): ROOT: ClassVar[type] = Root TYPE: ClassVar[ItemType]", "SearchParody as Root from mugimugi_client_api_entity.enum import ElementPrefix, ItemType from .abstract import SearchItem @dataclass", "import SearchItem @dataclass class SearchParody(SearchItem): ROOT: ClassVar[type] = Root TYPE: ClassVar[ItemType] = ItemType.PARODY" ]
[ "AdaptiveSignalProcesser(AdaptiveSignalProcesserABC): def __init__(self, model, shape, mu, w_init, lambda_): self.method = Method.methods[model] self.mu =", "same length. But now, 'd' has {len_d} and 'x' has {len_x}.\" ) w_delta", "import AdaptiveSignalProcesserABC class AdaptiveSignalProcesser(AdaptiveSignalProcesserABC): def __init__(self, model, shape, mu, w_init, lambda_): self.method =", "adopt(self, d, x): len_d, len_x = len(d), len(x) if len_d != len_x: raise", "lambda_ def adopt(self, d, x): len_d, len_x = len(d), len(x) if len_d !=", "now, 'd' has {len_d} and 'x' has {len_x}.\" ) w_delta = self.method(d, x,", "shape) self.lambda_ = lambda_ def adopt(self, d, x): len_d, len_x = len(d), len(x)", "len_d != len_x: raise ValueError( f\"2 arrays should have same length. But now,", "now, 'd' has {len_d} and 'x' has {len_x}.\" ) self.method(d, x, self.w, self.mu,", "x): len_d, len_x = len(d), len(x) if len_d != len_x: raise ValueError( f\"2", "from .interface.filter import AdaptiveSignalProcesserABC class AdaptiveSignalProcesser(AdaptiveSignalProcesserABC): def __init__(self, model, shape, mu, w_init, lambda_):", "np from .domain.method import Method, init_w from .interface.filter import AdaptiveSignalProcesserABC class AdaptiveSignalProcesser(AdaptiveSignalProcesserABC): def", "self.mu = mu self.w = init_w(w_init, shape) self.lambda_ = lambda_ def adopt(self, d,", "def adopt(self, d, x): len_d, len_x = len(d), len(x) if len_d != len_x:", "mu, w_init, lambda_): self.method = Method.methods[model] self.mu = mu self.w = init_w(w_init, shape)", "len_x = len(d), len(x) if len_d != len_x: raise ValueError( f\"2 arrays should", "init_w(w_init, shape) self.lambda_ = lambda_ def adopt(self, d, x): len_d, len_x = len(d),", "import Method, init_w from .interface.filter import AdaptiveSignalProcesserABC class AdaptiveSignalProcesser(AdaptiveSignalProcesserABC): def __init__(self, model, shape,", "Method, init_w from .interface.filter import AdaptiveSignalProcesserABC class 
AdaptiveSignalProcesser(AdaptiveSignalProcesserABC): def __init__(self, model, shape, mu,", "update(self, d, x): len_d, len_x = len(d), len(x) if len_d != len_x: raise", "self.lambda_ = lambda_ def adopt(self, d, x): len_d, len_x = len(d), len(x) if", "'x' has {len_x}.\" ) self.method(d, x, self.w, self.mu, self.lambda_) def update(self, d, x):", "has {len_d} and 'x' has {len_x}.\" ) self.method(d, x, self.w, self.mu, self.lambda_) def", "{len_d} and 'x' has {len_x}.\" ) w_delta = self.method(d, x, self.w, self.mu, self.lambda_)", "should have same length. But now, 'd' has {len_d} and 'x' has {len_x}.\"", "arrays should have same length. But now, 'd' has {len_d} and 'x' has", "has {len_x}.\" ) w_delta = self.method(d, x, self.w, self.mu, self.lambda_) self.w = self.w", "ValueError( f\"2 arrays should have same length. But now, 'd' has {len_d} and", ") self.method(d, x, self.w, self.mu, self.lambda_) def update(self, d, x): len_d, len_x =", "'d' has {len_d} and 'x' has {len_x}.\" ) w_delta = self.method(d, x, self.w,", "init_w from .interface.filter import AdaptiveSignalProcesserABC class AdaptiveSignalProcesser(AdaptiveSignalProcesserABC): def __init__(self, model, shape, mu, w_init,", "len_x: raise ValueError( f\"2 arrays should have same length. But now, 'd' has", "{len_d} and 'x' has {len_x}.\" ) self.method(d, x, self.w, self.mu, self.lambda_) def update(self,", "length. But now, 'd' has {len_d} and 'x' has {len_x}.\" ) w_delta =", "f\"2 arrays should have same length. 
But now, 'd' has {len_d} and 'x'", "'x' has {len_x}.\" ) w_delta = self.method(d, x, self.w, self.mu, self.lambda_) self.w =", "has {len_x}.\" ) self.method(d, x, self.w, self.mu, self.lambda_) def update(self, d, x): len_d,", "len(x) if len_d != len_x: raise ValueError( f\"2 arrays should have same length.", "x, self.w, self.mu, self.lambda_) def update(self, d, x): len_d, len_x = len(d), len(x)", "numpy as np from .domain.method import Method, init_w from .interface.filter import AdaptiveSignalProcesserABC class", ") w_delta = self.method(d, x, self.w, self.mu, self.lambda_) self.w = self.w + w_delta", ".domain.method import Method, init_w from .interface.filter import AdaptiveSignalProcesserABC class AdaptiveSignalProcesser(AdaptiveSignalProcesserABC): def __init__(self, model,", "as np from .domain.method import Method, init_w from .interface.filter import AdaptiveSignalProcesserABC class AdaptiveSignalProcesser(AdaptiveSignalProcesserABC):", "= lambda_ def adopt(self, d, x): len_d, len_x = len(d), len(x) if len_d", "self.w, self.mu, self.lambda_) def update(self, d, x): len_d, len_x = len(d), len(x) if", "raise ValueError( f\"2 arrays should have same length. But now, 'd' has {len_d}", "from .domain.method import Method, init_w from .interface.filter import AdaptiveSignalProcesserABC class AdaptiveSignalProcesser(AdaptiveSignalProcesserABC): def __init__(self,", "have same length. But now, 'd' has {len_d} and 'x' has {len_x}.\" )", "same length. 
But now, 'd' has {len_d} and 'x' has {len_x}.\" ) self.method(d,", "w_init, lambda_): self.method = Method.methods[model] self.mu = mu self.w = init_w(w_init, shape) self.lambda_", "Method.methods[model] self.mu = mu self.w = init_w(w_init, shape) self.lambda_ = lambda_ def adopt(self,", "shape, mu, w_init, lambda_): self.method = Method.methods[model] self.mu = mu self.w = init_w(w_init,", "def __init__(self, model, shape, mu, w_init, lambda_): self.method = Method.methods[model] self.mu = mu", "has {len_d} and 'x' has {len_x}.\" ) w_delta = self.method(d, x, self.w, self.mu,", "lambda_): self.method = Method.methods[model] self.mu = mu self.w = init_w(w_init, shape) self.lambda_ =", "def update(self, d, x): len_d, len_x = len(d), len(x) if len_d != len_x:", "But now, 'd' has {len_d} and 'x' has {len_x}.\" ) w_delta = self.method(d,", "= Method.methods[model] self.mu = mu self.w = init_w(w_init, shape) self.lambda_ = lambda_ def", "self.method = Method.methods[model] self.mu = mu self.w = init_w(w_init, shape) self.lambda_ = lambda_", "= self.method(d, x, self.w, self.mu, self.lambda_) self.w = self.w + w_delta return self.w", "len_d, len_x = len(d), len(x) if len_d != len_x: raise ValueError( f\"2 arrays", "self.w = init_w(w_init, shape) self.lambda_ = lambda_ def adopt(self, d, x): len_d, len_x", ".interface.filter import AdaptiveSignalProcesserABC class AdaptiveSignalProcesser(AdaptiveSignalProcesserABC): def __init__(self, model, shape, mu, w_init, lambda_): self.method", "{len_x}.\" ) self.method(d, x, self.w, self.mu, self.lambda_) def update(self, d, x): len_d, len_x", "AdaptiveSignalProcesserABC class AdaptiveSignalProcesser(AdaptiveSignalProcesserABC): def __init__(self, model, shape, mu, w_init, lambda_): self.method = Method.methods[model]", "and 'x' has {len_x}.\" ) w_delta = self.method(d, x, self.w, self.mu, self.lambda_) self.w", "!= len_x: raise ValueError( f\"2 arrays should have same length. 
But now, 'd'", "if len_d != len_x: raise ValueError( f\"2 arrays should have same length. But", "model, shape, mu, w_init, lambda_): self.method = Method.methods[model] self.mu = mu self.w =", "= mu self.w = init_w(w_init, shape) self.lambda_ = lambda_ def adopt(self, d, x):", "'d' has {len_d} and 'x' has {len_x}.\" ) self.method(d, x, self.w, self.mu, self.lambda_)", "self.mu, self.lambda_) def update(self, d, x): len_d, len_x = len(d), len(x) if len_d", "len(d), len(x) if len_d != len_x: raise ValueError( f\"2 arrays should have same", "class AdaptiveSignalProcesser(AdaptiveSignalProcesserABC): def __init__(self, model, shape, mu, w_init, lambda_): self.method = Method.methods[model] self.mu", "__init__(self, model, shape, mu, w_init, lambda_): self.method = Method.methods[model] self.mu = mu self.w", "mu self.w = init_w(w_init, shape) self.lambda_ = lambda_ def adopt(self, d, x): len_d,", "length. But now, 'd' has {len_d} and 'x' has {len_x}.\" ) self.method(d, x,", "= len(d), len(x) if len_d != len_x: raise ValueError( f\"2 arrays should have", "self.method(d, x, self.w, self.mu, self.lambda_) def update(self, d, x): len_d, len_x = len(d),", "and 'x' has {len_x}.\" ) self.method(d, x, self.w, self.mu, self.lambda_) def update(self, d,", "d, x): len_d, len_x = len(d), len(x) if len_d != len_x: raise ValueError(", "{len_x}.\" ) w_delta = self.method(d, x, self.w, self.mu, self.lambda_) self.w = self.w +", "import numpy as np from .domain.method import Method, init_w from .interface.filter import AdaptiveSignalProcesserABC", "= init_w(w_init, shape) self.lambda_ = lambda_ def adopt(self, d, x): len_d, len_x =", "But now, 'd' has {len_d} and 'x' has {len_x}.\" ) self.method(d, x, self.w,", "self.lambda_) def update(self, d, x): len_d, len_x = len(d), len(x) if len_d !=", "<reponame>borley1211/aspy<filename>src/adasigpy/adasigpy.py import numpy as np from .domain.method import Method, init_w from .interface.filter import", "w_delta = self.method(d, x, self.w, 
self.mu, self.lambda_) self.w = self.w + w_delta return" ]
[ "sheet in wb.worksheets: for column_cells in sheet.columns: length = (max(len(self.as_text(cell.value)) for cell in", "current_readme_row += 1 data_sheet.cell(column=current_data_column, row=1, value=column_name) if isinstance(validator, OntologyValidator): if not ontology_sheet: ontology_sheet", "if data_validation: data_sheet.add_data_validation(data_validation) current_data_column += 1 for sheet in wb.worksheets: for column_cells in", "{} for column_name, validator in self.validators.items(): readme_sheet.cell(column=1, row=current_readme_row, value=validator.describe(column_name)) current_readme_row += 1 data_sheet.cell(column=current_data_column,", "__init__( self, output, **kwargs ): super(Checkerator, self).__init__(**kwargs) self.output = output def generate(self): wb", "256: if not set_sheet: set_sheet = wb.create_sheet(title=\"Sets\") data_validation = validator.generate(get_column_letter(current_data_column), column_name, get_column_letter(current_set_column), set_sheet)", "1 current_set_column = 1 current_readme_row = 1 readme_sheet = wb.active readme_sheet.title = \"README\"", "sum(len(i) for i in validator.valid_values) + len(validator.valid_values) - 1 > 256: if not", "super(Checkerator, self).__init__(**kwargs) self.output = output def generate(self): wb = Workbook() current_data_column = 1", "validator.generate(get_column_letter(current_data_column), column_name, get_column_letter(current_set_column), set_sheet) current_set_column += 1 else: data_validation = validator.generate(get_column_letter(current_data_column)) set_columns[column_name] =", "openpyxl import Workbook from checkcel.validators import OntologyValidator, SetValidator, LinkedSetValidator from openpyxl.utils import get_column_letter", "import Workbook from checkcel.validators import OntologyValidator, SetValidator, LinkedSetValidator from openpyxl.utils import get_column_letter from", "wb = Workbook() current_data_column = 1 current_ontology_column = 1 current_set_column = 1 
current_readme_row", "LinkedSetValidator): if not set_sheet: set_sheet = wb.create_sheet(title=\"Sets\") data_validation = validator.generate(get_column_letter(current_data_column), set_columns, column_name, get_column_letter(current_set_column),", "length = (max(len(self.as_text(cell.value)) for cell in column_cells) + 2) * 1.2 sheet.column_dimensions[get_column_letter(column_cells[0].column)].width =", "openpyxl.utils import get_column_letter from checkcel.checkplate import Checkplate class Checkerator(Checkplate): def __init__( self, output,", "Checkerator(Checkplate): def __init__( self, output, **kwargs ): super(Checkerator, self).__init__(**kwargs) self.output = output def", "- 1 > 256: if not set_sheet: set_sheet = wb.create_sheet(title=\"Sets\") data_validation = validator.generate(get_column_letter(current_data_column),", "set_sheet) current_set_column += 1 else: data_validation = validator.generate(get_column_letter(current_data_column)) set_columns[column_name] = get_column_letter(current_data_column) elif isinstance(validator,", "validator.generate(get_column_letter(current_data_column)) set_columns[column_name] = get_column_letter(current_data_column) elif isinstance(validator, LinkedSetValidator): if not set_sheet: set_sheet = wb.create_sheet(title=\"Sets\")", "not set_sheet: set_sheet = wb.create_sheet(title=\"Sets\") data_validation = validator.generate(get_column_letter(current_data_column), set_columns, column_name, get_column_letter(current_set_column), set_sheet, wb)", "column_name, get_column_letter(current_set_column), set_sheet, wb) current_set_column += 1 set_columns[column_name] = get_column_letter(current_data_column) else: data_validation =", "data_validation: data_sheet.add_data_validation(data_validation) current_data_column += 1 for sheet in wb.worksheets: for column_cells in sheet.columns:", "set_sheet = wb.create_sheet(title=\"Sets\") data_validation = validator.generate(get_column_letter(current_data_column), column_name, 
get_column_letter(current_set_column), set_sheet) current_set_column += 1 else:", "readme_sheet = wb.active readme_sheet.title = \"README\" data_sheet = wb.create_sheet(title=\"Data\") ontology_sheet = None set_sheet", "cell in column_cells) + 2) * 1.2 sheet.column_dimensions[get_column_letter(column_cells[0].column)].width = length wb.save(filename=self.output) def as_text(self,", "OntologyValidator, SetValidator, LinkedSetValidator from openpyxl.utils import get_column_letter from checkcel.checkplate import Checkplate class Checkerator(Checkplate):", "= wb.active readme_sheet.title = \"README\" data_sheet = wb.create_sheet(title=\"Data\") ontology_sheet = None set_sheet =", "separators must be < 256 if sum(len(i) for i in validator.valid_values) + len(validator.valid_values)", "from openpyxl.utils import get_column_letter from checkcel.checkplate import Checkplate class Checkerator(Checkplate): def __init__( self,", "None set_sheet = None set_columns = {} for column_name, validator in self.validators.items(): readme_sheet.cell(column=1,", "if not set_sheet: set_sheet = wb.create_sheet(title=\"Sets\") data_validation = validator.generate(get_column_letter(current_data_column), column_name, get_column_letter(current_set_column), set_sheet) current_set_column", "from checkcel.validators import OntologyValidator, SetValidator, LinkedSetValidator from openpyxl.utils import get_column_letter from checkcel.checkplate import", "get_column_letter(current_data_column) elif isinstance(validator, LinkedSetValidator): if not set_sheet: set_sheet = wb.create_sheet(title=\"Sets\") data_validation = validator.generate(get_column_letter(current_data_column),", "from checkcel.checkplate import Checkplate class Checkerator(Checkplate): def __init__( self, output, **kwargs ): super(Checkerator,", "= \"README\" data_sheet = wb.create_sheet(title=\"Data\") ontology_sheet = None set_sheet = None set_columns =", "data_sheet = wb.create_sheet(title=\"Data\") ontology_sheet = None 
set_sheet = None set_columns = {} for", "for sheet in wb.worksheets: for column_cells in sheet.columns: length = (max(len(self.as_text(cell.value)) for cell", "size, including separators must be < 256 if sum(len(i) for i in validator.valid_values)", "current_ontology_column += 1 elif isinstance(validator, SetValidator): # Total size, including separators must be", "length wb.save(filename=self.output) def as_text(self, value): return str(value) if value is not None else", "from openpyxl import Workbook from checkcel.validators import OntologyValidator, SetValidator, LinkedSetValidator from openpyxl.utils import", "row=current_readme_row, value=validator.describe(column_name)) current_readme_row += 1 data_sheet.cell(column=current_data_column, row=1, value=column_name) if isinstance(validator, OntologyValidator): if not", "if sum(len(i) for i in validator.valid_values) + len(validator.valid_values) - 1 > 256: if", "current_data_column = 1 current_ontology_column = 1 current_set_column = 1 current_readme_row = 1 readme_sheet", "= 1 current_readme_row = 1 readme_sheet = wb.active readme_sheet.title = \"README\" data_sheet =", "checkcel.validators import OntologyValidator, SetValidator, LinkedSetValidator from openpyxl.utils import get_column_letter from checkcel.checkplate import Checkplate", "1 current_readme_row = 1 readme_sheet = wb.active readme_sheet.title = \"README\" data_sheet = wb.create_sheet(title=\"Data\")", "> 256: if not set_sheet: set_sheet = wb.create_sheet(title=\"Sets\") data_validation = validator.generate(get_column_letter(current_data_column), column_name, get_column_letter(current_set_column),", "wb.create_sheet(title=\"Ontologies\") data_validation = validator.generate(get_column_letter(current_data_column), get_column_letter(current_ontology_column), ontology_sheet) current_ontology_column += 1 elif isinstance(validator, SetValidator): #", "OntologyValidator): if not ontology_sheet: ontology_sheet = wb.create_sheet(title=\"Ontologies\") 
data_validation = validator.generate(get_column_letter(current_data_column), get_column_letter(current_ontology_column), ontology_sheet) current_ontology_column", "set_sheet: set_sheet = wb.create_sheet(title=\"Sets\") data_validation = validator.generate(get_column_letter(current_data_column), column_name, get_column_letter(current_set_column), set_sheet) current_set_column += 1", "isinstance(validator, LinkedSetValidator): if not set_sheet: set_sheet = wb.create_sheet(title=\"Sets\") data_validation = validator.generate(get_column_letter(current_data_column), set_columns, column_name,", "column_cells in sheet.columns: length = (max(len(self.as_text(cell.value)) for cell in column_cells) + 2) *", "256 if sum(len(i) for i in validator.valid_values) + len(validator.valid_values) - 1 > 256:", "in sheet.columns: length = (max(len(self.as_text(cell.value)) for cell in column_cells) + 2) * 1.2", "current_data_column += 1 for sheet in wb.worksheets: for column_cells in sheet.columns: length =", "= wb.create_sheet(title=\"Sets\") data_validation = validator.generate(get_column_letter(current_data_column), set_columns, column_name, get_column_letter(current_set_column), set_sheet, wb) current_set_column += 1", "= output def generate(self): wb = Workbook() current_data_column = 1 current_ontology_column = 1", "validator.valid_values) + len(validator.valid_values) - 1 > 256: if not set_sheet: set_sheet = wb.create_sheet(title=\"Sets\")", "set_columns[column_name] = get_column_letter(current_data_column) elif isinstance(validator, LinkedSetValidator): if not set_sheet: set_sheet = wb.create_sheet(title=\"Sets\") data_validation", "get_column_letter(current_set_column), set_sheet) current_set_column += 1 else: data_validation = validator.generate(get_column_letter(current_data_column)) set_columns[column_name] = get_column_letter(current_data_column) elif", "= Workbook() current_data_column = 1 current_ontology_column = 1 current_set_column = 1 current_readme_row =", "= 
validator.generate(get_column_letter(current_data_column), get_column_letter(current_ontology_column), ontology_sheet) current_ontology_column += 1 elif isinstance(validator, SetValidator): # Total size,", "must be < 256 if sum(len(i) for i in validator.valid_values) + len(validator.valid_values) -", "value=validator.describe(column_name)) current_readme_row += 1 data_sheet.cell(column=current_data_column, row=1, value=column_name) if isinstance(validator, OntologyValidator): if not ontology_sheet:", "readme_sheet.cell(column=1, row=current_readme_row, value=validator.describe(column_name)) current_readme_row += 1 data_sheet.cell(column=current_data_column, row=1, value=column_name) if isinstance(validator, OntologyValidator): if", "isinstance(validator, OntologyValidator): if not ontology_sheet: ontology_sheet = wb.create_sheet(title=\"Ontologies\") data_validation = validator.generate(get_column_letter(current_data_column), get_column_letter(current_ontology_column), ontology_sheet)", "else: data_validation = validator.generate(get_column_letter(current_data_column)) if data_validation: data_sheet.add_data_validation(data_validation) current_data_column += 1 for sheet in", "output def generate(self): wb = Workbook() current_data_column = 1 current_ontology_column = 1 current_set_column", "(max(len(self.as_text(cell.value)) for cell in column_cells) + 2) * 1.2 sheet.column_dimensions[get_column_letter(column_cells[0].column)].width = length wb.save(filename=self.output)", "1 elif isinstance(validator, SetValidator): # Total size, including separators must be < 256", "checkcel.checkplate import Checkplate class Checkerator(Checkplate): def __init__( self, output, **kwargs ): super(Checkerator, self).__init__(**kwargs)", "data_validation = validator.generate(get_column_letter(current_data_column), set_columns, column_name, get_column_letter(current_set_column), set_sheet, wb) current_set_column += 1 set_columns[column_name] =", "SetValidator, LinkedSetValidator from 
openpyxl.utils import get_column_letter from checkcel.checkplate import Checkplate class Checkerator(Checkplate): def", "len(validator.valid_values) - 1 > 256: if not set_sheet: set_sheet = wb.create_sheet(title=\"Sets\") data_validation =", "1 readme_sheet = wb.active readme_sheet.title = \"README\" data_sheet = wb.create_sheet(title=\"Data\") ontology_sheet = None", "+ len(validator.valid_values) - 1 > 256: if not set_sheet: set_sheet = wb.create_sheet(title=\"Sets\") data_validation", "+ 2) * 1.2 sheet.column_dimensions[get_column_letter(column_cells[0].column)].width = length wb.save(filename=self.output) def as_text(self, value): return str(value)", "validator.generate(get_column_letter(current_data_column)) if data_validation: data_sheet.add_data_validation(data_validation) current_data_column += 1 for sheet in wb.worksheets: for column_cells", "set_columns[column_name] = get_column_letter(current_data_column) else: data_validation = validator.generate(get_column_letter(current_data_column)) if data_validation: data_sheet.add_data_validation(data_validation) current_data_column += 1", "for i in validator.valid_values) + len(validator.valid_values) - 1 > 256: if not set_sheet:", "wb) current_set_column += 1 set_columns[column_name] = get_column_letter(current_data_column) else: data_validation = validator.generate(get_column_letter(current_data_column)) if data_validation:", "get_column_letter from checkcel.checkplate import Checkplate class Checkerator(Checkplate): def __init__( self, output, **kwargs ):", "self, output, **kwargs ): super(Checkerator, self).__init__(**kwargs) self.output = output def generate(self): wb =", "if isinstance(validator, OntologyValidator): if not ontology_sheet: ontology_sheet = wb.create_sheet(title=\"Ontologies\") data_validation = validator.generate(get_column_letter(current_data_column), get_column_letter(current_ontology_column),", "< 256 if sum(len(i) for i in validator.valid_values) + len(validator.valid_values) - 1 >", 
"sheet.column_dimensions[get_column_letter(column_cells[0].column)].width = length wb.save(filename=self.output) def as_text(self, value): return str(value) if value is not", "validator in self.validators.items(): readme_sheet.cell(column=1, row=current_readme_row, value=validator.describe(column_name)) current_readme_row += 1 data_sheet.cell(column=current_data_column, row=1, value=column_name) if", "including separators must be < 256 if sum(len(i) for i in validator.valid_values) +", "be < 256 if sum(len(i) for i in validator.valid_values) + len(validator.valid_values) - 1", "output, **kwargs ): super(Checkerator, self).__init__(**kwargs) self.output = output def generate(self): wb = Workbook()", "= wb.create_sheet(title=\"Ontologies\") data_validation = validator.generate(get_column_letter(current_data_column), get_column_letter(current_ontology_column), ontology_sheet) current_ontology_column += 1 elif isinstance(validator, SetValidator):", "wb.create_sheet(title=\"Sets\") data_validation = validator.generate(get_column_letter(current_data_column), column_name, get_column_letter(current_set_column), set_sheet) current_set_column += 1 else: data_validation =", "1.2 sheet.column_dimensions[get_column_letter(column_cells[0].column)].width = length wb.save(filename=self.output) def as_text(self, value): return str(value) if value is", "= 1 current_ontology_column = 1 current_set_column = 1 current_readme_row = 1 readme_sheet =", "= None set_sheet = None set_columns = {} for column_name, validator in self.validators.items():", "1 data_sheet.cell(column=current_data_column, row=1, value=column_name) if isinstance(validator, OntologyValidator): if not ontology_sheet: ontology_sheet = wb.create_sheet(title=\"Ontologies\")", "= get_column_letter(current_data_column) elif isinstance(validator, LinkedSetValidator): if not set_sheet: set_sheet = wb.create_sheet(title=\"Sets\") data_validation =", "1 for sheet in wb.worksheets: for column_cells in sheet.columns: length = 
(max(len(self.as_text(cell.value)) for", "set_sheet: set_sheet = wb.create_sheet(title=\"Sets\") data_validation = validator.generate(get_column_letter(current_data_column), set_columns, column_name, get_column_letter(current_set_column), set_sheet, wb) current_set_column", "import OntologyValidator, SetValidator, LinkedSetValidator from openpyxl.utils import get_column_letter from checkcel.checkplate import Checkplate class", "value=column_name) if isinstance(validator, OntologyValidator): if not ontology_sheet: ontology_sheet = wb.create_sheet(title=\"Ontologies\") data_validation = validator.generate(get_column_letter(current_data_column),", "Workbook() current_data_column = 1 current_ontology_column = 1 current_set_column = 1 current_readme_row = 1", "current_set_column += 1 set_columns[column_name] = get_column_letter(current_data_column) else: data_validation = validator.generate(get_column_letter(current_data_column)) if data_validation: data_sheet.add_data_validation(data_validation)", "= validator.generate(get_column_letter(current_data_column)) if data_validation: data_sheet.add_data_validation(data_validation) current_data_column += 1 for sheet in wb.worksheets: for", "wb.create_sheet(title=\"Data\") ontology_sheet = None set_sheet = None set_columns = {} for column_name, validator", "= wb.create_sheet(title=\"Data\") ontology_sheet = None set_sheet = None set_columns = {} for column_name,", "None set_columns = {} for column_name, validator in self.validators.items(): readme_sheet.cell(column=1, row=current_readme_row, value=validator.describe(column_name)) current_readme_row", "in self.validators.items(): readme_sheet.cell(column=1, row=current_readme_row, value=validator.describe(column_name)) current_readme_row += 1 data_sheet.cell(column=current_data_column, row=1, value=column_name) if isinstance(validator,", "column_name, validator in self.validators.items(): readme_sheet.cell(column=1, row=current_readme_row, value=validator.describe(column_name)) 
current_readme_row += 1 data_sheet.cell(column=current_data_column, row=1, value=column_name)", "not set_sheet: set_sheet = wb.create_sheet(title=\"Sets\") data_validation = validator.generate(get_column_letter(current_data_column), column_name, get_column_letter(current_set_column), set_sheet) current_set_column +=", "= None set_columns = {} for column_name, validator in self.validators.items(): readme_sheet.cell(column=1, row=current_readme_row, value=validator.describe(column_name))", "1 else: data_validation = validator.generate(get_column_letter(current_data_column)) set_columns[column_name] = get_column_letter(current_data_column) elif isinstance(validator, LinkedSetValidator): if not", "Checkplate class Checkerator(Checkplate): def __init__( self, output, **kwargs ): super(Checkerator, self).__init__(**kwargs) self.output =", "data_validation = validator.generate(get_column_letter(current_data_column)) if data_validation: data_sheet.add_data_validation(data_validation) current_data_column += 1 for sheet in wb.worksheets:", "): super(Checkerator, self).__init__(**kwargs) self.output = output def generate(self): wb = Workbook() current_data_column =", "# Total size, including separators must be < 256 if sum(len(i) for i", "import Checkplate class Checkerator(Checkplate): def __init__( self, output, **kwargs ): super(Checkerator, self).__init__(**kwargs) self.output", "get_column_letter(current_ontology_column), ontology_sheet) current_ontology_column += 1 elif isinstance(validator, SetValidator): # Total size, including separators", "wb.save(filename=self.output) def as_text(self, value): return str(value) if value is not None else \"\"", "data_sheet.cell(column=current_data_column, row=1, value=column_name) if isinstance(validator, OntologyValidator): if not ontology_sheet: ontology_sheet = wb.create_sheet(title=\"Ontologies\") data_validation", "if not set_sheet: set_sheet = wb.create_sheet(title=\"Sets\") data_validation = 
validator.generate(get_column_letter(current_data_column), set_columns, column_name, get_column_letter(current_set_column), set_sheet,", "for cell in column_cells) + 2) * 1.2 sheet.column_dimensions[get_column_letter(column_cells[0].column)].width = length wb.save(filename=self.output) def", "LinkedSetValidator from openpyxl.utils import get_column_letter from checkcel.checkplate import Checkplate class Checkerator(Checkplate): def __init__(", "set_sheet = None set_columns = {} for column_name, validator in self.validators.items(): readme_sheet.cell(column=1, row=current_readme_row,", "validator.generate(get_column_letter(current_data_column), get_column_letter(current_ontology_column), ontology_sheet) current_ontology_column += 1 elif isinstance(validator, SetValidator): # Total size, including", "data_sheet.add_data_validation(data_validation) current_data_column += 1 for sheet in wb.worksheets: for column_cells in sheet.columns: length", "ontology_sheet: ontology_sheet = wb.create_sheet(title=\"Ontologies\") data_validation = validator.generate(get_column_letter(current_data_column), get_column_letter(current_ontology_column), ontology_sheet) current_ontology_column += 1 elif", "\"README\" data_sheet = wb.create_sheet(title=\"Data\") ontology_sheet = None set_sheet = None set_columns = {}", "self).__init__(**kwargs) self.output = output def generate(self): wb = Workbook() current_data_column = 1 current_ontology_column", "not ontology_sheet: ontology_sheet = wb.create_sheet(title=\"Ontologies\") data_validation = validator.generate(get_column_letter(current_data_column), get_column_letter(current_ontology_column), ontology_sheet) current_ontology_column += 1", "data_validation = validator.generate(get_column_letter(current_data_column), get_column_letter(current_ontology_column), ontology_sheet) current_ontology_column += 1 elif isinstance(validator, SetValidator): # Total", "+= 1 else: data_validation = validator.generate(get_column_letter(current_data_column)) 
set_columns[column_name] = get_column_letter(current_data_column) elif isinstance(validator, LinkedSetValidator): if", "current_set_column = 1 current_readme_row = 1 readme_sheet = wb.active readme_sheet.title = \"README\" data_sheet", "self.validators.items(): readme_sheet.cell(column=1, row=current_readme_row, value=validator.describe(column_name)) current_readme_row += 1 data_sheet.cell(column=current_data_column, row=1, value=column_name) if isinstance(validator, OntologyValidator):", "+= 1 elif isinstance(validator, SetValidator): # Total size, including separators must be <", "row=1, value=column_name) if isinstance(validator, OntologyValidator): if not ontology_sheet: ontology_sheet = wb.create_sheet(title=\"Ontologies\") data_validation =", "current_set_column += 1 else: data_validation = validator.generate(get_column_letter(current_data_column)) set_columns[column_name] = get_column_letter(current_data_column) elif isinstance(validator, LinkedSetValidator):", "set_sheet, wb) current_set_column += 1 set_columns[column_name] = get_column_letter(current_data_column) else: data_validation = validator.generate(get_column_letter(current_data_column)) if", "* 1.2 sheet.column_dimensions[get_column_letter(column_cells[0].column)].width = length wb.save(filename=self.output) def as_text(self, value): return str(value) if value", "else: data_validation = validator.generate(get_column_letter(current_data_column)) set_columns[column_name] = get_column_letter(current_data_column) elif isinstance(validator, LinkedSetValidator): if not set_sheet:", "get_column_letter(current_set_column), set_sheet, wb) current_set_column += 1 set_columns[column_name] = get_column_letter(current_data_column) else: data_validation = validator.generate(get_column_letter(current_data_column))", "get_column_letter(current_data_column) else: data_validation = validator.generate(get_column_letter(current_data_column)) if data_validation: data_sheet.add_data_validation(data_validation) 
current_data_column += 1 for sheet", "2) * 1.2 sheet.column_dimensions[get_column_letter(column_cells[0].column)].width = length wb.save(filename=self.output) def as_text(self, value): return str(value) if", "generate(self): wb = Workbook() current_data_column = 1 current_ontology_column = 1 current_set_column = 1", "1 current_ontology_column = 1 current_set_column = 1 current_readme_row = 1 readme_sheet = wb.active", "= 1 current_set_column = 1 current_readme_row = 1 readme_sheet = wb.active readme_sheet.title =", "= {} for column_name, validator in self.validators.items(): readme_sheet.cell(column=1, row=current_readme_row, value=validator.describe(column_name)) current_readme_row += 1", "ontology_sheet = wb.create_sheet(title=\"Ontologies\") data_validation = validator.generate(get_column_letter(current_data_column), get_column_letter(current_ontology_column), ontology_sheet) current_ontology_column += 1 elif isinstance(validator,", "elif isinstance(validator, SetValidator): # Total size, including separators must be < 256 if", "SetValidator): # Total size, including separators must be < 256 if sum(len(i) for", "set_columns, column_name, get_column_letter(current_set_column), set_sheet, wb) current_set_column += 1 set_columns[column_name] = get_column_letter(current_data_column) else: data_validation", "= get_column_letter(current_data_column) else: data_validation = validator.generate(get_column_letter(current_data_column)) if data_validation: data_sheet.add_data_validation(data_validation) current_data_column += 1 for", "wb.worksheets: for column_cells in sheet.columns: length = (max(len(self.as_text(cell.value)) for cell in column_cells) +", "set_sheet = wb.create_sheet(title=\"Sets\") data_validation = validator.generate(get_column_letter(current_data_column), set_columns, column_name, get_column_letter(current_set_column), set_sheet, wb) current_set_column +=", "for column_name, validator in self.validators.items(): readme_sheet.cell(column=1, 
row=current_readme_row, value=validator.describe(column_name)) current_readme_row += 1 data_sheet.cell(column=current_data_column, row=1,", "i in validator.valid_values) + len(validator.valid_values) - 1 > 256: if not set_sheet: set_sheet", "1 set_columns[column_name] = get_column_letter(current_data_column) else: data_validation = validator.generate(get_column_letter(current_data_column)) if data_validation: data_sheet.add_data_validation(data_validation) current_data_column +=", "data_validation = validator.generate(get_column_letter(current_data_column), column_name, get_column_letter(current_set_column), set_sheet) current_set_column += 1 else: data_validation = validator.generate(get_column_letter(current_data_column))", "wb.active readme_sheet.title = \"README\" data_sheet = wb.create_sheet(title=\"Data\") ontology_sheet = None set_sheet = None", "**kwargs ): super(Checkerator, self).__init__(**kwargs) self.output = output def generate(self): wb = Workbook() current_data_column", "elif isinstance(validator, LinkedSetValidator): if not set_sheet: set_sheet = wb.create_sheet(title=\"Sets\") data_validation = validator.generate(get_column_letter(current_data_column), set_columns,", "current_ontology_column = 1 current_set_column = 1 current_readme_row = 1 readme_sheet = wb.active readme_sheet.title", "= (max(len(self.as_text(cell.value)) for cell in column_cells) + 2) * 1.2 sheet.column_dimensions[get_column_letter(column_cells[0].column)].width = length", "import get_column_letter from checkcel.checkplate import Checkplate class Checkerator(Checkplate): def __init__( self, output, **kwargs", "= validator.generate(get_column_letter(current_data_column), column_name, get_column_letter(current_set_column), set_sheet) current_set_column += 1 else: data_validation = validator.generate(get_column_letter(current_data_column)) set_columns[column_name]", "+= 1 for sheet in wb.worksheets: for column_cells in sheet.columns: length = (max(len(self.as_text(cell.value))", 
"wb.create_sheet(title=\"Sets\") data_validation = validator.generate(get_column_letter(current_data_column), set_columns, column_name, get_column_letter(current_set_column), set_sheet, wb) current_set_column += 1 set_columns[column_name]", "for column_cells in sheet.columns: length = (max(len(self.as_text(cell.value)) for cell in column_cells) + 2)", "column_cells) + 2) * 1.2 sheet.column_dimensions[get_column_letter(column_cells[0].column)].width = length wb.save(filename=self.output) def as_text(self, value): return", "class Checkerator(Checkplate): def __init__( self, output, **kwargs ): super(Checkerator, self).__init__(**kwargs) self.output = output", "+= 1 data_sheet.cell(column=current_data_column, row=1, value=column_name) if isinstance(validator, OntologyValidator): if not ontology_sheet: ontology_sheet =", "data_validation = validator.generate(get_column_letter(current_data_column)) set_columns[column_name] = get_column_letter(current_data_column) elif isinstance(validator, LinkedSetValidator): if not set_sheet: set_sheet", "ontology_sheet = None set_sheet = None set_columns = {} for column_name, validator in", "in validator.valid_values) + len(validator.valid_values) - 1 > 256: if not set_sheet: set_sheet =", "= validator.generate(get_column_letter(current_data_column), set_columns, column_name, get_column_letter(current_set_column), set_sheet, wb) current_set_column += 1 set_columns[column_name] = get_column_letter(current_data_column)", "sheet.columns: length = (max(len(self.as_text(cell.value)) for cell in column_cells) + 2) * 1.2 sheet.column_dimensions[get_column_letter(column_cells[0].column)].width", "= validator.generate(get_column_letter(current_data_column)) set_columns[column_name] = get_column_letter(current_data_column) elif isinstance(validator, LinkedSetValidator): if not set_sheet: set_sheet =", "in wb.worksheets: for column_cells in sheet.columns: length = (max(len(self.as_text(cell.value)) for cell in column_cells)", 
"isinstance(validator, SetValidator): # Total size, including separators must be < 256 if sum(len(i)", "= wb.create_sheet(title=\"Sets\") data_validation = validator.generate(get_column_letter(current_data_column), column_name, get_column_letter(current_set_column), set_sheet) current_set_column += 1 else: data_validation", "def generate(self): wb = Workbook() current_data_column = 1 current_ontology_column = 1 current_set_column =", "Workbook from checkcel.validators import OntologyValidator, SetValidator, LinkedSetValidator from openpyxl.utils import get_column_letter from checkcel.checkplate", "def __init__( self, output, **kwargs ): super(Checkerator, self).__init__(**kwargs) self.output = output def generate(self):", "= 1 readme_sheet = wb.active readme_sheet.title = \"README\" data_sheet = wb.create_sheet(title=\"Data\") ontology_sheet =", "1 > 256: if not set_sheet: set_sheet = wb.create_sheet(title=\"Sets\") data_validation = validator.generate(get_column_letter(current_data_column), column_name,", "set_columns = {} for column_name, validator in self.validators.items(): readme_sheet.cell(column=1, row=current_readme_row, value=validator.describe(column_name)) current_readme_row +=", "self.output = output def generate(self): wb = Workbook() current_data_column = 1 current_ontology_column =", "readme_sheet.title = \"README\" data_sheet = wb.create_sheet(title=\"Data\") ontology_sheet = None set_sheet = None set_columns", "if not ontology_sheet: ontology_sheet = wb.create_sheet(title=\"Ontologies\") data_validation = validator.generate(get_column_letter(current_data_column), get_column_letter(current_ontology_column), ontology_sheet) current_ontology_column +=", "current_readme_row = 1 readme_sheet = wb.active readme_sheet.title = \"README\" data_sheet = wb.create_sheet(title=\"Data\") ontology_sheet", "+= 1 set_columns[column_name] = get_column_letter(current_data_column) else: data_validation = validator.generate(get_column_letter(current_data_column)) if 
data_validation: data_sheet.add_data_validation(data_validation) current_data_column", "validator.generate(get_column_letter(current_data_column), set_columns, column_name, get_column_letter(current_set_column), set_sheet, wb) current_set_column += 1 set_columns[column_name] = get_column_letter(current_data_column) else:", "ontology_sheet) current_ontology_column += 1 elif isinstance(validator, SetValidator): # Total size, including separators must", "Total size, including separators must be < 256 if sum(len(i) for i in", "column_name, get_column_letter(current_set_column), set_sheet) current_set_column += 1 else: data_validation = validator.generate(get_column_letter(current_data_column)) set_columns[column_name] = get_column_letter(current_data_column)", "= length wb.save(filename=self.output) def as_text(self, value): return str(value) if value is not None", "in column_cells) + 2) * 1.2 sheet.column_dimensions[get_column_letter(column_cells[0].column)].width = length wb.save(filename=self.output) def as_text(self, value):" ]
[ "float(ws_kline['k']['t'])/1000).strftime(\"%m-%d-%Y %H:%M:%S\") self.klineCloseTime=datetime.datetime.fromtimestamp( float(ws_kline['k']['c'])/1000).strftime(\"%m-%d-%Y %H:%M:%S\") self.interval=ws_kline['k']['i'] self.openPrice=float(ws_kline['k']['o']) self.closePrice=float(ws_kline['k']['c']) self.highPrice=float(ws_kline['k']['h']) self.lowPrice=float(ws_kline['k']['l']) self.baseAssetVolume=float(ws_kline['k']['v']) self.numberOfTrades=int(ws_kline['k']['n']) self.klineClosed=ws_kline['k']['x']", "and api responses class WsKline: # takes in a websocket payload on init", "# some wrapper classes for binance websockets and api responses class WsKline: #", "%H:%M:%S\") self.interval=ws_kline['k']['i'] self.openPrice=float(ws_kline['k']['o']) self.closePrice=float(ws_kline['k']['c']) self.highPrice=float(ws_kline['k']['h']) self.lowPrice=float(ws_kline['k']['l']) self.baseAssetVolume=float(ws_kline['k']['v']) self.numberOfTrades=int(ws_kline['k']['n']) self.klineClosed=ws_kline['k']['x'] self.quoteAssetVolume=float(ws_kline['k']['q']) self.takerBaseAssetVolume=float(ws_kline['k']['V']) self.takerQuoteAssetVolume=float(ws_kline['k']['Q']) def", "init -> https://binance-docs.github.io/apidocs/spot/en/#kline-candlestick-streams def __init__(self, ws_kline): self.eventTime=datetime.datetime.fromtimestamp( float(ws_kline['E'])/1000 ).strftime(\"%m-%d-%Y %H:%M:%S\") self.symbol=ws_kline['s'] self.klineStartTime=datetime.datetime.fromtimestamp( float(ws_kline['k']['t'])/1000).strftime(\"%m-%d-%Y", "__init__(self, ws_kline): self.eventTime=datetime.datetime.fromtimestamp( float(ws_kline['E'])/1000 ).strftime(\"%m-%d-%Y %H:%M:%S\") self.symbol=ws_kline['s'] self.klineStartTime=datetime.datetime.fromtimestamp( float(ws_kline['k']['t'])/1000).strftime(\"%m-%d-%Y %H:%M:%S\") self.klineCloseTime=datetime.datetime.fromtimestamp( float(ws_kline['k']['c'])/1000).strftime(\"%m-%d-%Y %H:%M:%S\")", "ws_kline): self.eventTime=datetime.datetime.fromtimestamp( 
float(ws_kline['E'])/1000 ).strftime(\"%m-%d-%Y %H:%M:%S\") self.symbol=ws_kline['s'] self.klineStartTime=datetime.datetime.fromtimestamp( float(ws_kline['k']['t'])/1000).strftime(\"%m-%d-%Y %H:%M:%S\") self.klineCloseTime=datetime.datetime.fromtimestamp( float(ws_kline['k']['c'])/1000).strftime(\"%m-%d-%Y %H:%M:%S\") self.interval=ws_kline['k']['i']", "def __init__(self, ws_kline): self.eventTime=datetime.datetime.fromtimestamp( float(ws_kline['E'])/1000 ).strftime(\"%m-%d-%Y %H:%M:%S\") self.symbol=ws_kline['s'] self.klineStartTime=datetime.datetime.fromtimestamp( float(ws_kline['k']['t'])/1000).strftime(\"%m-%d-%Y %H:%M:%S\") self.klineCloseTime=datetime.datetime.fromtimestamp( float(ws_kline['k']['c'])/1000).strftime(\"%m-%d-%Y", "self.highPrice=float(ws_kline['k']['h']) self.lowPrice=float(ws_kline['k']['l']) self.baseAssetVolume=float(ws_kline['k']['v']) self.numberOfTrades=int(ws_kline['k']['n']) self.klineClosed=ws_kline['k']['x'] self.quoteAssetVolume=float(ws_kline['k']['q']) self.takerBaseAssetVolume=float(ws_kline['k']['V']) self.takerQuoteAssetVolume=float(ws_kline['k']['Q']) def toDict(self): d = dict()", "float(ws_kline['k']['c'])/1000).strftime(\"%m-%d-%Y %H:%M:%S\") self.interval=ws_kline['k']['i'] self.openPrice=float(ws_kline['k']['o']) self.closePrice=float(ws_kline['k']['c']) self.highPrice=float(ws_kline['k']['h']) self.lowPrice=float(ws_kline['k']['l']) self.baseAssetVolume=float(ws_kline['k']['v']) self.numberOfTrades=int(ws_kline['k']['n']) self.klineClosed=ws_kline['k']['x'] self.quoteAssetVolume=float(ws_kline['k']['q']) self.takerBaseAssetVolume=float(ws_kline['k']['V']) self.takerQuoteAssetVolume=float(ws_kline['k']['Q'])", "in a websocket payload on init -> https://binance-docs.github.io/apidocs/spot/en/#kline-candlestick-streams def __init__(self, ws_kline): self.eventTime=datetime.datetime.fromtimestamp( float(ws_kline['E'])/1000", "# takes in a websocket payload on init -> 
https://binance-docs.github.io/apidocs/spot/en/#kline-candlestick-streams def __init__(self, ws_kline):", "self.takerQuoteAssetVolume=float(ws_kline['k']['Q']) def toDict(self): d = dict() d['time'] = self.klineCloseTime d['open'] = self.openPrice d['high']", "import datetime # some wrapper classes for binance websockets and api responses class", "self.interval=ws_kline['k']['i'] self.openPrice=float(ws_kline['k']['o']) self.closePrice=float(ws_kline['k']['c']) self.highPrice=float(ws_kline['k']['h']) self.lowPrice=float(ws_kline['k']['l']) self.baseAssetVolume=float(ws_kline['k']['v']) self.numberOfTrades=int(ws_kline['k']['n']) self.klineClosed=ws_kline['k']['x'] self.quoteAssetVolume=float(ws_kline['k']['q']) self.takerBaseAssetVolume=float(ws_kline['k']['V']) self.takerQuoteAssetVolume=float(ws_kline['k']['Q']) def toDict(self):", "datetime # some wrapper classes for binance websockets and api responses class WsKline:", "responses class WsKline: # takes in a websocket payload on init -> https://binance-docs.github.io/apidocs/spot/en/#kline-candlestick-streams", "%H:%M:%S\") self.symbol=ws_kline['s'] self.klineStartTime=datetime.datetime.fromtimestamp( float(ws_kline['k']['t'])/1000).strftime(\"%m-%d-%Y %H:%M:%S\") self.klineCloseTime=datetime.datetime.fromtimestamp( float(ws_kline['k']['c'])/1000).strftime(\"%m-%d-%Y %H:%M:%S\") self.interval=ws_kline['k']['i'] self.openPrice=float(ws_kline['k']['o']) self.closePrice=float(ws_kline['k']['c']) self.highPrice=float(ws_kline['k']['h']) self.lowPrice=float(ws_kline['k']['l'])", "self.quoteAssetVolume=float(ws_kline['k']['q']) self.takerBaseAssetVolume=float(ws_kline['k']['V']) self.takerQuoteAssetVolume=float(ws_kline['k']['Q']) def toDict(self): d = dict() d['time'] = self.klineCloseTime d['open'] =", "= dict() d['time'] = self.klineCloseTime d['open'] = self.openPrice d['high'] = self.highPrice d['low'] =", "classes for binance websockets and api responses class WsKline: # takes in a", 
"https://binance-docs.github.io/apidocs/spot/en/#kline-candlestick-streams def __init__(self, ws_kline): self.eventTime=datetime.datetime.fromtimestamp( float(ws_kline['E'])/1000 ).strftime(\"%m-%d-%Y %H:%M:%S\") self.symbol=ws_kline['s'] self.klineStartTime=datetime.datetime.fromtimestamp( float(ws_kline['k']['t'])/1000).strftime(\"%m-%d-%Y %H:%M:%S\") self.klineCloseTime=datetime.datetime.fromtimestamp(", "on init -> https://binance-docs.github.io/apidocs/spot/en/#kline-candlestick-streams def __init__(self, ws_kline): self.eventTime=datetime.datetime.fromtimestamp( float(ws_kline['E'])/1000 ).strftime(\"%m-%d-%Y %H:%M:%S\") self.symbol=ws_kline['s'] self.klineStartTime=datetime.datetime.fromtimestamp(", "float(ws_kline['E'])/1000 ).strftime(\"%m-%d-%Y %H:%M:%S\") self.symbol=ws_kline['s'] self.klineStartTime=datetime.datetime.fromtimestamp( float(ws_kline['k']['t'])/1000).strftime(\"%m-%d-%Y %H:%M:%S\") self.klineCloseTime=datetime.datetime.fromtimestamp( float(ws_kline['k']['c'])/1000).strftime(\"%m-%d-%Y %H:%M:%S\") self.interval=ws_kline['k']['i'] self.openPrice=float(ws_kline['k']['o']) self.closePrice=float(ws_kline['k']['c'])", "self.klineCloseTime=datetime.datetime.fromtimestamp( float(ws_kline['k']['c'])/1000).strftime(\"%m-%d-%Y %H:%M:%S\") self.interval=ws_kline['k']['i'] self.openPrice=float(ws_kline['k']['o']) self.closePrice=float(ws_kline['k']['c']) self.highPrice=float(ws_kline['k']['h']) self.lowPrice=float(ws_kline['k']['l']) self.baseAssetVolume=float(ws_kline['k']['v']) self.numberOfTrades=int(ws_kline['k']['n']) self.klineClosed=ws_kline['k']['x'] self.quoteAssetVolume=float(ws_kline['k']['q']) self.takerBaseAssetVolume=float(ws_kline['k']['V'])", "takes in a websocket payload on init -> https://binance-docs.github.io/apidocs/spot/en/#kline-candlestick-streams def __init__(self, ws_kline): self.eventTime=datetime.datetime.fromtimestamp(", "for binance websockets and api responses class WsKline: # takes in a websocket", "toDict(self): d = 
dict() d['time'] = self.klineCloseTime d['open'] = self.openPrice d['high'] = self.highPrice", "a websocket payload on init -> https://binance-docs.github.io/apidocs/spot/en/#kline-candlestick-streams def __init__(self, ws_kline): self.eventTime=datetime.datetime.fromtimestamp( float(ws_kline['E'])/1000 ).strftime(\"%m-%d-%Y", "class WsKline: # takes in a websocket payload on init -> https://binance-docs.github.io/apidocs/spot/en/#kline-candlestick-streams def", "websocket payload on init -> https://binance-docs.github.io/apidocs/spot/en/#kline-candlestick-streams def __init__(self, ws_kline): self.eventTime=datetime.datetime.fromtimestamp( float(ws_kline['E'])/1000 ).strftime(\"%m-%d-%Y %H:%M:%S\")", "self.numberOfTrades=int(ws_kline['k']['n']) self.klineClosed=ws_kline['k']['x'] self.quoteAssetVolume=float(ws_kline['k']['q']) self.takerBaseAssetVolume=float(ws_kline['k']['V']) self.takerQuoteAssetVolume=float(ws_kline['k']['Q']) def toDict(self): d = dict() d['time'] = self.klineCloseTime", "wrapper classes for binance websockets and api responses class WsKline: # takes in", "self.klineClosed=ws_kline['k']['x'] self.quoteAssetVolume=float(ws_kline['k']['q']) self.takerBaseAssetVolume=float(ws_kline['k']['V']) self.takerQuoteAssetVolume=float(ws_kline['k']['Q']) def toDict(self): d = dict() d['time'] = self.klineCloseTime d['open']", "self.takerBaseAssetVolume=float(ws_kline['k']['V']) self.takerQuoteAssetVolume=float(ws_kline['k']['Q']) def toDict(self): d = dict() d['time'] = self.klineCloseTime d['open'] = self.openPrice", "d['time'] = self.klineCloseTime d['open'] = self.openPrice d['high'] = self.highPrice d['low'] = self.lowPrice d['close']", "some wrapper classes for binance websockets and api responses class WsKline: # takes", "payload on init -> https://binance-docs.github.io/apidocs/spot/en/#kline-candlestick-streams def __init__(self, ws_kline): self.eventTime=datetime.datetime.fromtimestamp( float(ws_kline['E'])/1000 ).strftime(\"%m-%d-%Y %H:%M:%S\") 
self.symbol=ws_kline['s']", "= self.openPrice d['high'] = self.highPrice d['low'] = self.lowPrice d['close'] = self.closePrice return d", "dict() d['time'] = self.klineCloseTime d['open'] = self.openPrice d['high'] = self.highPrice d['low'] = self.lowPrice", "-> https://binance-docs.github.io/apidocs/spot/en/#kline-candlestick-streams def __init__(self, ws_kline): self.eventTime=datetime.datetime.fromtimestamp( float(ws_kline['E'])/1000 ).strftime(\"%m-%d-%Y %H:%M:%S\") self.symbol=ws_kline['s'] self.klineStartTime=datetime.datetime.fromtimestamp( float(ws_kline['k']['t'])/1000).strftime(\"%m-%d-%Y %H:%M:%S\")", "self.klineStartTime=datetime.datetime.fromtimestamp( float(ws_kline['k']['t'])/1000).strftime(\"%m-%d-%Y %H:%M:%S\") self.klineCloseTime=datetime.datetime.fromtimestamp( float(ws_kline['k']['c'])/1000).strftime(\"%m-%d-%Y %H:%M:%S\") self.interval=ws_kline['k']['i'] self.openPrice=float(ws_kline['k']['o']) self.closePrice=float(ws_kline['k']['c']) self.highPrice=float(ws_kline['k']['h']) self.lowPrice=float(ws_kline['k']['l']) self.baseAssetVolume=float(ws_kline['k']['v']) self.numberOfTrades=int(ws_kline['k']['n'])", "self.openPrice=float(ws_kline['k']['o']) self.closePrice=float(ws_kline['k']['c']) self.highPrice=float(ws_kline['k']['h']) self.lowPrice=float(ws_kline['k']['l']) self.baseAssetVolume=float(ws_kline['k']['v']) self.numberOfTrades=int(ws_kline['k']['n']) self.klineClosed=ws_kline['k']['x'] self.quoteAssetVolume=float(ws_kline['k']['q']) self.takerBaseAssetVolume=float(ws_kline['k']['V']) self.takerQuoteAssetVolume=float(ws_kline['k']['Q']) def toDict(self): d", "self.lowPrice=float(ws_kline['k']['l']) self.baseAssetVolume=float(ws_kline['k']['v']) self.numberOfTrades=int(ws_kline['k']['n']) self.klineClosed=ws_kline['k']['x'] self.quoteAssetVolume=float(ws_kline['k']['q']) self.takerBaseAssetVolume=float(ws_kline['k']['V']) self.takerQuoteAssetVolume=float(ws_kline['k']['Q']) def toDict(self): d = dict() d['time']", "WsKline: # takes in a 
websocket payload on init -> https://binance-docs.github.io/apidocs/spot/en/#kline-candlestick-streams def __init__(self,", "self.symbol=ws_kline['s'] self.klineStartTime=datetime.datetime.fromtimestamp( float(ws_kline['k']['t'])/1000).strftime(\"%m-%d-%Y %H:%M:%S\") self.klineCloseTime=datetime.datetime.fromtimestamp( float(ws_kline['k']['c'])/1000).strftime(\"%m-%d-%Y %H:%M:%S\") self.interval=ws_kline['k']['i'] self.openPrice=float(ws_kline['k']['o']) self.closePrice=float(ws_kline['k']['c']) self.highPrice=float(ws_kline['k']['h']) self.lowPrice=float(ws_kline['k']['l']) self.baseAssetVolume=float(ws_kline['k']['v'])", "binance websockets and api responses class WsKline: # takes in a websocket payload", ").strftime(\"%m-%d-%Y %H:%M:%S\") self.symbol=ws_kline['s'] self.klineStartTime=datetime.datetime.fromtimestamp( float(ws_kline['k']['t'])/1000).strftime(\"%m-%d-%Y %H:%M:%S\") self.klineCloseTime=datetime.datetime.fromtimestamp( float(ws_kline['k']['c'])/1000).strftime(\"%m-%d-%Y %H:%M:%S\") self.interval=ws_kline['k']['i'] self.openPrice=float(ws_kline['k']['o']) self.closePrice=float(ws_kline['k']['c']) self.highPrice=float(ws_kline['k']['h'])", "api responses class WsKline: # takes in a websocket payload on init ->", "%H:%M:%S\") self.klineCloseTime=datetime.datetime.fromtimestamp( float(ws_kline['k']['c'])/1000).strftime(\"%m-%d-%Y %H:%M:%S\") self.interval=ws_kline['k']['i'] self.openPrice=float(ws_kline['k']['o']) self.closePrice=float(ws_kline['k']['c']) self.highPrice=float(ws_kline['k']['h']) self.lowPrice=float(ws_kline['k']['l']) self.baseAssetVolume=float(ws_kline['k']['v']) self.numberOfTrades=int(ws_kline['k']['n']) self.klineClosed=ws_kline['k']['x'] self.quoteAssetVolume=float(ws_kline['k']['q'])", "self.baseAssetVolume=float(ws_kline['k']['v']) self.numberOfTrades=int(ws_kline['k']['n']) self.klineClosed=ws_kline['k']['x'] self.quoteAssetVolume=float(ws_kline['k']['q']) self.takerBaseAssetVolume=float(ws_kline['k']['V']) 
self.takerQuoteAssetVolume=float(ws_kline['k']['Q']) def toDict(self): d = dict() d['time'] =", "self.eventTime=datetime.datetime.fromtimestamp( float(ws_kline['E'])/1000 ).strftime(\"%m-%d-%Y %H:%M:%S\") self.symbol=ws_kline['s'] self.klineStartTime=datetime.datetime.fromtimestamp( float(ws_kline['k']['t'])/1000).strftime(\"%m-%d-%Y %H:%M:%S\") self.klineCloseTime=datetime.datetime.fromtimestamp( float(ws_kline['k']['c'])/1000).strftime(\"%m-%d-%Y %H:%M:%S\") self.interval=ws_kline['k']['i'] self.openPrice=float(ws_kline['k']['o'])", "d = dict() d['time'] = self.klineCloseTime d['open'] = self.openPrice d['high'] = self.highPrice d['low']", "self.closePrice=float(ws_kline['k']['c']) self.highPrice=float(ws_kline['k']['h']) self.lowPrice=float(ws_kline['k']['l']) self.baseAssetVolume=float(ws_kline['k']['v']) self.numberOfTrades=int(ws_kline['k']['n']) self.klineClosed=ws_kline['k']['x'] self.quoteAssetVolume=float(ws_kline['k']['q']) self.takerBaseAssetVolume=float(ws_kline['k']['V']) self.takerQuoteAssetVolume=float(ws_kline['k']['Q']) def toDict(self): d =", "= self.klineCloseTime d['open'] = self.openPrice d['high'] = self.highPrice d['low'] = self.lowPrice d['close'] =", "websockets and api responses class WsKline: # takes in a websocket payload on", "def toDict(self): d = dict() d['time'] = self.klineCloseTime d['open'] = self.openPrice d['high'] =", "self.klineCloseTime d['open'] = self.openPrice d['high'] = self.highPrice d['low'] = self.lowPrice d['close'] = self.closePrice", "d['open'] = self.openPrice d['high'] = self.highPrice d['low'] = self.lowPrice d['close'] = self.closePrice return" ]
[ "open('images_rebuilt', 'w') as f: for environment in env.manager.get_rebuild_steps('common', changed_files, force=force): build_image(environment) f.write('{}\\n'.format(environment)) @task", "import cyan, green, red, yellow from fabric.contrib.console import confirm from fabric.contrib.files import exists", "env.docker.wait(container=container_id, timeout=600) # Clean up logs = env.docker.logs(container=container_id) for container in container_map.containers: map_client.shutdown(container)", "== 0: print green('All tests passed.') else: abort(red('Some tests failed!')) @task def push():", "(docker images) for all environments.''' force = action == 'force' check_changed = action", "fabric.api import (cd, env, execute, hide, local, prefix, prompt, puts, roles, run, sudo,", "tests failed!')) @task def push(): '''Step 3. Merge and push artifacts.''' # TODO:", "from fabric.contrib.console import confirm from fabric.contrib.files import exists @task def build_image(image='production'): if env.docker.build(**env.images[image]['build'])", "force=force): build_image(environment) f.write('{}\\n'.format(environment)) @task def run_tests(): '''Step 2. Run tests and keep it", "and keep it real.''' # Start the testing runner container_map = env.container_map map_client", "as f: for environment in env.manager.get_rebuild_steps('common', changed_files, force=force): build_image(environment) f.write('{}\\n'.format(environment)) @task def run_tests():", "return exit_status = env.docker.wait(container=container_id, timeout=600) # Clean up logs = env.docker.logs(container=container_id) for container", "abort(red('Failed to build image {image}'.format(image=image))) else: print green('Successfully built image {image}'.format(image=image)) @task def", "Clean up logs = env.docker.logs(container=container_id) for container in container_map.containers: map_client.shutdown(container) # Abort or", "env.run('rm -rf images_rebuilt') @task def deploy(): '''Step 4. 
Deploy artifacts.''' for cname in", "--cached --name-only ' '{working_ref}'.format(**env), capture=True).splitlines() with open('images_rebuilt', 'w') as f: for environment in", "map_client = env.map_client info = map_client.startup('runner') container_id = info[0][1]['Id'] # Wait on it", "from fabric.utils import abort from fabric.colors import cyan, green, red, yellow from fabric.contrib.console", "from dockermap.api import MappingDockerClient from fabric.api import (cd, env, execute, hide, local, prefix,", "green, red, yellow from fabric.contrib.console import confirm from fabric.contrib.files import exists @task def", "2. Run tests and keep it real.''' # Start the testing runner container_map", "from fabric.api import (cd, env, execute, hide, local, prefix, prompt, puts, roles, run,", "-rf images_rebuilt') @task def deploy(): '''Step 4. Deploy artifacts.''' for cname in env.containers:", "'force' check_changed = action == 'check' with cd(env.project_path): remote, dest_branch = env.remote_ref.split('/', 1)", "import ClientConfiguration from dockermap.api import MappingDockerClient from fabric.api import (cd, env, execute, hide,", "clean up env.run('rm -rf images_rebuilt') @task def deploy(): '''Step 4. Deploy artifacts.''' for", "env.map_client info = map_client.startup('runner') container_id = info[0][1]['Id'] # Wait on it to return", "fabric.contrib.files import exists @task def build_image(image='production'): if env.docker.build(**env.images[image]['build']) is None: abort(red('Failed to build", "from fabric.colors import cyan, green, red, yellow from fabric.contrib.console import confirm from fabric.contrib.files", "@task def push(): '''Step 3. 
Merge and push artifacts.''' # TODO: push to", "logs if exit_status == 0: print green('All tests passed.') else: abort(red('Some tests failed!'))", "check_changed: with hide('running', 'stdout'): changed_files = env.run('git diff-index --cached --name-only ' '{working_ref}'.format(**env), capture=True).splitlines()", "green('Successfully built image {image}'.format(image=image)) @task def build(action='check'): '''Step 1. Build artifacts (docker images)", "all environments.''' force = action == 'force' check_changed = action == 'check' with", "env.container_map map_client = env.map_client info = map_client.startup('runner') container_id = info[0][1]['Id'] # Wait on", "sudo, task) from fabric.utils import abort from fabric.colors import cyan, green, red, yellow", "task) from fabric.utils import abort from fabric.colors import cyan, green, red, yellow from", "print logs if exit_status == 0: print green('All tests passed.') else: abort(red('Some tests", "for environment in env.manager.get_rebuild_steps('common', changed_files, force=force): build_image(environment) f.write('{}\\n'.format(environment)) @task def run_tests(): '''Step 2.", "env.remote_ref.split('/', 1) changed_files = [] if check_changed: with hide('running', 'stdout'): changed_files = env.run('git", "images_rebuilt') @task def deploy(): '''Step 4. 
Deploy artifacts.''' for cname in env.containers: env.map_client.shutdown(cname)", "'{working_ref}'.format(**env), capture=True).splitlines() with open('images_rebuilt', 'w') as f: for environment in env.manager.get_rebuild_steps('common', changed_files, force=force):", "real.''' # Start the testing runner container_map = env.container_map map_client = env.map_client info", "exists @task def build_image(image='production'): if env.docker.build(**env.images[image]['build']) is None: abort(red('Failed to build image {image}'.format(image=image)))", "check_changed = action == 'check' with cd(env.project_path): remote, dest_branch = env.remote_ref.split('/', 1) changed_files", "capture=True).splitlines() with open('images_rebuilt', 'w') as f: for environment in env.manager.get_rebuild_steps('common', changed_files, force=force): build_image(environment)", "tests and keep it real.''' # Start the testing runner container_map = env.container_map", "force = action == 'force' check_changed = action == 'check' with cd(env.project_path): remote,", "action == 'force' check_changed = action == 'check' with cd(env.project_path): remote, dest_branch =", "import MappingDockerClient from fabric.api import (cd, env, execute, hide, local, prefix, prompt, puts,", "ClientConfiguration from dockermap.api import MappingDockerClient from fabric.api import (cd, env, execute, hide, local,", "{image}'.format(image=image))) else: print green('Successfully built image {image}'.format(image=image)) @task def build(action='check'): '''Step 1. Build", "'''Step 1. Build artifacts (docker images) for all environments.''' force = action ==", "remote, dest_branch = env.remote_ref.split('/', 1) changed_files = [] if check_changed: with hide('running', 'stdout'):", "container_map = env.container_map map_client = env.map_client info = map_client.startup('runner') container_id = info[0][1]['Id'] #", "or succeed. 
print logs if exit_status == 0: print green('All tests passed.') else:", "Perform clean up env.run('rm -rf images_rebuilt') @task def deploy(): '''Step 4. Deploy artifacts.'''", "None: abort(red('Failed to build image {image}'.format(image=image))) else: print green('Successfully built image {image}'.format(image=image)) @task", "'w') as f: for environment in env.manager.get_rebuild_steps('common', changed_files, force=force): build_image(environment) f.write('{}\\n'.format(environment)) @task def", "f: for environment in env.manager.get_rebuild_steps('common', changed_files, force=force): build_image(environment) f.write('{}\\n'.format(environment)) @task def run_tests(): '''Step", "else: abort(red('Some tests failed!')) @task def push(): '''Step 3. Merge and push artifacts.'''", "container in container_map.containers: map_client.shutdown(container) # Abort or succeed. print logs if exit_status ==", "print green('All tests passed.') else: abort(red('Some tests failed!')) @task def push(): '''Step 3.", "push to registry # Perform clean up env.run('rm -rf images_rebuilt') @task def deploy():", "= map_client.startup('runner') container_id = info[0][1]['Id'] # Wait on it to return exit_status =", "'''Step 3. Merge and push artifacts.''' # TODO: push to registry # Perform", "roles, run, sudo, task) from fabric.utils import abort from fabric.colors import cyan, green,", "1. Build artifacts (docker images) for all environments.''' force = action == 'force'", "else: print green('Successfully built image {image}'.format(image=image)) @task def build(action='check'): '''Step 1. Build artifacts", "push artifacts.''' # TODO: push to registry # Perform clean up env.run('rm -rf", "red, yellow from fabric.contrib.console import confirm from fabric.contrib.files import exists @task def build_image(image='production'):", "tests passed.') else: abort(red('Some tests failed!')) @task def push(): '''Step 3. 
Merge and", "<filename>continuity/tasks.py from dockermap.map.config import ClientConfiguration from dockermap.api import MappingDockerClient from fabric.api import (cd,", "= env.remote_ref.split('/', 1) changed_files = [] if check_changed: with hide('running', 'stdout'): changed_files =", "changed_files, force=force): build_image(environment) f.write('{}\\n'.format(environment)) @task def run_tests(): '''Step 2. Run tests and keep", "changed_files = [] if check_changed: with hide('running', 'stdout'): changed_files = env.run('git diff-index --cached", "env.docker.build(**env.images[image]['build']) is None: abort(red('Failed to build image {image}'.format(image=image))) else: print green('Successfully built image", "abort from fabric.colors import cyan, green, red, yellow from fabric.contrib.console import confirm from", "env.docker.logs(container=container_id) for container in container_map.containers: map_client.shutdown(container) # Abort or succeed. print logs if", "from dockermap.map.config import ClientConfiguration from dockermap.api import MappingDockerClient from fabric.api import (cd, env,", "prompt, puts, roles, run, sudo, task) from fabric.utils import abort from fabric.colors import", "environment in env.manager.get_rebuild_steps('common', changed_files, force=force): build_image(environment) f.write('{}\\n'.format(environment)) @task def run_tests(): '''Step 2. Run", "up env.run('rm -rf images_rebuilt') @task def deploy(): '''Step 4. Deploy artifacts.''' for cname", "= action == 'force' check_changed = action == 'check' with cd(env.project_path): remote, dest_branch", "container_id = info[0][1]['Id'] # Wait on it to return exit_status = env.docker.wait(container=container_id, timeout=600)", "logs = env.docker.logs(container=container_id) for container in container_map.containers: map_client.shutdown(container) # Abort or succeed. print", "green('All tests passed.') else: abort(red('Some tests failed!')) @task def push(): '''Step 3. 
Merge", "info = map_client.startup('runner') container_id = info[0][1]['Id'] # Wait on it to return exit_status", "cd(env.project_path): remote, dest_branch = env.remote_ref.split('/', 1) changed_files = [] if check_changed: with hide('running',", "artifacts.''' # TODO: push to registry # Perform clean up env.run('rm -rf images_rebuilt')", "def build(action='check'): '''Step 1. Build artifacts (docker images) for all environments.''' force =", "' '{working_ref}'.format(**env), capture=True).splitlines() with open('images_rebuilt', 'w') as f: for environment in env.manager.get_rebuild_steps('common', changed_files,", "dockermap.api import MappingDockerClient from fabric.api import (cd, env, execute, hide, local, prefix, prompt,", "testing runner container_map = env.container_map map_client = env.map_client info = map_client.startup('runner') container_id =", "# Abort or succeed. print logs if exit_status == 0: print green('All tests", "# Perform clean up env.run('rm -rf images_rebuilt') @task def deploy(): '''Step 4. Deploy", "keep it real.''' # Start the testing runner container_map = env.container_map map_client =", "'check' with cd(env.project_path): remote, dest_branch = env.remote_ref.split('/', 1) changed_files = [] if check_changed:", "1) changed_files = [] if check_changed: with hide('running', 'stdout'): changed_files = env.run('git diff-index", "if env.docker.build(**env.images[image]['build']) is None: abort(red('Failed to build image {image}'.format(image=image))) else: print green('Successfully built", "{image}'.format(image=image)) @task def build(action='check'): '''Step 1. Build artifacts (docker images) for all environments.'''", "for all environments.''' force = action == 'force' check_changed = action == 'check'", "succeed. print logs if exit_status == 0: print green('All tests passed.') else: abort(red('Some", "'''Step 2. Run tests and keep it real.''' # Start the testing runner", "map_client.shutdown(container) # Abort or succeed. 
print logs if exit_status == 0: print green('All", "images) for all environments.''' force = action == 'force' check_changed = action ==", "to registry # Perform clean up env.run('rm -rf images_rebuilt') @task def deploy(): '''Step", "= env.container_map map_client = env.map_client info = map_client.startup('runner') container_id = info[0][1]['Id'] # Wait", "3. Merge and push artifacts.''' # TODO: push to registry # Perform clean", "info[0][1]['Id'] # Wait on it to return exit_status = env.docker.wait(container=container_id, timeout=600) # Clean", "push(): '''Step 3. Merge and push artifacts.''' # TODO: push to registry #", "def push(): '''Step 3. Merge and push artifacts.''' # TODO: push to registry", "# Wait on it to return exit_status = env.docker.wait(container=container_id, timeout=600) # Clean up", "Start the testing runner container_map = env.container_map map_client = env.map_client info = map_client.startup('runner')", "dest_branch = env.remote_ref.split('/', 1) changed_files = [] if check_changed: with hide('running', 'stdout'): changed_files", "yellow from fabric.contrib.console import confirm from fabric.contrib.files import exists @task def build_image(image='production'): if", "== 'force' check_changed = action == 'check' with cd(env.project_path): remote, dest_branch = env.remote_ref.split('/',", "build_image(environment) f.write('{}\\n'.format(environment)) @task def run_tests(): '''Step 2. Run tests and keep it real.'''", "failed!')) @task def push(): '''Step 3. Merge and push artifacts.''' # TODO: push", "on it to return exit_status = env.docker.wait(container=container_id, timeout=600) # Clean up logs =", "exit_status = env.docker.wait(container=container_id, timeout=600) # Clean up logs = env.docker.logs(container=container_id) for container in", "if exit_status == 0: print green('All tests passed.') else: abort(red('Some tests failed!')) @task", "image {image}'.format(image=image)) @task def build(action='check'): '''Step 1. 
Build artifacts (docker images) for all", "if check_changed: with hide('running', 'stdout'): changed_files = env.run('git diff-index --cached --name-only ' '{working_ref}'.format(**env),", "environments.''' force = action == 'force' check_changed = action == 'check' with cd(env.project_path):", "fabric.utils import abort from fabric.colors import cyan, green, red, yellow from fabric.contrib.console import", "Run tests and keep it real.''' # Start the testing runner container_map =", "= env.map_client info = map_client.startup('runner') container_id = info[0][1]['Id'] # Wait on it to", "'stdout'): changed_files = env.run('git diff-index --cached --name-only ' '{working_ref}'.format(**env), capture=True).splitlines() with open('images_rebuilt', 'w')", "# Clean up logs = env.docker.logs(container=container_id) for container in container_map.containers: map_client.shutdown(container) # Abort", "print green('Successfully built image {image}'.format(image=image)) @task def build(action='check'): '''Step 1. Build artifacts (docker", "env, execute, hide, local, prefix, prompt, puts, roles, run, sudo, task) from fabric.utils", "--name-only ' '{working_ref}'.format(**env), capture=True).splitlines() with open('images_rebuilt', 'w') as f: for environment in env.manager.get_rebuild_steps('common',", "the testing runner container_map = env.container_map map_client = env.map_client info = map_client.startup('runner') container_id", "dockermap.map.config import ClientConfiguration from dockermap.api import MappingDockerClient from fabric.api import (cd, env, execute,", "= action == 'check' with cd(env.project_path): remote, dest_branch = env.remote_ref.split('/', 1) changed_files =", "build(action='check'): '''Step 1. 
Build artifacts (docker images) for all environments.''' force = action", "# TODO: push to registry # Perform clean up env.run('rm -rf images_rebuilt') @task", "TODO: push to registry # Perform clean up env.run('rm -rf images_rebuilt') @task def", "runner container_map = env.container_map map_client = env.map_client info = map_client.startup('runner') container_id = info[0][1]['Id']", "import exists @task def build_image(image='production'): if env.docker.build(**env.images[image]['build']) is None: abort(red('Failed to build image", "action == 'check' with cd(env.project_path): remote, dest_branch = env.remote_ref.split('/', 1) changed_files = []", "map_client.startup('runner') container_id = info[0][1]['Id'] # Wait on it to return exit_status = env.docker.wait(container=container_id,", "0: print green('All tests passed.') else: abort(red('Some tests failed!')) @task def push(): '''Step", "it real.''' # Start the testing runner container_map = env.container_map map_client = env.map_client", "= env.docker.wait(container=container_id, timeout=600) # Clean up logs = env.docker.logs(container=container_id) for container in container_map.containers:", "cyan, green, red, yellow from fabric.contrib.console import confirm from fabric.contrib.files import exists @task", "build_image(image='production'): if env.docker.build(**env.images[image]['build']) is None: abort(red('Failed to build image {image}'.format(image=image))) else: print green('Successfully", "@task def build(action='check'): '''Step 1. 
Build artifacts (docker images) for all environments.''' force", "Build artifacts (docker images) for all environments.''' force = action == 'force' check_changed", "artifacts (docker images) for all environments.''' force = action == 'force' check_changed =", "hide('running', 'stdout'): changed_files = env.run('git diff-index --cached --name-only ' '{working_ref}'.format(**env), capture=True).splitlines() with open('images_rebuilt',", "to return exit_status = env.docker.wait(container=container_id, timeout=600) # Clean up logs = env.docker.logs(container=container_id) for", "@task def run_tests(): '''Step 2. Run tests and keep it real.''' # Start", "prefix, prompt, puts, roles, run, sudo, task) from fabric.utils import abort from fabric.colors", "with hide('running', 'stdout'): changed_files = env.run('git diff-index --cached --name-only ' '{working_ref}'.format(**env), capture=True).splitlines() with", "[] if check_changed: with hide('running', 'stdout'): changed_files = env.run('git diff-index --cached --name-only '", "Wait on it to return exit_status = env.docker.wait(container=container_id, timeout=600) # Clean up logs", "@task def deploy(): '''Step 4. 
Deploy artifacts.''' for cname in env.containers: env.map_client.shutdown(cname) env.map_client.startup(cname)", "with open('images_rebuilt', 'w') as f: for environment in env.manager.get_rebuild_steps('common', changed_files, force=force): build_image(environment) f.write('{}\\n'.format(environment))", "= info[0][1]['Id'] # Wait on it to return exit_status = env.docker.wait(container=container_id, timeout=600) #", "local, prefix, prompt, puts, roles, run, sudo, task) from fabric.utils import abort from", "import (cd, env, execute, hide, local, prefix, prompt, puts, roles, run, sudo, task)", "= [] if check_changed: with hide('running', 'stdout'): changed_files = env.run('git diff-index --cached --name-only", "registry # Perform clean up env.run('rm -rf images_rebuilt') @task def deploy(): '''Step 4.", "import abort from fabric.colors import cyan, green, red, yellow from fabric.contrib.console import confirm", "def run_tests(): '''Step 2. Run tests and keep it real.''' # Start the", "run, sudo, task) from fabric.utils import abort from fabric.colors import cyan, green, red,", "confirm from fabric.contrib.files import exists @task def build_image(image='production'): if env.docker.build(**env.images[image]['build']) is None: abort(red('Failed", "to build image {image}'.format(image=image))) else: print green('Successfully built image {image}'.format(image=image)) @task def build(action='check'):", "env.manager.get_rebuild_steps('common', changed_files, force=force): build_image(environment) f.write('{}\\n'.format(environment)) @task def run_tests(): '''Step 2. Run tests and", "hide, local, prefix, prompt, puts, roles, run, sudo, task) from fabric.utils import abort", "changed_files = env.run('git diff-index --cached --name-only ' '{working_ref}'.format(**env), capture=True).splitlines() with open('images_rebuilt', 'w') as", "in container_map.containers: map_client.shutdown(container) # Abort or succeed. 
print logs if exit_status == 0:", "Merge and push artifacts.''' # TODO: push to registry # Perform clean up", "env.run('git diff-index --cached --name-only ' '{working_ref}'.format(**env), capture=True).splitlines() with open('images_rebuilt', 'w') as f: for", "puts, roles, run, sudo, task) from fabric.utils import abort from fabric.colors import cyan,", "@task def build_image(image='production'): if env.docker.build(**env.images[image]['build']) is None: abort(red('Failed to build image {image}'.format(image=image))) else:", "f.write('{}\\n'.format(environment)) @task def run_tests(): '''Step 2. Run tests and keep it real.''' #", "and push artifacts.''' # TODO: push to registry # Perform clean up env.run('rm", "build image {image}'.format(image=image))) else: print green('Successfully built image {image}'.format(image=image)) @task def build(action='check'): '''Step", "up logs = env.docker.logs(container=container_id) for container in container_map.containers: map_client.shutdown(container) # Abort or succeed.", "import confirm from fabric.contrib.files import exists @task def build_image(image='production'): if env.docker.build(**env.images[image]['build']) is None:", "from fabric.contrib.files import exists @task def build_image(image='production'): if env.docker.build(**env.images[image]['build']) is None: abort(red('Failed to", "Abort or succeed. print logs if exit_status == 0: print green('All tests passed.')", "MappingDockerClient from fabric.api import (cd, env, execute, hide, local, prefix, prompt, puts, roles,", "image {image}'.format(image=image))) else: print green('Successfully built image {image}'.format(image=image)) @task def build(action='check'): '''Step 1.", "abort(red('Some tests failed!')) @task def push(): '''Step 3. Merge and push artifacts.''' #", "container_map.containers: map_client.shutdown(container) # Abort or succeed. 
print logs if exit_status == 0: print", "diff-index --cached --name-only ' '{working_ref}'.format(**env), capture=True).splitlines() with open('images_rebuilt', 'w') as f: for environment", "for container in container_map.containers: map_client.shutdown(container) # Abort or succeed. print logs if exit_status", "fabric.colors import cyan, green, red, yellow from fabric.contrib.console import confirm from fabric.contrib.files import", "= env.run('git diff-index --cached --name-only ' '{working_ref}'.format(**env), capture=True).splitlines() with open('images_rebuilt', 'w') as f:", "timeout=600) # Clean up logs = env.docker.logs(container=container_id) for container in container_map.containers: map_client.shutdown(container) #", "with cd(env.project_path): remote, dest_branch = env.remote_ref.split('/', 1) changed_files = [] if check_changed: with", "built image {image}'.format(image=image)) @task def build(action='check'): '''Step 1. Build artifacts (docker images) for", "# Start the testing runner container_map = env.container_map map_client = env.map_client info =", "= env.docker.logs(container=container_id) for container in container_map.containers: map_client.shutdown(container) # Abort or succeed. print logs", "in env.manager.get_rebuild_steps('common', changed_files, force=force): build_image(environment) f.write('{}\\n'.format(environment)) @task def run_tests(): '''Step 2. 
Run tests", "execute, hide, local, prefix, prompt, puts, roles, run, sudo, task) from fabric.utils import", "it to return exit_status = env.docker.wait(container=container_id, timeout=600) # Clean up logs = env.docker.logs(container=container_id)", "fabric.contrib.console import confirm from fabric.contrib.files import exists @task def build_image(image='production'): if env.docker.build(**env.images[image]['build']) is", "exit_status == 0: print green('All tests passed.') else: abort(red('Some tests failed!')) @task def", "passed.') else: abort(red('Some tests failed!')) @task def push(): '''Step 3. Merge and push", "is None: abort(red('Failed to build image {image}'.format(image=image))) else: print green('Successfully built image {image}'.format(image=image))", "== 'check' with cd(env.project_path): remote, dest_branch = env.remote_ref.split('/', 1) changed_files = [] if", "def build_image(image='production'): if env.docker.build(**env.images[image]['build']) is None: abort(red('Failed to build image {image}'.format(image=image))) else: print", "run_tests(): '''Step 2. Run tests and keep it real.''' # Start the testing", "(cd, env, execute, hide, local, prefix, prompt, puts, roles, run, sudo, task) from" ]
[ "import migrations, models class Migration(migrations.Migration): dependencies = [ ('control_produccion', '0007_auto_20160623_1052'), ] operations =", "model_name='order_process', name='order_process_user_finished', ), migrations.RemoveField( model_name='order_process', name='order_process_user_started', ), migrations.AddField( model_name='order', name='order_sh_id', field=models.PositiveSmallIntegerField(default=0), preserve_default=False, ),", "name='order_sh_id', field=models.PositiveSmallIntegerField(default=0), preserve_default=False, ), migrations.AddField( model_name='process', name='process_group_sh_id', field=models.PositiveSmallIntegerField(default=0), preserve_default=False, ), migrations.AddField( model_name='process', name='process_sh_id',", "[ ('control_produccion', '0007_auto_20160623_1052'), ] operations = [ migrations.RemoveField( model_name='order', name='order_date_created', ), migrations.RemoveField( model_name='order',", "migrations.RemoveField( model_name='order_process', name='order_process_user_started', ), migrations.AddField( model_name='order', name='order_sh_id', field=models.PositiveSmallIntegerField(default=0), preserve_default=False, ), migrations.AddField( model_name='process', name='process_group_sh_id',", "'0007_auto_20160623_1052'), ] operations = [ migrations.RemoveField( model_name='order', name='order_date_created', ), migrations.RemoveField( model_name='order', name='order_machine', ),", "), migrations.RemoveField( model_name='order', name='order_machine', ), migrations.RemoveField( model_name='order_process', name='order_process_datetime_finished', ), migrations.RemoveField( model_name='order_process', name='order_process_datetime_pause_start', ),", "unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('control_produccion', '0007_auto_20160623_1052'),", "] operations = [ migrations.RemoveField( model_name='order', name='order_date_created', ), 
migrations.RemoveField( model_name='order', name='order_machine', ), migrations.RemoveField(", "model_name='order', name='order_date_created', ), migrations.RemoveField( model_name='order', name='order_machine', ), migrations.RemoveField( model_name='order_process', name='order_process_datetime_finished', ), migrations.RemoveField( model_name='order_process',", "migrations.RemoveField( model_name='order_process', name='order_process_seconds_paused', ), migrations.RemoveField( model_name='order_process', name='order_process_user_finished', ), migrations.RemoveField( model_name='order_process', name='order_process_user_started', ), migrations.AddField(", "from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "<filename>control_produccion/migrations/0008_auto_20160808_1812.py # -*- coding: utf-8 -*- # Generated by Django 1.9.4 on 2016-08-09", "# Generated by Django 1.9.4 on 2016-08-09 00:12 from __future__ import unicode_literals from", "model_name='order', name='order_sh_id', field=models.PositiveSmallIntegerField(default=0), preserve_default=False, ), migrations.AddField( model_name='process', name='process_group_sh_id', field=models.PositiveSmallIntegerField(default=0), preserve_default=False, ), migrations.AddField( model_name='process',", "model_name='order_process', name='order_process_datetime_started', ), migrations.RemoveField( model_name='order_process', name='order_process_is_paused', ), migrations.RemoveField( model_name='order_process', name='order_process_seconds_paused', ), migrations.RemoveField( model_name='order_process',", "dependencies = [ ('control_produccion', '0007_auto_20160623_1052'), ] operations = [ migrations.RemoveField( model_name='order', name='order_date_created', ),", "), migrations.RemoveField( model_name='order_process', name='order_process_datetime_started', ), migrations.RemoveField( model_name='order_process', name='order_process_is_paused', ), 
migrations.RemoveField( model_name='order_process', name='order_process_seconds_paused', ),", "name='order_machine', ), migrations.RemoveField( model_name='order_process', name='order_process_datetime_finished', ), migrations.RemoveField( model_name='order_process', name='order_process_datetime_pause_start', ), migrations.RemoveField( model_name='order_process', name='order_process_datetime_started',", "model_name='order_process', name='order_process_is_paused', ), migrations.RemoveField( model_name='order_process', name='order_process_seconds_paused', ), migrations.RemoveField( model_name='order_process', name='order_process_user_finished', ), migrations.RemoveField( model_name='order_process',", "field=models.PositiveSmallIntegerField(default=0), preserve_default=False, ), migrations.AddField( model_name='process', name='process_group_sh_id', field=models.PositiveSmallIntegerField(default=0), preserve_default=False, ), migrations.AddField( model_name='process', name='process_sh_id', field=models.PositiveSmallIntegerField(default=0),", "), migrations.RemoveField( model_name='order_process', name='order_process_user_started', ), migrations.AddField( model_name='order', name='order_sh_id', field=models.PositiveSmallIntegerField(default=0), preserve_default=False, ), migrations.AddField( model_name='process',", "class Migration(migrations.Migration): dependencies = [ ('control_produccion', '0007_auto_20160623_1052'), ] operations = [ migrations.RemoveField( model_name='order',", "= [ ('control_produccion', '0007_auto_20160623_1052'), ] operations = [ migrations.RemoveField( model_name='order', name='order_date_created', ), migrations.RemoveField(", "preserve_default=False, ), migrations.AddField( model_name='process', name='process_group_sh_id', field=models.PositiveSmallIntegerField(default=0), preserve_default=False, ), migrations.AddField( model_name='process', name='process_sh_id', field=models.PositiveSmallIntegerField(default=0), preserve_default=False,", "from 
django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('control_produccion', '0007_auto_20160623_1052'), ]", "1.9.4 on 2016-08-09 00:12 from __future__ import unicode_literals from django.db import migrations, models", "migrations.RemoveField( model_name='order', name='order_machine', ), migrations.RemoveField( model_name='order_process', name='order_process_datetime_finished', ), migrations.RemoveField( model_name='order_process', name='order_process_datetime_pause_start', ), migrations.RemoveField(", "model_name='order_process', name='order_process_seconds_paused', ), migrations.RemoveField( model_name='order_process', name='order_process_user_finished', ), migrations.RemoveField( model_name='order_process', name='order_process_user_started', ), migrations.AddField( model_name='order',", "name='order_process_user_finished', ), migrations.RemoveField( model_name='order_process', name='order_process_user_started', ), migrations.AddField( model_name='order', name='order_sh_id', field=models.PositiveSmallIntegerField(default=0), preserve_default=False, ), migrations.AddField(", "-*- coding: utf-8 -*- # Generated by Django 1.9.4 on 2016-08-09 00:12 from", "import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('control_produccion',", "name='order_process_user_started', ), migrations.AddField( model_name='order', name='order_sh_id', field=models.PositiveSmallIntegerField(default=0), preserve_default=False, ), migrations.AddField( model_name='process', name='process_group_sh_id', field=models.PositiveSmallIntegerField(default=0), preserve_default=False,", "), migrations.AddField( model_name='order', name='order_sh_id', field=models.PositiveSmallIntegerField(default=0), preserve_default=False, ), migrations.AddField( model_name='process', name='process_group_sh_id', field=models.PositiveSmallIntegerField(default=0), preserve_default=False, ),", 
"name='order_process_is_paused', ), migrations.RemoveField( model_name='order_process', name='order_process_seconds_paused', ), migrations.RemoveField( model_name='order_process', name='order_process_user_finished', ), migrations.RemoveField( model_name='order_process', name='order_process_user_started',", "Migration(migrations.Migration): dependencies = [ ('control_produccion', '0007_auto_20160623_1052'), ] operations = [ migrations.RemoveField( model_name='order', name='order_date_created',", "models class Migration(migrations.Migration): dependencies = [ ('control_produccion', '0007_auto_20160623_1052'), ] operations = [ migrations.RemoveField(", "name='order_process_datetime_pause_start', ), migrations.RemoveField( model_name='order_process', name='order_process_datetime_started', ), migrations.RemoveField( model_name='order_process', name='order_process_is_paused', ), migrations.RemoveField( model_name='order_process', name='order_process_seconds_paused',", "[ migrations.RemoveField( model_name='order', name='order_date_created', ), migrations.RemoveField( model_name='order', name='order_machine', ), migrations.RemoveField( model_name='order_process', name='order_process_datetime_finished', ),", "by Django 1.9.4 on 2016-08-09 00:12 from __future__ import unicode_literals from django.db import", "= [ migrations.RemoveField( model_name='order', name='order_date_created', ), migrations.RemoveField( model_name='order', name='order_machine', ), migrations.RemoveField( model_name='order_process', name='order_process_datetime_finished',", "operations = [ migrations.RemoveField( model_name='order', name='order_date_created', ), migrations.RemoveField( model_name='order', name='order_machine', ), migrations.RemoveField( model_name='order_process',", "), migrations.RemoveField( model_name='order_process', name='order_process_datetime_pause_start', ), migrations.RemoveField( model_name='order_process', name='order_process_datetime_started', ), migrations.RemoveField( 
model_name='order_process', name='order_process_is_paused', ),", "migrations.AddField( model_name='process', name='process_group_sh_id', field=models.PositiveSmallIntegerField(default=0), preserve_default=False, ), migrations.AddField( model_name='process', name='process_sh_id', field=models.PositiveSmallIntegerField(default=0), preserve_default=False, ), ]", "migrations.AddField( model_name='order', name='order_sh_id', field=models.PositiveSmallIntegerField(default=0), preserve_default=False, ), migrations.AddField( model_name='process', name='process_group_sh_id', field=models.PositiveSmallIntegerField(default=0), preserve_default=False, ), migrations.AddField(", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('control_produccion', '0007_auto_20160623_1052'), ] operations", "('control_produccion', '0007_auto_20160623_1052'), ] operations = [ migrations.RemoveField( model_name='order', name='order_date_created', ), migrations.RemoveField( model_name='order', name='order_machine',", "# -*- coding: utf-8 -*- # Generated by Django 1.9.4 on 2016-08-09 00:12", "utf-8 -*- # Generated by Django 1.9.4 on 2016-08-09 00:12 from __future__ import", "__future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "), migrations.RemoveField( model_name='order_process', name='order_process_user_finished', ), migrations.RemoveField( model_name='order_process', name='order_process_user_started', ), migrations.AddField( model_name='order', name='order_sh_id', field=models.PositiveSmallIntegerField(default=0),", "model_name='order_process', name='order_process_datetime_pause_start', ), migrations.RemoveField( model_name='order_process', name='order_process_datetime_started', ), migrations.RemoveField( model_name='order_process', name='order_process_is_paused', ), migrations.RemoveField( model_name='order_process',", "), migrations.RemoveField( 
model_name='order_process', name='order_process_datetime_finished', ), migrations.RemoveField( model_name='order_process', name='order_process_datetime_pause_start', ), migrations.RemoveField( model_name='order_process', name='order_process_datetime_started', ),", "-*- # Generated by Django 1.9.4 on 2016-08-09 00:12 from __future__ import unicode_literals", "migrations.RemoveField( model_name='order_process', name='order_process_is_paused', ), migrations.RemoveField( model_name='order_process', name='order_process_seconds_paused', ), migrations.RemoveField( model_name='order_process', name='order_process_user_finished', ), migrations.RemoveField(", "migrations.RemoveField( model_name='order', name='order_date_created', ), migrations.RemoveField( model_name='order', name='order_machine', ), migrations.RemoveField( model_name='order_process', name='order_process_datetime_finished', ), migrations.RemoveField(", "migrations.RemoveField( model_name='order_process', name='order_process_datetime_pause_start', ), migrations.RemoveField( model_name='order_process', name='order_process_datetime_started', ), migrations.RemoveField( model_name='order_process', name='order_process_is_paused', ), migrations.RemoveField(", "), migrations.AddField( model_name='process', name='process_group_sh_id', field=models.PositiveSmallIntegerField(default=0), preserve_default=False, ), migrations.AddField( model_name='process', name='process_sh_id', field=models.PositiveSmallIntegerField(default=0), preserve_default=False, ),", "model_name='order_process', name='order_process_user_started', ), migrations.AddField( model_name='order', name='order_sh_id', field=models.PositiveSmallIntegerField(default=0), preserve_default=False, ), migrations.AddField( model_name='process', name='process_group_sh_id', field=models.PositiveSmallIntegerField(default=0),", "model_name='order_process', name='order_process_datetime_finished', ), migrations.RemoveField( model_name='order_process', 
name='order_process_datetime_pause_start', ), migrations.RemoveField( model_name='order_process', name='order_process_datetime_started', ), migrations.RemoveField( model_name='order_process',", "name='order_process_datetime_finished', ), migrations.RemoveField( model_name='order_process', name='order_process_datetime_pause_start', ), migrations.RemoveField( model_name='order_process', name='order_process_datetime_started', ), migrations.RemoveField( model_name='order_process', name='order_process_is_paused',", "name='order_process_seconds_paused', ), migrations.RemoveField( model_name='order_process', name='order_process_user_finished', ), migrations.RemoveField( model_name='order_process', name='order_process_user_started', ), migrations.AddField( model_name='order', name='order_sh_id',", "2016-08-09 00:12 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration):", "Generated by Django 1.9.4 on 2016-08-09 00:12 from __future__ import unicode_literals from django.db", "migrations, models class Migration(migrations.Migration): dependencies = [ ('control_produccion', '0007_auto_20160623_1052'), ] operations = [", "name='order_date_created', ), migrations.RemoveField( model_name='order', name='order_machine', ), migrations.RemoveField( model_name='order_process', name='order_process_datetime_finished', ), migrations.RemoveField( model_name='order_process', name='order_process_datetime_pause_start',", "), migrations.RemoveField( model_name='order_process', name='order_process_is_paused', ), migrations.RemoveField( model_name='order_process', name='order_process_seconds_paused', ), migrations.RemoveField( model_name='order_process', name='order_process_user_finished', ),", "Django 1.9.4 on 2016-08-09 00:12 from __future__ import unicode_literals from django.db import migrations,", "00:12 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): 
dependencies", "model_name='order', name='order_machine', ), migrations.RemoveField( model_name='order_process', name='order_process_datetime_finished', ), migrations.RemoveField( model_name='order_process', name='order_process_datetime_pause_start', ), migrations.RemoveField( model_name='order_process',", "migrations.RemoveField( model_name='order_process', name='order_process_user_finished', ), migrations.RemoveField( model_name='order_process', name='order_process_user_started', ), migrations.AddField( model_name='order', name='order_sh_id', field=models.PositiveSmallIntegerField(default=0), preserve_default=False,", "migrations.RemoveField( model_name='order_process', name='order_process_datetime_finished', ), migrations.RemoveField( model_name='order_process', name='order_process_datetime_pause_start', ), migrations.RemoveField( model_name='order_process', name='order_process_datetime_started', ), migrations.RemoveField(", "coding: utf-8 -*- # Generated by Django 1.9.4 on 2016-08-09 00:12 from __future__", "name='order_process_datetime_started', ), migrations.RemoveField( model_name='order_process', name='order_process_is_paused', ), migrations.RemoveField( model_name='order_process', name='order_process_seconds_paused', ), migrations.RemoveField( model_name='order_process', name='order_process_user_finished',", "migrations.RemoveField( model_name='order_process', name='order_process_datetime_started', ), migrations.RemoveField( model_name='order_process', name='order_process_is_paused', ), migrations.RemoveField( model_name='order_process', name='order_process_seconds_paused', ), migrations.RemoveField(", "on 2016-08-09 00:12 from __future__ import unicode_literals from django.db import migrations, models class", "), migrations.RemoveField( model_name='order_process', name='order_process_seconds_paused', ), migrations.RemoveField( model_name='order_process', name='order_process_user_finished', ), migrations.RemoveField( model_name='order_process', 
name='order_process_user_started', )," ]
[ "# Example script to use mlfb class #a = mlfb_test4.mlfb_test4(1) #a = mlfb.mlfb_test4(1)", "class #a = mlfb_test4.mlfb_test4(1) #a = mlfb.mlfb_test4(1) a = mlfb.mlfb(1) #input1=999 #input1=99.88 #input1=99,66", "import mlfb def main(): # Example script to use mlfb class #a =", "# (type_in, 'null', '20180226T165000',666,'testpara1',665)) #type_in,time_in,location_id_in,parameter_in,value_in input_type='4test2288' input_source='null' input_time='20180226T165000' input_location_id=455 input_parameter='test2para' input_value=441 #a.insert_row_trains_1('test99') #a.insert_row_trains_1(input_type,input_source,input_time,input_location_id,input_parameter,input_value)", "use mlfb class #a = mlfb_test4.mlfb_test4(1) #a = mlfb.mlfb_test4(1) a = mlfb.mlfb(1) #input1=999", "#a = mlfb.mlfb_test4(1) a = mlfb.mlfb(1) #input1=999 #input1=99.88 #input1=99,66 #input1=99.55 #input1='atest9988' #input1=97 #input1='atest1188'", "from lib import mlfb def main(): # Example script to use mlfb class", "Example script to use mlfb class #a = mlfb_test4.mlfb_test4(1) #a = mlfb.mlfb_test4(1) a", "= mlfb_test4.mlfb_test4(1) #a = mlfb.mlfb_test4(1) a = mlfb.mlfb(1) #input1=999 #input1=99.88 #input1=99,66 #input1=99.55 #input1='atest9988'", "mlfb_test4.mlfb_test4(1) #a = mlfb.mlfb_test4(1) a = mlfb.mlfb(1) #input1=999 #input1=99.88 #input1=99,66 #input1=99.55 #input1='atest9988' #input1=97", "import mlfb #from lib import mlfb_test4 from lib import mlfb def main(): #", "input_type='4test2288' input_source='null' input_time='20180226T165000' input_location_id=455 input_parameter='test2para' input_value=441 #a.insert_row_trains_1('test99') #a.insert_row_trains_1(input_type,input_source,input_time,input_location_id,input_parameter,input_value) #input_location_id=5 input_location_id=1 #input_parameter='temperature' input_parameter='temperature' input_value=-9", "(type_in, 'null', '20180226T165000',666,'testpara1',665)) #type_in,time_in,location_id_in,parameter_in,value_in input_type='4test2288' 
input_source='null' input_time='20180226T165000' input_location_id=455 input_parameter='test2para' input_value=441 #a.insert_row_trains_1('test99') #a.insert_row_trains_1(input_type,input_source,input_time,input_location_id,input_parameter,input_value) #input_location_id=5", "utf-8 -*- from configparser import ConfigParser #from lib import mlfb #from lib import", "coding: utf-8 -*- from configparser import ConfigParser #from lib import mlfb #from lib", "= mlfb.mlfb(1) #input1=999 #input1=99.88 #input1=99,66 #input1=99.55 #input1='atest9988' #input1=97 #input1='atest1188' # (type_in, 'null', '20180226T165000',666,'testpara1',665))", "input_parameter='test2para' input_value=441 #a.insert_row_trains_1('test99') #a.insert_row_trains_1(input_type,input_source,input_time,input_location_id,input_parameter,input_value) #input_location_id=5 input_location_id=1 #input_parameter='temperature' input_parameter='temperature' input_value=-9 # get rows a.get_rows_from_postgre_to_numpy(input_parameter,input_value)", "input_time='20180226T165000' input_location_id=455 input_parameter='test2para' input_value=441 #a.insert_row_trains_1('test99') #a.insert_row_trains_1(input_type,input_source,input_time,input_location_id,input_parameter,input_value) #input_location_id=5 input_location_id=1 #input_parameter='temperature' input_parameter='temperature' input_value=-9 # get", "#a = mlfb_test4.mlfb_test4(1) #a = mlfb.mlfb_test4(1) a = mlfb.mlfb(1) #input1=999 #input1=99.88 #input1=99,66 #input1=99.55", "#!/usr/bin/python # -*- coding: utf-8 -*- from configparser import ConfigParser #from lib import", "lib import mlfb def main(): # Example script to use mlfb class #a", "#from lib import mlfb_test4 from lib import mlfb def main(): # Example script", "mlfb_test4 from lib import mlfb def main(): # Example script to use mlfb", "#a.insert_row_trains_1(input_type,input_source,input_time,input_location_id,input_parameter,input_value) #input_location_id=5 input_location_id=1 
#input_parameter='temperature' input_parameter='temperature' input_value=-9 # get rows a.get_rows_from_postgre_to_numpy(input_parameter,input_value) if __name__=='__main__': main()", "#a.insert_row_trains_1('test99') #a.insert_row_trains_1(input_type,input_source,input_time,input_location_id,input_parameter,input_value) #input_location_id=5 input_location_id=1 #input_parameter='temperature' input_parameter='temperature' input_value=-9 # get rows a.get_rows_from_postgre_to_numpy(input_parameter,input_value) if __name__=='__main__':", "'20180226T165000',666,'testpara1',665)) #type_in,time_in,location_id_in,parameter_in,value_in input_type='4test2288' input_source='null' input_time='20180226T165000' input_location_id=455 input_parameter='test2para' input_value=441 #a.insert_row_trains_1('test99') #a.insert_row_trains_1(input_type,input_source,input_time,input_location_id,input_parameter,input_value) #input_location_id=5 input_location_id=1 #input_parameter='temperature'", "# -*- coding: utf-8 -*- from configparser import ConfigParser #from lib import mlfb", "import mlfb_test4 from lib import mlfb def main(): # Example script to use", "lib import mlfb #from lib import mlfb_test4 from lib import mlfb def main():", "#input1='atest1188' # (type_in, 'null', '20180226T165000',666,'testpara1',665)) #type_in,time_in,location_id_in,parameter_in,value_in input_type='4test2288' input_source='null' input_time='20180226T165000' input_location_id=455 input_parameter='test2para' input_value=441 #a.insert_row_trains_1('test99')", "main(): # Example script to use mlfb class #a = mlfb_test4.mlfb_test4(1) #a =", "= mlfb.mlfb_test4(1) a = mlfb.mlfb(1) #input1=999 #input1=99.88 #input1=99,66 #input1=99.55 #input1='atest9988' #input1=97 #input1='atest1188' #", "ConfigParser #from lib import mlfb #from lib import mlfb_test4 from lib import mlfb", "input_source='null' input_time='20180226T165000' input_location_id=455 input_parameter='test2para' input_value=441 
#a.insert_row_trains_1('test99') #a.insert_row_trains_1(input_type,input_source,input_time,input_location_id,input_parameter,input_value) #input_location_id=5 input_location_id=1 #input_parameter='temperature' input_parameter='temperature' input_value=-9 #", "script to use mlfb class #a = mlfb_test4.mlfb_test4(1) #a = mlfb.mlfb_test4(1) a =", "mlfb.mlfb(1) #input1=999 #input1=99.88 #input1=99,66 #input1=99.55 #input1='atest9988' #input1=97 #input1='atest1188' # (type_in, 'null', '20180226T165000',666,'testpara1',665)) #type_in,time_in,location_id_in,parameter_in,value_in", "from configparser import ConfigParser #from lib import mlfb #from lib import mlfb_test4 from", "#input1=97 #input1='atest1188' # (type_in, 'null', '20180226T165000',666,'testpara1',665)) #type_in,time_in,location_id_in,parameter_in,value_in input_type='4test2288' input_source='null' input_time='20180226T165000' input_location_id=455 input_parameter='test2para' input_value=441", "#input1=99.55 #input1='atest9988' #input1=97 #input1='atest1188' # (type_in, 'null', '20180226T165000',666,'testpara1',665)) #type_in,time_in,location_id_in,parameter_in,value_in input_type='4test2288' input_source='null' input_time='20180226T165000' input_location_id=455", "configparser import ConfigParser #from lib import mlfb #from lib import mlfb_test4 from lib", "'null', '20180226T165000',666,'testpara1',665)) #type_in,time_in,location_id_in,parameter_in,value_in input_type='4test2288' input_source='null' input_time='20180226T165000' input_location_id=455 input_parameter='test2para' input_value=441 #a.insert_row_trains_1('test99') #a.insert_row_trains_1(input_type,input_source,input_time,input_location_id,input_parameter,input_value) #input_location_id=5 input_location_id=1", "to use mlfb class #a = mlfb_test4.mlfb_test4(1) #a = mlfb.mlfb_test4(1) a = mlfb.mlfb(1)", "input_value=441 #a.insert_row_trains_1('test99') 
#a.insert_row_trains_1(input_type,input_source,input_time,input_location_id,input_parameter,input_value) #input_location_id=5 input_location_id=1 #input_parameter='temperature' input_parameter='temperature' input_value=-9 # get rows a.get_rows_from_postgre_to_numpy(input_parameter,input_value) if", "def main(): # Example script to use mlfb class #a = mlfb_test4.mlfb_test4(1) #a", "#type_in,time_in,location_id_in,parameter_in,value_in input_type='4test2288' input_source='null' input_time='20180226T165000' input_location_id=455 input_parameter='test2para' input_value=441 #a.insert_row_trains_1('test99') #a.insert_row_trains_1(input_type,input_source,input_time,input_location_id,input_parameter,input_value) #input_location_id=5 input_location_id=1 #input_parameter='temperature' input_parameter='temperature'", "#input1=999 #input1=99.88 #input1=99,66 #input1=99.55 #input1='atest9988' #input1=97 #input1='atest1188' # (type_in, 'null', '20180226T165000',666,'testpara1',665)) #type_in,time_in,location_id_in,parameter_in,value_in input_type='4test2288'", "-*- from configparser import ConfigParser #from lib import mlfb #from lib import mlfb_test4", "<gh_stars>0 #!/usr/bin/python # -*- coding: utf-8 -*- from configparser import ConfigParser #from lib", "mlfb #from lib import mlfb_test4 from lib import mlfb def main(): # Example", "-*- coding: utf-8 -*- from configparser import ConfigParser #from lib import mlfb #from", "mlfb def main(): # Example script to use mlfb class #a = mlfb_test4.mlfb_test4(1)", "#input1='atest9988' #input1=97 #input1='atest1188' # (type_in, 'null', '20180226T165000',666,'testpara1',665)) #type_in,time_in,location_id_in,parameter_in,value_in input_type='4test2288' input_source='null' input_time='20180226T165000' input_location_id=455 input_parameter='test2para'", "a = mlfb.mlfb(1) #input1=999 #input1=99.88 #input1=99,66 #input1=99.55 #input1='atest9988' #input1=97 #input1='atest1188' # (type_in, 'null',", "mlfb.mlfb_test4(1) a = mlfb.mlfb(1) 
#input1=999 #input1=99.88 #input1=99,66 #input1=99.55 #input1='atest9988' #input1=97 #input1='atest1188' # (type_in,", "lib import mlfb_test4 from lib import mlfb def main(): # Example script to", "#input1=99,66 #input1=99.55 #input1='atest9988' #input1=97 #input1='atest1188' # (type_in, 'null', '20180226T165000',666,'testpara1',665)) #type_in,time_in,location_id_in,parameter_in,value_in input_type='4test2288' input_source='null' input_time='20180226T165000'", "import ConfigParser #from lib import mlfb #from lib import mlfb_test4 from lib import", "input_location_id=455 input_parameter='test2para' input_value=441 #a.insert_row_trains_1('test99') #a.insert_row_trains_1(input_type,input_source,input_time,input_location_id,input_parameter,input_value) #input_location_id=5 input_location_id=1 #input_parameter='temperature' input_parameter='temperature' input_value=-9 # get rows", "#from lib import mlfb #from lib import mlfb_test4 from lib import mlfb def", "mlfb class #a = mlfb_test4.mlfb_test4(1) #a = mlfb.mlfb_test4(1) a = mlfb.mlfb(1) #input1=999 #input1=99.88", "#input1=99.88 #input1=99,66 #input1=99.55 #input1='atest9988' #input1=97 #input1='atest1188' # (type_in, 'null', '20180226T165000',666,'testpara1',665)) #type_in,time_in,location_id_in,parameter_in,value_in input_type='4test2288' input_source='null'" ]
[ "Community', author_email='<EMAIL>', url='https://github.com/RLBot/RLBotTraining', keywords=['rocket-league', 'training', 'train'], license='MIT License', classifiers=[ \"Programming Language :: Python", "native import statements as we don't want to depend on the package being", "long_description = readme_file.read() setuptools.setup( name='rlbottraining', packages=setuptools.find_packages(), install_requires=[ 'rlbot>=1.25.0', 'docopt', 'watchdog', 'numpy', ], python_requires='>=3.7.0',", ":: OSI Approved :: MIT License\", \"Operating System :: Microsoft :: Windows\", ],", "import setuptools import importlib # Avoid native import statements as we don't want", "yet. def load_module(module_name, full_path): spec = importlib.util.spec_from_file_location(module_name, full_path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) return", "spec = importlib.util.spec_from_file_location(module_name, full_path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) return module version = load_module(\"rlbottraining.version\",", "return module version = load_module(\"rlbottraining.version\", \"rlbottraining/version.py\") paths = load_module(\"rlbottraining.paths\", \"rlbottraining/paths.py\") with open(\"README.md\", \"r\")", "`python -m rlbottraining` 'console_scripts': ['rlbottraining = rlbottraining.__main__:main'] }, package_data={ 'rlbottraining': [ f'{paths._match_config_dir}/*.cfg', f'{paths._example_bot_dir}/*/*.cfg',", "writing training for Rocket League bots.', long_description=long_description, long_description_content_type=\"text/markdown\", author='RLBot Community', author_email='<EMAIL>', url='https://github.com/RLBot/RLBotTraining', keywords=['rocket-league',", "import statements as we don't want to depend on the package being created", "framework for writing training for Rocket League bots.', long_description=long_description, long_description_content_type=\"text/markdown\", author='RLBot Community', 
author_email='<EMAIL>',", "for Rocket League bots.', long_description=long_description, long_description_content_type=\"text/markdown\", author='RLBot Community', author_email='<EMAIL>', url='https://github.com/RLBot/RLBotTraining', keywords=['rocket-league', 'training', 'train'],", "Microsoft :: Windows\", ], entry_points={ # Allow people to run `rlbottraining` instead of", "as readme_file: long_description = readme_file.read() setuptools.setup( name='rlbottraining', packages=setuptools.find_packages(), install_requires=[ 'rlbot>=1.25.0', 'docopt', 'watchdog', 'numpy',", "importlib.util.spec_from_file_location(module_name, full_path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) return module version = load_module(\"rlbottraining.version\", \"rlbottraining/version.py\") paths", "'watchdog', 'numpy', ], python_requires='>=3.7.0', version=version.__version__, description='A framework for writing training for Rocket League", "with open(\"README.md\", \"r\") as readme_file: long_description = readme_file.read() setuptools.setup( name='rlbottraining', packages=setuptools.find_packages(), install_requires=[ 'rlbot>=1.25.0',", "= readme_file.read() setuptools.setup( name='rlbottraining', packages=setuptools.find_packages(), install_requires=[ 'rlbot>=1.25.0', 'docopt', 'watchdog', 'numpy', ], python_requires='>=3.7.0', version=version.__version__,", "url='https://github.com/RLBot/RLBotTraining', keywords=['rocket-league', 'training', 'train'], license='MIT License', classifiers=[ \"Programming Language :: Python :: 3\",", "package being created yet. 
def load_module(module_name, full_path): spec = importlib.util.spec_from_file_location(module_name, full_path) module =", "'console_scripts': ['rlbottraining = rlbottraining.__main__:main'] }, package_data={ 'rlbottraining': [ f'{paths._match_config_dir}/*.cfg', f'{paths._example_bot_dir}/*/*.cfg', str(paths._website_static_source), str(paths._example_rl_custom_training_json), ]", "load_module(\"rlbottraining.version\", \"rlbottraining/version.py\") paths = load_module(\"rlbottraining.paths\", \"rlbottraining/paths.py\") with open(\"README.md\", \"r\") as readme_file: long_description =", "instead of `python -m rlbottraining` 'console_scripts': ['rlbottraining = rlbottraining.__main__:main'] }, package_data={ 'rlbottraining': [", "importlib # Avoid native import statements as we don't want to depend on", "load_module(module_name, full_path): spec = importlib.util.spec_from_file_location(module_name, full_path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) return module version", "3\", \"License :: OSI Approved :: MIT License\", \"Operating System :: Microsoft ::", "training for Rocket League bots.', long_description=long_description, long_description_content_type=\"text/markdown\", author='RLBot Community', author_email='<EMAIL>', url='https://github.com/RLBot/RLBotTraining', keywords=['rocket-league', 'training',", "= importlib.util.module_from_spec(spec) spec.loader.exec_module(module) return module version = load_module(\"rlbottraining.version\", \"rlbottraining/version.py\") paths = load_module(\"rlbottraining.paths\", \"rlbottraining/paths.py\")", "'train'], license='MIT License', classifiers=[ \"Programming Language :: Python :: 3\", \"License :: OSI", "= load_module(\"rlbottraining.paths\", \"rlbottraining/paths.py\") with open(\"README.md\", \"r\") as readme_file: long_description = readme_file.read() setuptools.setup( name='rlbottraining',", "module version = load_module(\"rlbottraining.version\", 
\"rlbottraining/version.py\") paths = load_module(\"rlbottraining.paths\", \"rlbottraining/paths.py\") with open(\"README.md\", \"r\") as", "full_path): spec = importlib.util.spec_from_file_location(module_name, full_path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) return module version =", "def load_module(module_name, full_path): spec = importlib.util.spec_from_file_location(module_name, full_path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) return module", "System :: Microsoft :: Windows\", ], entry_points={ # Allow people to run `rlbottraining`", "run `rlbottraining` instead of `python -m rlbottraining` 'console_scripts': ['rlbottraining = rlbottraining.__main__:main'] }, package_data={", "open(\"README.md\", \"r\") as readme_file: long_description = readme_file.read() setuptools.setup( name='rlbottraining', packages=setuptools.find_packages(), install_requires=[ 'rlbot>=1.25.0', 'docopt',", "\"License :: OSI Approved :: MIT License\", \"Operating System :: Microsoft :: Windows\",", "statements as we don't want to depend on the package being created yet.", "to depend on the package being created yet. 
def load_module(module_name, full_path): spec =", "spec.loader.exec_module(module) return module version = load_module(\"rlbottraining.version\", \"rlbottraining/version.py\") paths = load_module(\"rlbottraining.paths\", \"rlbottraining/paths.py\") with open(\"README.md\",", "classifiers=[ \"Programming Language :: Python :: 3\", \"License :: OSI Approved :: MIT", "bots.', long_description=long_description, long_description_content_type=\"text/markdown\", author='RLBot Community', author_email='<EMAIL>', url='https://github.com/RLBot/RLBotTraining', keywords=['rocket-league', 'training', 'train'], license='MIT License', classifiers=[", "to run `rlbottraining` instead of `python -m rlbottraining` 'console_scripts': ['rlbottraining = rlbottraining.__main__:main'] },", "entry_points={ # Allow people to run `rlbottraining` instead of `python -m rlbottraining` 'console_scripts':", "Rocket League bots.', long_description=long_description, long_description_content_type=\"text/markdown\", author='RLBot Community', author_email='<EMAIL>', url='https://github.com/RLBot/RLBotTraining', keywords=['rocket-league', 'training', 'train'], license='MIT", "`rlbottraining` instead of `python -m rlbottraining` 'console_scripts': ['rlbottraining = rlbottraining.__main__:main'] }, package_data={ 'rlbottraining':", "the package being created yet. def load_module(module_name, full_path): spec = importlib.util.spec_from_file_location(module_name, full_path) module", "don't want to depend on the package being created yet. 
def load_module(module_name, full_path):", "license='MIT License', classifiers=[ \"Programming Language :: Python :: 3\", \"License :: OSI Approved", "module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) return module version = load_module(\"rlbottraining.version\", \"rlbottraining/version.py\") paths = load_module(\"rlbottraining.paths\",", "Approved :: MIT License\", \"Operating System :: Microsoft :: Windows\", ], entry_points={ #", "\"Operating System :: Microsoft :: Windows\", ], entry_points={ # Allow people to run", "created yet. def load_module(module_name, full_path): spec = importlib.util.spec_from_file_location(module_name, full_path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module)", "= load_module(\"rlbottraining.version\", \"rlbottraining/version.py\") paths = load_module(\"rlbottraining.paths\", \"rlbottraining/paths.py\") with open(\"README.md\", \"r\") as readme_file: long_description", "name='rlbottraining', packages=setuptools.find_packages(), install_requires=[ 'rlbot>=1.25.0', 'docopt', 'watchdog', 'numpy', ], python_requires='>=3.7.0', version=version.__version__, description='A framework for", "'numpy', ], python_requires='>=3.7.0', version=version.__version__, description='A framework for writing training for Rocket League bots.',", "version=version.__version__, description='A framework for writing training for Rocket League bots.', long_description=long_description, long_description_content_type=\"text/markdown\", author='RLBot", "setuptools.setup( name='rlbottraining', packages=setuptools.find_packages(), install_requires=[ 'rlbot>=1.25.0', 'docopt', 'watchdog', 'numpy', ], python_requires='>=3.7.0', version=version.__version__, description='A framework", ":: Python :: 3\", \"License :: OSI Approved :: MIT License\", \"Operating System", "Allow people to run `rlbottraining` instead of `python -m rlbottraining` 'console_scripts': ['rlbottraining =", "for writing training for Rocket 
League bots.', long_description=long_description, long_description_content_type=\"text/markdown\", author='RLBot Community', author_email='<EMAIL>', url='https://github.com/RLBot/RLBotTraining',", "paths = load_module(\"rlbottraining.paths\", \"rlbottraining/paths.py\") with open(\"README.md\", \"r\") as readme_file: long_description = readme_file.read() setuptools.setup(", "'training', 'train'], license='MIT License', classifiers=[ \"Programming Language :: Python :: 3\", \"License ::", "= importlib.util.spec_from_file_location(module_name, full_path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) return module version = load_module(\"rlbottraining.version\", \"rlbottraining/version.py\")", "long_description_content_type=\"text/markdown\", author='RLBot Community', author_email='<EMAIL>', url='https://github.com/RLBot/RLBotTraining', keywords=['rocket-league', 'training', 'train'], license='MIT License', classifiers=[ \"Programming Language", ":: Microsoft :: Windows\", ], entry_points={ # Allow people to run `rlbottraining` instead", ":: 3\", \"License :: OSI Approved :: MIT License\", \"Operating System :: Microsoft", ":: MIT License\", \"Operating System :: Microsoft :: Windows\", ], entry_points={ # Allow", "'docopt', 'watchdog', 'numpy', ], python_requires='>=3.7.0', version=version.__version__, description='A framework for writing training for Rocket", "= rlbottraining.__main__:main'] }, package_data={ 'rlbottraining': [ f'{paths._match_config_dir}/*.cfg', f'{paths._example_bot_dir}/*/*.cfg', str(paths._website_static_source), str(paths._example_rl_custom_training_json), ] }, )", "League bots.', long_description=long_description, long_description_content_type=\"text/markdown\", author='RLBot Community', author_email='<EMAIL>', url='https://github.com/RLBot/RLBotTraining', keywords=['rocket-league', 'training', 'train'], license='MIT License',", "# Allow people to run `rlbottraining` instead of `python -m rlbottraining` 
'console_scripts': ['rlbottraining", "], python_requires='>=3.7.0', version=version.__version__, description='A framework for writing training for Rocket League bots.', long_description=long_description,", "readme_file.read() setuptools.setup( name='rlbottraining', packages=setuptools.find_packages(), install_requires=[ 'rlbot>=1.25.0', 'docopt', 'watchdog', 'numpy', ], python_requires='>=3.7.0', version=version.__version__, description='A", "on the package being created yet. def load_module(module_name, full_path): spec = importlib.util.spec_from_file_location(module_name, full_path)", "version = load_module(\"rlbottraining.version\", \"rlbottraining/version.py\") paths = load_module(\"rlbottraining.paths\", \"rlbottraining/paths.py\") with open(\"README.md\", \"r\") as readme_file:", "\"rlbottraining/paths.py\") with open(\"README.md\", \"r\") as readme_file: long_description = readme_file.read() setuptools.setup( name='rlbottraining', packages=setuptools.find_packages(), install_requires=[", "people to run `rlbottraining` instead of `python -m rlbottraining` 'console_scripts': ['rlbottraining = rlbottraining.__main__:main']", "as we don't want to depend on the package being created yet. def", "depend on the package being created yet. def load_module(module_name, full_path): spec = importlib.util.spec_from_file_location(module_name,", "# Avoid native import statements as we don't want to depend on the", "we don't want to depend on the package being created yet. 
def load_module(module_name,", "\"Programming Language :: Python :: 3\", \"License :: OSI Approved :: MIT License\",", "Python :: 3\", \"License :: OSI Approved :: MIT License\", \"Operating System ::", "keywords=['rocket-league', 'training', 'train'], license='MIT License', classifiers=[ \"Programming Language :: Python :: 3\", \"License", "Avoid native import statements as we don't want to depend on the package", "readme_file: long_description = readme_file.read() setuptools.setup( name='rlbottraining', packages=setuptools.find_packages(), install_requires=[ 'rlbot>=1.25.0', 'docopt', 'watchdog', 'numpy', ],", "MIT License\", \"Operating System :: Microsoft :: Windows\", ], entry_points={ # Allow people", "'rlbot>=1.25.0', 'docopt', 'watchdog', 'numpy', ], python_requires='>=3.7.0', version=version.__version__, description='A framework for writing training for", "load_module(\"rlbottraining.paths\", \"rlbottraining/paths.py\") with open(\"README.md\", \"r\") as readme_file: long_description = readme_file.read() setuptools.setup( name='rlbottraining', packages=setuptools.find_packages(),", "Language :: Python :: 3\", \"License :: OSI Approved :: MIT License\", \"Operating", "License\", \"Operating System :: Microsoft :: Windows\", ], entry_points={ # Allow people to", "\"r\") as readme_file: long_description = readme_file.read() setuptools.setup( name='rlbottraining', packages=setuptools.find_packages(), install_requires=[ 'rlbot>=1.25.0', 'docopt', 'watchdog',", "of `python -m rlbottraining` 'console_scripts': ['rlbottraining = rlbottraining.__main__:main'] }, package_data={ 'rlbottraining': [ f'{paths._match_config_dir}/*.cfg',", "importlib.util.module_from_spec(spec) spec.loader.exec_module(module) return module version = load_module(\"rlbottraining.version\", \"rlbottraining/version.py\") paths = load_module(\"rlbottraining.paths\", \"rlbottraining/paths.py\") with", "full_path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) 
return module version = load_module(\"rlbottraining.version\", \"rlbottraining/version.py\") paths =", "Windows\", ], entry_points={ # Allow people to run `rlbottraining` instead of `python -m", "], entry_points={ # Allow people to run `rlbottraining` instead of `python -m rlbottraining`", "packages=setuptools.find_packages(), install_requires=[ 'rlbot>=1.25.0', 'docopt', 'watchdog', 'numpy', ], python_requires='>=3.7.0', version=version.__version__, description='A framework for writing", "['rlbottraining = rlbottraining.__main__:main'] }, package_data={ 'rlbottraining': [ f'{paths._match_config_dir}/*.cfg', f'{paths._example_bot_dir}/*/*.cfg', str(paths._website_static_source), str(paths._example_rl_custom_training_json), ] },", "author='RLBot Community', author_email='<EMAIL>', url='https://github.com/RLBot/RLBotTraining', keywords=['rocket-league', 'training', 'train'], license='MIT License', classifiers=[ \"Programming Language ::", "rlbottraining` 'console_scripts': ['rlbottraining = rlbottraining.__main__:main'] }, package_data={ 'rlbottraining': [ f'{paths._match_config_dir}/*.cfg', f'{paths._example_bot_dir}/*/*.cfg', str(paths._website_static_source), str(paths._example_rl_custom_training_json),", "author_email='<EMAIL>', url='https://github.com/RLBot/RLBotTraining', keywords=['rocket-league', 'training', 'train'], license='MIT License', classifiers=[ \"Programming Language :: Python ::", "OSI Approved :: MIT License\", \"Operating System :: Microsoft :: Windows\", ], entry_points={", "being created yet. 
def load_module(module_name, full_path): spec = importlib.util.spec_from_file_location(module_name, full_path) module = importlib.util.module_from_spec(spec)", "<gh_stars>0 import setuptools import importlib # Avoid native import statements as we don't", "import importlib # Avoid native import statements as we don't want to depend", "setuptools import importlib # Avoid native import statements as we don't want to", ":: Windows\", ], entry_points={ # Allow people to run `rlbottraining` instead of `python", "-m rlbottraining` 'console_scripts': ['rlbottraining = rlbottraining.__main__:main'] }, package_data={ 'rlbottraining': [ f'{paths._match_config_dir}/*.cfg', f'{paths._example_bot_dir}/*/*.cfg', str(paths._website_static_source),", "want to depend on the package being created yet. def load_module(module_name, full_path): spec", "description='A framework for writing training for Rocket League bots.', long_description=long_description, long_description_content_type=\"text/markdown\", author='RLBot Community',", "\"rlbottraining/version.py\") paths = load_module(\"rlbottraining.paths\", \"rlbottraining/paths.py\") with open(\"README.md\", \"r\") as readme_file: long_description = readme_file.read()", "python_requires='>=3.7.0', version=version.__version__, description='A framework for writing training for Rocket League bots.', long_description=long_description, long_description_content_type=\"text/markdown\",", "License', classifiers=[ \"Programming Language :: Python :: 3\", \"License :: OSI Approved ::", "long_description=long_description, long_description_content_type=\"text/markdown\", author='RLBot Community', author_email='<EMAIL>', url='https://github.com/RLBot/RLBotTraining', keywords=['rocket-league', 'training', 'train'], license='MIT License', classifiers=[ \"Programming", "install_requires=[ 'rlbot>=1.25.0', 'docopt', 'watchdog', 'numpy', ], python_requires='>=3.7.0', version=version.__version__, description='A framework for writing training" ]
[ "- 1. imgarr3 = (imgarr2*imgarr2).sum(2) mask = ~(imgarr3==3.0) imgarr3 *= mask imgarr3 =", "= 2.*imgarr - 1. imgarr3 = (imgarr2*imgarr2).sum(2) mask = ~(imgarr3==3.0) imgarr3 *= mask", "\"\"\" see if normals are plausible by looking at their norm. \"\"\" import", "~(imgarr3==3.0) imgarr3 *= mask imgarr3 = np.sqrt(imgarr3) print imgarr3.min(), imgarr3.max() pl.matshow(imgarr3) pl.colorbar() pl.show()", "print imgarr.shape imgarr2 = 2.*imgarr - 1. imgarr3 = (imgarr2*imgarr2).sum(2) mask = ~(imgarr3==3.0)", "at their norm. \"\"\" import sys import Image import numpy as np import", "imgarr3 *= mask imgarr3 = np.sqrt(imgarr3) print imgarr3.min(), imgarr3.max() pl.matshow(imgarr3) pl.colorbar() pl.show() #print", "*= mask imgarr3 = np.sqrt(imgarr3) print imgarr3.min(), imgarr3.max() pl.matshow(imgarr3) pl.colorbar() pl.show() #print imgarr2.min(),", "Image.open(img_fname) imgarr = np.array(img).astype(np.float)/255. print imgarr.shape imgarr2 = 2.*imgarr - 1. imgarr3 =", "#!/usr/bin/env python \"\"\" see if normals are plausible by looking at their norm.", "see if normals are plausible by looking at their norm. \"\"\" import sys", "looking at their norm. \"\"\" import sys import Image import numpy as np", "if normals are plausible by looking at their norm. \"\"\" import sys import", "= sys.argv[1] img = Image.open(img_fname) imgarr = np.array(img).astype(np.float)/255. print imgarr.shape imgarr2 = 2.*imgarr", "sys import Image import numpy as np import matplotlib.pyplot as pl img_fname =", "import matplotlib.pyplot as pl img_fname = sys.argv[1] img = Image.open(img_fname) imgarr = np.array(img).astype(np.float)/255.", "= Image.open(img_fname) imgarr = np.array(img).astype(np.float)/255. print imgarr.shape imgarr2 = 2.*imgarr - 1. imgarr3", "2.*imgarr - 1. imgarr3 = (imgarr2*imgarr2).sum(2) mask = ~(imgarr3==3.0) imgarr3 *= mask imgarr3", "are plausible by looking at their norm. 
\"\"\" import sys import Image import", "numpy as np import matplotlib.pyplot as pl img_fname = sys.argv[1] img = Image.open(img_fname)", "matplotlib.pyplot as pl img_fname = sys.argv[1] img = Image.open(img_fname) imgarr = np.array(img).astype(np.float)/255. print", "normals are plausible by looking at their norm. \"\"\" import sys import Image", "\"\"\" import sys import Image import numpy as np import matplotlib.pyplot as pl", "mask imgarr3 = np.sqrt(imgarr3) print imgarr3.min(), imgarr3.max() pl.matshow(imgarr3) pl.colorbar() pl.show() #print imgarr2.min(), imgarr2.max()", "python \"\"\" see if normals are plausible by looking at their norm. \"\"\"", "import numpy as np import matplotlib.pyplot as pl img_fname = sys.argv[1] img =", "as np import matplotlib.pyplot as pl img_fname = sys.argv[1] img = Image.open(img_fname) imgarr", "their norm. \"\"\" import sys import Image import numpy as np import matplotlib.pyplot", "<reponame>panmari/seeing3d<filename>check_normals.py #!/usr/bin/env python \"\"\" see if normals are plausible by looking at their", "np.array(img).astype(np.float)/255. print imgarr.shape imgarr2 = 2.*imgarr - 1. imgarr3 = (imgarr2*imgarr2).sum(2) mask =", "as pl img_fname = sys.argv[1] img = Image.open(img_fname) imgarr = np.array(img).astype(np.float)/255. print imgarr.shape", "Image import numpy as np import matplotlib.pyplot as pl img_fname = sys.argv[1] img", "pl img_fname = sys.argv[1] img = Image.open(img_fname) imgarr = np.array(img).astype(np.float)/255. print imgarr.shape imgarr2", "img_fname = sys.argv[1] img = Image.open(img_fname) imgarr = np.array(img).astype(np.float)/255. print imgarr.shape imgarr2 =", "1. 
imgarr3 = (imgarr2*imgarr2).sum(2) mask = ~(imgarr3==3.0) imgarr3 *= mask imgarr3 = np.sqrt(imgarr3)", "import Image import numpy as np import matplotlib.pyplot as pl img_fname = sys.argv[1]", "imgarr3 = (imgarr2*imgarr2).sum(2) mask = ~(imgarr3==3.0) imgarr3 *= mask imgarr3 = np.sqrt(imgarr3) print", "import sys import Image import numpy as np import matplotlib.pyplot as pl img_fname", "np import matplotlib.pyplot as pl img_fname = sys.argv[1] img = Image.open(img_fname) imgarr =", "by looking at their norm. \"\"\" import sys import Image import numpy as", "= np.array(img).astype(np.float)/255. print imgarr.shape imgarr2 = 2.*imgarr - 1. imgarr3 = (imgarr2*imgarr2).sum(2) mask", "= (imgarr2*imgarr2).sum(2) mask = ~(imgarr3==3.0) imgarr3 *= mask imgarr3 = np.sqrt(imgarr3) print imgarr3.min(),", "sys.argv[1] img = Image.open(img_fname) imgarr = np.array(img).astype(np.float)/255. print imgarr.shape imgarr2 = 2.*imgarr -", "(imgarr2*imgarr2).sum(2) mask = ~(imgarr3==3.0) imgarr3 *= mask imgarr3 = np.sqrt(imgarr3) print imgarr3.min(), imgarr3.max()", "= ~(imgarr3==3.0) imgarr3 *= mask imgarr3 = np.sqrt(imgarr3) print imgarr3.min(), imgarr3.max() pl.matshow(imgarr3) pl.colorbar()", "img = Image.open(img_fname) imgarr = np.array(img).astype(np.float)/255. print imgarr.shape imgarr2 = 2.*imgarr - 1.", "imgarr.shape imgarr2 = 2.*imgarr - 1. imgarr3 = (imgarr2*imgarr2).sum(2) mask = ~(imgarr3==3.0) imgarr3", "plausible by looking at their norm. \"\"\" import sys import Image import numpy", "imgarr = np.array(img).astype(np.float)/255. print imgarr.shape imgarr2 = 2.*imgarr - 1. imgarr3 = (imgarr2*imgarr2).sum(2)", "norm. \"\"\" import sys import Image import numpy as np import matplotlib.pyplot as", "mask = ~(imgarr3==3.0) imgarr3 *= mask imgarr3 = np.sqrt(imgarr3) print imgarr3.min(), imgarr3.max() pl.matshow(imgarr3)", "imgarr2 = 2.*imgarr - 1. imgarr3 = (imgarr2*imgarr2).sum(2) mask = ~(imgarr3==3.0) imgarr3 *=" ]
[ "r'D:\\Data\\Class_data\\Alg_data\\FinalTask\\l_1.txt' path = r'D:\\Data\\Class_data\\Alg_data\\FinalTask\\F.txt' with open(path,'r',encoding='utf-8',errors='ignore') as f: string = '' # for", "f: # string += line.strip() # except: # continue string = f.read() print(len(string))", "path_save = r'D:\\Data\\Class_data\\Alg_data\\FinalTask\\l_1.txt' path = r'D:\\Data\\Class_data\\Alg_data\\FinalTask\\F.txt' with open(path,'r',encoding='utf-8',errors='ignore') as f: string = ''", "for line in f: # string += line.strip() # except: # continue string", "string = '' # for line in f: # string += line.strip() #", "= r'D:\\Data\\Class_data\\Alg_data\\FinalTask\\F.txt' with open(path,'r',encoding='utf-8',errors='ignore') as f: string = '' # for line in", "'' # for line in f: # string += line.strip() # except: #", "path = r'D:\\Data\\Class_data\\Alg_data\\FinalTask\\F.txt' with open(path,'r',encoding='utf-8',errors='ignore') as f: string = '' # for line", "+= line.strip() # except: # continue string = f.read() print(len(string)) g = open(path_save,'w')", "as f: string = '' # for line in f: # string +=", "line in f: # string += line.strip() # except: # continue string =", "# except: # continue string = f.read() print(len(string)) g = open(path_save,'w') g.write(string) g.close()", "= '' # for line in f: # string += line.strip() # except:", "with open(path,'r',encoding='utf-8',errors='ignore') as f: string = '' # for line in f: #", "open(path,'r',encoding='utf-8',errors='ignore') as f: string = '' # for line in f: # string", "= r'D:\\Data\\Class_data\\Alg_data\\FinalTask\\l_1.txt' path = r'D:\\Data\\Class_data\\Alg_data\\FinalTask\\F.txt' with open(path,'r',encoding='utf-8',errors='ignore') as f: string = '' #", "r'D:\\Data\\Class_data\\Alg_data\\FinalTask\\F.txt' with open(path,'r',encoding='utf-8',errors='ignore') as f: string = '' # for line in f:", "f: string = '' # for line in f: # string += line.strip()", "# for line in f: # string += line.strip() # except: # continue", 
"# string += line.strip() # except: # continue string = f.read() print(len(string)) g", "string += line.strip() # except: # continue string = f.read() print(len(string)) g =", "line.strip() # except: # continue string = f.read() print(len(string)) g = open(path_save,'w') g.write(string)", "in f: # string += line.strip() # except: # continue string = f.read()" ]
[ "y, w, h) # print(bbox) # print('===========================') xmlbox.find('xmin').text = str(int(bbox[0])) xmlbox.find('ymin').text = str(int(bbox[2]))", "b) x, y, w, h = bb[0], bb[1], bb[2], bb[3] # print(x, y,", "xmlbox.find('xmax').text = str(int(bbox[1])) xmlbox.find('ymax').text = str(int(bbox[3])) if flag: # save file tree.write(savePath) in_file.close()", "os.path.splitext(xmlFilename) savePath = parentPath + os.sep + xf + '_reshape.xml' root.find('filename').text = xf", "skimage.io.imread(img) elif isinstance(img, np.ndarray): oriImg = img else: logger.error('Input error!') return in_file =", "int(resizeImgShape[1]) height = int(resizeImgShape[0]) size = root.find('size') size.find('width').text = str(width) size.find('height').text = str(height)", "y, w, h = bb[0], bb[1], bb[2], bb[3] # print(x, y, w, h)", "str(int(bbox[0])) xmlbox.find('ymin').text = str(int(bbox[2])) xmlbox.find('xmax').text = str(int(bbox[1])) xmlbox.find('ymax').text = str(int(bbox[3])) if flag: #", "img.shape resizedImg = skimage.transform.resize( img, (int(heightFactor * imgShape[0]), int(widthFactor * imgShape[1]))) return np.array(resizedImg", "os import xml.etree.ElementTree as ET import numpy as np import skimage from convertmask.utils.methods.logger", "convertmask.utils.methods.logger import logger from convertmask.utils.xml2yolo.xml2yolo import convert as x2yVert from convertmask.utils.yolo2xml.yolo2xml import convert", "'_reshape.xml' root.find('filename').text = xf + '_reshape.jpg' root.find('path').text = parentPath + os.sep + xf", "= int(resizeImgShape[1]) height = int(resizeImgShape[0]) size = root.find('size') size.find('width').text = str(width) size.find('height').text =", "elif isinstance(img, np.ndarray): oriImg = img else: logger.error('Input error!') return in_file = open(xmlpath)", "y2xVert def resize_img(img: np.ndarray, heightFactor=1, widthFactor=1): if isinstance(img,str): img = skimage.io.imread(img) imgShape =", "convert as x2yVert from 
convertmask.utils.yolo2xml.yolo2xml import convert as y2xVert def resize_img(img: np.ndarray, heightFactor=1,", "= h bbox = y2xVert((resizeImgShape[1],resizeImgShape[0]), x, y, w, h) # print(bbox) # print('===========================')", "2020-11-20 14:12:40 ''' import os import xml.etree.ElementTree as ET import numpy as np", "numpy as np import skimage from convertmask.utils.methods.logger import logger from convertmask.utils.xml2yolo.xml2yolo import convert", "int(resizeImgShape[0]) size = root.find('size') size.find('width').text = str(width) size.find('height').text = str(height) for obj in", "= x2yVert((oriImg.shape[1], oriImg.shape[0]), b) x, y, w, h = bb[0], bb[1], bb[2], bb[3]", "savePath = parentPath + os.sep + xf + '_reshape.xml' root.find('filename').text = xf +", "isinstance(img,str): img = skimage.io.imread(img) imgShape = img.shape resizedImg = skimage.transform.resize( img, (int(heightFactor *", "in_file = open(xmlpath) tree = ET.parse(in_file) root = tree.getroot() parentPath, xmlFilename = os.path.split(xmlpath)", "bb[3] # print(x, y, w, h) # w = w # h =", "w, h) # w = w # h = h bbox = y2xVert((resizeImgShape[1],resizeImgShape[0]),", "resize_img(oriImg, heightFactor, widthFactor) resizeImgShape = resizeImg.shape width = int(resizeImgShape[1]) height = int(resizeImgShape[0]) size", "LastEditTime: 2020-11-20 14:12:40 ''' import os import xml.etree.ElementTree as ET import numpy as", "x, y, w, h = bb[0], bb[1], bb[2], bb[3] # print(x, y, w,", "print(bbox) # print('===========================') xmlbox.find('xmin').text = str(int(bbox[0])) xmlbox.find('ymin').text = str(int(bbox[2])) xmlbox.find('xmax').text = str(int(bbox[1])) xmlbox.find('ymax').text", "root.iter('object'): xmlbox = obj.find('bndbox') b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text)) # print('===========================') #", "oriImg = skimage.io.imread(img) elif isinstance(img, np.ndarray): oriImg 
= img else: logger.error('Input error!') return", "error!') return in_file = open(xmlpath) tree = ET.parse(in_file) root = tree.getroot() parentPath, xmlFilename", "else: logger.error('Input error!') return in_file = open(xmlpath) tree = ET.parse(in_file) root = tree.getroot()", "def resizeScript(img, xmlpath: str, heightFactor=1, widthFactor=1,flag=True): if isinstance(img, str) and os.path.exists(img): oriImg =", "size.find('height').text = str(height) for obj in root.iter('object'): xmlbox = obj.find('bndbox') b = (float(xmlbox.find('xmin').text),", "imgShape[0]), int(widthFactor * imgShape[1]))) return np.array(resizedImg * 255).astype(np.uint8) # def resize_xml(xmlpath:str, heightFactor=1, widthFactor=1):", "x2yVert from convertmask.utils.yolo2xml.yolo2xml import convert as y2xVert def resize_img(img: np.ndarray, heightFactor=1, widthFactor=1): if", "logger.error('Input error!') return in_file = open(xmlpath) tree = ET.parse(in_file) root = tree.getroot() parentPath,", "+ os.sep + xf + '_reshape.xml' root.find('filename').text = xf + '_reshape.jpg' root.find('path').text =", "np.ndarray, heightFactor=1, widthFactor=1): if isinstance(img,str): img = skimage.io.imread(img) imgShape = img.shape resizedImg =", "= (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text)) # print('===========================') # print(b) bb = x2yVert((oriImg.shape[1], oriImg.shape[0]),", "convertmask.utils.xml2yolo.xml2yolo import convert as x2yVert from convertmask.utils.yolo2xml.yolo2xml import convert as y2xVert def resize_img(img:", "float(xmlbox.find('ymax').text)) # print('===========================') # print(b) bb = x2yVert((oriImg.shape[1], oriImg.shape[0]), b) x, y, w,", "h bbox = y2xVert((resizeImgShape[1],resizeImgShape[0]), x, y, w, h) # print(bbox) # print('===========================') xmlbox.find('xmin').text", "img = skimage.io.imread(img) imgShape = img.shape resizedImg = 
skimage.transform.resize( img, (int(heightFactor * imgShape[0]),", "<filename>convertmask/utils/auglib/optional/resize.py ''' lanhuage: python Descripttion: version: beta Author: xiaoshuyui Date: 2020-10-26 08:31:13 LastEditors:", "= skimage.io.imread(img) imgShape = img.shape resizedImg = skimage.transform.resize( img, (int(heightFactor * imgShape[0]), int(widthFactor", "from convertmask.utils.yolo2xml.yolo2xml import convert as y2xVert def resize_img(img: np.ndarray, heightFactor=1, widthFactor=1): if isinstance(img,str):", "= resize_img(oriImg, heightFactor, widthFactor) resizeImgShape = resizeImg.shape width = int(resizeImgShape[1]) height = int(resizeImgShape[0])", "print(b) bb = x2yVert((oriImg.shape[1], oriImg.shape[0]), b) x, y, w, h = bb[0], bb[1],", "if flag: # save file tree.write(savePath) in_file.close() return resizeImg, savePath else: return tree", "as y2xVert def resize_img(img: np.ndarray, heightFactor=1, widthFactor=1): if isinstance(img,str): img = skimage.io.imread(img) imgShape", "= img else: logger.error('Input error!') return in_file = open(xmlpath) tree = ET.parse(in_file) root", "convert as y2xVert def resize_img(img: np.ndarray, heightFactor=1, widthFactor=1): if isinstance(img,str): img = skimage.io.imread(img)", "= y2xVert((resizeImgShape[1],resizeImgShape[0]), x, y, w, h) # print(bbox) # print('===========================') xmlbox.find('xmin').text = str(int(bbox[0]))", "resizeScript(img, xmlpath: str, heightFactor=1, widthFactor=1,flag=True): if isinstance(img, str) and os.path.exists(img): oriImg = skimage.io.imread(img)", "= os.path.splitext(xmlFilename) savePath = parentPath + os.sep + xf + '_reshape.xml' root.find('filename').text =", "str(width) size.find('height').text = str(height) for obj in root.iter('object'): xmlbox = obj.find('bndbox') b =", "+ os.sep + xf + '_reshape.jpg' resizeImg = resize_img(oriImg, heightFactor, widthFactor) resizeImgShape =", "'_reshape.jpg' root.find('path').text = parentPath + os.sep + xf + 
'_reshape.jpg' resizeImg = resize_img(oriImg,", "= int(resizeImgShape[0]) size = root.find('size') size.find('width').text = str(width) size.find('height').text = str(height) for obj", "print('===========================') xmlbox.find('xmin').text = str(int(bbox[0])) xmlbox.find('ymin').text = str(int(bbox[2])) xmlbox.find('xmax').text = str(int(bbox[1])) xmlbox.find('ymax').text = str(int(bbox[3]))", "xml.etree.ElementTree as ET import numpy as np import skimage from convertmask.utils.methods.logger import logger", "as ET import numpy as np import skimage from convertmask.utils.methods.logger import logger from", "= str(height) for obj in root.iter('object'): xmlbox = obj.find('bndbox') b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text),", "obj.find('bndbox') b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text)) # print('===========================') # print(b) bb =", "y2xVert((resizeImgShape[1],resizeImgShape[0]), x, y, w, h) # print(bbox) # print('===========================') xmlbox.find('xmin').text = str(int(bbox[0])) xmlbox.find('ymin').text", "xmlFilename = os.path.split(xmlpath) xf, _ = os.path.splitext(xmlFilename) savePath = parentPath + os.sep +", "_ = os.path.splitext(xmlFilename) savePath = parentPath + os.sep + xf + '_reshape.xml' root.find('filename').text", "print('===========================') # print(b) bb = x2yVert((oriImg.shape[1], oriImg.shape[0]), b) x, y, w, h =", "ET.parse(in_file) root = tree.getroot() parentPath, xmlFilename = os.path.split(xmlpath) xf, _ = os.path.splitext(xmlFilename) savePath", "# pass def resizeScript(img, xmlpath: str, heightFactor=1, widthFactor=1,flag=True): if isinstance(img, str) and os.path.exists(img):", "xmlbox.find('ymax').text = str(int(bbox[3])) if flag: # save file tree.write(savePath) in_file.close() return resizeImg, savePath", "size.find('width').text = str(width) size.find('height').text = str(height) 
for obj in root.iter('object'): xmlbox = obj.find('bndbox')", "heightFactor=1, widthFactor=1): if isinstance(img,str): img = skimage.io.imread(img) imgShape = img.shape resizedImg = skimage.transform.resize(", "14:12:40 ''' import os import xml.etree.ElementTree as ET import numpy as np import", "in root.iter('object'): xmlbox = obj.find('bndbox') b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text)) # print('===========================')", "h) # print(bbox) # print('===========================') xmlbox.find('xmin').text = str(int(bbox[0])) xmlbox.find('ymin').text = str(int(bbox[2])) xmlbox.find('xmax').text =", "b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text)) # print('===========================') # print(b) bb = x2yVert((oriImg.shape[1],", "w, h) # print(bbox) # print('===========================') xmlbox.find('xmin').text = str(int(bbox[0])) xmlbox.find('ymin').text = str(int(bbox[2])) xmlbox.find('xmax').text", "root.find('path').text = parentPath + os.sep + xf + '_reshape.jpg' resizeImg = resize_img(oriImg, heightFactor,", "bb[0], bb[1], bb[2], bb[3] # print(x, y, w, h) # w = w", "(float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text)) # print('===========================') # print(b) bb = x2yVert((oriImg.shape[1], oriImg.shape[0]), b)", "# print('===========================') xmlbox.find('xmin').text = str(int(bbox[0])) xmlbox.find('ymin').text = str(int(bbox[2])) xmlbox.find('xmax').text = str(int(bbox[1])) xmlbox.find('ymax').text =", "python Descripttion: version: beta Author: xiaoshuyui Date: 2020-10-26 08:31:13 LastEditors: xiaoshuyui LastEditTime: 2020-11-20", "''' lanhuage: python Descripttion: version: beta Author: xiaoshuyui Date: 2020-10-26 08:31:13 LastEditors: xiaoshuyui", "and os.path.exists(img): 
oriImg = skimage.io.imread(img) elif isinstance(img, np.ndarray): oriImg = img else: logger.error('Input", "heightFactor=1, widthFactor=1): # pass def resizeScript(img, xmlpath: str, heightFactor=1, widthFactor=1,flag=True): if isinstance(img, str)", "parentPath, xmlFilename = os.path.split(xmlpath) xf, _ = os.path.splitext(xmlFilename) savePath = parentPath + os.sep", "heightFactor=1, widthFactor=1,flag=True): if isinstance(img, str) and os.path.exists(img): oriImg = skimage.io.imread(img) elif isinstance(img, np.ndarray):", "+ '_reshape.jpg' resizeImg = resize_img(oriImg, heightFactor, widthFactor) resizeImgShape = resizeImg.shape width = int(resizeImgShape[1])", "= img.shape resizedImg = skimage.transform.resize( img, (int(heightFactor * imgShape[0]), int(widthFactor * imgShape[1]))) return", "xmlbox = obj.find('bndbox') b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text)) # print('===========================') # print(b)", "255).astype(np.uint8) # def resize_xml(xmlpath:str, heightFactor=1, widthFactor=1): # pass def resizeScript(img, xmlpath: str, heightFactor=1,", "def resize_xml(xmlpath:str, heightFactor=1, widthFactor=1): # pass def resizeScript(img, xmlpath: str, heightFactor=1, widthFactor=1,flag=True): if", "img else: logger.error('Input error!') return in_file = open(xmlpath) tree = ET.parse(in_file) root =", "isinstance(img, str) and os.path.exists(img): oriImg = skimage.io.imread(img) elif isinstance(img, np.ndarray): oriImg = img", "import xml.etree.ElementTree as ET import numpy as np import skimage from convertmask.utils.methods.logger import", "2020-10-26 08:31:13 LastEditors: xiaoshuyui LastEditTime: 2020-11-20 14:12:40 ''' import os import xml.etree.ElementTree as", "w, h = bb[0], bb[1], bb[2], bb[3] # print(x, y, w, h) #", "bbox = y2xVert((resizeImgShape[1],resizeImgShape[0]), x, y, w, h) # print(bbox) # print('===========================') 
xmlbox.find('xmin').text =", "Date: 2020-10-26 08:31:13 LastEditors: xiaoshuyui LastEditTime: 2020-11-20 14:12:40 ''' import os import xml.etree.ElementTree", "= ET.parse(in_file) root = tree.getroot() parentPath, xmlFilename = os.path.split(xmlpath) xf, _ = os.path.splitext(xmlFilename)", "h) # w = w # h = h bbox = y2xVert((resizeImgShape[1],resizeImgShape[0]), x,", "str(int(bbox[2])) xmlbox.find('xmax').text = str(int(bbox[1])) xmlbox.find('ymax').text = str(int(bbox[3])) if flag: # save file tree.write(savePath)", "height = int(resizeImgShape[0]) size = root.find('size') size.find('width').text = str(width) size.find('height').text = str(height) for", "float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text)) # print('===========================') # print(b) bb = x2yVert((oriImg.shape[1], oriImg.shape[0]), b) x,", "widthFactor=1,flag=True): if isinstance(img, str) and os.path.exists(img): oriImg = skimage.io.imread(img) elif isinstance(img, np.ndarray): oriImg", "= root.find('size') size.find('width').text = str(width) size.find('height').text = str(height) for obj in root.iter('object'): xmlbox", "= resizeImg.shape width = int(resizeImgShape[1]) height = int(resizeImgShape[0]) size = root.find('size') size.find('width').text =", "if isinstance(img, str) and os.path.exists(img): oriImg = skimage.io.imread(img) elif isinstance(img, np.ndarray): oriImg =", "import logger from convertmask.utils.xml2yolo.xml2yolo import convert as x2yVert from convertmask.utils.yolo2xml.yolo2xml import convert as", "# w = w # h = h bbox = y2xVert((resizeImgShape[1],resizeImgShape[0]), x, y,", "+ '_reshape.jpg' root.find('path').text = parentPath + os.sep + xf + '_reshape.jpg' resizeImg =", "print(x, y, w, h) # w = w # h = h bbox", "os.sep + xf + '_reshape.jpg' resizeImg = resize_img(oriImg, heightFactor, widthFactor) resizeImgShape = resizeImg.shape", "y, w, h) # w = w # h = h bbox =", "as x2yVert from convertmask.utils.yolo2xml.yolo2xml 
import convert as y2xVert def resize_img(img: np.ndarray, heightFactor=1, widthFactor=1):", "return in_file = open(xmlpath) tree = ET.parse(in_file) root = tree.getroot() parentPath, xmlFilename =", "resizeImgShape = resizeImg.shape width = int(resizeImgShape[1]) height = int(resizeImgShape[0]) size = root.find('size') size.find('width').text", "version: beta Author: xiaoshuyui Date: 2020-10-26 08:31:13 LastEditors: xiaoshuyui LastEditTime: 2020-11-20 14:12:40 '''", "imgShape[1]))) return np.array(resizedImg * 255).astype(np.uint8) # def resize_xml(xmlpath:str, heightFactor=1, widthFactor=1): # pass def", "+ '_reshape.xml' root.find('filename').text = xf + '_reshape.jpg' root.find('path').text = parentPath + os.sep +", "widthFactor=1): # pass def resizeScript(img, xmlpath: str, heightFactor=1, widthFactor=1,flag=True): if isinstance(img, str) and", "= bb[0], bb[1], bb[2], bb[3] # print(x, y, w, h) # w =", "h = h bbox = y2xVert((resizeImgShape[1],resizeImgShape[0]), x, y, w, h) # print(bbox) #", "x2yVert((oriImg.shape[1], oriImg.shape[0]), b) x, y, w, h = bb[0], bb[1], bb[2], bb[3] #", "= w # h = h bbox = y2xVert((resizeImgShape[1],resizeImgShape[0]), x, y, w, h)", "widthFactor=1): if isinstance(img,str): img = skimage.io.imread(img) imgShape = img.shape resizedImg = skimage.transform.resize( img,", "skimage from convertmask.utils.methods.logger import logger from convertmask.utils.xml2yolo.xml2yolo import convert as x2yVert from convertmask.utils.yolo2xml.yolo2xml", "= skimage.transform.resize( img, (int(heightFactor * imgShape[0]), int(widthFactor * imgShape[1]))) return np.array(resizedImg * 255).astype(np.uint8)", "str) and os.path.exists(img): oriImg = skimage.io.imread(img) elif isinstance(img, np.ndarray): oriImg = img else:", "xf + '_reshape.jpg' resizeImg = resize_img(oriImg, heightFactor, widthFactor) resizeImgShape = resizeImg.shape width =", "'_reshape.jpg' resizeImg = resize_img(oriImg, heightFactor, widthFactor) resizeImgShape = resizeImg.shape width = 
int(resizeImgShape[1]) height", "xf + '_reshape.xml' root.find('filename').text = xf + '_reshape.jpg' root.find('path').text = parentPath + os.sep", "import convert as x2yVert from convertmask.utils.yolo2xml.yolo2xml import convert as y2xVert def resize_img(img: np.ndarray,", "heightFactor, widthFactor) resizeImgShape = resizeImg.shape width = int(resizeImgShape[1]) height = int(resizeImgShape[0]) size =", "= parentPath + os.sep + xf + '_reshape.xml' root.find('filename').text = xf + '_reshape.jpg'", "str, heightFactor=1, widthFactor=1,flag=True): if isinstance(img, str) and os.path.exists(img): oriImg = skimage.io.imread(img) elif isinstance(img,", "size = root.find('size') size.find('width').text = str(width) size.find('height').text = str(height) for obj in root.iter('object'):", "oriImg.shape[0]), b) x, y, w, h = bb[0], bb[1], bb[2], bb[3] # print(x,", "# print(b) bb = x2yVert((oriImg.shape[1], oriImg.shape[0]), b) x, y, w, h = bb[0],", "from convertmask.utils.methods.logger import logger from convertmask.utils.xml2yolo.xml2yolo import convert as x2yVert from convertmask.utils.yolo2xml.yolo2xml import", "xmlbox.find('xmin').text = str(int(bbox[0])) xmlbox.find('ymin').text = str(int(bbox[2])) xmlbox.find('xmax').text = str(int(bbox[1])) xmlbox.find('ymax').text = str(int(bbox[3])) if", "= tree.getroot() parentPath, xmlFilename = os.path.split(xmlpath) xf, _ = os.path.splitext(xmlFilename) savePath = parentPath", "root.find('filename').text = xf + '_reshape.jpg' root.find('path').text = parentPath + os.sep + xf +", "* imgShape[0]), int(widthFactor * imgShape[1]))) return np.array(resizedImg * 255).astype(np.uint8) # def resize_xml(xmlpath:str, heightFactor=1,", "open(xmlpath) tree = ET.parse(in_file) root = tree.getroot() parentPath, xmlFilename = os.path.split(xmlpath) xf, _", "= os.path.split(xmlpath) xf, _ = os.path.splitext(xmlFilename) savePath = parentPath + os.sep + xf", "+ xf + '_reshape.jpg' resizeImg = resize_img(oriImg, heightFactor, widthFactor) 
resizeImgShape = resizeImg.shape width", "# h = h bbox = y2xVert((resizeImgShape[1],resizeImgShape[0]), x, y, w, h) # print(bbox)", "''' import os import xml.etree.ElementTree as ET import numpy as np import skimage", "if isinstance(img,str): img = skimage.io.imread(img) imgShape = img.shape resizedImg = skimage.transform.resize( img, (int(heightFactor", "str(int(bbox[3])) if flag: # save file tree.write(savePath) in_file.close() return resizeImg, savePath else: return", "= parentPath + os.sep + xf + '_reshape.jpg' resizeImg = resize_img(oriImg, heightFactor, widthFactor)", "= xf + '_reshape.jpg' root.find('path').text = parentPath + os.sep + xf + '_reshape.jpg'", "resize_xml(xmlpath:str, heightFactor=1, widthFactor=1): # pass def resizeScript(img, xmlpath: str, heightFactor=1, widthFactor=1,flag=True): if isinstance(img,", "root.find('size') size.find('width').text = str(width) size.find('height').text = str(height) for obj in root.iter('object'): xmlbox =", "parentPath + os.sep + xf + '_reshape.jpg' resizeImg = resize_img(oriImg, heightFactor, widthFactor) resizeImgShape", "from convertmask.utils.xml2yolo.xml2yolo import convert as x2yVert from convertmask.utils.yolo2xml.yolo2xml import convert as y2xVert def", "= str(int(bbox[0])) xmlbox.find('ymin').text = str(int(bbox[2])) xmlbox.find('xmax').text = str(int(bbox[1])) xmlbox.find('ymax').text = str(int(bbox[3])) if flag:", "(int(heightFactor * imgShape[0]), int(widthFactor * imgShape[1]))) return np.array(resizedImg * 255).astype(np.uint8) # def resize_xml(xmlpath:str,", "beta Author: xiaoshuyui Date: 2020-10-26 08:31:13 LastEditors: xiaoshuyui LastEditTime: 2020-11-20 14:12:40 ''' import", "np.ndarray): oriImg = img else: logger.error('Input error!') return in_file = open(xmlpath) tree =", "tree = ET.parse(in_file) root = tree.getroot() parentPath, xmlFilename = os.path.split(xmlpath) xf, _ =", "for obj in root.iter('object'): xmlbox = obj.find('bndbox') b = (float(xmlbox.find('xmin').text), 
float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))", "= str(width) size.find('height').text = str(height) for obj in root.iter('object'): xmlbox = obj.find('bndbox') b", "str(height) for obj in root.iter('object'): xmlbox = obj.find('bndbox') b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text),", "# print('===========================') # print(b) bb = x2yVert((oriImg.shape[1], oriImg.shape[0]), b) x, y, w, h", "return np.array(resizedImg * 255).astype(np.uint8) # def resize_xml(xmlpath:str, heightFactor=1, widthFactor=1): # pass def resizeScript(img,", "oriImg = img else: logger.error('Input error!') return in_file = open(xmlpath) tree = ET.parse(in_file)", "xmlbox.find('ymin').text = str(int(bbox[2])) xmlbox.find('xmax').text = str(int(bbox[1])) xmlbox.find('ymax').text = str(int(bbox[3])) if flag: # save", "pass def resizeScript(img, xmlpath: str, heightFactor=1, widthFactor=1,flag=True): if isinstance(img, str) and os.path.exists(img): oriImg", "widthFactor) resizeImgShape = resizeImg.shape width = int(resizeImgShape[1]) height = int(resizeImgShape[0]) size = root.find('size')", "xiaoshuyui Date: 2020-10-26 08:31:13 LastEditors: xiaoshuyui LastEditTime: 2020-11-20 14:12:40 ''' import os import", "+ xf + '_reshape.xml' root.find('filename').text = xf + '_reshape.jpg' root.find('path').text = parentPath +", "isinstance(img, np.ndarray): oriImg = img else: logger.error('Input error!') return in_file = open(xmlpath) tree", "= obj.find('bndbox') b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text)) # print('===========================') # print(b) bb", "LastEditors: xiaoshuyui LastEditTime: 2020-11-20 14:12:40 ''' import os import xml.etree.ElementTree as ET import", "xmlpath: str, heightFactor=1, widthFactor=1,flag=True): if isinstance(img, str) and os.path.exists(img): oriImg = 
skimage.io.imread(img) elif", "np.array(resizedImg * 255).astype(np.uint8) # def resize_xml(xmlpath:str, heightFactor=1, widthFactor=1): # pass def resizeScript(img, xmlpath:", "x, y, w, h) # print(bbox) # print('===========================') xmlbox.find('xmin').text = str(int(bbox[0])) xmlbox.find('ymin').text =", "h = bb[0], bb[1], bb[2], bb[3] # print(x, y, w, h) # w", "w = w # h = h bbox = y2xVert((resizeImgShape[1],resizeImgShape[0]), x, y, w,", "resize_img(img: np.ndarray, heightFactor=1, widthFactor=1): if isinstance(img,str): img = skimage.io.imread(img) imgShape = img.shape resizedImg", "= str(int(bbox[2])) xmlbox.find('xmax').text = str(int(bbox[1])) xmlbox.find('ymax').text = str(int(bbox[3])) if flag: # save file", "import skimage from convertmask.utils.methods.logger import logger from convertmask.utils.xml2yolo.xml2yolo import convert as x2yVert from", "def resize_img(img: np.ndarray, heightFactor=1, widthFactor=1): if isinstance(img,str): img = skimage.io.imread(img) imgShape = img.shape", "Author: xiaoshuyui Date: 2020-10-26 08:31:13 LastEditors: xiaoshuyui LastEditTime: 2020-11-20 14:12:40 ''' import os", "# def resize_xml(xmlpath:str, heightFactor=1, widthFactor=1): # pass def resizeScript(img, xmlpath: str, heightFactor=1, widthFactor=1,flag=True):", "root = tree.getroot() parentPath, xmlFilename = os.path.split(xmlpath) xf, _ = os.path.splitext(xmlFilename) savePath =", "= str(int(bbox[1])) xmlbox.find('ymax').text = str(int(bbox[3])) if flag: # save file tree.write(savePath) in_file.close() return", "skimage.io.imread(img) imgShape = img.shape resizedImg = skimage.transform.resize( img, (int(heightFactor * imgShape[0]), int(widthFactor *", "= open(xmlpath) tree = ET.parse(in_file) root = tree.getroot() parentPath, xmlFilename = os.path.split(xmlpath) xf,", "# print(x, y, w, h) # w = w # h = h", "bb[2], bb[3] # print(x, y, w, h) # w = w # h", "= str(int(bbox[3])) if flag: # save file tree.write(savePath) in_file.close() return resizeImg, 
savePath else:", "* imgShape[1]))) return np.array(resizedImg * 255).astype(np.uint8) # def resize_xml(xmlpath:str, heightFactor=1, widthFactor=1): # pass", "int(widthFactor * imgShape[1]))) return np.array(resizedImg * 255).astype(np.uint8) # def resize_xml(xmlpath:str, heightFactor=1, widthFactor=1): #", "08:31:13 LastEditors: xiaoshuyui LastEditTime: 2020-11-20 14:12:40 ''' import os import xml.etree.ElementTree as ET", "Descripttion: version: beta Author: xiaoshuyui Date: 2020-10-26 08:31:13 LastEditors: xiaoshuyui LastEditTime: 2020-11-20 14:12:40", "convertmask.utils.yolo2xml.yolo2xml import convert as y2xVert def resize_img(img: np.ndarray, heightFactor=1, widthFactor=1): if isinstance(img,str): img", "bb[1], bb[2], bb[3] # print(x, y, w, h) # w = w #", "skimage.transform.resize( img, (int(heightFactor * imgShape[0]), int(widthFactor * imgShape[1]))) return np.array(resizedImg * 255).astype(np.uint8) #", "= skimage.io.imread(img) elif isinstance(img, np.ndarray): oriImg = img else: logger.error('Input error!') return in_file", "xf + '_reshape.jpg' root.find('path').text = parentPath + os.sep + xf + '_reshape.jpg' resizeImg", "resizeImg = resize_img(oriImg, heightFactor, widthFactor) resizeImgShape = resizeImg.shape width = int(resizeImgShape[1]) height =", "xf, _ = os.path.splitext(xmlFilename) savePath = parentPath + os.sep + xf + '_reshape.xml'", "lanhuage: python Descripttion: version: beta Author: xiaoshuyui Date: 2020-10-26 08:31:13 LastEditors: xiaoshuyui LastEditTime:", "import convert as y2xVert def resize_img(img: np.ndarray, heightFactor=1, widthFactor=1): if isinstance(img,str): img =", "np import skimage from convertmask.utils.methods.logger import logger from convertmask.utils.xml2yolo.xml2yolo import convert as x2yVert", "float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text)) # print('===========================') # print(b) bb = x2yVert((oriImg.shape[1], oriImg.shape[0]), b) x, y,", "bb = x2yVert((oriImg.shape[1], oriImg.shape[0]), 
b) x, y, w, h = bb[0], bb[1], bb[2],", "obj in root.iter('object'): xmlbox = obj.find('bndbox') b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text)) #", "import numpy as np import skimage from convertmask.utils.methods.logger import logger from convertmask.utils.xml2yolo.xml2yolo import", "os.sep + xf + '_reshape.xml' root.find('filename').text = xf + '_reshape.jpg' root.find('path').text = parentPath", "import os import xml.etree.ElementTree as ET import numpy as np import skimage from", "os.path.exists(img): oriImg = skimage.io.imread(img) elif isinstance(img, np.ndarray): oriImg = img else: logger.error('Input error!')", "as np import skimage from convertmask.utils.methods.logger import logger from convertmask.utils.xml2yolo.xml2yolo import convert as", "logger from convertmask.utils.xml2yolo.xml2yolo import convert as x2yVert from convertmask.utils.yolo2xml.yolo2xml import convert as y2xVert", "xiaoshuyui LastEditTime: 2020-11-20 14:12:40 ''' import os import xml.etree.ElementTree as ET import numpy", "img, (int(heightFactor * imgShape[0]), int(widthFactor * imgShape[1]))) return np.array(resizedImg * 255).astype(np.uint8) # def", "resizedImg = skimage.transform.resize( img, (int(heightFactor * imgShape[0]), int(widthFactor * imgShape[1]))) return np.array(resizedImg *", "width = int(resizeImgShape[1]) height = int(resizeImgShape[0]) size = root.find('size') size.find('width').text = str(width) size.find('height').text", "parentPath + os.sep + xf + '_reshape.xml' root.find('filename').text = xf + '_reshape.jpg' root.find('path').text", "w # h = h bbox = y2xVert((resizeImgShape[1],resizeImgShape[0]), x, y, w, h) #", "ET import numpy as np import skimage from convertmask.utils.methods.logger import logger from convertmask.utils.xml2yolo.xml2yolo", "str(int(bbox[1])) xmlbox.find('ymax').text = str(int(bbox[3])) if flag: # save file tree.write(savePath) in_file.close() return 
resizeImg,", "imgShape = img.shape resizedImg = skimage.transform.resize( img, (int(heightFactor * imgShape[0]), int(widthFactor * imgShape[1])))", "os.path.split(xmlpath) xf, _ = os.path.splitext(xmlFilename) savePath = parentPath + os.sep + xf +", "* 255).astype(np.uint8) # def resize_xml(xmlpath:str, heightFactor=1, widthFactor=1): # pass def resizeScript(img, xmlpath: str,", "tree.getroot() parentPath, xmlFilename = os.path.split(xmlpath) xf, _ = os.path.splitext(xmlFilename) savePath = parentPath +", "resizeImg.shape width = int(resizeImgShape[1]) height = int(resizeImgShape[0]) size = root.find('size') size.find('width').text = str(width)", "# print(bbox) # print('===========================') xmlbox.find('xmin').text = str(int(bbox[0])) xmlbox.find('ymin').text = str(int(bbox[2])) xmlbox.find('xmax').text = str(int(bbox[1]))" ]
[ "gzip import shutil import logging logger = logging.getLogger(__name__) class Image: def __init__(self, client,", "is None and tag == 'latest': force = True logger.info(\"Tagging image {image} as", "if compress: open_func = gzip.open else: open_func = open with open_func(path, 'wb') as", "**kwargs) def get(self): if self.repository_tag: return self.client.get_image(self.repository_tag) else: return self.client.get_image(self.id) def save(self, path,", "shutil import logging logger = logging.getLogger(__name__) class Image: def __init__(self, client, id): self.client", "return self.repository_tag or self.id[:12] def tag(self, repository, tag=None, force=None, **kwargs): if tag is", "id self.repository_tag = None def __str__(self): return self.repository_tag or self.id[:12] def tag(self, repository,", "None and tag == 'latest': force = True logger.info(\"Tagging image {image} as {repository}:{tag}...\".format(", "self.repository_tag = ':'.join((repository, tag)) return self def remove(self, **kwargs): logger.info(\"Removing image: {}\".format(self)) self.client.remove_image(self.id,", "= True logger.info(\"Tagging image {image} as {repository}:{tag}...\".format( image=self, repository=repository, tag=tag )) self.client.tag( self.id,", "get(self): if self.repository_tag: return self.client.get_image(self.repository_tag) else: return self.client.get_image(self.id) def save(self, path, compress=False): logger.info(\"Saving", "logger = logging.getLogger(__name__) class Image: def __init__(self, client, id): self.client = client self.id", "= None def __str__(self): return self.repository_tag or self.id[:12] def tag(self, repository, tag=None, force=None,", "client, id): self.client = client self.id = id self.repository_tag = None def __str__(self):", "logger.info(\"Tagging image {image} as {repository}:{tag}...\".format( image=self, repository=repository, tag=tag )) self.client.tag( self.id, repository=repository, tag=tag,", "= gzip.open else: open_func = open with 
open_func(path, 'wb') as output: shutil.copyfileobj(self.get(), output)", "image {image} to: {file}\".format( image=self, file=path )) if compress: open_func = gzip.open else:", "force=force, **kwargs ) self.repository_tag = ':'.join((repository, tag)) return self def remove(self, **kwargs): logger.info(\"Removing", "self.repository_tag: return self.client.get_image(self.repository_tag) else: return self.client.get_image(self.id) def save(self, path, compress=False): logger.info(\"Saving image {image}", "as {repository}:{tag}...\".format( image=self, repository=repository, tag=tag )) self.client.tag( self.id, repository=repository, tag=tag, force=force, **kwargs )", "self.client = client self.id = id self.repository_tag = None def __str__(self): return self.repository_tag", "force is None and tag == 'latest': force = True logger.info(\"Tagging image {image}", "self.client.remove_image(self.id, **kwargs) def get(self): if self.repository_tag: return self.client.get_image(self.repository_tag) else: return self.client.get_image(self.id) def save(self,", "= 'latest' if force is None and tag == 'latest': force = True", ")) if compress: open_func = gzip.open else: open_func = open with open_func(path, 'wb')", "class Image: def __init__(self, client, id): self.client = client self.id = id self.repository_tag", "{}\".format(self)) self.client.remove_image(self.id, **kwargs) def get(self): if self.repository_tag: return self.client.get_image(self.repository_tag) else: return self.client.get_image(self.id) def", "if force is None and tag == 'latest': force = True logger.info(\"Tagging image", ")) self.client.tag( self.id, repository=repository, tag=tag, force=force, **kwargs ) self.repository_tag = ':'.join((repository, tag)) return", "Image: def __init__(self, client, id): self.client = client self.id = id self.repository_tag =", "image=self, file=path )) if compress: open_func = gzip.open else: open_func = open with", "repository=repository, tag=tag, force=force, **kwargs 
) self.repository_tag = ':'.join((repository, tag)) return self def remove(self,", "is None: tag = 'latest' if force is None and tag == 'latest':", "repository, tag=None, force=None, **kwargs): if tag is None: tag = 'latest' if force", "logger.info(\"Removing image: {}\".format(self)) self.client.remove_image(self.id, **kwargs) def get(self): if self.repository_tag: return self.client.get_image(self.repository_tag) else: return", "gzip.open else: open_func = open with open_func(path, 'wb') as output: shutil.copyfileobj(self.get(), output) return", "{file}\".format( image=self, file=path )) if compress: open_func = gzip.open else: open_func = open", "None def __str__(self): return self.repository_tag or self.id[:12] def tag(self, repository, tag=None, force=None, **kwargs):", "if tag is None: tag = 'latest' if force is None and tag", "or self.id[:12] def tag(self, repository, tag=None, force=None, **kwargs): if tag is None: tag", "file=path )) if compress: open_func = gzip.open else: open_func = open with open_func(path,", "else: return self.client.get_image(self.id) def save(self, path, compress=False): logger.info(\"Saving image {image} to: {file}\".format( image=self,", "logging logger = logging.getLogger(__name__) class Image: def __init__(self, client, id): self.client = client", "return self def remove(self, **kwargs): logger.info(\"Removing image: {}\".format(self)) self.client.remove_image(self.id, **kwargs) def get(self): if", "':'.join((repository, tag)) return self def remove(self, **kwargs): logger.info(\"Removing image: {}\".format(self)) self.client.remove_image(self.id, **kwargs) def", "self.repository_tag or self.id[:12] def tag(self, repository, tag=None, force=None, **kwargs): if tag is None:", "and tag == 'latest': force = True logger.info(\"Tagging image {image} as {repository}:{tag}...\".format( image=self,", "tag is None: tag = 'latest' if force is None and tag ==", "**kwargs ) self.repository_tag = ':'.join((repository, tag)) return self def 
remove(self, **kwargs): logger.info(\"Removing image:", "self.client.get_image(self.repository_tag) else: return self.client.get_image(self.id) def save(self, path, compress=False): logger.info(\"Saving image {image} to: {file}\".format(", "self.id = id self.repository_tag = None def __str__(self): return self.repository_tag or self.id[:12] def", "tag == 'latest': force = True logger.info(\"Tagging image {image} as {repository}:{tag}...\".format( image=self, repository=repository,", "path, compress=False): logger.info(\"Saving image {image} to: {file}\".format( image=self, file=path )) if compress: open_func", "== 'latest': force = True logger.info(\"Tagging image {image} as {repository}:{tag}...\".format( image=self, repository=repository, tag=tag", "= client self.id = id self.repository_tag = None def __str__(self): return self.repository_tag or", "True logger.info(\"Tagging image {image} as {repository}:{tag}...\".format( image=self, repository=repository, tag=tag )) self.client.tag( self.id, repository=repository,", ") self.repository_tag = ':'.join((repository, tag)) return self def remove(self, **kwargs): logger.info(\"Removing image: {}\".format(self))", "tag=tag, force=force, **kwargs ) self.repository_tag = ':'.join((repository, tag)) return self def remove(self, **kwargs):", "= ':'.join((repository, tag)) return self def remove(self, **kwargs): logger.info(\"Removing image: {}\".format(self)) self.client.remove_image(self.id, **kwargs)", "self.repository_tag = None def __str__(self): return self.repository_tag or self.id[:12] def tag(self, repository, tag=None,", "{repository}:{tag}...\".format( image=self, repository=repository, tag=tag )) self.client.tag( self.id, repository=repository, tag=tag, force=force, **kwargs ) self.repository_tag", "__str__(self): return self.repository_tag or self.id[:12] def tag(self, repository, tag=None, force=None, **kwargs): if tag", "= logging.getLogger(__name__) class Image: def __init__(self, client, id): self.client = 
client self.id =", "self.client.tag( self.id, repository=repository, tag=tag, force=force, **kwargs ) self.repository_tag = ':'.join((repository, tag)) return self", "def __init__(self, client, id): self.client = client self.id = id self.repository_tag = None", "logging.getLogger(__name__) class Image: def __init__(self, client, id): self.client = client self.id = id", "image: {}\".format(self)) self.client.remove_image(self.id, **kwargs) def get(self): if self.repository_tag: return self.client.get_image(self.repository_tag) else: return self.client.get_image(self.id)", "if self.repository_tag: return self.client.get_image(self.repository_tag) else: return self.client.get_image(self.id) def save(self, path, compress=False): logger.info(\"Saving image", "import logging logger = logging.getLogger(__name__) class Image: def __init__(self, client, id): self.client =", "remove(self, **kwargs): logger.info(\"Removing image: {}\".format(self)) self.client.remove_image(self.id, **kwargs) def get(self): if self.repository_tag: return self.client.get_image(self.repository_tag)", "<filename>docker_loader/image.py<gh_stars>1-10 import gzip import shutil import logging logger = logging.getLogger(__name__) class Image: def", "compress: open_func = gzip.open else: open_func = open with open_func(path, 'wb') as output:", "compress=False): logger.info(\"Saving image {image} to: {file}\".format( image=self, file=path )) if compress: open_func =", "tag(self, repository, tag=None, force=None, **kwargs): if tag is None: tag = 'latest' if", "{image} to: {file}\".format( image=self, file=path )) if compress: open_func = gzip.open else: open_func", "client self.id = id self.repository_tag = None def __str__(self): return self.repository_tag or self.id[:12]", "else: open_func = open with open_func(path, 'wb') as output: shutil.copyfileobj(self.get(), output) return self", "__init__(self, client, id): self.client = client self.id = id self.repository_tag = None def", "return 
self.client.get_image(self.id) def save(self, path, compress=False): logger.info(\"Saving image {image} to: {file}\".format( image=self, file=path", "self.client.get_image(self.id) def save(self, path, compress=False): logger.info(\"Saving image {image} to: {file}\".format( image=self, file=path ))", "**kwargs): if tag is None: tag = 'latest' if force is None and", "def tag(self, repository, tag=None, force=None, **kwargs): if tag is None: tag = 'latest'", "repository=repository, tag=tag )) self.client.tag( self.id, repository=repository, tag=tag, force=force, **kwargs ) self.repository_tag = ':'.join((repository,", "force = True logger.info(\"Tagging image {image} as {repository}:{tag}...\".format( image=self, repository=repository, tag=tag )) self.client.tag(", "self.id[:12] def tag(self, repository, tag=None, force=None, **kwargs): if tag is None: tag =", "save(self, path, compress=False): logger.info(\"Saving image {image} to: {file}\".format( image=self, file=path )) if compress:", "tag)) return self def remove(self, **kwargs): logger.info(\"Removing image: {}\".format(self)) self.client.remove_image(self.id, **kwargs) def get(self):", "None: tag = 'latest' if force is None and tag == 'latest': force", "return self.client.get_image(self.repository_tag) else: return self.client.get_image(self.id) def save(self, path, compress=False): logger.info(\"Saving image {image} to:", "'latest': force = True logger.info(\"Tagging image {image} as {repository}:{tag}...\".format( image=self, repository=repository, tag=tag ))", "{image} as {repository}:{tag}...\".format( image=self, repository=repository, tag=tag )) self.client.tag( self.id, repository=repository, tag=tag, force=force, **kwargs", "tag=None, force=None, **kwargs): if tag is None: tag = 'latest' if force is", "def __str__(self): return self.repository_tag or self.id[:12] def tag(self, repository, tag=None, force=None, **kwargs): if", "def save(self, path, compress=False): logger.info(\"Saving image {image} to: 
{file}\".format( image=self, file=path )) if", "logger.info(\"Saving image {image} to: {file}\".format( image=self, file=path )) if compress: open_func = gzip.open", "= id self.repository_tag = None def __str__(self): return self.repository_tag or self.id[:12] def tag(self,", "to: {file}\".format( image=self, file=path )) if compress: open_func = gzip.open else: open_func =", "force=None, **kwargs): if tag is None: tag = 'latest' if force is None", "tag=tag )) self.client.tag( self.id, repository=repository, tag=tag, force=force, **kwargs ) self.repository_tag = ':'.join((repository, tag))", "image=self, repository=repository, tag=tag )) self.client.tag( self.id, repository=repository, tag=tag, force=force, **kwargs ) self.repository_tag =", "def get(self): if self.repository_tag: return self.client.get_image(self.repository_tag) else: return self.client.get_image(self.id) def save(self, path, compress=False):", "self.id, repository=repository, tag=tag, force=force, **kwargs ) self.repository_tag = ':'.join((repository, tag)) return self def", "tag = 'latest' if force is None and tag == 'latest': force =", "**kwargs): logger.info(\"Removing image: {}\".format(self)) self.client.remove_image(self.id, **kwargs) def get(self): if self.repository_tag: return self.client.get_image(self.repository_tag) else:", "self def remove(self, **kwargs): logger.info(\"Removing image: {}\".format(self)) self.client.remove_image(self.id, **kwargs) def get(self): if self.repository_tag:", "open_func = gzip.open else: open_func = open with open_func(path, 'wb') as output: shutil.copyfileobj(self.get(),", "import gzip import shutil import logging logger = logging.getLogger(__name__) class Image: def __init__(self,", "def remove(self, **kwargs): logger.info(\"Removing image: {}\".format(self)) self.client.remove_image(self.id, **kwargs) def get(self): if self.repository_tag: return", "import shutil import logging logger = logging.getLogger(__name__) class Image: def __init__(self, 
client, id):", "image {image} as {repository}:{tag}...\".format( image=self, repository=repository, tag=tag )) self.client.tag( self.id, repository=repository, tag=tag, force=force,", "id): self.client = client self.id = id self.repository_tag = None def __str__(self): return", "'latest' if force is None and tag == 'latest': force = True logger.info(\"Tagging" ]
[ "that has been made on article \"\"\" body=models.TextField(max_length=500) createdAt=models.DateTimeField(auto_now_add=True) updatedAt=models.DateTimeField(auto_now=True) highlight_start = models.PositiveIntegerField(null=True,", "CommentReply(models.Model): \"\"\" Handles replying on a specific comment by made on an article", "CommentReplyLike(models.Model): \"\"\" Holds data for liking reply made a comment \"\"\" liked=models.BooleanField() reply_like_by=models.ForeignKey(Profile,on_delete=models.CASCADE)", "\"\"\" comment=models.ForeignKey(Comment,on_delete=models.CASCADE) like_status=models.BooleanField() liked_by=models.ForeignKey(Profile,on_delete=models.CASCADE) def __str__(self): return \"like by {}\".format(self.liked_by) class CommentReplyLike(models.Model): \"\"\"", "receiver from django.db.models.signals import pre_save from django.db import models from authors.apps.articles.models import Article", "from django.dispatch import receiver from django.db.models.signals import pre_save from django.db import models from", "ordering=['repliedOn'] def __str__(self): return self.reply_body class CommentLike(models.Model): \"\"\" Handles liking of a specific", "django.dispatch import receiver from django.db.models.signals import pre_save from django.db import models from authors.apps.articles.models", "return self.reply_body class CommentLike(models.Model): \"\"\" Handles liking of a specific user by an", "import receiver from django.db.models.signals import pre_save from django.db import models from authors.apps.articles.models import", "comment_history = HistoricalRecords() class Meta: ordering=['-createdAt'] def __str__(self): return self.body class CommentReply(models.Model): \"\"\"", "def __str__(self): return self.reply_body class CommentLike(models.Model): \"\"\" Handles liking of a specific user", "pre_save from django.db import models from authors.apps.articles.models import Article from authors.apps.profiles.models import Profile", "self.body class 
CommentReply(models.Model): \"\"\" Handles replying on a specific comment by made on", "on a specific comment by made on an article \"\"\" comment=models.ForeignKey(Comment,on_delete=models.CASCADE,related_name='replies') reply_body=models.TextField() repliedOn=models.DateTimeField(auto_now_add=True)", "highlight_text = models.TextField(max_length=500, null=True) author=models.ForeignKey(Profile,on_delete=models.CASCADE, related_name='authored_by') article=models.ForeignKey(Article,on_delete=models.CASCADE, related_name='article') comment_history = HistoricalRecords() class Meta:", "simple_history.models import HistoricalRecords class Comment(models.Model): \"\"\" Handles CRUD on a comment that has", "specific comment by made on an article \"\"\" comment=models.ForeignKey(Comment,on_delete=models.CASCADE,related_name='replies') reply_body=models.TextField() repliedOn=models.DateTimeField(auto_now_add=True) updatedOn=models.DateTimeField(auto_now=True) author=models.ForeignKey(Profile,on_delete=models.CASCADE)", "Handles liking of a specific user by an authenticated user \"\"\" comment=models.ForeignKey(Comment,on_delete=models.CASCADE) like_status=models.BooleanField()", "import models from authors.apps.articles.models import Article from authors.apps.profiles.models import Profile from simple_history.models import", "comment=models.ForeignKey(Comment,on_delete=models.CASCADE,related_name='replies') reply_body=models.TextField() repliedOn=models.DateTimeField(auto_now_add=True) updatedOn=models.DateTimeField(auto_now=True) author=models.ForeignKey(Profile,on_delete=models.CASCADE) reply_history = HistoricalRecords() class Meta: ordering=['repliedOn'] def __str__(self):", "return self.body class CommentReply(models.Model): \"\"\" Handles replying on a specific comment by made", "Meta: ordering=['repliedOn'] def __str__(self): return self.reply_body class CommentLike(models.Model): \"\"\" Handles liking of a", "authors.apps.profiles.models import Profile from 
simple_history.models import HistoricalRecords class Comment(models.Model): \"\"\" Handles CRUD on", "class CommentLike(models.Model): \"\"\" Handles liking of a specific user by an authenticated user", "like_status=models.BooleanField() liked_by=models.ForeignKey(Profile,on_delete=models.CASCADE) def __str__(self): return \"like by {}\".format(self.liked_by) class CommentReplyLike(models.Model): \"\"\" Holds data", "def __str__(self): return \"like by {}\".format(self.liked_by) class CommentReplyLike(models.Model): \"\"\" Holds data for liking", "Handles CRUD on a comment that has been made on article \"\"\" body=models.TextField(max_length=500)", "CommentLike(models.Model): \"\"\" Handles liking of a specific user by an authenticated user \"\"\"", "Handles replying on a specific comment by made on an article \"\"\" comment=models.ForeignKey(Comment,on_delete=models.CASCADE,related_name='replies')", "class Comment(models.Model): \"\"\" Handles CRUD on a comment that has been made on", "on an article \"\"\" comment=models.ForeignKey(Comment,on_delete=models.CASCADE,related_name='replies') reply_body=models.TextField() repliedOn=models.DateTimeField(auto_now_add=True) updatedOn=models.DateTimeField(auto_now=True) author=models.ForeignKey(Profile,on_delete=models.CASCADE) reply_history = HistoricalRecords() class", "for liking reply made a comment \"\"\" liked=models.BooleanField() reply_like_by=models.ForeignKey(Profile,on_delete=models.CASCADE) comment_reply=models.ForeignKey(CommentReply,on_delete=models.CASCADE) def __str__(self): return", "an authenticated user \"\"\" comment=models.ForeignKey(Comment,on_delete=models.CASCADE) like_status=models.BooleanField() liked_by=models.ForeignKey(Profile,on_delete=models.CASCADE) def __str__(self): return \"like by {}\".format(self.liked_by)", "models.PositiveIntegerField(null=True, blank=True) highlight_text = models.TextField(max_length=500, null=True) author=models.ForeignKey(Profile,on_delete=models.CASCADE, 
related_name='authored_by') article=models.ForeignKey(Article,on_delete=models.CASCADE, related_name='article') comment_history = HistoricalRecords()", "has been made on article \"\"\" body=models.TextField(max_length=500) createdAt=models.DateTimeField(auto_now_add=True) updatedAt=models.DateTimeField(auto_now=True) highlight_start = models.PositiveIntegerField(null=True, blank=True)", "class Meta: ordering=['repliedOn'] def __str__(self): return self.reply_body class CommentLike(models.Model): \"\"\" Handles liking of", "made a comment \"\"\" liked=models.BooleanField() reply_like_by=models.ForeignKey(Profile,on_delete=models.CASCADE) comment_reply=models.ForeignKey(CommentReply,on_delete=models.CASCADE) def __str__(self): return \"reply liked by", "an article \"\"\" comment=models.ForeignKey(Comment,on_delete=models.CASCADE,related_name='replies') reply_body=models.TextField() repliedOn=models.DateTimeField(auto_now_add=True) updatedOn=models.DateTimeField(auto_now=True) author=models.ForeignKey(Profile,on_delete=models.CASCADE) reply_history = HistoricalRecords() class Meta:", "import pre_save from django.db import models from authors.apps.articles.models import Article from authors.apps.profiles.models import", "highlight_end = models.PositiveIntegerField(null=True, blank=True) highlight_text = models.TextField(max_length=500, null=True) author=models.ForeignKey(Profile,on_delete=models.CASCADE, related_name='authored_by') article=models.ForeignKey(Article,on_delete=models.CASCADE, related_name='article') comment_history", "\"like by {}\".format(self.liked_by) class CommentReplyLike(models.Model): \"\"\" Holds data for liking reply made a", "return \"like by {}\".format(self.liked_by) class CommentReplyLike(models.Model): \"\"\" Holds data for liking reply made", "__str__(self): return \"like by {}\".format(self.liked_by) class CommentReplyLike(models.Model): \"\"\" Holds data for liking reply", "comment that has been made on article \"\"\" 
body=models.TextField(max_length=500) createdAt=models.DateTimeField(auto_now_add=True) updatedAt=models.DateTimeField(auto_now=True) highlight_start =", "= models.TextField(max_length=500, null=True) author=models.ForeignKey(Profile,on_delete=models.CASCADE, related_name='authored_by') article=models.ForeignKey(Article,on_delete=models.CASCADE, related_name='article') comment_history = HistoricalRecords() class Meta: ordering=['-createdAt']", "= HistoricalRecords() class Meta: ordering=['-createdAt'] def __str__(self): return self.body class CommentReply(models.Model): \"\"\" Handles", "replying on a specific comment by made on an article \"\"\" comment=models.ForeignKey(Comment,on_delete=models.CASCADE,related_name='replies') reply_body=models.TextField()", "def __str__(self): return self.body class CommentReply(models.Model): \"\"\" Handles replying on a specific comment", "user by an authenticated user \"\"\" comment=models.ForeignKey(Comment,on_delete=models.CASCADE) like_status=models.BooleanField() liked_by=models.ForeignKey(Profile,on_delete=models.CASCADE) def __str__(self): return \"like", "related_name='article') comment_history = HistoricalRecords() class Meta: ordering=['-createdAt'] def __str__(self): return self.body class CommentReply(models.Model):", "from django.db import models from authors.apps.articles.models import Article from authors.apps.profiles.models import Profile from", "__str__(self): return self.reply_body class CommentLike(models.Model): \"\"\" Handles liking of a specific user by", "Meta: ordering=['-createdAt'] def __str__(self): return self.body class CommentReply(models.Model): \"\"\" Handles replying on a", "blank=True) highlight_end = models.PositiveIntegerField(null=True, blank=True) highlight_text = models.TextField(max_length=500, null=True) author=models.ForeignKey(Profile,on_delete=models.CASCADE, related_name='authored_by') article=models.ForeignKey(Article,on_delete=models.CASCADE, related_name='article')", 
"Comment(models.Model): \"\"\" Handles CRUD on a comment that has been made on article", "import Profile from simple_history.models import HistoricalRecords class Comment(models.Model): \"\"\" Handles CRUD on a", "a specific comment by made on an article \"\"\" comment=models.ForeignKey(Comment,on_delete=models.CASCADE,related_name='replies') reply_body=models.TextField() repliedOn=models.DateTimeField(auto_now_add=True) updatedOn=models.DateTimeField(auto_now=True)", "reply made a comment \"\"\" liked=models.BooleanField() reply_like_by=models.ForeignKey(Profile,on_delete=models.CASCADE) comment_reply=models.ForeignKey(CommentReply,on_delete=models.CASCADE) def __str__(self): return \"reply liked", "comment=models.ForeignKey(Comment,on_delete=models.CASCADE) like_status=models.BooleanField() liked_by=models.ForeignKey(Profile,on_delete=models.CASCADE) def __str__(self): return \"like by {}\".format(self.liked_by) class CommentReplyLike(models.Model): \"\"\" Holds", "\"\"\" Holds data for liking reply made a comment \"\"\" liked=models.BooleanField() reply_like_by=models.ForeignKey(Profile,on_delete=models.CASCADE) comment_reply=models.ForeignKey(CommentReply,on_delete=models.CASCADE)", "by {}\".format(self.liked_by) class CommentReplyLike(models.Model): \"\"\" Holds data for liking reply made a comment", "django.db import models from authors.apps.articles.models import Article from authors.apps.profiles.models import Profile from simple_history.models", "Profile from simple_history.models import HistoricalRecords class Comment(models.Model): \"\"\" Handles CRUD on a comment", "null=True) author=models.ForeignKey(Profile,on_delete=models.CASCADE, related_name='authored_by') article=models.ForeignKey(Article,on_delete=models.CASCADE, related_name='article') comment_history = HistoricalRecords() class Meta: ordering=['-createdAt'] def __str__(self):", "by an authenticated user \"\"\" comment=models.ForeignKey(Comment,on_delete=models.CASCADE) 
like_status=models.BooleanField() liked_by=models.ForeignKey(Profile,on_delete=models.CASCADE) def __str__(self): return \"like by", "\"\"\" body=models.TextField(max_length=500) createdAt=models.DateTimeField(auto_now_add=True) updatedAt=models.DateTimeField(auto_now=True) highlight_start = models.PositiveIntegerField(null=True, blank=True) highlight_end = models.PositiveIntegerField(null=True, blank=True) highlight_text", "from simple_history.models import HistoricalRecords class Comment(models.Model): \"\"\" Handles CRUD on a comment that", "CRUD on a comment that has been made on article \"\"\" body=models.TextField(max_length=500) createdAt=models.DateTimeField(auto_now_add=True)", "from authors.apps.articles.models import Article from authors.apps.profiles.models import Profile from simple_history.models import HistoricalRecords class", "__str__(self): return self.body class CommentReply(models.Model): \"\"\" Handles replying on a specific comment by", "specific user by an authenticated user \"\"\" comment=models.ForeignKey(Comment,on_delete=models.CASCADE) like_status=models.BooleanField() liked_by=models.ForeignKey(Profile,on_delete=models.CASCADE) def __str__(self): return", "reply_body=models.TextField() repliedOn=models.DateTimeField(auto_now_add=True) updatedOn=models.DateTimeField(auto_now=True) author=models.ForeignKey(Profile,on_delete=models.CASCADE) reply_history = HistoricalRecords() class Meta: ordering=['repliedOn'] def __str__(self): return", "models from authors.apps.articles.models import Article from authors.apps.profiles.models import Profile from simple_history.models import HistoricalRecords", "a comment that has been made on article \"\"\" body=models.TextField(max_length=500) createdAt=models.DateTimeField(auto_now_add=True) updatedAt=models.DateTimeField(auto_now=True) highlight_start", "createdAt=models.DateTimeField(auto_now_add=True) updatedAt=models.DateTimeField(auto_now=True) highlight_start = 
models.PositiveIntegerField(null=True, blank=True) highlight_end = models.PositiveIntegerField(null=True, blank=True) highlight_text = models.TextField(max_length=500,", "{}\".format(self.liked_by) class CommentReplyLike(models.Model): \"\"\" Holds data for liking reply made a comment \"\"\"", "models.TextField(max_length=500, null=True) author=models.ForeignKey(Profile,on_delete=models.CASCADE, related_name='authored_by') article=models.ForeignKey(Article,on_delete=models.CASCADE, related_name='article') comment_history = HistoricalRecords() class Meta: ordering=['-createdAt'] def", "from authors.apps.profiles.models import Profile from simple_history.models import HistoricalRecords class Comment(models.Model): \"\"\" Handles CRUD", "class CommentReply(models.Model): \"\"\" Handles replying on a specific comment by made on an", "reply_history = HistoricalRecords() class Meta: ordering=['repliedOn'] def __str__(self): return self.reply_body class CommentLike(models.Model): \"\"\"", "ordering=['-createdAt'] def __str__(self): return self.body class CommentReply(models.Model): \"\"\" Handles replying on a specific", "article \"\"\" comment=models.ForeignKey(Comment,on_delete=models.CASCADE,related_name='replies') reply_body=models.TextField() repliedOn=models.DateTimeField(auto_now_add=True) updatedOn=models.DateTimeField(auto_now=True) author=models.ForeignKey(Profile,on_delete=models.CASCADE) reply_history = HistoricalRecords() class Meta: ordering=['repliedOn']", "on a comment that has been made on article \"\"\" body=models.TextField(max_length=500) createdAt=models.DateTimeField(auto_now_add=True) updatedAt=models.DateTimeField(auto_now=True)", "highlight_start = models.PositiveIntegerField(null=True, blank=True) highlight_end = models.PositiveIntegerField(null=True, blank=True) highlight_text = models.TextField(max_length=500, null=True) author=models.ForeignKey(Profile,on_delete=models.CASCADE,", "data for liking reply made a comment \"\"\" 
liked=models.BooleanField() reply_like_by=models.ForeignKey(Profile,on_delete=models.CASCADE) comment_reply=models.ForeignKey(CommentReply,on_delete=models.CASCADE) def __str__(self):", "been made on article \"\"\" body=models.TextField(max_length=500) createdAt=models.DateTimeField(auto_now_add=True) updatedAt=models.DateTimeField(auto_now=True) highlight_start = models.PositiveIntegerField(null=True, blank=True) highlight_end", "\"\"\" comment=models.ForeignKey(Comment,on_delete=models.CASCADE,related_name='replies') reply_body=models.TextField() repliedOn=models.DateTimeField(auto_now_add=True) updatedOn=models.DateTimeField(auto_now=True) author=models.ForeignKey(Profile,on_delete=models.CASCADE) reply_history = HistoricalRecords() class Meta: ordering=['repliedOn'] def", "= HistoricalRecords() class Meta: ordering=['repliedOn'] def __str__(self): return self.reply_body class CommentLike(models.Model): \"\"\" Handles", "liking reply made a comment \"\"\" liked=models.BooleanField() reply_like_by=models.ForeignKey(Profile,on_delete=models.CASCADE) comment_reply=models.ForeignKey(CommentReply,on_delete=models.CASCADE) def __str__(self): return \"reply", "made on an article \"\"\" comment=models.ForeignKey(Comment,on_delete=models.CASCADE,related_name='replies') reply_body=models.TextField() repliedOn=models.DateTimeField(auto_now_add=True) updatedOn=models.DateTimeField(auto_now=True) author=models.ForeignKey(Profile,on_delete=models.CASCADE) reply_history = HistoricalRecords()", "author=models.ForeignKey(Profile,on_delete=models.CASCADE) reply_history = HistoricalRecords() class Meta: ordering=['repliedOn'] def __str__(self): return self.reply_body class CommentLike(models.Model):", "authors.apps.articles.models import Article from authors.apps.profiles.models import Profile from simple_history.models import HistoricalRecords class Comment(models.Model):", "article \"\"\" body=models.TextField(max_length=500) createdAt=models.DateTimeField(auto_now_add=True) 
updatedAt=models.DateTimeField(auto_now=True) highlight_start = models.PositiveIntegerField(null=True, blank=True) highlight_end = models.PositiveIntegerField(null=True, blank=True)", "Article from authors.apps.profiles.models import Profile from simple_history.models import HistoricalRecords class Comment(models.Model): \"\"\" Handles", "updatedAt=models.DateTimeField(auto_now=True) highlight_start = models.PositiveIntegerField(null=True, blank=True) highlight_end = models.PositiveIntegerField(null=True, blank=True) highlight_text = models.TextField(max_length=500, null=True)", "\"\"\" Handles liking of a specific user by an authenticated user \"\"\" comment=models.ForeignKey(Comment,on_delete=models.CASCADE)", "\"\"\" Handles CRUD on a comment that has been made on article \"\"\"", "models.PositiveIntegerField(null=True, blank=True) highlight_end = models.PositiveIntegerField(null=True, blank=True) highlight_text = models.TextField(max_length=500, null=True) author=models.ForeignKey(Profile,on_delete=models.CASCADE, related_name='authored_by') article=models.ForeignKey(Article,on_delete=models.CASCADE,", "a comment \"\"\" liked=models.BooleanField() reply_like_by=models.ForeignKey(Profile,on_delete=models.CASCADE) comment_reply=models.ForeignKey(CommentReply,on_delete=models.CASCADE) def __str__(self): return \"reply liked by {}\".format(self.reply_like_by)", "authenticated user \"\"\" comment=models.ForeignKey(Comment,on_delete=models.CASCADE) like_status=models.BooleanField() liked_by=models.ForeignKey(Profile,on_delete=models.CASCADE) def __str__(self): return \"like by {}\".format(self.liked_by) class", "a specific user by an authenticated user \"\"\" comment=models.ForeignKey(Comment,on_delete=models.CASCADE) like_status=models.BooleanField() liked_by=models.ForeignKey(Profile,on_delete=models.CASCADE) def __str__(self):", "on article \"\"\" body=models.TextField(max_length=500) createdAt=models.DateTimeField(auto_now_add=True) 
updatedAt=models.DateTimeField(auto_now=True) highlight_start = models.PositiveIntegerField(null=True, blank=True) highlight_end = models.PositiveIntegerField(null=True,", "HistoricalRecords class Comment(models.Model): \"\"\" Handles CRUD on a comment that has been made", "HistoricalRecords() class Meta: ordering=['-createdAt'] def __str__(self): return self.body class CommentReply(models.Model): \"\"\" Handles replying", "updatedOn=models.DateTimeField(auto_now=True) author=models.ForeignKey(Profile,on_delete=models.CASCADE) reply_history = HistoricalRecords() class Meta: ordering=['repliedOn'] def __str__(self): return self.reply_body class", "class Meta: ordering=['-createdAt'] def __str__(self): return self.body class CommentReply(models.Model): \"\"\" Handles replying on", "class CommentReplyLike(models.Model): \"\"\" Holds data for liking reply made a comment \"\"\" liked=models.BooleanField()", "= models.PositiveIntegerField(null=True, blank=True) highlight_end = models.PositiveIntegerField(null=True, blank=True) highlight_text = models.TextField(max_length=500, null=True) author=models.ForeignKey(Profile,on_delete=models.CASCADE, related_name='authored_by')", "HistoricalRecords() class Meta: ordering=['repliedOn'] def __str__(self): return self.reply_body class CommentLike(models.Model): \"\"\" Handles liking", "of a specific user by an authenticated user \"\"\" comment=models.ForeignKey(Comment,on_delete=models.CASCADE) like_status=models.BooleanField() liked_by=models.ForeignKey(Profile,on_delete=models.CASCADE) def", "Holds data for liking reply made a comment \"\"\" liked=models.BooleanField() reply_like_by=models.ForeignKey(Profile,on_delete=models.CASCADE) comment_reply=models.ForeignKey(CommentReply,on_delete=models.CASCADE) def", "import Article from authors.apps.profiles.models import Profile from simple_history.models import HistoricalRecords class Comment(models.Model): \"\"\"", "author=models.ForeignKey(Profile,on_delete=models.CASCADE, 
related_name='authored_by') article=models.ForeignKey(Article,on_delete=models.CASCADE, related_name='article') comment_history = HistoricalRecords() class Meta: ordering=['-createdAt'] def __str__(self): return", "by made on an article \"\"\" comment=models.ForeignKey(Comment,on_delete=models.CASCADE,related_name='replies') reply_body=models.TextField() repliedOn=models.DateTimeField(auto_now_add=True) updatedOn=models.DateTimeField(auto_now=True) author=models.ForeignKey(Profile,on_delete=models.CASCADE) reply_history =", "article=models.ForeignKey(Article,on_delete=models.CASCADE, related_name='article') comment_history = HistoricalRecords() class Meta: ordering=['-createdAt'] def __str__(self): return self.body class", "django.db.models.signals import pre_save from django.db import models from authors.apps.articles.models import Article from authors.apps.profiles.models", "liked_by=models.ForeignKey(Profile,on_delete=models.CASCADE) def __str__(self): return \"like by {}\".format(self.liked_by) class CommentReplyLike(models.Model): \"\"\" Holds data for", "blank=True) highlight_text = models.TextField(max_length=500, null=True) author=models.ForeignKey(Profile,on_delete=models.CASCADE, related_name='authored_by') article=models.ForeignKey(Article,on_delete=models.CASCADE, related_name='article') comment_history = HistoricalRecords() class", "from django.db.models.signals import pre_save from django.db import models from authors.apps.articles.models import Article from", "import HistoricalRecords class Comment(models.Model): \"\"\" Handles CRUD on a comment that has been", "= models.PositiveIntegerField(null=True, blank=True) highlight_text = models.TextField(max_length=500, null=True) author=models.ForeignKey(Profile,on_delete=models.CASCADE, related_name='authored_by') article=models.ForeignKey(Article,on_delete=models.CASCADE, related_name='article') comment_history =", "comment by made on an article \"\"\" 
comment=models.ForeignKey(Comment,on_delete=models.CASCADE,related_name='replies') reply_body=models.TextField() repliedOn=models.DateTimeField(auto_now_add=True) updatedOn=models.DateTimeField(auto_now=True) author=models.ForeignKey(Profile,on_delete=models.CASCADE) reply_history", "repliedOn=models.DateTimeField(auto_now_add=True) updatedOn=models.DateTimeField(auto_now=True) author=models.ForeignKey(Profile,on_delete=models.CASCADE) reply_history = HistoricalRecords() class Meta: ordering=['repliedOn'] def __str__(self): return self.reply_body", "user \"\"\" comment=models.ForeignKey(Comment,on_delete=models.CASCADE) like_status=models.BooleanField() liked_by=models.ForeignKey(Profile,on_delete=models.CASCADE) def __str__(self): return \"like by {}\".format(self.liked_by) class CommentReplyLike(models.Model):", "self.reply_body class CommentLike(models.Model): \"\"\" Handles liking of a specific user by an authenticated", "made on article \"\"\" body=models.TextField(max_length=500) createdAt=models.DateTimeField(auto_now_add=True) updatedAt=models.DateTimeField(auto_now=True) highlight_start = models.PositiveIntegerField(null=True, blank=True) highlight_end =", "\"\"\" Handles replying on a specific comment by made on an article \"\"\"", "related_name='authored_by') article=models.ForeignKey(Article,on_delete=models.CASCADE, related_name='article') comment_history = HistoricalRecords() class Meta: ordering=['-createdAt'] def __str__(self): return self.body", "liking of a specific user by an authenticated user \"\"\" comment=models.ForeignKey(Comment,on_delete=models.CASCADE) like_status=models.BooleanField() liked_by=models.ForeignKey(Profile,on_delete=models.CASCADE)", "body=models.TextField(max_length=500) createdAt=models.DateTimeField(auto_now_add=True) updatedAt=models.DateTimeField(auto_now=True) highlight_start = models.PositiveIntegerField(null=True, blank=True) highlight_end = models.PositiveIntegerField(null=True, blank=True) highlight_text =" ]
[ "for epoch in range(MAX_EPOCH): for batch in range(data.batch_total): i, l = sess.run([ibatch, lbatch])", "'rb') as ff: for i,d in enumerate(ff): if only_path: path = d[:-1] label", "label_queue], batch_size=self.batch_size) return img_batch, label_batch if __name__ == '__main__': IMG_WIDTH = 1200 IMG_HEIGHT", "img_height self.batch_total = 0 def load_csv(self, csv_path): paths = [] labels = []", "= data.run(csv_path=img_path) with tf.Session() as sess: coord = tf.train.Coordinator() thread = tf.train.start_queue_runners(sess=sess, coord=coord)", "tf.image.resize_images(img_data, [self.img_w, self.img_h]) img_standard = tf.image.per_image_standardization(img_resize) img_batch, label_batch = tf.train.batch([img_standard, label_queue], batch_size=self.batch_size) return", "= sess.run([ibatch, lbatch]) print '{}/{}, {}/{}: {},{}'.format(batch, data.batch_total, epoch, MAX_EPOCH, len(l), i.shape) coord.request_stop()", "tf.image.decode_jpeg(img_content, channels=3) img_resize = tf.image.resize_images(img_data, [self.img_w, self.img_h]) img_standard = tf.image.per_image_standardization(img_resize) img_batch, label_batch =", "self.img_w = img_width self.img_h = img_height self.batch_total = 0 def load_csv(self, csv_path): paths", "= [] labels = [] only_path = True with open(csv_path, 'rb') as ff:", "= loader(batch_size=90, img_width=IMG_WIDTH, img_height=IMG_HEIGHT) ibatch, lbatch = data.run(csv_path=img_path) with tf.Session() as sess: coord", "batch_size self.img_w = img_width self.img_h = img_height self.batch_total = 0 def load_csv(self, csv_path):", "[] only_path = True with open(csv_path, 'rb') as ff: for i,d in enumerate(ff):", "labels_list = self.load_csv(csv_path) img_queue, label_queue = tf.train.slice_input_producer([paths_list, labels_list], shuffle=True) img_content = tf.read_file(img_queue) img_data", "= 1200 IMG_HEIGHT = 1600 MAX_EPOCH = 1000 img_path = '/home/kcadmin/datasets/img_list.csv' data =", "1000 img_path = '/home/kcadmin/datasets/img_list.csv' 
data = loader(batch_size=90, img_width=IMG_WIDTH, img_height=IMG_HEIGHT) ibatch, lbatch = data.run(csv_path=img_path)", "epoch in range(MAX_EPOCH): for batch in range(data.batch_total): i, l = sess.run([ibatch, lbatch]) print", "import numpy as np import tensorflow as tf class loader(object): def __init__(self, batch_size=1,", "img_width=0, img_height=0): self.batch_size = batch_size self.img_w = img_width self.img_h = img_height self.batch_total =", "paths_list, labels_list = self.load_csv(csv_path) img_queue, label_queue = tf.train.slice_input_producer([paths_list, labels_list], shuffle=True) img_content = tf.read_file(img_queue)", "tf.train.start_queue_runners(sess=sess, coord=coord) for epoch in range(MAX_EPOCH): for batch in range(data.batch_total): i, l =", "self.img_h = img_height self.batch_total = 0 def load_csv(self, csv_path): paths = [] labels", "batch in range(data.batch_total): i, l = sess.run([ibatch, lbatch]) print '{}/{}, {}/{}: {},{}'.format(batch, data.batch_total,", "in range(MAX_EPOCH): for batch in range(data.batch_total): i, l = sess.run([ibatch, lbatch]) print '{}/{},", "batch_size=1, img_width=0, img_height=0): self.batch_size = batch_size self.img_w = img_width self.img_h = img_height self.batch_total", "+ 1 return paths, labels def run(self, csv_path): paths_list, labels_list = self.load_csv(csv_path) img_queue,", "tf.read_file(img_queue) img_data = tf.image.decode_jpeg(img_content, channels=3) img_resize = tf.image.resize_images(img_data, [self.img_w, self.img_h]) img_standard = tf.image.per_image_standardization(img_resize)", "# _*_ coding: utf-8 _*_ import os import time import numpy as np", "1600 MAX_EPOCH = 1000 img_path = '/home/kcadmin/datasets/img_list.csv' data = loader(batch_size=90, img_width=IMG_WIDTH, img_height=IMG_HEIGHT) ibatch,", "img_queue, label_queue = tf.train.slice_input_producer([paths_list, labels_list], shuffle=True) img_content = tf.read_file(img_queue) img_data = tf.image.decode_jpeg(img_content, channels=3)", 
"img_width self.img_h = img_height self.batch_total = 0 def load_csv(self, csv_path): paths = []", "img_resize = tf.image.resize_images(img_data, [self.img_w, self.img_h]) img_standard = tf.image.per_image_standardization(img_resize) img_batch, label_batch = tf.train.batch([img_standard, label_queue],", "tf.image.per_image_standardization(img_resize) img_batch, label_batch = tf.train.batch([img_standard, label_queue], batch_size=self.batch_size) return img_batch, label_batch if __name__ ==", "tf class loader(object): def __init__(self, batch_size=1, img_width=0, img_height=0): self.batch_size = batch_size self.img_w =", "path = path[:-1] paths.append(path) labels.append(label) self.batch_total = len(labels) // self.batch_size + 1 return", "img_standard = tf.image.per_image_standardization(img_resize) img_batch, label_batch = tf.train.batch([img_standard, label_queue], batch_size=self.batch_size) return img_batch, label_batch if", "= tf.image.resize_images(img_data, [self.img_w, self.img_h]) img_standard = tf.image.per_image_standardization(img_resize) img_batch, label_batch = tf.train.batch([img_standard, label_queue], batch_size=self.batch_size)", "in enumerate(ff): if only_path: path = d[:-1] label = i else: path, label", "data.run(csv_path=img_path) with tf.Session() as sess: coord = tf.train.Coordinator() thread = tf.train.start_queue_runners(sess=sess, coord=coord) for", "= img_height self.batch_total = 0 def load_csv(self, csv_path): paths = [] labels =", "run(self, csv_path): paths_list, labels_list = self.load_csv(csv_path) img_queue, label_queue = tf.train.slice_input_producer([paths_list, labels_list], shuffle=True) img_content", "return img_batch, label_batch if __name__ == '__main__': IMG_WIDTH = 1200 IMG_HEIGHT = 1600", "def load_csv(self, csv_path): paths = [] labels = [] only_path = True with", "img_batch, label_batch if __name__ == '__main__': IMG_WIDTH = 1200 IMG_HEIGHT = 1600 MAX_EPOCH", "_*_ coding: utf-8 _*_ import os import time import numpy as 
np import", "img_height=IMG_HEIGHT) ibatch, lbatch = data.run(csv_path=img_path) with tf.Session() as sess: coord = tf.train.Coordinator() thread", "sess: coord = tf.train.Coordinator() thread = tf.train.start_queue_runners(sess=sess, coord=coord) for epoch in range(MAX_EPOCH): for", "self.batch_total = len(labels) // self.batch_size + 1 return paths, labels def run(self, csv_path):", "[self.img_w, self.img_h]) img_standard = tf.image.per_image_standardization(img_resize) img_batch, label_batch = tf.train.batch([img_standard, label_queue], batch_size=self.batch_size) return img_batch,", "coding: utf-8 _*_ import os import time import numpy as np import tensorflow", "thread = tf.train.start_queue_runners(sess=sess, coord=coord) for epoch in range(MAX_EPOCH): for batch in range(data.batch_total): i,", "= tf.train.slice_input_producer([paths_list, labels_list], shuffle=True) img_content = tf.read_file(img_queue) img_data = tf.image.decode_jpeg(img_content, channels=3) img_resize =", "label_batch if __name__ == '__main__': IMG_WIDTH = 1200 IMG_HEIGHT = 1600 MAX_EPOCH =", "shuffle=True) img_content = tf.read_file(img_queue) img_data = tf.image.decode_jpeg(img_content, channels=3) img_resize = tf.image.resize_images(img_data, [self.img_w, self.img_h])", "= d[:-1] label = i else: path, label = d.split(',') path = path[:-1]", "as tf class loader(object): def __init__(self, batch_size=1, img_width=0, img_height=0): self.batch_size = batch_size self.img_w", "img_data = tf.image.decode_jpeg(img_content, channels=3) img_resize = tf.image.resize_images(img_data, [self.img_w, self.img_h]) img_standard = tf.image.per_image_standardization(img_resize) img_batch,", "coord=coord) for epoch in range(MAX_EPOCH): for batch in range(data.batch_total): i, l = sess.run([ibatch,", "__name__ == '__main__': IMG_WIDTH = 1200 IMG_HEIGHT = 1600 MAX_EPOCH = 1000 img_path", "path[:-1] paths.append(path) labels.append(label) self.batch_total = len(labels) // self.batch_size + 1 return paths, 
labels", "class loader(object): def __init__(self, batch_size=1, img_width=0, img_height=0): self.batch_size = batch_size self.img_w = img_width", "'__main__': IMG_WIDTH = 1200 IMG_HEIGHT = 1600 MAX_EPOCH = 1000 img_path = '/home/kcadmin/datasets/img_list.csv'", "1200 IMG_HEIGHT = 1600 MAX_EPOCH = 1000 img_path = '/home/kcadmin/datasets/img_list.csv' data = loader(batch_size=90,", "d.split(',') path = path[:-1] paths.append(path) labels.append(label) self.batch_total = len(labels) // self.batch_size + 1", "= i else: path, label = d.split(',') path = path[:-1] paths.append(path) labels.append(label) self.batch_total", "os import time import numpy as np import tensorflow as tf class loader(object):", "= tf.train.start_queue_runners(sess=sess, coord=coord) for epoch in range(MAX_EPOCH): for batch in range(data.batch_total): i, l", "[] labels = [] only_path = True with open(csv_path, 'rb') as ff: for", "'/home/kcadmin/datasets/img_list.csv' data = loader(batch_size=90, img_width=IMG_WIDTH, img_height=IMG_HEIGHT) ibatch, lbatch = data.run(csv_path=img_path) with tf.Session() as", "data = loader(batch_size=90, img_width=IMG_WIDTH, img_height=IMG_HEIGHT) ibatch, lbatch = data.run(csv_path=img_path) with tf.Session() as sess:", "import time import numpy as np import tensorflow as tf class loader(object): def", "def __init__(self, batch_size=1, img_width=0, img_height=0): self.batch_size = batch_size self.img_w = img_width self.img_h =", "labels.append(label) self.batch_total = len(labels) // self.batch_size + 1 return paths, labels def run(self,", "import os import time import numpy as np import tensorflow as tf class", "label = i else: path, label = d.split(',') path = path[:-1] paths.append(path) labels.append(label)", "tf.train.Coordinator() thread = tf.train.start_queue_runners(sess=sess, coord=coord) for epoch in range(MAX_EPOCH): for batch in range(data.batch_total):", "= path[:-1] paths.append(path) labels.append(label) self.batch_total = len(labels) // 
self.batch_size + 1 return paths,", "= 1000 img_path = '/home/kcadmin/datasets/img_list.csv' data = loader(batch_size=90, img_width=IMG_WIDTH, img_height=IMG_HEIGHT) ibatch, lbatch =", "else: path, label = d.split(',') path = path[:-1] paths.append(path) labels.append(label) self.batch_total = len(labels)", "1 return paths, labels def run(self, csv_path): paths_list, labels_list = self.load_csv(csv_path) img_queue, label_queue", "= '/home/kcadmin/datasets/img_list.csv' data = loader(batch_size=90, img_width=IMG_WIDTH, img_height=IMG_HEIGHT) ibatch, lbatch = data.run(csv_path=img_path) with tf.Session()", "self.batch_total = 0 def load_csv(self, csv_path): paths = [] labels = [] only_path", "ff: for i,d in enumerate(ff): if only_path: path = d[:-1] label = i", "def run(self, csv_path): paths_list, labels_list = self.load_csv(csv_path) img_queue, label_queue = tf.train.slice_input_producer([paths_list, labels_list], shuffle=True)", "= self.load_csv(csv_path) img_queue, label_queue = tf.train.slice_input_producer([paths_list, labels_list], shuffle=True) img_content = tf.read_file(img_queue) img_data =", "labels_list], shuffle=True) img_content = tf.read_file(img_queue) img_data = tf.image.decode_jpeg(img_content, channels=3) img_resize = tf.image.resize_images(img_data, [self.img_w,", "IMG_WIDTH = 1200 IMG_HEIGHT = 1600 MAX_EPOCH = 1000 img_path = '/home/kcadmin/datasets/img_list.csv' data", "path = d[:-1] label = i else: path, label = d.split(',') path =", "for batch in range(data.batch_total): i, l = sess.run([ibatch, lbatch]) print '{}/{}, {}/{}: {},{}'.format(batch,", "enumerate(ff): if only_path: path = d[:-1] label = i else: path, label =", "paths = [] labels = [] only_path = True with open(csv_path, 'rb') as", "import tensorflow as tf class loader(object): def __init__(self, batch_size=1, img_width=0, img_height=0): self.batch_size =", "self.batch_size + 1 return paths, labels def run(self, csv_path): paths_list, labels_list = self.load_csv(csv_path)", 
"csv_path): paths_list, labels_list = self.load_csv(csv_path) img_queue, label_queue = tf.train.slice_input_producer([paths_list, labels_list], shuffle=True) img_content =", "numpy as np import tensorflow as tf class loader(object): def __init__(self, batch_size=1, img_width=0,", "for i,d in enumerate(ff): if only_path: path = d[:-1] label = i else:", "IMG_HEIGHT = 1600 MAX_EPOCH = 1000 img_path = '/home/kcadmin/datasets/img_list.csv' data = loader(batch_size=90, img_width=IMG_WIDTH,", "batch_size=self.batch_size) return img_batch, label_batch if __name__ == '__main__': IMG_WIDTH = 1200 IMG_HEIGHT =", "only_path = True with open(csv_path, 'rb') as ff: for i,d in enumerate(ff): if", "img_batch, label_batch = tf.train.batch([img_standard, label_queue], batch_size=self.batch_size) return img_batch, label_batch if __name__ == '__main__':", "loader(object): def __init__(self, batch_size=1, img_width=0, img_height=0): self.batch_size = batch_size self.img_w = img_width self.img_h", "lbatch = data.run(csv_path=img_path) with tf.Session() as sess: coord = tf.train.Coordinator() thread = tf.train.start_queue_runners(sess=sess,", "self.load_csv(csv_path) img_queue, label_queue = tf.train.slice_input_producer([paths_list, labels_list], shuffle=True) img_content = tf.read_file(img_queue) img_data = tf.image.decode_jpeg(img_content,", "= len(labels) // self.batch_size + 1 return paths, labels def run(self, csv_path): paths_list,", "0 def load_csv(self, csv_path): paths = [] labels = [] only_path = True", "= batch_size self.img_w = img_width self.img_h = img_height self.batch_total = 0 def load_csv(self,", "in range(data.batch_total): i, l = sess.run([ibatch, lbatch]) print '{}/{}, {}/{}: {},{}'.format(batch, data.batch_total, epoch,", "d[:-1] label = i else: path, label = d.split(',') path = path[:-1] paths.append(path)", "utf-8 _*_ import os import time import numpy as np import tensorflow as", "= True with open(csv_path, 'rb') as ff: for i,d in enumerate(ff): if only_path:", 
"ibatch, lbatch = data.run(csv_path=img_path) with tf.Session() as sess: coord = tf.train.Coordinator() thread =", "return paths, labels def run(self, csv_path): paths_list, labels_list = self.load_csv(csv_path) img_queue, label_queue =", "self.img_h]) img_standard = tf.image.per_image_standardization(img_resize) img_batch, label_batch = tf.train.batch([img_standard, label_queue], batch_size=self.batch_size) return img_batch, label_batch", "label_batch = tf.train.batch([img_standard, label_queue], batch_size=self.batch_size) return img_batch, label_batch if __name__ == '__main__': IMG_WIDTH", "tf.Session() as sess: coord = tf.train.Coordinator() thread = tf.train.start_queue_runners(sess=sess, coord=coord) for epoch in", "path, label = d.split(',') path = path[:-1] paths.append(path) labels.append(label) self.batch_total = len(labels) //", "img_content = tf.read_file(img_queue) img_data = tf.image.decode_jpeg(img_content, channels=3) img_resize = tf.image.resize_images(img_data, [self.img_w, self.img_h]) img_standard", "= tf.train.batch([img_standard, label_queue], batch_size=self.batch_size) return img_batch, label_batch if __name__ == '__main__': IMG_WIDTH =", "i, l = sess.run([ibatch, lbatch]) print '{}/{}, {}/{}: {},{}'.format(batch, data.batch_total, epoch, MAX_EPOCH, len(l),", "img_path = '/home/kcadmin/datasets/img_list.csv' data = loader(batch_size=90, img_width=IMG_WIDTH, img_height=IMG_HEIGHT) ibatch, lbatch = data.run(csv_path=img_path) with", "csv_path): paths = [] labels = [] only_path = True with open(csv_path, 'rb')", "as ff: for i,d in enumerate(ff): if only_path: path = d[:-1] label =", "labels def run(self, csv_path): paths_list, labels_list = self.load_csv(csv_path) img_queue, label_queue = tf.train.slice_input_producer([paths_list, labels_list],", "= tf.image.decode_jpeg(img_content, channels=3) img_resize = tf.image.resize_images(img_data, [self.img_w, self.img_h]) img_standard = tf.image.per_image_standardization(img_resize) img_batch, 
label_batch", "sess.run([ibatch, lbatch]) print '{}/{}, {}/{}: {},{}'.format(batch, data.batch_total, epoch, MAX_EPOCH, len(l), i.shape) coord.request_stop() coord.join(thread)", "coord = tf.train.Coordinator() thread = tf.train.start_queue_runners(sess=sess, coord=coord) for epoch in range(MAX_EPOCH): for batch", "range(MAX_EPOCH): for batch in range(data.batch_total): i, l = sess.run([ibatch, lbatch]) print '{}/{}, {}/{}:", "= img_width self.img_h = img_height self.batch_total = 0 def load_csv(self, csv_path): paths =", "self.batch_size = batch_size self.img_w = img_width self.img_h = img_height self.batch_total = 0 def", "with tf.Session() as sess: coord = tf.train.Coordinator() thread = tf.train.start_queue_runners(sess=sess, coord=coord) for epoch", "img_height=0): self.batch_size = batch_size self.img_w = img_width self.img_h = img_height self.batch_total = 0", "loader(batch_size=90, img_width=IMG_WIDTH, img_height=IMG_HEIGHT) ibatch, lbatch = data.run(csv_path=img_path) with tf.Session() as sess: coord =", "paths, labels def run(self, csv_path): paths_list, labels_list = self.load_csv(csv_path) img_queue, label_queue = tf.train.slice_input_producer([paths_list,", "i else: path, label = d.split(',') path = path[:-1] paths.append(path) labels.append(label) self.batch_total =", "= tf.read_file(img_queue) img_data = tf.image.decode_jpeg(img_content, channels=3) img_resize = tf.image.resize_images(img_data, [self.img_w, self.img_h]) img_standard =", "np import tensorflow as tf class loader(object): def __init__(self, batch_size=1, img_width=0, img_height=0): self.batch_size", "if only_path: path = d[:-1] label = i else: path, label = d.split(',')", "tf.train.slice_input_producer([paths_list, labels_list], shuffle=True) img_content = tf.read_file(img_queue) img_data = tf.image.decode_jpeg(img_content, channels=3) img_resize = tf.image.resize_images(img_data,", "img_width=IMG_WIDTH, img_height=IMG_HEIGHT) ibatch, lbatch = data.run(csv_path=img_path) with 
tf.Session() as sess: coord = tf.train.Coordinator()", "__init__(self, batch_size=1, img_width=0, img_height=0): self.batch_size = batch_size self.img_w = img_width self.img_h = img_height", "i,d in enumerate(ff): if only_path: path = d[:-1] label = i else: path,", "time import numpy as np import tensorflow as tf class loader(object): def __init__(self,", "as np import tensorflow as tf class loader(object): def __init__(self, batch_size=1, img_width=0, img_height=0):", "l = sess.run([ibatch, lbatch]) print '{}/{}, {}/{}: {},{}'.format(batch, data.batch_total, epoch, MAX_EPOCH, len(l), i.shape)", "// self.batch_size + 1 return paths, labels def run(self, csv_path): paths_list, labels_list =", "load_csv(self, csv_path): paths = [] labels = [] only_path = True with open(csv_path,", "= 0 def load_csv(self, csv_path): paths = [] labels = [] only_path =", "= [] only_path = True with open(csv_path, 'rb') as ff: for i,d in", "len(labels) // self.batch_size + 1 return paths, labels def run(self, csv_path): paths_list, labels_list", "range(data.batch_total): i, l = sess.run([ibatch, lbatch]) print '{}/{}, {}/{}: {},{}'.format(batch, data.batch_total, epoch, MAX_EPOCH,", "open(csv_path, 'rb') as ff: for i,d in enumerate(ff): if only_path: path = d[:-1]", "MAX_EPOCH = 1000 img_path = '/home/kcadmin/datasets/img_list.csv' data = loader(batch_size=90, img_width=IMG_WIDTH, img_height=IMG_HEIGHT) ibatch, lbatch", "label_queue = tf.train.slice_input_producer([paths_list, labels_list], shuffle=True) img_content = tf.read_file(img_queue) img_data = tf.image.decode_jpeg(img_content, channels=3) img_resize", "tensorflow as tf class loader(object): def __init__(self, batch_size=1, img_width=0, img_height=0): self.batch_size = batch_size", "with open(csv_path, 'rb') as ff: for i,d in enumerate(ff): if only_path: path =", "if __name__ == '__main__': IMG_WIDTH = 1200 IMG_HEIGHT = 1600 MAX_EPOCH = 1000", "labels = [] only_path = True with open(csv_path, 'rb') as ff: for i,d", "= 
tf.image.per_image_standardization(img_resize) img_batch, label_batch = tf.train.batch([img_standard, label_queue], batch_size=self.batch_size) return img_batch, label_batch if __name__", "= d.split(',') path = path[:-1] paths.append(path) labels.append(label) self.batch_total = len(labels) // self.batch_size +", "True with open(csv_path, 'rb') as ff: for i,d in enumerate(ff): if only_path: path", "only_path: path = d[:-1] label = i else: path, label = d.split(',') path", "label = d.split(',') path = path[:-1] paths.append(path) labels.append(label) self.batch_total = len(labels) // self.batch_size", "= tf.train.Coordinator() thread = tf.train.start_queue_runners(sess=sess, coord=coord) for epoch in range(MAX_EPOCH): for batch in", "paths.append(path) labels.append(label) self.batch_total = len(labels) // self.batch_size + 1 return paths, labels def", "as sess: coord = tf.train.Coordinator() thread = tf.train.start_queue_runners(sess=sess, coord=coord) for epoch in range(MAX_EPOCH):", "_*_ import os import time import numpy as np import tensorflow as tf", "== '__main__': IMG_WIDTH = 1200 IMG_HEIGHT = 1600 MAX_EPOCH = 1000 img_path =", "channels=3) img_resize = tf.image.resize_images(img_data, [self.img_w, self.img_h]) img_standard = tf.image.per_image_standardization(img_resize) img_batch, label_batch = tf.train.batch([img_standard,", "= 1600 MAX_EPOCH = 1000 img_path = '/home/kcadmin/datasets/img_list.csv' data = loader(batch_size=90, img_width=IMG_WIDTH, img_height=IMG_HEIGHT)", "tf.train.batch([img_standard, label_queue], batch_size=self.batch_size) return img_batch, label_batch if __name__ == '__main__': IMG_WIDTH = 1200" ]
[ "# -*- coding: utf-8 -*- import deepnlpf.log as log class Execute (object): \"\"\"", "utf-8 -*- import deepnlpf.log as log class Execute (object): \"\"\" Execute Scripts External", "as ro r = ro.r r.source(script) return r.main(*args) def run_java(self, jar_file, *args): try:", "import rpy2.robjects as ro r = ro.r r.source(script) return r.main(*args) def run_java(self, jar_file,", "r.source(script) return r.main(*args) def run_java(self, jar_file, *args): try: import subprocess return subprocess.check_output(['java', '-jar',", "= ro.r r.source(script) return r.main(*args) def run_java(self, jar_file, *args): try: import subprocess return", "Execute (object): \"\"\" Execute Scripts External in Outher Language Programation. \"\"\" def __init__(self):", "ro.r r.source(script) return r.main(*args) def run_java(self, jar_file, *args): try: import subprocess return subprocess.check_output(['java',", "log class Execute (object): \"\"\" Execute Scripts External in Outher Language Programation. \"\"\"", "deepnlpf.log as log class Execute (object): \"\"\" Execute Scripts External in Outher Language", "coding: utf-8 -*- import deepnlpf.log as log class Execute (object): \"\"\" Execute Scripts", "import subprocess return subprocess.check_output(['java', '-jar', jar_file, *args], shell=False) except Exception as err: log.logger.error(err)", "Execute Scripts External in Outher Language Programation. \"\"\" def __init__(self): pass def run_r(self,", "return r.main(*args) def run_java(self, jar_file, *args): try: import subprocess return subprocess.check_output(['java', '-jar', jar_file,", "as log class Execute (object): \"\"\" Execute Scripts External in Outher Language Programation.", "Language Programation. 
\"\"\" def __init__(self): pass def run_r(self, script, *args): import rpy2.robjects as", "rpy2.robjects as ro r = ro.r r.source(script) return r.main(*args) def run_java(self, jar_file, *args):", "run_java(self, jar_file, *args): try: import subprocess return subprocess.check_output(['java', '-jar', jar_file, *args], shell=False) except", "run_r(self, script, *args): import rpy2.robjects as ro r = ro.r r.source(script) return r.main(*args)", "Outher Language Programation. \"\"\" def __init__(self): pass def run_r(self, script, *args): import rpy2.robjects", "script, *args): import rpy2.robjects as ro r = ro.r r.source(script) return r.main(*args) def", "Programation. \"\"\" def __init__(self): pass def run_r(self, script, *args): import rpy2.robjects as ro", "try: import subprocess return subprocess.check_output(['java', '-jar', jar_file, *args], shell=False) except Exception as err:", "pass def run_r(self, script, *args): import rpy2.robjects as ro r = ro.r r.source(script)", "*args): try: import subprocess return subprocess.check_output(['java', '-jar', jar_file, *args], shell=False) except Exception as", "(object): \"\"\" Execute Scripts External in Outher Language Programation. \"\"\" def __init__(self): pass", "r = ro.r r.source(script) return r.main(*args) def run_java(self, jar_file, *args): try: import subprocess", "in Outher Language Programation. \"\"\" def __init__(self): pass def run_r(self, script, *args): import", "-*- coding: utf-8 -*- import deepnlpf.log as log class Execute (object): \"\"\" Execute", "ro r = ro.r r.source(script) return r.main(*args) def run_java(self, jar_file, *args): try: import", "def run_java(self, jar_file, *args): try: import subprocess return subprocess.check_output(['java', '-jar', jar_file, *args], shell=False)", "class Execute (object): \"\"\" Execute Scripts External in Outher Language Programation. \"\"\" def", "Scripts External in Outher Language Programation. 
\"\"\" def __init__(self): pass def run_r(self, script,", "__init__(self): pass def run_r(self, script, *args): import rpy2.robjects as ro r = ro.r", "def __init__(self): pass def run_r(self, script, *args): import rpy2.robjects as ro r =", "-*- import deepnlpf.log as log class Execute (object): \"\"\" Execute Scripts External in", "External in Outher Language Programation. \"\"\" def __init__(self): pass def run_r(self, script, *args):", "*args): import rpy2.robjects as ro r = ro.r r.source(script) return r.main(*args) def run_java(self,", "\"\"\" def __init__(self): pass def run_r(self, script, *args): import rpy2.robjects as ro r", "<filename>deepnlpf/core/execute.py<gh_stars>1-10 # -*- coding: utf-8 -*- import deepnlpf.log as log class Execute (object):", "jar_file, *args): try: import subprocess return subprocess.check_output(['java', '-jar', jar_file, *args], shell=False) except Exception", "\"\"\" Execute Scripts External in Outher Language Programation. \"\"\" def __init__(self): pass def", "r.main(*args) def run_java(self, jar_file, *args): try: import subprocess return subprocess.check_output(['java', '-jar', jar_file, *args],", "def run_r(self, script, *args): import rpy2.robjects as ro r = ro.r r.source(script) return", "import deepnlpf.log as log class Execute (object): \"\"\" Execute Scripts External in Outher" ]
[]
[ "model: model.fit_ctr).sink(fit_ctr_list.append) n_fits = 10 for i in range(n_fits): X_stream.emit(X_example) y_stream.emit(y_example) predicate =", "pred_df = model.stream_predict(X, y_example=pd.DataFrame(data=example_data)) pred_series_list, pred_df_list = [], [] pred_series.stream.sink(pred_series_list.append) pred_df.stream.sink(pred_df_list.append) n_fits =", "my_est = MyEstimator() with pytest.raises(AttributeError): my_est._check_method('score') my_est._check_method('partial_fit') my_est._check_method('predict') def test_stream_inputs(): wrong_Xs = (pd.DataFrame(),", "test_check_method(): class MyEstimator(StreamEstimator): def partial_fit(self, X, y): pass def predict(self, X): pass my_est", "X class MyStreamingEstimator(MyEstimator, StreamEstimator): pass with pytest.raises(TypeError): my_est = MyStreamingEstimator() def test_no_predict_method(): class", "MyEstimator() score_stream = model.stream_score(X, y) score_list = list() score_stream.stream.sink(score_list.append) score_predicate = lambda: score_list", "def fit(self, X): return X class MyStreamingEstimator(MyEstimator, StreamEstimator): pass with pytest.raises(TypeError): my_est =", "def test_check_method(): class MyEstimator(StreamEstimator): def partial_fit(self, X, y): pass def predict(self, X): pass", "MyStreamingEstimator() def test_no_predict_method(): class MyEstimator(): def fit(self, X): return X class MyStreamingEstimator(MyEstimator, StreamEstimator):", "list()) wrong_entries = product(wrong_Xs, wrong_ys) for X, y in wrong_entries: with pytest.raises(AssertionError): StreamEstimator._check_stream_inputs(X,", "y) fit_ctr_list = [] fit_results.map(lambda model: model.fit_ctr).sink(fit_ctr_list.append) n_fits = 10 for i in", "pytest.raises(AssertionError): StreamEstimator._check_stream_inputs(X, y) class MyEstimator(): def __init__(self): self.fit_ctr = 0 self.predict_ctr = 0", "MyStreamingEstimator() example_data = pd.Series(pd.np.ones(X_example.shape[0])) pred_series = 
model.stream_predict(X, y_example=pd.Series(example_data)) pred_df = model.stream_predict(X, y_example=pd.DataFrame(data=example_data)) pred_series_list,", "class MyStreamingEstimator(MyEstimator, StreamEstimator): pass with pytest.raises(TypeError): my_est = MyStreamingEstimator() def test_check_method(): class MyEstimator(StreamEstimator):", "Stream() X, y = DataFrame(X_stream, example=X_example), Series(y_stream, example=y_example) model = MyStreamingEstimator() fit_results =", "for X, y in wrong_entries: with pytest.raises(AssertionError): StreamEstimator._check_stream_inputs(X, y) class MyEstimator(): def __init__(self):", "Stream(), Stream() X, y = DataFrame(X_stream, example=X_example), Series(y_stream, example=y_example) model = MyEstimator() score_stream", "X, y = DataFrame(X_stream, example=X_example), Series(y_stream, example=y_example) model = MyEstimator() score_stream = model.stream_score(X,", "= 10 for i in range(n_fits): X_stream.emit(X_example) y_stream.emit(y_example) predicate = lambda: (list(range(1, n_fits", "class MyStreamingEstimator(MyEstimator, StreamEstimator): pass def test_stream_partial_fit(): X_example, y_example = pd.DataFrame({'name': [], 'amount': []}),", ".1) await_for(pred_series_predicate, .1) await_for(pred_df_predicate, .1) def test_score_stream(): class MyEstimator(StreamEstimator): def partial_fit(self, X, y):", "= model.stream_score(X, y) score_list = list() score_stream.stream.sink(score_list.append) score_predicate = lambda: score_list == [1]", "in wrong_entries: with pytest.raises(AssertionError): StreamEstimator._check_stream_inputs(X, y) class MyEstimator(): def __init__(self): self.fit_ctr = 0", "X, y): pass def predict(self, X): pass def score(self, X, y): return 1", "streamz import Stream import pandas as pd from streamz.dataframe import DataFrame, Series from", "pytest.raises(AttributeError): my_est._check_method('score') my_est._check_method('partial_fit') my_est._check_method('predict') def test_stream_inputs(): 
wrong_Xs = (pd.DataFrame(), None, pd.np.array([]), list()) wrong_ys", "[None] * n_rows, 'amount': [None] * n_rows}), pd.Series([]) X_stream, y_stream = Stream(), Stream()", "np.ones(X.shape[0]) class MyStreamingEstimator(MyEstimator, StreamEstimator): pass def test_stream_partial_fit(): X_example, y_example = pd.DataFrame({'name': [], 'amount':", "product from streamz_ml import StreamEstimator import numpy as np from streamz import Stream", "MyStreamingEstimator(MyEstimator, StreamEstimator): pass with pytest.raises(TypeError): my_est = MyStreamingEstimator() def test_no_predict_method(): class MyEstimator(): def", "for i in range(n_fits): X_stream.emit(X_example) y_stream.emit(y_example) predicate = lambda: (list(range(1, n_fits + 1))", "pd.DataFrame({'name': [None] * n_rows, 'amount': [None] * n_rows}), pd.Series([]) X_stream, y_stream = Stream(),", "n_fits)) pred_series_predicate = \\ lambda: pd.np.array_equal(pd.np.concatenate(pred_series_list).reshape(-1), target_predictions.reshape(-1)) pred_df_predicate = \\ lambda: pd.np.array_equal(pd.np.concatenate(pred_df_list).reshape(-1), target_predictions.reshape(-1))", "pred_series = model.stream_predict(X, y_example=pd.Series(example_data)) pred_df = model.stream_predict(X, y_example=pd.DataFrame(data=example_data)) pred_series_list, pred_df_list = [], []", "y_example = pd.DataFrame({'name': [], 'amount': []}), pd.Series([]) X_stream, y_stream = Stream(), Stream() X,", "pytest.raises(TypeError): my_est = MyStreamingEstimator() def test_no_predict_method(): class MyEstimator(): def fit(self, X): return X", "model = MyStreamingEstimator() fit_results = model.stream_partial_fit(X, y) fit_ctr_list = [] fit_results.map(lambda model: model.fit_ctr).sink(fit_ctr_list.append)", "'amount': []}), pd.Series([]) X_stream, y_stream = Stream(), Stream() X, y = DataFrame(X_stream, example=X_example),", "'amount': [None] * n_rows}) X_stream = Stream() X = DataFrame(X_stream, example=X_example) model =", "fit_ctr_list = [] 
fit_results.map(lambda model: model.fit_ctr).sink(fit_ctr_list.append) n_fits = 10 for i in range(n_fits):", "StreamEstimator): pass with pytest.raises(TypeError): my_est = MyStreamingEstimator() def test_no_predict_method(): class MyEstimator(): def fit(self,", "streamz.utils_test import wait_for, await_for def test_no_fit_method(): class MyEstimator(): def predict(self, X): return X", "1 n_rows = 20 X_example, y_example = pd.DataFrame({'name': [None] * n_rows, 'amount': [None]", "X_stream, y_stream = Stream(), Stream() X, y = DataFrame(X_stream, example=X_example), Series(y_stream, example=y_example) model", "pred_df_predicate = \\ lambda: pd.np.array_equal(pd.np.concatenate(pred_df_list).reshape(-1), target_predictions.reshape(-1)) await_for(ctr_predicate, .1) await_for(pred_series_predicate, .1) await_for(pred_df_predicate, .1) def", "\\ lambda: pd.np.array_equal(pd.np.concatenate(pred_df_list).reshape(-1), target_predictions.reshape(-1)) await_for(ctr_predicate, .1) await_for(pred_series_predicate, .1) await_for(pred_df_predicate, .1) def test_score_stream(): class", "list()) wrong_ys = (pd.DataFrame(), pd.Series(), None, pd.np.array([]), list()) wrong_entries = product(wrong_Xs, wrong_ys) for", "fit_results = model.stream_partial_fit(X, y) fit_ctr_list = [] fit_results.map(lambda model: model.fit_ctr).sink(fit_ctr_list.append) n_fits = 10", "= Stream(), Stream() X, y = DataFrame(X_stream, example=X_example), Series(y_stream, example=y_example) model = MyStreamingEstimator()", "lambda: pd.np.array_equal(pd.np.concatenate(pred_df_list).reshape(-1), target_predictions.reshape(-1)) await_for(ctr_predicate, .1) await_for(pred_series_predicate, .1) await_for(pred_df_predicate, .1) def test_score_stream(): class MyEstimator(StreamEstimator):", "import pytest from itertools import product from streamz_ml import StreamEstimator import numpy as", "def predict(self, X): pass def score(self, X, y): return 1 n_rows = 20", "[], 'amount': []}), pd.Series([]) X_stream, 
y_stream = Stream(), Stream() X, y = DataFrame(X_stream,", "example=X_example), Series(y_stream, example=y_example) model = MyEstimator() score_stream = model.stream_score(X, y) score_list = list()", "X, y = DataFrame(X_stream, example=X_example), Series(y_stream, example=y_example) model = MyStreamingEstimator() fit_results = model.stream_partial_fit(X,", "np.ones((X_example.shape[0], n_fits)) pred_series_predicate = \\ lambda: pd.np.array_equal(pd.np.concatenate(pred_series_list).reshape(-1), target_predictions.reshape(-1)) pred_df_predicate = \\ lambda: pd.np.array_equal(pd.np.concatenate(pred_df_list).reshape(-1),", "== n_fits) target_predictions = np.ones((X_example.shape[0], n_fits)) pred_series_predicate = \\ lambda: pd.np.array_equal(pd.np.concatenate(pred_series_list).reshape(-1), target_predictions.reshape(-1)) pred_df_predicate", "await_for def test_no_fit_method(): class MyEstimator(): def predict(self, X): return X class MyStreamingEstimator(MyEstimator, StreamEstimator):", "fit_ctr_list) wait_for(predicate, .2) def test_stream_predict(): n_rows = 100 X_example = pd.DataFrame({'name': [None] *", "example_data = pd.Series(pd.np.ones(X_example.shape[0])) pred_series = model.stream_predict(X, y_example=pd.Series(example_data)) pred_df = model.stream_predict(X, y_example=pd.DataFrame(data=example_data)) pred_series_list, pred_df_list", "pd.np.array_equal(pd.np.concatenate(pred_series_list).reshape(-1), target_predictions.reshape(-1)) pred_df_predicate = \\ lambda: pd.np.array_equal(pd.np.concatenate(pred_df_list).reshape(-1), target_predictions.reshape(-1)) await_for(ctr_predicate, .1) await_for(pred_series_predicate, .1) await_for(pred_df_predicate,", "= \\ lambda: pd.np.array_equal(pd.np.concatenate(pred_df_list).reshape(-1), target_predictions.reshape(-1)) await_for(ctr_predicate, .1) await_for(pred_series_predicate, .1) await_for(pred_df_predicate, .1) def test_score_stream():", "y = DataFrame(X_stream, example=X_example), Series(y_stream, 
example=y_example) model = MyStreamingEstimator() fit_results = model.stream_partial_fit(X, y)", "= (pd.DataFrame(), pd.Series(), None, pd.np.array([]), list()) wrong_entries = product(wrong_Xs, wrong_ys) for X, y", "y): pass def predict(self, X): pass def score(self, X, y): return 1 n_rows", "* n_rows, 'amount': [None] * n_rows}) X_stream = Stream() X = DataFrame(X_stream, example=X_example)", "StreamEstimator import numpy as np from streamz import Stream import pandas as pd", "model.fit_ctr).sink(fit_ctr_list.append) n_fits = 10 for i in range(n_fits): X_stream.emit(X_example) y_stream.emit(y_example) predicate = lambda:", "pd.np.array([]), list()) wrong_entries = product(wrong_Xs, wrong_ys) for X, y in wrong_entries: with pytest.raises(AssertionError):", "import wait_for, await_for def test_no_fit_method(): class MyEstimator(): def predict(self, X): return X class", "wrong_ys = (pd.DataFrame(), pd.Series(), None, pd.np.array([]), list()) wrong_entries = product(wrong_Xs, wrong_ys) for X,", "self.predict_ctr = 0 def partial_fit(self, X, y): self.fit_ctr += 1 return self def", "with pytest.raises(AssertionError): StreamEstimator._check_stream_inputs(X, y) class MyEstimator(): def __init__(self): self.fit_ctr = 0 self.predict_ctr =", "n_rows, 'amount': [None] * n_rows}) X_stream = Stream() X = DataFrame(X_stream, example=X_example) model", "= list() score_stream.stream.sink(score_list.append) score_predicate = lambda: score_list == [1] * n_rows await_for(score_predicate, .1)", "X, y): return 1 n_rows = 20 X_example, y_example = pd.DataFrame({'name': [None] *", "MyEstimator() with pytest.raises(AttributeError): my_est._check_method('score') my_est._check_method('partial_fit') my_est._check_method('predict') def test_stream_inputs(): wrong_Xs = (pd.DataFrame(), None, pd.np.array([]),", "n_rows}), pd.Series([]) X_stream, y_stream = Stream(), Stream() X, y = DataFrame(X_stream, example=X_example), Series(y_stream,", "y): pass def predict(self, X): pass my_est = 
MyEstimator() with pytest.raises(AttributeError): my_est._check_method('score') my_est._check_method('partial_fit')", ".1) await_for(pred_df_predicate, .1) def test_score_stream(): class MyEstimator(StreamEstimator): def partial_fit(self, X, y): pass def", "def __init__(self): self.fit_ctr = 0 self.predict_ctr = 0 def partial_fit(self, X, y): self.fit_ctr", "= Stream() X = DataFrame(X_stream, example=X_example) model = MyStreamingEstimator() example_data = pd.Series(pd.np.ones(X_example.shape[0])) pred_series", "predict(self, X): pass def score(self, X, y): return 1 n_rows = 20 X_example,", "def test_stream_predict(): n_rows = 100 X_example = pd.DataFrame({'name': [None] * n_rows, 'amount': [None]", "10 for i in range(n_fits): X_stream.emit(X_example) ctr_predicate = lambda: (model.predict_ctr == n_fits) target_predictions", "range(n_fits): X_stream.emit(X_example) ctr_predicate = lambda: (model.predict_ctr == n_fits) target_predictions = np.ones((X_example.shape[0], n_fits)) pred_series_predicate", "my_est = MyStreamingEstimator() def test_check_method(): class MyEstimator(StreamEstimator): def partial_fit(self, X, y): pass def", "def predict(self, X): pass my_est = MyEstimator() with pytest.raises(AttributeError): my_est._check_method('score') my_est._check_method('partial_fit') my_est._check_method('predict') def", "[] pred_series.stream.sink(pred_series_list.append) pred_df.stream.sink(pred_df_list.append) n_fits = 10 for i in range(n_fits): X_stream.emit(X_example) ctr_predicate =", "Series(y_stream, example=y_example) model = MyEstimator() score_stream = model.stream_score(X, y) score_list = list() score_stream.stream.sink(score_list.append)", "<filename>streamz_ml/tests/test_stream_estimator.py import pytest from itertools import product from streamz_ml import StreamEstimator import numpy", "= 20 X_example, y_example = pd.DataFrame({'name': [None] * n_rows, 'amount': [None] * n_rows}),", "= pd.DataFrame({'name': [], 'amount': []}), pd.Series([]) X_stream, 
y_stream = Stream(), Stream() X, y", "y): return 1 n_rows = 20 X_example, y_example = pd.DataFrame({'name': [None] * n_rows,", "DataFrame(X_stream, example=X_example), Series(y_stream, example=y_example) model = MyStreamingEstimator() fit_results = model.stream_partial_fit(X, y) fit_ctr_list =", "pred_series.stream.sink(pred_series_list.append) pred_df.stream.sink(pred_df_list.append) n_fits = 10 for i in range(n_fits): X_stream.emit(X_example) ctr_predicate = lambda:", "= DataFrame(X_stream, example=X_example), Series(y_stream, example=y_example) model = MyStreamingEstimator() fit_results = model.stream_partial_fit(X, y) fit_ctr_list", "[] fit_results.map(lambda model: model.fit_ctr).sink(fit_ctr_list.append) n_fits = 10 for i in range(n_fits): X_stream.emit(X_example) y_stream.emit(y_example)", "predict(self, X): pass my_est = MyEstimator() with pytest.raises(AttributeError): my_est._check_method('score') my_est._check_method('partial_fit') my_est._check_method('predict') def test_stream_inputs():", "MyEstimator(): def __init__(self): self.fit_ctr = 0 self.predict_ctr = 0 def partial_fit(self, X, y):", "+= 1 return self def predict(self, X): self.predict_ctr += 1 return np.ones(X.shape[0]) class", "def test_no_fit_method(): class MyEstimator(): def predict(self, X): return X class MyStreamingEstimator(MyEstimator, StreamEstimator): pass", "test_stream_partial_fit(): X_example, y_example = pd.DataFrame({'name': [], 'amount': []}), pd.Series([]) X_stream, y_stream = Stream(),", "numpy as np from streamz import Stream import pandas as pd from streamz.dataframe", "= [], [] pred_series.stream.sink(pred_series_list.append) pred_df.stream.sink(pred_df_list.append) n_fits = 10 for i in range(n_fits): X_stream.emit(X_example)", "pd.np.array_equal(pd.np.concatenate(pred_df_list).reshape(-1), target_predictions.reshape(-1)) await_for(ctr_predicate, .1) await_for(pred_series_predicate, .1) await_for(pred_df_predicate, .1) def test_score_stream(): class 
MyEstimator(StreamEstimator): def", "await_for(ctr_predicate, .1) await_for(pred_series_predicate, .1) await_for(pred_df_predicate, .1) def test_score_stream(): class MyEstimator(StreamEstimator): def partial_fit(self, X,", "model.stream_predict(X, y_example=pd.Series(example_data)) pred_df = model.stream_predict(X, y_example=pd.DataFrame(data=example_data)) pred_series_list, pred_df_list = [], [] pred_series.stream.sink(pred_series_list.append) pred_df.stream.sink(pred_df_list.append)", "pd.Series(pd.np.ones(X_example.shape[0])) pred_series = model.stream_predict(X, y_example=pd.Series(example_data)) pred_df = model.stream_predict(X, y_example=pd.DataFrame(data=example_data)) pred_series_list, pred_df_list = [],", "wrong_entries: with pytest.raises(AssertionError): StreamEstimator._check_stream_inputs(X, y) class MyEstimator(): def __init__(self): self.fit_ctr = 0 self.predict_ctr", "X = DataFrame(X_stream, example=X_example) model = MyStreamingEstimator() example_data = pd.Series(pd.np.ones(X_example.shape[0])) pred_series = model.stream_predict(X,", "pd.DataFrame({'name': [None] * n_rows, 'amount': [None] * n_rows}) X_stream = Stream() X =", "def test_stream_inputs(): wrong_Xs = (pd.DataFrame(), None, pd.np.array([]), list()) wrong_ys = (pd.DataFrame(), pd.Series(), None,", "wrong_Xs = (pd.DataFrame(), None, pd.np.array([]), list()) wrong_ys = (pd.DataFrame(), pd.Series(), None, pd.np.array([]), list())", "range(n_fits): X_stream.emit(X_example) y_stream.emit(y_example) predicate = lambda: (list(range(1, n_fits + 1)) == fit_ctr_list) wait_for(predicate,", "return X class MyStreamingEstimator(MyEstimator, StreamEstimator): pass with pytest.raises(TypeError): my_est = MyStreamingEstimator() def test_no_predict_method():", "n_fits = 10 for i in range(n_fits): X_stream.emit(X_example) y_stream.emit(y_example) predicate = lambda: (list(range(1,", "X_stream.emit(X_example) ctr_predicate = lambda: (model.predict_ctr == n_fits) target_predictions = 
np.ones((X_example.shape[0], n_fits)) pred_series_predicate =", "i in range(n_fits): X_stream.emit(X_example) ctr_predicate = lambda: (model.predict_ctr == n_fits) target_predictions = np.ones((X_example.shape[0],", "X): self.predict_ctr += 1 return np.ones(X.shape[0]) class MyStreamingEstimator(MyEstimator, StreamEstimator): pass def test_stream_partial_fit(): X_example,", "MyStreamingEstimator() def test_check_method(): class MyEstimator(StreamEstimator): def partial_fit(self, X, y): pass def predict(self, X):", "test_stream_predict(): n_rows = 100 X_example = pd.DataFrame({'name': [None] * n_rows, 'amount': [None] *", "def partial_fit(self, X, y): pass def predict(self, X): pass def score(self, X, y):", "n_fits + 1)) == fit_ctr_list) wait_for(predicate, .2) def test_stream_predict(): n_rows = 100 X_example", "with pytest.raises(AttributeError): my_est._check_method('score') my_est._check_method('partial_fit') my_est._check_method('predict') def test_stream_inputs(): wrong_Xs = (pd.DataFrame(), None, pd.np.array([]), list())", "y): self.fit_ctr += 1 return self def predict(self, X): self.predict_ctr += 1 return", "MyStreamingEstimator() fit_results = model.stream_partial_fit(X, y) fit_ctr_list = [] fit_results.map(lambda model: model.fit_ctr).sink(fit_ctr_list.append) n_fits =", "= 0 def partial_fit(self, X, y): self.fit_ctr += 1 return self def predict(self,", "from streamz_ml import StreamEstimator import numpy as np from streamz import Stream import", "in range(n_fits): X_stream.emit(X_example) y_stream.emit(y_example) predicate = lambda: (list(range(1, n_fits + 1)) == fit_ctr_list)", "import Stream import pandas as pd from streamz.dataframe import DataFrame, Series from streamz.utils_test", "streamz.dataframe import DataFrame, Series from streamz.utils_test import wait_for, await_for def test_no_fit_method(): class MyEstimator():", "[], [] pred_series.stream.sink(pred_series_list.append) pred_df.stream.sink(pred_df_list.append) n_fits = 10 for i in 
range(n_fits): X_stream.emit(X_example) ctr_predicate", "score(self, X, y): return 1 n_rows = 20 X_example, y_example = pd.DataFrame({'name': [None]", "def predict(self, X): return X class MyStreamingEstimator(MyEstimator, StreamEstimator): pass with pytest.raises(TypeError): my_est =", "= MyStreamingEstimator() fit_results = model.stream_partial_fit(X, y) fit_ctr_list = [] fit_results.map(lambda model: model.fit_ctr).sink(fit_ctr_list.append) n_fits", "'amount': [None] * n_rows}), pd.Series([]) X_stream, y_stream = Stream(), Stream() X, y =", "return self def predict(self, X): self.predict_ctr += 1 return np.ones(X.shape[0]) class MyStreamingEstimator(MyEstimator, StreamEstimator):", "wrong_ys) for X, y in wrong_entries: with pytest.raises(AssertionError): StreamEstimator._check_stream_inputs(X, y) class MyEstimator(): def", "ctr_predicate = lambda: (model.predict_ctr == n_fits) target_predictions = np.ones((X_example.shape[0], n_fits)) pred_series_predicate = \\", "Stream() X = DataFrame(X_stream, example=X_example) model = MyStreamingEstimator() example_data = pd.Series(pd.np.ones(X_example.shape[0])) pred_series =", "fit_results.map(lambda model: model.fit_ctr).sink(fit_ctr_list.append) n_fits = 10 for i in range(n_fits): X_stream.emit(X_example) y_stream.emit(y_example) predicate", "n_rows = 100 X_example = pd.DataFrame({'name': [None] * n_rows, 'amount': [None] * n_rows})", "[None] * n_rows, 'amount': [None] * n_rows}) X_stream = Stream() X = DataFrame(X_stream,", "None, pd.np.array([]), list()) wrong_entries = product(wrong_Xs, wrong_ys) for X, y in wrong_entries: with", "score_list = list() score_stream.stream.sink(score_list.append) score_predicate = lambda: score_list == [1] * n_rows await_for(score_predicate,", "class MyEstimator(): def fit(self, X): return X class MyStreamingEstimator(MyEstimator, StreamEstimator): pass with pytest.raises(TypeError):", "(model.predict_ctr == n_fits) target_predictions = np.ones((X_example.shape[0], n_fits)) 
pred_series_predicate = \\ lambda: pd.np.array_equal(pd.np.concatenate(pred_series_list).reshape(-1), target_predictions.reshape(-1))", "= DataFrame(X_stream, example=X_example) model = MyStreamingEstimator() example_data = pd.Series(pd.np.ones(X_example.shape[0])) pred_series = model.stream_predict(X, y_example=pd.Series(example_data))", "= 0 self.predict_ctr = 0 def partial_fit(self, X, y): self.fit_ctr += 1 return", "StreamEstimator): pass with pytest.raises(TypeError): my_est = MyStreamingEstimator() def test_check_method(): class MyEstimator(StreamEstimator): def partial_fit(self,", "pytest.raises(TypeError): my_est = MyStreamingEstimator() def test_check_method(): class MyEstimator(StreamEstimator): def partial_fit(self, X, y): pass", "pd.np.array([]), list()) wrong_ys = (pd.DataFrame(), pd.Series(), None, pd.np.array([]), list()) wrong_entries = product(wrong_Xs, wrong_ys)", "pred_series_predicate = \\ lambda: pd.np.array_equal(pd.np.concatenate(pred_series_list).reshape(-1), target_predictions.reshape(-1)) pred_df_predicate = \\ lambda: pd.np.array_equal(pd.np.concatenate(pred_df_list).reshape(-1), target_predictions.reshape(-1)) await_for(ctr_predicate,", "pd.DataFrame({'name': [], 'amount': []}), pd.Series([]) X_stream, y_stream = Stream(), Stream() X, y =", "partial_fit(self, X, y): self.fit_ctr += 1 return self def predict(self, X): self.predict_ctr +=", "from itertools import product from streamz_ml import StreamEstimator import numpy as np from", "+= 1 return np.ones(X.shape[0]) class MyStreamingEstimator(MyEstimator, StreamEstimator): pass def test_stream_partial_fit(): X_example, y_example =", "predict(self, X): return X class MyStreamingEstimator(MyEstimator, StreamEstimator): pass with pytest.raises(TypeError): my_est = MyStreamingEstimator()", "return 1 n_rows = 20 X_example, y_example = pd.DataFrame({'name': [None] * n_rows, 'amount':", "import pandas as pd from streamz.dataframe import DataFrame, Series from streamz.utils_test import 
wait_for,", "pass def predict(self, X): pass def score(self, X, y): return 1 n_rows =", "import DataFrame, Series from streamz.utils_test import wait_for, await_for def test_no_fit_method(): class MyEstimator(): def", "my_est._check_method('predict') def test_stream_inputs(): wrong_Xs = (pd.DataFrame(), None, pd.np.array([]), list()) wrong_ys = (pd.DataFrame(), pd.Series(),", "= (pd.DataFrame(), None, pd.np.array([]), list()) wrong_ys = (pd.DataFrame(), pd.Series(), None, pd.np.array([]), list()) wrong_entries", "\\ lambda: pd.np.array_equal(pd.np.concatenate(pred_series_list).reshape(-1), target_predictions.reshape(-1)) pred_df_predicate = \\ lambda: pd.np.array_equal(pd.np.concatenate(pred_df_list).reshape(-1), target_predictions.reshape(-1)) await_for(ctr_predicate, .1) await_for(pred_series_predicate,", "= model.stream_predict(X, y_example=pd.DataFrame(data=example_data)) pred_series_list, pred_df_list = [], [] pred_series.stream.sink(pred_series_list.append) pred_df.stream.sink(pred_df_list.append) n_fits = 10", "(pd.DataFrame(), pd.Series(), None, pd.np.array([]), list()) wrong_entries = product(wrong_Xs, wrong_ys) for X, y in", "X): return X class MyStreamingEstimator(MyEstimator, StreamEstimator): pass with pytest.raises(TypeError): my_est = MyStreamingEstimator() def", "as pd from streamz.dataframe import DataFrame, Series from streamz.utils_test import wait_for, await_for def", "1 return self def predict(self, X): self.predict_ctr += 1 return np.ones(X.shape[0]) class MyStreamingEstimator(MyEstimator,", "def partial_fit(self, X, y): pass def predict(self, X): pass my_est = MyEstimator() with", "+ 1)) == fit_ctr_list) wait_for(predicate, .2) def test_stream_predict(): n_rows = 100 X_example =", "pytest from itertools import product from streamz_ml import StreamEstimator import numpy as np", "def test_no_predict_method(): class MyEstimator(): def fit(self, X): return X class MyStreamingEstimator(MyEstimator, StreamEstimator): pass", "self.predict_ctr += 1 
return np.ones(X.shape[0]) class MyStreamingEstimator(MyEstimator, StreamEstimator): pass def test_stream_partial_fit(): X_example, y_example", "pandas as pd from streamz.dataframe import DataFrame, Series from streamz.utils_test import wait_for, await_for", "pass def score(self, X, y): return 1 n_rows = 20 X_example, y_example =", "MyStreamingEstimator(MyEstimator, StreamEstimator): pass with pytest.raises(TypeError): my_est = MyStreamingEstimator() def test_check_method(): class MyEstimator(StreamEstimator): def", "n_rows}) X_stream = Stream() X = DataFrame(X_stream, example=X_example) model = MyStreamingEstimator() example_data =", "= product(wrong_Xs, wrong_ys) for X, y in wrong_entries: with pytest.raises(AssertionError): StreamEstimator._check_stream_inputs(X, y) class", "10 for i in range(n_fits): X_stream.emit(X_example) y_stream.emit(y_example) predicate = lambda: (list(range(1, n_fits +", "lambda: (model.predict_ctr == n_fits) target_predictions = np.ones((X_example.shape[0], n_fits)) pred_series_predicate = \\ lambda: pd.np.array_equal(pd.np.concatenate(pred_series_list).reshape(-1),", "StreamEstimator._check_stream_inputs(X, y) class MyEstimator(): def __init__(self): self.fit_ctr = 0 self.predict_ctr = 0 def", "1 return np.ones(X.shape[0]) class MyStreamingEstimator(MyEstimator, StreamEstimator): pass def test_stream_partial_fit(): X_example, y_example = pd.DataFrame({'name':", "class MyEstimator(): def predict(self, X): return X class MyStreamingEstimator(MyEstimator, StreamEstimator): pass with pytest.raises(TypeError):", ".2) def test_stream_predict(): n_rows = 100 X_example = pd.DataFrame({'name': [None] * n_rows, 'amount':", "0 def partial_fit(self, X, y): self.fit_ctr += 1 return self def predict(self, X):", "= 10 for i in range(n_fits): X_stream.emit(X_example) ctr_predicate = lambda: (model.predict_ctr == n_fits)", "= MyStreamingEstimator() def test_no_predict_method(): class MyEstimator(): def fit(self, X): return X class 
MyStreamingEstimator(MyEstimator,", "X, y): pass def predict(self, X): pass my_est = MyEstimator() with pytest.raises(AttributeError): my_est._check_method('score')", "def test_score_stream(): class MyEstimator(StreamEstimator): def partial_fit(self, X, y): pass def predict(self, X): pass", "StreamEstimator): pass def test_stream_partial_fit(): X_example, y_example = pd.DataFrame({'name': [], 'amount': []}), pd.Series([]) X_stream,", "test_stream_inputs(): wrong_Xs = (pd.DataFrame(), None, pd.np.array([]), list()) wrong_ys = (pd.DataFrame(), pd.Series(), None, pd.np.array([]),", "None, pd.np.array([]), list()) wrong_ys = (pd.DataFrame(), pd.Series(), None, pd.np.array([]), list()) wrong_entries = product(wrong_Xs,", "import product from streamz_ml import StreamEstimator import numpy as np from streamz import", "X class MyStreamingEstimator(MyEstimator, StreamEstimator): pass with pytest.raises(TypeError): my_est = MyStreamingEstimator() def test_check_method(): class", "Series from streamz.utils_test import wait_for, await_for def test_no_fit_method(): class MyEstimator(): def predict(self, X):", "(list(range(1, n_fits + 1)) == fit_ctr_list) wait_for(predicate, .2) def test_stream_predict(): n_rows = 100", "X, y in wrong_entries: with pytest.raises(AssertionError): StreamEstimator._check_stream_inputs(X, y) class MyEstimator(): def __init__(self): self.fit_ctr", "X): pass my_est = MyEstimator() with pytest.raises(AttributeError): my_est._check_method('score') my_est._check_method('partial_fit') my_est._check_method('predict') def test_stream_inputs(): wrong_Xs", "n_fits) target_predictions = np.ones((X_example.shape[0], n_fits)) pred_series_predicate = \\ lambda: pd.np.array_equal(pd.np.concatenate(pred_series_list).reshape(-1), target_predictions.reshape(-1)) pred_df_predicate =", "i in range(n_fits): X_stream.emit(X_example) y_stream.emit(y_example) predicate = lambda: (list(range(1, n_fits + 1)) ==", "n_fits = 10 for i in range(n_fits): X_stream.emit(X_example) 
ctr_predicate = lambda: (model.predict_ctr ==", "await_for(pred_df_predicate, .1) def test_score_stream(): class MyEstimator(StreamEstimator): def partial_fit(self, X, y): pass def predict(self,", "X_stream.emit(X_example) y_stream.emit(y_example) predicate = lambda: (list(range(1, n_fits + 1)) == fit_ctr_list) wait_for(predicate, .2)", "pd.Series(), None, pd.np.array([]), list()) wrong_entries = product(wrong_Xs, wrong_ys) for X, y in wrong_entries:", "20 X_example, y_example = pd.DataFrame({'name': [None] * n_rows, 'amount': [None] * n_rows}), pd.Series([])", "* n_rows}) X_stream = Stream() X = DataFrame(X_stream, example=X_example) model = MyStreamingEstimator() example_data", "= [] fit_results.map(lambda model: model.fit_ctr).sink(fit_ctr_list.append) n_fits = 10 for i in range(n_fits): X_stream.emit(X_example)", "X_stream = Stream() X = DataFrame(X_stream, example=X_example) model = MyStreamingEstimator() example_data = pd.Series(pd.np.ones(X_example.shape[0]))", "y_example=pd.Series(example_data)) pred_df = model.stream_predict(X, y_example=pd.DataFrame(data=example_data)) pred_series_list, pred_df_list = [], [] pred_series.stream.sink(pred_series_list.append) pred_df.stream.sink(pred_df_list.append) n_fits", "for i in range(n_fits): X_stream.emit(X_example) ctr_predicate = lambda: (model.predict_ctr == n_fits) target_predictions =", "return X class MyStreamingEstimator(MyEstimator, StreamEstimator): pass with pytest.raises(TypeError): my_est = MyStreamingEstimator() def test_check_method():", "class MyEstimator(StreamEstimator): def partial_fit(self, X, y): pass def predict(self, X): pass def score(self,", "y_stream = Stream(), Stream() X, y = DataFrame(X_stream, example=X_example), Series(y_stream, example=y_example) model =", "== fit_ctr_list) wait_for(predicate, .2) def test_stream_predict(): n_rows = 100 X_example = pd.DataFrame({'name': [None]", "def predict(self, X): self.predict_ctr += 1 return np.ones(X.shape[0]) class 
MyStreamingEstimator(MyEstimator, StreamEstimator): pass def", "n_rows, 'amount': [None] * n_rows}), pd.Series([]) X_stream, y_stream = Stream(), Stream() X, y", "* n_rows}), pd.Series([]) X_stream, y_stream = Stream(), Stream() X, y = DataFrame(X_stream, example=X_example),", "Stream import pandas as pd from streamz.dataframe import DataFrame, Series from streamz.utils_test import", "100 X_example = pd.DataFrame({'name': [None] * n_rows, 'amount': [None] * n_rows}) X_stream =", "example=y_example) model = MyStreamingEstimator() fit_results = model.stream_partial_fit(X, y) fit_ctr_list = [] fit_results.map(lambda model:", "model.stream_predict(X, y_example=pd.DataFrame(data=example_data)) pred_series_list, pred_df_list = [], [] pred_series.stream.sink(pred_series_list.append) pred_df.stream.sink(pred_df_list.append) n_fits = 10 for", "y_example=pd.DataFrame(data=example_data)) pred_series_list, pred_df_list = [], [] pred_series.stream.sink(pred_series_list.append) pred_df.stream.sink(pred_df_list.append) n_fits = 10 for i", "product(wrong_Xs, wrong_ys) for X, y in wrong_entries: with pytest.raises(AssertionError): StreamEstimator._check_stream_inputs(X, y) class MyEstimator():", "= np.ones((X_example.shape[0], n_fits)) pred_series_predicate = \\ lambda: pd.np.array_equal(pd.np.concatenate(pred_series_list).reshape(-1), target_predictions.reshape(-1)) pred_df_predicate = \\ lambda:", "MyStreamingEstimator(MyEstimator, StreamEstimator): pass def test_stream_partial_fit(): X_example, y_example = pd.DataFrame({'name': [], 'amount': []}), pd.Series([])", "fit(self, X): return X class MyStreamingEstimator(MyEstimator, StreamEstimator): pass with pytest.raises(TypeError): my_est = MyStreamingEstimator()", "= pd.Series(pd.np.ones(X_example.shape[0])) pred_series = model.stream_predict(X, y_example=pd.Series(example_data)) pred_df = model.stream_predict(X, y_example=pd.DataFrame(data=example_data)) pred_series_list, pred_df_list =", "Series(y_stream, example=y_example) 
model = MyStreamingEstimator() fit_results = model.stream_partial_fit(X, y) fit_ctr_list = [] fit_results.map(lambda", "X): pass def score(self, X, y): return 1 n_rows = 20 X_example, y_example", "model.stream_partial_fit(X, y) fit_ctr_list = [] fit_results.map(lambda model: model.fit_ctr).sink(fit_ctr_list.append) n_fits = 10 for i", "itertools import product from streamz_ml import StreamEstimator import numpy as np from streamz", "example=X_example) model = MyStreamingEstimator() example_data = pd.Series(pd.np.ones(X_example.shape[0])) pred_series = model.stream_predict(X, y_example=pd.Series(example_data)) pred_df =", "y = DataFrame(X_stream, example=X_example), Series(y_stream, example=y_example) model = MyEstimator() score_stream = model.stream_score(X, y)", "__init__(self): self.fit_ctr = 0 self.predict_ctr = 0 def partial_fit(self, X, y): self.fit_ctr +=", "DataFrame(X_stream, example=X_example) model = MyStreamingEstimator() example_data = pd.Series(pd.np.ones(X_example.shape[0])) pred_series = model.stream_predict(X, y_example=pd.Series(example_data)) pred_df", "= pd.DataFrame({'name': [None] * n_rows, 'amount': [None] * n_rows}), pd.Series([]) X_stream, y_stream =", "= MyEstimator() with pytest.raises(AttributeError): my_est._check_method('score') my_est._check_method('partial_fit') my_est._check_method('predict') def test_stream_inputs(): wrong_Xs = (pd.DataFrame(), None,", "def test_stream_partial_fit(): X_example, y_example = pd.DataFrame({'name': [], 'amount': []}), pd.Series([]) X_stream, y_stream =", "X, y): self.fit_ctr += 1 return self def predict(self, X): self.predict_ctr += 1", "y_example = pd.DataFrame({'name': [None] * n_rows, 'amount': [None] * n_rows}), pd.Series([]) X_stream, y_stream", "example=y_example) model = MyEstimator() score_stream = model.stream_score(X, y) score_list = list() score_stream.stream.sink(score_list.append) score_predicate", "pass with pytest.raises(TypeError): my_est = MyStreamingEstimator() def test_check_method(): 
class MyEstimator(StreamEstimator): def partial_fit(self, X,", "score_stream = model.stream_score(X, y) score_list = list() score_stream.stream.sink(score_list.append) score_predicate = lambda: score_list ==", "test_score_stream(): class MyEstimator(StreamEstimator): def partial_fit(self, X, y): pass def predict(self, X): pass def", "= MyStreamingEstimator() def test_check_method(): class MyEstimator(StreamEstimator): def partial_fit(self, X, y): pass def predict(self,", "self def predict(self, X): self.predict_ctr += 1 return np.ones(X.shape[0]) class MyStreamingEstimator(MyEstimator, StreamEstimator): pass", "import numpy as np from streamz import Stream import pandas as pd from", "n_rows = 20 X_example, y_example = pd.DataFrame({'name': [None] * n_rows, 'amount': [None] *", "MyEstimator(StreamEstimator): def partial_fit(self, X, y): pass def predict(self, X): pass def score(self, X,", "with pytest.raises(TypeError): my_est = MyStreamingEstimator() def test_check_method(): class MyEstimator(StreamEstimator): def partial_fit(self, X, y):", "= lambda: (model.predict_ctr == n_fits) target_predictions = np.ones((X_example.shape[0], n_fits)) pred_series_predicate = \\ lambda:", "target_predictions = np.ones((X_example.shape[0], n_fits)) pred_series_predicate = \\ lambda: pd.np.array_equal(pd.np.concatenate(pred_series_list).reshape(-1), target_predictions.reshape(-1)) pred_df_predicate = \\", "predict(self, X): self.predict_ctr += 1 return np.ones(X.shape[0]) class MyStreamingEstimator(MyEstimator, StreamEstimator): pass def test_stream_partial_fit():", "= model.stream_partial_fit(X, y) fit_ctr_list = [] fit_results.map(lambda model: model.fit_ctr).sink(fit_ctr_list.append) n_fits = 10 for", "class MyEstimator(): def __init__(self): self.fit_ctr = 0 self.predict_ctr = 0 def partial_fit(self, X,", "model.stream_score(X, y) score_list = list() score_stream.stream.sink(score_list.append) score_predicate = lambda: score_list == [1] *", "= DataFrame(X_stream, 
example=X_example), Series(y_stream, example=y_example) model = MyEstimator() score_stream = model.stream_score(X, y) score_list", "class MyStreamingEstimator(MyEstimator, StreamEstimator): pass with pytest.raises(TypeError): my_est = MyStreamingEstimator() def test_no_predict_method(): class MyEstimator():", ".1) def test_score_stream(): class MyEstimator(StreamEstimator): def partial_fit(self, X, y): pass def predict(self, X):", "import StreamEstimator import numpy as np from streamz import Stream import pandas as", "= Stream(), Stream() X, y = DataFrame(X_stream, example=X_example), Series(y_stream, example=y_example) model = MyEstimator()", "DataFrame(X_stream, example=X_example), Series(y_stream, example=y_example) model = MyEstimator() score_stream = model.stream_score(X, y) score_list =", "MyEstimator(StreamEstimator): def partial_fit(self, X, y): pass def predict(self, X): pass my_est = MyEstimator()", "def score(self, X, y): return 1 n_rows = 20 X_example, y_example = pd.DataFrame({'name':", "in range(n_fits): X_stream.emit(X_example) ctr_predicate = lambda: (model.predict_ctr == n_fits) target_predictions = np.ones((X_example.shape[0], n_fits))", "1)) == fit_ctr_list) wait_for(predicate, .2) def test_stream_predict(): n_rows = 100 X_example = pd.DataFrame({'name':", "with pytest.raises(TypeError): my_est = MyStreamingEstimator() def test_no_predict_method(): class MyEstimator(): def fit(self, X): return", "predicate = lambda: (list(range(1, n_fits + 1)) == fit_ctr_list) wait_for(predicate, .2) def test_stream_predict():", "pred_series_list, pred_df_list = [], [] pred_series.stream.sink(pred_series_list.append) pred_df.stream.sink(pred_df_list.append) n_fits = 10 for i in", "= MyEstimator() score_stream = model.stream_score(X, y) score_list = list() score_stream.stream.sink(score_list.append) score_predicate = lambda:", "test_no_predict_method(): class MyEstimator(): def fit(self, X): return X class MyStreamingEstimator(MyEstimator, StreamEstimator): pass 
with", "lambda: (list(range(1, n_fits + 1)) == fit_ctr_list) wait_for(predicate, .2) def test_stream_predict(): n_rows =", "MyEstimator(): def fit(self, X): return X class MyStreamingEstimator(MyEstimator, StreamEstimator): pass with pytest.raises(TypeError): my_est", "self.fit_ctr += 1 return self def predict(self, X): self.predict_ctr += 1 return np.ones(X.shape[0])", "y in wrong_entries: with pytest.raises(AssertionError): StreamEstimator._check_stream_inputs(X, y) class MyEstimator(): def __init__(self): self.fit_ctr =", "pd from streamz.dataframe import DataFrame, Series from streamz.utils_test import wait_for, await_for def test_no_fit_method():", "lambda: pd.np.array_equal(pd.np.concatenate(pred_series_list).reshape(-1), target_predictions.reshape(-1)) pred_df_predicate = \\ lambda: pd.np.array_equal(pd.np.concatenate(pred_df_list).reshape(-1), target_predictions.reshape(-1)) await_for(ctr_predicate, .1) await_for(pred_series_predicate, .1)", "Stream() X, y = DataFrame(X_stream, example=X_example), Series(y_stream, example=y_example) model = MyEstimator() score_stream =", "pred_df.stream.sink(pred_df_list.append) n_fits = 10 for i in range(n_fits): X_stream.emit(X_example) ctr_predicate = lambda: (model.predict_ctr", "MyEstimator(): def predict(self, X): return X class MyStreamingEstimator(MyEstimator, StreamEstimator): pass with pytest.raises(TypeError): my_est", "y) class MyEstimator(): def __init__(self): self.fit_ctr = 0 self.predict_ctr = 0 def partial_fit(self,", "X_example = pd.DataFrame({'name': [None] * n_rows, 'amount': [None] * n_rows}) X_stream = Stream()", "pass def predict(self, X): pass my_est = MyEstimator() with pytest.raises(AttributeError): my_est._check_method('score') my_est._check_method('partial_fit') my_est._check_method('predict')", "= pd.DataFrame({'name': [None] * n_rows, 'amount': [None] * n_rows}) X_stream = Stream() X", "DataFrame, Series from streamz.utils_test import wait_for, await_for def test_no_fit_method(): class 
MyEstimator(): def predict(self,", "pass with pytest.raises(TypeError): my_est = MyStreamingEstimator() def test_no_predict_method(): class MyEstimator(): def fit(self, X):", "my_est = MyStreamingEstimator() def test_no_predict_method(): class MyEstimator(): def fit(self, X): return X class", "model = MyStreamingEstimator() example_data = pd.Series(pd.np.ones(X_example.shape[0])) pred_series = model.stream_predict(X, y_example=pd.Series(example_data)) pred_df = model.stream_predict(X,", "await_for(pred_series_predicate, .1) await_for(pred_df_predicate, .1) def test_score_stream(): class MyEstimator(StreamEstimator): def partial_fit(self, X, y): pass", "(pd.DataFrame(), None, pd.np.array([]), list()) wrong_ys = (pd.DataFrame(), pd.Series(), None, pd.np.array([]), list()) wrong_entries =", "my_est._check_method('partial_fit') my_est._check_method('predict') def test_stream_inputs(): wrong_Xs = (pd.DataFrame(), None, pd.np.array([]), list()) wrong_ys = (pd.DataFrame(),", "= MyStreamingEstimator() example_data = pd.Series(pd.np.ones(X_example.shape[0])) pred_series = model.stream_predict(X, y_example=pd.Series(example_data)) pred_df = model.stream_predict(X, y_example=pd.DataFrame(data=example_data))", "wait_for, await_for def test_no_fit_method(): class MyEstimator(): def predict(self, X): return X class MyStreamingEstimator(MyEstimator,", "X_example, y_example = pd.DataFrame({'name': [None] * n_rows, 'amount': [None] * n_rows}), pd.Series([]) X_stream,", "np from streamz import Stream import pandas as pd from streamz.dataframe import DataFrame,", "target_predictions.reshape(-1)) pred_df_predicate = \\ lambda: pd.np.array_equal(pd.np.concatenate(pred_df_list).reshape(-1), target_predictions.reshape(-1)) await_for(ctr_predicate, .1) await_for(pred_series_predicate, .1) await_for(pred_df_predicate, .1)", "= model.stream_predict(X, y_example=pd.Series(example_data)) pred_df = model.stream_predict(X, y_example=pd.DataFrame(data=example_data)) pred_series_list, 
pred_df_list = [], [] pred_series.stream.sink(pred_series_list.append)", "wait_for(predicate, .2) def test_stream_predict(): n_rows = 100 X_example = pd.DataFrame({'name': [None] * n_rows,", "[None] * n_rows}), pd.Series([]) X_stream, y_stream = Stream(), Stream() X, y = DataFrame(X_stream,", "target_predictions.reshape(-1)) await_for(ctr_predicate, .1) await_for(pred_series_predicate, .1) await_for(pred_df_predicate, .1) def test_score_stream(): class MyEstimator(StreamEstimator): def partial_fit(self,", "partial_fit(self, X, y): pass def predict(self, X): pass my_est = MyEstimator() with pytest.raises(AttributeError):", "X_example, y_example = pd.DataFrame({'name': [], 'amount': []}), pd.Series([]) X_stream, y_stream = Stream(), Stream()", "as np from streamz import Stream import pandas as pd from streamz.dataframe import", "y_stream.emit(y_example) predicate = lambda: (list(range(1, n_fits + 1)) == fit_ctr_list) wait_for(predicate, .2) def", "my_est._check_method('score') my_est._check_method('partial_fit') my_est._check_method('predict') def test_stream_inputs(): wrong_Xs = (pd.DataFrame(), None, pd.np.array([]), list()) wrong_ys =", "pd.Series([]) X_stream, y_stream = Stream(), Stream() X, y = DataFrame(X_stream, example=X_example), Series(y_stream, example=y_example)", "test_no_fit_method(): class MyEstimator(): def predict(self, X): return X class MyStreamingEstimator(MyEstimator, StreamEstimator): pass with", "= \\ lambda: pd.np.array_equal(pd.np.concatenate(pred_series_list).reshape(-1), target_predictions.reshape(-1)) pred_df_predicate = \\ lambda: pd.np.array_equal(pd.np.concatenate(pred_df_list).reshape(-1), target_predictions.reshape(-1)) await_for(ctr_predicate, .1)", "partial_fit(self, X, y): pass def predict(self, X): pass def score(self, X, y): return", "[None] * n_rows}) X_stream = Stream() X = DataFrame(X_stream, example=X_example) model = MyStreamingEstimator()", "streamz_ml import StreamEstimator import numpy as np from streamz import Stream 
import pandas", "model = MyEstimator() score_stream = model.stream_score(X, y) score_list = list() score_stream.stream.sink(score_list.append) score_predicate =", "self.fit_ctr = 0 self.predict_ctr = 0 def partial_fit(self, X, y): self.fit_ctr += 1", "* n_rows, 'amount': [None] * n_rows}), pd.Series([]) X_stream, y_stream = Stream(), Stream() X,", "pred_df_list = [], [] pred_series.stream.sink(pred_series_list.append) pred_df.stream.sink(pred_df_list.append) n_fits = 10 for i in range(n_fits):", "from streamz import Stream import pandas as pd from streamz.dataframe import DataFrame, Series", "pass my_est = MyEstimator() with pytest.raises(AttributeError): my_est._check_method('score') my_est._check_method('partial_fit') my_est._check_method('predict') def test_stream_inputs(): wrong_Xs =", "= 100 X_example = pd.DataFrame({'name': [None] * n_rows, 'amount': [None] * n_rows}) X_stream", "Stream(), Stream() X, y = DataFrame(X_stream, example=X_example), Series(y_stream, example=y_example) model = MyStreamingEstimator() fit_results", "[]}), pd.Series([]) X_stream, y_stream = Stream(), Stream() X, y = DataFrame(X_stream, example=X_example), Series(y_stream,", "example=X_example), Series(y_stream, example=y_example) model = MyStreamingEstimator() fit_results = model.stream_partial_fit(X, y) fit_ctr_list = []", "y) score_list = list() score_stream.stream.sink(score_list.append) score_predicate = lambda: score_list == [1] * n_rows", "from streamz.utils_test import wait_for, await_for def test_no_fit_method(): class MyEstimator(): def predict(self, X): return", "0 self.predict_ctr = 0 def partial_fit(self, X, y): self.fit_ctr += 1 return self", "from streamz.dataframe import DataFrame, Series from streamz.utils_test import wait_for, await_for def test_no_fit_method(): class", "wrong_entries = product(wrong_Xs, wrong_ys) for X, y in wrong_entries: with pytest.raises(AssertionError): StreamEstimator._check_stream_inputs(X, y)", "class MyEstimator(StreamEstimator): def 
partial_fit(self, X, y): pass def predict(self, X): pass my_est =", "def partial_fit(self, X, y): self.fit_ctr += 1 return self def predict(self, X): self.predict_ctr", "return np.ones(X.shape[0]) class MyStreamingEstimator(MyEstimator, StreamEstimator): pass def test_stream_partial_fit(): X_example, y_example = pd.DataFrame({'name': [],", "= lambda: (list(range(1, n_fits + 1)) == fit_ctr_list) wait_for(predicate, .2) def test_stream_predict(): n_rows", "pass def test_stream_partial_fit(): X_example, y_example = pd.DataFrame({'name': [], 'amount': []}), pd.Series([]) X_stream, y_stream" ]
[ "from selenium import webdriver import geckodriver_binary # Adds geckodriver binary to path def", "geckodriver binary to path def test_driver(): driver = webdriver.Firefox() driver.get(\"http://www.python.org\") assert \"Python\" in", "binary to path def test_driver(): driver = webdriver.Firefox() driver.get(\"http://www.python.org\") assert \"Python\" in driver.titl", "webdriver import geckodriver_binary # Adds geckodriver binary to path def test_driver(): driver =", "import webdriver import geckodriver_binary # Adds geckodriver binary to path def test_driver(): driver", "# Adds geckodriver binary to path def test_driver(): driver = webdriver.Firefox() driver.get(\"http://www.python.org\") assert", "Adds geckodriver binary to path def test_driver(): driver = webdriver.Firefox() driver.get(\"http://www.python.org\") assert \"Python\"", "geckodriver_binary # Adds geckodriver binary to path def test_driver(): driver = webdriver.Firefox() driver.get(\"http://www.python.org\")", "import geckodriver_binary # Adds geckodriver binary to path def test_driver(): driver = webdriver.Firefox()", "selenium import webdriver import geckodriver_binary # Adds geckodriver binary to path def test_driver():", "to path def test_driver(): driver = webdriver.Firefox() driver.get(\"http://www.python.org\") assert \"Python\" in driver.titl driver.quit()" ]
[]
[ "# BaPSF group at UCLA. # # http://plasma.physics.ucla.edu/ # # Copyright 2017-2018 <NAME>", "class HDFReadDigiError(HDFReadError): \"\"\"Exception for failed HDF5 reading of digitizer.\"\"\" pass class HDFReadControlError(HDFReadError): \"\"\"Exception", "HDF5 reading of digitizer.\"\"\" pass class HDFReadControlError(HDFReadError): \"\"\"Exception for failed HDF5 reading of", "\"LICENSES/LICENSE.txt\" for full # license terms and contributor agreement. # \"\"\"Exceptions specific to", "\"\"\"Exceptions specific to `bapsflib`.\"\"\" __all__ = [ \"HDFMappingError\", \"HDFReadControlError\", \"HDFReadDigiError\", \"HDFReadMSIError\", \"HDFReadError\", ]", "class HDFMappingError(Exception): \"\"\"Exception for failed HDF5 mappings\"\"\" def __init__(self, device_name: str, why=\"\"): super().__init__(f\"'{device_name}'", "HDF5 reading of digitizer.\"\"\" pass class HDFReadMSIError(HDFReadError): \"\"\"Exception for failed HDF5 reading of", "This file is part of the bapsflib package, a Python toolkit for the", "\"HDFMappingError\", \"HDFReadControlError\", \"HDFReadDigiError\", \"HDFReadMSIError\", \"HDFReadError\", ] class HDFMappingError(Exception): \"\"\"Exception for failed HDF5 mappings\"\"\"", "License: Standard 3-clause BSD; see \"LICENSES/LICENSE.txt\" for full # license terms and contributor", "group at UCLA. # # http://plasma.physics.ucla.edu/ # # Copyright 2017-2018 <NAME> and contributors", "license terms and contributor agreement. 
# \"\"\"Exceptions specific to `bapsflib`.\"\"\" __all__ = [", "HDF5 mappings\"\"\" def __init__(self, device_name: str, why=\"\"): super().__init__(f\"'{device_name}' mapping failed: {why}\") class HDFReadError(Exception):", "# This file is part of the bapsflib package, a Python toolkit for", "HDFMappingError(Exception): \"\"\"Exception for failed HDF5 mappings\"\"\" def __init__(self, device_name: str, why=\"\"): super().__init__(f\"'{device_name}' mapping", "class HDFReadControlError(HDFReadError): \"\"\"Exception for failed HDF5 reading of digitizer.\"\"\" pass class HDFReadMSIError(HDFReadError): \"\"\"Exception", "\"\"\"Exception for failed HDF5 reading\"\"\" pass class HDFReadDigiError(HDFReadError): \"\"\"Exception for failed HDF5 reading", "__all__ = [ \"HDFMappingError\", \"HDFReadControlError\", \"HDFReadDigiError\", \"HDFReadMSIError\", \"HDFReadError\", ] class HDFMappingError(Exception): \"\"\"Exception for", "# # Copyright 2017-2018 <NAME> and contributors # # License: Standard 3-clause BSD;", "\"\"\"Exception for failed HDF5 reading of digitizer.\"\"\" pass class HDFReadControlError(HDFReadError): \"\"\"Exception for failed", "Python toolkit for the # BaPSF group at UCLA. # # http://plasma.physics.ucla.edu/ #", "contributors # # License: Standard 3-clause BSD; see \"LICENSES/LICENSE.txt\" for full # license", "for failed HDF5 reading of digitizer.\"\"\" pass class HDFReadControlError(HDFReadError): \"\"\"Exception for failed HDF5", "for failed HDF5 reading\"\"\" pass class HDFReadDigiError(HDFReadError): \"\"\"Exception for failed HDF5 reading of", "toolkit for the # BaPSF group at UCLA. # # http://plasma.physics.ucla.edu/ # #", "\"HDFReadMSIError\", \"HDFReadError\", ] class HDFMappingError(Exception): \"\"\"Exception for failed HDF5 mappings\"\"\" def __init__(self, device_name:", "for the # BaPSF group at UCLA. 
# # http://plasma.physics.ucla.edu/ # # Copyright", "failed HDF5 reading of digitizer.\"\"\" pass class HDFReadMSIError(HDFReadError): \"\"\"Exception for failed HDF5 reading", "# \"\"\"Exceptions specific to `bapsflib`.\"\"\" __all__ = [ \"HDFMappingError\", \"HDFReadControlError\", \"HDFReadDigiError\", \"HDFReadMSIError\", \"HDFReadError\",", "why=\"\"): super().__init__(f\"'{device_name}' mapping failed: {why}\") class HDFReadError(Exception): \"\"\"Exception for failed HDF5 reading\"\"\" pass", "mapping failed: {why}\") class HDFReadError(Exception): \"\"\"Exception for failed HDF5 reading\"\"\" pass class HDFReadDigiError(HDFReadError):", "bapsflib package, a Python toolkit for the # BaPSF group at UCLA. #", "contributor agreement. # \"\"\"Exceptions specific to `bapsflib`.\"\"\" __all__ = [ \"HDFMappingError\", \"HDFReadControlError\", \"HDFReadDigiError\",", "2017-2018 <NAME> and contributors # # License: Standard 3-clause BSD; see \"LICENSES/LICENSE.txt\" for", "part of the bapsflib package, a Python toolkit for the # BaPSF group", "Copyright 2017-2018 <NAME> and contributors # # License: Standard 3-clause BSD; see \"LICENSES/LICENSE.txt\"", "= [ \"HDFMappingError\", \"HDFReadControlError\", \"HDFReadDigiError\", \"HDFReadMSIError\", \"HDFReadError\", ] class HDFMappingError(Exception): \"\"\"Exception for failed", "failed HDF5 reading\"\"\" pass class HDFReadDigiError(HDFReadError): \"\"\"Exception for failed HDF5 reading of digitizer.\"\"\"", "# Copyright 2017-2018 <NAME> and contributors # # License: Standard 3-clause BSD; see", "a Python toolkit for the # BaPSF group at UCLA. 
# # http://plasma.physics.ucla.edu/", "reading of digitizer.\"\"\" pass class HDFReadMSIError(HDFReadError): \"\"\"Exception for failed HDF5 reading of digitizer.\"\"\"", "pass class HDFReadDigiError(HDFReadError): \"\"\"Exception for failed HDF5 reading of digitizer.\"\"\" pass class HDFReadControlError(HDFReadError):", "class HDFReadError(Exception): \"\"\"Exception for failed HDF5 reading\"\"\" pass class HDFReadDigiError(HDFReadError): \"\"\"Exception for failed", "Standard 3-clause BSD; see \"LICENSES/LICENSE.txt\" for full # license terms and contributor agreement.", "failed HDF5 reading of digitizer.\"\"\" pass class HDFReadControlError(HDFReadError): \"\"\"Exception for failed HDF5 reading", "`bapsflib`.\"\"\" __all__ = [ \"HDFMappingError\", \"HDFReadControlError\", \"HDFReadDigiError\", \"HDFReadMSIError\", \"HDFReadError\", ] class HDFMappingError(Exception): \"\"\"Exception", "for failed HDF5 reading of digitizer.\"\"\" pass class HDFReadMSIError(HDFReadError): \"\"\"Exception for failed HDF5", "# http://plasma.physics.ucla.edu/ # # Copyright 2017-2018 <NAME> and contributors # # License: Standard", "agreement. 
# \"\"\"Exceptions specific to `bapsflib`.\"\"\" __all__ = [ \"HDFMappingError\", \"HDFReadControlError\", \"HDFReadDigiError\", \"HDFReadMSIError\",", "the bapsflib package, a Python toolkit for the # BaPSF group at UCLA.", "HDF5 reading\"\"\" pass class HDFReadDigiError(HDFReadError): \"\"\"Exception for failed HDF5 reading of digitizer.\"\"\" pass", "HDFReadControlError(HDFReadError): \"\"\"Exception for failed HDF5 reading of digitizer.\"\"\" pass class HDFReadMSIError(HDFReadError): \"\"\"Exception for", "of digitizer.\"\"\" pass class HDFReadControlError(HDFReadError): \"\"\"Exception for failed HDF5 reading of digitizer.\"\"\" pass", "\"HDFReadControlError\", \"HDFReadDigiError\", \"HDFReadMSIError\", \"HDFReadError\", ] class HDFMappingError(Exception): \"\"\"Exception for failed HDF5 mappings\"\"\" def", "str, why=\"\"): super().__init__(f\"'{device_name}' mapping failed: {why}\") class HDFReadError(Exception): \"\"\"Exception for failed HDF5 reading\"\"\"", "for full # license terms and contributor agreement. # \"\"\"Exceptions specific to `bapsflib`.\"\"\"", "full # license terms and contributor agreement. # \"\"\"Exceptions specific to `bapsflib`.\"\"\" __all__", "BaPSF group at UCLA. # # http://plasma.physics.ucla.edu/ # # Copyright 2017-2018 <NAME> and", "http://plasma.physics.ucla.edu/ # # Copyright 2017-2018 <NAME> and contributors # # License: Standard 3-clause", "and contributor agreement. # \"\"\"Exceptions specific to `bapsflib`.\"\"\" __all__ = [ \"HDFMappingError\", \"HDFReadControlError\",", "see \"LICENSES/LICENSE.txt\" for full # license terms and contributor agreement. # \"\"\"Exceptions specific", "at UCLA. # # http://plasma.physics.ucla.edu/ # # Copyright 2017-2018 <NAME> and contributors #", "for failed HDF5 mappings\"\"\" def __init__(self, device_name: str, why=\"\"): super().__init__(f\"'{device_name}' mapping failed: {why}\")", "UCLA. 
# # http://plasma.physics.ucla.edu/ # # Copyright 2017-2018 <NAME> and contributors # #", "of the bapsflib package, a Python toolkit for the # BaPSF group at", "BSD; see \"LICENSES/LICENSE.txt\" for full # license terms and contributor agreement. # \"\"\"Exceptions", "<NAME> and contributors # # License: Standard 3-clause BSD; see \"LICENSES/LICENSE.txt\" for full", "super().__init__(f\"'{device_name}' mapping failed: {why}\") class HDFReadError(Exception): \"\"\"Exception for failed HDF5 reading\"\"\" pass class", "mappings\"\"\" def __init__(self, device_name: str, why=\"\"): super().__init__(f\"'{device_name}' mapping failed: {why}\") class HDFReadError(Exception): \"\"\"Exception", "the # BaPSF group at UCLA. # # http://plasma.physics.ucla.edu/ # # Copyright 2017-2018", "# # License: Standard 3-clause BSD; see \"LICENSES/LICENSE.txt\" for full # license terms", "device_name: str, why=\"\"): super().__init__(f\"'{device_name}' mapping failed: {why}\") class HDFReadError(Exception): \"\"\"Exception for failed HDF5", "is part of the bapsflib package, a Python toolkit for the # BaPSF", "pass class HDFReadControlError(HDFReadError): \"\"\"Exception for failed HDF5 reading of digitizer.\"\"\" pass class HDFReadMSIError(HDFReadError):", "\"\"\"Exception for failed HDF5 mappings\"\"\" def __init__(self, device_name: str, why=\"\"): super().__init__(f\"'{device_name}' mapping failed:", "\"HDFReadDigiError\", \"HDFReadMSIError\", \"HDFReadError\", ] class HDFMappingError(Exception): \"\"\"Exception for failed HDF5 mappings\"\"\" def __init__(self,", "and contributors # # License: Standard 3-clause BSD; see \"LICENSES/LICENSE.txt\" for full #", "# License: Standard 3-clause BSD; see \"LICENSES/LICENSE.txt\" for full # license terms and", "] class HDFMappingError(Exception): \"\"\"Exception for failed HDF5 mappings\"\"\" def __init__(self, device_name: str, why=\"\"):", "HDFReadDigiError(HDFReadError): \"\"\"Exception for failed HDF5 reading of digitizer.\"\"\" pass class 
HDFReadControlError(HDFReadError): \"\"\"Exception for", "package, a Python toolkit for the # BaPSF group at UCLA. # #", "of digitizer.\"\"\" pass class HDFReadMSIError(HDFReadError): \"\"\"Exception for failed HDF5 reading of digitizer.\"\"\" pass", "reading of digitizer.\"\"\" pass class HDFReadControlError(HDFReadError): \"\"\"Exception for failed HDF5 reading of digitizer.\"\"\"", "failed: {why}\") class HDFReadError(Exception): \"\"\"Exception for failed HDF5 reading\"\"\" pass class HDFReadDigiError(HDFReadError): \"\"\"Exception", "def __init__(self, device_name: str, why=\"\"): super().__init__(f\"'{device_name}' mapping failed: {why}\") class HDFReadError(Exception): \"\"\"Exception for", "reading\"\"\" pass class HDFReadDigiError(HDFReadError): \"\"\"Exception for failed HDF5 reading of digitizer.\"\"\" pass class", "terms and contributor agreement. # \"\"\"Exceptions specific to `bapsflib`.\"\"\" __all__ = [ \"HDFMappingError\",", "# # http://plasma.physics.ucla.edu/ # # Copyright 2017-2018 <NAME> and contributors # # License:", "3-clause BSD; see \"LICENSES/LICENSE.txt\" for full # license terms and contributor agreement. #", "\"\"\"Exception for failed HDF5 reading of digitizer.\"\"\" pass class HDFReadMSIError(HDFReadError): \"\"\"Exception for failed", "# license terms and contributor agreement. 
# \"\"\"Exceptions specific to `bapsflib`.\"\"\" __all__ =", "__init__(self, device_name: str, why=\"\"): super().__init__(f\"'{device_name}' mapping failed: {why}\") class HDFReadError(Exception): \"\"\"Exception for failed", "{why}\") class HDFReadError(Exception): \"\"\"Exception for failed HDF5 reading\"\"\" pass class HDFReadDigiError(HDFReadError): \"\"\"Exception for", "specific to `bapsflib`.\"\"\" __all__ = [ \"HDFMappingError\", \"HDFReadControlError\", \"HDFReadDigiError\", \"HDFReadMSIError\", \"HDFReadError\", ] class", "digitizer.\"\"\" pass class HDFReadControlError(HDFReadError): \"\"\"Exception for failed HDF5 reading of digitizer.\"\"\" pass class", "to `bapsflib`.\"\"\" __all__ = [ \"HDFMappingError\", \"HDFReadControlError\", \"HDFReadDigiError\", \"HDFReadMSIError\", \"HDFReadError\", ] class HDFMappingError(Exception):", "HDFReadError(Exception): \"\"\"Exception for failed HDF5 reading\"\"\" pass class HDFReadDigiError(HDFReadError): \"\"\"Exception for failed HDF5", "failed HDF5 mappings\"\"\" def __init__(self, device_name: str, why=\"\"): super().__init__(f\"'{device_name}' mapping failed: {why}\") class", "file is part of the bapsflib package, a Python toolkit for the #", "\"HDFReadError\", ] class HDFMappingError(Exception): \"\"\"Exception for failed HDF5 mappings\"\"\" def __init__(self, device_name: str,", "[ \"HDFMappingError\", \"HDFReadControlError\", \"HDFReadDigiError\", \"HDFReadMSIError\", \"HDFReadError\", ] class HDFMappingError(Exception): \"\"\"Exception for failed HDF5" ]
[ "torchvision import transforms import torchvision.models as models from torch.utils.data import DataLoader from utils", "use_cuda = torch.cuda.is_available() np.random.seed(2019) torch.manual_seed(2019) device = torch.device(\"cuda\" if use_cuda else \"cpu\") print(\"Available", "network resnet_model = create_embedding_net() model = TripletNet(resnet_model) model.load_state_dict(torch.load(model_weights_path)) model.to(device) model.eval() # Create features", "output.view(bs, ncrops, -1).mean(1).cpu().numpy() # Perform pca output = perform_pca_on_single_vector(output) # Save fts img_name", "if \"paris\" in img_dir: print(\"> Blacklisted images must be removed\") blacklist = [\"paris_louvre_000136.jpg\",", "\"paris_triomphe_000863.jpg\", \"paris_triomphe_000867.jpg\",] files = os.listdir(img_dir) for blacklisted_file in blacklist: files.remove(blacklisted_file) QUERY_IMAGES = [os.path.join(img_dir,", "sorted(os.listdir(img_dir))] # Create dataset eval_dataset = EmbeddingDataset(img_dir, QUERY_IMAGES, transforms=transforms_test) eval_loader = DataLoader(eval_dataset, batch_size=1,", "for file in sorted(files)] else: QUERY_IMAGES = [os.path.join(img_dir, file) for file in sorted(os.listdir(img_dir))]", "this function creates a triplet network, loads the parameters and generates the dimension", "img_name = (QUERY_IMAGES[idx].split(\"/\")[-1]).replace(\".jpg\", \"\") save_path = os.path.join(fts_dir, img_name) np.save(save_path, output.flatten()) del output, image", "h, w = image.size() # Get output output = model.get_embedding(image.view(-1, c, h, w))", "model_weights_path : path of trained weights img_dir : directory that holds the images", "file in sorted(files)] else: QUERY_IMAGES = [os.path.join(img_dir, file) for file in sorted(os.listdir(img_dir))] #", "[0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] transforms_test = transforms.Compose([transforms.Resize(460), transforms.FiveCrop(448), transforms.Lambda(lambda", "file in sorted(os.listdir(img_dir))] # Create 
dataset eval_dataset = EmbeddingDataset(img_dir, QUERY_IMAGES, transforms=transforms_test) eval_loader =", "utils import perform_pca_on_single_vector def create_embeddings_db_pca(model_weights_path, img_dir, fts_dir): \"\"\" Given a model weights path,", "with torch.no_grad(): for idx, image in enumerate(tqdm(eval_loader)): # Move image to device and", "torch.utils.data import DataLoader from utils import perform_pca_on_single_vector def create_embeddings_db_pca(model_weights_path, img_dir, fts_dir): \"\"\" Given", "blacklisted_file in blacklist: files.remove(blacklisted_file) QUERY_IMAGES = [os.path.join(img_dir, file) for file in sorted(files)] else:", "from utils import perform_pca_on_single_vector def create_embeddings_db_pca(model_weights_path, img_dir, fts_dir): \"\"\" Given a model weights", "it in the provided feature directory. Args: model_weights_path : path of trained weights", "images must be removed\") blacklist = [\"paris_louvre_000136.jpg\", \"paris_louvre_000146.jpg\", \"paris_moulinrouge_000422.jpg\", \"paris_museedorsay_001059.jpg\", \"paris_notredame_000188.jpg\", \"paris_pantheon_000284.jpg\", \"paris_pantheon_000960.jpg\",", "\"paris_sacrecoeur_000353.jpg\", \"paris_triomphe_000662.jpg\", \"paris_triomphe_000833.jpg\", \"paris_triomphe_000863.jpg\", \"paris_triomphe_000867.jpg\",] files = os.listdir(img_dir) for blacklisted_file in blacklist: files.remove(blacklisted_file)", "= [os.path.join(img_dir, file) for file in sorted(os.listdir(img_dir))] # Create dataset eval_dataset = EmbeddingDataset(img_dir,", ": path of trained weights img_dir : directory that holds the images fts_dir", "run: create_embeddings_db_pca(\"./weights/oxbuild-exp-3.pth\", img_dir=\"./data/oxbuild/images/\", fts_dir=\"./fts_pca/oxbuild/\") \"\"\" # Create cuda parameters use_cuda = torch.cuda.is_available() np.random.seed(2019)", "\"paris_museedorsay_001059.jpg\", \"paris_notredame_000188.jpg\", \"paris_pantheon_000284.jpg\", \"paris_pantheon_000960.jpg\", 
\"paris_pantheon_000974.jpg\", \"paris_pompidou_000195.jpg\", \"paris_pompidou_000196.jpg\", \"paris_pompidou_000201.jpg\", \"paris_pompidou_000467.jpg\", \"paris_pompidou_000640.jpg\", \"paris_sacrecoeur_000299.jpg\", \"paris_sacrecoeur_000330.jpg\", \"paris_sacrecoeur_000353.jpg\",", "= DataLoader(eval_dataset, batch_size=1, num_workers=0, shuffle=False) # Create embedding network resnet_model = create_embedding_net() model", "be removed\") blacklist = [\"paris_louvre_000136.jpg\", \"paris_louvre_000146.jpg\", \"paris_moulinrouge_000422.jpg\", \"paris_museedorsay_001059.jpg\", \"paris_notredame_000188.jpg\", \"paris_pantheon_000284.jpg\", \"paris_pantheon_000960.jpg\", \"paris_pantheon_000974.jpg\", \"paris_pompidou_000195.jpg\",", "image in enumerate(tqdm(eval_loader)): # Move image to device and get crops image =", "0.406] std = [0.229, 0.224, 0.225] transforms_test = transforms.Compose([transforms.Resize(460), transforms.FiveCrop(448), transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop)", "in enumerate(tqdm(eval_loader)): # Move image to device and get crops image = image.to(device)", "crops: torch.stack([transforms.Normalize(mean=mean, std=std)(crop) for crop in crops])), ]) # Creat image database if", "perform_pca_on_single_vector(output) # Save fts img_name = (QUERY_IMAGES[idx].split(\"/\")[-1]).replace(\".jpg\", \"\") save_path = os.path.join(fts_dir, img_name) np.save(save_path,", "= os.path.join(fts_dir, img_name) np.save(save_path, output.flatten()) del output, image gc.collect() # if __name__ ==", "(using pca) vectors and save it in the provided feature directory. 
Args: model_weights_path", "in crops])), transforms.Lambda(lambda crops: torch.stack([transforms.Normalize(mean=mean, std=std)(crop) for crop in crops])), ]) # Creat", "trained weights img_dir : directory that holds the images fts_dir : directory to", "[os.path.join(img_dir, file) for file in sorted(os.listdir(img_dir))] # Create dataset eval_dataset = EmbeddingDataset(img_dir, QUERY_IMAGES,", "in img_dir: print(\"> Blacklisted images must be removed\") blacklist = [\"paris_louvre_000136.jpg\", \"paris_louvre_000146.jpg\", \"paris_moulinrouge_000422.jpg\",", "output.flatten()) del output, image gc.collect() # if __name__ == '__main__': # create_embeddings_db_pca(\"./weights/oxbuild-exp-3.pth\", img_dir=\"./data/oxbuild/images/\",", "import QueryExtractor, EmbeddingDataset from torchvision import transforms import torchvision.models as models from torch.utils.data", "image to device and get crops image = image.to(device) bs, ncrops, c, h,", "None Eg run: create_embeddings_db_pca(\"./weights/oxbuild-exp-3.pth\", img_dir=\"./data/oxbuild/images/\", fts_dir=\"./fts_pca/oxbuild/\") \"\"\" # Create cuda parameters use_cuda =", "create_embedding_net() model = TripletNet(resnet_model) model.load_state_dict(torch.load(model_weights_path)) model.to(device) model.eval() # Create features with torch.no_grad(): for", "in crops])), ]) # Creat image database if \"paris\" in img_dir: print(\"> Blacklisted", "device) # Create transforms mean = [0.485, 0.456, 0.406] std = [0.229, 0.224,", "\"paris_notredame_000188.jpg\", \"paris_pantheon_000284.jpg\", \"paris_pantheon_000960.jpg\", \"paris_pantheon_000974.jpg\", \"paris_pompidou_000195.jpg\", \"paris_pompidou_000196.jpg\", \"paris_pompidou_000201.jpg\", \"paris_pompidou_000467.jpg\", \"paris_pompidou_000640.jpg\", \"paris_sacrecoeur_000299.jpg\", \"paris_sacrecoeur_000330.jpg\", \"paris_sacrecoeur_000353.jpg\", \"paris_triomphe_000662.jpg\",", "std = [0.229, 0.224, 0.225] transforms_test = transforms.Compose([transforms.Resize(460), 
transforms.FiveCrop(448), transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for", "tqdm import tqdm import torch import gc import os import numpy as np", "h, w)) output = output.view(bs, ncrops, -1).mean(1).cpu().numpy() # Perform pca output = perform_pca_on_single_vector(output)", "fts_dir): \"\"\" Given a model weights path, this function creates a triplet network,", "Perform pca output = perform_pca_on_single_vector(output) # Save fts img_name = (QUERY_IMAGES[idx].split(\"/\")[-1]).replace(\".jpg\", \"\") save_path", "Create transforms mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] transforms_test", "the provided feature directory. Args: model_weights_path : path of trained weights img_dir :", "[0.229, 0.224, 0.225] transforms_test = transforms.Compose([transforms.Resize(460), transforms.FiveCrop(448), transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in", "must be removed\") blacklist = [\"paris_louvre_000136.jpg\", \"paris_louvre_000146.jpg\", \"paris_moulinrouge_000422.jpg\", \"paris_museedorsay_001059.jpg\", \"paris_notredame_000188.jpg\", \"paris_pantheon_000284.jpg\", \"paris_pantheon_000960.jpg\", \"paris_pantheon_000974.jpg\",", "parameters and generates the dimension reduced (using pca) vectors and save it in", "= output.view(bs, ncrops, -1).mean(1).cpu().numpy() # Perform pca output = perform_pca_on_single_vector(output) # Save fts", "as models from torch.utils.data import DataLoader from utils import perform_pca_on_single_vector def create_embeddings_db_pca(model_weights_path, img_dir,", "\"\"\" Given a model weights path, this function creates a triplet network, loads", "c, h, w = image.size() # Get output output = model.get_embedding(image.view(-1, c, h,", "provided feature directory. 
Args: model_weights_path : path of trained weights img_dir : directory", "import perform_pca_on_single_vector def create_embeddings_db_pca(model_weights_path, img_dir, fts_dir): \"\"\" Given a model weights path, this", "perform_pca_on_single_vector def create_embeddings_db_pca(model_weights_path, img_dir, fts_dir): \"\"\" Given a model weights path, this function", "= model.get_embedding(image.view(-1, c, h, w)) output = output.view(bs, ncrops, -1).mean(1).cpu().numpy() # Perform pca", "ncrops, -1).mean(1).cpu().numpy() # Perform pca output = perform_pca_on_single_vector(output) # Save fts img_name =", "for blacklisted_file in blacklist: files.remove(blacklisted_file) QUERY_IMAGES = [os.path.join(img_dir, file) for file in sorted(files)]", "transforms.Compose([transforms.Resize(460), transforms.FiveCrop(448), transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])), transforms.Lambda(lambda crops: torch.stack([transforms.Normalize(mean=mean, std=std)(crop)", "= [os.path.join(img_dir, file) for file in sorted(files)] else: QUERY_IMAGES = [os.path.join(img_dir, file) for", "= image.to(device) bs, ncrops, c, h, w = image.size() # Get output output", "transforms.Lambda(lambda crops: torch.stack([transforms.Normalize(mean=mean, std=std)(crop) for crop in crops])), ]) # Creat image database", "QUERY_IMAGES, transforms=transforms_test) eval_loader = DataLoader(eval_dataset, batch_size=1, num_workers=0, shuffle=False) # Create embedding network resnet_model", "transforms mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] transforms_test =", "numpy as np from sklearn.metrics import cohen_kappa_score from model import TripletNet, create_embedding_net from", "blacklist = [\"paris_louvre_000136.jpg\", \"paris_louvre_000146.jpg\", \"paris_moulinrouge_000422.jpg\", \"paris_museedorsay_001059.jpg\", \"paris_notredame_000188.jpg\", \"paris_pantheon_000284.jpg\", \"paris_pantheon_000960.jpg\", \"paris_pantheon_000974.jpg\", 
\"paris_pompidou_000195.jpg\", \"paris_pompidou_000196.jpg\", \"paris_pompidou_000201.jpg\",", "in sorted(os.listdir(img_dir))] # Create dataset eval_dataset = EmbeddingDataset(img_dir, QUERY_IMAGES, transforms=transforms_test) eval_loader = DataLoader(eval_dataset,", "image.size() # Get output output = model.get_embedding(image.view(-1, c, h, w)) output = output.view(bs,", "batch_size=1, num_workers=0, shuffle=False) # Create embedding network resnet_model = create_embedding_net() model = TripletNet(resnet_model)", "# Perform pca output = perform_pca_on_single_vector(output) # Save fts img_name = (QUERY_IMAGES[idx].split(\"/\")[-1]).replace(\".jpg\", \"\")", "else: QUERY_IMAGES = [os.path.join(img_dir, file) for file in sorted(os.listdir(img_dir))] # Create dataset eval_dataset", "enumerate(tqdm(eval_loader)): # Move image to device and get crops image = image.to(device) bs,", "creates a triplet network, loads the parameters and generates the dimension reduced (using", "# Creat image database if \"paris\" in img_dir: print(\"> Blacklisted images must be", "torch.device(\"cuda\" if use_cuda else \"cpu\") print(\"Available device = \", device) # Create transforms", "del output, image gc.collect() # if __name__ == '__main__': # create_embeddings_db_pca(\"./weights/oxbuild-exp-3.pth\", img_dir=\"./data/oxbuild/images/\", fts_dir=\"./fts_pca/oxbuild/\")", "create_embedding_net from dataset import QueryExtractor, EmbeddingDataset from torchvision import transforms import torchvision.models as", "c, h, w)) output = output.view(bs, ncrops, -1).mean(1).cpu().numpy() # Perform pca output =", "Blacklisted images must be removed\") blacklist = [\"paris_louvre_000136.jpg\", \"paris_louvre_000146.jpg\", \"paris_moulinrouge_000422.jpg\", \"paris_museedorsay_001059.jpg\", \"paris_notredame_000188.jpg\", \"paris_pantheon_000284.jpg\",", "image.to(device) bs, ncrops, c, h, w = image.size() # Get output output =", "blacklist: files.remove(blacklisted_file) QUERY_IMAGES = 
[os.path.join(img_dir, file) for file in sorted(files)] else: QUERY_IMAGES =", "for crop in crops])), ]) # Creat image database if \"paris\" in img_dir:", "DataLoader(eval_dataset, batch_size=1, num_workers=0, shuffle=False) # Create embedding network resnet_model = create_embedding_net() model =", "eval_dataset = EmbeddingDataset(img_dir, QUERY_IMAGES, transforms=transforms_test) eval_loader = DataLoader(eval_dataset, batch_size=1, num_workers=0, shuffle=False) # Create", "# Save fts img_name = (QUERY_IMAGES[idx].split(\"/\")[-1]).replace(\".jpg\", \"\") save_path = os.path.join(fts_dir, img_name) np.save(save_path, output.flatten())", "from model import TripletNet, create_embedding_net from dataset import QueryExtractor, EmbeddingDataset from torchvision import", "import gc import os import numpy as np from sklearn.metrics import cohen_kappa_score from", "file) for file in sorted(files)] else: QUERY_IMAGES = [os.path.join(img_dir, file) for file in", "models from torch.utils.data import DataLoader from utils import perform_pca_on_single_vector def create_embeddings_db_pca(model_weights_path, img_dir, fts_dir):", "= os.listdir(img_dir) for blacklisted_file in blacklist: files.remove(blacklisted_file) QUERY_IMAGES = [os.path.join(img_dir, file) for file", "dimension reduced (using pca) vectors and save it in the provided feature directory.", "Create cuda parameters use_cuda = torch.cuda.is_available() np.random.seed(2019) torch.manual_seed(2019) device = torch.device(\"cuda\" if use_cuda", "[os.path.join(img_dir, file) for file in sorted(files)] else: QUERY_IMAGES = [os.path.join(img_dir, file) for file", "img_dir, fts_dir): \"\"\" Given a model weights path, this function creates a triplet", "std=std)(crop) for crop in crops])), ]) # Creat image database if \"paris\" in", "in the provided feature directory. 
Args: model_weights_path : path of trained weights img_dir", "QUERY_IMAGES = [os.path.join(img_dir, file) for file in sorted(files)] else: QUERY_IMAGES = [os.path.join(img_dir, file)", "= [0.229, 0.224, 0.225] transforms_test = transforms.Compose([transforms.Resize(460), transforms.FiveCrop(448), transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop", "device = \", device) # Create transforms mean = [0.485, 0.456, 0.406] std", "Get output output = model.get_embedding(image.view(-1, c, h, w)) output = output.view(bs, ncrops, -1).mean(1).cpu().numpy()", "= EmbeddingDataset(img_dir, QUERY_IMAGES, transforms=transforms_test) eval_loader = DataLoader(eval_dataset, batch_size=1, num_workers=0, shuffle=False) # Create embedding", "that holds the images fts_dir : directory to store the embeddings Returns: None", "transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])), transforms.Lambda(lambda crops: torch.stack([transforms.Normalize(mean=mean, std=std)(crop) for crop", "fts img_name = (QUERY_IMAGES[idx].split(\"/\")[-1]).replace(\".jpg\", \"\") save_path = os.path.join(fts_dir, img_name) np.save(save_path, output.flatten()) del output,", "output = perform_pca_on_single_vector(output) # Save fts img_name = (QUERY_IMAGES[idx].split(\"/\")[-1]).replace(\".jpg\", \"\") save_path = os.path.join(fts_dir,", "and save it in the provided feature directory. 
Args: model_weights_path : path of", "directory that holds the images fts_dir : directory to store the embeddings Returns:", "torch.cuda.is_available() np.random.seed(2019) torch.manual_seed(2019) device = torch.device(\"cuda\" if use_cuda else \"cpu\") print(\"Available device =", "store the embeddings Returns: None Eg run: create_embeddings_db_pca(\"./weights/oxbuild-exp-3.pth\", img_dir=\"./data/oxbuild/images/\", fts_dir=\"./fts_pca/oxbuild/\") \"\"\" # Create", "embeddings Returns: None Eg run: create_embeddings_db_pca(\"./weights/oxbuild-exp-3.pth\", img_dir=\"./data/oxbuild/images/\", fts_dir=\"./fts_pca/oxbuild/\") \"\"\" # Create cuda parameters", "tqdm import torch import gc import os import numpy as np from sklearn.metrics", "]) # Creat image database if \"paris\" in img_dir: print(\"> Blacklisted images must", "def create_embeddings_db_pca(model_weights_path, img_dir, fts_dir): \"\"\" Given a model weights path, this function creates", "EmbeddingDataset(img_dir, QUERY_IMAGES, transforms=transforms_test) eval_loader = DataLoader(eval_dataset, batch_size=1, num_workers=0, shuffle=False) # Create embedding network", "\"paris\" in img_dir: print(\"> Blacklisted images must be removed\") blacklist = [\"paris_louvre_000136.jpg\", \"paris_louvre_000146.jpg\",", "num_workers=0, shuffle=False) # Create embedding network resnet_model = create_embedding_net() model = TripletNet(resnet_model) model.load_state_dict(torch.load(model_weights_path))", "0.456, 0.406] std = [0.229, 0.224, 0.225] transforms_test = transforms.Compose([transforms.Resize(460), transforms.FiveCrop(448), transforms.Lambda(lambda crops:", "fts_dir : directory to store the embeddings Returns: None Eg run: create_embeddings_db_pca(\"./weights/oxbuild-exp-3.pth\", img_dir=\"./data/oxbuild/images/\",", "import tqdm import torch import gc import os import numpy as np from", "DataLoader from utils import perform_pca_on_single_vector def create_embeddings_db_pca(model_weights_path, img_dir, fts_dir): 
\"\"\" Given a model", "transforms.FiveCrop(448), transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])), transforms.Lambda(lambda crops: torch.stack([transforms.Normalize(mean=mean, std=std)(crop) for", "gc import os import numpy as np from sklearn.metrics import cohen_kappa_score from model", "features with torch.no_grad(): for idx, image in enumerate(tqdm(eval_loader)): # Move image to device", "# Create cuda parameters use_cuda = torch.cuda.is_available() np.random.seed(2019) torch.manual_seed(2019) device = torch.device(\"cuda\" if", "directory to store the embeddings Returns: None Eg run: create_embeddings_db_pca(\"./weights/oxbuild-exp-3.pth\", img_dir=\"./data/oxbuild/images/\", fts_dir=\"./fts_pca/oxbuild/\") \"\"\"", "model weights path, this function creates a triplet network, loads the parameters and", "\"paris_pompidou_000467.jpg\", \"paris_pompidou_000640.jpg\", \"paris_sacrecoeur_000299.jpg\", \"paris_sacrecoeur_000330.jpg\", \"paris_sacrecoeur_000353.jpg\", \"paris_triomphe_000662.jpg\", \"paris_triomphe_000833.jpg\", \"paris_triomphe_000863.jpg\", \"paris_triomphe_000867.jpg\",] files = os.listdir(img_dir) for", "os.listdir(img_dir) for blacklisted_file in blacklist: files.remove(blacklisted_file) QUERY_IMAGES = [os.path.join(img_dir, file) for file in", "model import TripletNet, create_embedding_net from dataset import QueryExtractor, EmbeddingDataset from torchvision import transforms", "image database if \"paris\" in img_dir: print(\"> Blacklisted images must be removed\") blacklist", "os.path.join(fts_dir, img_name) np.save(save_path, output.flatten()) del output, image gc.collect() # if __name__ == '__main__':", "\"paris_triomphe_000867.jpg\",] files = os.listdir(img_dir) for blacklisted_file in blacklist: files.remove(blacklisted_file) QUERY_IMAGES = [os.path.join(img_dir, file)", "w)) output = output.view(bs, ncrops, -1).mean(1).cpu().numpy() # Perform pca output = perform_pca_on_single_vector(output) 
#", "import cohen_kappa_score from model import TripletNet, create_embedding_net from dataset import QueryExtractor, EmbeddingDataset from", "use_cuda else \"cpu\") print(\"Available device = \", device) # Create transforms mean =", "if use_cuda else \"cpu\") print(\"Available device = \", device) # Create transforms mean", "os import numpy as np from sklearn.metrics import cohen_kappa_score from model import TripletNet,", "embedding network resnet_model = create_embedding_net() model = TripletNet(resnet_model) model.load_state_dict(torch.load(model_weights_path)) model.to(device) model.eval() # Create", "holds the images fts_dir : directory to store the embeddings Returns: None Eg", "device = torch.device(\"cuda\" if use_cuda else \"cpu\") print(\"Available device = \", device) #", "feature directory. Args: model_weights_path : path of trained weights img_dir : directory that", "path, this function creates a triplet network, loads the parameters and generates the", "np.save(save_path, output.flatten()) del output, image gc.collect() # if __name__ == '__main__': # create_embeddings_db_pca(\"./weights/oxbuild-exp-3.pth\",", "save_path = os.path.join(fts_dir, img_name) np.save(save_path, output.flatten()) del output, image gc.collect() # if __name__", "[\"paris_louvre_000136.jpg\", \"paris_louvre_000146.jpg\", \"paris_moulinrouge_000422.jpg\", \"paris_museedorsay_001059.jpg\", \"paris_notredame_000188.jpg\", \"paris_pantheon_000284.jpg\", \"paris_pantheon_000960.jpg\", \"paris_pantheon_000974.jpg\", \"paris_pompidou_000195.jpg\", \"paris_pompidou_000196.jpg\", \"paris_pompidou_000201.jpg\", \"paris_pompidou_000467.jpg\", \"paris_pompidou_000640.jpg\",", "# Get output output = model.get_embedding(image.view(-1, c, h, w)) output = output.view(bs, ncrops,", "file) for file in sorted(os.listdir(img_dir))] # Create dataset eval_dataset = EmbeddingDataset(img_dir, QUERY_IMAGES, transforms=transforms_test)", "img_name) np.save(save_path, output.flatten()) del output, image 
gc.collect() # if __name__ == '__main__': #", "\"paris_triomphe_000662.jpg\", \"paris_triomphe_000833.jpg\", \"paris_triomphe_000863.jpg\", \"paris_triomphe_000867.jpg\",] files = os.listdir(img_dir) for blacklisted_file in blacklist: files.remove(blacklisted_file) QUERY_IMAGES", ": directory that holds the images fts_dir : directory to store the embeddings", "\"paris_pompidou_000196.jpg\", \"paris_pompidou_000201.jpg\", \"paris_pompidou_000467.jpg\", \"paris_pompidou_000640.jpg\", \"paris_sacrecoeur_000299.jpg\", \"paris_sacrecoeur_000330.jpg\", \"paris_sacrecoeur_000353.jpg\", \"paris_triomphe_000662.jpg\", \"paris_triomphe_000833.jpg\", \"paris_triomphe_000863.jpg\", \"paris_triomphe_000867.jpg\",] files =", "weights img_dir : directory that holds the images fts_dir : directory to store", "np.random.seed(2019) torch.manual_seed(2019) device = torch.device(\"cuda\" if use_cuda else \"cpu\") print(\"Available device = \",", "torchvision.models as models from torch.utils.data import DataLoader from utils import perform_pca_on_single_vector def create_embeddings_db_pca(model_weights_path,", "# Create transforms mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225]", "import DataLoader from utils import perform_pca_on_single_vector def create_embeddings_db_pca(model_weights_path, img_dir, fts_dir): \"\"\" Given a", "\"paris_pantheon_000284.jpg\", \"paris_pantheon_000960.jpg\", \"paris_pantheon_000974.jpg\", \"paris_pompidou_000195.jpg\", \"paris_pompidou_000196.jpg\", \"paris_pompidou_000201.jpg\", \"paris_pompidou_000467.jpg\", \"paris_pompidou_000640.jpg\", \"paris_sacrecoeur_000299.jpg\", \"paris_sacrecoeur_000330.jpg\", \"paris_sacrecoeur_000353.jpg\", \"paris_triomphe_000662.jpg\", \"paris_triomphe_000833.jpg\",", "save it in the provided feature directory. 
Args: model_weights_path : path of trained", "function creates a triplet network, loads the parameters and generates the dimension reduced", "= transforms.Compose([transforms.Resize(460), transforms.FiveCrop(448), transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])), transforms.Lambda(lambda crops: torch.stack([transforms.Normalize(mean=mean,", "Create embedding network resnet_model = create_embedding_net() model = TripletNet(resnet_model) model.load_state_dict(torch.load(model_weights_path)) model.to(device) model.eval() #", "and get crops image = image.to(device) bs, ncrops, c, h, w = image.size()", "for crop in crops])), transforms.Lambda(lambda crops: torch.stack([transforms.Normalize(mean=mean, std=std)(crop) for crop in crops])), ])", "= \", device) # Create transforms mean = [0.485, 0.456, 0.406] std =", "device and get crops image = image.to(device) bs, ncrops, c, h, w =", "output = model.get_embedding(image.view(-1, c, h, w)) output = output.view(bs, ncrops, -1).mean(1).cpu().numpy() # Perform", "bs, ncrops, c, h, w = image.size() # Get output output = model.get_embedding(image.view(-1,", "triplet network, loads the parameters and generates the dimension reduced (using pca) vectors", "as np from sklearn.metrics import cohen_kappa_score from model import TripletNet, create_embedding_net from dataset", "shuffle=False) # Create embedding network resnet_model = create_embedding_net() model = TripletNet(resnet_model) model.load_state_dict(torch.load(model_weights_path)) model.to(device)", "Returns: None Eg run: create_embeddings_db_pca(\"./weights/oxbuild-exp-3.pth\", img_dir=\"./data/oxbuild/images/\", fts_dir=\"./fts_pca/oxbuild/\") \"\"\" # Create cuda parameters use_cuda", "create_embeddings_db_pca(\"./weights/oxbuild-exp-3.pth\", img_dir=\"./data/oxbuild/images/\", fts_dir=\"./fts_pca/oxbuild/\") \"\"\" # Create cuda parameters use_cuda = torch.cuda.is_available() np.random.seed(2019) torch.manual_seed(2019)", 
"\"paris_pantheon_000960.jpg\", \"paris_pantheon_000974.jpg\", \"paris_pompidou_000195.jpg\", \"paris_pompidou_000196.jpg\", \"paris_pompidou_000201.jpg\", \"paris_pompidou_000467.jpg\", \"paris_pompidou_000640.jpg\", \"paris_sacrecoeur_000299.jpg\", \"paris_sacrecoeur_000330.jpg\", \"paris_sacrecoeur_000353.jpg\", \"paris_triomphe_000662.jpg\", \"paris_triomphe_000833.jpg\", \"paris_triomphe_000863.jpg\",", "from torch.utils.data import DataLoader from utils import perform_pca_on_single_vector def create_embeddings_db_pca(model_weights_path, img_dir, fts_dir): \"\"\"", "from dataset import QueryExtractor, EmbeddingDataset from torchvision import transforms import torchvision.models as models", "\"paris_pompidou_000201.jpg\", \"paris_pompidou_000467.jpg\", \"paris_pompidou_000640.jpg\", \"paris_sacrecoeur_000299.jpg\", \"paris_sacrecoeur_000330.jpg\", \"paris_sacrecoeur_000353.jpg\", \"paris_triomphe_000662.jpg\", \"paris_triomphe_000833.jpg\", \"paris_triomphe_000863.jpg\", \"paris_triomphe_000867.jpg\",] files = os.listdir(img_dir)", "\"paris_pantheon_000974.jpg\", \"paris_pompidou_000195.jpg\", \"paris_pompidou_000196.jpg\", \"paris_pompidou_000201.jpg\", \"paris_pompidou_000467.jpg\", \"paris_pompidou_000640.jpg\", \"paris_sacrecoeur_000299.jpg\", \"paris_sacrecoeur_000330.jpg\", \"paris_sacrecoeur_000353.jpg\", \"paris_triomphe_000662.jpg\", \"paris_triomphe_000833.jpg\", \"paris_triomphe_000863.jpg\", \"paris_triomphe_000867.jpg\",]", "resnet_model = create_embedding_net() model = TripletNet(resnet_model) model.load_state_dict(torch.load(model_weights_path)) model.to(device) model.eval() # Create features with", "0.225] transforms_test = transforms.Compose([transforms.Resize(460), transforms.FiveCrop(448), transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])), transforms.Lambda(lambda", "\"\"\" # Create cuda parameters use_cuda = torch.cuda.is_available() np.random.seed(2019) torch.manual_seed(2019) device = 
torch.device(\"cuda\"", "Given a model weights path, this function creates a triplet network, loads the", "loads the parameters and generates the dimension reduced (using pca) vectors and save", "Args: model_weights_path : path of trained weights img_dir : directory that holds the", "0.224, 0.225] transforms_test = transforms.Compose([transforms.Resize(460), transforms.FiveCrop(448), transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),", "Move image to device and get crops image = image.to(device) bs, ncrops, c,", "TripletNet, create_embedding_net from dataset import QueryExtractor, EmbeddingDataset from torchvision import transforms import torchvision.models", "import TripletNet, create_embedding_net from dataset import QueryExtractor, EmbeddingDataset from torchvision import transforms import", "img_dir : directory that holds the images fts_dir : directory to store the", "import os import numpy as np from sklearn.metrics import cohen_kappa_score from model import", "mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] transforms_test = transforms.Compose([transforms.Resize(460),", "for idx, image in enumerate(tqdm(eval_loader)): # Move image to device and get crops", "and generates the dimension reduced (using pca) vectors and save it in the", "of trained weights img_dir : directory that holds the images fts_dir : directory", "parameters use_cuda = torch.cuda.is_available() np.random.seed(2019) torch.manual_seed(2019) device = torch.device(\"cuda\" if use_cuda else \"cpu\")", "QUERY_IMAGES = [os.path.join(img_dir, file) for file in sorted(os.listdir(img_dir))] # Create dataset eval_dataset =", "network, loads the parameters and generates the dimension reduced (using pca) vectors and", "the parameters and generates the dimension reduced (using pca) vectors and save it", "\"paris_louvre_000146.jpg\", \"paris_moulinrouge_000422.jpg\", \"paris_museedorsay_001059.jpg\", \"paris_notredame_000188.jpg\", 
\"paris_pantheon_000284.jpg\", \"paris_pantheon_000960.jpg\", \"paris_pantheon_000974.jpg\", \"paris_pompidou_000195.jpg\", \"paris_pompidou_000196.jpg\", \"paris_pompidou_000201.jpg\", \"paris_pompidou_000467.jpg\", \"paris_pompidou_000640.jpg\", \"paris_sacrecoeur_000299.jpg\",", "Save fts img_name = (QUERY_IMAGES[idx].split(\"/\")[-1]).replace(\".jpg\", \"\") save_path = os.path.join(fts_dir, img_name) np.save(save_path, output.flatten()) del", "a triplet network, loads the parameters and generates the dimension reduced (using pca)", "eval_loader = DataLoader(eval_dataset, batch_size=1, num_workers=0, shuffle=False) # Create embedding network resnet_model = create_embedding_net()", "crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])), transforms.Lambda(lambda crops: torch.stack([transforms.Normalize(mean=mean, std=std)(crop) for crop in", "from tqdm import tqdm import torch import gc import os import numpy as", "sorted(files)] else: QUERY_IMAGES = [os.path.join(img_dir, file) for file in sorted(os.listdir(img_dir))] # Create dataset", "EmbeddingDataset from torchvision import transforms import torchvision.models as models from torch.utils.data import DataLoader", "QueryExtractor, EmbeddingDataset from torchvision import transforms import torchvision.models as models from torch.utils.data import", "crops])), transforms.Lambda(lambda crops: torch.stack([transforms.Normalize(mean=mean, std=std)(crop) for crop in crops])), ]) # Creat image", "for file in sorted(os.listdir(img_dir))] # Create dataset eval_dataset = EmbeddingDataset(img_dir, QUERY_IMAGES, transforms=transforms_test) eval_loader", "images fts_dir : directory to store the embeddings Returns: None Eg run: create_embeddings_db_pca(\"./weights/oxbuild-exp-3.pth\",", "print(\"Available device = \", device) # Create transforms mean = [0.485, 0.456, 0.406]", "crop in crops])), ]) # Creat image database if \"paris\" in img_dir: print(\">", "Eg run: 
create_embeddings_db_pca(\"./weights/oxbuild-exp-3.pth\", img_dir=\"./data/oxbuild/images/\", fts_dir=\"./fts_pca/oxbuild/\") \"\"\" # Create cuda parameters use_cuda = torch.cuda.is_available()", "\", device) # Create transforms mean = [0.485, 0.456, 0.406] std = [0.229,", "import torch import gc import os import numpy as np from sklearn.metrics import", "the embeddings Returns: None Eg run: create_embeddings_db_pca(\"./weights/oxbuild-exp-3.pth\", img_dir=\"./data/oxbuild/images/\", fts_dir=\"./fts_pca/oxbuild/\") \"\"\" # Create cuda", ": directory to store the embeddings Returns: None Eg run: create_embeddings_db_pca(\"./weights/oxbuild-exp-3.pth\", img_dir=\"./data/oxbuild/images/\", fts_dir=\"./fts_pca/oxbuild/\")", "model = TripletNet(resnet_model) model.load_state_dict(torch.load(model_weights_path)) model.to(device) model.eval() # Create features with torch.no_grad(): for idx,", "transforms=transforms_test) eval_loader = DataLoader(eval_dataset, batch_size=1, num_workers=0, shuffle=False) # Create embedding network resnet_model =", "= image.size() # Get output output = model.get_embedding(image.view(-1, c, h, w)) output =", "import torchvision.models as models from torch.utils.data import DataLoader from utils import perform_pca_on_single_vector def", "import transforms import torchvision.models as models from torch.utils.data import DataLoader from utils import", "import numpy as np from sklearn.metrics import cohen_kappa_score from model import TripletNet, create_embedding_net", "\"paris_pompidou_000640.jpg\", \"paris_sacrecoeur_000299.jpg\", \"paris_sacrecoeur_000330.jpg\", \"paris_sacrecoeur_000353.jpg\", \"paris_triomphe_000662.jpg\", \"paris_triomphe_000833.jpg\", \"paris_triomphe_000863.jpg\", \"paris_triomphe_000867.jpg\",] files = os.listdir(img_dir) for blacklisted_file", "\"paris_triomphe_000833.jpg\", \"paris_triomphe_000863.jpg\", \"paris_triomphe_000867.jpg\",] files = os.listdir(img_dir) for blacklisted_file in blacklist: 
files.remove(blacklisted_file) QUERY_IMAGES =", "path of trained weights img_dir : directory that holds the images fts_dir :", "removed\") blacklist = [\"paris_louvre_000136.jpg\", \"paris_louvre_000146.jpg\", \"paris_moulinrouge_000422.jpg\", \"paris_museedorsay_001059.jpg\", \"paris_notredame_000188.jpg\", \"paris_pantheon_000284.jpg\", \"paris_pantheon_000960.jpg\", \"paris_pantheon_000974.jpg\", \"paris_pompidou_000195.jpg\", \"paris_pompidou_000196.jpg\",", "\"paris_sacrecoeur_000330.jpg\", \"paris_sacrecoeur_000353.jpg\", \"paris_triomphe_000662.jpg\", \"paris_triomphe_000833.jpg\", \"paris_triomphe_000863.jpg\", \"paris_triomphe_000867.jpg\",] files = os.listdir(img_dir) for blacklisted_file in blacklist:", "# Create dataset eval_dataset = EmbeddingDataset(img_dir, QUERY_IMAGES, transforms=transforms_test) eval_loader = DataLoader(eval_dataset, batch_size=1, num_workers=0,", "cohen_kappa_score from model import TripletNet, create_embedding_net from dataset import QueryExtractor, EmbeddingDataset from torchvision", "get crops image = image.to(device) bs, ncrops, c, h, w = image.size() #", "Create dataset eval_dataset = EmbeddingDataset(img_dir, QUERY_IMAGES, transforms=transforms_test) eval_loader = DataLoader(eval_dataset, batch_size=1, num_workers=0, shuffle=False)", "the images fts_dir : directory to store the embeddings Returns: None Eg run:", "reduced (using pca) vectors and save it in the provided feature directory. 
Args:", "a model weights path, this function creates a triplet network, loads the parameters", "= (QUERY_IMAGES[idx].split(\"/\")[-1]).replace(\".jpg\", \"\") save_path = os.path.join(fts_dir, img_name) np.save(save_path, output.flatten()) del output, image gc.collect()", "torch.stack([transforms.ToTensor()(crop) for crop in crops])), transforms.Lambda(lambda crops: torch.stack([transforms.Normalize(mean=mean, std=std)(crop) for crop in crops])),", "ncrops, c, h, w = image.size() # Get output output = model.get_embedding(image.view(-1, c,", "from torchvision import transforms import torchvision.models as models from torch.utils.data import DataLoader from", "= torch.cuda.is_available() np.random.seed(2019) torch.manual_seed(2019) device = torch.device(\"cuda\" if use_cuda else \"cpu\") print(\"Available device", "img_dir=\"./data/oxbuild/images/\", fts_dir=\"./fts_pca/oxbuild/\") \"\"\" # Create cuda parameters use_cuda = torch.cuda.is_available() np.random.seed(2019) torch.manual_seed(2019) device", "directory. 
Args: model_weights_path : path of trained weights img_dir : directory that holds", "the dimension reduced (using pca) vectors and save it in the provided feature", "transforms_test = transforms.Compose([transforms.Resize(460), transforms.FiveCrop(448), transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])), transforms.Lambda(lambda crops:", "output output = model.get_embedding(image.view(-1, c, h, w)) output = output.view(bs, ncrops, -1).mean(1).cpu().numpy() #", "# Move image to device and get crops image = image.to(device) bs, ncrops,", "sklearn.metrics import cohen_kappa_score from model import TripletNet, create_embedding_net from dataset import QueryExtractor, EmbeddingDataset", "TripletNet(resnet_model) model.load_state_dict(torch.load(model_weights_path)) model.to(device) model.eval() # Create features with torch.no_grad(): for idx, image in", "= perform_pca_on_single_vector(output) # Save fts img_name = (QUERY_IMAGES[idx].split(\"/\")[-1]).replace(\".jpg\", \"\") save_path = os.path.join(fts_dir, img_name)", "\"\") save_path = os.path.join(fts_dir, img_name) np.save(save_path, output.flatten()) del output, image gc.collect() # if", "pca) vectors and save it in the provided feature directory. 
Args: model_weights_path :", "database if \"paris\" in img_dir: print(\"> Blacklisted images must be removed\") blacklist =", "<gh_stars>10-100 from tqdm import tqdm import torch import gc import os import numpy", "generates the dimension reduced (using pca) vectors and save it in the provided", "\"paris_sacrecoeur_000299.jpg\", \"paris_sacrecoeur_000330.jpg\", \"paris_sacrecoeur_000353.jpg\", \"paris_triomphe_000662.jpg\", \"paris_triomphe_000833.jpg\", \"paris_triomphe_000863.jpg\", \"paris_triomphe_000867.jpg\",] files = os.listdir(img_dir) for blacklisted_file in", "Creat image database if \"paris\" in img_dir: print(\"> Blacklisted images must be removed\")", "crops image = image.to(device) bs, ncrops, c, h, w = image.size() # Get", "model.get_embedding(image.view(-1, c, h, w)) output = output.view(bs, ncrops, -1).mean(1).cpu().numpy() # Perform pca output", "to device and get crops image = image.to(device) bs, ncrops, c, h, w", "\"paris_moulinrouge_000422.jpg\", \"paris_museedorsay_001059.jpg\", \"paris_notredame_000188.jpg\", \"paris_pantheon_000284.jpg\", \"paris_pantheon_000960.jpg\", \"paris_pantheon_000974.jpg\", \"paris_pompidou_000195.jpg\", \"paris_pompidou_000196.jpg\", \"paris_pompidou_000201.jpg\", \"paris_pompidou_000467.jpg\", \"paris_pompidou_000640.jpg\", \"paris_sacrecoeur_000299.jpg\", \"paris_sacrecoeur_000330.jpg\",", "crop in crops])), transforms.Lambda(lambda crops: torch.stack([transforms.Normalize(mean=mean, std=std)(crop) for crop in crops])), ]) #", "in sorted(files)] else: QUERY_IMAGES = [os.path.join(img_dir, file) for file in sorted(os.listdir(img_dir))] # Create", "# Create embedding network resnet_model = create_embedding_net() model = TripletNet(resnet_model) model.load_state_dict(torch.load(model_weights_path)) model.to(device) model.eval()", "torch.no_grad(): for idx, image in enumerate(tqdm(eval_loader)): # Move image to device and get", "torch.stack([transforms.Normalize(mean=mean, std=std)(crop) for crop in crops])), ]) # 
Creat image database if \"paris\"", "image = image.to(device) bs, ncrops, c, h, w = image.size() # Get output", "= torch.device(\"cuda\" if use_cuda else \"cpu\") print(\"Available device = \", device) # Create", "to store the embeddings Returns: None Eg run: create_embeddings_db_pca(\"./weights/oxbuild-exp-3.pth\", img_dir=\"./data/oxbuild/images/\", fts_dir=\"./fts_pca/oxbuild/\") \"\"\" #", "\"cpu\") print(\"Available device = \", device) # Create transforms mean = [0.485, 0.456,", "= create_embedding_net() model = TripletNet(resnet_model) model.load_state_dict(torch.load(model_weights_path)) model.to(device) model.eval() # Create features with torch.no_grad():", "= [\"paris_louvre_000136.jpg\", \"paris_louvre_000146.jpg\", \"paris_moulinrouge_000422.jpg\", \"paris_museedorsay_001059.jpg\", \"paris_notredame_000188.jpg\", \"paris_pantheon_000284.jpg\", \"paris_pantheon_000960.jpg\", \"paris_pantheon_000974.jpg\", \"paris_pompidou_000195.jpg\", \"paris_pompidou_000196.jpg\", \"paris_pompidou_000201.jpg\", \"paris_pompidou_000467.jpg\",", "vectors and save it in the provided feature directory. 
Args: model_weights_path : path", "= [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] transforms_test = transforms.Compose([transforms.Resize(460), transforms.FiveCrop(448),", "output = output.view(bs, ncrops, -1).mean(1).cpu().numpy() # Perform pca output = perform_pca_on_single_vector(output) # Save", "torch.manual_seed(2019) device = torch.device(\"cuda\" if use_cuda else \"cpu\") print(\"Available device = \", device)", "weights path, this function creates a triplet network, loads the parameters and generates", "dataset eval_dataset = EmbeddingDataset(img_dir, QUERY_IMAGES, transforms=transforms_test) eval_loader = DataLoader(eval_dataset, batch_size=1, num_workers=0, shuffle=False) #", "img_dir: print(\"> Blacklisted images must be removed\") blacklist = [\"paris_louvre_000136.jpg\", \"paris_louvre_000146.jpg\", \"paris_moulinrouge_000422.jpg\", \"paris_museedorsay_001059.jpg\",", "create_embeddings_db_pca(model_weights_path, img_dir, fts_dir): \"\"\" Given a model weights path, this function creates a", "transforms import torchvision.models as models from torch.utils.data import DataLoader from utils import perform_pca_on_single_vector", "pca output = perform_pca_on_single_vector(output) # Save fts img_name = (QUERY_IMAGES[idx].split(\"/\")[-1]).replace(\".jpg\", \"\") save_path =", "= TripletNet(resnet_model) model.load_state_dict(torch.load(model_weights_path)) model.to(device) model.eval() # Create features with torch.no_grad(): for idx, image", "Create features with torch.no_grad(): for idx, image in enumerate(tqdm(eval_loader)): # Move image to", "from sklearn.metrics import cohen_kappa_score from model import TripletNet, create_embedding_net from dataset import QueryExtractor,", "else \"cpu\") print(\"Available device = \", device) # Create transforms mean = [0.485,", "files = os.listdir(img_dir) for blacklisted_file in blacklist: files.remove(blacklisted_file) QUERY_IMAGES = [os.path.join(img_dir, file) for", "print(\"> Blacklisted images must be 
removed\") blacklist = [\"paris_louvre_000136.jpg\", \"paris_louvre_000146.jpg\", \"paris_moulinrouge_000422.jpg\", \"paris_museedorsay_001059.jpg\", \"paris_notredame_000188.jpg\",", "# Create features with torch.no_grad(): for idx, image in enumerate(tqdm(eval_loader)): # Move image", "torch import gc import os import numpy as np from sklearn.metrics import cohen_kappa_score", "model.load_state_dict(torch.load(model_weights_path)) model.to(device) model.eval() # Create features with torch.no_grad(): for idx, image in enumerate(tqdm(eval_loader)):", "in blacklist: files.remove(blacklisted_file) QUERY_IMAGES = [os.path.join(img_dir, file) for file in sorted(files)] else: QUERY_IMAGES", "fts_dir=\"./fts_pca/oxbuild/\") \"\"\" # Create cuda parameters use_cuda = torch.cuda.is_available() np.random.seed(2019) torch.manual_seed(2019) device =", "np from sklearn.metrics import cohen_kappa_score from model import TripletNet, create_embedding_net from dataset import", "model.eval() # Create features with torch.no_grad(): for idx, image in enumerate(tqdm(eval_loader)): # Move", "w = image.size() # Get output output = model.get_embedding(image.view(-1, c, h, w)) output", "dataset import QueryExtractor, EmbeddingDataset from torchvision import transforms import torchvision.models as models from", "-1).mean(1).cpu().numpy() # Perform pca output = perform_pca_on_single_vector(output) # Save fts img_name = (QUERY_IMAGES[idx].split(\"/\")[-1]).replace(\".jpg\",", "\"paris_pompidou_000195.jpg\", \"paris_pompidou_000196.jpg\", \"paris_pompidou_000201.jpg\", \"paris_pompidou_000467.jpg\", \"paris_pompidou_000640.jpg\", \"paris_sacrecoeur_000299.jpg\", \"paris_sacrecoeur_000330.jpg\", \"paris_sacrecoeur_000353.jpg\", \"paris_triomphe_000662.jpg\", \"paris_triomphe_000833.jpg\", \"paris_triomphe_000863.jpg\", \"paris_triomphe_000867.jpg\",] files", "cuda parameters use_cuda = torch.cuda.is_available() np.random.seed(2019) torch.manual_seed(2019) device = torch.device(\"cuda\" if 
use_cuda else", "idx, image in enumerate(tqdm(eval_loader)): # Move image to device and get crops image", "model.to(device) model.eval() # Create features with torch.no_grad(): for idx, image in enumerate(tqdm(eval_loader)): #", "crops])), ]) # Creat image database if \"paris\" in img_dir: print(\"> Blacklisted images", "(QUERY_IMAGES[idx].split(\"/\")[-1]).replace(\".jpg\", \"\") save_path = os.path.join(fts_dir, img_name) np.save(save_path, output.flatten()) del output, image gc.collect() #", "files.remove(blacklisted_file) QUERY_IMAGES = [os.path.join(img_dir, file) for file in sorted(files)] else: QUERY_IMAGES = [os.path.join(img_dir," ]
[ "<reponame>VasseurLaurent/ansible-nwd import glob import os def get_list_files(path,list_files): for filename in glob.iglob(path + '**/*',", "glob import os def get_list_files(path,list_files): for filename in glob.iglob(path + '**/*', recursive=True): if", "import glob import os def get_list_files(path,list_files): for filename in glob.iglob(path + '**/*', recursive=True):", "filename in glob.iglob(path + '**/*', recursive=True): if os.path.isfile(filename): relative_paths = os.path.relpath(filename, path) list_files.append(relative_paths)", "def get_list_files(path,list_files): for filename in glob.iglob(path + '**/*', recursive=True): if os.path.isfile(filename): relative_paths =", "for filename in glob.iglob(path + '**/*', recursive=True): if os.path.isfile(filename): relative_paths = os.path.relpath(filename, path)", "get_list_files(path,list_files): for filename in glob.iglob(path + '**/*', recursive=True): if os.path.isfile(filename): relative_paths = os.path.relpath(filename,", "import os def get_list_files(path,list_files): for filename in glob.iglob(path + '**/*', recursive=True): if os.path.isfile(filename):", "glob.iglob(path + '**/*', recursive=True): if os.path.isfile(filename): relative_paths = os.path.relpath(filename, path) list_files.append(relative_paths) return list_files", "in glob.iglob(path + '**/*', recursive=True): if os.path.isfile(filename): relative_paths = os.path.relpath(filename, path) list_files.append(relative_paths) return", "os def get_list_files(path,list_files): for filename in glob.iglob(path + '**/*', recursive=True): if os.path.isfile(filename): relative_paths" ]
[ "as plt from matplotlib.animation import FuncAnimation N = 100 world = np.zeros((N, N))", "0 pos_y = -8 player_score_text_handle = plt.text(pos_x, pos_y, \"blocks: 0\") perimeter_score_text_handle = plt.text(0,", "= 1 world[0,0] = amoebe_mark perimeters = get_neighs(*starting_point) #################################################################### fig = plt.figure() im", "N: neighs.append([i+1,j]) if j+1 < N: neighs.append([i,j+1]) if 0 <= i-1: neighs.append([i-1,j]) if", "pos_y = -8 player_score_text_handle = plt.text(pos_x, pos_y, \"blocks: 0\") perimeter_score_text_handle = plt.text(0, -3,", "plt from matplotlib.animation import FuncAnimation N = 100 world = np.zeros((N, N)) def", "interval_ms = 50 anim = FuncAnimation(fig, animate, frames=2000, interval = interval_ms, repeat =", "\"perimeter blocks: 0\") def animate(i): global perimeters, world, im random_index = np.random.randint(0, len(perimeters))", "random_perimeter[1]] = amoebe_mark im.set_array(world) player_score_text_handle.set_text(\"player: \" + str(i)) perimeter_score_text_handle.set_text(\"perimeter:\" + str(len(perimeters))) interval_ms =", "int(np.floor(N/2)) starting_point = [starting_point_coord, starting_point_coord] amoebe = [starting_point] amoebe_mark = 1 world[0,0] =", "= [starting_point_coord, starting_point_coord] amoebe = [starting_point] amoebe_mark = 1 world[0,0] = amoebe_mark perimeters", "as np import matplotlib.pyplot as plt from matplotlib.animation import FuncAnimation N = 100", "N)) def get_neighs(i,j): neighs = [] if i+1 < N: neighs.append([i+1,j]) if j+1", "def animate(i): global perimeters, world, im random_index = np.random.randint(0, len(perimeters)) random_perimeter = perimeters.pop(random_index)", "= perimeters + new_perimeters world[random_perimeter[0], random_perimeter[1]] = amoebe_mark im.set_array(world) player_score_text_handle.set_text(\"player: \" + str(i))", "0 <= j-1: neighs.append([i,j-1]) return neighs starting_point_coord = int(np.floor(N/2)) 
starting_point = [starting_point_coord, starting_point_coord]", "= FuncAnimation(fig, animate, frames=2000, interval = interval_ms, repeat = False) plt.axis('off') anim.save(\"anim.mp4\") ####################################################################", "<= i-1: neighs.append([i-1,j]) if 0 <= j-1: neighs.append([i,j-1]) return neighs starting_point_coord = int(np.floor(N/2))", "matplotlib.animation import FuncAnimation N = 100 world = np.zeros((N, N)) def get_neighs(i,j): neighs", "\" + str(i)) perimeter_score_text_handle.set_text(\"perimeter:\" + str(len(perimeters))) interval_ms = 50 anim = FuncAnimation(fig, animate,", "if 0 <= i-1: neighs.append([i-1,j]) if 0 <= j-1: neighs.append([i,j-1]) return neighs starting_point_coord", "def get_neighs(i,j): neighs = [] if i+1 < N: neighs.append([i+1,j]) if j+1 <", "amoebe = [starting_point] amoebe_mark = 1 world[0,0] = amoebe_mark perimeters = get_neighs(*starting_point) ####################################################################", "fig = plt.figure() im = plt.imshow(world) pos_x = 0 pos_y = -8 player_score_text_handle", "amoebe_mark perimeters = get_neighs(*starting_point) #################################################################### fig = plt.figure() im = plt.imshow(world) pos_x =", "pos_y, \"blocks: 0\") perimeter_score_text_handle = plt.text(0, -3, \"perimeter blocks: 0\") def animate(i): global", "neighs.append([i-1,j]) if 0 <= j-1: neighs.append([i,j-1]) return neighs starting_point_coord = int(np.floor(N/2)) starting_point =", "[] if i+1 < N: neighs.append([i+1,j]) if j+1 < N: neighs.append([i,j+1]) if 0", "neighs.append([i+1,j]) if j+1 < N: neighs.append([i,j+1]) if 0 <= i-1: neighs.append([i-1,j]) if 0", "animate(i): global perimeters, world, im random_index = np.random.randint(0, len(perimeters)) random_perimeter = perimeters.pop(random_index) print(random_perimeter)", "0 <= i-1: neighs.append([i-1,j]) if 0 <= j-1: neighs.append([i,j-1]) return neighs starting_point_coord =", 
"new_perimeters.append(neigh) ####### perimeters = perimeters + new_perimeters world[random_perimeter[0], random_perimeter[1]] = amoebe_mark im.set_array(world) player_score_text_handle.set_text(\"player:", "im random_index = np.random.randint(0, len(perimeters)) random_perimeter = perimeters.pop(random_index) print(random_perimeter) neighs = get_neighs(*random_perimeter) #", "N = 100 world = np.zeros((N, N)) def get_neighs(i,j): neighs = [] if", "get_neighs(i,j): neighs = [] if i+1 < N: neighs.append([i+1,j]) if j+1 < N:", "perimeters = get_neighs(*starting_point) #################################################################### fig = plt.figure() im = plt.imshow(world) pos_x = 0", "= 0 pos_y = -8 player_score_text_handle = plt.text(pos_x, pos_y, \"blocks: 0\") perimeter_score_text_handle =", "<gh_stars>0 import numpy as np import matplotlib.pyplot as plt from matplotlib.animation import FuncAnimation", "50 anim = FuncAnimation(fig, animate, frames=2000, interval = interval_ms, repeat = False) plt.axis('off')", "neigh[1]] != amoebe_mark and neigh not in perimeters: new_perimeters.append(neigh) ####### perimeters = perimeters", "import FuncAnimation N = 100 world = np.zeros((N, N)) def get_neighs(i,j): neighs =", "+ str(len(perimeters))) interval_ms = 50 anim = FuncAnimation(fig, animate, frames=2000, interval = interval_ms,", "amoebe_mark = 1 world[0,0] = amoebe_mark perimeters = get_neighs(*starting_point) #################################################################### fig = plt.figure()", "[starting_point] amoebe_mark = 1 world[0,0] = amoebe_mark perimeters = get_neighs(*starting_point) #################################################################### fig =", "= amoebe_mark im.set_array(world) player_score_text_handle.set_text(\"player: \" + str(i)) perimeter_score_text_handle.set_text(\"perimeter:\" + str(len(perimeters))) interval_ms = 50", "perimeters: new_perimeters.append(neigh) ####### perimeters = perimeters + new_perimeters 
world[random_perimeter[0], random_perimeter[1]] = amoebe_mark im.set_array(world)", "<= j-1: neighs.append([i,j-1]) return neighs starting_point_coord = int(np.floor(N/2)) starting_point = [starting_point_coord, starting_point_coord] amoebe", "= amoebe_mark perimeters = get_neighs(*starting_point) #################################################################### fig = plt.figure() im = plt.imshow(world) pos_x", "i+1 < N: neighs.append([i+1,j]) if j+1 < N: neighs.append([i,j+1]) if 0 <= i-1:", "= perimeters.pop(random_index) print(random_perimeter) neighs = get_neighs(*random_perimeter) # filter inner points new_perimeters = []", "perimeter_score_text_handle.set_text(\"perimeter:\" + str(len(perimeters))) interval_ms = 50 anim = FuncAnimation(fig, animate, frames=2000, interval =", "neighs: if world[neigh[0], neigh[1]] != amoebe_mark and neigh not in perimeters: new_perimeters.append(neigh) #######", "random_index = np.random.randint(0, len(perimeters)) random_perimeter = perimeters.pop(random_index) print(random_perimeter) neighs = get_neighs(*random_perimeter) # filter", "neighs = get_neighs(*random_perimeter) # filter inner points new_perimeters = [] for neigh in", "plt.imshow(world) pos_x = 0 pos_y = -8 player_score_text_handle = plt.text(pos_x, pos_y, \"blocks: 0\")", "perimeters = perimeters + new_perimeters world[random_perimeter[0], random_perimeter[1]] = amoebe_mark im.set_array(world) player_score_text_handle.set_text(\"player: \" +", "#################################################################### fig = plt.figure() im = plt.imshow(world) pos_x = 0 pos_y = -8", "[starting_point_coord, starting_point_coord] amoebe = [starting_point] amoebe_mark = 1 world[0,0] = amoebe_mark perimeters =", "+ new_perimeters world[random_perimeter[0], random_perimeter[1]] = amoebe_mark im.set_array(world) player_score_text_handle.set_text(\"player: \" + str(i)) perimeter_score_text_handle.set_text(\"perimeter:\" +", "inner points new_perimeters = [] for neigh in 
neighs: if world[neigh[0], neigh[1]] !=", "if 0 <= j-1: neighs.append([i,j-1]) return neighs starting_point_coord = int(np.floor(N/2)) starting_point = [starting_point_coord,", "starting_point_coord = int(np.floor(N/2)) starting_point = [starting_point_coord, starting_point_coord] amoebe = [starting_point] amoebe_mark = 1", "pos_x = 0 pos_y = -8 player_score_text_handle = plt.text(pos_x, pos_y, \"blocks: 0\") perimeter_score_text_handle", "if world[neigh[0], neigh[1]] != amoebe_mark and neigh not in perimeters: new_perimeters.append(neigh) ####### perimeters", "im = plt.imshow(world) pos_x = 0 pos_y = -8 player_score_text_handle = plt.text(pos_x, pos_y,", "get_neighs(*starting_point) #################################################################### fig = plt.figure() im = plt.imshow(world) pos_x = 0 pos_y =", "= [] if i+1 < N: neighs.append([i+1,j]) if j+1 < N: neighs.append([i,j+1]) if", "= plt.imshow(world) pos_x = 0 pos_y = -8 player_score_text_handle = plt.text(pos_x, pos_y, \"blocks:", "= get_neighs(*random_perimeter) # filter inner points new_perimeters = [] for neigh in neighs:", "str(i)) perimeter_score_text_handle.set_text(\"perimeter:\" + str(len(perimeters))) interval_ms = 50 anim = FuncAnimation(fig, animate, frames=2000, interval", "plt.figure() im = plt.imshow(world) pos_x = 0 pos_y = -8 player_score_text_handle = plt.text(pos_x,", "neighs.append([i,j+1]) if 0 <= i-1: neighs.append([i-1,j]) if 0 <= j-1: neighs.append([i,j-1]) return neighs", "neigh not in perimeters: new_perimeters.append(neigh) ####### perimeters = perimeters + new_perimeters world[random_perimeter[0], random_perimeter[1]]", "FuncAnimation N = 100 world = np.zeros((N, N)) def get_neighs(i,j): neighs = []", "# filter inner points new_perimeters = [] for neigh in neighs: if world[neigh[0],", "= -8 player_score_text_handle = plt.text(pos_x, pos_y, \"blocks: 0\") perimeter_score_text_handle = plt.text(0, -3, \"perimeter", "neighs starting_point_coord = int(np.floor(N/2)) 
starting_point = [starting_point_coord, starting_point_coord] amoebe = [starting_point] amoebe_mark =", "starting_point_coord] amoebe = [starting_point] amoebe_mark = 1 world[0,0] = amoebe_mark perimeters = get_neighs(*starting_point)", "len(perimeters)) random_perimeter = perimeters.pop(random_index) print(random_perimeter) neighs = get_neighs(*random_perimeter) # filter inner points new_perimeters", "np.random.randint(0, len(perimeters)) random_perimeter = perimeters.pop(random_index) print(random_perimeter) neighs = get_neighs(*random_perimeter) # filter inner points", "random_perimeter = perimeters.pop(random_index) print(random_perimeter) neighs = get_neighs(*random_perimeter) # filter inner points new_perimeters =", "player_score_text_handle.set_text(\"player: \" + str(i)) perimeter_score_text_handle.set_text(\"perimeter:\" + str(len(perimeters))) interval_ms = 50 anim = FuncAnimation(fig,", "import matplotlib.pyplot as plt from matplotlib.animation import FuncAnimation N = 100 world =", "< N: neighs.append([i+1,j]) if j+1 < N: neighs.append([i,j+1]) if 0 <= i-1: neighs.append([i-1,j])", "perimeters + new_perimeters world[random_perimeter[0], random_perimeter[1]] = amoebe_mark im.set_array(world) player_score_text_handle.set_text(\"player: \" + str(i)) perimeter_score_text_handle.set_text(\"perimeter:\"", "for neigh in neighs: if world[neigh[0], neigh[1]] != amoebe_mark and neigh not in", "= [starting_point] amoebe_mark = 1 world[0,0] = amoebe_mark perimeters = get_neighs(*starting_point) #################################################################### fig", "and neigh not in perimeters: new_perimeters.append(neigh) ####### perimeters = perimeters + new_perimeters world[random_perimeter[0],", "filter inner points new_perimeters = [] for neigh in neighs: if world[neigh[0], neigh[1]]", "world[random_perimeter[0], random_perimeter[1]] = amoebe_mark im.set_array(world) player_score_text_handle.set_text(\"player: \" + str(i)) 
perimeter_score_text_handle.set_text(\"perimeter:\" + str(len(perimeters))) interval_ms", "numpy as np import matplotlib.pyplot as plt from matplotlib.animation import FuncAnimation N =", "!= amoebe_mark and neigh not in perimeters: new_perimeters.append(neigh) ####### perimeters = perimeters +", "starting_point = [starting_point_coord, starting_point_coord] amoebe = [starting_point] amoebe_mark = 1 world[0,0] = amoebe_mark", "= int(np.floor(N/2)) starting_point = [starting_point_coord, starting_point_coord] amoebe = [starting_point] amoebe_mark = 1 world[0,0]", "np import matplotlib.pyplot as plt from matplotlib.animation import FuncAnimation N = 100 world", "neigh in neighs: if world[neigh[0], neigh[1]] != amoebe_mark and neigh not in perimeters:", "return neighs starting_point_coord = int(np.floor(N/2)) starting_point = [starting_point_coord, starting_point_coord] amoebe = [starting_point] amoebe_mark", "neighs.append([i,j-1]) return neighs starting_point_coord = int(np.floor(N/2)) starting_point = [starting_point_coord, starting_point_coord] amoebe = [starting_point]", "= get_neighs(*starting_point) #################################################################### fig = plt.figure() im = plt.imshow(world) pos_x = 0 pos_y", "player_score_text_handle = plt.text(pos_x, pos_y, \"blocks: 0\") perimeter_score_text_handle = plt.text(0, -3, \"perimeter blocks: 0\")", "= 50 anim = FuncAnimation(fig, animate, frames=2000, interval = interval_ms, repeat = False)", "if i+1 < N: neighs.append([i+1,j]) if j+1 < N: neighs.append([i,j+1]) if 0 <=", "-3, \"perimeter blocks: 0\") def animate(i): global perimeters, world, im random_index = np.random.randint(0,", "= plt.text(pos_x, pos_y, \"blocks: 0\") perimeter_score_text_handle = plt.text(0, -3, \"perimeter blocks: 0\") def", "get_neighs(*random_perimeter) # filter inner points new_perimeters = [] for neigh in neighs: if", "neighs = [] if i+1 < N: neighs.append([i+1,j]) if j+1 < N: neighs.append([i,j+1])", "= [] for neigh in 
neighs: if world[neigh[0], neigh[1]] != amoebe_mark and neigh", "j+1 < N: neighs.append([i,j+1]) if 0 <= i-1: neighs.append([i-1,j]) if 0 <= j-1:", "if j+1 < N: neighs.append([i,j+1]) if 0 <= i-1: neighs.append([i-1,j]) if 0 <=", "[] for neigh in neighs: if world[neigh[0], neigh[1]] != amoebe_mark and neigh not", "j-1: neighs.append([i,j-1]) return neighs starting_point_coord = int(np.floor(N/2)) starting_point = [starting_point_coord, starting_point_coord] amoebe =", "world, im random_index = np.random.randint(0, len(perimeters)) random_perimeter = perimeters.pop(random_index) print(random_perimeter) neighs = get_neighs(*random_perimeter)", "perimeters.pop(random_index) print(random_perimeter) neighs = get_neighs(*random_perimeter) # filter inner points new_perimeters = [] for", "-8 player_score_text_handle = plt.text(pos_x, pos_y, \"blocks: 0\") perimeter_score_text_handle = plt.text(0, -3, \"perimeter blocks:", "= 100 world = np.zeros((N, N)) def get_neighs(i,j): neighs = [] if i+1", "blocks: 0\") def animate(i): global perimeters, world, im random_index = np.random.randint(0, len(perimeters)) random_perimeter", "np.zeros((N, N)) def get_neighs(i,j): neighs = [] if i+1 < N: neighs.append([i+1,j]) if", "= plt.figure() im = plt.imshow(world) pos_x = 0 pos_y = -8 player_score_text_handle =", "anim = FuncAnimation(fig, animate, frames=2000, interval = interval_ms, repeat = False) plt.axis('off') anim.save(\"anim.mp4\")", "= np.zeros((N, N)) def get_neighs(i,j): neighs = [] if i+1 < N: neighs.append([i+1,j])", "import numpy as np import matplotlib.pyplot as plt from matplotlib.animation import FuncAnimation N", "from matplotlib.animation import FuncAnimation N = 100 world = np.zeros((N, N)) def get_neighs(i,j):", "0\") def animate(i): global perimeters, world, im random_index = np.random.randint(0, len(perimeters)) random_perimeter =", "im.set_array(world) player_score_text_handle.set_text(\"player: \" + str(i)) perimeter_score_text_handle.set_text(\"perimeter:\" + 
str(len(perimeters))) interval_ms = 50 anim =", "plt.text(0, -3, \"perimeter blocks: 0\") def animate(i): global perimeters, world, im random_index =", "world = np.zeros((N, N)) def get_neighs(i,j): neighs = [] if i+1 < N:", "= np.random.randint(0, len(perimeters)) random_perimeter = perimeters.pop(random_index) print(random_perimeter) neighs = get_neighs(*random_perimeter) # filter inner", "amoebe_mark im.set_array(world) player_score_text_handle.set_text(\"player: \" + str(i)) perimeter_score_text_handle.set_text(\"perimeter:\" + str(len(perimeters))) interval_ms = 50 anim", "< N: neighs.append([i,j+1]) if 0 <= i-1: neighs.append([i-1,j]) if 0 <= j-1: neighs.append([i,j-1])", "i-1: neighs.append([i-1,j]) if 0 <= j-1: neighs.append([i,j-1]) return neighs starting_point_coord = int(np.floor(N/2)) starting_point", "1 world[0,0] = amoebe_mark perimeters = get_neighs(*starting_point) #################################################################### fig = plt.figure() im =", "100 world = np.zeros((N, N)) def get_neighs(i,j): neighs = [] if i+1 <", "in perimeters: new_perimeters.append(neigh) ####### perimeters = perimeters + new_perimeters world[random_perimeter[0], random_perimeter[1]] = amoebe_mark", "print(random_perimeter) neighs = get_neighs(*random_perimeter) # filter inner points new_perimeters = [] for neigh", "plt.text(pos_x, pos_y, \"blocks: 0\") perimeter_score_text_handle = plt.text(0, -3, \"perimeter blocks: 0\") def animate(i):", "new_perimeters world[random_perimeter[0], random_perimeter[1]] = amoebe_mark im.set_array(world) player_score_text_handle.set_text(\"player: \" + str(i)) perimeter_score_text_handle.set_text(\"perimeter:\" + str(len(perimeters)))", "new_perimeters = [] for neigh in neighs: if world[neigh[0], neigh[1]] != amoebe_mark and", "world[neigh[0], neigh[1]] != amoebe_mark and neigh not in perimeters: new_perimeters.append(neigh) ####### perimeters =", "matplotlib.pyplot as plt from matplotlib.animation import FuncAnimation N = 100 
world = np.zeros((N,", "+ str(i)) perimeter_score_text_handle.set_text(\"perimeter:\" + str(len(perimeters))) interval_ms = 50 anim = FuncAnimation(fig, animate, frames=2000,", "amoebe_mark and neigh not in perimeters: new_perimeters.append(neigh) ####### perimeters = perimeters + new_perimeters", "####### perimeters = perimeters + new_perimeters world[random_perimeter[0], random_perimeter[1]] = amoebe_mark im.set_array(world) player_score_text_handle.set_text(\"player: \"", "in neighs: if world[neigh[0], neigh[1]] != amoebe_mark and neigh not in perimeters: new_perimeters.append(neigh)", "points new_perimeters = [] for neigh in neighs: if world[neigh[0], neigh[1]] != amoebe_mark", "perimeters, world, im random_index = np.random.randint(0, len(perimeters)) random_perimeter = perimeters.pop(random_index) print(random_perimeter) neighs =", "world[0,0] = amoebe_mark perimeters = get_neighs(*starting_point) #################################################################### fig = plt.figure() im = plt.imshow(world)", "str(len(perimeters))) interval_ms = 50 anim = FuncAnimation(fig, animate, frames=2000, interval = interval_ms, repeat", "not in perimeters: new_perimeters.append(neigh) ####### perimeters = perimeters + new_perimeters world[random_perimeter[0], random_perimeter[1]] =", "\"blocks: 0\") perimeter_score_text_handle = plt.text(0, -3, \"perimeter blocks: 0\") def animate(i): global perimeters,", "global perimeters, world, im random_index = np.random.randint(0, len(perimeters)) random_perimeter = perimeters.pop(random_index) print(random_perimeter) neighs", "= plt.text(0, -3, \"perimeter blocks: 0\") def animate(i): global perimeters, world, im random_index", "N: neighs.append([i,j+1]) if 0 <= i-1: neighs.append([i-1,j]) if 0 <= j-1: neighs.append([i,j-1]) return", "FuncAnimation(fig, animate, frames=2000, interval = interval_ms, repeat = False) plt.axis('off') anim.save(\"anim.mp4\") #################################################################### 
#plt.show()", "perimeter_score_text_handle = plt.text(0, -3, \"perimeter blocks: 0\") def animate(i): global perimeters, world, im", "0\") perimeter_score_text_handle = plt.text(0, -3, \"perimeter blocks: 0\") def animate(i): global perimeters, world," ]
[ "from core.models import action, conduct, webui from core import helpers, logging, cache, settings", "continueFlow(self): if self.events: tempDataCopy = conduct.copyData(self.data) tempDataCopy[\"flowData\"][\"event\"] = self.events tempDataCopy[\"flowData\"][\"skip\"] = 1 self.events", "= { \"first\" : True, \"current\" : 0, \"total\" : 1, \"last\" :", "\"input\", \"schemaitem\" : \"comment\", \"textbox\" : classObject.comment}) # return formData def __init__(self): self.events", ": 0, \"total\" : 1, \"last\" : True } self.data[\"persistentData\"][\"system\"][\"conduct\"].triggerHandler(self.data[\"flowData\"][\"flow_id\"],tempDataCopy,flowIDType=True) def postRun(self): self.continueFlow()", "\"checkbox\", \"schemaitem\" : \"log\", \"checked\" : classObject.log}) # formData.append({\"type\" : \"input\", \"schemaitem\" :", "if self.limit != 0 and self.limit < len(self.events): self.continueFlow() # Returning false to", "[] def doAction(self,data): try: if \"skip\" in data[\"flowData\"]: del data[\"flowData\"][\"skip\"] return { \"result\"", "False, \"rc\" : 9 } def continueFlow(self): if self.events: tempDataCopy = conduct.copyData(self.data) tempDataCopy[\"flowData\"][\"event\"]", "9 } def continueFlow(self): if self.events: tempDataCopy = conduct.copyData(self.data) tempDataCopy[\"flowData\"][\"event\"] = self.events tempDataCopy[\"flowData\"][\"skip\"]", "of events to collect before resuming\"}) # formData.append({\"type\" : \"checkbox\", \"schemaitem\" : \"enabled\",", "[] # formData.append({\"type\" : \"input\", \"schemaitem\" : \"_id\", \"textbox\" : classObject._id}) # formData.append({\"type\"", "# formData.append({\"type\" : \"input\", \"schemaitem\" : \"comment\", \"textbox\" : classObject.comment}) # return formData", "formData.append({\"type\" : \"checkbox\", \"schemaitem\" : \"log\", \"checked\" : classObject.log}) # formData.append({\"type\" : \"input\",", ": \"limit\", \"textbox\" : classObject.limit, \"tooltip\" : \"Defines the number of events 
to", "if \"skip\" in data[\"flowData\"]: del data[\"flowData\"][\"skip\"] return { \"result\" : True, \"rc\" :", "return { \"result\" : False, \"rc\" : 9 } def continueFlow(self): if self.events:", "helpers, logging, cache, settings class _collect(action._action): limit = int() # class _properties(webui._properties): #", "\"schemaitem\" : \"comment\", \"textbox\" : classObject.comment}) # return formData def __init__(self): self.events =", "# formData.append({\"type\" : \"checkbox\", \"schemaitem\" : \"log\", \"checked\" : classObject.log}) # formData.append({\"type\" :", "\"rc\" : 0 } except KeyError: pass self.events.append(data[\"flowData\"][\"event\"]) self.data = data if self.limit", ": 0 } except KeyError: pass self.events.append(data[\"flowData\"][\"event\"]) self.data = data if self.limit !=", "{ \"result\" : True, \"rc\" : 0 } except KeyError: pass self.events.append(data[\"flowData\"][\"event\"]) self.data", "True, \"current\" : 0, \"total\" : 1, \"last\" : True } self.data[\"persistentData\"][\"system\"][\"conduct\"].triggerHandler(self.data[\"flowData\"][\"flow_id\"],tempDataCopy,flowIDType=True) def", "if self.events: tempDataCopy = conduct.copyData(self.data) tempDataCopy[\"flowData\"][\"event\"] = self.events tempDataCopy[\"flowData\"][\"skip\"] = 1 self.events =", "\"schemaitem\" : \"_id\", \"textbox\" : classObject._id}) # formData.append({\"type\" : \"input\", \"schemaitem\" : \"name\",", "formData.append({\"type\" : \"input\", \"schemaitem\" : \"name\", \"textbox\" : classObject.name}) # formData.append({\"type\" : \"input\",", "doAction(self,data): try: if \"skip\" in data[\"flowData\"]: del data[\"flowData\"][\"skip\"] return { \"result\" : True,", "= [] def doAction(self,data): try: if \"skip\" in data[\"flowData\"]: del data[\"flowData\"][\"skip\"] return {", ": classObject.comment}) # return formData def __init__(self): self.events = [] def doAction(self,data): try:", "0 } except KeyError: pass 
self.events.append(data[\"flowData\"][\"event\"]) self.data = data if self.limit != 0", "\"comment\", \"textbox\" : classObject.comment}) # return formData def __init__(self): self.events = [] def", ": 9 } def continueFlow(self): if self.events: tempDataCopy = conduct.copyData(self.data) tempDataCopy[\"flowData\"][\"event\"] = self.events", "self.events: tempDataCopy = conduct.copyData(self.data) tempDataCopy[\"flowData\"][\"event\"] = self.events tempDataCopy[\"flowData\"][\"skip\"] = 1 self.events = []", ": False, \"rc\" : 9 } def continueFlow(self): if self.events: tempDataCopy = conduct.copyData(self.data)", "tempDataCopy = conduct.copyData(self.data) tempDataCopy[\"flowData\"][\"event\"] = self.events tempDataCopy[\"flowData\"][\"skip\"] = 1 self.events = [] tempDataCopy[\"flowData\"][\"eventStats\"]", "the number of events to collect before resuming\"}) # formData.append({\"type\" : \"checkbox\", \"schemaitem\"", ": \"checkbox\", \"schemaitem\" : \"log\", \"checked\" : classObject.log}) # formData.append({\"type\" : \"input\", \"schemaitem\"", "# class _properties(webui._properties): # def generate(self,classObject): # formData = [] # formData.append({\"type\" :", "= data if self.limit != 0 and self.limit < len(self.events): self.continueFlow() # Returning", "core import helpers, logging, cache, settings class _collect(action._action): limit = int() # class", "formData def __init__(self): self.events = [] def doAction(self,data): try: if \"skip\" in data[\"flowData\"]:", "= [] tempDataCopy[\"flowData\"][\"eventStats\"] = { \"first\" : True, \"current\" : 0, \"total\" :", "< len(self.events): self.continueFlow() # Returning false to stop flow continue return { \"result\"", "\"textbox\" : classObject.name}) # formData.append({\"type\" : \"input\", \"schemaitem\" : \"limit\", \"textbox\" : classObject.limit,", ": True, \"rc\" : 0 } except KeyError: pass self.events.append(data[\"flowData\"][\"event\"]) self.data = data", "\"Defines the number of events to 
collect before resuming\"}) # formData.append({\"type\" : \"checkbox\",", "conduct, webui from core import helpers, logging, cache, settings class _collect(action._action): limit =", "cache, settings class _collect(action._action): limit = int() # class _properties(webui._properties): # def generate(self,classObject):", "<gh_stars>1-10 import time from core.models import action, conduct, webui from core import helpers,", "{ \"result\" : False, \"rc\" : 9 } def continueFlow(self): if self.events: tempDataCopy", "} def continueFlow(self): if self.events: tempDataCopy = conduct.copyData(self.data) tempDataCopy[\"flowData\"][\"event\"] = self.events tempDataCopy[\"flowData\"][\"skip\"] =", "def generate(self,classObject): # formData = [] # formData.append({\"type\" : \"input\", \"schemaitem\" : \"_id\",", "class _collect(action._action): limit = int() # class _properties(webui._properties): # def generate(self,classObject): # formData", "data if self.limit != 0 and self.limit < len(self.events): self.continueFlow() # Returning false", "except KeyError: pass self.events.append(data[\"flowData\"][\"event\"]) self.data = data if self.limit != 0 and self.limit", "self.events tempDataCopy[\"flowData\"][\"skip\"] = 1 self.events = [] tempDataCopy[\"flowData\"][\"eventStats\"] = { \"first\" : True,", "webui from core import helpers, logging, cache, settings class _collect(action._action): limit = int()", "class _properties(webui._properties): # def generate(self,classObject): # formData = [] # formData.append({\"type\" : \"input\",", "\"result\" : True, \"rc\" : 0 } except KeyError: pass self.events.append(data[\"flowData\"][\"event\"]) self.data =", "resuming\"}) # formData.append({\"type\" : \"checkbox\", \"schemaitem\" : \"enabled\", \"checked\" : classObject.enabled}) # formData.append({\"type\"", "= self.events tempDataCopy[\"flowData\"][\"skip\"] = 1 self.events = [] tempDataCopy[\"flowData\"][\"eventStats\"] = { \"first\" :", "# formData = [] # 
formData.append({\"type\" : \"input\", \"schemaitem\" : \"_id\", \"textbox\" :", "\"schemaitem\" : \"log\", \"checked\" : classObject.log}) # formData.append({\"type\" : \"input\", \"schemaitem\" : \"comment\",", "\"name\", \"textbox\" : classObject.name}) # formData.append({\"type\" : \"input\", \"schemaitem\" : \"limit\", \"textbox\" :", "return { \"result\" : True, \"rc\" : 0 } except KeyError: pass self.events.append(data[\"flowData\"][\"event\"])", "\"first\" : True, \"current\" : 0, \"total\" : 1, \"last\" : True }", "number of events to collect before resuming\"}) # formData.append({\"type\" : \"checkbox\", \"schemaitem\" :", "1 self.events = [] tempDataCopy[\"flowData\"][\"eventStats\"] = { \"first\" : True, \"current\" : 0,", "pass self.events.append(data[\"flowData\"][\"event\"]) self.data = data if self.limit != 0 and self.limit < len(self.events):", ": classObject.enabled}) # formData.append({\"type\" : \"checkbox\", \"schemaitem\" : \"log\", \"checked\" : classObject.log}) #", "continue return { \"result\" : False, \"rc\" : 9 } def continueFlow(self): if", "action, conduct, webui from core import helpers, logging, cache, settings class _collect(action._action): limit", "_collect(action._action): limit = int() # class _properties(webui._properties): # def generate(self,classObject): # formData =", "limit = int() # class _properties(webui._properties): # def generate(self,classObject): # formData = []", "\"checkbox\", \"schemaitem\" : \"enabled\", \"checked\" : classObject.enabled}) # formData.append({\"type\" : \"checkbox\", \"schemaitem\" :", "\"_id\", \"textbox\" : classObject._id}) # formData.append({\"type\" : \"input\", \"schemaitem\" : \"name\", \"textbox\" :", "# def generate(self,classObject): # formData = [] # formData.append({\"type\" : \"input\", \"schemaitem\" :", "# return formData def __init__(self): self.events = [] def doAction(self,data): try: if \"skip\"", "def doAction(self,data): try: if \"skip\" in data[\"flowData\"]: del 
data[\"flowData\"][\"skip\"] return { \"result\" :", "\"schemaitem\" : \"limit\", \"textbox\" : classObject.limit, \"tooltip\" : \"Defines the number of events", "core.models import action, conduct, webui from core import helpers, logging, cache, settings class", "import helpers, logging, cache, settings class _collect(action._action): limit = int() # class _properties(webui._properties):", "True, \"rc\" : 0 } except KeyError: pass self.events.append(data[\"flowData\"][\"event\"]) self.data = data if", "\"input\", \"schemaitem\" : \"_id\", \"textbox\" : classObject._id}) # formData.append({\"type\" : \"input\", \"schemaitem\" :", "formData = [] # formData.append({\"type\" : \"input\", \"schemaitem\" : \"_id\", \"textbox\" : classObject._id})", "tempDataCopy[\"flowData\"][\"event\"] = self.events tempDataCopy[\"flowData\"][\"skip\"] = 1 self.events = [] tempDataCopy[\"flowData\"][\"eventStats\"] = { \"first\"", ": \"_id\", \"textbox\" : classObject._id}) # formData.append({\"type\" : \"input\", \"schemaitem\" : \"name\", \"textbox\"", ": \"checkbox\", \"schemaitem\" : \"enabled\", \"checked\" : classObject.enabled}) # formData.append({\"type\" : \"checkbox\", \"schemaitem\"", ": \"input\", \"schemaitem\" : \"_id\", \"textbox\" : classObject._id}) # formData.append({\"type\" : \"input\", \"schemaitem\"", "logging, cache, settings class _collect(action._action): limit = int() # class _properties(webui._properties): # def", "data[\"flowData\"][\"skip\"] return { \"result\" : True, \"rc\" : 0 } except KeyError: pass", "_properties(webui._properties): # def generate(self,classObject): # formData = [] # formData.append({\"type\" : \"input\", \"schemaitem\"", "generate(self,classObject): # formData = [] # formData.append({\"type\" : \"input\", \"schemaitem\" : \"_id\", \"textbox\"", ": True, \"current\" : 0, \"total\" : 1, \"last\" : True } 
self.data[\"persistentData\"][\"system\"][\"conduct\"].triggerHandler(self.data[\"flowData\"][\"flow_id\"],tempDataCopy,flowIDType=True)", ": classObject._id}) # formData.append({\"type\" : \"input\", \"schemaitem\" : \"name\", \"textbox\" : classObject.name}) #", "0 and self.limit < len(self.events): self.continueFlow() # Returning false to stop flow continue", "time from core.models import action, conduct, webui from core import helpers, logging, cache,", "# formData.append({\"type\" : \"input\", \"schemaitem\" : \"name\", \"textbox\" : classObject.name}) # formData.append({\"type\" :", "# formData.append({\"type\" : \"input\", \"schemaitem\" : \"_id\", \"textbox\" : classObject._id}) # formData.append({\"type\" :", "try: if \"skip\" in data[\"flowData\"]: del data[\"flowData\"][\"skip\"] return { \"result\" : True, \"rc\"", "\"checked\" : classObject.log}) # formData.append({\"type\" : \"input\", \"schemaitem\" : \"comment\", \"textbox\" : classObject.comment})", "classObject.limit, \"tooltip\" : \"Defines the number of events to collect before resuming\"}) #", "events to collect before resuming\"}) # formData.append({\"type\" : \"checkbox\", \"schemaitem\" : \"enabled\", \"checked\"", ": \"log\", \"checked\" : classObject.log}) # formData.append({\"type\" : \"input\", \"schemaitem\" : \"comment\", \"textbox\"", "\"enabled\", \"checked\" : classObject.enabled}) # formData.append({\"type\" : \"checkbox\", \"schemaitem\" : \"log\", \"checked\" :", "= int() # class _properties(webui._properties): # def generate(self,classObject): # formData = [] #", "formData.append({\"type\" : \"checkbox\", \"schemaitem\" : \"enabled\", \"checked\" : classObject.enabled}) # formData.append({\"type\" : \"checkbox\",", ": \"Defines the number of events to collect before resuming\"}) # formData.append({\"type\" :", "\"skip\" in data[\"flowData\"]: del data[\"flowData\"][\"skip\"] return { \"result\" : True, \"rc\" : 0", "len(self.events): self.continueFlow() # Returning false to stop 
flow continue return { \"result\" :", "\"checked\" : classObject.enabled}) # formData.append({\"type\" : \"checkbox\", \"schemaitem\" : \"log\", \"checked\" : classObject.log})", "classObject._id}) # formData.append({\"type\" : \"input\", \"schemaitem\" : \"name\", \"textbox\" : classObject.name}) # formData.append({\"type\"", "before resuming\"}) # formData.append({\"type\" : \"checkbox\", \"schemaitem\" : \"enabled\", \"checked\" : classObject.enabled}) #", "del data[\"flowData\"][\"skip\"] return { \"result\" : True, \"rc\" : 0 } except KeyError:", "\"tooltip\" : \"Defines the number of events to collect before resuming\"}) # formData.append({\"type\"", "KeyError: pass self.events.append(data[\"flowData\"][\"event\"]) self.data = data if self.limit != 0 and self.limit <", "\"input\", \"schemaitem\" : \"name\", \"textbox\" : classObject.name}) # formData.append({\"type\" : \"input\", \"schemaitem\" :", "collect before resuming\"}) # formData.append({\"type\" : \"checkbox\", \"schemaitem\" : \"enabled\", \"checked\" : classObject.enabled})", ": classObject.limit, \"tooltip\" : \"Defines the number of events to collect before resuming\"})", "def continueFlow(self): if self.events: tempDataCopy = conduct.copyData(self.data) tempDataCopy[\"flowData\"][\"event\"] = self.events tempDataCopy[\"flowData\"][\"skip\"] = 1", ": \"input\", \"schemaitem\" : \"comment\", \"textbox\" : classObject.comment}) # return formData def __init__(self):", "# formData.append({\"type\" : \"checkbox\", \"schemaitem\" : \"enabled\", \"checked\" : classObject.enabled}) # formData.append({\"type\" :", "tempDataCopy[\"flowData\"][\"eventStats\"] = { \"first\" : True, \"current\" : 0, \"total\" : 1, \"last\"", "classObject.name}) # formData.append({\"type\" : \"input\", \"schemaitem\" : \"limit\", \"textbox\" : classObject.limit, \"tooltip\" :", "{ \"first\" : True, \"current\" : 0, \"total\" : 1, \"last\" : True", "return formData def __init__(self): self.events = [] def doAction(self,data): 
try: if \"skip\" in", "self.events = [] tempDataCopy[\"flowData\"][\"eventStats\"] = { \"first\" : True, \"current\" : 0, \"total\"", "__init__(self): self.events = [] def doAction(self,data): try: if \"skip\" in data[\"flowData\"]: del data[\"flowData\"][\"skip\"]", "\"current\" : 0, \"total\" : 1, \"last\" : True } self.data[\"persistentData\"][\"system\"][\"conduct\"].triggerHandler(self.data[\"flowData\"][\"flow_id\"],tempDataCopy,flowIDType=True) def postRun(self):", "formData.append({\"type\" : \"input\", \"schemaitem\" : \"limit\", \"textbox\" : classObject.limit, \"tooltip\" : \"Defines the", "int() # class _properties(webui._properties): # def generate(self,classObject): # formData = [] # formData.append({\"type\"", "!= 0 and self.limit < len(self.events): self.continueFlow() # Returning false to stop flow", "stop flow continue return { \"result\" : False, \"rc\" : 9 } def", "= 1 self.events = [] tempDataCopy[\"flowData\"][\"eventStats\"] = { \"first\" : True, \"current\" :", ": \"enabled\", \"checked\" : classObject.enabled}) # formData.append({\"type\" : \"checkbox\", \"schemaitem\" : \"log\", \"checked\"", "self.events.append(data[\"flowData\"][\"event\"]) self.data = data if self.limit != 0 and self.limit < len(self.events): self.continueFlow()", "[] tempDataCopy[\"flowData\"][\"eventStats\"] = { \"first\" : True, \"current\" : 0, \"total\" : 1,", "\"input\", \"schemaitem\" : \"limit\", \"textbox\" : classObject.limit, \"tooltip\" : \"Defines the number of", "conduct.copyData(self.data) tempDataCopy[\"flowData\"][\"event\"] = self.events tempDataCopy[\"flowData\"][\"skip\"] = 1 self.events = [] tempDataCopy[\"flowData\"][\"eventStats\"] = {", "self.data = data if self.limit != 0 and self.limit < len(self.events): self.continueFlow() #", "\"textbox\" : classObject.comment}) # return formData def __init__(self): self.events = [] def doAction(self,data):", "import action, conduct, webui from core import helpers, logging, cache, settings class 
_collect(action._action):", "# formData.append({\"type\" : \"input\", \"schemaitem\" : \"limit\", \"textbox\" : classObject.limit, \"tooltip\" : \"Defines", "\"limit\", \"textbox\" : classObject.limit, \"tooltip\" : \"Defines the number of events to collect", "self.continueFlow() # Returning false to stop flow continue return { \"result\" : False,", "to stop flow continue return { \"result\" : False, \"rc\" : 9 }", "from core import helpers, logging, cache, settings class _collect(action._action): limit = int() #", "import time from core.models import action, conduct, webui from core import helpers, logging,", "formData.append({\"type\" : \"input\", \"schemaitem\" : \"comment\", \"textbox\" : classObject.comment}) # return formData def", "settings class _collect(action._action): limit = int() # class _properties(webui._properties): # def generate(self,classObject): #", "classObject.log}) # formData.append({\"type\" : \"input\", \"schemaitem\" : \"comment\", \"textbox\" : classObject.comment}) # return", "\"log\", \"checked\" : classObject.log}) # formData.append({\"type\" : \"input\", \"schemaitem\" : \"comment\", \"textbox\" :", ": \"name\", \"textbox\" : classObject.name}) # formData.append({\"type\" : \"input\", \"schemaitem\" : \"limit\", \"textbox\"", "} except KeyError: pass self.events.append(data[\"flowData\"][\"event\"]) self.data = data if self.limit != 0 and", "false to stop flow continue return { \"result\" : False, \"rc\" : 9", "\"schemaitem\" : \"name\", \"textbox\" : classObject.name}) # formData.append({\"type\" : \"input\", \"schemaitem\" : \"limit\",", "def __init__(self): self.events = [] def doAction(self,data): try: if \"skip\" in data[\"flowData\"]: del", "\"schemaitem\" : \"enabled\", \"checked\" : classObject.enabled}) # formData.append({\"type\" : \"checkbox\", \"schemaitem\" : \"log\",", "self.events = [] def doAction(self,data): try: if \"skip\" in data[\"flowData\"]: del data[\"flowData\"][\"skip\"] return", ": \"input\", \"schemaitem\" 
: \"limit\", \"textbox\" : classObject.limit, \"tooltip\" : \"Defines the number", "classObject.enabled}) # formData.append({\"type\" : \"checkbox\", \"schemaitem\" : \"log\", \"checked\" : classObject.log}) # formData.append({\"type\"", "self.limit != 0 and self.limit < len(self.events): self.continueFlow() # Returning false to stop", "\"textbox\" : classObject.limit, \"tooltip\" : \"Defines the number of events to collect before", ": classObject.log}) # formData.append({\"type\" : \"input\", \"schemaitem\" : \"comment\", \"textbox\" : classObject.comment}) #", "and self.limit < len(self.events): self.continueFlow() # Returning false to stop flow continue return", "Returning false to stop flow continue return { \"result\" : False, \"rc\" :", "\"rc\" : 9 } def continueFlow(self): if self.events: tempDataCopy = conduct.copyData(self.data) tempDataCopy[\"flowData\"][\"event\"] =", ": classObject.name}) # formData.append({\"type\" : \"input\", \"schemaitem\" : \"limit\", \"textbox\" : classObject.limit, \"tooltip\"", ": \"comment\", \"textbox\" : classObject.comment}) # return formData def __init__(self): self.events = []", "classObject.comment}) # return formData def __init__(self): self.events = [] def doAction(self,data): try: if", "self.limit < len(self.events): self.continueFlow() # Returning false to stop flow continue return {", "\"result\" : False, \"rc\" : 9 } def continueFlow(self): if self.events: tempDataCopy =", "flow continue return { \"result\" : False, \"rc\" : 9 } def continueFlow(self):", ": \"input\", \"schemaitem\" : \"name\", \"textbox\" : classObject.name}) # formData.append({\"type\" : \"input\", \"schemaitem\"", "# Returning false to stop flow continue return { \"result\" : False, \"rc\"", "= conduct.copyData(self.data) tempDataCopy[\"flowData\"][\"event\"] = self.events tempDataCopy[\"flowData\"][\"skip\"] = 1 self.events = [] tempDataCopy[\"flowData\"][\"eventStats\"] =", "in data[\"flowData\"]: del data[\"flowData\"][\"skip\"] return { 
\"result\" : True, \"rc\" : 0 }", "data[\"flowData\"]: del data[\"flowData\"][\"skip\"] return { \"result\" : True, \"rc\" : 0 } except", "\"textbox\" : classObject._id}) # formData.append({\"type\" : \"input\", \"schemaitem\" : \"name\", \"textbox\" : classObject.name})", "= [] # formData.append({\"type\" : \"input\", \"schemaitem\" : \"_id\", \"textbox\" : classObject._id}) #", "tempDataCopy[\"flowData\"][\"skip\"] = 1 self.events = [] tempDataCopy[\"flowData\"][\"eventStats\"] = { \"first\" : True, \"current\"", "to collect before resuming\"}) # formData.append({\"type\" : \"checkbox\", \"schemaitem\" : \"enabled\", \"checked\" :", "formData.append({\"type\" : \"input\", \"schemaitem\" : \"_id\", \"textbox\" : classObject._id}) # formData.append({\"type\" : \"input\"," ]
[ "the License. ''' # USAGE # python face_headpose_dlib.py [--input 0] [--detthreshold 0.55] [--nmsthreshold", "np.array( [[focal_length, 0, center[0]], [0, focal_length, center[1]], [0, 0, 1]], dtype = \"double\"", "import cv2 import numpy as np import vart import pathlib import xir import", "= 0.35)\") args = vars(ap.parse_args()) if not args.get(\"input\",False): inputId = 0 else: inputId", "print(\"[INFO] face landmarks = DLIB\") else: print(\"[INFO] face landmarks = VART\") # Update", "2) cv2.circle(frame, (int(p[0]), int(p[1])), 3, (255,255,255), 2) # draw head pose vector p1", "face detector faces = dpu_face_detector.process(frame) #print(faces) if use_dlib_detection == True: # DLIB based", "rt_fps_time)/cv2.getTickFrequency() rt_fps_valid = True rt_fps = 10.0/t rt_fps_message = \"FPS: {0:.2f}\".format(rt_fps) #print(\"[INFO] \",rt_fps_message)", "now) (landmarks[0,0], landmarks[0,1]), # Left eye left corner (landmarks[1,0], landmarks[1,1]), # Right eye", "the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in", "#print(faces) if use_dlib_detection == True: # DLIB based face detector dlib_faces = dlib_face_detector(dlib_image,", "Chin (dlib_landmarks.part(36).x, dlib_landmarks.part(36).y), # Left eye left corner (dlib_landmarks.part(45).x, dlib_landmarks.part(45).y), # Right eye", "landmarks[i,1] = startY + landmarks[i,1]*heightY # draw landmarks #for i in range(5): #", "the image plane. 
# We use this to draw a line sticking out", "Display Status status = \"Status :\" status = status + \" FaceDetect=\" if", "Right mouth corner ], dtype=\"double\") # estimate approximate location of chin # let's", "False use_dlib_landmarks = True print(\"[INFO] face detection = VART\") print(\"[INFO] face landmarks =", "if use_dlib_landmarks == True: print(\"[INFO] face landmarks = DLIB\") else: print(\"[INFO] face landmarks", "# Stop the face detector dpu_face_detector.stop() del densebox_dpu dpu_face_landmark.stop() del landmark_dpu # Cleanup", "for i in range(5): landmarks[i,0] = startX + landmarks[i,0]*widthX landmarks[i,1] = startY +", "box surrounding the object so we can # visualize it cv2.rectangle( frame, (left,top),", "`q` key was pressed, break from the loop if key == ord(\"q\"): break", "based face detector faces = dpu_face_detector.process(frame) #print(faces) if use_dlib_detection == True: # DLIB", "DLIB based face detector dlib_faces = dlib_face_detector(dlib_image, 0) for face in dlib_faces: faces.append(", "startY + landmarks[i,1]*heightY # draw landmarks #for i in range(5): # x =", "in enumerate(faces): # draw a bounding box surrounding the object so we can", "else: print(\"[INFO] face detection = VART\") # if the `l` key was pressed,", "# Initialize DLIB based face landmark dlib_landmark_model = \"./models/shape_predictor_68_face_landmarks.dat\" dlib_face_landmark = dlib.shape_predictor(dlib_landmark_model) #", "input ...\") cam = cv2.VideoCapture(inputId) cam.set(cv2.CAP_PROP_FRAME_WIDTH,640) cam.set(cv2.CAP_PROP_FRAME_HEIGHT,480) if not (cam.isOpened()): print(\"[ERROR] Failed to", "size[0]/2) camera_matrix = np.array( [[focal_length, 0, center[0]], [0, focal_length, center[1]], [0, 0, 1]],", "mouth corner ], dtype=\"double\") #print(image_points) # calculate head pose dist_coeffs = np.zeros((4,1)) #", "else: print(\"[INFO] face landmarks = VART\") # Update the real-time FPS counter rt_fps_count", "True: status = status + \"DLIB\" else: status = 
status + \"VART\" status", "the arguments ap = argparse.ArgumentParser() ap.add_argument(\"-i\", \"--input\", required=False, help = \"input camera identifier", "Left Mouth corner (dlib_landmarks.part(54).x, dlib_landmarks.part(54).y) # Right mouth corner ], dtype=\"double\") #print(image_points) #", "int(image_points[0][0]), int(image_points[0][1])) p2 = ( int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1])) cv2.line(frame, p1, p2, (255,0,0), 2) #", "argparse.ArgumentParser() ap.add_argument(\"-i\", \"--input\", required=False, help = \"input camera identifier (default = 0)\") ap.add_argument(\"-d\",", "points image_points = np.array([ (dlib_landmarks.part(30).x, dlib_landmarks.part(30).y), # Nose tip (dlib_landmarks.part( 8).x, dlib_landmarks.part( 8).y),", "dpu_face_detector.start() # Initialize Vitis-AI/DPU based face landmark landmark_xmodel = \"/usr/share/vitis_ai_library/models/face_landmark/face_landmark.xmodel\" landmark_graph = xir.Graph.deserialize(landmark_xmodel)", "cv2.LINE_AA) # Display the processed image cv2.imshow(\"Head Pose Estimation\", frame) key = cv2.waitKey(1)", "pressed, toggle between landmark algorithms if key == ord(\"l\"): use_dlib_landmarks = not use_dlib_landmarks", "face detection = VART\") # if the `l` key was pressed, toggle between", "= status + \" \" + rt_fps_message cv2.putText(frame, status, (rt_fps_x,rt_fps_y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0),", "not (cam.isOpened()): print(\"[ERROR] Failed to open camera \", inputId ) exit() # 3D", "p2, (255,0,0), 2) # Display Status status = \"Status :\" status = status", "corner (dlib_landmarks.part(54).x, dlib_landmarks.part(54).y) # Right mouth corner ], dtype=\"double\") #print(image_points) # calculate head", "dlib_landmarks.part(30).y), # Nose tip (dlib_landmarks.part( 8).x, dlib_landmarks.part( 8).y), # Chin (dlib_landmarks.part(36).x, dlib_landmarks.part(36).y), #", "Copyright 2021 Avnet Inc. 
Licensed under the Apache License, Version 2.0 (the \"License\");", "(face.left(),face.top(),face.right(),face.bottom()) ) #print(faces) # loop over the faces for i,(left,top,right,bottom) in enumerate(faces): #", "FaceDetect(densebox_dpu,detThreshold,nmsThreshold) dpu_face_detector.start() # Initialize Vitis-AI/DPU based face landmark landmark_xmodel = \"/usr/share/vitis_ai_library/models/face_landmark/face_landmark.xmodel\" landmark_graph =", "starting camera input ...\") cam = cv2.VideoCapture(inputId) cam.set(cv2.CAP_PROP_FRAME_WIDTH,640) cam.set(cv2.CAP_PROP_FRAME_HEIGHT,480) if not (cam.isOpened()): print(\"[ERROR]", "3D model points. model_points = np.array([ (0.0, 0.0, 0.0), # Nose tip (0.0,", "License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required", "identifier = ',inputId) if not args.get(\"detthreshold\",False): detThreshold = 0.55 else: detThreshold = float(args[\"detthreshold\"])", "if not args.get(\"detthreshold\",False): detThreshold = 0.55 else: detThreshold = float(args[\"detthreshold\"]) print('[INFO] face detector", "\"face detector softmax threshold (default = 0.55)\") ap.add_argument(\"-n\", \"--nmsthreshold\", required=False, help = \"face", "face landmarks = DLIB\") # Initialize the camera input print(\"[INFO] starting camera input", "np.array([ (landmarks[2,0], landmarks[2,1]), # Nose tip (landmarks[2,0], landmarks[2,1]), # Chin (place-holder for now)", "# Left Mouth corner (landmarks[4,0], landmarks[4,1]) # Right mouth corner ], dtype=\"double\") #", "+ nose_offset_x, mouth_center_y + nose_offset_y); #print(image_points) if use_dlib_landmarks == True: # extract face", "\"Status :\" status = status + \" FaceDetect=\" if use_dlib_detection == True: status", "densebox_subgraphs = get_child_subgraph_dpu(densebox_graph) assert len(densebox_subgraphs) == 1 # only one DPU kernel densebox_dpu", "+ image_points[5][0])/2; mouth_center_y = (image_points[4][1] + image_points[5][1])/2; image_points[1] 
= (mouth_center_x + nose_offset_x, mouth_center_y", "#cv2.circle(face, (int(p[0]), int(p[1])), 3, (0,0,255), -1) #cv2.circle(face, (int(p[0]), int(p[1])), 3, (255,255,255), 2) cv2.circle(frame,", "we can # visualize it cv2.rectangle( frame, (left,top), (right,bottom), (0,255,0), 2) # extract", "= endX-startX heightY = endY-startY face = frame[startY:endY, startX:endX] if use_dlib_landmarks == False:", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the", "a line sticking out of the nose (nose_end_point2D, jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]),", "rt_fps_message = \"FPS: {0:.2f}\".format(rt_fps) #print(\"[INFO] \",rt_fps_message) rt_fps_count = 0 # Stop the timer", "camera identifier = ',inputId) if not args.get(\"detthreshold\",False): detThreshold = 0.55 else: detThreshold =", "[0, focal_length, center[1]], [0, 0, 1]], dtype = \"double\" ) print(\"[INFO] Camera Matrix", "ap = argparse.ArgumentParser() ap.add_argument(\"-i\", \"--input\", required=False, help = \"input camera identifier (default =", "(225.0, 170.0, -135.0), # Right eye right corne (-150.0, -150.0, -125.0), # Left", "= FaceDetect(densebox_dpu,detThreshold,nmsThreshold) dpu_face_detector.start() # Initialize Vitis-AI/DPU based face landmark landmark_xmodel = \"/usr/share/vitis_ai_library/models/face_landmark/face_landmark.xmodel\" landmark_graph", "p2 = ( int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1])) cv2.line(frame, p1, p2, (255,0,0), 2) # Display Status", "detThreshold = float(args[\"detthreshold\"]) print('[INFO] face detector - softmax threshold = ',detThreshold) if not", "#print(faces) # loop over the faces for i,(left,top,right,bottom) in enumerate(faces): # draw a", "+ 1 if rt_fps_count >= 10: t = (cv2.getTickCount() - rt_fps_time)/cv2.getTickFrequency() rt_fps_valid =", "Initialize Vitis-AI/DPU based face detector densebox_xmodel = \"/usr/share/vitis_ai_library/models/densebox_640_360/densebox_640_360.xmodel\" 
densebox_graph = xir.Graph.deserialize(densebox_xmodel) densebox_subgraphs =", "float(args[\"nmsthreshold\"]) print('[INFO] face detector - NMS threshold = ',nmsThreshold) # Initialize Vitis-AI/DPU based", "camera_matrix = np.array( [[focal_length, 0, center[0]], [0, focal_length, center[1]], [0, 0, 1]], dtype", "= \"./models/shape_predictor_68_face_landmarks.dat\" dlib_face_landmark = dlib.shape_predictor(dlib_landmark_model) # algorithm selection use_dlib_detection = False use_dlib_landmarks =", "if use_dlib_landmarks == False: # extract face landmarks landmarks = dpu_face_landmark.process(face) # calculate", "from typing import List import cv2 import numpy as np import vart import", "startX = int(left) startY = int(top) endX = int(right) endY = int(bottom) #print(", "endY ) widthX = endX-startX heightY = endY-startY face = frame[startY:endY, startX:endX] if", "prepare 2D points image_points = np.array([ (landmarks[2,0], landmarks[2,1]), # Nose tip (landmarks[2,0], landmarks[2,1]),", "= np.array([ (0.0, 0.0, 0.0), # Nose tip (0.0, -330.0, -65.0), # Chin", "10: t = (cv2.getTickCount() - rt_fps_time)/cv2.getTickFrequency() rt_fps_valid = True rt_fps = 10.0/t rt_fps_message", "status = status + \" FaceDetect=\" if use_dlib_detection == True: status = status", "Estimation\", frame) key = cv2.waitKey(1) & 0xFF # Update the FPS counter fps.update()", "-135.0), # Right eye right corne (-150.0, -150.0, -125.0), # Left Mouth corner", "xir.Graph.deserialize(landmark_xmodel) landmark_subgraphs = get_child_subgraph_dpu(landmark_graph) assert len(landmark_subgraphs) == 1 # only one DPU kernel", "# start the FPS counter fps = FPS().start() # init the real-time FPS", "loop over the frames from the video stream while True: # Update the", "from imutils.video import FPS sys.path.append(os.path.abspath('../')) sys.path.append(os.path.abspath('./')) from vitis_ai_vart.facedetect import FaceDetect from vitis_ai_vart.facelandmark import", "pressed, break from the loop if key == 
ord(\"q\"): break # if the", "the License for the specific language governing permissions and limitations under the License.", "* from typing import List import cv2 import numpy as np import vart", "{0:.2f}\".format(rt_fps) #print(\"[INFO] \",rt_fps_message) rt_fps_count = 0 # Stop the timer and display FPS", "corne (dlib_landmarks.part(48).x, dlib_landmarks.part(48).y), # Left Mouth corner (dlib_landmarks.part(54).x, dlib_landmarks.part(54).y) # Right mouth corner", "# extract the face ROI startX = int(left) startY = int(top) endX =", "\"Translation Vector:\\n {0}\".format(translation_vector) # Project a 3D point (0, 0, 1000.0) onto the", "jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector, translation_vector, camera_matrix, dist_coeffs) for p in image_points:", "the `d` key was pressed, toggle between detection algorithms if key == ord(\"d\"):", "1000.0) onto the image plane. # We use this to draw a line", "landmarks[i,0]*widthX landmarks[i,1] = startY + landmarks[i,1]*heightY # draw landmarks #for i in range(5):", "status + \" Landmark=\" if use_dlib_landmarks == True: status = status + \"DLIB\"", "status = status + \"VART\" status = status + \" Landmark=\" if use_dlib_landmarks", "pathlib import xir import os import math import threading import time import sys", "limitations under the License. 
''' # USAGE # python face_headpose_dlib.py [--input 0] [--detthreshold", "# Initialize the camera input print(\"[INFO] starting camera input ...\") cam = cv2.VideoCapture(inputId)", "construct the argument parse and parse the arguments ap = argparse.ArgumentParser() ap.add_argument(\"-i\", \"--input\",", "# extract face landmarks landmarks = dpu_face_landmark.process(face) # calculate coordinates for full frame", "-330.0, -65.0), # Chin (-225.0, 170.0, -135.0), # Left eye left corner (225.0,", "dlib_faces = dlib_face_detector(dlib_image, 0) for face in dlib_faces: faces.append( (face.left(),face.top(),face.right(),face.bottom()) ) #print(faces) #", "image_points[3][1])/2; nose_offset_x = (image_points[0][0] - eye_center_x); nose_offset_y = (image_points[0][1] - eye_center_y); mouth_center_x =", "# Display the processed image cv2.imshow(\"Head Pose Estimation\", frame) key = cv2.waitKey(1) &", "if the `q` key was pressed, break from the loop if key ==", "with DLIB dlib_rect = dlib.rectangle( startX,startY,endX,endY ) dlib_landmarks = dlib_face_landmark(dlib_image,dlib_rect) # draw landmarks", "= np.array([ (dlib_landmarks.part(30).x, dlib_landmarks.part(30).y), # Nose tip (dlib_landmarks.part( 8).x, dlib_landmarks.part( 8).y), # Chin", "= size[0]-10 # loop over the frames from the video stream while True:", "- rt_fps_time)/cv2.getTickFrequency() rt_fps_valid = True rt_fps = 10.0/t rt_fps_message = \"FPS: {0:.2f}\".format(rt_fps) #print(\"[INFO]", "Unless required by applicable law or agreed to in writing, software distributed under", "= vart.Runner.create_runner(densebox_subgraphs[0],\"run\") dpu_face_detector = FaceDetect(densebox_dpu,detThreshold,nmsThreshold) dpu_face_detector.start() # Initialize Vitis-AI/DPU based face landmark landmark_xmodel", "{:.2f}\".format(fps.elapsed())) print(\"[INFO] elapsed FPS: {:.2f}\".format(fps.fps())) # Stop the face detector dpu_face_detector.stop() del densebox_dpu", "int(p[1])), 3, (255,255,255), 2) # draw head pose vector p1 
= ( int(image_points[0][0]),", "= \"face detector NMS threshold (default = 0.35)\") args = vars(ap.parse_args()) if not", "detector dlib_face_detector = dlib.get_frontal_face_detector() # Initialize DLIB based face landmark dlib_landmark_model = \"./models/shape_predictor_68_face_landmarks.dat\"", "landmarks = dpu_face_landmark.process(face) # calculate coordinates for full frame for i in range(5):", "= cv2.waitKey(1) & 0xFF # Update the FPS counter fps.update() # if the", "landmark_dpu = vart.Runner.create_runner(landmark_subgraphs[0],\"run\") dpu_face_landmark = FaceLandmark(landmark_dpu) dpu_face_landmark.start() # Initialize DLIB based face detector", "ord(\"q\"): break # if the `d` key was pressed, toggle between detection algorithms", "nmsThreshold = 0.35 else: nmsThreshold = float(args[\"nmsthreshold\"]) print('[INFO] face detector - NMS threshold", "image_points[5][0])/2; mouth_center_y = (image_points[4][1] + image_points[5][1])/2; image_points[1] = (mouth_center_x + nose_offset_x, mouth_center_y +", "vitis_ai_vart.facelandmark import FaceLandmark from vitis_ai_vart.utils import get_child_subgraph_dpu import dlib # construct the argument", "camera input print(\"[INFO] starting camera input ...\") cam = cv2.VideoCapture(inputId) cam.set(cv2.CAP_PROP_FRAME_WIDTH,640) cam.set(cv2.CAP_PROP_FRAME_HEIGHT,480) if", "# visualize it cv2.rectangle( frame, (left,top), (right,bottom), (0,255,0), 2) # extract the face", "detector - NMS threshold = ',nmsThreshold) # Initialize Vitis-AI/DPU based face detector densebox_xmodel", "# Nose tip (landmarks[2,0], landmarks[2,1]), # Chin (place-holder for now) (landmarks[0,0], landmarks[0,1]), #", "assume that the chin location will behave similar as the nose location eye_center_x", "Capture image from camera ret,frame = cam.read() dlib_image = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB) faces = []", "if use_dlib_detection == False: # Vitis-AI/DPU based face detector faces = dpu_face_detector.process(frame) #print(faces)", "1 # only 
one DPU kernel landmark_dpu = vart.Runner.create_runner(landmark_subgraphs[0],\"run\") dpu_face_landmark = FaceLandmark(landmark_dpu) dpu_face_landmark.start()", "(dlib_landmarks.part(54).x, dlib_landmarks.part(54).y) # Right mouth corner ], dtype=\"double\") #print(image_points) # calculate head pose", "(0,0,255), -1) #cv2.circle(face, (int(p[0]), int(p[1])), 3, (255,255,255), 2) cv2.circle(frame, (int(p[0]), int(p[1])), 3, (255,255,255),", "0.55] [--nmsthreshold 0.35] from ctypes import * from typing import List import cv2", "# Left eye left corner (225.0, 170.0, -135.0), # Right eye right corne", "FPS sys.path.append(os.path.abspath('../')) sys.path.append(os.path.abspath('./')) from vitis_ai_vart.facedetect import FaceDetect from vitis_ai_vart.facelandmark import FaceLandmark from vitis_ai_vart.utils", "the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "ap.add_argument(\"-n\", \"--nmsthreshold\", required=False, help = \"face detector NMS threshold (default = 0.35)\") args", "status + \" FaceDetect=\" if use_dlib_detection == True: status = status + \"DLIB\"", "Vitis-AI/DPU based face landmark landmark_xmodel = \"/usr/share/vitis_ai_library/models/face_landmark/face_landmark.xmodel\" landmark_graph = xir.Graph.deserialize(landmark_xmodel) landmark_subgraphs = get_child_subgraph_dpu(landmark_graph)", "License, Version 2.0 (the \"License\"); you may not use this file except in", "endX, startY, endY ) widthX = endX-startX heightY = endY-startY face = frame[startY:endY,", "# prepare 2D points image_points = np.array([ (dlib_landmarks.part(30).x, dlib_landmarks.part(30).y), # Nose tip (dlib_landmarks.part(", "import FaceDetect from vitis_ai_vart.facelandmark import FaceLandmark from vitis_ai_vart.utils import get_child_subgraph_dpu import dlib #", "= (image_points[0][0] - eye_center_x); nose_offset_y = (image_points[0][1] - eye_center_y); mouth_center_x = (image_points[4][0] +", "prepare 2D points image_points = np.array([ 
(dlib_landmarks.part(30).x, dlib_landmarks.part(30).y), # Nose tip (dlib_landmarks.part( 8).x,", "# Initialize Vitis-AI/DPU based face detector densebox_xmodel = \"/usr/share/vitis_ai_library/models/densebox_640_360/densebox_640_360.xmodel\" densebox_graph = xir.Graph.deserialize(densebox_xmodel) densebox_subgraphs", "head pose dist_coeffs = np.zeros((4,1)) # Assuming no lens distortion (success, rotation_vector, translation_vector)", "landmarks[3,1]), # Left Mouth corner (landmarks[4,0], landmarks[4,1]) # Right mouth corner ], dtype=\"double\")", "camera_matrix, dist_coeffs) for p in image_points: #cv2.circle(face, (int(p[0]), int(p[1])), 3, (0,0,255), -1) #cv2.circle(face,", "int(p[1])), 3, (255,255,255), 2) cv2.circle(frame, (int(p[0]), int(p[1])), 3, (255,255,255), 2) # draw head", "landmarks = VART\") # Update the real-time FPS counter rt_fps_count = rt_fps_count +", "0xFF # Update the FPS counter fps.update() # if the `q` key was", "Stop the timer and display FPS information fps.stop() print(\"[INFO] elapsed time: {:.2f}\".format(fps.elapsed())) print(\"[INFO]", "import sys import argparse from imutils.video import FPS sys.path.append(os.path.abspath('../')) sys.path.append(os.path.abspath('./')) from vitis_ai_vart.facedetect import", "= ',inputId) if not args.get(\"detthreshold\",False): detThreshold = 0.55 else: detThreshold = float(args[\"detthreshold\"]) print('[INFO]", "required=False, help = \"input camera identifier (default = 0)\") ap.add_argument(\"-d\", \"--detthreshold\", required=False, help", "0, 1]], dtype = \"double\" ) print(\"[INFO] Camera Matrix :\\n {0}\".format(camera_matrix)); # start", "real-time FPS counter rt_fps_count = rt_fps_count + 1 if rt_fps_count >= 10: t", "170.0, -135.0), # Right eye right corne (-150.0, -150.0, -125.0), # Left Mouth", "<reponame>AlbertaBeef/vitis_ai_python_examples<gh_stars>1-10 ''' Copyright 2021 Avnet Inc. 
Licensed under the Apache License, Version 2.0", "cv2.getTickCount() # Capture image from camera ret,frame = cam.read() dlib_image = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB) faces", "image cv2.imshow(\"Head Pose Estimation\", frame) key = cv2.waitKey(1) & 0xFF # Update the", "use_dlib_landmarks == True: print(\"[INFO] face landmarks = DLIB\") else: print(\"[INFO] face landmarks =", "a bounding box surrounding the object so we can # visualize it cv2.rectangle(", "DPU kernel densebox_dpu = vart.Runner.create_runner(densebox_subgraphs[0],\"run\") dpu_face_detector = FaceDetect(densebox_dpu,detThreshold,nmsThreshold) dpu_face_detector.start() # Initialize Vitis-AI/DPU based", "for full frame for i in range(5): landmarks[i,0] = startX + landmarks[i,0]*widthX landmarks[i,1]", "cv2.waitKey(1) & 0xFF # Update the FPS counter fps.update() # if the `q`", "else: status = status + \"VART\" status = status + \" Landmark=\" if", "algorithms if key == ord(\"d\"): use_dlib_detection = not use_dlib_detection if use_dlib_detection == True:", "ap.add_argument(\"-d\", \"--detthreshold\", required=False, help = \"face detector softmax threshold (default = 0.55)\") ap.add_argument(\"-n\",", "cv2.cvtColor(frame,cv2.COLOR_BGR2RGB) faces = [] if use_dlib_detection == False: # Vitis-AI/DPU based face detector", "i in range(5): # x = int(landmarks[i,0]) # y = int(landmarks[i,1]) # cv2.circle(", "\", inputId ) exit() # 3D model points. model_points = np.array([ (0.0, 0.0,", "= int(left) startY = int(top) endX = int(right) endY = int(bottom) #print( startX,", "[--input 0] [--detthreshold 0.55] [--nmsthreshold 0.35] from ctypes import * from typing import", "camera \", inputId ) exit() # 3D model points. 
model_points = np.array([ (0.0,", "software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT", "by applicable law or agreed to in writing, software distributed under the License", "nose_offset_y); #print(image_points) if use_dlib_landmarks == True: # extract face landmarks with DLIB dlib_rect", "-65.0), # Chin (-225.0, 170.0, -135.0), # Left eye left corner (225.0, 170.0,", ":\" status = status + \" FaceDetect=\" if use_dlib_detection == True: status =", "\"VART\" status = status + \" Landmark=\" if use_dlib_landmarks == True: status =", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License", "in image_points: #cv2.circle(face, (int(p[0]), int(p[1])), 3, (0,0,255), -1) #cv2.circle(face, (int(p[0]), int(p[1])), 3, (255,255,255),", "dlib_face_detector = dlib.get_frontal_face_detector() # Initialize DLIB based face landmark dlib_landmark_model = \"./models/shape_predictor_68_face_landmarks.dat\" dlib_face_landmark", "= False rt_fps = 0.0 rt_fps_message = \"FPS: {0:.2f}\".format(rt_fps) rt_fps_x = 10 rt_fps_y", "dlib_face_detector(dlib_image, 0) for face in dlib_faces: faces.append( (face.left(),face.top(),face.right(),face.bottom()) ) #print(faces) # loop over", "\" + rt_fps_message cv2.putText(frame, status, (rt_fps_x,rt_fps_y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 1, cv2.LINE_AA) # Display", "internals ret,frame = cam.read() size=frame.shape focal_length = size[1] center = (size[1]/2, size[0]/2) camera_matrix", "# Chin (place-holder for now) (landmarks[0,0], landmarks[0,1]), # Left eye left corner (landmarks[1,0],", "range(dlib_landmarks.num_parts): # x = int(dlib_landmarks.part(i).x) # y = int(dlib_landmarks.part(i).y) # cv2.circle( frame, (x,y),", "right corne (landmarks[3,0], landmarks[3,1]), # Left Mouth corner (landmarks[4,0], landmarks[4,1]) # Right mouth", "based face landmark landmark_xmodel = \"/usr/share/vitis_ai_library/models/face_landmark/face_landmark.xmodel\" landmark_graph = 
xir.Graph.deserialize(landmark_xmodel) landmark_subgraphs = get_child_subgraph_dpu(landmark_graph) assert", "exit() # 3D model points. model_points = np.array([ (0.0, 0.0, 0.0), # Nose", "(left,top), (right,bottom), (0,255,0), 2) # extract the face ROI startX = int(left) startY", "# Left Mouth corner (dlib_landmarks.part(54).x, dlib_landmarks.part(54).y) # Right mouth corner ], dtype=\"double\") #print(image_points)", "camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE) #print \"Rotation Vector:\\n {0}\".format(rotation_vector) #print \"Translation Vector:\\n {0}\".format(translation_vector) # Project", "Chin (place-holder for now) (landmarks[0,0], landmarks[0,1]), # Left eye left corner (landmarks[1,0], landmarks[1,1]),", "os import math import threading import time import sys import argparse from imutils.video", "the faces for i,(left,top,right,bottom) in enumerate(faces): # draw a bounding box surrounding the", "= np.array( [[focal_length, 0, center[0]], [0, focal_length, center[1]], [0, 0, 1]], dtype =", "Camera Matrix :\\n {0}\".format(camera_matrix)); # start the FPS counter fps = FPS().start() #", "nose location eye_center_x = (image_points[2][0] + image_points[3][0])/2; eye_center_y = (image_points[2][1] + image_points[3][1])/2; nose_offset_x", "\"face detector NMS threshold (default = 0.35)\") args = vars(ap.parse_args()) if not args.get(\"input\",False):", "landmarks landmarks = dpu_face_landmark.process(face) # calculate coordinates for full frame for i in", "mouth_center_y + nose_offset_y); #print(image_points) if use_dlib_landmarks == True: # extract face landmarks with", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "-150.0, -125.0) # Right mouth corner ]) # Camera internals ret,frame = cam.read()", "2D points image_points = np.array([ (landmarks[2,0], landmarks[2,1]), # Nose tip (landmarks[2,0], landmarks[2,1]), #", "the video stream while True: # Update the real-time FPS counter if rt_fps_count", 
"args.get(\"detthreshold\",False): detThreshold = 0.55 else: detThreshold = float(args[\"detthreshold\"]) print('[INFO] face detector - softmax", "camera identifier (default = 0)\") ap.add_argument(\"-d\", \"--detthreshold\", required=False, help = \"face detector softmax", "#print( startX, endX, startY, endY ) widthX = endX-startX heightY = endY-startY face", "= int(landmarks[i,1]) # cv2.circle( frame, (x,y), 3, (255,255,255), 2) # prepare 2D points", "in compliance with the License. You may obtain a copy of the License", "Left eye left corner (landmarks[1,0], landmarks[1,1]), # Right eye right corne (landmarks[3,0], landmarks[3,1]),", "ret,frame = cam.read() dlib_image = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB) faces = [] if use_dlib_detection == False:", "[--detthreshold 0.55] [--nmsthreshold 0.35] from ctypes import * from typing import List import", "170.0, -135.0), # Left eye left corner (225.0, 170.0, -135.0), # Right eye", "KIND, either express or implied. See the License for the specific language governing", "key = cv2.waitKey(1) & 0xFF # Update the FPS counter fps.update() # if", "if use_dlib_detection == True: print(\"[INFO] face detection = DLIB\") else: print(\"[INFO] face detection", "FPS information fps.stop() print(\"[INFO] elapsed time: {:.2f}\".format(fps.elapsed())) print(\"[INFO] elapsed FPS: {:.2f}\".format(fps.fps())) # Stop", "(image_points[2][0] + image_points[3][0])/2; eye_center_y = (image_points[2][1] + image_points[3][1])/2; nose_offset_x = (image_points[0][0] - eye_center_x);", "startY = int(top) endX = int(right) endY = int(bottom) #print( startX, endX, startY,", "corner ], dtype=\"double\") # estimate approximate location of chin # let's assume that", "2) # draw head pose vector p1 = ( int(image_points[0][0]), int(image_points[0][1])) p2 =", "args.get(\"nmsthreshold\",False): nmsThreshold = 0.35 else: nmsThreshold = float(args[\"nmsthreshold\"]) print('[INFO] face detector - NMS", "= ( int(image_points[0][0]), int(image_points[0][1])) p2 
= ( int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1])) cv2.line(frame, p1, p2, (255,0,0),", "in writing, software distributed under the License is distributed on an \"AS IS\"", "`l` key was pressed, toggle between landmark algorithms if key == ord(\"l\"): use_dlib_landmarks", "from ctypes import * from typing import List import cv2 import numpy as", "= True print(\"[INFO] face detection = VART\") print(\"[INFO] face landmarks = DLIB\") #", "landmarks = DLIB\") # Initialize the camera input print(\"[INFO] starting camera input ...\")", "0) for face in dlib_faces: faces.append( (face.left(),face.top(),face.right(),face.bottom()) ) #print(faces) # loop over the", "draw landmarks #for i in range(5): # x = int(landmarks[i,0]) # y =", "== ord(\"l\"): use_dlib_landmarks = not use_dlib_landmarks if use_dlib_landmarks == True: print(\"[INFO] face landmarks", "writing, software distributed under the License is distributed on an \"AS IS\" BASIS,", "ctypes import * from typing import List import cv2 import numpy as np", "the real-time FPS counter rt_fps_count = rt_fps_count + 1 if rt_fps_count >= 10:", "We use this to draw a line sticking out of the nose (nose_end_point2D,", "= (mouth_center_x + nose_offset_x, mouth_center_y + nose_offset_y); #print(image_points) if use_dlib_landmarks == True: #", "int(right) endY = int(bottom) #print( startX, endX, startY, endY ) widthX = endX-startX", "0, center[0]], [0, focal_length, center[1]], [0, 0, 1]], dtype = \"double\" ) print(\"[INFO]", "# Right eye right corne (landmarks[3,0], landmarks[3,1]), # Left Mouth corner (landmarks[4,0], landmarks[4,1])", "status = \"Status :\" status = status + \" FaceDetect=\" if use_dlib_detection ==", "the `q` key was pressed, break from the loop if key == ord(\"q\"):", "Mouth corner (dlib_landmarks.part(54).x, dlib_landmarks.part(54).y) # Right mouth corner ], dtype=\"double\") #print(image_points) # calculate", "or agreed to in writing, software distributed under the License is 
distributed on", "governing permissions and limitations under the License. ''' # USAGE # python face_headpose_dlib.py", "bounding box surrounding the object so we can # visualize it cv2.rectangle( frame,", "location of chin # let's assume that the chin location will behave similar", "if use_dlib_landmarks == True: status = status + \"DLIB\" else: status = status", "== False: # extract face landmarks landmarks = dpu_face_landmark.process(face) # calculate coordinates for", "# Right mouth corner ], dtype=\"double\") #print(image_points) # calculate head pose dist_coeffs =", "+ landmarks[i,1]*heightY # draw landmarks #for i in range(5): # x = int(landmarks[i,0])", "rotation_vector, translation_vector, camera_matrix, dist_coeffs) for p in image_points: #cv2.circle(face, (int(p[0]), int(p[1])), 3, (0,0,255),", "information fps.stop() print(\"[INFO] elapsed time: {:.2f}\".format(fps.elapsed())) print(\"[INFO] elapsed FPS: {:.2f}\".format(fps.fps())) # Stop the", "startX, endX, startY, endY ) widthX = endX-startX heightY = endY-startY face =", "center = (size[1]/2, size[0]/2) camera_matrix = np.array( [[focal_length, 0, center[0]], [0, focal_length, center[1]],", "print(\"[INFO] face landmarks = VART\") # Update the real-time FPS counter rt_fps_count =", "landmark_subgraphs = get_child_subgraph_dpu(landmark_graph) assert len(landmark_subgraphs) == 1 # only one DPU kernel landmark_dpu", "FPS().start() # init the real-time FPS display rt_fps_count = 0; rt_fps_time = cv2.getTickCount()", "1000.0)]), rotation_vector, translation_vector, camera_matrix, dist_coeffs) for p in image_points: #cv2.circle(face, (int(p[0]), int(p[1])), 3,", "len(landmark_subgraphs) == 1 # only one DPU kernel landmark_dpu = vart.Runner.create_runner(landmark_subgraphs[0],\"run\") dpu_face_landmark =", "ROI startX = int(left) startY = int(top) endX = int(right) endY = int(bottom)", "\"Rotation Vector:\\n {0}\".format(rotation_vector) #print \"Translation Vector:\\n {0}\".format(translation_vector) # 
Project a 3D point (0,", "',detThreshold) if not args.get(\"nmsthreshold\",False): nmsThreshold = 0.35 else: nmsThreshold = float(args[\"nmsthreshold\"]) print('[INFO] face", "import get_child_subgraph_dpu import dlib # construct the argument parse and parse the arguments", "0.0, 0.0), # Nose tip (0.0, -330.0, -65.0), # Chin (-225.0, 170.0, -135.0),", "NMS threshold = ',nmsThreshold) # Initialize Vitis-AI/DPU based face detector densebox_xmodel = \"/usr/share/vitis_ai_library/models/densebox_640_360/densebox_640_360.xmodel\"", "(255,255,255), 2) cv2.circle(frame, (int(p[0]), int(p[1])), 3, (255,255,255), 2) # draw head pose vector", "= \"FPS: {0:.2f}\".format(rt_fps) #print(\"[INFO] \",rt_fps_message) rt_fps_count = 0 # Stop the timer and", "(255,255,255), 2) # prepare 2D points image_points = np.array([ (dlib_landmarks.part(30).x, dlib_landmarks.part(30).y), # Nose", "FaceDetect=\" if use_dlib_detection == True: status = status + \"DLIB\" else: status =", "',inputId) if not args.get(\"detthreshold\",False): detThreshold = 0.55 else: detThreshold = float(args[\"detthreshold\"]) print('[INFO] face", "status + \"DLIB\" else: status = status + \"VART\" status = status +", "left corner (dlib_landmarks.part(45).x, dlib_landmarks.part(45).y), # Right eye right corne (dlib_landmarks.part(48).x, dlib_landmarks.part(48).y), # Left", "was pressed, toggle between landmark algorithms if key == ord(\"l\"): use_dlib_landmarks = not", "print(\"[INFO] elapsed FPS: {:.2f}\".format(fps.fps())) # Stop the face detector dpu_face_detector.stop() del densebox_dpu dpu_face_landmark.stop()", "landmarks[4,1]) # Right mouth corner ], dtype=\"double\") # estimate approximate location of chin", "= int(bottom) #print( startX, endX, startY, endY ) widthX = endX-startX heightY =", "# y = int(dlib_landmarks.part(i).y) # cv2.circle( frame, (x,y), 3, (255,255,255), 2) # prepare", "counter rt_fps_count = rt_fps_count + 1 if rt_fps_count >= 10: t = (cv2.getTickCount()", "imutils.video import FPS 
sys.path.append(os.path.abspath('../')) sys.path.append(os.path.abspath('./')) from vitis_ai_vart.facedetect import FaceDetect from vitis_ai_vart.facelandmark import FaceLandmark", "else: detThreshold = float(args[\"detthreshold\"]) print('[INFO] face detector - softmax threshold = ',detThreshold) if", "corner (150.0, -150.0, -125.0) # Right mouth corner ]) # Camera internals ret,frame", "Left eye left corner (225.0, 170.0, -135.0), # Right eye right corne (-150.0,", "image from camera ret,frame = cam.read() dlib_image = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB) faces = [] if", "algorithms if key == ord(\"l\"): use_dlib_landmarks = not use_dlib_landmarks if use_dlib_landmarks == True:", "Right eye right corne (dlib_landmarks.part(48).x, dlib_landmarks.part(48).y), # Left Mouth corner (dlib_landmarks.part(54).x, dlib_landmarks.part(54).y) #", "stream while True: # Update the real-time FPS counter if rt_fps_count == 0:", "if use_dlib_detection == True: # DLIB based face detector dlib_faces = dlib_face_detector(dlib_image, 0)", "# let's assume that the chin location will behave similar as the nose", "-1) #cv2.circle(face, (int(p[0]), int(p[1])), 3, (255,255,255), 2) cv2.circle(frame, (int(p[0]), int(p[1])), 3, (255,255,255), 2)", "faces = [] if use_dlib_detection == False: # Vitis-AI/DPU based face detector faces", "Initialize DLIB based face landmark dlib_landmark_model = \"./models/shape_predictor_68_face_landmarks.dat\" dlib_face_landmark = dlib.shape_predictor(dlib_landmark_model) # algorithm", "frame, (x,y), 3, (255,255,255), 2) # prepare 2D points image_points = np.array([ (dlib_landmarks.part(30).x,", "detection = DLIB\") else: print(\"[INFO] face detection = VART\") # if the `l`", "only one DPU kernel densebox_dpu = vart.Runner.create_runner(densebox_subgraphs[0],\"run\") dpu_face_detector = FaceDetect(densebox_dpu,detThreshold,nmsThreshold) dpu_face_detector.start() # Initialize", "= 0 # Stop the timer and display FPS information fps.stop() print(\"[INFO] 
elapsed", "Left eye left corner (dlib_landmarks.part(45).x, dlib_landmarks.part(45).y), # Right eye right corne (dlib_landmarks.part(48).x, dlib_landmarks.part(48).y),", "OR CONDITIONS OF ANY KIND, either express or implied. See the License for", "loop if key == ord(\"q\"): break # if the `d` key was pressed,", "i,(left,top,right,bottom) in enumerate(faces): # draw a bounding box surrounding the object so we", "OF ANY KIND, either express or implied. See the License for the specific", "landmarks[1,1]), # Right eye right corne (landmarks[3,0], landmarks[3,1]), # Left Mouth corner (landmarks[4,0],", "corner (landmarks[4,0], landmarks[4,1]) # Right mouth corner ], dtype=\"double\") # estimate approximate location", "landmarks #for i in range(5): # x = int(landmarks[i,0]) # y = int(landmarks[i,1])", "\"--input\", required=False, help = \"input camera identifier (default = 0)\") ap.add_argument(\"-d\", \"--detthreshold\", required=False,", "object so we can # visualize it cv2.rectangle( frame, (left,top), (right,bottom), (0,255,0), 2)", "# draw landmarks #for i in range(dlib_landmarks.num_parts): # x = int(dlib_landmarks.part(i).x) # y", "3, (255,255,255), 2) cv2.circle(frame, (int(p[0]), int(p[1])), 3, (255,255,255), 2) # draw head pose", "dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE) #print \"Rotation Vector:\\n {0}\".format(rotation_vector) #print \"Translation Vector:\\n {0}\".format(translation_vector) # Project a", "counter fps = FPS().start() # init the real-time FPS display rt_fps_count = 0;", "status + \"DLIB\" else: status = status + \"VART\" if rt_fps_valid == True:", "(dlib_landmarks.part(48).x, dlib_landmarks.part(48).y), # Left Mouth corner (dlib_landmarks.part(54).x, dlib_landmarks.part(54).y) # Right mouth corner ],", "for now) (landmarks[0,0], landmarks[0,1]), # Left eye left corner (landmarks[1,0], landmarks[1,1]), # Right", "detector NMS threshold (default = 0.35)\") args = vars(ap.parse_args()) if not args.get(\"input\",False): inputId", "print('[INFO] 
face detector - softmax threshold = ',detThreshold) if not args.get(\"nmsthreshold\",False): nmsThreshold =", "Update the FPS counter fps.update() # if the `q` key was pressed, break", "...\") cam = cv2.VideoCapture(inputId) cam.set(cv2.CAP_PROP_FRAME_WIDTH,640) cam.set(cv2.CAP_PROP_FRAME_HEIGHT,480) if not (cam.isOpened()): print(\"[ERROR] Failed to open", "int(left) startY = int(top) endX = int(right) endY = int(bottom) #print( startX, endX,", "else: nmsThreshold = float(args[\"nmsthreshold\"]) print('[INFO] face detector - NMS threshold = ',nmsThreshold) #", "cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE) #print \"Rotation Vector:\\n {0}\".format(rotation_vector) #print \"Translation Vector:\\n {0}\".format(translation_vector)", "not args.get(\"detthreshold\",False): detThreshold = 0.55 else: detThreshold = float(args[\"detthreshold\"]) print('[INFO] face detector -", "status = status + \"DLIB\" else: status = status + \"VART\" status =", "may not use this file except in compliance with the License. You may", "Mouth corner (150.0, -150.0, -125.0) # Right mouth corner ]) # Camera internals", "DLIB\") else: print(\"[INFO] face landmarks = VART\") # Update the real-time FPS counter", "estimate approximate location of chin # let's assume that the chin location will", "under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "startX + landmarks[i,0]*widthX landmarks[i,1] = startY + landmarks[i,1]*heightY # draw landmarks #for i", "0, 1000.0) onto the image plane. 
# We use this to draw a", "0 else: inputId = int(args[\"input\"]) print('[INFO] input camera identifier = ',inputId) if not", "from vitis_ai_vart.facelandmark import FaceLandmark from vitis_ai_vart.utils import get_child_subgraph_dpu import dlib # construct the", "right corne (-150.0, -150.0, -125.0), # Left Mouth corner (150.0, -150.0, -125.0) #", "== True: # DLIB based face detector dlib_faces = dlib_face_detector(dlib_image, 0) for face", "frame for i in range(5): landmarks[i,0] = startX + landmarks[i,0]*widthX landmarks[i,1] = startY", "= int(dlib_landmarks.part(i).x) # y = int(dlib_landmarks.part(i).y) # cv2.circle( frame, (x,y), 3, (255,255,255), 2)", "0 # Stop the timer and display FPS information fps.stop() print(\"[INFO] elapsed time:", "else: inputId = int(args[\"input\"]) print('[INFO] input camera identifier = ',inputId) if not args.get(\"detthreshold\",False):", "# loop over the frames from the video stream while True: # Update", "on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "tip (landmarks[2,0], landmarks[2,1]), # Chin (place-holder for now) (landmarks[0,0], landmarks[0,1]), # Left eye", "p in image_points: #cv2.circle(face, (int(p[0]), int(p[1])), 3, (0,0,255), -1) #cv2.circle(face, (int(p[0]), int(p[1])), 3,", "rt_fps_y = size[0]-10 # loop over the frames from the video stream while", "from the video stream while True: # Update the real-time FPS counter if", "# estimate approximate location of chin # let's assume that the chin location", "int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1])) cv2.line(frame, p1, p2, (255,0,0), 2) # Display Status status = \"Status", "== False: # Vitis-AI/DPU based face detector faces = dpu_face_detector.process(frame) #print(faces) if use_dlib_detection", "dlib_landmarks.part(54).y) # Right mouth corner ], dtype=\"double\") #print(image_points) # calculate head pose dist_coeffs", "landmark dlib_landmark_model = \"./models/shape_predictor_68_face_landmarks.dat\" 
dlib_face_landmark = dlib.shape_predictor(dlib_landmark_model) # algorithm selection use_dlib_detection = False", "# Left eye left corner (dlib_landmarks.part(45).x, dlib_landmarks.part(45).y), # Right eye right corne (dlib_landmarks.part(48).x,", "import time import sys import argparse from imutils.video import FPS sys.path.append(os.path.abspath('../')) sys.path.append(os.path.abspath('./')) from", "Camera internals ret,frame = cam.read() size=frame.shape focal_length = size[1] center = (size[1]/2, size[0]/2)", "sticking out of the nose (nose_end_point2D, jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector, translation_vector,", "(dlib_landmarks.part( 8).x, dlib_landmarks.part( 8).y), # Chin (dlib_landmarks.part(36).x, dlib_landmarks.part(36).y), # Left eye left corner", "Display the processed image cv2.imshow(\"Head Pose Estimation\", frame) key = cv2.waitKey(1) & 0xFF", "# Right eye right corne (dlib_landmarks.part(48).x, dlib_landmarks.part(48).y), # Left Mouth corner (dlib_landmarks.part(54).x, dlib_landmarks.part(54).y)", "if key == ord(\"q\"): break # if the `d` key was pressed, toggle", "True: status = status + \" \" + rt_fps_message cv2.putText(frame, status, (rt_fps_x,rt_fps_y), cv2.FONT_HERSHEY_SIMPLEX,", "if use_dlib_detection == True: status = status + \"DLIB\" else: status = status", "+ \"DLIB\" else: status = status + \"VART\" status = status + \"", "( int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1])) cv2.line(frame, p1, p2, (255,0,0), 2) # Display Status status =", "# calculate coordinates for full frame for i in range(5): landmarks[i,0] = startX", "rt_fps_valid == True: status = status + \" \" + rt_fps_message cv2.putText(frame, status,", "no lens distortion (success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE) #print", "as np import vart import pathlib import xir import os import math import", "= 0.55)\") 
ap.add_argument(\"-n\", \"--nmsthreshold\", required=False, help = \"face detector NMS threshold (default =", "DLIB based face detector dlib_face_detector = dlib.get_frontal_face_detector() # Initialize DLIB based face landmark", "[0, 0, 1]], dtype = \"double\" ) print(\"[INFO] Camera Matrix :\\n {0}\".format(camera_matrix)); #", "face detector densebox_xmodel = \"/usr/share/vitis_ai_library/models/densebox_640_360/densebox_640_360.xmodel\" densebox_graph = xir.Graph.deserialize(densebox_xmodel) densebox_subgraphs = get_child_subgraph_dpu(densebox_graph) assert len(densebox_subgraphs)", "landmarks with DLIB dlib_rect = dlib.rectangle( startX,startY,endX,endY ) dlib_landmarks = dlib_face_landmark(dlib_image,dlib_rect) # draw", "out of the nose (nose_end_point2D, jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector, translation_vector, camera_matrix,", "= 0 else: inputId = int(args[\"input\"]) print('[INFO] input camera identifier = ',inputId) if", "threading import time import sys import argparse from imutils.video import FPS sys.path.append(os.path.abspath('../')) sys.path.append(os.path.abspath('./'))", "# Chin (dlib_landmarks.part(36).x, dlib_landmarks.part(36).y), # Left eye left corner (dlib_landmarks.part(45).x, dlib_landmarks.part(45).y), # Right", "if key == ord(\"d\"): use_dlib_detection = not use_dlib_detection if use_dlib_detection == True: print(\"[INFO]", "toggle between landmark algorithms if key == ord(\"l\"): use_dlib_landmarks = not use_dlib_landmarks if", "fps.stop() print(\"[INFO] elapsed time: {:.2f}\".format(fps.elapsed())) print(\"[INFO] elapsed FPS: {:.2f}\".format(fps.fps())) # Stop the face", "np import vart import pathlib import xir import os import math import threading", ") exit() # 3D model points. 
model_points = np.array([ (0.0, 0.0, 0.0), #", "= (image_points[2][0] + image_points[3][0])/2; eye_center_y = (image_points[2][1] + image_points[3][1])/2; nose_offset_x = (image_points[0][0] -", "from vitis_ai_vart.utils import get_child_subgraph_dpu import dlib # construct the argument parse and parse", "(cam.isOpened()): print(\"[ERROR] Failed to open camera \", inputId ) exit() # 3D model", "-150.0, -125.0), # Left Mouth corner (150.0, -150.0, -125.0) # Right mouth corner", "camera input ...\") cam = cv2.VideoCapture(inputId) cam.set(cv2.CAP_PROP_FRAME_WIDTH,640) cam.set(cv2.CAP_PROP_FRAME_HEIGHT,480) if not (cam.isOpened()): print(\"[ERROR] Failed", "in range(dlib_landmarks.num_parts): # x = int(dlib_landmarks.part(i).x) # y = int(dlib_landmarks.part(i).y) # cv2.circle( frame,", "\"FPS: {0:.2f}\".format(rt_fps) #print(\"[INFO] \",rt_fps_message) rt_fps_count = 0 # Stop the timer and display", "permissions and limitations under the License. ''' # USAGE # python face_headpose_dlib.py [--input", "= ',detThreshold) if not args.get(\"nmsthreshold\",False): nmsThreshold = 0.35 else: nmsThreshold = float(args[\"nmsthreshold\"]) print('[INFO]", "vitis_ai_vart.facedetect import FaceDetect from vitis_ai_vart.facelandmark import FaceLandmark from vitis_ai_vart.utils import get_child_subgraph_dpu import dlib", "args.get(\"input\",False): inputId = 0 else: inputId = int(args[\"input\"]) print('[INFO] input camera identifier =", "selection use_dlib_detection = False use_dlib_landmarks = True print(\"[INFO] face detection = VART\") print(\"[INFO]", "extract face landmarks landmarks = dpu_face_landmark.process(face) # calculate coordinates for full frame for", "argparse from imutils.video import FPS sys.path.append(os.path.abspath('../')) sys.path.append(os.path.abspath('./')) from vitis_ai_vart.facedetect import FaceDetect from vitis_ai_vart.facelandmark", "inputId = int(args[\"input\"]) print('[INFO] input camera identifier = ',inputId) if not 
args.get(\"detthreshold\",False): detThreshold", "See the License for the specific language governing permissions and limitations under the", "widthX = endX-startX heightY = endY-startY face = frame[startY:endY, startX:endX] if use_dlib_landmarks ==", "= dlib_face_landmark(dlib_image,dlib_rect) # draw landmarks #for i in range(dlib_landmarks.num_parts): # x = int(dlib_landmarks.part(i).x)", "# Camera internals ret,frame = cam.read() size=frame.shape focal_length = size[1] center = (size[1]/2,", "print(\"[INFO] starting camera input ...\") cam = cv2.VideoCapture(inputId) cam.set(cv2.CAP_PROP_FRAME_WIDTH,640) cam.set(cv2.CAP_PROP_FRAME_HEIGHT,480) if not (cam.isOpened()):", "start the FPS counter fps = FPS().start() # init the real-time FPS display", "use_dlib_landmarks == True: status = status + \"DLIB\" else: status = status +", "status = status + \" \" + rt_fps_message cv2.putText(frame, status, (rt_fps_x,rt_fps_y), cv2.FONT_HERSHEY_SIMPLEX, 0.5,", "timer and display FPS information fps.stop() print(\"[INFO] elapsed time: {:.2f}\".format(fps.elapsed())) print(\"[INFO] elapsed FPS:", "get_child_subgraph_dpu(densebox_graph) assert len(densebox_subgraphs) == 1 # only one DPU kernel densebox_dpu = vart.Runner.create_runner(densebox_subgraphs[0],\"run\")", "Right mouth corner ], dtype=\"double\") #print(image_points) # calculate head pose dist_coeffs = np.zeros((4,1))", "Nose tip (landmarks[2,0], landmarks[2,1]), # Chin (place-holder for now) (landmarks[0,0], landmarks[0,1]), # Left", "np.zeros((4,1)) # Assuming no lens distortion (success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix,", "over the frames from the video stream while True: # Update the real-time", "1]], dtype = \"double\" ) print(\"[INFO] Camera Matrix :\\n {0}\".format(camera_matrix)); # start the", "= \"Status :\" status = status + \" FaceDetect=\" if use_dlib_detection == True:", "line sticking out of the nose (nose_end_point2D, jacobian) = 
cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector,", "], dtype=\"double\") # estimate approximate location of chin # let's assume that the", "VART\") # if the `l` key was pressed, toggle between landmark algorithms if", "= np.zeros((4,1)) # Assuming no lens distortion (success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points,", "''' Copyright 2021 Avnet Inc. Licensed under the Apache License, Version 2.0 (the", "\"--detthreshold\", required=False, help = \"face detector softmax threshold (default = 0.55)\") ap.add_argument(\"-n\", \"--nmsthreshold\",", "dlib_face_landmark = dlib.shape_predictor(dlib_landmark_model) # algorithm selection use_dlib_detection = False use_dlib_landmarks = True print(\"[INFO]", "]) # Camera internals ret,frame = cam.read() size=frame.shape focal_length = size[1] center =", "video stream while True: # Update the real-time FPS counter if rt_fps_count ==", "+ image_points[3][0])/2; eye_center_y = (image_points[2][1] + image_points[3][1])/2; nose_offset_x = (image_points[0][0] - eye_center_x); nose_offset_y", "corne (-150.0, -150.0, -125.0), # Left Mouth corner (150.0, -150.0, -125.0) # Right", "it cv2.rectangle( frame, (left,top), (right,bottom), (0,255,0), 2) # extract the face ROI startX", "landmarks #for i in range(dlib_landmarks.num_parts): # x = int(dlib_landmarks.part(i).x) # y = int(dlib_landmarks.part(i).y)", "0.55 else: detThreshold = float(args[\"detthreshold\"]) print('[INFO] face detector - softmax threshold = ',detThreshold)", "for face in dlib_faces: faces.append( (face.left(),face.top(),face.right(),face.bottom()) ) #print(faces) # loop over the faces", "rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE) #print \"Rotation Vector:\\n {0}\".format(rotation_vector) #print", "# draw head pose vector p1 = ( int(image_points[0][0]), int(image_points[0][1])) p2 = (", "numpy as np import vart import 
pathlib import xir import os import math", "= endY-startY face = frame[startY:endY, startX:endX] if use_dlib_landmarks == False: # extract face", "this file except in compliance with the License. You may obtain a copy", "\"DLIB\" else: status = status + \"VART\" if rt_fps_valid == True: status =", "= not use_dlib_landmarks if use_dlib_landmarks == True: print(\"[INFO] face landmarks = DLIB\") else:", "2) # Display Status status = \"Status :\" status = status + \"", "inputId = 0 else: inputId = int(args[\"input\"]) print('[INFO] input camera identifier = ',inputId)", "to draw a line sticking out of the nose (nose_end_point2D, jacobian) = cv2.projectPoints(np.array([(0.0,", "\"License\"); you may not use this file except in compliance with the License.", "int(nose_end_point2D[0][0][1])) cv2.line(frame, p1, p2, (255,0,0), 2) # Display Status status = \"Status :\"", "detector densebox_xmodel = \"/usr/share/vitis_ai_library/models/densebox_640_360/densebox_640_360.xmodel\" densebox_graph = xir.Graph.deserialize(densebox_xmodel) densebox_subgraphs = get_child_subgraph_dpu(densebox_graph) assert len(densebox_subgraphs) ==", "License for the specific language governing permissions and limitations under the License. '''", "loop over the faces for i,(left,top,right,bottom) in enumerate(faces): # draw a bounding box", "is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY", "image plane. # We use this to draw a line sticking out of", "let's assume that the chin location will behave similar as the nose location", "vart import pathlib import xir import os import math import threading import time", "0.35 else: nmsThreshold = float(args[\"nmsthreshold\"]) print('[INFO] face detector - NMS threshold = ',nmsThreshold)", "(0.0, -330.0, -65.0), # Chin (-225.0, 170.0, -135.0), # Left eye left corner", "you may not use this file except in compliance with the License. 
You", "FPS counter if rt_fps_count == 0: rt_fps_time = cv2.getTickCount() # Capture image from", "break from the loop if key == ord(\"q\"): break # if the `d`", "nose (nose_end_point2D, jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector, translation_vector, camera_matrix, dist_coeffs) for p", "#print(\"[INFO] \",rt_fps_message) rt_fps_count = 0 # Stop the timer and display FPS information", "agreed to in writing, software distributed under the License is distributed on an", "USAGE # python face_headpose_dlib.py [--input 0] [--detthreshold 0.55] [--nmsthreshold 0.35] from ctypes import", "(dlib_landmarks.part(36).x, dlib_landmarks.part(36).y), # Left eye left corner (dlib_landmarks.part(45).x, dlib_landmarks.part(45).y), # Right eye right", "== ord(\"q\"): break # if the `d` key was pressed, toggle between detection", "of the nose (nose_end_point2D, jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector, translation_vector, camera_matrix, dist_coeffs)", "image_points[1] = (mouth_center_x + nose_offset_x, mouth_center_y + nose_offset_y); #print(image_points) if use_dlib_landmarks == True:", "face detection = DLIB\") else: print(\"[INFO] face detection = VART\") # if the", "approximate location of chin # let's assume that the chin location will behave", "distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES", "detector - softmax threshold = ',detThreshold) if not args.get(\"nmsthreshold\",False): nmsThreshold = 0.35 else:", ") widthX = endX-startX heightY = endY-startY face = frame[startY:endY, startX:endX] if use_dlib_landmarks", "= VART\") print(\"[INFO] face landmarks = DLIB\") # Initialize the camera input print(\"[INFO]", "translation_vector, camera_matrix, dist_coeffs) for p in image_points: #cv2.circle(face, (int(p[0]), int(p[1])), 3, (0,0,255), -1)", "cv2.line(frame, p1, p2, (255,0,0), 2) # Display Status status = \"Status :\" status", "FPS: {:.2f}\".format(fps.fps())) # Stop 
the face detector dpu_face_detector.stop() del densebox_dpu dpu_face_landmark.stop() del landmark_dpu", "= status + \"VART\" status = status + \" Landmark=\" if use_dlib_landmarks ==", "rt_fps_valid = False rt_fps = 0.0 rt_fps_message = \"FPS: {0:.2f}\".format(rt_fps) rt_fps_x = 10", "extract face landmarks with DLIB dlib_rect = dlib.rectangle( startX,startY,endX,endY ) dlib_landmarks = dlib_face_landmark(dlib_image,dlib_rect)", "may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable", "dlib_landmark_model = \"./models/shape_predictor_68_face_landmarks.dat\" dlib_face_landmark = dlib.shape_predictor(dlib_landmark_model) # algorithm selection use_dlib_detection = False use_dlib_landmarks", "not args.get(\"nmsthreshold\",False): nmsThreshold = 0.35 else: nmsThreshold = float(args[\"nmsthreshold\"]) print('[INFO] face detector -", "# Nose tip (dlib_landmarks.part( 8).x, dlib_landmarks.part( 8).y), # Chin (dlib_landmarks.part(36).x, dlib_landmarks.part(36).y), # Left", "= cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE) #print \"Rotation Vector:\\n {0}\".format(rotation_vector) #print \"Translation Vector:\\n", "Project a 3D point (0, 0, 1000.0) onto the image plane. # We", "= status + \" Landmark=\" if use_dlib_landmarks == True: status = status +", "= 0)\") ap.add_argument(\"-d\", \"--detthreshold\", required=False, help = \"face detector softmax threshold (default =", "implied. 
See the License for the specific language governing permissions and limitations under", "eye right corne (landmarks[3,0], landmarks[3,1]), # Left Mouth corner (landmarks[4,0], landmarks[4,1]) # Right", "Vitis-AI/DPU based face detector densebox_xmodel = \"/usr/share/vitis_ai_library/models/densebox_640_360/densebox_640_360.xmodel\" densebox_graph = xir.Graph.deserialize(densebox_xmodel) densebox_subgraphs = get_child_subgraph_dpu(densebox_graph)", "the real-time FPS display rt_fps_count = 0; rt_fps_time = cv2.getTickCount() rt_fps_valid = False", "landmark landmark_xmodel = \"/usr/share/vitis_ai_library/models/face_landmark/face_landmark.xmodel\" landmark_graph = xir.Graph.deserialize(landmark_xmodel) landmark_subgraphs = get_child_subgraph_dpu(landmark_graph) assert len(landmark_subgraphs) ==", "the `l` key was pressed, toggle between landmark algorithms if key == ord(\"l\"):", "#print(image_points) if use_dlib_landmarks == True: # extract face landmarks with DLIB dlib_rect =", "detection algorithms if key == ord(\"d\"): use_dlib_detection = not use_dlib_detection if use_dlib_detection ==", "= DLIB\") # Initialize the camera input print(\"[INFO] starting camera input ...\") cam", "3, (0,0,255), -1) #cv2.circle(face, (int(p[0]), int(p[1])), 3, (255,255,255), 2) cv2.circle(frame, (int(p[0]), int(p[1])), 3,", "\"./models/shape_predictor_68_face_landmarks.dat\" dlib_face_landmark = dlib.shape_predictor(dlib_landmark_model) # algorithm selection use_dlib_detection = False use_dlib_landmarks = True", "algorithm selection use_dlib_detection = False use_dlib_landmarks = True print(\"[INFO] face detection = VART\")", "inputId ) exit() # 3D model points. 
model_points = np.array([ (0.0, 0.0, 0.0),", "= cv2.cvtColor(frame,cv2.COLOR_BGR2RGB) faces = [] if use_dlib_detection == False: # Vitis-AI/DPU based face", "2) # prepare 2D points image_points = np.array([ (landmarks[2,0], landmarks[2,1]), # Nose tip", "threshold (default = 0.35)\") args = vars(ap.parse_args()) if not args.get(\"input\",False): inputId = 0", "# Chin (-225.0, 170.0, -135.0), # Left eye left corner (225.0, 170.0, -135.0),", "init the real-time FPS display rt_fps_count = 0; rt_fps_time = cv2.getTickCount() rt_fps_valid =", "the FPS counter fps = FPS().start() # init the real-time FPS display rt_fps_count", "# prepare 2D points image_points = np.array([ (landmarks[2,0], landmarks[2,1]), # Nose tip (landmarks[2,0],", "image_points[5][1])/2; image_points[1] = (mouth_center_x + nose_offset_x, mouth_center_y + nose_offset_y); #print(image_points) if use_dlib_landmarks ==", "dlib # construct the argument parse and parse the arguments ap = argparse.ArgumentParser()", "= dpu_face_landmark.process(face) # calculate coordinates for full frame for i in range(5): landmarks[i,0]", "= not use_dlib_detection if use_dlib_detection == True: print(\"[INFO] face detection = DLIB\") else:", "DLIB\") # Initialize the camera input print(\"[INFO] starting camera input ...\") cam =", "True print(\"[INFO] face detection = VART\") print(\"[INFO] face landmarks = DLIB\") # Initialize", "args = vars(ap.parse_args()) if not args.get(\"input\",False): inputId = 0 else: inputId = int(args[\"input\"])", "= dpu_face_detector.process(frame) #print(faces) if use_dlib_detection == True: # DLIB based face detector dlib_faces", "and display FPS information fps.stop() print(\"[INFO] elapsed time: {:.2f}\".format(fps.elapsed())) print(\"[INFO] elapsed FPS: {:.2f}\".format(fps.fps()))", "between detection algorithms if key == ord(\"d\"): use_dlib_detection = not use_dlib_detection if use_dlib_detection", "List import cv2 import numpy as np import vart import pathlib import xir", "time 
import sys import argparse from imutils.video import FPS sys.path.append(os.path.abspath('../')) sys.path.append(os.path.abspath('./')) from vitis_ai_vart.facedetect", "p1 = ( int(image_points[0][0]), int(image_points[0][1])) p2 = ( int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1])) cv2.line(frame, p1, p2,", "== ord(\"d\"): use_dlib_detection = not use_dlib_detection if use_dlib_detection == True: print(\"[INFO] face detection", "rt_fps_time = cv2.getTickCount() rt_fps_valid = False rt_fps = 0.0 rt_fps_message = \"FPS: {0:.2f}\".format(rt_fps)", "print(\"[INFO] face detection = DLIB\") else: print(\"[INFO] face detection = VART\") # if", "use this file except in compliance with the License. You may obtain a", "# python face_headpose_dlib.py [--input 0] [--detthreshold 0.55] [--nmsthreshold 0.35] from ctypes import *", "FaceLandmark(landmark_dpu) dpu_face_landmark.start() # Initialize DLIB based face detector dlib_face_detector = dlib.get_frontal_face_detector() # Initialize", "# 3D model points. 
model_points = np.array([ (0.0, 0.0, 0.0), # Nose tip", "of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to", "points image_points = np.array([ (landmarks[2,0], landmarks[2,1]), # Nose tip (landmarks[2,0], landmarks[2,1]), # Chin", "+ nose_offset_y); #print(image_points) if use_dlib_landmarks == True: # extract face landmarks with DLIB", "# if the `d` key was pressed, toggle between detection algorithms if key", "= \"FPS: {0:.2f}\".format(rt_fps) rt_fps_x = 10 rt_fps_y = size[0]-10 # loop over the", "if rt_fps_count >= 10: t = (cv2.getTickCount() - rt_fps_time)/cv2.getTickFrequency() rt_fps_valid = True rt_fps", "Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use", "dlib_image = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB) faces = [] if use_dlib_detection == False: # Vitis-AI/DPU based", "behave similar as the nose location eye_center_x = (image_points[2][0] + image_points[3][0])/2; eye_center_y =", "= True rt_fps = 10.0/t rt_fps_message = \"FPS: {0:.2f}\".format(rt_fps) #print(\"[INFO] \",rt_fps_message) rt_fps_count =", "focal_length = size[1] center = (size[1]/2, size[0]/2) camera_matrix = np.array( [[focal_length, 0, center[0]],", "(rt_fps_x,rt_fps_y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 1, cv2.LINE_AA) # Display the processed image cv2.imshow(\"Head Pose", "size[1] center = (size[1]/2, size[0]/2) camera_matrix = np.array( [[focal_length, 0, center[0]], [0, focal_length,", "the frames from the video stream while True: # Update the real-time FPS", "not use_dlib_detection if use_dlib_detection == True: print(\"[INFO] face detection = DLIB\") else: print(\"[INFO]", "plane. 
# We use this to draw a line sticking out of the", "1 # only one DPU kernel densebox_dpu = vart.Runner.create_runner(densebox_subgraphs[0],\"run\") dpu_face_detector = FaceDetect(densebox_dpu,detThreshold,nmsThreshold) dpu_face_detector.start()", "Initialize DLIB based face detector dlib_face_detector = dlib.get_frontal_face_detector() # Initialize DLIB based face", "= status + \"VART\" if rt_fps_valid == True: status = status + \"", "= (cv2.getTickCount() - rt_fps_time)/cv2.getTickFrequency() rt_fps_valid = True rt_fps = 10.0/t rt_fps_message = \"FPS:", "a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or", "get_child_subgraph_dpu import dlib # construct the argument parse and parse the arguments ap", "(landmarks[3,0], landmarks[3,1]), # Left Mouth corner (landmarks[4,0], landmarks[4,1]) # Right mouth corner ],", "specific language governing permissions and limitations under the License. ''' # USAGE #", "required=False, help = \"face detector NMS threshold (default = 0.35)\") args = vars(ap.parse_args())", "y = int(landmarks[i,1]) # cv2.circle( frame, (x,y), 3, (255,255,255), 2) # prepare 2D", "= int(dlib_landmarks.part(i).y) # cv2.circle( frame, (x,y), 3, (255,255,255), 2) # prepare 2D points", "key == ord(\"q\"): break # if the `d` key was pressed, toggle between", "& 0xFF # Update the FPS counter fps.update() # if the `q` key", "+ \"VART\" status = status + \" Landmark=\" if use_dlib_landmarks == True: status", "= FPS().start() # init the real-time FPS display rt_fps_count = 0; rt_fps_time =", "input camera identifier = ',inputId) if not args.get(\"detthreshold\",False): detThreshold = 0.55 else: detThreshold", "use_dlib_detection = False use_dlib_landmarks = True print(\"[INFO] face detection = VART\") print(\"[INFO] face", "= DLIB\") else: print(\"[INFO] face landmarks = VART\") # Update the real-time FPS", "(success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, 
camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE) #print \"Rotation Vector:\\n {0}\".format(rotation_vector)", "between landmark algorithms if key == ord(\"l\"): use_dlib_landmarks = not use_dlib_landmarks if use_dlib_landmarks", "+ image_points[3][1])/2; nose_offset_x = (image_points[0][0] - eye_center_x); nose_offset_y = (image_points[0][1] - eye_center_y); mouth_center_x", "(image_points[4][0] + image_points[5][0])/2; mouth_center_y = (image_points[4][1] + image_points[5][1])/2; image_points[1] = (mouth_center_x + nose_offset_x,", "Stop the face detector dpu_face_detector.stop() del densebox_dpu dpu_face_landmark.stop() del landmark_dpu # Cleanup cv2.destroyAllWindows()", "print(\"[ERROR] Failed to open camera \", inputId ) exit() # 3D model points.", "xir.Graph.deserialize(densebox_xmodel) densebox_subgraphs = get_child_subgraph_dpu(densebox_graph) assert len(densebox_subgraphs) == 1 # only one DPU kernel", "= FaceLandmark(landmark_dpu) dpu_face_landmark.start() # Initialize DLIB based face detector dlib_face_detector = dlib.get_frontal_face_detector() #", "cam.read() dlib_image = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB) faces = [] if use_dlib_detection == False: # Vitis-AI/DPU", "#for i in range(5): # x = int(landmarks[i,0]) # y = int(landmarks[i,1]) #", "elapsed FPS: {:.2f}\".format(fps.fps())) # Stop the face detector dpu_face_detector.stop() del densebox_dpu dpu_face_landmark.stop() del", "kernel densebox_dpu = vart.Runner.create_runner(densebox_subgraphs[0],\"run\") dpu_face_detector = FaceDetect(densebox_dpu,detThreshold,nmsThreshold) dpu_face_detector.start() # Initialize Vitis-AI/DPU based face", "status = status + \"VART\" if rt_fps_valid == True: status = status +", "required by applicable law or agreed to in writing, software distributed under the", "(255,255,255), 2) # prepare 2D points image_points = np.array([ (landmarks[2,0], landmarks[2,1]), # Nose", "(size[1]/2, size[0]/2) camera_matrix = np.array( [[focal_length, 0, center[0]], [0, 
focal_length, center[1]], [0, 0,", "startY, endY ) widthX = endX-startX heightY = endY-startY face = frame[startY:endY, startX:endX]", "Status status = \"Status :\" status = status + \" FaceDetect=\" if use_dlib_detection", "nose_offset_x = (image_points[0][0] - eye_center_x); nose_offset_y = (image_points[0][1] - eye_center_y); mouth_center_x = (image_points[4][0]", "# y = int(landmarks[i,1]) # cv2.circle( frame, (x,y), 3, (255,255,255), 2) # prepare", "\"VART\" if rt_fps_valid == True: status = status + \" \" + rt_fps_message", "( int(image_points[0][0]), int(image_points[0][1])) p2 = ( int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1])) cv2.line(frame, p1, p2, (255,0,0), 2)", "\"--nmsthreshold\", required=False, help = \"face detector NMS threshold (default = 0.35)\") args =", "dtype = \"double\" ) print(\"[INFO] Camera Matrix :\\n {0}\".format(camera_matrix)); # start the FPS", "if rt_fps_valid == True: status = status + \" \" + rt_fps_message cv2.putText(frame,", "draw head pose vector p1 = ( int(image_points[0][0]), int(image_points[0][1])) p2 = ( int(nose_end_point2D[0][0][0]),", "densebox_xmodel = \"/usr/share/vitis_ai_library/models/densebox_640_360/densebox_640_360.xmodel\" densebox_graph = xir.Graph.deserialize(densebox_xmodel) densebox_subgraphs = get_child_subgraph_dpu(densebox_graph) assert len(densebox_subgraphs) == 1", "# Update the real-time FPS counter if rt_fps_count == 0: rt_fps_time = cv2.getTickCount()", "landmarks[i,0] = startX + landmarks[i,0]*widthX landmarks[i,1] = startY + landmarks[i,1]*heightY # draw landmarks", "assert len(densebox_subgraphs) == 1 # only one DPU kernel densebox_dpu = vart.Runner.create_runner(densebox_subgraphs[0],\"run\") dpu_face_detector", "corner ]) # Camera internals ret,frame = cam.read() size=frame.shape focal_length = size[1] center", "based face detector densebox_xmodel = \"/usr/share/vitis_ai_library/models/densebox_640_360/densebox_640_360.xmodel\" densebox_graph = 
xir.Graph.deserialize(densebox_xmodel) densebox_subgraphs = get_child_subgraph_dpu(densebox_graph) assert", "softmax threshold (default = 0.55)\") ap.add_argument(\"-n\", \"--nmsthreshold\", required=False, help = \"face detector NMS", "lens distortion (success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE) #print \"Rotation", "print('[INFO] input camera identifier = ',inputId) if not args.get(\"detthreshold\",False): detThreshold = 0.55 else:", "cv2.circle(frame, (int(p[0]), int(p[1])), 3, (255,255,255), 2) # draw head pose vector p1 =", "eye_center_y = (image_points[2][1] + image_points[3][1])/2; nose_offset_x = (image_points[0][0] - eye_center_x); nose_offset_y = (image_points[0][1]", "import vart import pathlib import xir import os import math import threading import", "if rt_fps_count == 0: rt_fps_time = cv2.getTickCount() # Capture image from camera ret,frame", "Landmark=\" if use_dlib_landmarks == True: status = status + \"DLIB\" else: status =", "face detector - NMS threshold = ',nmsThreshold) # Initialize Vitis-AI/DPU based face detector", "int(top) endX = int(right) endY = int(bottom) #print( startX, endX, startY, endY )", "= get_child_subgraph_dpu(landmark_graph) assert len(landmark_subgraphs) == 1 # only one DPU kernel landmark_dpu =", "over the faces for i,(left,top,right,bottom) in enumerate(faces): # draw a bounding box surrounding", "dlib_face_landmark(dlib_image,dlib_rect) # draw landmarks #for i in range(dlib_landmarks.num_parts): # x = int(dlib_landmarks.part(i).x) #", "landmarks = DLIB\") else: print(\"[INFO] face landmarks = VART\") # Update the real-time", "use_dlib_detection == True: # DLIB based face detector dlib_faces = dlib_face_detector(dlib_image, 0) for", "FaceDetect from vitis_ai_vart.facelandmark import FaceLandmark from vitis_ai_vart.utils import get_child_subgraph_dpu import dlib # construct", "# x = int(dlib_landmarks.part(i).x) # y = 
int(dlib_landmarks.part(i).y) # cv2.circle( frame, (x,y), 3,", "required=False, help = \"face detector softmax threshold (default = 0.55)\") ap.add_argument(\"-n\", \"--nmsthreshold\", required=False,", "threshold = ',nmsThreshold) # Initialize Vitis-AI/DPU based face detector densebox_xmodel = \"/usr/share/vitis_ai_library/models/densebox_640_360/densebox_640_360.xmodel\" densebox_graph", "# Right eye right corne (-150.0, -150.0, -125.0), # Left Mouth corner (150.0,", "License. ''' # USAGE # python face_headpose_dlib.py [--input 0] [--detthreshold 0.55] [--nmsthreshold 0.35]", "Assuming no lens distortion (success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE)", "softmax threshold = ',detThreshold) if not args.get(\"nmsthreshold\",False): nmsThreshold = 0.35 else: nmsThreshold =", "based face detector dlib_faces = dlib_face_detector(dlib_image, 0) for face in dlib_faces: faces.append( (face.left(),face.top(),face.right(),face.bottom())", "3, (255,255,255), 2) # prepare 2D points image_points = np.array([ (landmarks[2,0], landmarks[2,1]), #", "rt_fps = 0.0 rt_fps_message = \"FPS: {0:.2f}\".format(rt_fps) rt_fps_x = 10 rt_fps_y = size[0]-10", "distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "the camera input print(\"[INFO] starting camera input ...\") cam = cv2.VideoCapture(inputId) cam.set(cv2.CAP_PROP_FRAME_WIDTH,640) cam.set(cv2.CAP_PROP_FRAME_HEIGHT,480)", "model points. model_points = np.array([ (0.0, 0.0, 0.0), # Nose tip (0.0, -330.0,", "was pressed, break from the loop if key == ord(\"q\"): break # if", "= \"double\" ) print(\"[INFO] Camera Matrix :\\n {0}\".format(camera_matrix)); # start the FPS counter", "Matrix :\\n {0}\".format(camera_matrix)); # start the FPS counter fps = FPS().start() # init", "not use this file except in compliance with the License. You may obtain", "# Project a 3D point (0, 0, 1000.0) onto the image plane. 
#", "use_dlib_detection = not use_dlib_detection if use_dlib_detection == True: print(\"[INFO] face detection = DLIB\")", "detector softmax threshold (default = 0.55)\") ap.add_argument(\"-n\", \"--nmsthreshold\", required=False, help = \"face detector", "(150.0, -150.0, -125.0) # Right mouth corner ]) # Camera internals ret,frame =", "(int(p[0]), int(p[1])), 3, (255,255,255), 2) cv2.circle(frame, (int(p[0]), int(p[1])), 3, (255,255,255), 2) # draw", "(dlib_landmarks.part(30).x, dlib_landmarks.part(30).y), # Nose tip (dlib_landmarks.part( 8).x, dlib_landmarks.part( 8).y), # Chin (dlib_landmarks.part(36).x, dlib_landmarks.part(36).y),", "True: # DLIB based face detector dlib_faces = dlib_face_detector(dlib_image, 0) for face in", "= status + \"DLIB\" else: status = status + \"VART\" status = status", "# x = int(landmarks[i,0]) # y = int(landmarks[i,1]) # cv2.circle( frame, (x,y), 3,", "from camera ret,frame = cam.read() dlib_image = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB) faces = [] if use_dlib_detection", "(0.0, 0.0, 0.0), # Nose tip (0.0, -330.0, -65.0), # Chin (-225.0, 170.0,", "pose vector p1 = ( int(image_points[0][0]), int(image_points[0][1])) p2 = ( int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1])) cv2.line(frame,", "+ landmarks[i,0]*widthX landmarks[i,1] = startY + landmarks[i,1]*heightY # draw landmarks #for i in", "# only one DPU kernel landmark_dpu = vart.Runner.create_runner(landmark_subgraphs[0],\"run\") dpu_face_landmark = FaceLandmark(landmark_dpu) dpu_face_landmark.start() #", "faces = dpu_face_detector.process(frame) #print(faces) if use_dlib_detection == True: # DLIB based face detector", "+ rt_fps_message cv2.putText(frame, status, (rt_fps_x,rt_fps_y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 1, cv2.LINE_AA) # Display the", "(default = 0.35)\") args = vars(ap.parse_args()) if not args.get(\"input\",False): inputId = 0 else:", "DLIB based face landmark dlib_landmark_model = \"./models/shape_predictor_68_face_landmarks.dat\" 
dlib_face_landmark = dlib.shape_predictor(dlib_landmark_model) # algorithm selection", "0.55)\") ap.add_argument(\"-n\", \"--nmsthreshold\", required=False, help = \"face detector NMS threshold (default = 0.35)\")", "Update the real-time FPS counter rt_fps_count = rt_fps_count + 1 if rt_fps_count >=", "# if the `l` key was pressed, toggle between landmark algorithms if key", "# Capture image from camera ret,frame = cam.read() dlib_image = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB) faces =", "= [] if use_dlib_detection == False: # Vitis-AI/DPU based face detector faces =", "similar as the nose location eye_center_x = (image_points[2][0] + image_points[3][0])/2; eye_center_y = (image_points[2][1]", "camera ret,frame = cam.read() dlib_image = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB) faces = [] if use_dlib_detection ==", "mouth_center_y = (image_points[4][1] + image_points[5][1])/2; image_points[1] = (mouth_center_x + nose_offset_x, mouth_center_y + nose_offset_y);", "import pathlib import xir import os import math import threading import time import", "if the `l` key was pressed, toggle between landmark algorithms if key ==", "pose dist_coeffs = np.zeros((4,1)) # Assuming no lens distortion (success, rotation_vector, translation_vector) =", "if not args.get(\"input\",False): inputId = 0 else: inputId = int(args[\"input\"]) print('[INFO] input camera", "int(args[\"input\"]) print('[INFO] input camera identifier = ',inputId) if not args.get(\"detthreshold\",False): detThreshold = 0.55", "vector p1 = ( int(image_points[0][0]), int(image_points[0][1])) p2 = ( int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1])) cv2.line(frame, p1,", "based face detector dlib_face_detector = dlib.get_frontal_face_detector() # Initialize DLIB based face landmark dlib_landmark_model", "3, (255,255,255), 2) # draw head pose vector p1 = ( int(image_points[0][0]), int(image_points[0][1]))", "= 10.0/t rt_fps_message = \"FPS: {0:.2f}\".format(rt_fps) #print(\"[INFO] \",rt_fps_message) 
rt_fps_count = 0 # Stop", "vart.Runner.create_runner(densebox_subgraphs[0],\"run\") dpu_face_detector = FaceDetect(densebox_dpu,detThreshold,nmsThreshold) dpu_face_detector.start() # Initialize Vitis-AI/DPU based face landmark landmark_xmodel =", "False rt_fps = 0.0 rt_fps_message = \"FPS: {0:.2f}\".format(rt_fps) rt_fps_x = 10 rt_fps_y =", "face = frame[startY:endY, startX:endX] if use_dlib_landmarks == False: # extract face landmarks landmarks", "= dlib_face_detector(dlib_image, 0) for face in dlib_faces: faces.append( (face.left(),face.top(),face.right(),face.bottom()) ) #print(faces) # loop", "the processed image cv2.imshow(\"Head Pose Estimation\", frame) key = cv2.waitKey(1) & 0xFF #", "based face landmark dlib_landmark_model = \"./models/shape_predictor_68_face_landmarks.dat\" dlib_face_landmark = dlib.shape_predictor(dlib_landmark_model) # algorithm selection use_dlib_detection", "frame[startY:endY, startX:endX] if use_dlib_landmarks == False: # extract face landmarks landmarks = dpu_face_landmark.process(face)", "face_headpose_dlib.py [--input 0] [--detthreshold 0.55] [--nmsthreshold 0.35] from ctypes import * from typing", "2021 Avnet Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you", "print('[INFO] face detector - NMS threshold = ',nmsThreshold) # Initialize Vitis-AI/DPU based face", "np.array([ (dlib_landmarks.part(30).x, dlib_landmarks.part(30).y), # Nose tip (dlib_landmarks.part( 8).x, dlib_landmarks.part( 8).y), # Chin (dlib_landmarks.part(36).x,", "dist_coeffs = np.zeros((4,1)) # Assuming no lens distortion (success, rotation_vector, translation_vector) = cv2.solvePnP(model_points,", "status + \"VART\" if rt_fps_valid == True: status = status + \" \"", "ANY KIND, either express or implied. 
See the License for the specific language", "\" \" + rt_fps_message cv2.putText(frame, status, (rt_fps_x,rt_fps_y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 1, cv2.LINE_AA) #", "ord(\"d\"): use_dlib_detection = not use_dlib_detection if use_dlib_detection == True: print(\"[INFO] face detection =", "endX-startX heightY = endY-startY face = frame[startY:endY, startX:endX] if use_dlib_landmarks == False: #", "(int(p[0]), int(p[1])), 3, (0,0,255), -1) #cv2.circle(face, (int(p[0]), int(p[1])), 3, (255,255,255), 2) cv2.circle(frame, (int(p[0]),", "landmarks[2,1]), # Chin (place-holder for now) (landmarks[0,0], landmarks[0,1]), # Left eye left corner", "Vitis-AI/DPU based face detector faces = dpu_face_detector.process(frame) #print(faces) if use_dlib_detection == True: #", "Left Mouth corner (landmarks[4,0], landmarks[4,1]) # Right mouth corner ], dtype=\"double\") # estimate", "file except in compliance with the License. You may obtain a copy of", "(image_points[0][1] - eye_center_y); mouth_center_x = (image_points[4][0] + image_points[5][0])/2; mouth_center_y = (image_points[4][1] + image_points[5][1])/2;", "Right eye right corne (landmarks[3,0], landmarks[3,1]), # Left Mouth corner (landmarks[4,0], landmarks[4,1]) #", "detector dlib_faces = dlib_face_detector(dlib_image, 0) for face in dlib_faces: faces.append( (face.left(),face.top(),face.right(),face.bottom()) ) #print(faces)", "DLIB dlib_rect = dlib.rectangle( startX,startY,endX,endY ) dlib_landmarks = dlib_face_landmark(dlib_image,dlib_rect) # draw landmarks #for", "full frame for i in range(5): landmarks[i,0] = startX + landmarks[i,0]*widthX landmarks[i,1] =", "left corner (landmarks[1,0], landmarks[1,1]), # Right eye right corne (landmarks[3,0], landmarks[3,1]), # Left", "points. 
model_points = np.array([ (0.0, 0.0, 0.0), # Nose tip (0.0, -330.0, -65.0),", "= cam.read() dlib_image = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB) faces = [] if use_dlib_detection == False: #", "one DPU kernel densebox_dpu = vart.Runner.create_runner(densebox_subgraphs[0],\"run\") dpu_face_detector = FaceDetect(densebox_dpu,detThreshold,nmsThreshold) dpu_face_detector.start() # Initialize Vitis-AI/DPU", "cv2.putText(frame, status, (rt_fps_x,rt_fps_y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 1, cv2.LINE_AA) # Display the processed image", "- softmax threshold = ',detThreshold) if not args.get(\"nmsthreshold\",False): nmsThreshold = 0.35 else: nmsThreshold", "= False use_dlib_landmarks = True print(\"[INFO] face detection = VART\") print(\"[INFO] face landmarks", "-135.0), # Left eye left corner (225.0, 170.0, -135.0), # Right eye right", "2.0 (the \"License\"); you may not use this file except in compliance with", "0] [--detthreshold 0.55] [--nmsthreshold 0.35] from ctypes import * from typing import List", "eye_center_x = (image_points[2][0] + image_points[3][0])/2; eye_center_y = (image_points[2][1] + image_points[3][1])/2; nose_offset_x = (image_points[0][0]", "dtype=\"double\") # estimate approximate location of chin # let's assume that the chin", "3, (255,255,255), 2) # prepare 2D points image_points = np.array([ (dlib_landmarks.part(30).x, dlib_landmarks.part(30).y), #", "for the specific language governing permissions and limitations under the License. ''' #", "cv2.getTickCount() rt_fps_valid = False rt_fps = 0.0 rt_fps_message = \"FPS: {0:.2f}\".format(rt_fps) rt_fps_x =", "dlib_landmarks.part(36).y), # Left eye left corner (dlib_landmarks.part(45).x, dlib_landmarks.part(45).y), # Right eye right corne", "(0, 0, 1000.0) onto the image plane. 
# We use this to draw", "float(args[\"detthreshold\"]) print('[INFO] face detector - softmax threshold = ',detThreshold) if not args.get(\"nmsthreshold\",False): nmsThreshold", "-125.0), # Left Mouth corner (150.0, -150.0, -125.0) # Right mouth corner ])", "[] if use_dlib_detection == False: # Vitis-AI/DPU based face detector faces = dpu_face_detector.process(frame)", "int(landmarks[i,0]) # y = int(landmarks[i,1]) # cv2.circle( frame, (x,y), 3, (255,255,255), 2) #", "copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed", "cv2.circle( frame, (x,y), 3, (255,255,255), 2) # prepare 2D points image_points = np.array([", "face detector dlib_face_detector = dlib.get_frontal_face_detector() # Initialize DLIB based face landmark dlib_landmark_model =", "= 0; rt_fps_time = cv2.getTickCount() rt_fps_valid = False rt_fps = 0.0 rt_fps_message =", "= np.array([ (landmarks[2,0], landmarks[2,1]), # Nose tip (landmarks[2,0], landmarks[2,1]), # Chin (place-holder for", "= dlib.rectangle( startX,startY,endX,endY ) dlib_landmarks = dlib_face_landmark(dlib_image,dlib_rect) # draw landmarks #for i in", "t = (cv2.getTickCount() - rt_fps_time)/cv2.getTickFrequency() rt_fps_valid = True rt_fps = 10.0/t rt_fps_message =", "dlib.shape_predictor(dlib_landmark_model) # algorithm selection use_dlib_detection = False use_dlib_landmarks = True print(\"[INFO] face detection", "(landmarks[0,0], landmarks[0,1]), # Left eye left corner (landmarks[1,0], landmarks[1,1]), # Right eye right", "== True: status = status + \" \" + rt_fps_message cv2.putText(frame, status, (rt_fps_x,rt_fps_y),", "the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless", "image_points = np.array([ (dlib_landmarks.part(30).x, dlib_landmarks.part(30).y), # Nose tip (dlib_landmarks.part( 8).x, dlib_landmarks.part( 8).y), #", "(dlib_landmarks.part(45).x, dlib_landmarks.part(45).y), # Right eye right corne (dlib_landmarks.part(48).x, dlib_landmarks.part(48).y), # Left Mouth corner", "(int(p[0]), int(p[1])), 3, (255,255,255), 2) # draw head pose vector p1 = (", "as the nose location eye_center_x = (image_points[2][0] + image_points[3][0])/2; eye_center_y = (image_points[2][1] +", "y = int(dlib_landmarks.part(i).y) # cv2.circle( frame, (x,y), 3, (255,255,255), 2) # prepare 2D", "#print(image_points) # calculate head pose dist_coeffs = np.zeros((4,1)) # Assuming no lens distortion", "Avnet Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may", "= xir.Graph.deserialize(landmark_xmodel) landmark_subgraphs = get_child_subgraph_dpu(landmark_graph) assert len(landmark_subgraphs) == 1 # only one DPU", "(the \"License\"); you may not use this file except in compliance with the", "draw a bounding box surrounding the object so we can # visualize it", "= VART\") # if the `l` key was pressed, toggle between landmark algorithms", "image_points: #cv2.circle(face, (int(p[0]), int(p[1])), 3, (0,0,255), -1) #cv2.circle(face, (int(p[0]), int(p[1])), 3, (255,255,255), 2)", "+ \"VART\" if rt_fps_valid == True: status = status + \" \" +", "10 rt_fps_y = size[0]-10 # loop over the frames from the video stream", "landmarks[i,1]*heightY # draw landmarks #for i in range(5): # x = int(landmarks[i,0]) #", "Vector:\\n {0}\".format(translation_vector) # Project a 3D point (0, 0, 1000.0) onto the image", "cv2 import numpy as np import vart import pathlib import xir import os", "frames from the video stream while True: # Update the real-time FPS counter", "key was pressed, break from the loop if key == ord(\"q\"): break #", "(place-holder for now) 
(landmarks[0,0], landmarks[0,1]), # Left eye left corner (landmarks[1,0], landmarks[1,1]), #", "True: print(\"[INFO] face landmarks = DLIB\") else: print(\"[INFO] face landmarks = VART\") #", "endY-startY face = frame[startY:endY, startX:endX] if use_dlib_landmarks == False: # extract face landmarks", "# Update the FPS counter fps.update() # if the `q` key was pressed,", "',nmsThreshold) # Initialize Vitis-AI/DPU based face detector densebox_xmodel = \"/usr/share/vitis_ai_library/models/densebox_640_360/densebox_640_360.xmodel\" densebox_graph = xir.Graph.deserialize(densebox_xmodel)", "= \"input camera identifier (default = 0)\") ap.add_argument(\"-d\", \"--detthreshold\", required=False, help = \"face", "vitis_ai_vart.utils import get_child_subgraph_dpu import dlib # construct the argument parse and parse the", "Vector:\\n {0}\".format(rotation_vector) #print \"Translation Vector:\\n {0}\".format(translation_vector) # Project a 3D point (0, 0,", "0)\") ap.add_argument(\"-d\", \"--detthreshold\", required=False, help = \"face detector softmax threshold (default = 0.55)\")", "cv2.imshow(\"Head Pose Estimation\", frame) key = cv2.waitKey(1) & 0xFF # Update the FPS", "use_dlib_detection == True: status = status + \"DLIB\" else: status = status +", "= vars(ap.parse_args()) if not args.get(\"input\",False): inputId = 0 else: inputId = int(args[\"input\"]) print('[INFO]", "2D points image_points = np.array([ (dlib_landmarks.part(30).x, dlib_landmarks.part(30).y), # Nose tip (dlib_landmarks.part( 8).x, dlib_landmarks.part(", "\"FPS: {0:.2f}\".format(rt_fps) rt_fps_x = 10 rt_fps_y = size[0]-10 # loop over the frames", "processed image cv2.imshow(\"Head Pose Estimation\", frame) key = cv2.waitKey(1) & 0xFF # Update", "pressed, toggle between detection algorithms if key == ord(\"d\"): use_dlib_detection = not use_dlib_detection", "= ( int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1])) cv2.line(frame, p1, p2, (255,0,0), 2) # Display Status status", "display FPS 
information fps.stop() print(\"[INFO] elapsed time: {:.2f}\".format(fps.elapsed())) print(\"[INFO] elapsed FPS: {:.2f}\".format(fps.fps())) #", "corner ], dtype=\"double\") #print(image_points) # calculate head pose dist_coeffs = np.zeros((4,1)) # Assuming", "print(\"[INFO] Camera Matrix :\\n {0}\".format(camera_matrix)); # start the FPS counter fps = FPS().start()", "frame, (left,top), (right,bottom), (0,255,0), 2) # extract the face ROI startX = int(left)", "the FPS counter fps.update() # if the `q` key was pressed, break from", "rt_fps = 10.0/t rt_fps_message = \"FPS: {0:.2f}\".format(rt_fps) #print(\"[INFO] \",rt_fps_message) rt_fps_count = 0 #", "a 3D point (0, 0, 1000.0) onto the image plane. # We use", "math import threading import time import sys import argparse from imutils.video import FPS", "image_points, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE) #print \"Rotation Vector:\\n {0}\".format(rotation_vector) #print \"Translation Vector:\\n {0}\".format(translation_vector) #", "= float(args[\"detthreshold\"]) print('[INFO] face detector - softmax threshold = ',detThreshold) if not args.get(\"nmsthreshold\",False):", "display rt_fps_count = 0; rt_fps_time = cv2.getTickCount() rt_fps_valid = False rt_fps = 0.0", "use_dlib_detection == False: # Vitis-AI/DPU based face detector faces = dpu_face_detector.process(frame) #print(faces) if", ") print(\"[INFO] Camera Matrix :\\n {0}\".format(camera_matrix)); # start the FPS counter fps =", "0: rt_fps_time = cv2.getTickCount() # Capture image from camera ret,frame = cam.read() dlib_image", "# only one DPU kernel densebox_dpu = vart.Runner.create_runner(densebox_subgraphs[0],\"run\") dpu_face_detector = FaceDetect(densebox_dpu,detThreshold,nmsThreshold) dpu_face_detector.start() #", "center[1]], [0, 0, 1]], dtype = \"double\" ) print(\"[INFO] Camera Matrix :\\n {0}\".format(camera_matrix));", "ret,frame = cam.read() size=frame.shape focal_length = size[1] center = (size[1]/2, size[0]/2) camera_matrix =", 
"\"/usr/share/vitis_ai_library/models/face_landmark/face_landmark.xmodel\" landmark_graph = xir.Graph.deserialize(landmark_xmodel) landmark_subgraphs = get_child_subgraph_dpu(landmark_graph) assert len(landmark_subgraphs) == 1 # only", "import threading import time import sys import argparse from imutils.video import FPS sys.path.append(os.path.abspath('../'))", "parse the arguments ap = argparse.ArgumentParser() ap.add_argument(\"-i\", \"--input\", required=False, help = \"input camera", "# DLIB based face detector dlib_faces = dlib_face_detector(dlib_image, 0) for face in dlib_faces:", "(255,0,0), 2) # Display Status status = \"Status :\" status = status +", "# Right mouth corner ], dtype=\"double\") # estimate approximate location of chin #", "http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed", "face landmark landmark_xmodel = \"/usr/share/vitis_ai_library/models/face_landmark/face_landmark.xmodel\" landmark_graph = xir.Graph.deserialize(landmark_xmodel) landmark_subgraphs = get_child_subgraph_dpu(landmark_graph) assert len(landmark_subgraphs)", "(landmarks[1,0], landmarks[1,1]), # Right eye right corne (landmarks[3,0], landmarks[3,1]), # Left Mouth corner", "DLIB\") else: print(\"[INFO] face detection = VART\") # if the `l` key was", "threshold (default = 0.55)\") ap.add_argument(\"-n\", \"--nmsthreshold\", required=False, help = \"face detector NMS threshold", "face detector dlib_faces = dlib_face_detector(dlib_image, 0) for face in dlib_faces: faces.append( (face.left(),face.top(),face.right(),face.bottom()) )", "not use_dlib_landmarks if use_dlib_landmarks == True: print(\"[INFO] face landmarks = DLIB\") else: print(\"[INFO]", "the face ROI startX = int(left) startY = int(top) endX = int(right) endY", "= status + \" FaceDetect=\" if use_dlib_detection == True: status = status +", "use_dlib_landmarks if use_dlib_landmarks == True: print(\"[INFO] face landmarks = DLIB\") else: 
print(\"[INFO] face", "the timer and display FPS information fps.stop() print(\"[INFO] elapsed time: {:.2f}\".format(fps.elapsed())) print(\"[INFO] elapsed", "License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF", "len(densebox_subgraphs) == 1 # only one DPU kernel densebox_dpu = vart.Runner.create_runner(densebox_subgraphs[0],\"run\") dpu_face_detector =", "dlib_landmarks.part(45).y), # Right eye right corne (dlib_landmarks.part(48).x, dlib_landmarks.part(48).y), # Left Mouth corner (dlib_landmarks.part(54).x,", "from vitis_ai_vart.facedetect import FaceDetect from vitis_ai_vart.facelandmark import FaceLandmark from vitis_ai_vart.utils import get_child_subgraph_dpu import", "tip (dlib_landmarks.part( 8).x, dlib_landmarks.part( 8).y), # Chin (dlib_landmarks.part(36).x, dlib_landmarks.part(36).y), # Left eye left", "int(landmarks[i,1]) # cv2.circle( frame, (x,y), 3, (255,255,255), 2) # prepare 2D points image_points", "rt_fps_count = rt_fps_count + 1 if rt_fps_count >= 10: t = (cv2.getTickCount() -", "- eye_center_x); nose_offset_y = (image_points[0][1] - eye_center_y); mouth_center_x = (image_points[4][0] + image_points[5][0])/2; mouth_center_y", "import dlib # construct the argument parse and parse the arguments ap =", "= 0.0 rt_fps_message = \"FPS: {0:.2f}\".format(rt_fps) rt_fps_x = 10 rt_fps_y = size[0]-10 #", "import math import threading import time import sys import argparse from imutils.video import", "cv2.VideoCapture(inputId) cam.set(cv2.CAP_PROP_FRAME_WIDTH,640) cam.set(cv2.CAP_PROP_FRAME_HEIGHT,480) if not (cam.isOpened()): print(\"[ERROR] Failed to open camera \", inputId", "# We use this to draw a line sticking out of the nose", "int(p[1])), 3, (0,0,255), -1) #cv2.circle(face, (int(p[0]), int(p[1])), 3, (255,255,255), 2) cv2.circle(frame, (int(p[0]), int(p[1])),", "int(bottom) #print( startX, endX, startY, endY ) widthX = endX-startX heightY = endY-startY", "law or agreed to in writing, software distributed under the 
License is distributed", "8).y), # Chin (dlib_landmarks.part(36).x, dlib_landmarks.part(36).y), # Left eye left corner (dlib_landmarks.part(45).x, dlib_landmarks.part(45).y), #", "corner (landmarks[1,0], landmarks[1,1]), # Right eye right corne (landmarks[3,0], landmarks[3,1]), # Left Mouth", "toggle between detection algorithms if key == ord(\"d\"): use_dlib_detection = not use_dlib_detection if", "dlib.get_frontal_face_detector() # Initialize DLIB based face landmark dlib_landmark_model = \"./models/shape_predictor_68_face_landmarks.dat\" dlib_face_landmark = dlib.shape_predictor(dlib_landmark_model)", "(landmarks[2,0], landmarks[2,1]), # Chin (place-holder for now) (landmarks[0,0], landmarks[0,1]), # Left eye left", "identifier (default = 0)\") ap.add_argument(\"-d\", \"--detthreshold\", required=False, help = \"face detector softmax threshold", "import FaceLandmark from vitis_ai_vart.utils import get_child_subgraph_dpu import dlib # construct the argument parse", "import xir import os import math import threading import time import sys import", "# Initialize DLIB based face detector dlib_face_detector = dlib.get_frontal_face_detector() # Initialize DLIB based", "counter if rt_fps_count == 0: rt_fps_time = cv2.getTickCount() # Capture image from camera", "dlib_landmarks.part(48).y), # Left Mouth corner (dlib_landmarks.part(54).x, dlib_landmarks.part(54).y) # Right mouth corner ], dtype=\"double\")", "Version 2.0 (the \"License\"); you may not use this file except in compliance", "status, (rt_fps_x,rt_fps_y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 1, cv2.LINE_AA) # Display the processed image cv2.imshow(\"Head", "VART\") # Update the real-time FPS counter rt_fps_count = rt_fps_count + 1 if", "eye left corner (225.0, 170.0, -135.0), # Right eye right corne (-150.0, -150.0,", "# construct the argument parse and parse the arguments ap = argparse.ArgumentParser() ap.add_argument(\"-i\",", "= cv2.getTickCount() # Capture image from camera ret,frame = cam.read() 
dlib_image = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)", "== True: print(\"[INFO] face detection = DLIB\") else: print(\"[INFO] face detection = VART\")", "from the loop if key == ord(\"q\"): break # if the `d` key", "the Apache License, Version 2.0 (the \"License\"); you may not use this file", "landmarks[0,1]), # Left eye left corner (landmarks[1,0], landmarks[1,1]), # Right eye right corne", "(landmarks[2,0], landmarks[2,1]), # Nose tip (landmarks[2,0], landmarks[2,1]), # Chin (place-holder for now) (landmarks[0,0],", "= int(args[\"input\"]) print('[INFO] input camera identifier = ',inputId) if not args.get(\"detthreshold\",False): detThreshold =", "10.0/t rt_fps_message = \"FPS: {0:.2f}\".format(rt_fps) #print(\"[INFO] \",rt_fps_message) rt_fps_count = 0 # Stop the", "VART\") print(\"[INFO] face landmarks = DLIB\") # Initialize the camera input print(\"[INFO] starting", "the real-time FPS counter if rt_fps_count == 0: rt_fps_time = cv2.getTickCount() # Capture", "help = \"face detector NMS threshold (default = 0.35)\") args = vars(ap.parse_args()) if", "0.0), # Nose tip (0.0, -330.0, -65.0), # Chin (-225.0, 170.0, -135.0), #", "False: # Vitis-AI/DPU based face detector faces = dpu_face_detector.process(frame) #print(faces) if use_dlib_detection ==", "FPS counter fps.update() # if the `q` key was pressed, break from the", "under the Apache License, Version 2.0 (the \"License\"); you may not use this", "rt_fps_x = 10 rt_fps_y = size[0]-10 # loop over the frames from the", "#print \"Rotation Vector:\\n {0}\".format(rotation_vector) #print \"Translation Vector:\\n {0}\".format(translation_vector) # Project a 3D point", "= argparse.ArgumentParser() ap.add_argument(\"-i\", \"--input\", required=False, help = \"input camera identifier (default = 0)\")", "= dlib.shape_predictor(dlib_landmark_model) # algorithm selection use_dlib_detection = False use_dlib_landmarks = True print(\"[INFO] face", "endY = int(bottom) #print( startX, endX, startY, endY ) widthX = endX-startX 
heightY", "and limitations under the License. ''' # USAGE # python face_headpose_dlib.py [--input 0]", "detection = VART\") print(\"[INFO] face landmarks = DLIB\") # Initialize the camera input", "the chin location will behave similar as the nose location eye_center_x = (image_points[2][0]", "dpu_face_landmark.start() # Initialize DLIB based face detector dlib_face_detector = dlib.get_frontal_face_detector() # Initialize DLIB", "draw a line sticking out of the nose (nose_end_point2D, jacobian) = cv2.projectPoints(np.array([(0.0, 0.0,", "corner (225.0, 170.0, -135.0), # Right eye right corne (-150.0, -150.0, -125.0), #", "\",rt_fps_message) rt_fps_count = 0 # Stop the timer and display FPS information fps.stop()", "== True: print(\"[INFO] face landmarks = DLIB\") else: print(\"[INFO] face landmarks = VART\")", "real-time FPS counter if rt_fps_count == 0: rt_fps_time = cv2.getTickCount() # Capture image", "either express or implied. See the License for the specific language governing permissions", "= \"face detector softmax threshold (default = 0.55)\") ap.add_argument(\"-n\", \"--nmsthreshold\", required=False, help =", "(-225.0, 170.0, -135.0), # Left eye left corner (225.0, 170.0, -135.0), # Right", "rt_fps_message cv2.putText(frame, status, (rt_fps_x,rt_fps_y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 1, cv2.LINE_AA) # Display the processed", "= \"/usr/share/vitis_ai_library/models/densebox_640_360/densebox_640_360.xmodel\" densebox_graph = xir.Graph.deserialize(densebox_xmodel) densebox_subgraphs = get_child_subgraph_dpu(densebox_graph) assert len(densebox_subgraphs) == 1 #", "= size[1] center = (size[1]/2, size[0]/2) camera_matrix = np.array( [[focal_length, 0, center[0]], [0,", "= cv2.getTickCount() rt_fps_valid = False rt_fps = 0.0 rt_fps_message = \"FPS: {0:.2f}\".format(rt_fps) rt_fps_x", "cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 1, cv2.LINE_AA) # Display the processed image cv2.imshow(\"Head Pose Estimation\",", "int(image_points[0][1])) p2 = ( 
int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1])) cv2.line(frame, p1, p2, (255,0,0), 2) # Display", "fps.update() # if the `q` key was pressed, break from the loop if", "# draw a bounding box surrounding the object so we can # visualize", "face landmarks = VART\") # Update the real-time FPS counter rt_fps_count = rt_fps_count", "import argparse from imutils.video import FPS sys.path.append(os.path.abspath('../')) sys.path.append(os.path.abspath('./')) from vitis_ai_vart.facedetect import FaceDetect from", "= (image_points[4][1] + image_points[5][1])/2; image_points[1] = (mouth_center_x + nose_offset_x, mouth_center_y + nose_offset_y); #print(image_points)", "# extract face landmarks with DLIB dlib_rect = dlib.rectangle( startX,startY,endX,endY ) dlib_landmarks =", "\"DLIB\" else: status = status + \"VART\" status = status + \" Landmark=\"", "import FPS sys.path.append(os.path.abspath('../')) sys.path.append(os.path.abspath('./')) from vitis_ai_vart.facedetect import FaceDetect from vitis_ai_vart.facelandmark import FaceLandmark from", "landmarks[2,1]), # Nose tip (landmarks[2,0], landmarks[2,1]), # Chin (place-holder for now) (landmarks[0,0], landmarks[0,1]),", "startX,startY,endX,endY ) dlib_landmarks = dlib_face_landmark(dlib_image,dlib_rect) # draw landmarks #for i in range(dlib_landmarks.num_parts): #", "face in dlib_faces: faces.append( (face.left(),face.top(),face.right(),face.bottom()) ) #print(faces) # loop over the faces for", ":\\n {0}\".format(camera_matrix)); # start the FPS counter fps = FPS().start() # init the", "Initialize the camera input print(\"[INFO] starting camera input ...\") cam = cv2.VideoCapture(inputId) cam.set(cv2.CAP_PROP_FRAME_WIDTH,640)", "can # visualize it cv2.rectangle( frame, (left,top), (right,bottom), (0,255,0), 2) # extract the", "nose_offset_y = (image_points[0][1] - eye_center_y); mouth_center_x = (image_points[4][0] + image_points[5][0])/2; mouth_center_y = (image_points[4][1]", "cam.read() size=frame.shape 
focal_length = size[1] center = (size[1]/2, size[0]/2) camera_matrix = np.array( [[focal_length,", "calculate head pose dist_coeffs = np.zeros((4,1)) # Assuming no lens distortion (success, rotation_vector,", "Apache License, Version 2.0 (the \"License\"); you may not use this file except", "or implied. See the License for the specific language governing permissions and limitations", "rt_fps_count == 0: rt_fps_time = cv2.getTickCount() # Capture image from camera ret,frame =", "calculate coordinates for full frame for i in range(5): landmarks[i,0] = startX +", "the argument parse and parse the arguments ap = argparse.ArgumentParser() ap.add_argument(\"-i\", \"--input\", required=False,", "face ROI startX = int(left) startY = int(top) endX = int(right) endY =", ") dlib_landmarks = dlib_face_landmark(dlib_image,dlib_rect) # draw landmarks #for i in range(dlib_landmarks.num_parts): # x", "the object so we can # visualize it cv2.rectangle( frame, (left,top), (right,bottom), (0,255,0),", "help = \"input camera identifier (default = 0)\") ap.add_argument(\"-d\", \"--detthreshold\", required=False, help =", "detector faces = dpu_face_detector.process(frame) #print(faces) if use_dlib_detection == True: # DLIB based face", "1, cv2.LINE_AA) # Display the processed image cv2.imshow(\"Head Pose Estimation\", frame) key =", "image_points = np.array([ (landmarks[2,0], landmarks[2,1]), # Nose tip (landmarks[2,0], landmarks[2,1]), # Chin (place-holder", "to open camera \", inputId ) exit() # 3D model points. 
model_points =", "densebox_graph = xir.Graph.deserialize(densebox_xmodel) densebox_subgraphs = get_child_subgraph_dpu(densebox_graph) assert len(densebox_subgraphs) == 1 # only one", "(image_points[0][0] - eye_center_x); nose_offset_y = (image_points[0][1] - eye_center_y); mouth_center_x = (image_points[4][0] + image_points[5][0])/2;", "draw landmarks #for i in range(dlib_landmarks.num_parts): # x = int(dlib_landmarks.part(i).x) # y =", "for i,(left,top,right,bottom) in enumerate(faces): # draw a bounding box surrounding the object so", "{0}\".format(translation_vector) # Project a 3D point (0, 0, 1000.0) onto the image plane.", "Right eye right corne (-150.0, -150.0, -125.0), # Left Mouth corner (150.0, -150.0,", "corner (dlib_landmarks.part(45).x, dlib_landmarks.part(45).y), # Right eye right corne (dlib_landmarks.part(48).x, dlib_landmarks.part(48).y), # Left Mouth", "rt_fps_count = 0 # Stop the timer and display FPS information fps.stop() print(\"[INFO]", "+ image_points[5][1])/2; image_points[1] = (mouth_center_x + nose_offset_x, mouth_center_y + nose_offset_y); #print(image_points) if use_dlib_landmarks", "(0,255,0), 1, cv2.LINE_AA) # Display the processed image cv2.imshow(\"Head Pose Estimation\", frame) key", "= get_child_subgraph_dpu(densebox_graph) assert len(densebox_subgraphs) == 1 # only one DPU kernel densebox_dpu =", "# USAGE # python face_headpose_dlib.py [--input 0] [--detthreshold 0.55] [--nmsthreshold 0.35] from ctypes", "mouth corner ]) # Camera internals ret,frame = cam.read() size=frame.shape focal_length = size[1]", "enumerate(faces): # draw a bounding box surrounding the object so we can #", "= int(top) endX = int(right) endY = int(bottom) #print( startX, endX, startY, endY", "0.5, (0,255,0), 1, cv2.LINE_AA) # Display the processed image cv2.imshow(\"Head Pose Estimation\", frame)", "Pose Estimation\", frame) key = cv2.waitKey(1) & 0xFF # Update the FPS counter", "# Stop the timer and display FPS information fps.stop() print(\"[INFO] elapsed 
time: {:.2f}\".format(fps.elapsed()))", "rt_fps_count = 0; rt_fps_time = cv2.getTickCount() rt_fps_valid = False rt_fps = 0.0 rt_fps_message", "and parse the arguments ap = argparse.ArgumentParser() ap.add_argument(\"-i\", \"--input\", required=False, help = \"input", "real-time FPS display rt_fps_count = 0; rt_fps_time = cv2.getTickCount() rt_fps_valid = False rt_fps", "of chin # let's assume that the chin location will behave similar as", "will behave similar as the nose location eye_center_x = (image_points[2][0] + image_points[3][0])/2; eye_center_y", "(default = 0)\") ap.add_argument(\"-d\", \"--detthreshold\", required=False, help = \"face detector softmax threshold (default", "dist_coeffs) for p in image_points: #cv2.circle(face, (int(p[0]), int(p[1])), 3, (0,0,255), -1) #cv2.circle(face, (int(p[0]),", "sys import argparse from imutils.video import FPS sys.path.append(os.path.abspath('../')) sys.path.append(os.path.abspath('./')) from vitis_ai_vart.facedetect import FaceDetect", "dpu_face_detector = FaceDetect(densebox_dpu,detThreshold,nmsThreshold) dpu_face_detector.start() # Initialize Vitis-AI/DPU based face landmark landmark_xmodel = \"/usr/share/vitis_ai_library/models/face_landmark/face_landmark.xmodel\"", "\" Landmark=\" if use_dlib_landmarks == True: status = status + \"DLIB\" else: status", "= VART\") # Update the real-time FPS counter rt_fps_count = rt_fps_count + 1", "rt_fps_message = \"FPS: {0:.2f}\".format(rt_fps) rt_fps_x = 10 rt_fps_y = size[0]-10 # loop over", "= rt_fps_count + 1 if rt_fps_count >= 10: t = (cv2.getTickCount() - rt_fps_time)/cv2.getTickFrequency()", "rt_fps_time = cv2.getTickCount() # Capture image from camera ret,frame = cam.read() dlib_image =", "chin location will behave similar as the nose location eye_center_x = (image_points[2][0] +", "CONDITIONS OF ANY KIND, either express or implied. 
See the License for the", "location will behave similar as the nose location eye_center_x = (image_points[2][0] + image_points[3][0])/2;", "== True: status = status + \"DLIB\" else: status = status + \"VART\"", "use_dlib_landmarks = not use_dlib_landmarks if use_dlib_landmarks == True: print(\"[INFO] face landmarks = DLIB\")", "# Assuming no lens distortion (success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs,", "nose_offset_x, mouth_center_y + nose_offset_y); #print(image_points) if use_dlib_landmarks == True: # extract face landmarks", "rt_fps_count + 1 if rt_fps_count >= 10: t = (cv2.getTickCount() - rt_fps_time)/cv2.getTickFrequency() rt_fps_valid", "if not (cam.isOpened()): print(\"[ERROR] Failed to open camera \", inputId ) exit() #", "the specific language governing permissions and limitations under the License. ''' # USAGE", "translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE) #print \"Rotation Vector:\\n {0}\".format(rotation_vector) #print \"Translation", "x = int(landmarks[i,0]) # y = int(landmarks[i,1]) # cv2.circle( frame, (x,y), 3, (255,255,255),", "# algorithm selection use_dlib_detection = False use_dlib_landmarks = True print(\"[INFO] face detection =", "True: # Update the real-time FPS counter if rt_fps_count == 0: rt_fps_time =", "to in writing, software distributed under the License is distributed on an \"AS", "\"double\" ) print(\"[INFO] Camera Matrix :\\n {0}\".format(camera_matrix)); # start the FPS counter fps", "= (image_points[4][0] + image_points[5][0])/2; mouth_center_y = (image_points[4][1] + image_points[5][1])/2; image_points[1] = (mouth_center_x +", "sys.path.append(os.path.abspath('../')) sys.path.append(os.path.abspath('./')) from vitis_ai_vart.facedetect import FaceDetect from vitis_ai_vart.facelandmark import FaceLandmark from vitis_ai_vart.utils import", "= 10 rt_fps_y = size[0]-10 # loop over the 
frames from the video", "import List import cv2 import numpy as np import vart import pathlib import", "dtype=\"double\") #print(image_points) # calculate head pose dist_coeffs = np.zeros((4,1)) # Assuming no lens", "dpu_face_landmark.process(face) # calculate coordinates for full frame for i in range(5): landmarks[i,0] =", "{0}\".format(rotation_vector) #print \"Translation Vector:\\n {0}\".format(translation_vector) # Project a 3D point (0, 0, 1000.0)", "Failed to open camera \", inputId ) exit() # 3D model points. model_points", "True: # extract face landmarks with DLIB dlib_rect = dlib.rectangle( startX,startY,endX,endY ) dlib_landmarks", "point (0, 0, 1000.0) onto the image plane. # We use this to", "True: status = status + \"DLIB\" else: status = status + \"VART\" if", "startX:endX] if use_dlib_landmarks == False: # extract face landmarks landmarks = dpu_face_landmark.process(face) #", "face detection = VART\") print(\"[INFO] face landmarks = DLIB\") # Initialize the camera", "head pose vector p1 = ( int(image_points[0][0]), int(image_points[0][1])) p2 = ( int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))", "= xir.Graph.deserialize(densebox_xmodel) densebox_subgraphs = get_child_subgraph_dpu(densebox_graph) assert len(densebox_subgraphs) == 1 # only one DPU", "# Initialize Vitis-AI/DPU based face landmark landmark_xmodel = \"/usr/share/vitis_ai_library/models/face_landmark/face_landmark.xmodel\" landmark_graph = xir.Graph.deserialize(landmark_xmodel) landmark_subgraphs", "# Update the real-time FPS counter rt_fps_count = rt_fps_count + 1 if rt_fps_count", "except in compliance with the License. 
You may obtain a copy of the", "in dlib_faces: faces.append( (face.left(),face.top(),face.right(),face.bottom()) ) #print(faces) # loop over the faces for i,(left,top,right,bottom)", "endX = int(right) endY = int(bottom) #print( startX, endX, startY, endY ) widthX", "# Left eye left corner (landmarks[1,0], landmarks[1,1]), # Right eye right corne (landmarks[3,0],", "dlib_faces: faces.append( (face.left(),face.top(),face.right(),face.bottom()) ) #print(faces) # loop over the faces for i,(left,top,right,bottom) in", "int(dlib_landmarks.part(i).y) # cv2.circle( frame, (x,y), 3, (255,255,255), 2) # prepare 2D points image_points", "= float(args[\"nmsthreshold\"]) print('[INFO] face detector - NMS threshold = ',nmsThreshold) # Initialize Vitis-AI/DPU", "(x,y), 3, (255,255,255), 2) # prepare 2D points image_points = np.array([ (landmarks[2,0], landmarks[2,1]),", "tip (0.0, -330.0, -65.0), # Chin (-225.0, 170.0, -135.0), # Left eye left", "eye_center_y); mouth_center_x = (image_points[4][0] + image_points[5][0])/2; mouth_center_y = (image_points[4][1] + image_points[5][1])/2; image_points[1] =", "get_child_subgraph_dpu(landmark_graph) assert len(landmark_subgraphs) == 1 # only one DPU kernel landmark_dpu = vart.Runner.create_runner(landmark_subgraphs[0],\"run\")", "(nose_end_point2D, jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector, translation_vector, camera_matrix, dist_coeffs) for p in", "use_dlib_landmarks == True: # extract face landmarks with DLIB dlib_rect = dlib.rectangle( startX,startY,endX,endY", "= cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector, translation_vector, camera_matrix, dist_coeffs) for p in image_points: #cv2.circle(face,", "#cv2.circle(face, (int(p[0]), int(p[1])), 3, (255,255,255), 2) cv2.circle(frame, (int(p[0]), int(p[1])), 3, (255,255,255), 2) #", "+ \" FaceDetect=\" if use_dlib_detection == True: status = status + \"DLIB\" else:", "an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, 
either express", "0; rt_fps_time = cv2.getTickCount() rt_fps_valid = False rt_fps = 0.0 rt_fps_message = \"FPS:", "\"input camera identifier (default = 0)\") ap.add_argument(\"-d\", \"--detthreshold\", required=False, help = \"face detector", "status = status + \"DLIB\" else: status = status + \"VART\" if rt_fps_valid", "#for i in range(dlib_landmarks.num_parts): # x = int(dlib_landmarks.part(i).x) # y = int(dlib_landmarks.part(i).y) #", "\" FaceDetect=\" if use_dlib_detection == True: status = status + \"DLIB\" else: status", "= 0.35 else: nmsThreshold = float(args[\"nmsthreshold\"]) print('[INFO] face detector - NMS threshold =", "Mouth corner (landmarks[4,0], landmarks[4,1]) # Right mouth corner ], dtype=\"double\") # estimate approximate", "in range(5): landmarks[i,0] = startX + landmarks[i,0]*widthX landmarks[i,1] = startY + landmarks[i,1]*heightY #", "landmark_xmodel = \"/usr/share/vitis_ai_library/models/face_landmark/face_landmark.xmodel\" landmark_graph = xir.Graph.deserialize(landmark_xmodel) landmark_subgraphs = get_child_subgraph_dpu(landmark_graph) assert len(landmark_subgraphs) == 1", "corne (landmarks[3,0], landmarks[3,1]), # Left Mouth corner (landmarks[4,0], landmarks[4,1]) # Right mouth corner", "x = int(dlib_landmarks.part(i).x) # y = int(dlib_landmarks.part(i).y) # cv2.circle( frame, (x,y), 3, (255,255,255),", "int(dlib_landmarks.part(i).x) # y = int(dlib_landmarks.part(i).y) # cv2.circle( frame, (x,y), 3, (255,255,255), 2) #", "obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law", "0.0, 1000.0)]), rotation_vector, translation_vector, camera_matrix, dist_coeffs) for p in image_points: #cv2.circle(face, (int(p[0]), int(p[1])),", "only one DPU kernel landmark_dpu = vart.Runner.create_runner(landmark_subgraphs[0],\"run\") dpu_face_landmark = FaceLandmark(landmark_dpu) dpu_face_landmark.start() # Initialize", "np.array([ (0.0, 0.0, 0.0), # Nose tip (0.0, -330.0, -65.0), # Chin (-225.0,", "xir 
import os import math import threading import time import sys import argparse", "arguments ap = argparse.ArgumentParser() ap.add_argument(\"-i\", \"--input\", required=False, help = \"input camera identifier (default", "+ \" \" + rt_fps_message cv2.putText(frame, status, (rt_fps_x,rt_fps_y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 1, cv2.LINE_AA)", "- eye_center_y); mouth_center_x = (image_points[4][0] + image_points[5][0])/2; mouth_center_y = (image_points[4][1] + image_points[5][1])/2; image_points[1]", "True rt_fps = 10.0/t rt_fps_message = \"FPS: {0:.2f}\".format(rt_fps) #print(\"[INFO] \",rt_fps_message) rt_fps_count = 0", "rt_fps_count >= 10: t = (cv2.getTickCount() - rt_fps_time)/cv2.getTickFrequency() rt_fps_valid = True rt_fps =", "size[0]-10 # loop over the frames from the video stream while True: #", "Nose tip (0.0, -330.0, -65.0), # Chin (-225.0, 170.0, -135.0), # Left eye", "= (size[1]/2, size[0]/2) camera_matrix = np.array( [[focal_length, 0, center[0]], [0, focal_length, center[1]], [0,", "language governing permissions and limitations under the License. 
''' # USAGE # python", "right corne (dlib_landmarks.part(48).x, dlib_landmarks.part(48).y), # Left Mouth corner (dlib_landmarks.part(54).x, dlib_landmarks.part(54).y) # Right mouth", "8).x, dlib_landmarks.part( 8).y), # Chin (dlib_landmarks.part(36).x, dlib_landmarks.part(36).y), # Left eye left corner (dlib_landmarks.part(45).x,", "import os import math import threading import time import sys import argparse from", "# Display Status status = \"Status :\" status = status + \" FaceDetect=\"", "key == ord(\"l\"): use_dlib_landmarks = not use_dlib_landmarks if use_dlib_landmarks == True: print(\"[INFO] face", "range(5): landmarks[i,0] = startX + landmarks[i,0]*widthX landmarks[i,1] = startY + landmarks[i,1]*heightY # draw", "FaceLandmark from vitis_ai_vart.utils import get_child_subgraph_dpu import dlib # construct the argument parse and", "Right mouth corner ]) # Camera internals ret,frame = cam.read() size=frame.shape focal_length =", "License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing,", "cv2.rectangle( frame, (left,top), (right,bottom), (0,255,0), 2) # extract the face ROI startX =", "faces.append( (face.left(),face.top(),face.right(),face.bottom()) ) #print(faces) # loop over the faces for i,(left,top,right,bottom) in enumerate(faces):", "range(5): # x = int(landmarks[i,0]) # y = int(landmarks[i,1]) # cv2.circle( frame, (x,y),", "flags=cv2.SOLVEPNP_ITERATIVE) #print \"Rotation Vector:\\n {0}\".format(rotation_vector) #print \"Translation Vector:\\n {0}\".format(translation_vector) # Project a 3D", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "''' # USAGE # python face_headpose_dlib.py [--input 0] [--detthreshold 0.55] [--nmsthreshold 0.35] from", "= frame[startY:endY, startX:endX] if use_dlib_landmarks == False: # extract face landmarks landmarks =", "#print \"Translation Vector:\\n {0}\".format(translation_vector) # Project a 3D point (0, 0, 1000.0) onto", "0.0 
rt_fps_message = \"FPS: {0:.2f}\".format(rt_fps) rt_fps_x = 10 rt_fps_y = size[0]-10 # loop", "status + \"VART\" status = status + \" Landmark=\" if use_dlib_landmarks == True:", "if key == ord(\"l\"): use_dlib_landmarks = not use_dlib_landmarks if use_dlib_landmarks == True: print(\"[INFO]", "argument parse and parse the arguments ap = argparse.ArgumentParser() ap.add_argument(\"-i\", \"--input\", required=False, help", "= cv2.VideoCapture(inputId) cam.set(cv2.CAP_PROP_FRAME_WIDTH,640) cam.set(cv2.CAP_PROP_FRAME_HEIGHT,480) if not (cam.isOpened()): print(\"[ERROR] Failed to open camera \",", "mouth_center_x = (image_points[4][0] + image_points[5][0])/2; mouth_center_y = (image_points[4][1] + image_points[5][1])/2; image_points[1] = (mouth_center_x", "{:.2f}\".format(fps.fps())) # Stop the face detector dpu_face_detector.stop() del densebox_dpu dpu_face_landmark.stop() del landmark_dpu #", "location eye_center_x = (image_points[2][0] + image_points[3][0])/2; eye_center_y = (image_points[2][1] + image_points[3][1])/2; nose_offset_x =", "distortion (success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE) #print \"Rotation Vector:\\n", "eye left corner (dlib_landmarks.part(45).x, dlib_landmarks.part(45).y), # Right eye right corne (dlib_landmarks.part(48).x, dlib_landmarks.part(48).y), #", "dlib_rect = dlib.rectangle( startX,startY,endX,endY ) dlib_landmarks = dlib_face_landmark(dlib_image,dlib_rect) # draw landmarks #for i", "model_points = np.array([ (0.0, 0.0, 0.0), # Nose tip (0.0, -330.0, -65.0), #", "== True: # extract face landmarks with DLIB dlib_rect = dlib.rectangle( startX,startY,endX,endY )", "[--nmsthreshold 0.35] from ctypes import * from typing import List import cv2 import", "open camera \", inputId ) exit() # 3D model points. 
model_points = np.array([", "1 if rt_fps_count >= 10: t = (cv2.getTickCount() - rt_fps_time)/cv2.getTickFrequency() rt_fps_valid = True", "while True: # Update the real-time FPS counter if rt_fps_count == 0: rt_fps_time", "use_dlib_detection if use_dlib_detection == True: print(\"[INFO] face detection = DLIB\") else: print(\"[INFO] face", "Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not", "typing import List import cv2 import numpy as np import vart import pathlib", "- NMS threshold = ',nmsThreshold) # Initialize Vitis-AI/DPU based face detector densebox_xmodel =", "if the `d` key was pressed, toggle between detection algorithms if key ==", "status = status + \" Landmark=\" if use_dlib_landmarks == True: status = status", "== 1 # only one DPU kernel landmark_dpu = vart.Runner.create_runner(landmark_subgraphs[0],\"run\") dpu_face_landmark = FaceLandmark(landmark_dpu)", "compliance with the License. You may obtain a copy of the License at", "input print(\"[INFO] starting camera input ...\") cam = cv2.VideoCapture(inputId) cam.set(cv2.CAP_PROP_FRAME_WIDTH,640) cam.set(cv2.CAP_PROP_FRAME_HEIGHT,480) if not", "use_dlib_detection == True: print(\"[INFO] face detection = DLIB\") else: print(\"[INFO] face detection =", "detection = VART\") # if the `l` key was pressed, toggle between landmark", "detThreshold = 0.55 else: detThreshold = float(args[\"detthreshold\"]) print('[INFO] face detector - softmax threshold", "in range(5): # x = int(landmarks[i,0]) # y = int(landmarks[i,1]) # cv2.circle( frame,", "Left Mouth corner (150.0, -150.0, -125.0) # Right mouth corner ]) # Camera", "FPS display rt_fps_count = 0; rt_fps_time = cv2.getTickCount() rt_fps_valid = False rt_fps =", "express or implied. 
See the License for the specific language governing permissions and", "== 1 # only one DPU kernel densebox_dpu = vart.Runner.create_runner(densebox_subgraphs[0],\"run\") dpu_face_detector = FaceDetect(densebox_dpu,detThreshold,nmsThreshold)", "nmsThreshold = float(args[\"nmsthreshold\"]) print('[INFO] face detector - NMS threshold = ',nmsThreshold) # Initialize", "the nose location eye_center_x = (image_points[2][0] + image_points[3][0])/2; eye_center_y = (image_points[2][1] + image_points[3][1])/2;", "p1, p2, (255,0,0), 2) # Display Status status = \"Status :\" status =", "counter fps.update() # if the `q` key was pressed, break from the loop", "face landmarks with DLIB dlib_rect = dlib.rectangle( startX,startY,endX,endY ) dlib_landmarks = dlib_face_landmark(dlib_image,dlib_rect) #", "key was pressed, toggle between detection algorithms if key == ord(\"d\"): use_dlib_detection =", "extract the face ROI startX = int(left) startY = int(top) endX = int(right)", "(cv2.getTickCount() - rt_fps_time)/cv2.getTickFrequency() rt_fps_valid = True rt_fps = 10.0/t rt_fps_message = \"FPS: {0:.2f}\".format(rt_fps)", "frame, (x,y), 3, (255,255,255), 2) # prepare 2D points image_points = np.array([ (landmarks[2,0],", "threshold = ',detThreshold) if not args.get(\"nmsthreshold\",False): nmsThreshold = 0.35 else: nmsThreshold = float(args[\"nmsthreshold\"])", "left corner (225.0, 170.0, -135.0), # Right eye right corne (-150.0, -150.0, -125.0),", "onto the image plane. 
# We use this to draw a line sticking", "status + \" \" + rt_fps_message cv2.putText(frame, status, (rt_fps_x,rt_fps_y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 1,", "print(\"[INFO] face detection = VART\") # if the `l` key was pressed, toggle", "elapsed time: {:.2f}\".format(fps.elapsed())) print(\"[INFO] elapsed FPS: {:.2f}\".format(fps.fps())) # Stop the face detector dpu_face_detector.stop()", "face landmarks landmarks = dpu_face_landmark.process(face) # calculate coordinates for full frame for i", "cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector, translation_vector, camera_matrix, dist_coeffs) for p in image_points: #cv2.circle(face, (int(p[0]),", "# Right mouth corner ]) # Camera internals ret,frame = cam.read() size=frame.shape focal_length", "if use_dlib_landmarks == True: # extract face landmarks with DLIB dlib_rect = dlib.rectangle(", "this to draw a line sticking out of the nose (nose_end_point2D, jacobian) =", "0.35)\") args = vars(ap.parse_args()) if not args.get(\"input\",False): inputId = 0 else: inputId =", "vars(ap.parse_args()) if not args.get(\"input\",False): inputId = 0 else: inputId = int(args[\"input\"]) print('[INFO] input", "(0,255,0), 2) # extract the face ROI startX = int(left) startY = int(top)", "else: status = status + \"VART\" if rt_fps_valid == True: status = status", "face landmarks = DLIB\") else: print(\"[INFO] face landmarks = VART\") # Update the", "dpu_face_landmark = FaceLandmark(landmark_dpu) dpu_face_landmark.start() # Initialize DLIB based face detector dlib_face_detector = dlib.get_frontal_face_detector()", "i in range(5): landmarks[i,0] = startX + landmarks[i,0]*widthX landmarks[i,1] = startY + landmarks[i,1]*heightY", "ord(\"l\"): use_dlib_landmarks = not use_dlib_landmarks if use_dlib_landmarks == True: print(\"[INFO] face landmarks =", "chin # let's assume that the chin location will behave similar as the", "You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless 
required by", "# calculate head pose dist_coeffs = np.zeros((4,1)) # Assuming no lens distortion (success,", "use this to draw a line sticking out of the nose (nose_end_point2D, jacobian)", "== 0: rt_fps_time = cv2.getTickCount() # Capture image from camera ret,frame = cam.read()", "python face_headpose_dlib.py [--input 0] [--detthreshold 0.55] [--nmsthreshold 0.35] from ctypes import * from", "fps = FPS().start() # init the real-time FPS display rt_fps_count = 0; rt_fps_time", "FPS counter fps = FPS().start() # init the real-time FPS display rt_fps_count =", "# Left Mouth corner (150.0, -150.0, -125.0) # Right mouth corner ]) #", "# Nose tip (0.0, -330.0, -65.0), # Chin (-225.0, 170.0, -135.0), # Left", "applicable law or agreed to in writing, software distributed under the License is", "= cam.read() size=frame.shape focal_length = size[1] center = (size[1]/2, size[0]/2) camera_matrix = np.array(", "print(\"[INFO] face landmarks = DLIB\") # Initialize the camera input print(\"[INFO] starting camera", "+ \" Landmark=\" if use_dlib_landmarks == True: status = status + \"DLIB\" else:", "one DPU kernel landmark_dpu = vart.Runner.create_runner(landmark_subgraphs[0],\"run\") dpu_face_landmark = FaceLandmark(landmark_dpu) dpu_face_landmark.start() # Initialize DLIB", "print(\"[INFO] face detection = VART\") print(\"[INFO] face landmarks = DLIB\") # Initialize the", "the loop if key == ord(\"q\"): break # if the `d` key was", "ap.add_argument(\"-i\", \"--input\", required=False, help = \"input camera identifier (default = 0)\") ap.add_argument(\"-d\", \"--detthreshold\",", "assert len(landmark_subgraphs) == 1 # only one DPU kernel landmark_dpu = vart.Runner.create_runner(landmark_subgraphs[0],\"run\") dpu_face_landmark", ") #print(faces) # loop over the faces for i,(left,top,right,bottom) in enumerate(faces): # draw", "faces for i,(left,top,right,bottom) in enumerate(faces): # draw a bounding box surrounding the object", "(landmarks[4,0], landmarks[4,1]) # Right mouth 
corner ], dtype=\"double\") # estimate approximate location of", "eye right corne (dlib_landmarks.part(48).x, dlib_landmarks.part(48).y), # Left Mouth corner (dlib_landmarks.part(54).x, dlib_landmarks.part(54).y) # Right", "mouth corner ], dtype=\"double\") # estimate approximate location of chin # let's assume", "eye right corne (-150.0, -150.0, -125.0), # Left Mouth corner (150.0, -150.0, -125.0)", "2) # extract the face ROI startX = int(left) startY = int(top) endX", "the nose (nose_end_point2D, jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector, translation_vector, camera_matrix, dist_coeffs) for", "heightY = endY-startY face = frame[startY:endY, startX:endX] if use_dlib_landmarks == False: # extract", ">= 10: t = (cv2.getTickCount() - rt_fps_time)/cv2.getTickFrequency() rt_fps_valid = True rt_fps = 10.0/t", "import * from typing import List import cv2 import numpy as np import", "= int(landmarks[i,0]) # y = int(landmarks[i,1]) # cv2.circle( frame, (x,y), 3, (255,255,255), 2)", "# cv2.circle( frame, (x,y), 3, (255,255,255), 2) # prepare 2D points image_points =", "= DLIB\") else: print(\"[INFO] face detection = VART\") # if the `l` key", "= int(right) endY = int(bottom) #print( startX, endX, startY, endY ) widthX =", "landmark algorithms if key == ord(\"l\"): use_dlib_landmarks = not use_dlib_landmarks if use_dlib_landmarks ==", "rt_fps_valid = True rt_fps = 10.0/t rt_fps_message = \"FPS: {0:.2f}\".format(rt_fps) #print(\"[INFO] \",rt_fps_message) rt_fps_count", "= vart.Runner.create_runner(landmark_subgraphs[0],\"run\") dpu_face_landmark = FaceLandmark(landmark_dpu) dpu_face_landmark.start() # Initialize DLIB based face detector dlib_face_detector", "= 0.55 else: detThreshold = float(args[\"detthreshold\"]) print('[INFO] face detector - softmax threshold =", "center[0]], [0, focal_length, center[1]], [0, 0, 1]], dtype = \"double\" ) print(\"[INFO] Camera", "False: # extract face landmarks landmarks = dpu_face_landmark.process(face) # 
calculate coordinates for full", "= startY + landmarks[i,1]*heightY # draw landmarks #for i in range(5): # x", "vart.Runner.create_runner(landmark_subgraphs[0],\"run\") dpu_face_landmark = FaceLandmark(landmark_dpu) dpu_face_landmark.start() # Initialize DLIB based face detector dlib_face_detector =", "0.35] from ctypes import * from typing import List import cv2 import numpy", "that the chin location will behave similar as the nose location eye_center_x =", "break # if the `d` key was pressed, toggle between detection algorithms if", "= dlib.get_frontal_face_detector() # Initialize DLIB based face landmark dlib_landmark_model = \"./models/shape_predictor_68_face_landmarks.dat\" dlib_face_landmark =", "Chin (-225.0, 170.0, -135.0), # Left eye left corner (225.0, 170.0, -135.0), #", "Update the real-time FPS counter if rt_fps_count == 0: rt_fps_time = cv2.getTickCount() #", "if not args.get(\"nmsthreshold\",False): nmsThreshold = 0.35 else: nmsThreshold = float(args[\"nmsthreshold\"]) print('[INFO] face detector", "(image_points[4][1] + image_points[5][1])/2; image_points[1] = (mouth_center_x + nose_offset_x, mouth_center_y + nose_offset_y); #print(image_points) if", "2) # prepare 2D points image_points = np.array([ (dlib_landmarks.part(30).x, dlib_landmarks.part(30).y), # Nose tip", "eye_center_x); nose_offset_y = (image_points[0][1] - eye_center_y); mouth_center_x = (image_points[4][0] + image_points[5][0])/2; mouth_center_y =", "help = \"face detector softmax threshold (default = 0.55)\") ap.add_argument(\"-n\", \"--nmsthreshold\", required=False, help", "BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See", "visualize it cv2.rectangle( frame, (left,top), (right,bottom), (0,255,0), 2) # extract the face ROI", "eye left corner (landmarks[1,0], landmarks[1,1]), # Right eye right corne (landmarks[3,0], landmarks[3,1]), #", "# draw landmarks #for i in range(5): # x = int(landmarks[i,0]) # y", "for p in image_points: #cv2.circle(face, (int(p[0]), int(p[1])), 3, (0,0,255), -1) #cv2.circle(face, (int(p[0]), int(p[1])),", "cam.set(cv2.CAP_PROP_FRAME_HEIGHT,480) if not (cam.isOpened()): print(\"[ERROR] Failed to open camera \", inputId ) exit()", "key was pressed, toggle between landmark algorithms if key == ord(\"l\"): use_dlib_landmarks =", "face detector - softmax threshold = ',detThreshold) if not args.get(\"nmsthreshold\",False): nmsThreshold = 0.35", "use_dlib_landmarks = True print(\"[INFO] face detection = VART\") print(\"[INFO] face landmarks = DLIB\")", "not args.get(\"input\",False): inputId = 0 else: inputId = int(args[\"input\"]) print('[INFO] input camera identifier", "dpu_face_detector.process(frame) #print(faces) if use_dlib_detection == True: # DLIB based face detector dlib_faces =", "under the License. 
''' # USAGE # python face_headpose_dlib.py [--input 0] [--detthreshold 0.55]", "# loop over the faces for i,(left,top,right,bottom) in enumerate(faces): # draw a bounding", "{0:.2f}\".format(rt_fps) rt_fps_x = 10 rt_fps_y = size[0]-10 # loop over the frames from", "(x,y), 3, (255,255,255), 2) # prepare 2D points image_points = np.array([ (dlib_landmarks.part(30).x, dlib_landmarks.part(30).y),", "= \"/usr/share/vitis_ai_library/models/face_landmark/face_landmark.xmodel\" landmark_graph = xir.Graph.deserialize(landmark_xmodel) landmark_subgraphs = get_child_subgraph_dpu(landmark_graph) assert len(landmark_subgraphs) == 1 #", "`d` key was pressed, toggle between detection algorithms if key == ord(\"d\"): use_dlib_detection", "FPS counter rt_fps_count = rt_fps_count + 1 if rt_fps_count >= 10: t =", "landmark_graph = xir.Graph.deserialize(landmark_xmodel) landmark_subgraphs = get_child_subgraph_dpu(landmark_graph) assert len(landmark_subgraphs) == 1 # only one", "image_points[3][0])/2; eye_center_y = (image_points[2][1] + image_points[3][1])/2; nose_offset_x = (image_points[0][0] - eye_center_x); nose_offset_y =", "dlib_landmarks.part( 8).y), # Chin (dlib_landmarks.part(36).x, dlib_landmarks.part(36).y), # Left eye left corner (dlib_landmarks.part(45).x, dlib_landmarks.part(45).y),", "# if the `q` key was pressed, break from the loop if key", "True: print(\"[INFO] face detection = DLIB\") else: print(\"[INFO] face detection = VART\") #", "\"/usr/share/vitis_ai_library/models/densebox_640_360/densebox_640_360.xmodel\" densebox_graph = xir.Graph.deserialize(densebox_xmodel) densebox_subgraphs = get_child_subgraph_dpu(densebox_graph) assert len(densebox_subgraphs) == 1 # only", "Initialize Vitis-AI/DPU based face landmark landmark_xmodel = \"/usr/share/vitis_ai_library/models/face_landmark/face_landmark.xmodel\" landmark_graph = xir.Graph.deserialize(landmark_xmodel) landmark_subgraphs =", "(mouth_center_x + nose_offset_x, mouth_center_y + nose_offset_y); 
#print(image_points) if use_dlib_landmarks == True: # extract", "= ',nmsThreshold) # Initialize Vitis-AI/DPU based face detector densebox_xmodel = \"/usr/share/vitis_ai_library/models/densebox_640_360/densebox_640_360.xmodel\" densebox_graph =", "# init the real-time FPS display rt_fps_count = 0; rt_fps_time = cv2.getTickCount() rt_fps_valid", "coordinates for full frame for i in range(5): landmarks[i,0] = startX + landmarks[i,0]*widthX", "= status + \"DLIB\" else: status = status + \"VART\" if rt_fps_valid ==", "print(\"[INFO] elapsed time: {:.2f}\".format(fps.elapsed())) print(\"[INFO] elapsed FPS: {:.2f}\".format(fps.fps())) # Stop the face detector", "focal_length, center[1]], [0, 0, 1]], dtype = \"double\" ) print(\"[INFO] Camera Matrix :\\n", "DPU kernel landmark_dpu = vart.Runner.create_runner(landmark_subgraphs[0],\"run\") dpu_face_landmark = FaceLandmark(landmark_dpu) dpu_face_landmark.start() # Initialize DLIB based", "surrounding the object so we can # visualize it cv2.rectangle( frame, (left,top), (right,bottom),", "so we can # visualize it cv2.rectangle( frame, (left,top), (right,bottom), (0,255,0), 2) #", "dlib.rectangle( startX,startY,endX,endY ) dlib_landmarks = dlib_face_landmark(dlib_image,dlib_rect) # draw landmarks #for i in range(dlib_landmarks.num_parts):", "dlib_landmarks = dlib_face_landmark(dlib_image,dlib_rect) # draw landmarks #for i in range(dlib_landmarks.num_parts): # x =", "[[focal_length, 0, center[0]], [0, focal_length, center[1]], [0, 0, 1]], dtype = \"double\" )", "3D point (0, 0, 1000.0) onto the image plane. 
# We use this", "+ \"DLIB\" else: status = status + \"VART\" if rt_fps_valid == True: status", "= (image_points[0][1] - eye_center_y); mouth_center_x = (image_points[4][0] + image_points[5][0])/2; mouth_center_y = (image_points[4][1] +", "= startX + landmarks[i,0]*widthX landmarks[i,1] = startY + landmarks[i,1]*heightY # draw landmarks #for", "densebox_dpu = vart.Runner.create_runner(densebox_subgraphs[0],\"run\") dpu_face_detector = FaceDetect(densebox_dpu,detThreshold,nmsThreshold) dpu_face_detector.start() # Initialize Vitis-AI/DPU based face landmark", "use_dlib_landmarks == False: # extract face landmarks landmarks = dpu_face_landmark.process(face) # calculate coordinates", "size=frame.shape focal_length = size[1] center = (size[1]/2, size[0]/2) camera_matrix = np.array( [[focal_length, 0,", "sys.path.append(os.path.abspath('./')) from vitis_ai_vart.facedetect import FaceDetect from vitis_ai_vart.facelandmark import FaceLandmark from vitis_ai_vart.utils import get_child_subgraph_dpu", "# Vitis-AI/DPU based face detector faces = dpu_face_detector.process(frame) #print(faces) if use_dlib_detection == True:", "parse and parse the arguments ap = argparse.ArgumentParser() ap.add_argument(\"-i\", \"--input\", required=False, help =", "(image_points[2][1] + image_points[3][1])/2; nose_offset_x = (image_points[0][0] - eye_center_x); nose_offset_y = (image_points[0][1] - eye_center_y);", "Nose tip (dlib_landmarks.part( 8).x, dlib_landmarks.part( 8).y), # Chin (dlib_landmarks.part(36).x, dlib_landmarks.part(36).y), # Left eye", "cam.set(cv2.CAP_PROP_FRAME_WIDTH,640) cam.set(cv2.CAP_PROP_FRAME_HEIGHT,480) if not (cam.isOpened()): print(\"[ERROR] Failed to open camera \", inputId )", "kernel landmark_dpu = vart.Runner.create_runner(landmark_subgraphs[0],\"run\") dpu_face_landmark = FaceLandmark(landmark_dpu) dpu_face_landmark.start() # Initialize DLIB based face", "(-150.0, -150.0, -125.0), # Left Mouth corner (150.0, -150.0, -125.0) # Right mouth", "key == ord(\"d\"): 
use_dlib_detection = not use_dlib_detection if use_dlib_detection == True: print(\"[INFO] face", "import numpy as np import vart import pathlib import xir import os import", "-125.0) # Right mouth corner ]) # Camera internals ret,frame = cam.read() size=frame.shape", "time: {:.2f}\".format(fps.elapsed())) print(\"[INFO] elapsed FPS: {:.2f}\".format(fps.fps())) # Stop the face detector dpu_face_detector.stop() del", "(default = 0.55)\") ap.add_argument(\"-n\", \"--nmsthreshold\", required=False, help = \"face detector NMS threshold (default", "i in range(dlib_landmarks.num_parts): # x = int(dlib_landmarks.part(i).x) # y = int(dlib_landmarks.part(i).y) # cv2.circle(", "cam = cv2.VideoCapture(inputId) cam.set(cv2.CAP_PROP_FRAME_WIDTH,640) cam.set(cv2.CAP_PROP_FRAME_HEIGHT,480) if not (cam.isOpened()): print(\"[ERROR] Failed to open camera", "(right,bottom), (0,255,0), 2) # extract the face ROI startX = int(left) startY =", "with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0", "{0}\".format(camera_matrix)); # start the FPS counter fps = FPS().start() # init the real-time", "], dtype=\"double\") #print(image_points) # calculate head pose dist_coeffs = np.zeros((4,1)) # Assuming no", "at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software", "= (image_points[2][1] + image_points[3][1])/2; nose_offset_x = (image_points[0][0] - eye_center_x); nose_offset_y = (image_points[0][1] -", "NMS threshold (default = 0.35)\") args = vars(ap.parse_args()) if not args.get(\"input\",False): inputId =", "face landmark dlib_landmark_model = \"./models/shape_predictor_68_face_landmarks.dat\" dlib_face_landmark = dlib.shape_predictor(dlib_landmark_model) # algorithm selection use_dlib_detection =", "(255,255,255), 2) # draw head pose vector p1 = ( int(image_points[0][0]), int(image_points[0][1])) p2", "was pressed, toggle between detection algorithms if key == ord(\"d\"): 
use_dlib_detection = not", "frame) key = cv2.waitKey(1) & 0xFF # Update the FPS counter fps.update() #" ]
[ "return self._percent @percent.setter def percent(self, value): self._percent = max(0.0, min(1.0, value)) def _draw(self):", "0.02, 1.0), 'FillColor2': (0.0, 0.42, 0.02, 1.0), 'FillColor3': (0.0, 0.42, 0.02, 1.0), 'FillColor4':", "the widget's parent :param name: the name of the widget :param percent: the", "= self.gl_position[0][0] + (self.gl_position[1][0] - self.gl_position[0][0]) * self._percent # Draw fill glBegin(GL_QUADS) glColor4f(self.fill_colors[0][0],", "theme['BorderSize'] self._percent = percent @property def percent(self): return self._percent @percent.setter def percent(self, value):", "self.fill_colors[1][1], self.fill_colors[1][2], self.fill_colors[1][3]) glVertex2f(mid_x, self.gl_position[1][1]) glColor4f(self.fill_colors[2][0], self.fill_colors[2][1], self.fill_colors[2][2], self.fill_colors[2][3]) glVertex2f(mid_x, self.gl_position[2][1]) glColor4f(self.fill_colors[3][0], self.fill_colors[3][1],", "self._percent @percent.setter def percent(self, value): self._percent = max(0.0, min(1.0, value)) def _draw(self): \"\"\"Draw", "the widget :param percent: the initial percent :param sub_theme: sub type of theme", "aspect, size, pos, sub_theme, options) theme = self.theme self.fill_colors = [ theme['FillColor1'], theme['FillColor2'],", "] self.bg_colors = [ theme['BGColor1'], theme['BGColor2'], theme['BGColor3'], theme['BGColor4'], ] self.border_color = theme['BorderColor'] self.border", "y position :param options: various other options \"\"\" Widget.__init__(self, parent, name, aspect, size,", ":param sub_theme: sub type of theme to use :param aspect: constrain the widget", "0-1 floating point number.\"\"\" theme_section = 'ProgressBar' theme_options = { 'FillColor1': (0.0, 0.42,", "self.fill_colors[3][1], self.fill_colors[3][2], self.fill_colors[3][3]) glVertex2f(self.gl_position[3][0], self.gl_position[3][1]) glEnd() # Draw bg glBegin(GL_QUADS) glColor4f(self.bg_colors[0][0], self.bg_colors[0][1], self.bg_colors[0][2],", "'BGColor2': (0, 0, 0, 
1), 'BGColor3': (0, 0, 0, 1), 'BGColor4': (0, 0,", "self.fill_colors[2][1], self.fill_colors[2][2], self.fill_colors[2][3]) glVertex2f(mid_x, self.gl_position[2][1]) glColor4f(self.fill_colors[3][0], self.fill_colors[3][1], self.fill_colors[3][2], self.fill_colors[3][3]) glVertex2f(self.gl_position[3][0], self.gl_position[3][1]) glEnd() #", "x and y position :param options: various other options \"\"\" Widget.__init__(self, parent, name,", "self.fill_colors = [ theme['FillColor1'], theme['FillColor2'], theme['FillColor3'], theme['FillColor4'], ] self.bg_colors = [ theme['BGColor1'], theme['BGColor2'],", "self.bg_colors[3][1], self.bg_colors[3][2], self.bg_colors[3][3]) glVertex2f(mid_x, self.gl_position[3][1]) glEnd() # Draw outline glDisable(GL_POLYGON_OFFSET_FILL) r, g, b,", "size, pos, sub_theme, options) theme = self.theme self.fill_colors = [ theme['FillColor1'], theme['FillColor2'], theme['FillColor3'],", "def percent(self, value): self._percent = max(0.0, min(1.0, value)) def _draw(self): \"\"\"Draw the progress", "aspect: constrain the widget size to a specified aspect ratio :param size: a", "self.bg_colors[2][1], self.bg_colors[2][2], self.bg_colors[2][3]) glVertex2f(self.gl_position[2][0], self.gl_position[2][1]) glColor4f(self.bg_colors[3][0], self.bg_colors[3][1], self.bg_colors[3][2], self.bg_colors[3][3]) glVertex2f(mid_x, self.gl_position[3][1]) glEnd() #", "point number.\"\"\" theme_section = 'ProgressBar' theme_options = { 'FillColor1': (0.0, 0.42, 0.02, 1.0),", "fill glBegin(GL_QUADS) glColor4f(self.fill_colors[0][0], self.fill_colors[0][1], self.fill_colors[0][2], self.fill_colors[0][3]) glVertex2f(self.gl_position[0][0], self.gl_position[0][1]) glColor4f(self.fill_colors[1][0], self.fill_colors[1][1], self.fill_colors[1][2], self.fill_colors[1][3]) glVertex2f(mid_x,", "glColor4f(r, g, b, a) glPolygonMode(GL_FRONT, GL_LINE) glLineWidth(self.border) glBegin(GL_QUADS) for i in range(4): glVertex2f(self.gl_position[i][0],", "use :param aspect: 
constrain the widget size to a specified aspect ratio :param", "width and height :param pos: a tuple containing the x and y position", "size: a tuple containing the width and height :param pos: a tuple containing", "number.\"\"\" theme_section = 'ProgressBar' theme_options = { 'FillColor1': (0.0, 0.42, 0.02, 1.0), 'FillColor2':", "progress bar\"\"\" # Enable alpha blending glEnable(GL_BLEND) glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) # Enable polygon offset", "min(1.0, value)) def _draw(self): \"\"\"Draw the progress bar\"\"\" # Enable alpha blending glEnable(GL_BLEND)", "which assumes percent as a 0-1 floating point number.\"\"\" theme_section = 'ProgressBar' theme_options", "self.bg_colors[2][2], self.bg_colors[2][3]) glVertex2f(self.gl_position[2][0], self.gl_position[2][1]) glColor4f(self.bg_colors[3][0], self.bg_colors[3][1], self.bg_colors[3][2], self.bg_colors[3][3]) glVertex2f(mid_x, self.gl_position[3][1]) glEnd() # Draw", "0, 1), 'BGColor3': (0, 0, 0, 1), 'BGColor4': (0, 0, 0, 1), 'BorderSize':", "'BorderSize': 1, 'BorderColor': (0, 0, 0, 1), } def __init__(self, parent, name=None, percent=1.0,", "1), 'BorderSize': 1, 'BorderColor': (0, 0, 0, 1), } def __init__(self, parent, name=None,", "self.fill_colors[3][3]) glVertex2f(self.gl_position[3][0], self.gl_position[3][1]) glEnd() # Draw bg glBegin(GL_QUADS) glColor4f(self.bg_colors[0][0], self.bg_colors[0][1], self.bg_colors[0][2], self.bg_colors[0][3]) glVertex2f(mid_x,", "self.fill_colors[3][2], self.fill_colors[3][3]) glVertex2f(self.gl_position[3][0], self.gl_position[3][1]) glEnd() # Draw bg glBegin(GL_QUADS) glColor4f(self.bg_colors[0][0], self.bg_colors[0][1], self.bg_colors[0][2], self.bg_colors[0][3])", "self.fill_colors[2][2], self.fill_colors[2][3]) glVertex2f(mid_x, self.gl_position[2][1]) glColor4f(self.fill_colors[3][0], self.fill_colors[3][1], self.fill_colors[3][2], self.fill_colors[3][3]) glVertex2f(self.gl_position[3][0], self.gl_position[3][1]) glEnd() # Draw", "percent(self): 
return self._percent @percent.setter def percent(self, value): self._percent = max(0.0, min(1.0, value)) def", ".gl_utils import * from .widget import Widget, BGUI_DEFAULT class ProgressBar(Widget): \"\"\"A solid progress", "aspect=None, size=[1, 1], pos=[0, 0], options=BGUI_DEFAULT): \"\"\" :param parent: the widget's parent :param", "import * from .widget import Widget, BGUI_DEFAULT class ProgressBar(Widget): \"\"\"A solid progress bar.", "theme to use :param aspect: constrain the widget size to a specified aspect", "import Widget, BGUI_DEFAULT class ProgressBar(Widget): \"\"\"A solid progress bar. Controlled via the 'percent'", "self.bg_colors = [ theme['BGColor1'], theme['BGColor2'], theme['BGColor3'], theme['BGColor4'], ] self.border_color = theme['BorderColor'] self.border =", "theme['BorderColor'] self.border = theme['BorderSize'] self._percent = percent @property def percent(self): return self._percent @percent.setter", "a 0-1 floating point number.\"\"\" theme_section = 'ProgressBar' theme_options = { 'FillColor1': (0.0,", "name: the name of the widget :param percent: the initial percent :param sub_theme:", "Enable alpha blending glEnable(GL_BLEND) glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) # Enable polygon offset glEnable(GL_POLYGON_OFFSET_FILL) glPolygonOffset(1.0, 1.0)", "and y position :param options: various other options \"\"\" Widget.__init__(self, parent, name, aspect,", "(0.0, 0.42, 0.02, 1.0), 'FillColor4': (0.0, 0.42, 0.02, 1.0), 'BGColor1': (0, 0, 0,", "the progress bar\"\"\" # Enable alpha blending glEnable(GL_BLEND) glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) # Enable polygon", "sub_theme: sub type of theme to use :param aspect: constrain the widget size", "solid progress bar. 
Controlled via the 'percent' property which assumes percent as a", "value): self._percent = max(0.0, min(1.0, value)) def _draw(self): \"\"\"Draw the progress bar\"\"\" #", "options) theme = self.theme self.fill_colors = [ theme['FillColor1'], theme['FillColor2'], theme['FillColor3'], theme['FillColor4'], ] self.bg_colors", "(0, 0, 0, 1), } def __init__(self, parent, name=None, percent=1.0, sub_theme='', aspect=None, size=[1,", "(0.0, 0.42, 0.02, 1.0), 'FillColor3': (0.0, 0.42, 0.02, 1.0), 'FillColor4': (0.0, 0.42, 0.02,", "glBegin(GL_QUADS) glColor4f(self.bg_colors[0][0], self.bg_colors[0][1], self.bg_colors[0][2], self.bg_colors[0][3]) glVertex2f(mid_x, self.gl_position[0][1]) glColor4f(self.bg_colors[1][0], self.bg_colors[1][1], self.bg_colors[1][2], self.bg_colors[1][3]) glVertex2f(self.gl_position[1][0], self.gl_position[1][1])", "self.gl_position[0][0] + (self.gl_position[1][0] - self.gl_position[0][0]) * self._percent # Draw fill glBegin(GL_QUADS) glColor4f(self.fill_colors[0][0], self.fill_colors[0][1],", "a tuple containing the x and y position :param options: various other options", "options=BGUI_DEFAULT): \"\"\" :param parent: the widget's parent :param name: the name of the", "theme['FillColor2'], theme['FillColor3'], theme['FillColor4'], ] self.bg_colors = [ theme['BGColor1'], theme['BGColor2'], theme['BGColor3'], theme['BGColor4'], ] self.border_color", "size=[1, 1], pos=[0, 0], options=BGUI_DEFAULT): \"\"\" :param parent: the widget's parent :param name:", "glVertex2f(self.gl_position[0][0], self.gl_position[0][1]) glColor4f(self.fill_colors[1][0], self.fill_colors[1][1], self.fill_colors[1][2], self.fill_colors[1][3]) glVertex2f(mid_x, self.gl_position[1][1]) glColor4f(self.fill_colors[2][0], self.fill_colors[2][1], self.fill_colors[2][2], self.fill_colors[2][3]) glVertex2f(mid_x,", "value)) def _draw(self): \"\"\"Draw the progress bar\"\"\" # Enable alpha blending glEnable(GL_BLEND) glBlendFunc(GL_SRC_ALPHA,", "self.border_color = 
theme['BorderColor'] self.border = theme['BorderSize'] self._percent = percent @property def percent(self): return", "the initial percent :param sub_theme: sub type of theme to use :param aspect:", "0, 0, 1), 'BGColor2': (0, 0, 0, 1), 'BGColor3': (0, 0, 0, 1),", "Widget.__init__(self, parent, name, aspect, size, pos, sub_theme, options) theme = self.theme self.fill_colors =", "ProgressBar(Widget): \"\"\"A solid progress bar. Controlled via the 'percent' property which assumes percent", "options: various other options \"\"\" Widget.__init__(self, parent, name, aspect, size, pos, sub_theme, options)", "self.bg_colors[2][3]) glVertex2f(self.gl_position[2][0], self.gl_position[2][1]) glColor4f(self.bg_colors[3][0], self.bg_colors[3][1], self.bg_colors[3][2], self.bg_colors[3][3]) glVertex2f(mid_x, self.gl_position[3][1]) glEnd() # Draw outline", "theme['BGColor1'], theme['BGColor2'], theme['BGColor3'], theme['BGColor4'], ] self.border_color = theme['BorderColor'] self.border = theme['BorderSize'] self._percent =", "tuple containing the width and height :param pos: a tuple containing the x", "glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) # Enable polygon offset glEnable(GL_POLYGON_OFFSET_FILL) glPolygonOffset(1.0, 1.0) mid_x = self.gl_position[0][0] +", "Draw bg glBegin(GL_QUADS) glColor4f(self.bg_colors[0][0], self.bg_colors[0][1], self.bg_colors[0][2], self.bg_colors[0][3]) glVertex2f(mid_x, self.gl_position[0][1]) glColor4f(self.bg_colors[1][0], self.bg_colors[1][1], self.bg_colors[1][2], self.bg_colors[1][3])", "0, 0, 1), 'BorderSize': 1, 'BorderColor': (0, 0, 0, 1), } def __init__(self,", "glVertex2f(mid_x, self.gl_position[3][1]) glEnd() # Draw outline glDisable(GL_POLYGON_OFFSET_FILL) r, g, b, a = self.border_color", "BGUI_DEFAULT class ProgressBar(Widget): \"\"\"A solid progress bar. 
Controlled via the 'percent' property which", "(0, 0, 0, 1), 'BorderSize': 1, 'BorderColor': (0, 0, 0, 1), } def", "0.02, 1.0), 'FillColor4': (0.0, 0.42, 0.02, 1.0), 'BGColor1': (0, 0, 0, 1), 'BGColor2':", "glColor4f(self.fill_colors[3][0], self.fill_colors[3][1], self.fill_colors[3][2], self.fill_colors[3][3]) glVertex2f(self.gl_position[3][0], self.gl_position[3][1]) glEnd() # Draw bg glBegin(GL_QUADS) glColor4f(self.bg_colors[0][0], self.bg_colors[0][1],", "Draw fill glBegin(GL_QUADS) glColor4f(self.fill_colors[0][0], self.fill_colors[0][1], self.fill_colors[0][2], self.fill_colors[0][3]) glVertex2f(self.gl_position[0][0], self.gl_position[0][1]) glColor4f(self.fill_colors[1][0], self.fill_colors[1][1], self.fill_colors[1][2], self.fill_colors[1][3])", "'FillColor4': (0.0, 0.42, 0.02, 1.0), 'BGColor1': (0, 0, 0, 1), 'BGColor2': (0, 0,", "1), 'BGColor4': (0, 0, 0, 1), 'BorderSize': 1, 'BorderColor': (0, 0, 0, 1),", "self.border = theme['BorderSize'] self._percent = percent @property def percent(self): return self._percent @percent.setter def", "self.gl_position[0][1]) glColor4f(self.fill_colors[1][0], self.fill_colors[1][1], self.fill_colors[1][2], self.fill_colors[1][3]) glVertex2f(mid_x, self.gl_position[1][1]) glColor4f(self.fill_colors[2][0], self.fill_colors[2][1], self.fill_colors[2][2], self.fill_colors[2][3]) glVertex2f(mid_x, self.gl_position[2][1])", "1], pos=[0, 0], options=BGUI_DEFAULT): \"\"\" :param parent: the widget's parent :param name: the", "# Draw bg glBegin(GL_QUADS) glColor4f(self.bg_colors[0][0], self.bg_colors[0][1], self.bg_colors[0][2], self.bg_colors[0][3]) glVertex2f(mid_x, self.gl_position[0][1]) glColor4f(self.bg_colors[1][0], self.bg_colors[1][1], self.bg_colors[1][2],", "= [ theme['BGColor1'], theme['BGColor2'], theme['BGColor3'], theme['BGColor4'], ] self.border_color = theme['BorderColor'] self.border = theme['BorderSize']", "size to a specified aspect ratio :param size: a tuple containing the width", "containing the width and 
height :param pos: a tuple containing the x and", "0.42, 0.02, 1.0), 'FillColor3': (0.0, 0.42, 0.02, 1.0), 'FillColor4': (0.0, 0.42, 0.02, 1.0),", "g, b, a = self.border_color glColor4f(r, g, b, a) glPolygonMode(GL_FRONT, GL_LINE) glLineWidth(self.border) glBegin(GL_QUADS)", "GL_ONE_MINUS_SRC_ALPHA) # Enable polygon offset glEnable(GL_POLYGON_OFFSET_FILL) glPolygonOffset(1.0, 1.0) mid_x = self.gl_position[0][0] + (self.gl_position[1][0]", "blending glEnable(GL_BLEND) glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) # Enable polygon offset glEnable(GL_POLYGON_OFFSET_FILL) glPolygonOffset(1.0, 1.0) mid_x =", "self._percent # Draw fill glBegin(GL_QUADS) glColor4f(self.fill_colors[0][0], self.fill_colors[0][1], self.fill_colors[0][2], self.fill_colors[0][3]) glVertex2f(self.gl_position[0][0], self.gl_position[0][1]) glColor4f(self.fill_colors[1][0], self.fill_colors[1][1],", "glPolygonMode(GL_FRONT, GL_LINE) glLineWidth(self.border) glBegin(GL_QUADS) for i in range(4): glVertex2f(self.gl_position[i][0], self.gl_position[i][1]) glEnd() glPolygonMode(GL_FRONT, GL_FILL)", "name=None, percent=1.0, sub_theme='', aspect=None, size=[1, 1], pos=[0, 0], options=BGUI_DEFAULT): \"\"\" :param parent: the", "# Draw fill glBegin(GL_QUADS) glColor4f(self.fill_colors[0][0], self.fill_colors[0][1], self.fill_colors[0][2], self.fill_colors[0][3]) glVertex2f(self.gl_position[0][0], self.gl_position[0][1]) glColor4f(self.fill_colors[1][0], self.fill_colors[1][1], self.fill_colors[1][2],", ":param aspect: constrain the widget size to a specified aspect ratio :param size:", "from .widget import Widget, BGUI_DEFAULT class ProgressBar(Widget): \"\"\"A solid progress bar. 
Controlled via", ":param options: various other options \"\"\" Widget.__init__(self, parent, name, aspect, size, pos, sub_theme,", "glVertex2f(mid_x, self.gl_position[2][1]) glColor4f(self.fill_colors[3][0], self.fill_colors[3][1], self.fill_colors[3][2], self.fill_colors[3][3]) glVertex2f(self.gl_position[3][0], self.gl_position[3][1]) glEnd() # Draw bg glBegin(GL_QUADS)", "self.gl_position[1][1]) glColor4f(self.bg_colors[2][0], self.bg_colors[2][1], self.bg_colors[2][2], self.bg_colors[2][3]) glVertex2f(self.gl_position[2][0], self.gl_position[2][1]) glColor4f(self.bg_colors[3][0], self.bg_colors[3][1], self.bg_colors[3][2], self.bg_colors[3][3]) glVertex2f(mid_x, self.gl_position[3][1])", "Widget, BGUI_DEFAULT class ProgressBar(Widget): \"\"\"A solid progress bar. Controlled via the 'percent' property", "self.bg_colors[0][1], self.bg_colors[0][2], self.bg_colors[0][3]) glVertex2f(mid_x, self.gl_position[0][1]) glColor4f(self.bg_colors[1][0], self.bg_colors[1][1], self.bg_colors[1][2], self.bg_colors[1][3]) glVertex2f(self.gl_position[1][0], self.gl_position[1][1]) glColor4f(self.bg_colors[2][0], self.bg_colors[2][1],", "glBegin(GL_QUADS) glColor4f(self.fill_colors[0][0], self.fill_colors[0][1], self.fill_colors[0][2], self.fill_colors[0][3]) glVertex2f(self.gl_position[0][0], self.gl_position[0][1]) glColor4f(self.fill_colors[1][0], self.fill_colors[1][1], self.fill_colors[1][2], self.fill_colors[1][3]) glVertex2f(mid_x, self.gl_position[1][1])", "self.bg_colors[1][2], self.bg_colors[1][3]) glVertex2f(self.gl_position[1][0], self.gl_position[1][1]) glColor4f(self.bg_colors[2][0], self.bg_colors[2][1], self.bg_colors[2][2], self.bg_colors[2][3]) glVertex2f(self.gl_position[2][0], self.gl_position[2][1]) glColor4f(self.bg_colors[3][0], self.bg_colors[3][1], self.bg_colors[3][2],", "and height :param pos: a tuple containing the x and y position :param", "[ theme['BGColor1'], theme['BGColor2'], theme['BGColor3'], theme['BGColor4'], ] self.border_color = 
theme['BorderColor'] self.border = theme['BorderSize'] self._percent", "glVertex2f(self.gl_position[2][0], self.gl_position[2][1]) glColor4f(self.bg_colors[3][0], self.bg_colors[3][1], self.bg_colors[3][2], self.bg_colors[3][3]) glVertex2f(mid_x, self.gl_position[3][1]) glEnd() # Draw outline glDisable(GL_POLYGON_OFFSET_FILL)", "percent as a 0-1 floating point number.\"\"\" theme_section = 'ProgressBar' theme_options = {", "from .gl_utils import * from .widget import Widget, BGUI_DEFAULT class ProgressBar(Widget): \"\"\"A solid", "floating point number.\"\"\" theme_section = 'ProgressBar' theme_options = { 'FillColor1': (0.0, 0.42, 0.02,", "= [ theme['FillColor1'], theme['FillColor2'], theme['FillColor3'], theme['FillColor4'], ] self.bg_colors = [ theme['BGColor1'], theme['BGColor2'], theme['BGColor3'],", "b, a) glPolygonMode(GL_FRONT, GL_LINE) glLineWidth(self.border) glBegin(GL_QUADS) for i in range(4): glVertex2f(self.gl_position[i][0], self.gl_position[i][1]) glEnd()", "0.42, 0.02, 1.0), 'FillColor2': (0.0, 0.42, 0.02, 1.0), 'FillColor3': (0.0, 0.42, 0.02, 1.0),", "b, a = self.border_color glColor4f(r, g, b, a) glPolygonMode(GL_FRONT, GL_LINE) glLineWidth(self.border) glBegin(GL_QUADS) for", "to use :param aspect: constrain the widget size to a specified aspect ratio", "1.0), 'FillColor2': (0.0, 0.42, 0.02, 1.0), 'FillColor3': (0.0, 0.42, 0.02, 1.0), 'FillColor4': (0.0,", "@percent.setter def percent(self, value): self._percent = max(0.0, min(1.0, value)) def _draw(self): \"\"\"Draw the", "(self.gl_position[1][0] - self.gl_position[0][0]) * self._percent # Draw fill glBegin(GL_QUADS) glColor4f(self.fill_colors[0][0], self.fill_colors[0][1], self.fill_colors[0][2], self.fill_colors[0][3])", "self.fill_colors[0][2], self.fill_colors[0][3]) glVertex2f(self.gl_position[0][0], self.gl_position[0][1]) glColor4f(self.fill_colors[1][0], self.fill_colors[1][1], self.fill_colors[1][2], self.fill_colors[1][3]) glVertex2f(mid_x, self.gl_position[1][1]) 
glColor4f(self.fill_colors[2][0], self.fill_colors[2][1], self.fill_colors[2][2],", "g, b, a) glPolygonMode(GL_FRONT, GL_LINE) glLineWidth(self.border) glBegin(GL_QUADS) for i in range(4): glVertex2f(self.gl_position[i][0], self.gl_position[i][1])", "1), 'BGColor3': (0, 0, 0, 1), 'BGColor4': (0, 0, 0, 1), 'BorderSize': 1,", "'BGColor1': (0, 0, 0, 1), 'BGColor2': (0, 0, 0, 1), 'BGColor3': (0, 0,", "theme['BGColor3'], theme['BGColor4'], ] self.border_color = theme['BorderColor'] self.border = theme['BorderSize'] self._percent = percent @property", "theme['FillColor1'], theme['FillColor2'], theme['FillColor3'], theme['FillColor4'], ] self.bg_colors = [ theme['BGColor1'], theme['BGColor2'], theme['BGColor3'], theme['BGColor4'], ]", "'BGColor4': (0, 0, 0, 1), 'BorderSize': 1, 'BorderColor': (0, 0, 0, 1), }", "theme_options = { 'FillColor1': (0.0, 0.42, 0.02, 1.0), 'FillColor2': (0.0, 0.42, 0.02, 1.0),", "self.bg_colors[1][1], self.bg_colors[1][2], self.bg_colors[1][3]) glVertex2f(self.gl_position[1][0], self.gl_position[1][1]) glColor4f(self.bg_colors[2][0], self.bg_colors[2][1], self.bg_colors[2][2], self.bg_colors[2][3]) glVertex2f(self.gl_position[2][0], self.gl_position[2][1]) glColor4f(self.bg_colors[3][0], self.bg_colors[3][1],", "1.0) mid_x = self.gl_position[0][0] + (self.gl_position[1][0] - self.gl_position[0][0]) * self._percent # Draw fill", "} def __init__(self, parent, name=None, percent=1.0, sub_theme='', aspect=None, size=[1, 1], pos=[0, 0], options=BGUI_DEFAULT):", "sub type of theme to use :param aspect: constrain the widget size to", "percent :param sub_theme: sub type of theme to use :param aspect: constrain the", "{ 'FillColor1': (0.0, 0.42, 0.02, 1.0), 'FillColor2': (0.0, 0.42, 0.02, 1.0), 'FillColor3': (0.0,", "* self._percent # Draw fill glBegin(GL_QUADS) glColor4f(self.fill_colors[0][0], self.fill_colors[0][1], self.fill_colors[0][2], self.fill_colors[0][3]) glVertex2f(self.gl_position[0][0], self.gl_position[0][1]) 
glColor4f(self.fill_colors[1][0],", "0.02, 1.0), 'BGColor1': (0, 0, 0, 1), 'BGColor2': (0, 0, 0, 1), 'BGColor3':", "- self.gl_position[0][0]) * self._percent # Draw fill glBegin(GL_QUADS) glColor4f(self.fill_colors[0][0], self.fill_colors[0][1], self.fill_colors[0][2], self.fill_colors[0][3]) glVertex2f(self.gl_position[0][0],", "GL_LINE) glLineWidth(self.border) glBegin(GL_QUADS) for i in range(4): glVertex2f(self.gl_position[i][0], self.gl_position[i][1]) glEnd() glPolygonMode(GL_FRONT, GL_FILL) Widget._draw(self)", "self.fill_colors[1][2], self.fill_colors[1][3]) glVertex2f(mid_x, self.gl_position[1][1]) glColor4f(self.fill_colors[2][0], self.fill_colors[2][1], self.fill_colors[2][2], self.fill_colors[2][3]) glVertex2f(mid_x, self.gl_position[2][1]) glColor4f(self.fill_colors[3][0], self.fill_colors[3][1], self.fill_colors[3][2],", "= theme['BorderSize'] self._percent = percent @property def percent(self): return self._percent @percent.setter def percent(self,", "0, 1), } def __init__(self, parent, name=None, percent=1.0, sub_theme='', aspect=None, size=[1, 1], pos=[0,", "of theme to use :param aspect: constrain the widget size to a specified", "# Draw outline glDisable(GL_POLYGON_OFFSET_FILL) r, g, b, a = self.border_color glColor4f(r, g, b,", "self.gl_position[0][0]) * self._percent # Draw fill glBegin(GL_QUADS) glColor4f(self.fill_colors[0][0], self.fill_colors[0][1], self.fill_colors[0][2], self.fill_colors[0][3]) glVertex2f(self.gl_position[0][0], self.gl_position[0][1])", "the widget size to a specified aspect ratio :param size: a tuple containing", "pos: a tuple containing the x and y position :param options: various other", "0, 1), 'BGColor2': (0, 0, 0, 1), 'BGColor3': (0, 0, 0, 1), 'BGColor4':", "ratio :param size: a tuple containing the width and height :param pos: a", "self.fill_colors[1][3]) glVertex2f(mid_x, self.gl_position[1][1]) glColor4f(self.fill_colors[2][0], self.fill_colors[2][1], self.fill_colors[2][2], self.fill_colors[2][3]) 
glVertex2f(mid_x, self.gl_position[2][1]) glColor4f(self.fill_colors[3][0], self.fill_colors[3][1], self.fill_colors[3][2], self.fill_colors[3][3])", "constrain the widget size to a specified aspect ratio :param size: a tuple", "theme['BGColor4'], ] self.border_color = theme['BorderColor'] self.border = theme['BorderSize'] self._percent = percent @property def", "theme['FillColor4'], ] self.bg_colors = [ theme['BGColor1'], theme['BGColor2'], theme['BGColor3'], theme['BGColor4'], ] self.border_color = theme['BorderColor']", "0, 1), 'BGColor4': (0, 0, 0, 1), 'BorderSize': 1, 'BorderColor': (0, 0, 0,", "bg glBegin(GL_QUADS) glColor4f(self.bg_colors[0][0], self.bg_colors[0][1], self.bg_colors[0][2], self.bg_colors[0][3]) glVertex2f(mid_x, self.gl_position[0][1]) glColor4f(self.bg_colors[1][0], self.bg_colors[1][1], self.bg_colors[1][2], self.bg_colors[1][3]) glVertex2f(self.gl_position[1][0],", "the 'percent' property which assumes percent as a 0-1 floating point number.\"\"\" theme_section", "Enable polygon offset glEnable(GL_POLYGON_OFFSET_FILL) glPolygonOffset(1.0, 1.0) mid_x = self.gl_position[0][0] + (self.gl_position[1][0] - self.gl_position[0][0])", "self.gl_position[0][1]) glColor4f(self.bg_colors[1][0], self.bg_colors[1][1], self.bg_colors[1][2], self.bg_colors[1][3]) glVertex2f(self.gl_position[1][0], self.gl_position[1][1]) glColor4f(self.bg_colors[2][0], self.bg_colors[2][1], self.bg_colors[2][2], self.bg_colors[2][3]) glVertex2f(self.gl_position[2][0], self.gl_position[2][1])", "offset glEnable(GL_POLYGON_OFFSET_FILL) glPolygonOffset(1.0, 1.0) mid_x = self.gl_position[0][0] + (self.gl_position[1][0] - self.gl_position[0][0]) * self._percent", "various other options \"\"\" Widget.__init__(self, parent, name, aspect, size, pos, sub_theme, options) theme", "0.42, 0.02, 1.0), 'FillColor4': (0.0, 0.42, 0.02, 1.0), 'BGColor1': (0, 0, 0, 1),", "\"\"\"Draw the progress bar\"\"\" # Enable alpha blending glEnable(GL_BLEND) glBlendFunc(GL_SRC_ALPHA, 
GL_ONE_MINUS_SRC_ALPHA) # Enable", "glVertex2f(mid_x, self.gl_position[1][1]) glColor4f(self.fill_colors[2][0], self.fill_colors[2][1], self.fill_colors[2][2], self.fill_colors[2][3]) glVertex2f(mid_x, self.gl_position[2][1]) glColor4f(self.fill_colors[3][0], self.fill_colors[3][1], self.fill_colors[3][2], self.fill_colors[3][3]) glVertex2f(self.gl_position[3][0],", "= { 'FillColor1': (0.0, 0.42, 0.02, 1.0), 'FillColor2': (0.0, 0.42, 0.02, 1.0), 'FillColor3':", "(0.0, 0.42, 0.02, 1.0), 'BGColor1': (0, 0, 0, 1), 'BGColor2': (0, 0, 0,", "(0, 0, 0, 1), 'BGColor2': (0, 0, 0, 1), 'BGColor3': (0, 0, 0,", "# Enable polygon offset glEnable(GL_POLYGON_OFFSET_FILL) glPolygonOffset(1.0, 1.0) mid_x = self.gl_position[0][0] + (self.gl_position[1][0] -", "pos=[0, 0], options=BGUI_DEFAULT): \"\"\" :param parent: the widget's parent :param name: the name", "progress bar. Controlled via the 'percent' property which assumes percent as a 0-1", "self.border_color glColor4f(r, g, b, a) glPolygonMode(GL_FRONT, GL_LINE) glLineWidth(self.border) glBegin(GL_QUADS) for i in range(4):", ":param parent: the widget's parent :param name: the name of the widget :param", "bar\"\"\" # Enable alpha blending glEnable(GL_BLEND) glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) # Enable polygon offset glEnable(GL_POLYGON_OFFSET_FILL)", "def __init__(self, parent, name=None, percent=1.0, sub_theme='', aspect=None, size=[1, 1], pos=[0, 0], options=BGUI_DEFAULT): \"\"\"", "] self.border_color = theme['BorderColor'] self.border = theme['BorderSize'] self._percent = percent @property def percent(self):", "initial percent :param sub_theme: sub type of theme to use :param aspect: constrain", "__init__(self, parent, name=None, percent=1.0, sub_theme='', aspect=None, size=[1, 1], pos=[0, 0], options=BGUI_DEFAULT): \"\"\" :param", "glColor4f(self.fill_colors[1][0], self.fill_colors[1][1], self.fill_colors[1][2], self.fill_colors[1][3]) glVertex2f(mid_x, self.gl_position[1][1]) glColor4f(self.fill_colors[2][0], 
self.fill_colors[2][1], self.fill_colors[2][2], self.fill_colors[2][3]) glVertex2f(mid_x, self.gl_position[2][1]) glColor4f(self.fill_colors[3][0],", "1), 'BGColor2': (0, 0, 0, 1), 'BGColor3': (0, 0, 0, 1), 'BGColor4': (0,", "glVertex2f(mid_x, self.gl_position[0][1]) glColor4f(self.bg_colors[1][0], self.bg_colors[1][1], self.bg_colors[1][2], self.bg_colors[1][3]) glVertex2f(self.gl_position[1][0], self.gl_position[1][1]) glColor4f(self.bg_colors[2][0], self.bg_colors[2][1], self.bg_colors[2][2], self.bg_colors[2][3]) glVertex2f(self.gl_position[2][0],", "(0, 0, 0, 1), 'BGColor3': (0, 0, 0, 1), 'BGColor4': (0, 0, 0,", "\"\"\" Widget.__init__(self, parent, name, aspect, size, pos, sub_theme, options) theme = self.theme self.fill_colors", "via the 'percent' property which assumes percent as a 0-1 floating point number.\"\"\"", "a) glPolygonMode(GL_FRONT, GL_LINE) glLineWidth(self.border) glBegin(GL_QUADS) for i in range(4): glVertex2f(self.gl_position[i][0], self.gl_position[i][1]) glEnd() glPolygonMode(GL_FRONT,", "self.fill_colors[0][3]) glVertex2f(self.gl_position[0][0], self.gl_position[0][1]) glColor4f(self.fill_colors[1][0], self.fill_colors[1][1], self.fill_colors[1][2], self.fill_colors[1][3]) glVertex2f(mid_x, self.gl_position[1][1]) glColor4f(self.fill_colors[2][0], self.fill_colors[2][1], self.fill_colors[2][2], self.fill_colors[2][3])", "def percent(self): return self._percent @percent.setter def percent(self, value): self._percent = max(0.0, min(1.0, value))", "mid_x = self.gl_position[0][0] + (self.gl_position[1][0] - self.gl_position[0][0]) * self._percent # Draw fill glBegin(GL_QUADS)", "glEnd() # Draw outline glDisable(GL_POLYGON_OFFSET_FILL) r, g, b, a = self.border_color glColor4f(r, g,", "bar. 
Controlled via the 'percent' property which assumes percent as a 0-1 floating", "parent, name=None, percent=1.0, sub_theme='', aspect=None, size=[1, 1], pos=[0, 0], options=BGUI_DEFAULT): \"\"\" :param parent:", "as a 0-1 floating point number.\"\"\" theme_section = 'ProgressBar' theme_options = { 'FillColor1':", "= theme['BorderColor'] self.border = theme['BorderSize'] self._percent = percent @property def percent(self): return self._percent", "tuple containing the x and y position :param options: various other options \"\"\"", "glColor4f(self.bg_colors[0][0], self.bg_colors[0][1], self.bg_colors[0][2], self.bg_colors[0][3]) glVertex2f(mid_x, self.gl_position[0][1]) glColor4f(self.bg_colors[1][0], self.bg_colors[1][1], self.bg_colors[1][2], self.bg_colors[1][3]) glVertex2f(self.gl_position[1][0], self.gl_position[1][1]) glColor4f(self.bg_colors[2][0],", "0.02, 1.0), 'FillColor3': (0.0, 0.42, 0.02, 1.0), 'FillColor4': (0.0, 0.42, 0.02, 1.0), 'BGColor1':", ":param percent: the initial percent :param sub_theme: sub type of theme to use", "glVertex2f(self.gl_position[3][0], self.gl_position[3][1]) glEnd() # Draw bg glBegin(GL_QUADS) glColor4f(self.bg_colors[0][0], self.bg_colors[0][1], self.bg_colors[0][2], self.bg_colors[0][3]) glVertex2f(mid_x, self.gl_position[0][1])", ":param name: the name of the widget :param percent: the initial percent :param", "pos, sub_theme, options) theme = self.theme self.fill_colors = [ theme['FillColor1'], theme['FillColor2'], theme['FillColor3'], theme['FillColor4'],", "'percent' property which assumes percent as a 0-1 floating point number.\"\"\" theme_section =", "(0.0, 0.42, 0.02, 1.0), 'FillColor2': (0.0, 0.42, 0.02, 1.0), 'FillColor3': (0.0, 0.42, 0.02,", "self.bg_colors[1][3]) glVertex2f(self.gl_position[1][0], self.gl_position[1][1]) glColor4f(self.bg_colors[2][0], self.bg_colors[2][1], self.bg_colors[2][2], self.bg_colors[2][3]) glVertex2f(self.gl_position[2][0], self.gl_position[2][1]) glColor4f(self.bg_colors[3][0], 
self.bg_colors[3][1], self.bg_colors[3][2], self.bg_colors[3][3])", "alpha blending glEnable(GL_BLEND) glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) # Enable polygon offset glEnable(GL_POLYGON_OFFSET_FILL) glPolygonOffset(1.0, 1.0) mid_x", "0, 0, 1), } def __init__(self, parent, name=None, percent=1.0, sub_theme='', aspect=None, size=[1, 1],", "position :param options: various other options \"\"\" Widget.__init__(self, parent, name, aspect, size, pos,", "_draw(self): \"\"\"Draw the progress bar\"\"\" # Enable alpha blending glEnable(GL_BLEND) glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) #", "a specified aspect ratio :param size: a tuple containing the width and height", "class ProgressBar(Widget): \"\"\"A solid progress bar. Controlled via the 'percent' property which assumes", "self.bg_colors[0][2], self.bg_colors[0][3]) glVertex2f(mid_x, self.gl_position[0][1]) glColor4f(self.bg_colors[1][0], self.bg_colors[1][1], self.bg_colors[1][2], self.bg_colors[1][3]) glVertex2f(self.gl_position[1][0], self.gl_position[1][1]) glColor4f(self.bg_colors[2][0], self.bg_colors[2][1], self.bg_colors[2][2],", "glColor4f(self.bg_colors[3][0], self.bg_colors[3][1], self.bg_colors[3][2], self.bg_colors[3][3]) glVertex2f(mid_x, self.gl_position[3][1]) glEnd() # Draw outline glDisable(GL_POLYGON_OFFSET_FILL) r, g,", "widget size to a specified aspect ratio :param size: a tuple containing the", "'FillColor1': (0.0, 0.42, 0.02, 1.0), 'FillColor2': (0.0, 0.42, 0.02, 1.0), 'FillColor3': (0.0, 0.42,", "aspect ratio :param size: a tuple containing the width and height :param pos:", ".widget import Widget, BGUI_DEFAULT class ProgressBar(Widget): \"\"\"A solid progress bar. Controlled via the", "\"\"\"A solid progress bar. 
Controlled via the 'percent' property which assumes percent as", "1.0), 'FillColor4': (0.0, 0.42, 0.02, 1.0), 'BGColor1': (0, 0, 0, 1), 'BGColor2': (0,", "theme['FillColor3'], theme['FillColor4'], ] self.bg_colors = [ theme['BGColor1'], theme['BGColor2'], theme['BGColor3'], theme['BGColor4'], ] self.border_color =", "options \"\"\" Widget.__init__(self, parent, name, aspect, size, pos, sub_theme, options) theme = self.theme", "parent: the widget's parent :param name: the name of the widget :param percent:", "type of theme to use :param aspect: constrain the widget size to a", "height :param pos: a tuple containing the x and y position :param options:", "sub_theme='', aspect=None, size=[1, 1], pos=[0, 0], options=BGUI_DEFAULT): \"\"\" :param parent: the widget's parent", "glEnable(GL_POLYGON_OFFSET_FILL) glPolygonOffset(1.0, 1.0) mid_x = self.gl_position[0][0] + (self.gl_position[1][0] - self.gl_position[0][0]) * self._percent #", "polygon offset glEnable(GL_POLYGON_OFFSET_FILL) glPolygonOffset(1.0, 1.0) mid_x = self.gl_position[0][0] + (self.gl_position[1][0] - self.gl_position[0][0]) *", "= max(0.0, min(1.0, value)) def _draw(self): \"\"\"Draw the progress bar\"\"\" # Enable alpha", "self._percent = percent @property def percent(self): return self._percent @percent.setter def percent(self, value): self._percent", "percent: the initial percent :param sub_theme: sub type of theme to use :param", "assumes percent as a 0-1 floating point number.\"\"\" theme_section = 'ProgressBar' theme_options =", "glColor4f(self.bg_colors[2][0], self.bg_colors[2][1], self.bg_colors[2][2], self.bg_colors[2][3]) glVertex2f(self.gl_position[2][0], self.gl_position[2][1]) glColor4f(self.bg_colors[3][0], self.bg_colors[3][1], self.bg_colors[3][2], self.bg_colors[3][3]) glVertex2f(mid_x, self.gl_position[3][1]) glEnd()", "'ProgressBar' theme_options = { 'FillColor1': (0.0, 0.42, 0.02, 1.0), 'FillColor2': (0.0, 0.42, 0.02,", "\"\"\" :param parent: the widget's parent :param name: the name 
of the widget", "sub_theme, options) theme = self.theme self.fill_colors = [ theme['FillColor1'], theme['FillColor2'], theme['FillColor3'], theme['FillColor4'], ]", "'FillColor3': (0.0, 0.42, 0.02, 1.0), 'FillColor4': (0.0, 0.42, 0.02, 1.0), 'BGColor1': (0, 0,", "def _draw(self): \"\"\"Draw the progress bar\"\"\" # Enable alpha blending glEnable(GL_BLEND) glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)", "glVertex2f(self.gl_position[1][0], self.gl_position[1][1]) glColor4f(self.bg_colors[2][0], self.bg_colors[2][1], self.bg_colors[2][2], self.bg_colors[2][3]) glVertex2f(self.gl_position[2][0], self.gl_position[2][1]) glColor4f(self.bg_colors[3][0], self.bg_colors[3][1], self.bg_colors[3][2], self.bg_colors[3][3]) glVertex2f(mid_x,", "0, 0, 1), 'BGColor4': (0, 0, 0, 1), 'BorderSize': 1, 'BorderColor': (0, 0,", "theme_section = 'ProgressBar' theme_options = { 'FillColor1': (0.0, 0.42, 0.02, 1.0), 'FillColor2': (0.0,", "the x and y position :param options: various other options \"\"\" Widget.__init__(self, parent,", "a tuple containing the width and height :param pos: a tuple containing the", "glColor4f(self.fill_colors[2][0], self.fill_colors[2][1], self.fill_colors[2][2], self.fill_colors[2][3]) glVertex2f(mid_x, self.gl_position[2][1]) glColor4f(self.fill_colors[3][0], self.fill_colors[3][1], self.fill_colors[3][2], self.fill_colors[3][3]) glVertex2f(self.gl_position[3][0], self.gl_position[3][1]) glEnd()", "r, g, b, a = self.border_color glColor4f(r, g, b, a) glPolygonMode(GL_FRONT, GL_LINE) glLineWidth(self.border)", "# Enable alpha blending glEnable(GL_BLEND) glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) # Enable polygon offset glEnable(GL_POLYGON_OFFSET_FILL) glPolygonOffset(1.0,", "self.gl_position[3][1]) glEnd() # Draw bg glBegin(GL_QUADS) glColor4f(self.bg_colors[0][0], self.bg_colors[0][1], self.bg_colors[0][2], self.bg_colors[0][3]) glVertex2f(mid_x, self.gl_position[0][1]) glColor4f(self.bg_colors[1][0],", "self.gl_position[2][1]) 
glColor4f(self.fill_colors[3][0], self.fill_colors[3][1], self.fill_colors[3][2], self.fill_colors[3][3]) glVertex2f(self.gl_position[3][0], self.gl_position[3][1]) glEnd() # Draw bg glBegin(GL_QUADS) glColor4f(self.bg_colors[0][0],", "name, aspect, size, pos, sub_theme, options) theme = self.theme self.fill_colors = [ theme['FillColor1'],", "self.bg_colors[3][2], self.bg_colors[3][3]) glVertex2f(mid_x, self.gl_position[3][1]) glEnd() # Draw outline glDisable(GL_POLYGON_OFFSET_FILL) r, g, b, a", "'FillColor2': (0.0, 0.42, 0.02, 1.0), 'FillColor3': (0.0, 0.42, 0.02, 1.0), 'FillColor4': (0.0, 0.42,", "outline glDisable(GL_POLYGON_OFFSET_FILL) r, g, b, a = self.border_color glColor4f(r, g, b, a) glPolygonMode(GL_FRONT,", "widget's parent :param name: the name of the widget :param percent: the initial", "0.42, 0.02, 1.0), 'BGColor1': (0, 0, 0, 1), 'BGColor2': (0, 0, 0, 1),", "glColor4f(self.bg_colors[1][0], self.bg_colors[1][1], self.bg_colors[1][2], self.bg_colors[1][3]) glVertex2f(self.gl_position[1][0], self.gl_position[1][1]) glColor4f(self.bg_colors[2][0], self.bg_colors[2][1], self.bg_colors[2][2], self.bg_colors[2][3]) glVertex2f(self.gl_position[2][0], self.gl_position[2][1]) glColor4f(self.bg_colors[3][0],", "1.0), 'FillColor3': (0.0, 0.42, 0.02, 1.0), 'FillColor4': (0.0, 0.42, 0.02, 1.0), 'BGColor1': (0,", "self.bg_colors[0][3]) glVertex2f(mid_x, self.gl_position[0][1]) glColor4f(self.bg_colors[1][0], self.bg_colors[1][1], self.bg_colors[1][2], self.bg_colors[1][3]) glVertex2f(self.gl_position[1][0], self.gl_position[1][1]) glColor4f(self.bg_colors[2][0], self.bg_colors[2][1], self.bg_colors[2][2], self.bg_colors[2][3])", "0], options=BGUI_DEFAULT): \"\"\" :param parent: the widget's parent :param name: the name of", "glEnd() # Draw bg glBegin(GL_QUADS) glColor4f(self.bg_colors[0][0], self.bg_colors[0][1], self.bg_colors[0][2], self.bg_colors[0][3]) glVertex2f(mid_x, self.gl_position[0][1]) glColor4f(self.bg_colors[1][0], self.bg_colors[1][1],", "of the 
widget :param percent: the initial percent :param sub_theme: sub type of", "max(0.0, min(1.0, value)) def _draw(self): \"\"\"Draw the progress bar\"\"\" # Enable alpha blending", "Controlled via the 'percent' property which assumes percent as a 0-1 floating point", "1, 'BorderColor': (0, 0, 0, 1), } def __init__(self, parent, name=None, percent=1.0, sub_theme='',", "self.fill_colors[2][3]) glVertex2f(mid_x, self.gl_position[2][1]) glColor4f(self.fill_colors[3][0], self.fill_colors[3][1], self.fill_colors[3][2], self.fill_colors[3][3]) glVertex2f(self.gl_position[3][0], self.gl_position[3][1]) glEnd() # Draw bg", "[ theme['FillColor1'], theme['FillColor2'], theme['FillColor3'], theme['FillColor4'], ] self.bg_colors = [ theme['BGColor1'], theme['BGColor2'], theme['BGColor3'], theme['BGColor4'],", "self.theme self.fill_colors = [ theme['FillColor1'], theme['FillColor2'], theme['FillColor3'], theme['FillColor4'], ] self.bg_colors = [ theme['BGColor1'],", "(0, 0, 0, 1), 'BGColor4': (0, 0, 0, 1), 'BorderSize': 1, 'BorderColor': (0,", "widget :param percent: the initial percent :param sub_theme: sub type of theme to", "= self.theme self.fill_colors = [ theme['FillColor1'], theme['FillColor2'], theme['FillColor3'], theme['FillColor4'], ] self.bg_colors = [", "glColor4f(self.fill_colors[0][0], self.fill_colors[0][1], self.fill_colors[0][2], self.fill_colors[0][3]) glVertex2f(self.gl_position[0][0], self.gl_position[0][1]) glColor4f(self.fill_colors[1][0], self.fill_colors[1][1], self.fill_colors[1][2], self.fill_colors[1][3]) glVertex2f(mid_x, self.gl_position[1][1]) glColor4f(self.fill_colors[2][0],", "self.gl_position[3][1]) glEnd() # Draw outline glDisable(GL_POLYGON_OFFSET_FILL) r, g, b, a = self.border_color glColor4f(r,", "@property def percent(self): return self._percent @percent.setter def percent(self, value): self._percent = max(0.0, min(1.0,", "percent(self, value): self._percent = max(0.0, min(1.0, value)) def _draw(self): \"\"\"Draw the progress bar\"\"\"", 
"the width and height :param pos: a tuple containing the x and y", "percent @property def percent(self): return self._percent @percent.setter def percent(self, value): self._percent = max(0.0,", "'BGColor3': (0, 0, 0, 1), 'BGColor4': (0, 0, 0, 1), 'BorderSize': 1, 'BorderColor':", "specified aspect ratio :param size: a tuple containing the width and height :param", "property which assumes percent as a 0-1 floating point number.\"\"\" theme_section = 'ProgressBar'", "glPolygonOffset(1.0, 1.0) mid_x = self.gl_position[0][0] + (self.gl_position[1][0] - self.gl_position[0][0]) * self._percent # Draw", "Draw outline glDisable(GL_POLYGON_OFFSET_FILL) r, g, b, a = self.border_color glColor4f(r, g, b, a)", "glDisable(GL_POLYGON_OFFSET_FILL) r, g, b, a = self.border_color glColor4f(r, g, b, a) glPolygonMode(GL_FRONT, GL_LINE)", "parent :param name: the name of the widget :param percent: the initial percent", "* from .widget import Widget, BGUI_DEFAULT class ProgressBar(Widget): \"\"\"A solid progress bar. 
Controlled", "other options \"\"\" Widget.__init__(self, parent, name, aspect, size, pos, sub_theme, options) theme =", "self.bg_colors[3][3]) glVertex2f(mid_x, self.gl_position[3][1]) glEnd() # Draw outline glDisable(GL_POLYGON_OFFSET_FILL) r, g, b, a =", "= 'ProgressBar' theme_options = { 'FillColor1': (0.0, 0.42, 0.02, 1.0), 'FillColor2': (0.0, 0.42,", "= self.border_color glColor4f(r, g, b, a) glPolygonMode(GL_FRONT, GL_LINE) glLineWidth(self.border) glBegin(GL_QUADS) for i in", "self.gl_position[2][1]) glColor4f(self.bg_colors[3][0], self.bg_colors[3][1], self.bg_colors[3][2], self.bg_colors[3][3]) glVertex2f(mid_x, self.gl_position[3][1]) glEnd() # Draw outline glDisable(GL_POLYGON_OFFSET_FILL) r,", "= percent @property def percent(self): return self._percent @percent.setter def percent(self, value): self._percent =", "0, 1), 'BorderSize': 1, 'BorderColor': (0, 0, 0, 1), } def __init__(self, parent,", "0, 0, 1), 'BGColor3': (0, 0, 0, 1), 'BGColor4': (0, 0, 0, 1),", "the name of the widget :param percent: the initial percent :param sub_theme: sub", "to a specified aspect ratio :param size: a tuple containing the width and", "1), } def __init__(self, parent, name=None, percent=1.0, sub_theme='', aspect=None, size=[1, 1], pos=[0, 0],", "a = self.border_color glColor4f(r, g, b, a) glPolygonMode(GL_FRONT, GL_LINE) glLineWidth(self.border) glBegin(GL_QUADS) for i", "theme['BGColor2'], theme['BGColor3'], theme['BGColor4'], ] self.border_color = theme['BorderColor'] self.border = theme['BorderSize'] self._percent = percent", "1.0), 'BGColor1': (0, 0, 0, 1), 'BGColor2': (0, 0, 0, 1), 'BGColor3': (0,", "percent=1.0, sub_theme='', aspect=None, size=[1, 1], pos=[0, 0], options=BGUI_DEFAULT): \"\"\" :param parent: the widget's", "+ (self.gl_position[1][0] - self.gl_position[0][0]) * self._percent # Draw fill glBegin(GL_QUADS) glColor4f(self.fill_colors[0][0], self.fill_colors[0][1], self.fill_colors[0][2],", "self._percent = max(0.0, min(1.0, value)) def _draw(self): 
\"\"\"Draw the progress bar\"\"\" # Enable", ":param size: a tuple containing the width and height :param pos: a tuple", "theme = self.theme self.fill_colors = [ theme['FillColor1'], theme['FillColor2'], theme['FillColor3'], theme['FillColor4'], ] self.bg_colors =", "glEnable(GL_BLEND) glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) # Enable polygon offset glEnable(GL_POLYGON_OFFSET_FILL) glPolygonOffset(1.0, 1.0) mid_x = self.gl_position[0][0]", "self.fill_colors[0][1], self.fill_colors[0][2], self.fill_colors[0][3]) glVertex2f(self.gl_position[0][0], self.gl_position[0][1]) glColor4f(self.fill_colors[1][0], self.fill_colors[1][1], self.fill_colors[1][2], self.fill_colors[1][3]) glVertex2f(mid_x, self.gl_position[1][1]) glColor4f(self.fill_colors[2][0], self.fill_colors[2][1],", "containing the x and y position :param options: various other options \"\"\" Widget.__init__(self,", "'BorderColor': (0, 0, 0, 1), } def __init__(self, parent, name=None, percent=1.0, sub_theme='', aspect=None,", "self.gl_position[1][1]) glColor4f(self.fill_colors[2][0], self.fill_colors[2][1], self.fill_colors[2][2], self.fill_colors[2][3]) glVertex2f(mid_x, self.gl_position[2][1]) glColor4f(self.fill_colors[3][0], self.fill_colors[3][1], self.fill_colors[3][2], self.fill_colors[3][3]) glVertex2f(self.gl_position[3][0], self.gl_position[3][1])", "parent, name, aspect, size, pos, sub_theme, options) theme = self.theme self.fill_colors = [", ":param pos: a tuple containing the x and y position :param options: various", "name of the widget :param percent: the initial percent :param sub_theme: sub type" ]
[ "attrs) ''' 有了ListMetaclass,我们在定义类的时候还是指示要使用ListMetaclass来定制类,传入关键字参数metaclass: ''' class MyList(list, metaclass=ListMetaclass): pass ''' 当我们传入关键字参数metaclass时,魔术就生效了, 它指示Python解释器在创建MyList时, 要通过ListMetaclass.__new__()来创建。 在此,我们可以修改类的定义,比如,加上新的方法,然后,返回修改后的定义。", "__str__(self): return '<%s:%s>' % (self.__class__.__name__, self.name) ''' 在Field的基础上,进一步定义各种类型的Field,比如StringField,IntegerField等等: ''' class StringField(Field): def __init__(self,", "定义类的属性到列的映射: id = IntegerField('id') name = StringField('username') email = StringField('email') password = StringField('password')", "# 当Python解释器载入hello模块时,就会依次执行该模块的所有语句,执行结果就是动态创建出一个 # Hello的class对象,测试如下: from hello import Hello h = Hello() h.hello() print(type(Hello))", "除了使用type()动态创建类以外,要控制类的创建行为,还可以使用metaclass. metaclass,直译为元类,简单的解释就是: 当我们定义了类以后,就可以根据这个类创建出实例,所以: 先定义类,然后创建实例。 但是如果我们想创建出类呢?那就必须根据metaclass创建出类,所以:先定义metaclass,然后创建类。 连接起来就是: 先定义metaclass,就可以创建类,最后创建实例。 所以,metaclass允许你创建类或者修改类。换句话说,你可以把类看成是metaclass创建出来的“实例”。 metaclass是Python面向对象里最难理解,也是最难使用的魔术代码。正常情况下,你不会碰到需要使用metaclass的情况, 所以,以下内容看不懂也没关系,因为基本上你不会用到。 我们先看一个简单的例子,这个metaclass可以给我们自定义的MyList增加一个add方法: 定义ListMetaclass,按照默认习惯,metaclass的类名重视以Metaclass结尾,以便清楚地标识这是一个metaclass: '''", "self.column_type = column_type def __str__(self): return '<%s:%s>' % (self.__class__.__name__, self.name) ''' 在Field的基础上,进一步定义各种类型的Field,比如StringField,IntegerField等等: '''", "__init__(self, name): super(StringField, self).__init__(name, 'varchar(100)') class IntegerField(Field): def __init__(self, name): super(IntegerField, self).__init__(name, 'bigint')", "################# # metaclass ################# ''' 除了使用type()动态创建类以外,要控制类的创建行为,还可以使用metaclass. metaclass,直译为元类,简单的解释就是: 当我们定义了类以后,就可以根据这个类创建出实例,所以: 先定义类,然后创建实例。 但是如果我们想创建出类呢?那就必须根据metaclass创建出类,所以:先定义metaclass,然后创建类。 连接起来就是: 先定义metaclass,就可以创建类,最后创建实例。 所以,metaclass允许你创建类或者修改类。换句话说,你可以把类看成是metaclass创建出来的“实例”。", "metaclass ################# ''' 除了使用type()动态创建类以外,要控制类的创建行为,还可以使用metaclass. 
metaclass,直译为元类,简单的解释就是: 当我们定义了类以后,就可以根据这个类创建出实例,所以: 先定义类,然后创建实例。 但是如果我们想创建出类呢?那就必须根据metaclass创建出类,所以:先定义metaclass,然后创建类。 连接起来就是: 先定义metaclass,就可以创建类,最后创建实例。 所以,metaclass允许你创建类或者修改类。换句话说,你可以把类看成是metaclass创建出来的“实例”。 metaclass是Python面向对象里最难理解,也是最难使用的魔术代码。正常情况下,你不会碰到需要使用metaclass的情况, 所以,以下内容看不懂也没关系,因为基本上你不会用到。", "正常情况下,确实应该直接写,通过metaclass修改纯属变态。 但是,总会遇到需要通过metaclass修改类定义的。ORM就是一个典型的例子。 ORM全称“Object Relational Mapping”,即对象-关系映射,就是把关系数据库的一行映射为一个对象,也就是一个类对应一个表,这样,写代码更简单,不用直接操作SQL语句。 要编写一个ORM框架,所有的类都只能动态定义,因为只有使用者才能根据表的结构定义出对应的类来。 让我们来尝试编写一个ORM框架。 编写底层模块的第一步,就是先把调用接口写出来。比如,使用者如果使用这个ORM框架,想定义一个User类来操作对应的数据库表User,我们期待他写出这样的代码: class User(Model): # 定义类的属性到列的映射: id", "Relational Mapping”,即对象-关系映射,就是把关系数据库的一行映射为一个对象,也就是一个类对应一个表,这样,写代码更简单,不用直接操作SQL语句。 要编写一个ORM框架,所有的类都只能动态定义,因为只有使用者才能根据表的结构定义出对应的类来。 让我们来尝试编写一个ORM框架。 编写底层模块的第一步,就是先把调用接口写出来。比如,使用者如果使用这个ORM框架,想定义一个User类来操作对应的数据库表User,我们期待他写出这样的代码: class User(Model): # 定义类的属性到列的映射: id = IntegerField('id') name", "h = Hello() h.hello() print(type(Hello)) print(type(h)) ''' type()函数可以查看一个类型或变量的类型,Hello是一个class,它的类型就是type,而h是一个实例,它的类型就是class Hello. 我们说class的定义是运行时动态创建的,而创建class的方法就是使用type()函数。 type()函数既可以返回一个对象的类型,又可以创建出新的类型,比如,我们可以通过type()函数 创建出Hello类,而无需通过 class", "Hello() h.hello() print(type(Hello)) print(type(h)) ''' type()函数可以查看一个类型或变量的类型,Hello是一个class,它的类型就是type,而h是一个实例,它的类型就是class Hello. 
我们说class的定义是运行时动态创建的,而创建class的方法就是使用type()函数。 type()函数既可以返回一个对象的类型,又可以创建出新的类型,比如,我们可以通过type()函数 创建出Hello类,而无需通过 class Hello(object)...的定义: '''", "= IntegerField('id') name = StringField('username') email = StringField('email') password = StringField('password') # 创建一个实例:", "保存到数据库: u.save() 其中,父类Model和属性类型StringField、IntegerField是由ORM框架提供的,剩下的魔术方法比如save()全部由metaclass自动完成。虽然metaclass的编写会比较复杂,但ORM的使用者用起来却异常简单。 现在,我们就按上面的接口来实现该ORM。 首先来定义Field类,它负责保存数据库表的字段名和字段类型: ''' class Field(object): def __init__(self, name, column_type): self.name", "动态语言和静态语言最大的不同,就是函数和类的定义,【不是编译时定义的,而是运行时动态创建的。】 ''' # 比方说我们要定义一个Hello的class,就写一个hello.py模块: ''' class Hello(object): def hello(self, name='world'): print('Hello, %s.' %", "4.类的方法集合 测试一下MyList是否可以调用add()方法: ''' L = MyList() L.add(1) print(L) ''' 而普通的list没有add()方法: ''' # L2", "name, column_type): self.name = name self.column_type = column_type def __str__(self): return '<%s:%s>' %", "当我们传入关键字参数metaclass时,魔术就生效了, 它指示Python解释器在创建MyList时, 要通过ListMetaclass.__new__()来创建。 在此,我们可以修改类的定义,比如,加上新的方法,然后,返回修改后的定义。 __new__()方法接收到的参数依次是: 1.当前准备创建的类的对象 2.类的名字 3.类继承的父类集合 4.类的方法集合 测试一下MyList是否可以调用add()方法: ''' L =", "''' 动态修改有什么意义?直接在MyList定义中写上add()方法不是更简单吗? 正常情况下,确实应该直接写,通过metaclass修改纯属变态。 但是,总会遇到需要通过metaclass修改类定义的。ORM就是一个典型的例子。 ORM全称“Object Relational Mapping”,即对象-关系映射,就是把关系数据库的一行映射为一个对象,也就是一个类对应一个表,这样,写代码更简单,不用直接操作SQL语句。 要编写一个ORM框架,所有的类都只能动态定义,因为只有使用者才能根据表的结构定义出对应的类来。 让我们来尝试编写一个ORM框架。 编写底层模块的第一步,就是先把调用接口写出来。比如,使用者如果使用这个ORM框架,想定义一个User类来操作对应的数据库表User,我们期待他写出这样的代码: class User(Model): #", "''' class Field(object): def __init__(self, name, column_type): self.name = name self.column_type = column_type", "''' type()函数可以查看一个类型或变量的类型,Hello是一个class,它的类型就是type,而h是一个实例,它的类型就是class Hello. 
我们说class的定义是运行时动态创建的,而创建class的方法就是使用type()函数。 type()函数既可以返回一个对象的类型,又可以创建出新的类型,比如,我们可以通过type()函数 创建出Hello类,而无需通过 class Hello(object)...的定义: ''' def fn(self, name='world'): #", "u = User(id=12345, name='Michael', email='<EMAIL>', password='<PASSWORD>') # 保存到数据库: u.save() 其中,父类Model和属性类型StringField、IntegerField是由ORM框架提供的,剩下的魔术方法比如save()全部由metaclass自动完成。虽然metaclass的编写会比较复杂,但ORM的使用者用起来却异常简单。 现在,我们就按上面的接口来实现该ORM。 首先来定义Field类,它负责保存数据库表的字段名和字段类型: '''", "3.class的方法名称与函数绑定,这里我们把函数fn绑定到方法名hello上。 通过type()函数创建的类和直接写class是完全一样的,因为Python解释器遇到class定义时, 仅仅是扫描一下class定义的语法,然后调用type()函数创建出class。 正常情况下,我们用class Xxx..来定义类,但是,type()函数也允许我们动态创建出类来,也就是说 动态语言本身支持动态创建类,这和静态语言有非常大的不同,要在静态语言运行时期创建类,必须构造源代码字符串 再调用编译器,或者借助一些工具生成字节码实现,本质上都是动态编译,会非常复杂。 ''' ################# # metaclass ################# '''", "bases, attrs): attrs['add'] = lambda self, value: self.append(value) return type.__new__(cls, name, bases, attrs)", "lambda self, value: self.append(value) return type.__new__(cls, name, bases, attrs) ''' 有了ListMetaclass,我们在定义类的时候还是指示要使用ListMetaclass来定制类,传入关键字参数metaclass: ''' class", "仅仅是扫描一下class定义的语法,然后调用type()函数创建出class。 正常情况下,我们用class Xxx..来定义类,但是,type()函数也允许我们动态创建出类来,也就是说 动态语言本身支持动态创建类,这和静态语言有非常大的不同,要在静态语言运行时期创建类,必须构造源代码字符串 再调用编译器,或者借助一些工具生成字节码实现,本质上都是动态编译,会非常复杂。 ''' ################# # metaclass ################# ''' 除了使用type()动态创建类以外,要控制类的创建行为,还可以使用metaclass. 
metaclass,直译为元类,简单的解释就是:", "= name self.column_type = column_type def __str__(self): return '<%s:%s>' % (self.__class__.__name__, self.name) '''", "2.继承的父类集合,注意Python支持多重继承,如果只有一个父类,别忘了tuple的单元素的写法; 3.class的方法名称与函数绑定,这里我们把函数fn绑定到方法名hello上。 通过type()函数创建的类和直接写class是完全一样的,因为Python解释器遇到class定义时, 仅仅是扫描一下class定义的语法,然后调用type()函数创建出class。 正常情况下,我们用class Xxx..来定义类,但是,type()函数也允许我们动态创建出类来,也就是说 动态语言本身支持动态创建类,这和静态语言有非常大的不同,要在静态语言运行时期创建类,必须构造源代码字符串 再调用编译器,或者借助一些工具生成字节码实现,本质上都是动态编译,会非常复杂。 ''' ################# # metaclass #################", "h.hello() print(type(Hello)) print(type(h)) ''' type()函数可以查看一个类型或变量的类型,Hello是一个class,它的类型就是type,而h是一个实例,它的类型就是class Hello. 我们说class的定义是运行时动态创建的,而创建class的方法就是使用type()函数。 type()函数既可以返回一个对象的类型,又可以创建出新的类型,比如,我们可以通过type()函数 创建出Hello类,而无需通过 class Hello(object)...的定义: ''' def", "# L2 = list() # L2.add(1) ''' 动态修改有什么意义?直接在MyList定义中写上add()方法不是更简单吗? 正常情况下,确实应该直接写,通过metaclass修改纯属变态。 但是,总会遇到需要通过metaclass修改类定义的。ORM就是一个典型的例子。 ORM全称“Object Relational Mapping”,即对象-关系映射,就是把关系数据库的一行映射为一个对象,也就是一个类对应一个表,这样,写代码更简单,不用直接操作SQL语句。", "# 先定义函数 print('Hello, %s.' % name) Hello = type('Hello', (object,), dict(hello=fn)) # 创建Hello", "# Hello的class对象,测试如下: from hello import Hello h = Hello() h.hello() print(type(Hello)) print(type(h)) '''", "ListMetaclass(type): def __new__(cls, name, bases, attrs): attrs['add'] = lambda self, value: self.append(value) return", "class Hello(object)...的定义: ''' def fn(self, name='world'): # 先定义函数 print('Hello, %s.' % name) Hello", "L.add(1) print(L) ''' 而普通的list没有add()方法: ''' # L2 = list() # L2.add(1) ''' 动态修改有什么意义?直接在MyList定义中写上add()方法不是更简单吗?", "class User(Model): # 定义类的属性到列的映射: id = IntegerField('id') name = StringField('username') email = StringField('email')", "# 比方说我们要定义一个Hello的class,就写一个hello.py模块: ''' class Hello(object): def hello(self, name='world'): print('Hello, %s.' 
% name) '''", "''' class StringField(Field): def __init__(self, name): super(StringField, self).__init__(name, 'varchar(100)') class IntegerField(Field): def __init__(self,", "= lambda self, value: self.append(value) return type.__new__(cls, name, bases, attrs) ''' 有了ListMetaclass,我们在定义类的时候还是指示要使用ListMetaclass来定制类,传入关键字参数metaclass: '''", "bases, attrs) ''' 有了ListMetaclass,我们在定义类的时候还是指示要使用ListMetaclass来定制类,传入关键字参数metaclass: ''' class MyList(list, metaclass=ListMetaclass): pass ''' 当我们传入关键字参数metaclass时,魔术就生效了, 它指示Python解释器在创建MyList时, 要通过ListMetaclass.__new__()来创建。", "先定义metaclass,就可以创建类,最后创建实例。 所以,metaclass允许你创建类或者修改类。换句话说,你可以把类看成是metaclass创建出来的“实例”。 metaclass是Python面向对象里最难理解,也是最难使用的魔术代码。正常情况下,你不会碰到需要使用metaclass的情况, 所以,以下内容看不懂也没关系,因为基本上你不会用到。 我们先看一个简单的例子,这个metaclass可以给我们自定义的MyList增加一个add方法: 定义ListMetaclass,按照默认习惯,metaclass的类名重视以Metaclass结尾,以便清楚地标识这是一个metaclass: ''' # metaclass是类的模板,所以必须从'type'类型派生: class ListMetaclass(type): def __new__(cls,", "def __init__(self, name): super(StringField, self).__init__(name, 'varchar(100)') class IntegerField(Field): def __init__(self, name): super(IntegerField, self).__init__(name,", "################# ''' 除了使用type()动态创建类以外,要控制类的创建行为,还可以使用metaclass. metaclass,直译为元类,简单的解释就是: 当我们定义了类以后,就可以根据这个类创建出实例,所以: 先定义类,然后创建实例。 但是如果我们想创建出类呢?那就必须根据metaclass创建出类,所以:先定义metaclass,然后创建类。 连接起来就是: 先定义metaclass,就可以创建类,最后创建实例。 所以,metaclass允许你创建类或者修改类。换句话说,你可以把类看成是metaclass创建出来的“实例”。 metaclass是Python面向对象里最难理解,也是最难使用的魔术代码。正常情况下,你不会碰到需要使用metaclass的情况, 所以,以下内容看不懂也没关系,因为基本上你不会用到。 我们先看一个简单的例子,这个metaclass可以给我们自定义的MyList增加一个add方法:", "# use_type ############ # type() ''' 动态语言和静态语言最大的不同,就是函数和类的定义,【不是编译时定义的,而是运行时动态创建的。】 ''' # 比方说我们要定义一个Hello的class,就写一个hello.py模块: ''' class Hello(object):", "name='world'): print('Hello, %s.' 
% name) ''' # 当Python解释器载入hello模块时,就会依次执行该模块的所有语句,执行结果就是动态创建出一个 # Hello的class对象,测试如下: from hello import", "''' 有了ListMetaclass,我们在定义类的时候还是指示要使用ListMetaclass来定制类,传入关键字参数metaclass: ''' class MyList(list, metaclass=ListMetaclass): pass ''' 当我们传入关键字参数metaclass时,魔术就生效了, 它指示Python解释器在创建MyList时, 要通过ListMetaclass.__new__()来创建。 在此,我们可以修改类的定义,比如,加上新的方法,然后,返回修改后的定义。 __new__()方法接收到的参数依次是:", "创建出Hello类,而无需通过 class Hello(object)...的定义: ''' def fn(self, name='world'): # 先定义函数 print('Hello, %s.' % name)", "% (self.__class__.__name__, self.name) ''' 在Field的基础上,进一步定义各种类型的Field,比如StringField,IntegerField等等: ''' class StringField(Field): def __init__(self, name): super(StringField, self).__init__(name,", "class Field(object): def __init__(self, name, column_type): self.name = name self.column_type = column_type def", "def __str__(self): return '<%s:%s>' % (self.__class__.__name__, self.name) ''' 在Field的基础上,进一步定义各种类型的Field,比如StringField,IntegerField等等: ''' class StringField(Field): def", "self.name = name self.column_type = column_type def __str__(self): return '<%s:%s>' % (self.__class__.__name__, self.name)", "type('Hello', (object,), dict(hello=fn)) # 创建Hello class ''' 要创建一个class对象,type()函数一次传入3个参数: 1.class的名称; 2.继承的父类集合,注意Python支持多重继承,如果只有一个父类,别忘了tuple的单元素的写法; 3.class的方法名称与函数绑定,这里我们把函数fn绑定到方法名hello上。 通过type()函数创建的类和直接写class是完全一样的,因为Python解释器遇到class定义时, 仅仅是扫描一下class定义的语法,然后调用type()函数创建出class。", "测试一下MyList是否可以调用add()方法: ''' L = MyList() L.add(1) print(L) ''' 而普通的list没有add()方法: ''' # L2 =", "password = StringField('password') # 创建一个实例: u = User(id=12345, name='Michael', email='<EMAIL>', password='<PASSWORD>') # 保存到数据库:", "print(type(h)) ''' type()函数可以查看一个类型或变量的类型,Hello是一个class,它的类型就是type,而h是一个实例,它的类型就是class Hello. 我们说class的定义是运行时动态创建的,而创建class的方法就是使用type()函数。 type()函数既可以返回一个对象的类型,又可以创建出新的类型,比如,我们可以通过type()函数 创建出Hello类,而无需通过 class Hello(object)...的定义: ''' def fn(self, name='world'):", "L2.add(1) ''' 动态修改有什么意义?直接在MyList定义中写上add()方法不是更简单吗? 
正常情况下,确实应该直接写,通过metaclass修改纯属变态。 但是,总会遇到需要通过metaclass修改类定义的。ORM就是一个典型的例子。 ORM全称“Object Relational Mapping”,即对象-关系映射,就是把关系数据库的一行映射为一个对象,也就是一个类对应一个表,这样,写代码更简单,不用直接操作SQL语句。 要编写一个ORM框架,所有的类都只能动态定义,因为只有使用者才能根据表的结构定义出对应的类来。 让我们来尝试编写一个ORM框架。 编写底层模块的第一步,就是先把调用接口写出来。比如,使用者如果使用这个ORM框架,想定义一个User类来操作对应的数据库表User,我们期待他写出这样的代码: class User(Model):", "''' # 当Python解释器载入hello模块时,就会依次执行该模块的所有语句,执行结果就是动态创建出一个 # Hello的class对象,测试如下: from hello import Hello h = Hello() h.hello()", "def __new__(cls, name, bases, attrs): attrs['add'] = lambda self, value: self.append(value) return type.__new__(cls,", "id = IntegerField('id') name = StringField('username') email = StringField('email') password = StringField('password') #", "############ # type() ''' 动态语言和静态语言最大的不同,就是函数和类的定义,【不是编译时定义的,而是运行时动态创建的。】 ''' # 比方说我们要定义一个Hello的class,就写一个hello.py模块: ''' class Hello(object): def hello(self,", "# metaclass是类的模板,所以必须从'type'类型派生: class ListMetaclass(type): def __new__(cls, name, bases, attrs): attrs['add'] = lambda self,", "return '<%s:%s>' % (self.__class__.__name__, self.name) ''' 在Field的基础上,进一步定义各种类型的Field,比如StringField,IntegerField等等: ''' class StringField(Field): def __init__(self, name):", "super(StringField, self).__init__(name, 'varchar(100)') class IntegerField(Field): def __init__(self, name): super(IntegerField, self).__init__(name, 'bigint') ''' 下一步,就是编写最复杂的ModelMetaclass了:", "Field(object): def __init__(self, name, column_type): self.name = name self.column_type = column_type def __str__(self):", "value: self.append(value) return type.__new__(cls, name, bases, attrs) ''' 有了ListMetaclass,我们在定义类的时候还是指示要使用ListMetaclass来定制类,传入关键字参数metaclass: ''' class MyList(list, metaclass=ListMetaclass):", "= list() # L2.add(1) ''' 动态修改有什么意义?直接在MyList定义中写上add()方法不是更简单吗? 
正常情况下,确实应该直接写,通过metaclass修改纯属变态。 但是,总会遇到需要通过metaclass修改类定义的。ORM就是一个典型的例子。 ORM全称“Object Relational Mapping”,即对象-关系映射,就是把关系数据库的一行映射为一个对象,也就是一个类对应一个表,这样,写代码更简单,不用直接操作SQL语句。 要编写一个ORM框架,所有的类都只能动态定义,因为只有使用者才能根据表的结构定义出对应的类来。 让我们来尝试编写一个ORM框架。", "编写底层模块的第一步,就是先把调用接口写出来。比如,使用者如果使用这个ORM框架,想定义一个User类来操作对应的数据库表User,我们期待他写出这样的代码: class User(Model): # 定义类的属性到列的映射: id = IntegerField('id') name = StringField('username') email =", "我们先看一个简单的例子,这个metaclass可以给我们自定义的MyList增加一个add方法: 定义ListMetaclass,按照默认习惯,metaclass的类名重视以Metaclass结尾,以便清楚地标识这是一个metaclass: ''' # metaclass是类的模板,所以必须从'type'类型派生: class ListMetaclass(type): def __new__(cls, name, bases, attrs): attrs['add']", "正常情况下,我们用class Xxx..来定义类,但是,type()函数也允许我们动态创建出类来,也就是说 动态语言本身支持动态创建类,这和静态语言有非常大的不同,要在静态语言运行时期创建类,必须构造源代码字符串 再调用编译器,或者借助一些工具生成字节码实现,本质上都是动态编译,会非常复杂。 ''' ################# # metaclass ################# ''' 除了使用type()动态创建类以外,要控制类的创建行为,还可以使用metaclass. metaclass,直译为元类,简单的解释就是: 当我们定义了类以后,就可以根据这个类创建出实例,所以:", "StringField('username') email = StringField('email') password = StringField('password') # 创建一个实例: u = User(id=12345, name='Michael',", "Xxx..来定义类,但是,type()函数也允许我们动态创建出类来,也就是说 动态语言本身支持动态创建类,这和静态语言有非常大的不同,要在静态语言运行时期创建类,必须构造源代码字符串 再调用编译器,或者借助一些工具生成字节码实现,本质上都是动态编译,会非常复杂。 ''' ################# # metaclass ################# ''' 除了使用type()动态创建类以外,要控制类的创建行为,还可以使用metaclass. 
metaclass,直译为元类,简单的解释就是: 当我们定义了类以后,就可以根据这个类创建出实例,所以: 先定义类,然后创建实例。", "= StringField('username') email = StringField('email') password = StringField('password') # 创建一个实例: u = User(id=12345,", "__init__(self, name, column_type): self.name = name self.column_type = column_type def __str__(self): return '<%s:%s>'", "# 创建Hello class ''' 要创建一个class对象,type()函数一次传入3个参数: 1.class的名称; 2.继承的父类集合,注意Python支持多重继承,如果只有一个父类,别忘了tuple的单元素的写法; 3.class的方法名称与函数绑定,这里我们把函数fn绑定到方法名hello上。 通过type()函数创建的类和直接写class是完全一样的,因为Python解释器遇到class定义时, 仅仅是扫描一下class定义的语法,然后调用type()函数创建出class。 正常情况下,我们用class Xxx..来定义类,但是,type()函数也允许我们动态创建出类来,也就是说 动态语言本身支持动态创建类,这和静态语言有非常大的不同,要在静态语言运行时期创建类,必须构造源代码字符串", "连接起来就是: 先定义metaclass,就可以创建类,最后创建实例。 所以,metaclass允许你创建类或者修改类。换句话说,你可以把类看成是metaclass创建出来的“实例”。 metaclass是Python面向对象里最难理解,也是最难使用的魔术代码。正常情况下,你不会碰到需要使用metaclass的情况, 所以,以下内容看不懂也没关系,因为基本上你不会用到。 我们先看一个简单的例子,这个metaclass可以给我们自定义的MyList增加一个add方法: 定义ListMetaclass,按照默认习惯,metaclass的类名重视以Metaclass结尾,以便清楚地标识这是一个metaclass: ''' # metaclass是类的模板,所以必须从'type'类型派生: class ListMetaclass(type): def", "pass ''' 当我们传入关键字参数metaclass时,魔术就生效了, 它指示Python解释器在创建MyList时, 要通过ListMetaclass.__new__()来创建。 在此,我们可以修改类的定义,比如,加上新的方法,然后,返回修改后的定义。 __new__()方法接收到的参数依次是: 1.当前准备创建的类的对象 2.类的名字 3.类继承的父类集合 4.类的方法集合 测试一下MyList是否可以调用add()方法: '''", "但是,总会遇到需要通过metaclass修改类定义的。ORM就是一个典型的例子。 ORM全称“Object Relational Mapping”,即对象-关系映射,就是把关系数据库的一行映射为一个对象,也就是一个类对应一个表,这样,写代码更简单,不用直接操作SQL语句。 要编写一个ORM框架,所有的类都只能动态定义,因为只有使用者才能根据表的结构定义出对应的类来。 让我们来尝试编写一个ORM框架。 编写底层模块的第一步,就是先把调用接口写出来。比如,使用者如果使用这个ORM框架,想定义一个User类来操作对应的数据库表User,我们期待他写出这样的代码: class User(Model): # 定义类的属性到列的映射: id =", "name='Michael', email='<EMAIL>', password='<PASSWORD>') # 保存到数据库: u.save() 其中,父类Model和属性类型StringField、IntegerField是由ORM框架提供的,剩下的魔术方法比如save()全部由metaclass自动完成。虽然metaclass的编写会比较复杂,但ORM的使用者用起来却异常简单。 现在,我们就按上面的接口来实现该ORM。 首先来定义Field类,它负责保存数据库表的字段名和字段类型: ''' class Field(object): def", "1.当前准备创建的类的对象 2.类的名字 3.类继承的父类集合 4.类的方法集合 测试一下MyList是否可以调用add()方法: ''' L = MyList() L.add(1) print(L) ''' 
而普通的list没有add()方法:", "动态修改有什么意义?直接在MyList定义中写上add()方法不是更简单吗? 正常情况下,确实应该直接写,通过metaclass修改纯属变态。 但是,总会遇到需要通过metaclass修改类定义的。ORM就是一个典型的例子。 ORM全称“Object Relational Mapping”,即对象-关系映射,就是把关系数据库的一行映射为一个对象,也就是一个类对应一个表,这样,写代码更简单,不用直接操作SQL语句。 要编写一个ORM框架,所有的类都只能动态定义,因为只有使用者才能根据表的结构定义出对应的类来。 让我们来尝试编写一个ORM框架。 编写底层模块的第一步,就是先把调用接口写出来。比如,使用者如果使用这个ORM框架,想定义一个User类来操作对应的数据库表User,我们期待他写出这样的代码: class User(Model): # 定义类的属性到列的映射:", "创建Hello class ''' 要创建一个class对象,type()函数一次传入3个参数: 1.class的名称; 2.继承的父类集合,注意Python支持多重继承,如果只有一个父类,别忘了tuple的单元素的写法; 3.class的方法名称与函数绑定,这里我们把函数fn绑定到方法名hello上。 通过type()函数创建的类和直接写class是完全一样的,因为Python解释器遇到class定义时, 仅仅是扫描一下class定义的语法,然后调用type()函数创建出class。 正常情况下,我们用class Xxx..来定义类,但是,type()函数也允许我们动态创建出类来,也就是说 动态语言本身支持动态创建类,这和静态语言有非常大的不同,要在静态语言运行时期创建类,必须构造源代码字符串 再调用编译器,或者借助一些工具生成字节码实现,本质上都是动态编译,会非常复杂。", "而普通的list没有add()方法: ''' # L2 = list() # L2.add(1) ''' 动态修改有什么意义?直接在MyList定义中写上add()方法不是更简单吗? 正常情况下,确实应该直接写,通过metaclass修改纯属变态。 但是,总会遇到需要通过metaclass修改类定义的。ORM就是一个典型的例子。 ORM全称“Object", "= StringField('email') password = StringField('password') # 创建一个实例: u = User(id=12345, name='Michael', email='<EMAIL>', password='<PASSWORD>')", "但是如果我们想创建出类呢?那就必须根据metaclass创建出类,所以:先定义metaclass,然后创建类。 连接起来就是: 先定义metaclass,就可以创建类,最后创建实例。 所以,metaclass允许你创建类或者修改类。换句话说,你可以把类看成是metaclass创建出来的“实例”。 metaclass是Python面向对象里最难理解,也是最难使用的魔术代码。正常情况下,你不会碰到需要使用metaclass的情况, 所以,以下内容看不懂也没关系,因为基本上你不会用到。 我们先看一个简单的例子,这个metaclass可以给我们自定义的MyList增加一个add方法: 定义ListMetaclass,按照默认习惯,metaclass的类名重视以Metaclass结尾,以便清楚地标识这是一个metaclass: ''' # metaclass是类的模板,所以必须从'type'类型派生: class ListMetaclass(type):", "Hello(object)...的定义: ''' def fn(self, name='world'): # 先定义函数 print('Hello, %s.' % name) Hello =", "def __init__(self, name, column_type): self.name = name self.column_type = column_type def __str__(self): return", "%s.' 
% name) ''' # 当Python解释器载入hello模块时,就会依次执行该模块的所有语句,执行结果就是动态创建出一个 # Hello的class对象,测试如下: from hello import Hello h", "所以,metaclass允许你创建类或者修改类。换句话说,你可以把类看成是metaclass创建出来的“实例”。 metaclass是Python面向对象里最难理解,也是最难使用的魔术代码。正常情况下,你不会碰到需要使用metaclass的情况, 所以,以下内容看不懂也没关系,因为基本上你不会用到。 我们先看一个简单的例子,这个metaclass可以给我们自定义的MyList增加一个add方法: 定义ListMetaclass,按照默认习惯,metaclass的类名重视以Metaclass结尾,以便清楚地标识这是一个metaclass: ''' # metaclass是类的模板,所以必须从'type'类型派生: class ListMetaclass(type): def __new__(cls, name,", "(object,), dict(hello=fn)) # 创建Hello class ''' 要创建一个class对象,type()函数一次传入3个参数: 1.class的名称; 2.继承的父类集合,注意Python支持多重继承,如果只有一个父类,别忘了tuple的单元素的写法; 3.class的方法名称与函数绑定,这里我们把函数fn绑定到方法名hello上。 通过type()函数创建的类和直接写class是完全一样的,因为Python解释器遇到class定义时, 仅仅是扫描一下class定义的语法,然后调用type()函数创建出class。 正常情况下,我们用class", "print('Hello, %s.' % name) Hello = type('Hello', (object,), dict(hello=fn)) # 创建Hello class '''", "其中,父类Model和属性类型StringField、IntegerField是由ORM框架提供的,剩下的魔术方法比如save()全部由metaclass自动完成。虽然metaclass的编写会比较复杂,但ORM的使用者用起来却异常简单。 现在,我们就按上面的接口来实现该ORM。 首先来定义Field类,它负责保存数据库表的字段名和字段类型: ''' class Field(object): def __init__(self, name, column_type): self.name = name", "print('Hello, %s.' 
% name) ''' # 当Python解释器载入hello模块时,就会依次执行该模块的所有语句,执行结果就是动态创建出一个 # Hello的class对象,测试如下: from hello import Hello", "metaclass,直译为元类,简单的解释就是: 当我们定义了类以后,就可以根据这个类创建出实例,所以: 先定义类,然后创建实例。 但是如果我们想创建出类呢?那就必须根据metaclass创建出类,所以:先定义metaclass,然后创建类。 连接起来就是: 先定义metaclass,就可以创建类,最后创建实例。 所以,metaclass允许你创建类或者修改类。换句话说,你可以把类看成是metaclass创建出来的“实例”。 metaclass是Python面向对象里最难理解,也是最难使用的魔术代码。正常情况下,你不会碰到需要使用metaclass的情况, 所以,以下内容看不懂也没关系,因为基本上你不会用到。 我们先看一个简单的例子,这个metaclass可以给我们自定义的MyList增加一个add方法: 定义ListMetaclass,按照默认习惯,metaclass的类名重视以Metaclass结尾,以便清楚地标识这是一个metaclass: ''' #", "# 创建一个实例: u = User(id=12345, name='Michael', email='<EMAIL>', password='<PASSWORD>') # 保存到数据库: u.save() 其中,父类Model和属性类型StringField、IntegerField是由ORM框架提供的,剩下的魔术方法比如save()全部由metaclass自动完成。虽然metaclass的编写会比较复杂,但ORM的使用者用起来却异常简单。 现在,我们就按上面的接口来实现该ORM。", "metaclass=ListMetaclass): pass ''' 当我们传入关键字参数metaclass时,魔术就生效了, 它指示Python解释器在创建MyList时, 要通过ListMetaclass.__new__()来创建。 在此,我们可以修改类的定义,比如,加上新的方法,然后,返回修改后的定义。 __new__()方法接收到的参数依次是: 1.当前准备创建的类的对象 2.类的名字 3.类继承的父类集合 4.类的方法集合 测试一下MyList是否可以调用add()方法:", "__new__()方法接收到的参数依次是: 1.当前准备创建的类的对象 2.类的名字 3.类继承的父类集合 4.类的方法集合 测试一下MyList是否可以调用add()方法: ''' L = MyList() L.add(1) print(L) '''", "self.append(value) return type.__new__(cls, name, bases, attrs) ''' 有了ListMetaclass,我们在定义类的时候还是指示要使用ListMetaclass来定制类,传入关键字参数metaclass: ''' class MyList(list, metaclass=ListMetaclass): pass", "创建一个实例: u = User(id=12345, name='Michael', email='<EMAIL>', password='<PASSWORD>') # 保存到数据库: u.save() 其中,父类Model和属性类型StringField、IntegerField是由ORM框架提供的,剩下的魔术方法比如save()全部由metaclass自动完成。虽然metaclass的编写会比较复杂,但ORM的使用者用起来却异常简单。 现在,我们就按上面的接口来实现该ORM。 首先来定义Field类,它负责保存数据库表的字段名和字段类型:", "list() # L2.add(1) ''' 动态修改有什么意义?直接在MyList定义中写上add()方法不是更简单吗? 
正常情况下,确实应该直接写,通过metaclass修改纯属变态。 但是,总会遇到需要通过metaclass修改类定义的。ORM就是一个典型的例子。 ORM全称“Object Relational Mapping”,即对象-关系映射,就是把关系数据库的一行映射为一个对象,也就是一个类对应一个表,这样,写代码更简单,不用直接操作SQL语句。 要编写一个ORM框架,所有的类都只能动态定义,因为只有使用者才能根据表的结构定义出对应的类来。 让我们来尝试编写一个ORM框架。 编写底层模块的第一步,就是先把调用接口写出来。比如,使用者如果使用这个ORM框架,想定义一个User类来操作对应的数据库表User,我们期待他写出这样的代码:", "dict(hello=fn)) # 创建Hello class ''' 要创建一个class对象,type()函数一次传入3个参数: 1.class的名称; 2.继承的父类集合,注意Python支持多重继承,如果只有一个父类,别忘了tuple的单元素的写法; 3.class的方法名称与函数绑定,这里我们把函数fn绑定到方法名hello上。 通过type()函数创建的类和直接写class是完全一样的,因为Python解释器遇到class定义时, 仅仅是扫描一下class定义的语法,然后调用type()函数创建出class。 正常情况下,我们用class Xxx..来定义类,但是,type()函数也允许我们动态创建出类来,也就是说", "name, bases, attrs) ''' 有了ListMetaclass,我们在定义类的时候还是指示要使用ListMetaclass来定制类,传入关键字参数metaclass: ''' class MyList(list, metaclass=ListMetaclass): pass ''' 当我们传入关键字参数metaclass时,魔术就生效了, 它指示Python解释器在创建MyList时,", "''' 除了使用type()动态创建类以外,要控制类的创建行为,还可以使用metaclass. metaclass,直译为元类,简单的解释就是: 当我们定义了类以后,就可以根据这个类创建出实例,所以: 先定义类,然后创建实例。 但是如果我们想创建出类呢?那就必须根据metaclass创建出类,所以:先定义metaclass,然后创建类。 连接起来就是: 先定义metaclass,就可以创建类,最后创建实例。 所以,metaclass允许你创建类或者修改类。换句话说,你可以把类看成是metaclass创建出来的“实例”。 metaclass是Python面向对象里最难理解,也是最难使用的魔术代码。正常情况下,你不会碰到需要使用metaclass的情况, 所以,以下内容看不懂也没关系,因为基本上你不会用到。 我们先看一个简单的例子,这个metaclass可以给我们自定义的MyList增加一个add方法: 定义ListMetaclass,按照默认习惯,metaclass的类名重视以Metaclass结尾,以便清楚地标识这是一个metaclass:", "class StringField(Field): def __init__(self, name): super(StringField, self).__init__(name, 'varchar(100)') class IntegerField(Field): def __init__(self, name):", "当Python解释器载入hello模块时,就会依次执行该模块的所有语句,执行结果就是动态创建出一个 # Hello的class对象,测试如下: from hello import Hello h = Hello() h.hello() print(type(Hello)) print(type(h))", "3.类继承的父类集合 4.类的方法集合 测试一下MyList是否可以调用add()方法: ''' L = MyList() L.add(1) print(L) ''' 而普通的list没有add()方法: ''' #", "= type('Hello', (object,), dict(hello=fn)) # 创建Hello class ''' 要创建一个class对象,type()函数一次传入3个参数: 1.class的名称; 2.继承的父类集合,注意Python支持多重继承,如果只有一个父类,别忘了tuple的单元素的写法; 3.class的方法名称与函数绑定,这里我们把函数fn绑定到方法名hello上。 
通过type()函数创建的类和直接写class是完全一样的,因为Python解释器遇到class定义时,", "u.save() 其中,父类Model和属性类型StringField、IntegerField是由ORM框架提供的,剩下的魔术方法比如save()全部由metaclass自动完成。虽然metaclass的编写会比较复杂,但ORM的使用者用起来却异常简单。 现在,我们就按上面的接口来实现该ORM。 首先来定义Field类,它负责保存数据库表的字段名和字段类型: ''' class Field(object): def __init__(self, name, column_type): self.name =", "def fn(self, name='world'): # 先定义函数 print('Hello, %s.' % name) Hello = type('Hello', (object,),", "return type.__new__(cls, name, bases, attrs) ''' 有了ListMetaclass,我们在定义类的时候还是指示要使用ListMetaclass来定制类,传入关键字参数metaclass: ''' class MyList(list, metaclass=ListMetaclass): pass '''", "StringField('password') # 创建一个实例: u = User(id=12345, name='Michael', email='<EMAIL>', password='<PASSWORD>') # 保存到数据库: u.save() 其中,父类Model和属性类型StringField、IntegerField是由ORM框架提供的,剩下的魔术方法比如save()全部由metaclass自动完成。虽然metaclass的编写会比较复杂,但ORM的使用者用起来却异常简单。", "% name) Hello = type('Hello', (object,), dict(hello=fn)) # 创建Hello class ''' 要创建一个class对象,type()函数一次传入3个参数: 1.class的名称;", "metaclass是Python面向对象里最难理解,也是最难使用的魔术代码。正常情况下,你不会碰到需要使用metaclass的情况, 所以,以下内容看不懂也没关系,因为基本上你不会用到。 我们先看一个简单的例子,这个metaclass可以给我们自定义的MyList增加一个add方法: 定义ListMetaclass,按照默认习惯,metaclass的类名重视以Metaclass结尾,以便清楚地标识这是一个metaclass: ''' # metaclass是类的模板,所以必须从'type'类型派生: class ListMetaclass(type): def __new__(cls, name, bases,", "''' # L2 = list() # L2.add(1) ''' 动态修改有什么意义?直接在MyList定义中写上add()方法不是更简单吗? 
正常情况下,确实应该直接写,通过metaclass修改纯属变态。 但是,总会遇到需要通过metaclass修改类定义的。ORM就是一个典型的例子。 ORM全称“Object Relational", "User(Model): # 定义类的属性到列的映射: id = IntegerField('id') name = StringField('username') email = StringField('email') password", "要创建一个class对象,type()函数一次传入3个参数: 1.class的名称; 2.继承的父类集合,注意Python支持多重继承,如果只有一个父类,别忘了tuple的单元素的写法; 3.class的方法名称与函数绑定,这里我们把函数fn绑定到方法名hello上。 通过type()函数创建的类和直接写class是完全一样的,因为Python解释器遇到class定义时, 仅仅是扫描一下class定义的语法,然后调用type()函数创建出class。 正常情况下,我们用class Xxx..来定义类,但是,type()函数也允许我们动态创建出类来,也就是说 动态语言本身支持动态创建类,这和静态语言有非常大的不同,要在静态语言运行时期创建类,必须构造源代码字符串 再调用编译器,或者借助一些工具生成字节码实现,本质上都是动态编译,会非常复杂。 ''' ################# #", "'<%s:%s>' % (self.__class__.__name__, self.name) ''' 在Field的基础上,进一步定义各种类型的Field,比如StringField,IntegerField等等: ''' class StringField(Field): def __init__(self, name): super(StringField,", "MyList(list, metaclass=ListMetaclass): pass ''' 当我们传入关键字参数metaclass时,魔术就生效了, 它指示Python解释器在创建MyList时, 要通过ListMetaclass.__new__()来创建。 在此,我们可以修改类的定义,比如,加上新的方法,然后,返回修改后的定义。 __new__()方法接收到的参数依次是: 1.当前准备创建的类的对象 2.类的名字 3.类继承的父类集合 4.类的方法集合", "# L2.add(1) ''' 动态修改有什么意义?直接在MyList定义中写上add()方法不是更简单吗? 正常情况下,确实应该直接写,通过metaclass修改纯属变态。 但是,总会遇到需要通过metaclass修改类定义的。ORM就是一个典型的例子。 ORM全称“Object Relational Mapping”,即对象-关系映射,就是把关系数据库的一行映射为一个对象,也就是一个类对应一个表,这样,写代码更简单,不用直接操作SQL语句。 要编写一个ORM框架,所有的类都只能动态定义,因为只有使用者才能根据表的结构定义出对应的类来。 让我们来尝试编写一个ORM框架。 编写底层模块的第一步,就是先把调用接口写出来。比如,使用者如果使用这个ORM框架,想定义一个User类来操作对应的数据库表User,我们期待他写出这样的代码: class", "class Hello(object): def hello(self, name='world'): print('Hello, %s.' % name) ''' # 当Python解释器载入hello模块时,就会依次执行该模块的所有语句,执行结果就是动态创建出一个 #", "import Hello h = Hello() h.hello() print(type(Hello)) print(type(h)) ''' type()函数可以查看一个类型或变量的类型,Hello是一个class,它的类型就是type,而h是一个实例,它的类型就是class Hello. 我们说class的定义是运行时动态创建的,而创建class的方法就是使用type()函数。 type()函数既可以返回一个对象的类型,又可以创建出新的类型,比如,我们可以通过type()函数", "fn(self, name='world'): # 先定义函数 print('Hello, %s.' 
% name) Hello = type('Hello', (object,), dict(hello=fn))", "IntegerField('id') name = StringField('username') email = StringField('email') password = StringField('password') # 创建一个实例: u", "type.__new__(cls, name, bases, attrs) ''' 有了ListMetaclass,我们在定义类的时候还是指示要使用ListMetaclass来定制类,传入关键字参数metaclass: ''' class MyList(list, metaclass=ListMetaclass): pass ''' 当我们传入关键字参数metaclass时,魔术就生效了,", "''' 在Field的基础上,进一步定义各种类型的Field,比如StringField,IntegerField等等: ''' class StringField(Field): def __init__(self, name): super(StringField, self).__init__(name, 'varchar(100)') class IntegerField(Field):", "name, bases, attrs): attrs['add'] = lambda self, value: self.append(value) return type.__new__(cls, name, bases,", "email = StringField('email') password = StringField('password') # 创建一个实例: u = User(id=12345, name='Michael', email='<EMAIL>',", "Mapping”,即对象-关系映射,就是把关系数据库的一行映射为一个对象,也就是一个类对应一个表,这样,写代码更简单,不用直接操作SQL语句。 要编写一个ORM框架,所有的类都只能动态定义,因为只有使用者才能根据表的结构定义出对应的类来。 让我们来尝试编写一个ORM框架。 编写底层模块的第一步,就是先把调用接口写出来。比如,使用者如果使用这个ORM框架,想定义一个User类来操作对应的数据库表User,我们期待他写出这样的代码: class User(Model): # 定义类的属性到列的映射: id = IntegerField('id') name =", "self.name) ''' 在Field的基础上,进一步定义各种类型的Field,比如StringField,IntegerField等等: ''' class StringField(Field): def __init__(self, name): super(StringField, self).__init__(name, 'varchar(100)') class", "(self.__class__.__name__, self.name) ''' 在Field的基础上,进一步定义各种类型的Field,比如StringField,IntegerField等等: ''' class StringField(Field): def __init__(self, name): super(StringField, self).__init__(name, 'varchar(100)')", "name): super(StringField, self).__init__(name, 'varchar(100)') class IntegerField(Field): def __init__(self, name): super(IntegerField, self).__init__(name, 'bigint') '''", "MyList() L.add(1) print(L) ''' 而普通的list没有add()方法: ''' # L2 = list() # L2.add(1) '''", "让我们来尝试编写一个ORM框架。 编写底层模块的第一步,就是先把调用接口写出来。比如,使用者如果使用这个ORM框架,想定义一个User类来操作对应的数据库表User,我们期待他写出这样的代码: class User(Model): # 定义类的属性到列的映射: id = IntegerField('id') name = StringField('username') email", "L2 = list() # 
L2.add(1) ''' 动态修改有什么意义?直接在MyList定义中写上add()方法不是更简单吗? 正常情况下,确实应该直接写,通过metaclass修改纯属变态。 但是,总会遇到需要通过metaclass修改类定义的。ORM就是一个典型的例子。 ORM全称“Object Relational Mapping”,即对象-关系映射,就是把关系数据库的一行映射为一个对象,也就是一个类对应一个表,这样,写代码更简单,不用直接操作SQL语句。 要编写一个ORM框架,所有的类都只能动态定义,因为只有使用者才能根据表的结构定义出对应的类来。", "要编写一个ORM框架,所有的类都只能动态定义,因为只有使用者才能根据表的结构定义出对应的类来。 让我们来尝试编写一个ORM框架。 编写底层模块的第一步,就是先把调用接口写出来。比如,使用者如果使用这个ORM框架,想定义一个User类来操作对应的数据库表User,我们期待他写出这样的代码: class User(Model): # 定义类的属性到列的映射: id = IntegerField('id') name = StringField('username')", "= column_type def __str__(self): return '<%s:%s>' % (self.__class__.__name__, self.name) ''' 在Field的基础上,进一步定义各种类型的Field,比如StringField,IntegerField等等: ''' class", "Hello. 我们说class的定义是运行时动态创建的,而创建class的方法就是使用type()函数。 type()函数既可以返回一个对象的类型,又可以创建出新的类型,比如,我们可以通过type()函数 创建出Hello类,而无需通过 class Hello(object)...的定义: ''' def fn(self, name='world'): # 先定义函数 print('Hello,", "use_type ############ # type() ''' 动态语言和静态语言最大的不同,就是函数和类的定义,【不是编译时定义的,而是运行时动态创建的。】 ''' # 比方说我们要定义一个Hello的class,就写一个hello.py模块: ''' class Hello(object): def", "''' class Hello(object): def hello(self, name='world'): print('Hello, %s.' % name) ''' # 当Python解释器载入hello模块时,就会依次执行该模块的所有语句,执行结果就是动态创建出一个", "所以,以下内容看不懂也没关系,因为基本上你不会用到。 我们先看一个简单的例子,这个metaclass可以给我们自定义的MyList增加一个add方法: 定义ListMetaclass,按照默认习惯,metaclass的类名重视以Metaclass结尾,以便清楚地标识这是一个metaclass: ''' # metaclass是类的模板,所以必须从'type'类型派生: class ListMetaclass(type): def __new__(cls, name, bases, attrs):", "L = MyList() L.add(1) print(L) ''' 而普通的list没有add()方法: ''' # L2 = list() #", "StringField('email') password = StringField('password') # 创建一个实例: u = User(id=12345, name='Michael', email='<EMAIL>', password='<PASSWORD>') #", "name='world'): # 先定义函数 print('Hello, %s.' 
% name) Hello = type('Hello', (object,), dict(hello=fn)) #", "attrs['add'] = lambda self, value: self.append(value) return type.__new__(cls, name, bases, attrs) ''' 有了ListMetaclass,我们在定义类的时候还是指示要使用ListMetaclass来定制类,传入关键字参数metaclass:", "name) ''' # 当Python解释器载入hello模块时,就会依次执行该模块的所有语句,执行结果就是动态创建出一个 # Hello的class对象,测试如下: from hello import Hello h = Hello()", "1.class的名称; 2.继承的父类集合,注意Python支持多重继承,如果只有一个父类,别忘了tuple的单元素的写法; 3.class的方法名称与函数绑定,这里我们把函数fn绑定到方法名hello上。 通过type()函数创建的类和直接写class是完全一样的,因为Python解释器遇到class定义时, 仅仅是扫描一下class定义的语法,然后调用type()函数创建出class。 正常情况下,我们用class Xxx..来定义类,但是,type()函数也允许我们动态创建出类来,也就是说 动态语言本身支持动态创建类,这和静态语言有非常大的不同,要在静态语言运行时期创建类,必须构造源代码字符串 再调用编译器,或者借助一些工具生成字节码实现,本质上都是动态编译,会非常复杂。 ''' ################# # metaclass", "''' def fn(self, name='world'): # 先定义函数 print('Hello, %s.' % name) Hello = type('Hello',", "print(type(Hello)) print(type(h)) ''' type()函数可以查看一个类型或变量的类型,Hello是一个class,它的类型就是type,而h是一个实例,它的类型就是class Hello. 我们说class的定义是运行时动态创建的,而创建class的方法就是使用type()函数。 type()函数既可以返回一个对象的类型,又可以创建出新的类型,比如,我们可以通过type()函数 创建出Hello类,而无需通过 class Hello(object)...的定义: ''' def fn(self,", "User(id=12345, name='Michael', email='<EMAIL>', password='<PASSWORD>') # 保存到数据库: u.save() 其中,父类Model和属性类型StringField、IntegerField是由ORM框架提供的,剩下的魔术方法比如save()全部由metaclass自动完成。虽然metaclass的编写会比较复杂,但ORM的使用者用起来却异常简单。 现在,我们就按上面的接口来实现该ORM。 首先来定义Field类,它负责保存数据库表的字段名和字段类型: ''' class Field(object):", "class ''' 要创建一个class对象,type()函数一次传入3个参数: 1.class的名称; 2.继承的父类集合,注意Python支持多重继承,如果只有一个父类,别忘了tuple的单元素的写法; 3.class的方法名称与函数绑定,这里我们把函数fn绑定到方法名hello上。 通过type()函数创建的类和直接写class是完全一样的,因为Python解释器遇到class定义时, 仅仅是扫描一下class定义的语法,然后调用type()函数创建出class。 正常情况下,我们用class Xxx..来定义类,但是,type()函数也允许我们动态创建出类来,也就是说 动态语言本身支持动态创建类,这和静态语言有非常大的不同,要在静态语言运行时期创建类,必须构造源代码字符串 再调用编译器,或者借助一些工具生成字节码实现,本质上都是动态编译,会非常复杂。 '''", "Hello = type('Hello', (object,), dict(hello=fn)) # 创建Hello class ''' 要创建一个class对象,type()函数一次传入3个参数: 1.class的名称; 2.继承的父类集合,注意Python支持多重继承,如果只有一个父类,别忘了tuple的单元素的写法; 3.class的方法名称与函数绑定,这里我们把函数fn绑定到方法名hello上。", 
"self).__init__(name, 'varchar(100)') class IntegerField(Field): def __init__(self, name): super(IntegerField, self).__init__(name, 'bigint') ''' 下一步,就是编写最复杂的ModelMetaclass了: '''", "= User(id=12345, name='Michael', email='<EMAIL>', password='<PASSWORD>') # 保存到数据库: u.save() 其中,父类Model和属性类型StringField、IntegerField是由ORM框架提供的,剩下的魔术方法比如save()全部由metaclass自动完成。虽然metaclass的编写会比较复杂,但ORM的使用者用起来却异常简单。 现在,我们就按上面的接口来实现该ORM。 首先来定义Field类,它负责保存数据库表的字段名和字段类型: ''' class", "class ListMetaclass(type): def __new__(cls, name, bases, attrs): attrs['add'] = lambda self, value: self.append(value)", "# 定义类的属性到列的映射: id = IntegerField('id') name = StringField('username') email = StringField('email') password =", "StringField(Field): def __init__(self, name): super(StringField, self).__init__(name, 'varchar(100)') class IntegerField(Field): def __init__(self, name): super(IntegerField,", "首先来定义Field类,它负责保存数据库表的字段名和字段类型: ''' class Field(object): def __init__(self, name, column_type): self.name = name self.column_type =", "先定义函数 print('Hello, %s.' 
% name) Hello = type('Hello', (object,), dict(hello=fn)) # 创建Hello class", "# 保存到数据库: u.save() 其中,父类Model和属性类型StringField、IntegerField是由ORM框架提供的,剩下的魔术方法比如save()全部由metaclass自动完成。虽然metaclass的编写会比较复杂,但ORM的使用者用起来却异常简单。 现在,我们就按上面的接口来实现该ORM。 首先来定义Field类,它负责保存数据库表的字段名和字段类型: ''' class Field(object): def __init__(self, name, column_type):", "ORM全称“Object Relational Mapping”,即对象-关系映射,就是把关系数据库的一行映射为一个对象,也就是一个类对应一个表,这样,写代码更简单,不用直接操作SQL语句。 要编写一个ORM框架,所有的类都只能动态定义,因为只有使用者才能根据表的结构定义出对应的类来。 让我们来尝试编写一个ORM框架。 编写底层模块的第一步,就是先把调用接口写出来。比如,使用者如果使用这个ORM框架,想定义一个User类来操作对应的数据库表User,我们期待他写出这样的代码: class User(Model): # 定义类的属性到列的映射: id = IntegerField('id')", "email='<EMAIL>', password='<PASSWORD>') # 保存到数据库: u.save() 其中,父类Model和属性类型StringField、IntegerField是由ORM框架提供的,剩下的魔术方法比如save()全部由metaclass自动完成。虽然metaclass的编写会比较复杂,但ORM的使用者用起来却异常简单。 现在,我们就按上面的接口来实现该ORM。 首先来定义Field类,它负责保存数据库表的字段名和字段类型: ''' class Field(object): def __init__(self,", "我们说class的定义是运行时动态创建的,而创建class的方法就是使用type()函数。 type()函数既可以返回一个对象的类型,又可以创建出新的类型,比如,我们可以通过type()函数 创建出Hello类,而无需通过 class Hello(object)...的定义: ''' def fn(self, name='world'): # 先定义函数 print('Hello, %s.'", "''' class MyList(list, metaclass=ListMetaclass): pass ''' 当我们传入关键字参数metaclass时,魔术就生效了, 它指示Python解释器在创建MyList时, 要通过ListMetaclass.__new__()来创建。 在此,我们可以修改类的定义,比如,加上新的方法,然后,返回修改后的定义。 __new__()方法接收到的参数依次是: 1.当前准备创建的类的对象 2.类的名字", "column_type def __str__(self): return '<%s:%s>' % (self.__class__.__name__, self.name) ''' 在Field的基础上,进一步定义各种类型的Field,比如StringField,IntegerField等等: ''' class StringField(Field):", "''' ################# # metaclass ################# ''' 除了使用type()动态创建类以外,要控制类的创建行为,还可以使用metaclass. 
metaclass,直译为元类,简单的解释就是: 当我们定义了类以后,就可以根据这个类创建出实例,所以: 先定义类,然后创建实例。 但是如果我们想创建出类呢?那就必须根据metaclass创建出类,所以:先定义metaclass,然后创建类。 连接起来就是: 先定义metaclass,就可以创建类,最后创建实例。", "= MyList() L.add(1) print(L) ''' 而普通的list没有add()方法: ''' # L2 = list() # L2.add(1)", "在此,我们可以修改类的定义,比如,加上新的方法,然后,返回修改后的定义。 __new__()方法接收到的参数依次是: 1.当前准备创建的类的对象 2.类的名字 3.类继承的父类集合 4.类的方法集合 测试一下MyList是否可以调用add()方法: ''' L = MyList() L.add(1) print(L)", "= Hello() h.hello() print(type(Hello)) print(type(h)) ''' type()函数可以查看一个类型或变量的类型,Hello是一个class,它的类型就是type,而h是一个实例,它的类型就是class Hello. 我们说class的定义是运行时动态创建的,而创建class的方法就是使用type()函数。 type()函数既可以返回一个对象的类型,又可以创建出新的类型,比如,我们可以通过type()函数 创建出Hello类,而无需通过 class Hello(object)...的定义:", "type()函数可以查看一个类型或变量的类型,Hello是一个class,它的类型就是type,而h是一个实例,它的类型就是class Hello. 我们说class的定义是运行时动态创建的,而创建class的方法就是使用type()函数。 type()函数既可以返回一个对象的类型,又可以创建出新的类型,比如,我们可以通过type()函数 创建出Hello类,而无需通过 class Hello(object)...的定义: ''' def fn(self, name='world'): # 先定义函数", "''' L = MyList() L.add(1) print(L) ''' 而普通的list没有add()方法: ''' # L2 = list()", "Hello(object): def hello(self, name='world'): print('Hello, %s.' % name) ''' # 当Python解释器载入hello模块时,就会依次执行该模块的所有语句,执行结果就是动态创建出一个 # Hello的class对象,测试如下:", "% name) ''' # 当Python解释器载入hello模块时,就会依次执行该模块的所有语句,执行结果就是动态创建出一个 # Hello的class对象,测试如下: from hello import Hello h =", "要通过ListMetaclass.__new__()来创建。 在此,我们可以修改类的定义,比如,加上新的方法,然后,返回修改后的定义。 __new__()方法接收到的参数依次是: 1.当前准备创建的类的对象 2.类的名字 3.类继承的父类集合 4.类的方法集合 测试一下MyList是否可以调用add()方法: ''' L = MyList() L.add(1)", "= StringField('password') # 创建一个实例: u = User(id=12345, name='Michael', email='<EMAIL>', password='<PASSWORD>') # 保存到数据库: u.save()", "hello(self, name='world'): print('Hello, %s.' 
% name) ''' # 当Python解释器载入hello模块时,就会依次执行该模块的所有语句,执行结果就是动态创建出一个 # Hello的class对象,测试如下: from hello", "''' 要创建一个class对象,type()函数一次传入3个参数: 1.class的名称; 2.继承的父类集合,注意Python支持多重继承,如果只有一个父类,别忘了tuple的单元素的写法; 3.class的方法名称与函数绑定,这里我们把函数fn绑定到方法名hello上。 通过type()函数创建的类和直接写class是完全一样的,因为Python解释器遇到class定义时, 仅仅是扫描一下class定义的语法,然后调用type()函数创建出class。 正常情况下,我们用class Xxx..来定义类,但是,type()函数也允许我们动态创建出类来,也就是说 动态语言本身支持动态创建类,这和静态语言有非常大的不同,要在静态语言运行时期创建类,必须构造源代码字符串 再调用编译器,或者借助一些工具生成字节码实现,本质上都是动态编译,会非常复杂。 ''' #################", "metaclass是类的模板,所以必须从'type'类型派生: class ListMetaclass(type): def __new__(cls, name, bases, attrs): attrs['add'] = lambda self, value:", "type()函数既可以返回一个对象的类型,又可以创建出新的类型,比如,我们可以通过type()函数 创建出Hello类,而无需通过 class Hello(object)...的定义: ''' def fn(self, name='world'): # 先定义函数 print('Hello, %s.' %", "Hello h = Hello() h.hello() print(type(Hello)) print(type(h)) ''' type()函数可以查看一个类型或变量的类型,Hello是一个class,它的类型就是type,而h是一个实例,它的类型就是class Hello. 我们说class的定义是运行时动态创建的,而创建class的方法就是使用type()函数。 type()函数既可以返回一个对象的类型,又可以创建出新的类型,比如,我们可以通过type()函数 创建出Hello类,而无需通过", "有了ListMetaclass,我们在定义类的时候还是指示要使用ListMetaclass来定制类,传入关键字参数metaclass: ''' class MyList(list, metaclass=ListMetaclass): pass ''' 当我们传入关键字参数metaclass时,魔术就生效了, 它指示Python解释器在创建MyList时, 要通过ListMetaclass.__new__()来创建。 在此,我们可以修改类的定义,比如,加上新的方法,然后,返回修改后的定义。 __new__()方法接收到的参数依次是: 1.当前准备创建的类的对象", "先定义类,然后创建实例。 但是如果我们想创建出类呢?那就必须根据metaclass创建出类,所以:先定义metaclass,然后创建类。 连接起来就是: 先定义metaclass,就可以创建类,最后创建实例。 所以,metaclass允许你创建类或者修改类。换句话说,你可以把类看成是metaclass创建出来的“实例”。 metaclass是Python面向对象里最难理解,也是最难使用的魔术代码。正常情况下,你不会碰到需要使用metaclass的情况, 所以,以下内容看不懂也没关系,因为基本上你不会用到。 我们先看一个简单的例子,这个metaclass可以给我们自定义的MyList增加一个add方法: 定义ListMetaclass,按照默认习惯,metaclass的类名重视以Metaclass结尾,以便清楚地标识这是一个metaclass: ''' # metaclass是类的模板,所以必须从'type'类型派生: class", "%s.' 
% name) Hello = type('Hello', (object,), dict(hello=fn)) # 创建Hello class ''' 要创建一个class对象,type()函数一次传入3个参数:", "attrs): attrs['add'] = lambda self, value: self.append(value) return type.__new__(cls, name, bases, attrs) '''", "定义ListMetaclass,按照默认习惯,metaclass的类名重视以Metaclass结尾,以便清楚地标识这是一个metaclass: ''' # metaclass是类的模板,所以必须从'type'类型派生: class ListMetaclass(type): def __new__(cls, name, bases, attrs): attrs['add'] =", "再调用编译器,或者借助一些工具生成字节码实现,本质上都是动态编译,会非常复杂。 ''' ################# # metaclass ################# ''' 除了使用type()动态创建类以外,要控制类的创建行为,还可以使用metaclass. metaclass,直译为元类,简单的解释就是: 当我们定义了类以后,就可以根据这个类创建出实例,所以: 先定义类,然后创建实例。 但是如果我们想创建出类呢?那就必须根据metaclass创建出类,所以:先定义metaclass,然后创建类。 连接起来就是:", "在Field的基础上,进一步定义各种类型的Field,比如StringField,IntegerField等等: ''' class StringField(Field): def __init__(self, name): super(StringField, self).__init__(name, 'varchar(100)') class IntegerField(Field): def", "class MyList(list, metaclass=ListMetaclass): pass ''' 当我们传入关键字参数metaclass时,魔术就生效了, 它指示Python解释器在创建MyList时, 要通过ListMetaclass.__new__()来创建。 在此,我们可以修改类的定义,比如,加上新的方法,然后,返回修改后的定义。 __new__()方法接收到的参数依次是: 1.当前准备创建的类的对象 2.类的名字 3.类继承的父类集合", "# metaclass ################# ''' 除了使用type()动态创建类以外,要控制类的创建行为,还可以使用metaclass. metaclass,直译为元类,简单的解释就是: 当我们定义了类以后,就可以根据这个类创建出实例,所以: 先定义类,然后创建实例。 但是如果我们想创建出类呢?那就必须根据metaclass创建出类,所以:先定义metaclass,然后创建类。 连接起来就是: 先定义metaclass,就可以创建类,最后创建实例。 所以,metaclass允许你创建类或者修改类。换句话说,你可以把类看成是metaclass创建出来的“实例”。 metaclass是Python面向对象里最难理解,也是最难使用的魔术代码。正常情况下,你不会碰到需要使用metaclass的情况,", "hello import Hello h = Hello() h.hello() print(type(Hello)) print(type(h)) ''' type()函数可以查看一个类型或变量的类型,Hello是一个class,它的类型就是type,而h是一个实例,它的类型就是class Hello. 我们说class的定义是运行时动态创建的,而创建class的方法就是使用type()函数。", "name) Hello = type('Hello', (object,), dict(hello=fn)) # 创建Hello class ''' 要创建一个class对象,type()函数一次传入3个参数: 1.class的名称; 2.继承的父类集合,注意Python支持多重继承,如果只有一个父类,别忘了tuple的单元素的写法;", "''' 而普通的list没有add()方法: ''' # L2 = list() # L2.add(1) ''' 动态修改有什么意义?直接在MyList定义中写上add()方法不是更简单吗? 
正常情况下,确实应该直接写,通过metaclass修改纯属变态。 但是,总会遇到需要通过metaclass修改类定义的。ORM就是一个典型的例子。", "name = StringField('username') email = StringField('email') password = StringField('password') # 创建一个实例: u =", "column_type): self.name = name self.column_type = column_type def __str__(self): return '<%s:%s>' % (self.__class__.__name__,", "name self.column_type = column_type def __str__(self): return '<%s:%s>' % (self.__class__.__name__, self.name) ''' 在Field的基础上,进一步定义各种类型的Field,比如StringField,IntegerField等等:", "type() ''' 动态语言和静态语言最大的不同,就是函数和类的定义,【不是编译时定义的,而是运行时动态创建的。】 ''' # 比方说我们要定义一个Hello的class,就写一个hello.py模块: ''' class Hello(object): def hello(self, name='world'): print('Hello,", "def hello(self, name='world'): print('Hello, %s.' % name) ''' # 当Python解释器载入hello模块时,就会依次执行该模块的所有语句,执行结果就是动态创建出一个 # Hello的class对象,测试如下: from", "现在,我们就按上面的接口来实现该ORM。 首先来定义Field类,它负责保存数据库表的字段名和字段类型: ''' class Field(object): def __init__(self, name, column_type): self.name = name self.column_type", "当我们定义了类以后,就可以根据这个类创建出实例,所以: 先定义类,然后创建实例。 但是如果我们想创建出类呢?那就必须根据metaclass创建出类,所以:先定义metaclass,然后创建类。 连接起来就是: 先定义metaclass,就可以创建类,最后创建实例。 所以,metaclass允许你创建类或者修改类。换句话说,你可以把类看成是metaclass创建出来的“实例”。 metaclass是Python面向对象里最难理解,也是最难使用的魔术代码。正常情况下,你不会碰到需要使用metaclass的情况, 所以,以下内容看不懂也没关系,因为基本上你不会用到。 我们先看一个简单的例子,这个metaclass可以给我们自定义的MyList增加一个add方法: 定义ListMetaclass,按照默认习惯,metaclass的类名重视以Metaclass结尾,以便清楚地标识这是一个metaclass: ''' # metaclass是类的模板,所以必须从'type'类型派生:", "比方说我们要定义一个Hello的class,就写一个hello.py模块: ''' class Hello(object): def hello(self, name='world'): print('Hello, %s.' 
% name) ''' #", "Hello的class对象,测试如下: from hello import Hello h = Hello() h.hello() print(type(Hello)) print(type(h)) ''' type()函数可以查看一个类型或变量的类型,Hello是一个class,它的类型就是type,而h是一个实例,它的类型就是class", "''' # metaclass是类的模板,所以必须从'type'类型派生: class ListMetaclass(type): def __new__(cls, name, bases, attrs): attrs['add'] = lambda", "它指示Python解释器在创建MyList时, 要通过ListMetaclass.__new__()来创建。 在此,我们可以修改类的定义,比如,加上新的方法,然后,返回修改后的定义。 __new__()方法接收到的参数依次是: 1.当前准备创建的类的对象 2.类的名字 3.类继承的父类集合 4.类的方法集合 测试一下MyList是否可以调用add()方法: ''' L = MyList()", "''' 动态语言和静态语言最大的不同,就是函数和类的定义,【不是编译时定义的,而是运行时动态创建的。】 ''' # 比方说我们要定义一个Hello的class,就写一个hello.py模块: ''' class Hello(object): def hello(self, name='world'): print('Hello, %s.'", "# type() ''' 动态语言和静态语言最大的不同,就是函数和类的定义,【不是编译时定义的,而是运行时动态创建的。】 ''' # 比方说我们要定义一个Hello的class,就写一个hello.py模块: ''' class Hello(object): def hello(self, name='world'):", "''' # 比方说我们要定义一个Hello的class,就写一个hello.py模块: ''' class Hello(object): def hello(self, name='world'): print('Hello, %s.' % name)", "通过type()函数创建的类和直接写class是完全一样的,因为Python解释器遇到class定义时, 仅仅是扫描一下class定义的语法,然后调用type()函数创建出class。 正常情况下,我们用class Xxx..来定义类,但是,type()函数也允许我们动态创建出类来,也就是说 动态语言本身支持动态创建类,这和静态语言有非常大的不同,要在静态语言运行时期创建类,必须构造源代码字符串 再调用编译器,或者借助一些工具生成字节码实现,本质上都是动态编译,会非常复杂。 ''' ################# # metaclass ################# ''' 除了使用type()动态创建类以外,要控制类的创建行为,还可以使用metaclass.", "动态语言本身支持动态创建类,这和静态语言有非常大的不同,要在静态语言运行时期创建类,必须构造源代码字符串 再调用编译器,或者借助一些工具生成字节码实现,本质上都是动态编译,会非常复杂。 ''' ################# # metaclass ################# ''' 除了使用type()动态创建类以外,要控制类的创建行为,还可以使用metaclass. 
metaclass,直译为元类,简单的解释就是: 当我们定义了类以后,就可以根据这个类创建出实例,所以: 先定义类,然后创建实例。 但是如果我们想创建出类呢?那就必须根据metaclass创建出类,所以:先定义metaclass,然后创建类。", "password='<PASSWORD>') # 保存到数据库: u.save() 其中,父类Model和属性类型StringField、IntegerField是由ORM框架提供的,剩下的魔术方法比如save()全部由metaclass自动完成。虽然metaclass的编写会比较复杂,但ORM的使用者用起来却异常简单。 现在,我们就按上面的接口来实现该ORM。 首先来定义Field类,它负责保存数据库表的字段名和字段类型: ''' class Field(object): def __init__(self, name,", "print(L) ''' 而普通的list没有add()方法: ''' # L2 = list() # L2.add(1) ''' 动态修改有什么意义?直接在MyList定义中写上add()方法不是更简单吗? 正常情况下,确实应该直接写,通过metaclass修改纯属变态。", "from hello import Hello h = Hello() h.hello() print(type(Hello)) print(type(h)) ''' type()函数可以查看一个类型或变量的类型,Hello是一个class,它的类型就是type,而h是一个实例,它的类型就是class Hello.", "############ # use_type ############ # type() ''' 动态语言和静态语言最大的不同,就是函数和类的定义,【不是编译时定义的,而是运行时动态创建的。】 ''' # 比方说我们要定义一个Hello的class,就写一个hello.py模块: ''' class", "2.类的名字 3.类继承的父类集合 4.类的方法集合 测试一下MyList是否可以调用add()方法: ''' L = MyList() L.add(1) print(L) ''' 而普通的list没有add()方法: '''", "self, value: self.append(value) return type.__new__(cls, name, bases, attrs) ''' 有了ListMetaclass,我们在定义类的时候还是指示要使用ListMetaclass来定制类,传入关键字参数metaclass: ''' class MyList(list,", "__new__(cls, name, bases, attrs): attrs['add'] = lambda self, value: self.append(value) return type.__new__(cls, name,", "''' 当我们传入关键字参数metaclass时,魔术就生效了, 它指示Python解释器在创建MyList时, 要通过ListMetaclass.__new__()来创建。 在此,我们可以修改类的定义,比如,加上新的方法,然后,返回修改后的定义。 __new__()方法接收到的参数依次是: 1.当前准备创建的类的对象 2.类的名字 3.类继承的父类集合 4.类的方法集合 测试一下MyList是否可以调用add()方法: ''' L" ]
[ "np.zeros_like(state), \"constituency_matrix\": constituency_matrix, \"job_generator\": drjg.DeterministicDiscreteReviewJobGenerator( arrival_rate, buffer_processing_matrix, sim_time_interval=time_interval ), \"state_initialiser\": si.DeterministicCRWStateInitialiser(state), \"job_conservation_flag\": True,", "0, -1]]) safety_stock = 10 state = np.array([20, 30, 30, 20])[:, None] env_params", "== 1 and np.all(action[2:4] == np.array([0, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_activities_buffers_and_resources(): # Two stations,", "30])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0, 0],", "0], [0, -1, 0, 0], [1, 0, -1, 0], [0, 1, 0, -1]])", "[0.25], [0.25], [0.25], [0.25]]) num_sim = int(1e4) actions = np.zeros((buffer_processing_matrix.shape[1], num_sim)) for i", "priorities = {0: 0, 2: 1} state = np.array([[10.], [10.], [10.]]) buffer_processing_matrix =", "= 10 state = np.array([30, 20, 20, 30])[:, None] env_params = get_null_env_params( state,", "1, 0], [0, 0, 1, 1]]) env_params[\"list_boundary_constraint_matrices\"] = [np.array([[1, 1, 0, 0], [0,", "= random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert action == np.zeros((1, 1)) def test_random_heuristic_agent():", "state = np.array([[10.], [0.], [10.]]) buffer_processing_matrix = np.array([[-1., 0., -1., 0., 0., 0.,", "None: buffer_processing_matrix = -np.triu(np.ones((num_buffers, num_resources))) if constituency_matrix is None: constituency_matrix = np.zeros((num_resources, num_resources))", "get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 3)) env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1, 3))] env = crw.ControlledRandomWalk(**env_params) agent", "def test_random_heuristic_agent_starving(): # Single server queue safety_stock = 10.0 state = 5 *", "1])[:, None]) def 
test_priority_nonidling_heuristic_agent_multi_buffers_and_resources_sum_cond_reverse_order(): # Two stations, each one scheduling two buffers. The", "[i]] = agent.sample_random_actions(state=state, action=action, activities=activities) average_updated_action = np.sum(updated_action, axis=1) / float(num_sim) np.testing.assert_array_almost_equal(average_updated_action.reshape(-1, 1),", "larger than the other, both above safety stock (swap # order with previous", "3)) env_params[\"list_boundary_constraint_matrices\"] = [np.eye(3)] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action =", "safety_stock = 9.9 state = 10 * np.ones((4, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"]", "np.all(action == np.array([1, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition(): # One station scheduling two buffers,", "and (action[3] == 1) def test_priority_heuristic_agent_init_all_resources_given(): priorities = {0: 0, 1: 2, 2:", "action = agent.map_state_to_actions(state) assert action == np.zeros((1, 1)) def test_random_heuristic_agent(): # Single server", "agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action) == 2 def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_1_starve():", "0., -1., -1., 0., -1.], [0., -1., -1., 0., 0., 0., 0.], [0.,", "= agent.map_state_to_actions(state) assert np.all(action == np.array([1, 0, 0, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_largest_buffers_multiple_resources_sum_cond(): #", "agent.map_state_to_actions(state) assert np.all(action == np.zeros((3, 1))) def test_random_heuristic_agent_multiple_buffers_sum_condition(): # Station scheduling three buffers,", "custom_priority_agent def get_null_env_params(state, num_resources=None, buffer_processing_matrix=None, 
constituency_matrix=None): num_buffers = state.shape[0] arrival_rate = np.ones_like(state) if", "Single server queue buffer_processing_matrix = - np.ones((1, 1)) safety_stock = 4.0 state =", "crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock, name=\"LPAAgent\") action = agent.map_state_to_actions(state) assert action == np.ones((1,", "[i]] = agent.map_state_to_actions(state=state) average_action = np.sum(actions, axis=1) / float(num_sim) np.testing.assert_array_almost_equal(average_action.reshape(-1, 1), expected_action, decimal=2)", "above safety stock safety_stock = 10.0 state = 5 * np.ones((3, 1)) env_params", "buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_average_action = np.array([[1], [0.],", "buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) assert agent.priorities == expected_priorities", "expected_action = np.array([[1], [0], [1], [0], [1], [0], [0]]) activities = np.array([1, 2])", "np.all(constituency_matrix_original == env.constituency_matrix) def test_priority_heuristic_agent_map_state_to_actions_full_priorities_full_buffer(): priorities = {0: 0, 1: 2, 2: 5}", "np.sum(action[2:4]) == 1 and np.all(action[0:2] == np.zeros((2, 1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_2_starve(): # Two stations,", "[np.hstack((np.eye(2), np.zeros((2, 2)))), np.hstack((np.zeros((2, 2)), np.eye(2)))] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock)", "= longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert 
np.all(action == np.array([0, 1, 1, 0])[:,", "0, 0]]), np.array([[0, 0, 1, 1]])] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock)", "queue buffer_processing_matrix = - np.ones((1, 1)) safety_stock = 4.0 state = 5 *", "priorities) def test_priority_heuristic_agent_sample_random_action_empty_possible_actions(): priorities = {0: 0, 1: 2, 2: 5} state =", "5 * np.ones((1, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 1)) env_params[\"list_boundary_constraint_matrices\"] =", "safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((3, 1))) def test_random_heuristic_agent_multiple_buffers_eye_condition(): # Station", "num_resources)) time_interval = 1 return { \"cost_per_buffer\": np.zeros_like(state), \"capacity\": np.zeros_like(state), \"constituency_matrix\": constituency_matrix, \"job_generator\":", "[0, 0, 1, 0]]), np.array([[0, 0, 1, 1]])] env = crw.ControlledRandomWalk(**env_params) agent =", "1., 1., 1., 1.]]) env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent", "np.all(expected_action == updated_action) def test_priority_heuristic_agent_sample_random_action_multiple_possible_actions(): np.random.seed(42) priorities = {0: 0, 1: 2, 2:", "scheduling two buffers, each of them having to be above safety stock. safety_stock", "= np.array([30, 20, 9, 5])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] =", "buffer_processing_matrix=None, constituency_matrix=None): num_buffers = state.shape[0] arrival_rate = np.ones_like(state) if num_resources is None: num_resources", "# stock. 
safety_stock = 9.9 state = np.array([5, 5, 5, 4])[:, None] env_params", "num_sim)) for i in np.arange(num_sim): actions[:, [i]] = agent.map_state_to_actions(state=state) average_action = np.sum(actions, axis=1)", "= agent.map_state_to_actions(state) assert np.all(action == np.array([1, 0, 0, 1])[:, None]) def test_priority_nonidling_heuristic_agent_multi_buffers_and_resources_sum_cond_reverse_order(): #", "1 return { \"cost_per_buffer\": np.zeros_like(state), \"capacity\": np.zeros_like(state), \"constituency_matrix\": constituency_matrix, \"job_generator\": drjg.DeterministicDiscreteReviewJobGenerator( arrival_rate, buffer_processing_matrix,", "agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_average_action = np.array([[1], [0.], [1.], [0.33], [0.33], [0.], [0.33]])", "crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_action = np.array([[1], [0.5], [0.5], [0.25], [0.25], [0.25],", "One station scheduling two buffers, both equal and above safety stock. 
buffer_processing_matrix =", "buffers, each of them having to be above safety stock safety_stock = 1.0", "random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert action == np.ones((1, 1)) def test_random_heuristic_agent_multiple_buffers_eye_condition_starving(): #", "state = np.array([30, 20, 20, 30])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"]", "1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 3)) env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1, 3))] env", "-1., -1., 0., 0., 0., 0.], [0., 0., 0., -1., -1., -1., -1.]])", "5} state = np.array([[10.], [10.], [10.]]) buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0.,", "2: 5} expected_priorities = {0: 0, 1: None, 2: 5} state = np.array([[10.],", "1: 2, 2: 5} state = np.array([[10.], [10.], [10.]]) buffer_processing_matrix = np.array([[-1., 0.,", "== priorities def test_priority_heuristic_agent_init_not_all_resources_given(): priorities = {0: 0, 2: 5} expected_priorities = {0:", "above # safety stock, swap order with respect to previous test. 
buffer_processing_matrix =", "each of them having to be above safety stock safety_stock = 1.0 state", "safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action) == 2 def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_starving(): # Two stations,", "= np.array([[1], [0], [1], [0], [0], [0], [0]]) expected_action = np.array([[1], [0], [1],", "state = 5 * np.ones((1, 1)) env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] =", "env_params[\"list_boundary_constraint_matrices\"] = [np.eye(3)] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state)", "[0.]]) buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0., 0., 0.], [0., -1., -1.,", "agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_action = np.array([[1], [0.5], [0.5], [0.25], [0.25], [0.25], [0.25]])", "1]])] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action[2:4])", "== np.array([0, 1])[:, None]) def test_priority_nonidling_heuristic_agent_multi_buffers_eye_cond_small_one_starve_reverse_ord(): # One station scheduling two buffers, one", "[0], [1], [0], [0], [0], [0]]) expected_action = np.array([[1], [0], [1], [0.25], [0.25],", "1, 1]]) env_params[\"list_boundary_constraint_matrices\"] = [np.hstack((np.eye(2), np.zeros((2, 2)))), np.hstack((np.zeros((2, 2)), np.eye(2)))] env = crw.ControlledRandomWalk(**env_params)", "above safety stock. 
safety_stock = 10.0 state = 5 * np.ones((4, 1)) env_params", "= 5e4 actions = np.zeros((buffer_processing_matrix.shape[1], int(num_sim))) for i in np.arange(int(num_sim)): actions[:, [i]] =", "buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]]) env_params[\"list_boundary_constraint_matrices\"] =", "2, 2: 5} state = np.array([[10.], [10.], [10.]]) buffer_processing_matrix = np.array([[-1., 0., 0.,", "5, 4])[:, None] env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0, 0], [0,", "1)) env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1, 1))] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock, name=\"LPAAgent\")", "10])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1, 2)) env_params[\"list_boundary_constraint_matrices\"] =", "safety_stock = 10.0 state = 3 * np.ones((3, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"]", "test_priority_nonidling_heuristic_agent_starving(): # Single server queue buffer_processing_matrix = - np.ones((1, 1)) safety_stock = 10.0", "snc.environments.state_initialiser as si import snc.agents.general_heuristics.random_nonidling_agent \\ as random_nonidling_agent import snc.agents.general_heuristics.longest_buffer_priority_agent \\ as longest_priority_agent", "= 10.0 state = np.array([9, 11])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"]", "np.zeros((2, 1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_2_starve(): # Two stations, each one scheduling two buffers, the", "0], [0, 1, 0, -1]]) safety_stock = 10 state = np.array([30, 30, 9,", "-1., 0., 0., 0., 0., 0.], [0., 0., 0., -1., -1., -1., 
-1.]])", "0], [0, 0, 1, 1]]) env_params[\"list_boundary_constraint_matrices\"] = [np.hstack((np.eye(2), np.zeros((2, 2)))), np.hstack((np.zeros((2, 2)), np.eye(2)))]", "None]) def test_priority_nonidling_heuristic_agent_multiple_largest_buffers_eye_condition(): # One station scheduling two buffers, both equal and above", "action = np.array([[1], [0], [1], [0], [0], [0], [0]]) activities = np.array([3, 4,", "test_random_heuristic_agent_multiple_buffers_sum_condition_starving(): # Station scheduling three buffers, the sum of their size having to", "to be above safety # stock. safety_stock = 9.9 state = 5 *", "1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 1)) env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1, 1))] env", "= crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock, name=\"LPAAgent\") action = agent.map_state_to_actions(state) assert action ==", "stock safety_stock = 10.0 state = 5 * np.ones((3, 1)) env_params = get_null_env_params(state)", "safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((4, 1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond(): # Two", "0], [1, 0, -1, 0], [0, 1, 0, -1]]) safety_stock = 10 state", "= random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action[2:4]) == 1 and np.all(action[0:2] ==", "= longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([1, 0, 0, 1])[:,", "state.shape[0] arrival_rate = np.ones_like(state) if num_resources is None: num_resources = num_buffers if buffer_processing_matrix", "- np.eye(2) safety_stock = 10.0 state = np.array([11, 11])[:, None] env_params = get_null_env_params(", "scheduling two buffers, the sum of their size having to be above safety", "5} state = 
np.array([[10.], [10.], [0.]]) buffer_processing_matrix = np.array([[-1., 0., 0., -1., -1.,", "0., 0., 0., 0., 0., 0.], [0., 1., 1., 0., 0., 0., 0.],", "state = np.array([11, 11])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1,", "0.], [0., 0., 0., 1., 1., 1., 1.]]) constituency_matrix_original = constituency_matrix.copy() env_params =", "the other, but both below safety stock. buffer_processing_matrix = - np.eye(2) safety_stock =", "priorities) action = np.array([[1], [0], [1], [0], [0], [0], [0]]) expected_action = np.array([[1],", "2 def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_1_starve(): # Two stations, each one scheduling two buffers, the sum", "custom_priority_agent.CustomActivityPriorityAgent(env, priorities) action = np.array([[1], [0], [0], [0], [1], [0], [0]]) expected_action =", "other. Only the large one is above # safety stock. buffer_processing_matrix = -", "np.ones((1, 1)) def test_random_heuristic_agent_multiple_buffers_eye_condition_starving(): # Station scheduling three buffers, each of them having", "np.eye(2)))] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action", "= {0: 0, 1: None, 2: 5} state = np.array([[10.], [10.], [10.]]) buffer_processing_matrix", "1)) env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1, 1))] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action", "safety # stock. 
safety_stock = 9.9 state = 5 * np.ones((4, 1)) env_params", "above safety stock safety_stock = 10.0 state = 3 * np.ones((3, 1)) env_params", "5, 20])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.array([[1, 1, 1,", "env_params[\"constituency_matrix\"] = np.ones((1, 2)) env_params[\"list_boundary_constraint_matrices\"] = [np.eye(2)] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env,", "import snc.agents.general_heuristics.random_nonidling_agent \\ as random_nonidling_agent import snc.agents.general_heuristics.longest_buffer_priority_agent \\ as longest_priority_agent import snc.agents.general_heuristics.custom_activity_priority_agent \\", "safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([1, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_reverse_order(): #", "= get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0, 0], [0, 0, 1,", "np.sum(action) == 1 def test_priority_nonidling_heuristic_agent_multiple_buffers_multiple_resources_sum_cond(): # Two stations, each one scheduling two buffers.", "== 2 def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_1_starve(): # Two stations, each one scheduling two buffers, the", "stock. 
safety_stock = 9.9 state = np.array([5, 5, 5, 4])[:, None] env_params =", "1])[:, None]) def test_priority_nonidling_heuristic_agent_multi_buffers_eye_cond_small_one_starve_reverse_ord(): # One station scheduling two buffers, one larger than", "[0], [0], [1], [0], [0]]) expected_action = np.array([[1], [0], [1], [0], [1], [0],", "= int(1e4) updated_action = np.zeros((buffer_processing_matrix.shape[1], num_sim)) for i in np.arange(num_sim): updated_action[:, [i]] =", "larger than the other, but both below safety stock. buffer_processing_matrix = - np.eye(2)", "np.array([11, 10])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1, 2)) env_params[\"list_boundary_constraint_matrices\"]", "np.all(action == np.zeros((2, 1))) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_small_one_starve(): # One station scheduling two buffers, one", "np.array([[0, 0, 1, 1]])] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action =", "1.1 * np.ones((3, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 3)) env_params[\"list_boundary_constraint_matrices\"] =", "num_sim)) for i in np.arange(num_sim): updated_action[:, [i]] = agent.sample_random_actions(state=state, action=action, activities=activities) average_updated_action =", "each one scheduling two buffers, each of them having to be above safety", "having to be above safety # stock. safety_stock = 9.9 state = np.array([5,", "= crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert action == np.zeros((1,", "buffers, each of them having to be above safety stock. 
safety_stock = 9.9", "np.array([[-1., 0., 0., -1., -1., 0., -1.], [0., -1., -1., 0., 0., 0.,", "agent.map_state_to_actions(state) assert action == np.zeros((1, 1)) def test_random_heuristic_agent(): # Single server queue safety_stock", "their size having to be above # safety stock. safety_stock = 10 state", "4])[:, None] env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0, 0], [0, 0,", "5, 5, 4])[:, None] env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0, 0],", "above safety # stock. safety_stock = 9.9 state = 5 * np.ones((4, 1))", "axis=1) / float(num_sim) np.testing.assert_array_almost_equal(average_action.reshape(-1, 1), expected_action, decimal=2) def test_priority_heuristic_agent_map_state_to_actions_full_priorities_empty_buffer(): np.random.seed(41) priorities = {0:", "agent.sample_random_actions(state=state, action=action, activities=activities) assert np.all(expected_action == updated_action) def test_priority_heuristic_agent_sample_random_action_multiple_possible_actions(): np.random.seed(42) priorities = {0:", "as si import snc.agents.general_heuristics.random_nonidling_agent \\ as random_nonidling_agent import snc.agents.general_heuristics.longest_buffer_priority_agent \\ as longest_priority_agent import", "safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([0, 1, 1, 0])[:, None]) def", "[0., -1., 0., 0., 0., 0., 0.], [0., 0., 0., -1., -1., -1.,", "buffers, one larger than the other. Only the large one is above #", "np.eye(2) safety_stock = 10.0 state = np.array([30, 20])[:, None] env_params = get_null_env_params( state,", "agent.map_state_to_actions(state) assert np.all(action == np.zeros((3, 1))) def test_random_heuristic_agent_multiple_buffers_eye_condition(): # Station scheduling three buffers,", "buffers. 
The stations are connected in serial, such that # buffer 1 is", "num_sim = int(1e4) updated_action = np.zeros((buffer_processing_matrix.shape[1], num_sim)) for i in np.arange(num_sim): updated_action[:, [i]]", "safety stock. buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state = np.array([30, 20])[:,", "buffer_processing_matrix = np.array([[-1, 0, 0, 0], [0, -1, 0, 0], [1, 0, -1,", "state = np.array([[10.], [10.], [0.]]) buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0., 0.,", "test_priority_heuristic_agent_init_all_resources_given(): priorities = {0: 0, 1: 2, 2: 5} state = np.array([[10.], [10.],", "action = agent.map_state_to_actions(state) assert np.sum(action) == 1 def test_random_heuristic_agent_multiple_buffers_sum_condition_starving(): # Station scheduling three", "= longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([0, 1])[:, None]) def", "1., 1., 0., 0., 0., 0.], [0., 0., 0., 1., 1., 1., 1.]])", "three buffers, the sum of their size having to be above safety stock", "0.], [0., -1., 0., 0., 0., 0., 0.], [0., 0., 0., -1., -1.,", "assert np.all(action == np.zeros((4, 1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond(): # Two stations, each one scheduling", "- np.eye(2) safety_stock = 10.0 state = np.array([30, 20])[:, None] env_params = get_null_env_params(", "priorities) action = np.array([[1], [0], [1], [0], [0], [0], [0]]) activities = np.array([3,", "1]]) env_params[\"list_boundary_constraint_matrices\"] = [np.hstack((np.eye(2), np.zeros((2, 2)))), np.hstack((np.zeros((2, 2)), np.eye(2)))] env = crw.ControlledRandomWalk(**env_params) agent", "0., 0., 0., 0.], [0., 0., 0., 1., 1., 1., 1.]]) constituency_matrix_original =", "np.array([[1, 1, 0, 0], [0, 0, 1, 1]]) env_params[\"list_boundary_constraint_matrices\"] = [np.hstack((np.eye(2), np.zeros((2, 2)))),", "= [np.eye(2)] env = 
crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert", "None]) def test_priority_nonidling_heuristic_agent_multi_buffers_and_resources_sum_cond_reverse_order(): # Two stations, each one scheduling two buffers. The stations", "np.ones((1, 3)) env_params[\"list_boundary_constraint_matrices\"] = [np.eye(3)] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action", "def test_priority_nonidling_heuristic_agent_multi_buffers_and_resources_sum_cond_reverse_order(): # Two stations, each one scheduling two buffers. The stations are", "two buffers. The stations are connected in serial, such that # buffer 1", "safety stock, swap order with respect to previous test. buffer_processing_matrix = - np.eye(2)", "agent.map_state_to_actions(state) assert np.all(action == np.zeros((4, 1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond(): # Two stations, each one", "= 10.0 state = 3 * np.ones((3, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] =", "safety stock in this # agent. 
buffer_processing_matrix = np.array([[-1, 0, 0, 0], [0,", "[0.25], [0.25], [0.25]]) num_sim = int(1e4) actions = np.zeros((buffer_processing_matrix.shape[1], num_sim)) for i in", "0, 0, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_largest_buffers_multiple_resources_sum_cond(): # Two stations, each one scheduling two", "np.random.seed(41) priorities = {0: 0, 1: 2, 2: 5} state = np.array([[10.], [10.],", "priorities) action = np.array([[1], [0], [0], [0], [1], [0], [0]]) expected_action = np.array([[1],", "= agent.map_state_to_actions(state) assert action == np.zeros((1, 1)) def test_random_heuristic_agent(): # Single server queue", "np.eye(2) safety_stock = 10.0 state = np.array([20, 30])[:, None] env_params = get_null_env_params( state,", "np.ones((1, 2)) env_params[\"list_boundary_constraint_matrices\"] = [np.eye(2)] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action", "snc.agents.general_heuristics.random_nonidling_agent \\ as random_nonidling_agent import snc.agents.general_heuristics.longest_buffer_priority_agent \\ as longest_priority_agent import snc.agents.general_heuristics.custom_activity_priority_agent \\ as", "0, 1: 2, 2: 5} state = np.array([[10.], [10.], [10.]]) buffer_processing_matrix = np.array([[-1.,", "5 * np.ones((1, 1)) env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1, 1))", "= np.array([[10.], [10.], [0.]]) buffer_processing_matrix = np.array([[-1., 0., 0., -1., -1., 0., -1.],", "crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action[0:2]) == 1 and", "be above safety stock. 
safety_stock = 10.0 state = 5 * np.ones((4, 1))", "= np.array([30, 30, 9, 5])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] =", "= crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) assert agent.priorities == priorities def test_priority_heuristic_agent_init_not_all_resources_given(): priorities", "0, -1]]) safety_stock = 10 state = np.array([30, 20, 20, 30])[:, None] env_params", "custom_priority_agent.CustomActivityPriorityAgent(env, priorities) assert agent.priorities == priorities def test_priority_heuristic_agent_init_not_all_resources_given(): priorities = {0: 0, 2:", "== updated_action) def test_priority_heuristic_agent_sample_random_action_multiple_possible_actions(): np.random.seed(42) priorities = {0: 0, 1: 2, 2: 5}", "= 10.0 state = np.array([9, 5])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"]", "def test_priority_heuristic_agent_sample_random_action_empty_possible_actions(): priorities = {0: 0, 1: 2, 2: 5} state = np.array([[10.],", "action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((4, 1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond(): # Two stations,", "# One station scheduling two buffers, one larger than the other. Only the", "def test_priority_nonidling_heuristic_agent(): # Single server queue buffer_processing_matrix = - np.ones((1, 1)) safety_stock =", "# One station scheduling two buffers, one larger than the other, both above", "station scheduling two buffers, both equal and above safety stock. 
buffer_processing_matrix = -", "activities = np.array([3, 4, 5, 6]) updated_action = agent.sample_random_actions(state=state, action=action, activities=activities) assert np.all(action", "= 10 state = 4 * np.ones((4, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] =", "assert np.all(action == np.array([0, 1, 1, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_and_resources_sum_cond_2_starve(): # Two stations,", "test_random_heuristic_agent(): # Single server queue safety_stock = 1.0 state = 1.1 * np.ones((1,", "decimal=2) assert np.all(constituency_matrix_original == constituency_matrix) assert np.all(constituency_matrix_original == env.constituency_matrix) def test_priority_heuristic_agent_map_state_to_actions_full_priorities_full_buffer(): priorities =", "action = agent.map_state_to_actions(state) assert np.all(action == np.array([0, 1])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_largest_buffers_eye_condition(): # One", "both below safety stock. 
buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state =", "== np.array([1, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition(): # One station scheduling two buffers, one", "= agent.map_state_to_actions(state) assert np.all(action == np.array([0, 1])[:, None]) def test_priority_nonidling_heuristic_agent_multi_buffers_eye_cond_small_one_starve_reverse_ord(): # One station", "1)) safety_stock = 10.0 state = 5 * np.ones((1, 1)) env_params = get_null_env_params(", "4, 5, 6]) updated_action = agent.sample_random_actions(state=state, action=action, activities=activities) assert np.all(action == updated_action) def", "def test_priority_heuristic_agent_map_state_to_actions_full_priorities_empty_buffer(): np.random.seed(41) priorities = {0: 0, 1: 2, 2: 5} state =", "[0], [0], [0], [0]]) activities = np.array([3, 4, 5, 6]) updated_action = agent.sample_random_actions(state=state,", "safety stock safety_stock = 1.0 state = 1.1 * np.ones((3, 1)) env_params =", "0., 0.], [0., 0., 0., 0., 0., -1., 0.]]) constituency_matrix = np.array([[1., 0.,", "buffer 3, and 2 with 4. # Kind of condition doesn't matter since", "test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_2_starve(): # Two stations, each one scheduling two buffers, the sum of their", "priorities) assert agent.priorities == priorities def test_priority_heuristic_agent_init_not_all_resources_given(): priorities = {0: 0, 2: 5}", "0], [0, 0, 1, 1]]) env_params[\"list_boundary_constraint_matrices\"] = [np.array([[1, 1, 0, 0]]), np.array([[0, 0,", "with 4. 
# Kind of condition doesn't matter since the largest buffer has", "[np.array([[1, 1, 0, 0]]), np.array([[0, 0, 1, 1]])] env = crw.ControlledRandomWalk(**env_params) agent =", "1., 1.]]) env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) with pytest.raises(AssertionError): _", "= np.array([11, 11])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1, 2))", "action = np.array([[1], [0], [1], [0], [0], [0], [0]]) expected_action = np.array([[1], [0],", "np.testing.assert_array_almost_equal(average_action.reshape(-1, 1), expected_average_action, decimal=2) assert np.all(constituency_matrix_original == constituency_matrix) assert np.all(constituency_matrix_original == env.constituency_matrix) def", "previous test. buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state = np.array([11, 10])[:,", "[0], [1], [0.25], [0.25], [0.25], [0.25]]) activities = np.array([3, 4, 5, 6]) num_sim", "= np.array([[1, 1, 1, 0], [0, 0, 1, 1]]) env_params[\"list_boundary_constraint_matrices\"] = [np.array([[1, 1,", "[0], [0]]) expected_action = np.array([[1], [0], [1], [0.25], [0.25], [0.25], [0.25]]) activities =", "= custom_priority_agent.CustomActivityPriorityAgent(env, priorities) action = np.array([[1], [0], [1], [0], [0], [0], [0]]) activities", "server queue buffer_processing_matrix = - np.ones((1, 1)) safety_stock = 4.0 state = 5", "np.sum(action[0:2]) == 1 and np.all(action[2:4] == np.array([0, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_activities_buffers_and_resources(): # Two", "1.1 * np.ones((1, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 1)) env_params[\"list_boundary_constraint_matrices\"] =", "scheduling two buffers, both equal and above safety stock. 
buffer_processing_matrix = - np.eye(2)", "def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_1_starve(): # Two stations, each one scheduling two buffers, the sum of", "agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((2, 1))) def", "== np.ones((1, 1)) def test_random_heuristic_agent_multiple_buffers_eye_condition_starving(): # Station scheduling three buffers, each of them", "agent. buffer_processing_matrix = np.array([[-1, 0, 0, 0], [0, -1, 0, 0], [1, 0,", "1, 0, 0], [0, 0, 1, 0]]), np.array([[0, 0, 1, 1]])] env =", "action = agent.map_state_to_actions(state) assert np.sum(action[0:2]) == 1 and np.all(action[2:4] == np.zeros((2, 1))) def", "and np.all(action[2:4] == np.array([0, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_activities_buffers_and_resources(): # Two stations, each one", "= 9.9 state = 5 * np.ones((4, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] =", "0., -1., -1., -1., -1.]]) constituency_matrix = np.array([[1., 0., 0., 0., 0., 0.,", "== np.array([0, 1, 1, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_and_resources_sum_cond_2_starve(): # Two stations, each one", "= agent.map_state_to_actions(state) assert np.sum(action[0:2]) == 1 and np.all(action[2:4] == np.array([0, 0])[:, None]) def", "the sum of their size having to be above safety stock safety_stock =", "np.array([[-1., 0., 0., 0., 0., 0., 0.], [0., -1., -1., 0., 0., 0.,", "np.testing.assert_array_almost_equal(average_action.reshape(-1, 1), expected_action, decimal=2) def test_priority_heuristic_agent_map_state_to_actions_full_priorities_empty_buffer(): np.random.seed(41) priorities = {0: 0, 1: 2,", "two buffers, one larger than the other, both above safety stock (swap #", "1.0 state = 1.1 * np.ones((3, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = 
np.ones((1,", "0., 0., -1., 0.]]) constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.],", "[np.ones((1, 1))] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert", "the other. Only the large one is above # safety stock, swap order", "return { \"cost_per_buffer\": np.zeros_like(state), \"capacity\": np.zeros_like(state), \"constituency_matrix\": constituency_matrix, \"job_generator\": drjg.DeterministicDiscreteReviewJobGenerator( arrival_rate, buffer_processing_matrix, sim_time_interval=time_interval", "def test_priority_heuristic_agent_sample_random_action_one_possible_action(): priorities = {0: 0, 1: 2, 2: 5} state = np.array([[10.],", "expected_average_action = np.array([[1], [0.], [1.], [0.33], [0.33], [0.], [0.33]]) num_sim = 5e4 actions", "[10.], [0.]]) buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0., 0., 0.], [0., -1.,", "30, 9, 5])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.array([[1, 1,", "in np.arange(num_sim): actions[:, [i]] = agent.map_state_to_actions(state=state) average_action = np.sum(actions, axis=1) / float(num_sim) np.testing.assert_array_almost_equal(average_action.reshape(-1,", "== np.array([0, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_activities_buffers_and_resources(): # Two stations, each one scheduling two", "np.eye(2) safety_stock = 10.0 state = np.array([11, 10])[:, None] env_params = get_null_env_params( state,", "1, 0, 0]]), np.array([[0, 0, 1, 1]])] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env,", "agent.priorities == expected_priorities def test_priority_heuristic_agent_init_wrong_activity_given(): priorities = {0: 0, 2: 1} state =", "np.all(action == np.array([0, 1, 1, 0])[:, None]) def 
test_priority_nonidling_heuristic_agent_multiple_buffers_and_resources_sum_cond_2_starve(): # Two stations, each", "np.zeros((1, 1)) def test_random_heuristic_agent(): # Single server queue safety_stock = 1.0 state =", "crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) action = np.array([[1], [0], [0], [0], [1], [0],", "np.array([4, 5, 5, 5])[:, None] env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0,", "activities=activities) average_updated_action = np.sum(updated_action, axis=1) / float(num_sim) np.testing.assert_array_almost_equal(average_updated_action.reshape(-1, 1), expected_action, decimal=2) def test_priority_heuristic_agent_map_state_to_actions_no_priorities():", "3))] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action)", "longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([1, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition():", "np.sum(updated_action, axis=1) / float(num_sim) np.testing.assert_array_almost_equal(average_updated_action.reshape(-1, 1), expected_action, decimal=2) def test_priority_heuristic_agent_map_state_to_actions_no_priorities(): np.random.seed(42) priorities =", "1)) def test_random_heuristic_agent_multiple_buffers_eye_condition_starving(): # Station scheduling three buffers, each of them having to", "} def test_random_heuristic_agent_starving(): # Single server queue safety_stock = 10.0 state = 5", "arrival_rate, buffer_processing_matrix, sim_time_interval=time_interval ), \"state_initialiser\": si.DeterministicCRWStateInitialiser(state), \"job_conservation_flag\": True, \"list_boundary_constraint_matrices\": None, } def test_random_heuristic_agent_starving():", "if 
buffer_processing_matrix is None: buffer_processing_matrix = -np.triu(np.ones((num_buffers, num_resources))) if constituency_matrix is None: constituency_matrix", "= {} state = np.array([[10.], [10.], [10.]]) buffer_processing_matrix = np.array([[-1., 0., 0., 0.,", "= 10.0 state = 5 * np.ones((3, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] =", "stock in this # agent. buffer_processing_matrix = np.array([[-1, 0, 0, 0], [0, -1,", "stock safety_stock = 10.0 state = 3 * np.ones((3, 1)) env_params = get_null_env_params(state)", "env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1, 1)) env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1, 1))]", "1]]) env_params[\"list_boundary_constraint_matrices\"] = [np.array([[1, 1, 0, 0]]), np.array([[0, 0, 1, 1]])] env =", "above safety stock. buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state = np.array([30,", "0, 1, 1]]) env_params[\"list_boundary_constraint_matrices\"] = [np.hstack((np.eye(2), np.zeros((2, 2)))), np.hstack((np.zeros((2, 2)), np.eye(2)))] env =", "= agent.map_state_to_actions(state) assert np.sum(action) == 1 def test_random_heuristic_agent_multiple_buffers_multiple_resources_eye_cond_starving(): # Two stations, each one", "= np.array([4, 5, 5, 5])[:, None] env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.array([[1, 1,", "= custom_priority_agent.CustomActivityPriorityAgent(env, priorities) assert agent.priorities == priorities def test_priority_heuristic_agent_init_not_all_resources_given(): priorities = {0: 0,", "0, 0], [0, 0, 1, 0]]), np.array([[0, 0, 1, 1]])] env = crw.ControlledRandomWalk(**env_params)", "np.array([5, 5, 5, 4])[:, None] env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0,", "\"state_initialiser\": si.DeterministicCRWStateInitialiser(state), \"job_conservation_flag\": 
True, \"list_boundary_constraint_matrices\": None, } def test_random_heuristic_agent_starving(): # Single server queue", "1, 1]]) env_params[\"list_boundary_constraint_matrices\"] = [np.array([[1, 1, 0, 0]]), np.array([[0, 0, 1, 1]])] env", "state = np.array([11, 10])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1,", "= get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1, 1)) env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1, 1))] env", "constituency_matrix) assert np.all(constituency_matrix_original == env.constituency_matrix) def test_priority_heuristic_agent_map_state_to_actions_full_priorities_full_buffer(): priorities = {0: 0, 1: 2,", "Single server queue safety_stock = 1.0 state = 1.1 * np.ones((1, 1)) env_params", "[10.], [10.]]) buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0., 0., 0.], [0., -1.,", "0, 1: 2, 2: 5} state = np.array([[10.], [0.], [10.]]) buffer_processing_matrix = np.array([[-1.,", "agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([0, 1])[:, None])", "larger than the other, both above safety stock. 
buffer_processing_matrix = - np.eye(2) safety_stock", "= constituency_matrix.copy() env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env,", "crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((4, 1)))", "\"job_conservation_flag\": True, \"list_boundary_constraint_matrices\": None, } def test_random_heuristic_agent_starving(): # Single server queue safety_stock =", "= np.array([20, 30, 30, 20])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] =", "[0., 0., 0., 1., 1., 1., 1.]]) constituency_matrix_original = constituency_matrix.copy() env_params = get_null_env_params(state=state,", "= longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([1, 0, 0, 0])[:,", "np.ones((1, 1)) env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1, 1)) env_params[\"list_boundary_constraint_matrices\"] =", "env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action) ==", "= - np.eye(2) safety_stock = 10.0 state = np.array([20, 30])[:, None] env_params =", "1]]) env_params[\"list_boundary_constraint_matrices\"] = [np.array([[1, 1, 0, 0], [0, 0, 1, 0]]), np.array([[0, 0,", "{0: 0, 1: 2, 2: 5} state = np.array([[10.], [10.], [10.]]) buffer_processing_matrix =", "0.], [0., 0., 0., 1., 1., 1., 1.]]) env_params = get_null_env_params(state=state, 
buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix)", "{} state = np.array([[10.], [10.], [10.]]) buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0.,", "0., 0.], [0., 1., 1., 0., 0., 0., 0.], [0., 0., 0., 1.,", "0., 0., 0., 0.], [0., 1., 1., 0., 0., 0., 0.], [0., 0.,", "1))] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert action", "None: constituency_matrix = np.zeros((num_resources, num_resources)) time_interval = 1 return { \"cost_per_buffer\": np.zeros_like(state), \"capacity\":", "longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((2, 1))) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_small_one_starve(): #", "them having to be above safety stock. safety_stock = 10.0 state = 5", "- np.ones((1, 1)) safety_stock = 10.0 state = 5 * np.ones((1, 1)) env_params", "actions = np.zeros((buffer_processing_matrix.shape[1], int(num_sim))) for i in np.arange(int(num_sim)): actions[:, [i]] = agent.map_state_to_actions(state=state) average_action", "(swap # order with previous test). 
buffer_processing_matrix = - np.eye(2) safety_stock = 10.0", "in np.arange(int(num_sim)): actions[:, [i]] = agent.map_state_to_actions(state=state) average_action = np.sum(actions, axis=1) / num_sim np.testing.assert_array_almost_equal(average_action.reshape(-1,", "= custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_action = np.array([[1], [0.5], [0.5], [0.25], [0.25], [0.25], [0.25]]) num_sim", "def test_random_heuristic_agent_multiple_buffers_sum_condition_starving(): # Station scheduling three buffers, the sum of their size having", "agent.map_state_to_actions(state) assert (action[0] + action[2] == 1) and (action[1] == 0) and (action[3]", "0., 0., 0., 0.], [0., 0., 0., -1., -1., -1., -1.]]) constituency_matrix =", "scheduling two buffers, one larger than the other, but both below safety stock.", "agent.priorities == priorities def test_priority_heuristic_agent_init_not_all_resources_given(): priorities = {0: 0, 2: 5} expected_priorities =", "priorities) expected_average_action = np.array([[1], [0.], [1.], [0.33], [0.33], [0.], [0.33]]) num_sim = 5e4", "= random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((4, 1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_eye_cond():", "True, \"list_boundary_constraint_matrices\": None, } def test_random_heuristic_agent_starving(): # Single server queue safety_stock = 10.0", "having to be above safety # stock. safety_stock = 9.9 state = np.array([4,", "safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([0, 1])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_largest_buffers_eye_condition(): #", "= agent.map_state_to_actions(state) assert np.all(action == np.array([0, 1])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_largest_buffers_eye_condition(): # One station", "above safety # stock. 
safety_stock = 9.9 state = np.array([5, 5, 5, 4])[:,", "def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond(): # Two stations, each one scheduling two buffers, the sum of", "stations, each one scheduling two buffers, the sum of their size having to", "1: 2, 2: 5} state = np.array([[10.], [0.], [10.]]) buffer_processing_matrix = np.array([[-1., 0.,", "= 4 * np.ones((4, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0,", "5 * np.ones((3, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 3)) env_params[\"list_boundary_constraint_matrices\"] =", "1., 1., 1., 1.]]) env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) with", "np.array([[0, 0, 1, 1]])] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action =", "agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert action == np.zeros((1, 1)) def", "[0.33], [0.], [0.33]]) num_sim = 5e4 actions = np.zeros((buffer_processing_matrix.shape[1], int(num_sim))) for i in", "0., 0., 0., 0.], [0., -1., -1., 0., 0., 0., 0.], [0., 0.,", "9.9 state = np.array([4, 5, 5, 5])[:, None] env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] =", "expected_action = np.array([[1], [0.5], [0.5], [0.25], [0.25], [0.25], [0.25]]) num_sim = int(1e4) actions", "-1]]) safety_stock = 10 state = np.array([30, 20, 5, 20])[:, None] env_params =", "1 def test_random_heuristic_agent_multiple_buffers_sum_condition_starving(): # Station scheduling three buffers, the sum of their size", "def test_priority_nonidling_heuristic_agent_multi_buffers_eye_cond_small_one_starve_reverse_ord(): # One station scheduling two buffers, one larger than the other.", 
"1)) env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1, 1))] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action", "test_priority_nonidling_heuristic_agent_multi_buffers_eye_cond_small_one_starve_reverse_ord(): # One station scheduling two buffers, one larger than the other. Only", "three buffers, each of them having to be above safety stock safety_stock =", "{ \"cost_per_buffer\": np.zeros_like(state), \"capacity\": np.zeros_like(state), \"constituency_matrix\": constituency_matrix, \"job_generator\": drjg.DeterministicDiscreteReviewJobGenerator( arrival_rate, buffer_processing_matrix, sim_time_interval=time_interval ),", "[0.25], [0.25]]) num_sim = int(1e4) actions = np.zeros((buffer_processing_matrix.shape[1], num_sim)) for i in np.arange(num_sim):", "0, 0], [1, 0, -1, 0], [0, 1, 0, -1]]) safety_stock = 10", "def test_random_heuristic_agent_multiple_buffers_eye_condition_starving(): # Station scheduling three buffers, each of them having to be", "0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition(): # One station scheduling two buffers, one larger than", "= np.array([[1., 0., 0., 0., 0., 0., 0.], [0., 1., 1., 0., 0.,", "== np.zeros((2, 1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_2_starve(): # Two stations, each one scheduling two buffers,", "buffer 1 is connected with buffer 3, and 2 with 4. 
# Kind", "0., 0.], [0., 0., 0., -1., -1., -1., -1.]]) constituency_matrix = np.array([[1., 0.,", "= get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) assert agent.priorities", "= crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action) == 1", "np import pytest import snc.environments.job_generators.discrete_review_job_generator \\ as drjg import snc.environments.controlled_random_walk as crw import", "test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_reverse_order(): # One station scheduling two buffers, one larger than the other, both", "agent. buffer_processing_matrix = np.array([[-1, 0, -1, 0], [0, -1, 0, 0], [1, 0,", "env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_action = np.array([[1], [0], [1], [0],", "longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([0, 1])[:, None]) def test_priority_nonidling_heuristic_agent_multi_buffers_eye_cond_small_one_starve_reverse_ord():", "agent.map_state_to_actions(state) assert np.all(action == np.array([1, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition(): # One station scheduling", "def test_priority_heuristic_agent_init_not_all_resources_given(): priorities = {0: 0, 2: 5} expected_priorities = {0: 0, 1:", "safety_stock = 9.9 state = 5 * np.ones((4, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"]", "test_priority_heuristic_agent_init_wrong_activity_given(): priorities = {0: 0, 2: 1} state = np.array([[10.], [10.], [10.]]) buffer_processing_matrix", 
"test_priority_heuristic_agent_sample_random_action_empty_possible_actions(): priorities = {0: 0, 1: 2, 2: 5} state = np.array([[10.], [10.],", "env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 3)) env_params[\"list_boundary_constraint_matrices\"] = [np.eye(3)] env = crw.ControlledRandomWalk(**env_params)", "action = agent.map_state_to_actions(state) assert np.sum(action) == 2 def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_starving(): # Two stations, each", "agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) assert agent.priorities == expected_priorities def test_priority_heuristic_agent_init_wrong_activity_given(): priorities = {0:", "get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.array([[1, 1, 1, 0], [0, 0, 1, 1]])", "stock (swap # order with previous test). buffer_processing_matrix = - np.eye(2) safety_stock =", "= np.array([[10.], [10.], [0.]]) buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0., 0., 0.],", "swap order with respect to previous test. 
buffer_processing_matrix = - np.eye(2) safety_stock =", "1) def test_priority_heuristic_agent_init_all_resources_given(): priorities = {0: 0, 1: 2, 2: 5} state =", "[1], [0], [0], [0], [0]]) activities = np.array([3, 4, 5, 6]) updated_action =", "Station scheduling three buffers, each of them having to be above safety stock", "np.array([[-1, 0, -1, 0], [0, -1, 0, 0], [1, 0, -1, 0], [0,", "safety_stock = 10 state = np.array([30, 20, 9, 5])[:, None] env_params = get_null_env_params(", "agent.sample_random_actions(state=state, action=action, activities=activities) average_updated_action = np.sum(updated_action, axis=1) / float(num_sim) np.testing.assert_array_almost_equal(average_updated_action.reshape(-1, 1), expected_action, decimal=2)", "- np.eye(2) safety_stock = 10.0 state = np.array([20, 30])[:, None] env_params = get_null_env_params(", "priorities = {0: 0, 2: 5} expected_priorities = {0: 0, 1: None, 2:", "doesn't matter since the largest buffer has to be above safety stock in", "np.testing.assert_array_almost_equal(average_updated_action.reshape(-1, 1), expected_action, decimal=2) def test_priority_heuristic_agent_map_state_to_actions_no_priorities(): np.random.seed(42) priorities = {} state = np.array([[10.],", "safety stock (swap # order with previous test). buffer_processing_matrix = - np.eye(2) safety_stock", "[0, 1, 0, -1]]) safety_stock = 10 state = np.array([30, 20, 20, 30])[:,", "has to be above safety stock in this # agent. buffer_processing_matrix = np.array([[-1,", "buffer has to be above safety stock in this # agent. buffer_processing_matrix =", "0.], [0., 1., 1., 0., 0., 0., 0.], [0., 0., 0., 1., 1.,", "def test_priority_nonidling_heuristic_agent_multiple_buffers_multiple_resources_sum_cond(): # Two stations, each one scheduling two buffers. 
The stations are", "expected_priorities = {0: 0, 1: None, 2: 5} state = np.array([[10.], [10.], [10.]])", "5, 5])[:, None] env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0, 0], [0,", "0) and (action[3] == 1) def test_priority_heuristic_agent_init_all_resources_given(): priorities = {0: 0, 1: 2,", "env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1, 2)) env_params[\"list_boundary_constraint_matrices\"] = [np.eye(2)] env", "_ = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) def test_priority_heuristic_agent_sample_random_action_empty_possible_actions(): priorities = {0: 0, 1: 2, 2:", "1.0 state = 1.1 * np.ones((1, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1,", "to be above safety stock safety_stock = 10.0 state = 3 * np.ones((3,", "env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) assert agent.priorities == priorities def test_priority_heuristic_agent_init_not_all_resources_given():", "10.0 state = np.array([11, 11])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] =", "action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((3, 1))) def test_random_heuristic_agent_multiple_buffers_sum_condition(): # Station scheduling", "# Station scheduling three buffers, each of them having to be above safety", "= random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action) == 2 def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_1_starve(): #", "20])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1, 2)) 
env_params[\"list_boundary_constraint_matrices\"] =", "env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1, 1))] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action =", "get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 1)) env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1, 1))] env = crw.ControlledRandomWalk(**env_params) agent", "if constituency_matrix is None: constituency_matrix = np.zeros((num_resources, num_resources)) time_interval = 1 return {", "this # agent. buffer_processing_matrix = np.array([[-1, 0, -1, 0], [0, -1, 0, 0],", "as drjg import snc.environments.controlled_random_walk as crw import snc.environments.state_initialiser as si import snc.agents.general_heuristics.random_nonidling_agent \\", "safety_stock) action = agent.map_state_to_actions(state) assert action == np.ones((1, 1)) def test_random_heuristic_agent_multiple_buffers_eye_condition_starving(): # Station", "10.0 state = np.array([30, 20])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] =", "= np.sum(updated_action, axis=1) / float(num_sim) np.testing.assert_array_almost_equal(average_updated_action.reshape(-1, 1), expected_action, decimal=2) def test_priority_heuristic_agent_map_state_to_actions_no_priorities(): np.random.seed(42) priorities", "action == np.zeros((1, 1)) def test_random_heuristic_agent(): # Single server queue safety_stock = 1.0", "having to be above safety stock safety_stock = 10.0 state = 3 *", "np.sum(action) == 2 def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_starving(): # Two stations, each one scheduling two buffers,", "safety_stock = 10 state = np.array([30, 20, 20, 30])[:, None] env_params = get_null_env_params(", "env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, 
constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_action", "above # safety stock. safety_stock = 10 state = 4 * np.ones((4, 1))", "buffer_processing_matrix is None: buffer_processing_matrix = -np.triu(np.ones((num_buffers, num_resources))) if constituency_matrix is None: constituency_matrix =", "= 1.1 * np.ones((1, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 1)) env_params[\"list_boundary_constraint_matrices\"]", "buffers, one larger than the other, but both below safety stock. buffer_processing_matrix =", "axis=1) / float(num_sim) np.testing.assert_array_almost_equal(average_updated_action.reshape(-1, 1), expected_action, decimal=2) def test_priority_heuristic_agent_map_state_to_actions_no_priorities(): np.random.seed(42) priorities = {}", "scheduling two buffers, one larger than the other, both above safety stock (swap", "2 def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_starving(): # Two stations, each one scheduling two buffers, the sum", "1]])] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action[0:2])", "= - np.eye(2) safety_stock = 10.0 state = np.array([11, 11])[:, None] env_params =", "2: 5} state = np.array([[10.], [10.], [0.]]) buffer_processing_matrix = np.array([[-1., 0., 0., 0.,", "np.array([30, 30, 9, 5])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.array([[1,", "= agent.map_state_to_actions(state) assert np.all(action == np.array([1, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_reverse_order(): # One station", "1, 0])[:, None]) def 
test_priority_nonidling_heuristic_agent_multiple_buffers_and_resources_sum_cond_2_starve(): # Two stations, each one scheduling two buffers.", "1., 1., 1.]]) env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent =", "assert np.all(action == np.array([0, 1])[:, None]) def test_priority_nonidling_heuristic_agent_multi_buffers_eye_cond_small_one_starve_reverse_ord(): # One station scheduling two", "= crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((2,", "1., 1., 1., 1.]]) constituency_matrix_original = constituency_matrix.copy() env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env", "custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_action = np.array([[1], [0.5], [0.5], [0.25], [0.25], [0.25], [0.25]]) num_sim =", "expected_action = np.array([[1], [0], [1], [0.25], [0.25], [0.25], [0.25]]) activities = np.array([3, 4,", "assert agent.priorities == expected_priorities def test_priority_heuristic_agent_init_wrong_activity_given(): priorities = {0: 0, 2: 1} state", "env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1, 1))] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action =", "def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_2_starve(): # Two stations, each one scheduling two buffers, the sum of", "to be above # safety stock. 
safety_stock = 10 state = 4 *", "env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert action ==", "= get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) action =", "crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_action = np.array([[1], [0], [1], [0], [0], [1],", "sum of their size having to be above safety stock safety_stock = 10.0", "station scheduling two buffers, one larger than the other. Only the large one", "action = agent.map_state_to_actions(state) assert np.all(action == np.array([0, 1, 1, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_and_resources_sum_cond_2_starve():", "[0], [0]]) expected_action = np.array([[1], [0], [1], [0], [1], [0], [0]]) activities =", "agent.sample_random_actions(state=state, action=action, activities=activities) assert np.all(action == updated_action) def test_priority_heuristic_agent_sample_random_action_one_possible_action(): priorities = {0: 0,", "safety # stock. safety_stock = 9.9 state = np.array([4, 5, 5, 5])[:, None]", "(action[0] + action[2] == 1) and (action[1] == 0) and (action[3] == 1)", "# safety stock. 
safety_stock = 10 state = 4 * np.ones((4, 1)) env_params", "0., 0., 0.], [0., 0., 0., 0., 0., -1., 0.]]) constituency_matrix = np.array([[1.,", "= np.array([5, 5, 5, 4])[:, None] env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.array([[1, 1,", "import numpy as np import pytest import snc.environments.job_generators.discrete_review_job_generator \\ as drjg import snc.environments.controlled_random_walk", "buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) with pytest.raises(AssertionError): _ = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) def test_priority_heuristic_agent_sample_random_action_empty_possible_actions():", "state = np.array([30, 20, 9, 5])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"]", "state = 5 * np.ones((3, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 3))", "# Single server queue safety_stock = 1.0 state = 1.1 * np.ones((1, 1))", "agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action) == 2 def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_starving():", "constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) action = np.array([[1], [0], [1],", "crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([0, 1])[:,", "env_params[\"constituency_matrix\"] = np.ones((1, 3)) env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1, 3))] env = crw.ControlledRandomWalk(**env_params) agent =", "agent = 
custom_priority_agent.CustomActivityPriorityAgent(env, priorities) assert agent.priorities == priorities def test_priority_heuristic_agent_init_not_all_resources_given(): priorities = {0:", "assert action == np.zeros((1, 1)) def test_random_heuristic_agent(): # Single server queue safety_stock =", "safety_stock = 10.0 state = np.array([9, 11])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix)", "assert np.sum(action[0:2]) == 1 and np.all(action[2:4] == np.zeros((2, 1))) def test_priority_nonidling_heuristic_agent_starving(): # Single", "buffers, each of them having to be above safety stock safety_stock = 10.0", "np.zeros((3, 1))) def test_random_heuristic_agent_multiple_buffers_sum_condition(): # Station scheduling three buffers, the sum of their", "safety # stock. safety_stock = 9.9 state = np.array([5, 5, 5, 4])[:, None]", "= [np.ones((1, 1))] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state)", "largest buffer has to be above safety stock in this # agent. 
buffer_processing_matrix", "[0], [0], [0]]) expected_action = np.array([[1], [0], [1], [0.25], [0.25], [0.25], [0.25]]) activities", "agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([1, 0, 0,", "action = agent.map_state_to_actions(state) assert action == np.zeros((1, 1)) def test_priority_nonidling_heuristic_agent(): # Single server", "1)) env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1, 1)) env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1,", "10 state = np.array([20, 30, 30, 20])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix)", "= agent.map_state_to_actions(state) assert action == np.zeros((1, 1)) def test_priority_nonidling_heuristic_agent(): # Single server queue", "assert (action[0] + action[2] == 1) and (action[1] == 0) and (action[3] ==", "num_buffers if buffer_processing_matrix is None: buffer_processing_matrix = -np.triu(np.ones((num_buffers, num_resources))) if constituency_matrix is None:", "= agent.map_state_to_actions(state) assert (action[0] + action[2] == 1) and (action[1] == 0) and", "1]])] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action)", "agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action) == 1 def test_random_heuristic_agent_multiple_buffers_multiple_resources_eye_cond_starving():", "* np.ones((1, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 1)) env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1,", "safety stock safety_stock = 10.0 state = 5 * np.ones((3, 1)) env_params =", "== np.zeros((4, 1))) def 
test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond(): # Two stations, each one scheduling two buffers,", "10 state = np.array([30, 20, 5, 20])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix)", "0., 0., 0., 0., -1., 0.]]) constituency_matrix = np.array([[1., 0., 0., 0., 0.,", "= agent.map_state_to_actions(state) assert np.all(action == np.zeros((3, 1))) def test_random_heuristic_agent_multiple_buffers_sum_condition(): # Station scheduling three", "= agent.map_state_to_actions(state=state) average_action = np.sum(actions, axis=1) / num_sim np.testing.assert_array_almost_equal(average_action.reshape(-1, 1), expected_average_action, decimal=2) assert", "scheduling two buffers, the sum of their size having to be above #", "buffer_processing_matrix = np.array([[-1, 0, -1, 0], [0, -1, 0, 0], [1, 0, -1,", "= [np.ones((1, 3))] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state)", "num_resources = num_buffers if buffer_processing_matrix is None: buffer_processing_matrix = -np.triu(np.ones((num_buffers, num_resources))) if constituency_matrix", "stock. 
buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state = np.array([9, 5])[:, None]", "each one scheduling two buffers, the sum of their size having to be", "0], [0, 0, 1, 0]]), np.array([[0, 0, 1, 1]])] env = crw.ControlledRandomWalk(**env_params) agent", "crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action) == 1 def", "agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action) == 1 def test_priority_nonidling_heuristic_agent_multiple_buffers_multiple_resources_sum_cond():", "5} state = np.array([[10.], [0.], [10.]]) buffer_processing_matrix = np.array([[-1., 0., -1., 0., 0.,", "priorities) assert agent.priorities == expected_priorities def test_priority_heuristic_agent_init_wrong_activity_given(): priorities = {0: 0, 2: 1}", "0., 0.], [0., 0., 0., 1., 1., 1., 1.]]) env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix,", "== 1 def test_random_heuristic_agent_multiple_buffers_sum_condition_starving(): # Station scheduling three buffers, the sum of their", "1., 1.]]) constituency_matrix_original = constituency_matrix.copy() env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params)", "[0.], [1.], [0.33], [0.33], [0.], [0.33]]) num_sim = 5e4 actions = np.zeros((buffer_processing_matrix.shape[1], int(num_sim)))", "in this # agent. 
buffer_processing_matrix = np.array([[-1, 0, 0, 0], [0, -1, 0,", "agent.map_state_to_actions(state) assert np.sum(action) == 2 def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_starving(): # Two stations, each one scheduling", "np.eye(2) safety_stock = 10.0 state = np.array([9, 5])[:, None] env_params = get_null_env_params( state,", "action = agent.map_state_to_actions(state) assert np.all(action == np.array([1, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition(): # One", "np.zeros((buffer_processing_matrix.shape[1], num_sim)) for i in np.arange(num_sim): updated_action[:, [i]] = agent.sample_random_actions(state=state, action=action, activities=activities) average_updated_action", "buffers, the sum of their size having to be above safety # stock.", "np.eye(2)))] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action)", "one scheduling two buffers. 
The stations are connected in serial, such that #", "5} expected_priorities = {0: 0, 1: None, 2: 5} state = np.array([[10.], [10.],", "constituency_matrix_original = constituency_matrix.copy() env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent =", "20, 9, 5])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.array([[1, 1,", "0, 1, 1]]) env_params[\"list_boundary_constraint_matrices\"] = [np.array([[1, 1, 0, 0], [0, 0, 1, 0]]),", "np.hstack((np.zeros((2, 2)), np.eye(2)))] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state)", "0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_and_resources_sum_cond_2_starve(): # Two stations, each one scheduling two buffers. The", "One station scheduling two buffers, one larger than the other. 
Only the large", "1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_eye_cond(): # Two stations, each one scheduling two buffers, each of", "= agent.map_state_to_actions(state) assert np.all(action == np.array([1, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition(): # One station", "env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 1)) env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1, 1))] env =", "== 1) def test_priority_heuristic_agent_init_all_resources_given(): priorities = {0: 0, 1: 2, 2: 5} state", "priorities) expected_action = np.array([[1], [0.5], [0.5], [0.25], [0.25], [0.25], [0.25]]) num_sim = int(1e4)", "env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action ==", "test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition(): # One station scheduling two buffers, one larger than the other, both", "2, 2: 5} state = np.array([[10.], [0.], [10.]]) buffer_processing_matrix = np.array([[-1., 0., -1.,", "of their size having to be above safety stock safety_stock = 10.0 state", "def test_priority_heuristic_agent_map_state_to_actions_no_priorities(): np.random.seed(42) priorities = {} state = np.array([[10.], [10.], [10.]]) buffer_processing_matrix =", "buffer_processing_matrix = np.array([[-1., 0., 0., -1., -1., 0., -1.], [0., -1., -1., 0.,", "be above safety stock safety_stock = 10.0 state = 5 * np.ones((3, 1))", "longest_priority_agent import snc.agents.general_heuristics.custom_activity_priority_agent \\ as custom_priority_agent def get_null_env_params(state, num_resources=None, buffer_processing_matrix=None, constituency_matrix=None): num_buffers =", "-1., -1.]]) constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.], [0., 1.,", "np.ones((1, 1)) def 
test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_starving(): # One station scheduling two buffers, one larger than", "-1, 0], [0, 1, 0, -1]]) safety_stock = 10 state = np.array([30, 30,", "0., 0., 1., 1., 1., 1.]]) constituency_matrix_original = constituency_matrix.copy() env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix,", "np.array([1, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition(): # One station scheduling two buffers, one larger", "0., 0., 0.], [0., 1., 1., 0., 0., 0., 0.], [0., 0., 0.,", "np.array([[1], [0], [1], [0], [0], [0], [0]]) expected_action = np.array([[1], [0], [1], [0.25],", "get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_average_action = np.array([[1],", "be above safety # stock. safety_stock = 9.9 state = np.array([5, 5, 5,", "state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1, 1)) env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1, 1))] env = crw.ControlledRandomWalk(**env_params)", "buffers, the sum of their size having to be above # safety stock.", "= np.array([[1], [0], [1], [0.25], [0.25], [0.25], [0.25]]) activities = np.array([3, 4, 5,", "large one is above # safety stock, swap order with respect to previous", "{0: 0, 2: 1} state = np.array([[10.], [10.], [10.]]) buffer_processing_matrix = np.array([[-1., 0.,", "[0.33], [0.33], [0.], [0.33]]) num_sim = 5e4 actions = np.zeros((buffer_processing_matrix.shape[1], int(num_sim))) for i", "above # safety stock. 
buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state =", "[0.25], [0.25]]) activities = np.array([3, 4, 5, 6]) num_sim = int(1e4) updated_action =", "= np.array([[-1, 0, 0, 0], [0, -1, 0, 0], [1, 0, -1, 0],", "= 5 * np.ones((1, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 1)) env_params[\"list_boundary_constraint_matrices\"]", "queue safety_stock = 10.0 state = 5 * np.ones((1, 1)) env_params = get_null_env_params(state)", "= np.array([20, 30])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1, 2))", "env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0, 0], [0, 0,", "safety_stock = 9.9 state = np.array([5, 5, 5, 4])[:, None] env_params = get_null_env_params(state)", "= - np.eye(2) safety_stock = 10.0 state = np.array([9, 5])[:, None] env_params =", "env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) action", "10.0 state = np.array([11, 10])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] =", "assert np.all(action == np.zeros((3, 1))) def test_random_heuristic_agent_multiple_buffers_sum_condition(): # Station scheduling three buffers, the", "0., 0., 1., 1., 1., 1.]]) env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env =", "np.array([30, 20])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1, 2)) 
env_params[\"list_boundary_constraint_matrices\"]", "of their size having to be above # safety stock. safety_stock = 10", "safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([1, 0, 0, 1])[:, None]) def", "to be above safety # stock. safety_stock = 9.9 state = np.array([4, 5,", "-1., 0.]]) constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.], [0., 1.,", "si.DeterministicCRWStateInitialiser(state), \"job_conservation_flag\": True, \"list_boundary_constraint_matrices\": None, } def test_random_heuristic_agent_starving(): # Single server queue safety_stock", "= np.zeros((buffer_processing_matrix.shape[1], num_sim)) for i in np.arange(num_sim): actions[:, [i]] = agent.map_state_to_actions(state=state) average_action =", "= {0: 0, 1: 2, 2: 5} state = np.array([[10.], [10.], [10.]]) buffer_processing_matrix", "agent.map_state_to_actions(state) assert np.all(action == np.array([0, 1])[:, None]) def test_priority_nonidling_heuristic_agent_multi_buffers_eye_cond_small_one_starve_reverse_ord(): # One station scheduling", "[0], [1], [0], [0], [0], [0]]) activities = np.array([3, 4, 5, 6]) updated_action", "stock, swap order with respect to previous test. 
buffer_processing_matrix = - np.eye(2) safety_stock", "0., 0., 0.], [0., 0., 0., 1., 1., 1., 1.]]) constituency_matrix_original = constituency_matrix.copy()", "axis=1) / num_sim np.testing.assert_array_almost_equal(average_action.reshape(-1, 1), expected_average_action, decimal=2) assert np.all(constituency_matrix_original == constituency_matrix) assert np.all(constituency_matrix_original", "1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond(): # Two stations, each one scheduling two buffers, the sum", "state = np.array([30, 20])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1,", "# Single server queue buffer_processing_matrix = - np.ones((1, 1)) safety_stock = 10.0 state", "si import snc.agents.general_heuristics.random_nonidling_agent \\ as random_nonidling_agent import snc.agents.general_heuristics.longest_buffer_priority_agent \\ as longest_priority_agent import snc.agents.general_heuristics.custom_activity_priority_agent", "with buffer 3, and 2 with 4. # Kind of condition doesn't matter", "serial, such that # buffer 1 is connected with buffer 3, and 2", "Single server queue safety_stock = 10.0 state = 5 * np.ones((1, 1)) env_params", "\\ as custom_priority_agent def get_null_env_params(state, num_resources=None, buffer_processing_matrix=None, constituency_matrix=None): num_buffers = state.shape[0] arrival_rate =", "def test_priority_nonidling_heuristic_agent_multiple_buffers_and_resources_sum_cond_2_starve(): # Two stations, each one scheduling two buffers. 
The stations are", "longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert action == np.zeros((1, 1)) def test_priority_nonidling_heuristic_agent(): #", "= crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_action = np.array([[1], [0.5], [0.5], [0.25], [0.25],", "having to be above safety stock safety_stock = 1.0 state = 1.1 *", "larger than the other. Only the large one is above # safety stock,", "the sum of their size having to be above safety # stock. safety_stock", "state = 1.1 * np.ones((1, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 1))", "np.sum(action[0:2]) == 1 and np.all(action[2:4] == np.zeros((2, 1))) def test_priority_nonidling_heuristic_agent_starving(): # Single server", "assert np.sum(action) == 2 def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_1_starve(): # Two stations, each one scheduling two", "0, -1, 0], [0, -1, 0, 0], [1, 0, -1, 0], [0, 1,", "agent.map_state_to_actions(state=state) average_action = np.sum(actions, axis=1) / num_sim np.testing.assert_array_almost_equal(average_action.reshape(-1, 1), expected_average_action, decimal=2) assert np.all(constituency_matrix_original", "buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) assert agent.priorities == priorities", "0, 0, 0], [0, -1, 0, 0], [1, 0, -1, 0], [0, 1,", "longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action[0:2]) == 1 and np.all(action[2:4] == np.array([0,", "longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([0, 1, 1, 0])[:, None])", "-1., 
0., 0., 0., 0.], [0., -1., 0., 0., 0., 0., 0.], [0.,", "[0, 0, 1, 1]]) env_params[\"list_boundary_constraint_matrices\"] = [np.hstack((np.eye(2), np.zeros((2, 2)))), np.hstack((np.zeros((2, 2)), np.eye(2)))] env", "env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert (action[0] +", "[1], [0], [0]]) expected_action = np.array([[1], [0], [1], [0], [1], [0], [0]]) activities", "= 5 * np.ones((4, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0,", "test_random_heuristic_agent_multiple_buffers_eye_condition(): # Station scheduling three buffers, each of them having to be above", "scheduling two buffers, one larger than the other. Only the large one is", "import snc.agents.general_heuristics.longest_buffer_priority_agent \\ as longest_priority_agent import snc.agents.general_heuristics.custom_activity_priority_agent \\ as custom_priority_agent def get_null_env_params(state, num_resources=None,", "as crw import snc.environments.state_initialiser as si import snc.agents.general_heuristics.random_nonidling_agent \\ as random_nonidling_agent import snc.agents.general_heuristics.longest_buffer_priority_agent", "np.all(action == np.array([0, 1])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_largest_buffers_eye_condition(): # One station scheduling two buffers,", "np.array([[1], [0.], [1.], [0.33], [0.33], [0.], [0.33]]) num_sim = 5e4 actions = np.zeros((buffer_processing_matrix.shape[1],", "def test_priority_nonidling_heuristic_agent_multiple_largest_buffers_multiple_resources_sum_cond(): # Two stations, each one scheduling two buffers. 
The stations are", "env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]])", "constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) assert agent.priorities == expected_priorities def", "1, 1, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_and_resources_sum_cond_2_starve(): # Two stations, each one scheduling two", "safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((2, 1))) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_small_one_starve(): # One", "10.0 state = 5 * np.ones((1, 1)) env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"]", "/ num_sim np.testing.assert_array_almost_equal(average_action.reshape(-1, 1), expected_average_action, decimal=2) assert np.all(constituency_matrix_original == constituency_matrix) assert np.all(constituency_matrix_original ==", "1., 0., 0., 0., 0.], [0., 0., 0., 1., 1., 1., 1.]]) constituency_matrix_original", "np.array([1, 0, 0, 1])[:, None]) def test_priority_nonidling_heuristic_agent_multi_buffers_and_resources_sum_cond_reverse_order(): # Two stations, each one scheduling", "2: 5} state = np.array([[10.], [0.], [10.]]) buffer_processing_matrix = np.array([[-1., 0., -1., 0.,", "safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action) == 1 def test_random_heuristic_agent_multiple_buffers_sum_condition_starving(): # Station scheduling", "action = agent.map_state_to_actions(state) assert action == np.ones((1, 1)) def test_random_heuristic_agent_multiple_buffers_eye_condition_starving(): # Station scheduling", "0., 0.], [0., -1., -1., 0., 0., 0., 0.], [0., 0., 0., -1.,", "[0, 1, 0, -1]]) safety_stock = 10 state = np.array([30, 20, 5, 20])[:,", "= 
longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert (action[0] + action[2] == 1) and", "action = agent.map_state_to_actions(state) assert np.all(action == np.array([0, 1])[:, None]) def test_priority_nonidling_heuristic_agent_multi_buffers_eye_cond_small_one_starve_reverse_ord(): # One", "test_priority_nonidling_heuristic_agent_multi_buffers_and_resources_sum_cond_reverse_order(): # Two stations, each one scheduling two buffers. The stations are connected", "= np.array([[1], [0], [1], [0], [0], [0], [0]]) activities = np.array([3, 4, 5,", "other. Only the large one is above # safety stock, swap order with", "crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((3, 1)))", "1., 1.]]) env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env,", "safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action[0:2]) == 1 and np.all(action[2:4] == np.array([0, 0])[:,", "np.array([[1], [0], [1], [0], [0], [1], [0]]) action = agent.map_state_to_actions(state=state) assert np.all(action ==", "1]])] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action", "None]) def test_priority_nonidling_heuristic_agent_multiple_activities_buffers_and_resources(): # Two stations, each one scheduling two buffers. The stations", "1, 0, -1]]) safety_stock = 10 state = np.array([30, 20, 9, 5])[:, None]", "safety_stock = 4.0 state = 5 * np.ones((1, 1)) env_params = get_null_env_params( state,", "stock. 
buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state = np.array([9, 11])[:, None]", "== np.zeros((2, 1))) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_small_one_starve(): # One station scheduling two buffers, one larger", "0]]), np.array([[0, 0, 1, 1]])] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action", "2 with 4. # Kind of condition doesn't matter since the largest buffer", "safety_stock = 10 state = np.array([20, 30, 30, 20])[:, None] env_params = get_null_env_params(", "assert np.all(action == np.zeros((3, 1))) def test_random_heuristic_agent_multiple_buffers_eye_condition(): # Station scheduling three buffers, each", "of them having to be above safety stock safety_stock = 10.0 state =", "assert np.all(action == updated_action) def test_priority_heuristic_agent_sample_random_action_one_possible_action(): priorities = {0: 0, 1: 2, 2:", "= 10.0 state = np.array([30, 20])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"]", "== np.zeros((1, 1)) def test_random_heuristic_agent(): # Single server queue safety_stock = 1.0 state", "than the other. Only the large one is above # safety stock, swap", "in this # agent. 
buffer_processing_matrix = np.array([[-1, 0, -1, 0], [0, -1, 0,", "= [np.hstack((np.eye(2), np.zeros((2, 2)))), np.hstack((np.zeros((2, 2)), np.eye(2)))] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env,", "10 state = np.array([30, 20, 20, 30])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix)", "2, 2: 5} state = np.array([[10.], [10.], [0.]]) buffer_processing_matrix = np.array([[-1., 0., 0.,", "= np.ones((1, 1)) env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1, 1))] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env,", "drjg import snc.environments.controlled_random_walk as crw import snc.environments.state_initialiser as si import snc.agents.general_heuristics.random_nonidling_agent \\ as", "be above safety stock. safety_stock = 9.9 state = 10 * np.ones((4, 1))", "with pytest.raises(AssertionError): _ = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) def test_priority_heuristic_agent_sample_random_action_empty_possible_actions(): priorities = {0: 0, 1:", "env_params[\"constituency_matrix\"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]]) env_params[\"list_boundary_constraint_matrices\"] = [np.array([[1,", "both above safety stock (swap # order with previous test). buffer_processing_matrix = -", "assert np.all(action == np.zeros((4, 1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_eye_cond(): # Two stations, each one scheduling", "3, and 2 with 4. 
# Kind of condition doesn't matter since the", "assert action == np.ones((1, 1)) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_starving(): # One station scheduling two buffers,", "np.array([[1, 1, 1, 0], [0, 0, 1, 1]]) env_params[\"list_boundary_constraint_matrices\"] = [np.array([[1, 1, 0,", "[1], [0], [0], [0], [0]]) expected_action = np.array([[1], [0], [1], [0.25], [0.25], [0.25],", "np.zeros_like(state), \"capacity\": np.zeros_like(state), \"constituency_matrix\": constituency_matrix, \"job_generator\": drjg.DeterministicDiscreteReviewJobGenerator( arrival_rate, buffer_processing_matrix, sim_time_interval=time_interval ), \"state_initialiser\": si.DeterministicCRWStateInitialiser(state),", "constituency_matrix is None: constituency_matrix = np.zeros((num_resources, num_resources)) time_interval = 1 return { \"cost_per_buffer\":", "than the other, both above safety stock. buffer_processing_matrix = - np.eye(2) safety_stock =", "i in np.arange(num_sim): updated_action[:, [i]] = agent.sample_random_actions(state=state, action=action, activities=activities) average_updated_action = np.sum(updated_action, axis=1)", "get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1, 2)) env_params[\"list_boundary_constraint_matrices\"] = [np.eye(2)] env = crw.ControlledRandomWalk(**env_params)", "# Two stations, each one scheduling two buffers, the sum of their size", "agent.map_state_to_actions(state) assert np.sum(action[0:2]) == 1 and np.all(action[2:4] == np.array([0, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_activities_buffers_and_resources():", "np.array([20, 30])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1, 2)) env_params[\"list_boundary_constraint_matrices\"]", "= agent.map_state_to_actions(state) assert np.sum(action[2:4]) == 1 and 
np.all(action[0:2] == np.zeros((2, 1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_2_starve():", "stock. buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state = np.array([30, 20])[:, None]", "1, 0, -1]]) safety_stock = 10 state = np.array([30, 30, 9, 5])[:, None]", "expected_action, decimal=2) def test_priority_heuristic_agent_map_state_to_actions_no_priorities(): np.random.seed(42) priorities = {} state = np.array([[10.], [10.], [10.]])", "agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert action == np.ones((1, 1)) def", "[np.array([[1, 1, 0, 0], [0, 0, 1, 0]]), np.array([[0, 0, 1, 1]])] env", "2: 1} state = np.array([[10.], [10.], [10.]]) buffer_processing_matrix = np.array([[-1., 0., 0., 0.,", "env_params[\"constituency_matrix\"] = np.ones((1, 1)) env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1, 1))] env = crw.ControlledRandomWalk(**env_params) agent =", "np.array([[1., 0., 0., 0., 0., 0., 0.], [0., 1., 1., 0., 0., 0.,", "[0], [0]]) activities = np.array([1, 2]) updated_action = agent.sample_random_actions(state=state, action=action, activities=activities) assert np.all(expected_action", "buffer_processing_matrix = - np.ones((1, 1)) safety_stock = 10.0 state = 5 * np.ones((1,", "assert np.all(constituency_matrix_original == constituency_matrix) assert np.all(constituency_matrix_original == env.constituency_matrix) def test_priority_heuristic_agent_map_state_to_actions_full_priorities_full_buffer(): priorities = {0:", "agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([1, 0])[:, None])", "[0]]) expected_action = np.array([[1], [0], [1], [0.25], [0.25], [0.25], [0.25]]) activities = np.array([3,", "= crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = 
agent.map_state_to_actions(state) assert np.sum(action[2:4]) == 1", "= [np.array([[1, 1, 0, 0], [0, 0, 1, 0]]), np.array([[0, 0, 1, 1]])]", "average_action = np.sum(actions, axis=1) / float(num_sim) np.testing.assert_array_almost_equal(average_action.reshape(-1, 1), expected_action, decimal=2) def test_priority_heuristic_agent_map_state_to_actions_full_priorities_empty_buffer(): np.random.seed(41)", "None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0, 0], [0,", "= np.array([[-1., 0., 0., 0., 0., 0., 0.], [0., -1., -1., 0., 0.,", "Two stations, each one scheduling two buffers. The stations are connected in serial,", "constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) assert agent.priorities == priorities def", "assert np.all(action == np.array([1, 0, 0, 1])[:, None]) def test_priority_nonidling_heuristic_agent_multi_buffers_and_resources_sum_cond_reverse_order(): # Two stations,", "0., 0., -1., -1., 0., -1.], [0., -1., -1., 0., 0., 0., 0.],", "longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock, name=\"LPAAgent\") action = agent.map_state_to_actions(state) assert action == np.ones((1, 1)) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_starving():", "= random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action[0:2]) == 1 and np.all(action[2:4] ==", "= random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((3, 1))) def test_random_heuristic_agent_multiple_buffers_sum_condition():", "# stock. 
safety_stock = 9.9 state = np.array([4, 5, 5, 5])[:, None] env_params", "np.zeros((4, 1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond(): # Two stations, each one scheduling two buffers, the", "-1., 0., 0., 0., 0.], [0., 0., 0., -1., -1., -1., -1.]]) constituency_matrix", "np.array([30, 20, 9, 5])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.array([[1,", "constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_action = np.array([[1], [0], [1],", "in np.arange(num_sim): updated_action[:, [i]] = agent.sample_random_actions(state=state, action=action, activities=activities) average_updated_action = np.sum(updated_action, axis=1) /", "safety_stock = 10.0 state = np.array([20, 30])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix)", "state = np.array([30, 20, 5, 20])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"]", "safety_stock = 10.0 state = 5 * np.ones((3, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"]", "agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([0, 1, 1,", "np.array([[1], [0.5], [0.5], [0.25], [0.25], [0.25], [0.25]]) num_sim = int(1e4) actions = np.zeros((buffer_processing_matrix.shape[1],", "be above safety stock in this # agent. 
buffer_processing_matrix = np.array([[-1, 0, -1,", "buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.array([[1, 1, 1, 0], [0, 0, 1, 1]]) env_params[\"list_boundary_constraint_matrices\"] =", "- np.eye(2) safety_stock = 10.0 state = np.array([11, 10])[:, None] env_params = get_null_env_params(", "= custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_average_action = np.array([[1], [0.], [1.], [0.33], [0.33], [0.], [0.33]]) num_sim", "0.]]) constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.], [0., 1., 1.,", "expected_average_action, decimal=2) assert np.all(constituency_matrix_original == constituency_matrix) assert np.all(constituency_matrix_original == env.constituency_matrix) def test_priority_heuristic_agent_map_state_to_actions_full_priorities_full_buffer(): priorities", "[np.eye(2)] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action", "np.array([9, 11])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1, 2)) env_params[\"list_boundary_constraint_matrices\"]", "[np.ones((1, 3))] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert", "agent.map_state_to_actions(state) assert np.sum(action[0:2]) == 1 and np.all(action[2:4] == np.zeros((2, 1))) def test_priority_nonidling_heuristic_agent_starving(): #", "agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action[2:4]) == 1 and np.all(action[0:2]", "[np.eye(3)] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert 
np.sum(action)", "buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) action = np.array([[1], [0],", "= - np.eye(2) safety_stock = 10.0 state = np.array([11, 10])[:, None] env_params =", "buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state = np.array([9, 5])[:, None] env_params", "than the other, but both below safety stock. buffer_processing_matrix = - np.eye(2) safety_stock", "= random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert action == np.ones((1, 1)) def test_random_heuristic_agent_multiple_buffers_eye_condition_starving():", "np.array([[10.], [10.], [10.]]) buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0., 0., 0.], [0.,", "crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([1, 0,", "0, 1: None, 2: 5} state = np.array([[10.], [10.], [10.]]) buffer_processing_matrix = np.array([[-1.,", "scheduling two buffers. The stations are connected in serial, such that # buffer", "test_random_heuristic_agent_multiple_buffers_multiple_resources_eye_cond(): # Two stations, each one scheduling two buffers, each of them having", "test). 
buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state = np.array([20, 30])[:, None]", "= np.ones((1, 2)) env_params[\"list_boundary_constraint_matrices\"] = [np.eye(2)] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock)", "action = agent.map_state_to_actions(state) assert np.all(action == np.array([1, 0, 0, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_largest_buffers_multiple_resources_sum_cond():", "buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state = np.array([11, 11])[:, None] env_params", "env = crw.ControlledRandomWalk(**env_params) with pytest.raises(AssertionError): _ = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) def test_priority_heuristic_agent_sample_random_action_empty_possible_actions(): priorities =", "5])[:, None] env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0, 0], [0, 0,", "1) and (action[1] == 0) and (action[3] == 1) def test_priority_heuristic_agent_init_all_resources_given(): priorities =", "the large one is above # safety stock, swap order with respect to", "= np.array([30, 20, 20, 30])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] =", "state = np.array([[10.], [10.], [0.]]) buffer_processing_matrix = np.array([[-1., 0., 0., -1., -1., 0.,", "= np.array([30, 20, 5, 20])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] =", "= np.array([9, 11])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1, 2))", "5} state = np.array([[10.], [10.], [0.]]) buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0.,", "with previous test). 
buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state = np.array([20,", "== env.constituency_matrix) def test_priority_heuristic_agent_map_state_to_actions_full_priorities_full_buffer(): priorities = {0: 0, 1: 2, 2: 5} state", "env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action ==", "# Kind of condition doesn't matter since the largest buffer has to be", "snc.agents.general_heuristics.longest_buffer_priority_agent \\ as longest_priority_agent import snc.agents.general_heuristics.custom_activity_priority_agent \\ as custom_priority_agent def get_null_env_params(state, num_resources=None, buffer_processing_matrix=None,", "action=action, activities=activities) assert np.all(action == updated_action) def test_priority_heuristic_agent_sample_random_action_one_possible_action(): priorities = {0: 0, 1:", "1.]]) env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities)", "= agent.map_state_to_actions(state) assert np.all(action == np.zeros((2, 1))) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_small_one_starve(): # One station scheduling", "constituency_matrix = np.zeros((num_resources, num_resources)) time_interval = 1 return { \"cost_per_buffer\": np.zeros_like(state), \"capacity\": np.zeros_like(state),", "10 * np.ones((4, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0, 0],", "\"constituency_matrix\": constituency_matrix, \"job_generator\": drjg.DeterministicDiscreteReviewJobGenerator( arrival_rate, buffer_processing_matrix, sim_time_interval=time_interval ), \"state_initialiser\": si.DeterministicCRWStateInitialiser(state), \"job_conservation_flag\": 
True, \"list_boundary_constraint_matrices\":", "np.arange(num_sim): updated_action[:, [i]] = agent.sample_random_actions(state=state, action=action, activities=activities) average_updated_action = np.sum(updated_action, axis=1) / float(num_sim)", "connected with buffer 3, and 2 with 4. # Kind of condition doesn't", "as custom_priority_agent def get_null_env_params(state, num_resources=None, buffer_processing_matrix=None, constituency_matrix=None): num_buffers = state.shape[0] arrival_rate = np.ones_like(state)", "np.array([[-1., 0., -1., 0., 0., 0., 0.], [0., -1., 0., 0., 0., 0.,", "safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((3, 1))) def test_random_heuristic_agent_multiple_buffers_sum_condition(): # Station", "safety_stock = 10.0 state = np.array([30, 20])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix)", "buffers, both equal and above safety stock. buffer_processing_matrix = - np.eye(2) safety_stock =", "scheduling three buffers, the sum of their size having to be above safety", "server queue safety_stock = 1.0 state = 1.1 * np.ones((1, 1)) env_params =", "having to be above safety stock. 
safety_stock = 10.0 state = 5 *", "np.array([0, 1])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_largest_buffers_eye_condition(): # One station scheduling two buffers, both equal", "Two stations, each one scheduling two buffers, the sum of their size having", "np.ones((1, 1)) safety_stock = 10.0 state = 5 * np.ones((1, 1)) env_params =", "state = np.array([30, 30, 9, 5])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"]", "agent.map_state_to_actions(state) assert action == np.ones((1, 1)) def test_random_heuristic_agent_multiple_buffers_eye_condition_starving(): # Station scheduling three buffers,", "= custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_action = np.array([[1], [0], [1], [0], [0], [1], [0]]) action", "np.array([3, 4, 5, 6]) updated_action = agent.sample_random_actions(state=state, action=action, activities=activities) assert np.all(action == updated_action)", "one larger than the other, but both below safety stock. 
buffer_processing_matrix = -", "9.9 state = 10 * np.ones((4, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.array([[1,", "0, 0, 1])[:, None]) def test_priority_nonidling_heuristic_agent_multi_buffers_and_resources_sum_cond_reverse_order(): # Two stations, each one scheduling two", "test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond(): # Two stations, each one scheduling two buffers, the sum of their", "agent.map_state_to_actions(state) assert np.sum(action) == 1 def test_priority_nonidling_heuristic_agent_multiple_buffers_multiple_resources_sum_cond(): # Two stations, each one scheduling", "0, 0], [0, 0, 1, 1]]) env_params[\"list_boundary_constraint_matrices\"] = [np.hstack((np.eye(2), np.zeros((2, 2)))), np.hstack((np.zeros((2, 2)),", "[0.25], [0.25], [0.25], [0.25]]) activities = np.array([3, 4, 5, 6]) num_sim = int(1e4)", "# buffer 1 is connected with buffer 3, and 2 with 4. #", "constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.], [0., 1., 1., 0.,", "= np.ones((1, 1)) env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1, 1))] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env,", "state = np.array([20, 30])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1,", "action == np.ones((1, 1)) def test_random_heuristic_agent_multiple_buffers_eye_condition_starving(): # Station scheduling three buffers, each of", "0, 1: 2, 2: 5} state = np.array([[10.], [10.], [0.]]) buffer_processing_matrix = np.array([[-1.,", "two buffers, the sum of their size having to be above # safety", "10 state = 4 * np.ones((4, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.array([[1,", "= agent.map_state_to_actions(state=state) average_action = np.sum(actions, axis=1) / float(num_sim) 
np.testing.assert_array_almost_equal(average_action.reshape(-1, 1), expected_action, decimal=2) def", "10.0 state = 3 * np.ones((3, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1,", "state = np.array([5, 5, 5, 4])[:, None] env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.array([[1,", "assert np.all(action == np.array([1, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition(): # One station scheduling two", "env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.array([[1, 1, 1, 0], [0, 0,", "num_buffers = state.shape[0] arrival_rate = np.ones_like(state) if num_resources is None: num_resources = num_buffers", "safety_stock = 10 state = np.array([30, 20, 5, 20])[:, None] env_params = get_null_env_params(", "[i]] = agent.map_state_to_actions(state=state) average_action = np.sum(actions, axis=1) / num_sim np.testing.assert_array_almost_equal(average_action.reshape(-1, 1), expected_average_action, decimal=2)", "= get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_action =", "Two stations, each one scheduling two buffers, each of them having to be", "env_params[\"constituency_matrix\"] = np.array([[1, 1, 1, 0], [0, 0, 1, 1]]) env_params[\"list_boundary_constraint_matrices\"] = [np.array([[1,", "custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_average_action = np.array([[1], [0.], [1.], [0.33], [0.33], [0.], [0.33]]) num_sim =", "# safety stock. 
buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state = np.array([9,", "env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert action ==", "safety_stock = 1.0 state = 1.1 * np.ones((1, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"]", "for i in np.arange(num_sim): actions[:, [i]] = agent.map_state_to_actions(state=state) average_action = np.sum(actions, axis=1) /", "= 10.0 state = 5 * np.ones((4, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] =", "* np.ones((4, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0, 0], [0,", "= np.zeros((num_resources, num_resources)) time_interval = 1 return { \"cost_per_buffer\": np.zeros_like(state), \"capacity\": np.zeros_like(state), \"constituency_matrix\":", "get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]])", "= agent.map_state_to_actions(state) assert np.all(action == np.zeros((4, 1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_eye_cond(): # Two stations, each", "action = agent.map_state_to_actions(state) assert np.sum(action) == 1 def test_random_heuristic_agent_multiple_buffers_multiple_resources_eye_cond_starving(): # Two stations, each", "the other, both above safety stock. buffer_processing_matrix = - np.eye(2) safety_stock = 10.0", "safety_stock = 1.0 state = 1.1 * np.ones((3, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"]", "two buffers, the sum of their size having to be above safety #", "# stock. 
safety_stock = 9.9 state = 5 * np.ones((4, 1)) env_params =", "np.array([[1], [0], [1], [0], [0], [0], [0]]) activities = np.array([3, 4, 5, 6])", "np.array([[10.], [10.], [0.]]) buffer_processing_matrix = np.array([[-1., 0., 0., -1., -1., 0., -1.], [0.,", "two buffers, each of them having to be above safety stock. safety_stock =", "the large one is above # safety stock. buffer_processing_matrix = - np.eye(2) safety_stock", "np.zeros((2, 2)))), np.hstack((np.zeros((2, 2)), np.eye(2)))] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action", "1, 1]])] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert", "= int(1e4) actions = np.zeros((buffer_processing_matrix.shape[1], num_sim)) for i in np.arange(num_sim): actions[:, [i]] =", "= crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) action = np.array([[1], [0], [0], [0], [1],", "be above safety # stock. safety_stock = 9.9 state = np.array([4, 5, 5,", "-np.triu(np.ones((num_buffers, num_resources))) if constituency_matrix is None: constituency_matrix = np.zeros((num_resources, num_resources)) time_interval = 1", "-1.]]) constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.], [0., 1., 1.,", "crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([1, 0])[:,", "to be above safety stock safety_stock = 10.0 state = 5 * np.ones((3,", "test. buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state = np.array([11, 10])[:, None]", "[0., -1., -1., 0., 0., 0., 0.], [0., 0., 0., 0., 0., -1.,", "test_priority_nonidling_heuristic_agent_multiple_buffers_and_resources_sum_cond_2_starve(): # Two stations, each one scheduling two buffers. 
The stations are connected", "== 0) and (action[3] == 1) def test_priority_heuristic_agent_init_all_resources_given(): priorities = {0: 0, 1:", "random_nonidling_agent import snc.agents.general_heuristics.longest_buffer_priority_agent \\ as longest_priority_agent import snc.agents.general_heuristics.custom_activity_priority_agent \\ as custom_priority_agent def get_null_env_params(state,", "3)) env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1, 3))] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action", "safety stock. safety_stock = 9.9 state = 10 * np.ones((4, 1)) env_params =", "= np.array([[10.], [10.], [10.]]) buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0., 0., 0.],", "stations are connected in serial, such that # buffer 1 is connected with", "size having to be above # safety stock. safety_stock = 10 state =", "== 2 def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_starving(): # Two stations, each one scheduling two buffers, the", "-1.], [0., -1., -1., 0., 0., 0., 0.], [0., 0., 0., 0., 0.,", "action = agent.map_state_to_actions(state) assert action == np.ones((1, 1)) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_starving(): # One station", "env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_average_action", "activities = np.array([1, 2]) updated_action = agent.sample_random_actions(state=state, action=action, activities=activities) assert np.all(expected_action == updated_action)", "decimal=2) def test_priority_heuristic_agent_map_state_to_actions_no_priorities(): np.random.seed(42) priorities = {} state = np.array([[10.], [10.], [10.]]) buffer_processing_matrix", "np.random.seed(42) priorities = {} 
state = np.array([[10.], [10.], [10.]]) buffer_processing_matrix = np.array([[-1., 0.,", "# Single server queue buffer_processing_matrix = - np.ones((1, 1)) safety_stock = 4.0 state", "actions = np.zeros((buffer_processing_matrix.shape[1], num_sim)) for i in np.arange(num_sim): actions[:, [i]] = agent.map_state_to_actions(state=state) average_action", "def test_priority_heuristic_agent_sample_random_action_multiple_possible_actions(): np.random.seed(42) priorities = {0: 0, 1: 2, 2: 5} state =", "get_null_env_params(state) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]]) env_params[\"list_boundary_constraint_matrices\"] =", "0, -1]]) safety_stock = 10 state = np.array([30, 20, 9, 5])[:, None] env_params", "10 state = np.array([30, 20, 9, 5])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix)", "constituency_matrix, \"job_generator\": drjg.DeterministicDiscreteReviewJobGenerator( arrival_rate, buffer_processing_matrix, sim_time_interval=time_interval ), \"state_initialiser\": si.DeterministicCRWStateInitialiser(state), \"job_conservation_flag\": True, \"list_boundary_constraint_matrices\": None,", "[1, 0, -1, 0], [0, 1, 0, -1]]) safety_stock = 10 state =", "0, 1, 1]])] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state)", "assert agent.priorities == priorities def test_priority_heuristic_agent_init_not_all_resources_given(): priorities = {0: 0, 2: 5} expected_priorities", "6]) updated_action = agent.sample_random_actions(state=state, action=action, activities=activities) assert np.all(action == updated_action) def test_priority_heuristic_agent_sample_random_action_one_possible_action(): priorities", "custom_priority_agent.CustomActivityPriorityAgent(env, priorities) def test_priority_heuristic_agent_sample_random_action_empty_possible_actions(): priorities = {0: 0, 1: 2, 2: 5} state", 
"agent.map_state_to_actions(state) assert np.all(action == np.zeros((4, 1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_eye_cond(): # Two stations, each one", "== 1 def test_random_heuristic_agent_multiple_buffers_multiple_resources_eye_cond_starving(): # Two stations, each one scheduling two buffers, each", "== updated_action) def test_priority_heuristic_agent_sample_random_action_one_possible_action(): priorities = {0: 0, 1: 2, 2: 5} state", "= 10 * np.ones((4, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0,", "env_params[\"constituency_matrix\"] = np.ones((1, 3)) env_params[\"list_boundary_constraint_matrices\"] = [np.eye(3)] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env,", "action[2] == 1) and (action[1] == 0) and (action[3] == 1) def test_priority_heuristic_agent_init_all_resources_given():", "safety stock. buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state = np.array([9, 11])[:,", "longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([1, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_reverse_order():", "np.random.seed(42) priorities = {0: 0, 1: 2, 2: 5} state = np.array([[10.], [10.],", "np.all(action == np.zeros((3, 1))) def test_random_heuristic_agent_multiple_buffers_eye_condition(): # Station scheduling three buffers, each of", "np.array([[-1, 0, 0, 0], [0, -1, 0, 0], [1, 0, -1, 0], [0,", "other, but both below safety stock. 
buffer_processing_matrix = - np.eye(2) safety_stock = 10.0", "int(1e4) actions = np.zeros((buffer_processing_matrix.shape[1], num_sim)) for i in np.arange(num_sim): actions[:, [i]] = agent.map_state_to_actions(state=state)", "= crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action) == 2", "scheduling two buffers, one larger than the other, both above safety stock. buffer_processing_matrix", "1.]]) env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) with pytest.raises(AssertionError): _ =", "the other, both above safety stock (swap # order with previous test). buffer_processing_matrix", "= crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((3,", "buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state = np.array([9, 11])[:, None] env_params", "arrival_rate = np.ones_like(state) if num_resources is None: num_resources = num_buffers if buffer_processing_matrix is", "two buffers, both equal and above safety stock. buffer_processing_matrix = - np.eye(2) safety_stock", "np.array([30, 20, 5, 20])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.array([[1,", "each one scheduling two buffers. The stations are connected in serial, such that", "having to be above # safety stock. 
safety_stock = 10 state = 4", "[0, 0, 1, 1]]) env_params[\"list_boundary_constraint_matrices\"] = [np.array([[1, 1, 0, 0]]), np.array([[0, 0, 1,", "np.all(action[2:4] == np.array([0, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_activities_buffers_and_resources(): # Two stations, each one scheduling", "[0.25], [0.25], [0.25]]) activities = np.array([3, 4, 5, 6]) num_sim = int(1e4) updated_action", "snc.agents.general_heuristics.custom_activity_priority_agent \\ as custom_priority_agent def get_null_env_params(state, num_resources=None, buffer_processing_matrix=None, constituency_matrix=None): num_buffers = state.shape[0] arrival_rate", "safety_stock = 9.9 state = np.array([4, 5, 5, 5])[:, None] env_params = get_null_env_params(state)", "np.array([[1], [0], [1], [0.25], [0.25], [0.25], [0.25]]) activities = np.array([3, 4, 5, 6])", "crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((2, 1)))", "0.], [0., 0., 0., -1., -1., -1., -1.]]) constituency_matrix = np.array([[1., 0., 0.,", "= crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action) == 1", "np.array([0, 1])[:, None]) def test_priority_nonidling_heuristic_agent_multi_buffers_eye_cond_small_one_starve_reverse_ord(): # One station scheduling two buffers, one larger", "activities = np.array([3, 4, 5, 6]) num_sim = int(1e4) updated_action = np.zeros((buffer_processing_matrix.shape[1], num_sim))", "def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_starving(): # Two stations, each one scheduling two buffers, the sum of", "= np.array([[1], [0.5], [0.5], [0.25], [0.25], [0.25], [0.25]]) num_sim = int(1e4) actions =", "= crw.ControlledRandomWalk(**env_params) agent = 
custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_average_action = np.array([[1], [0.], [1.], [0.33], [0.33],", "crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([0, 1,", "# Station scheduling three buffers, the sum of their size having to be", "to previous test. buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state = np.array([11,", "order with previous test). buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state =", "agent.map_state_to_actions(state) assert np.sum(action) == 1 def test_random_heuristic_agent_multiple_buffers_sum_condition_starving(): # Station scheduling three buffers, the", "[10.], [0.]]) buffer_processing_matrix = np.array([[-1., 0., 0., -1., -1., 0., -1.], [0., -1.,", "state = np.array([20, 30, 30, 20])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"]", "= np.zeros((buffer_processing_matrix.shape[1], int(num_sim))) for i in np.arange(int(num_sim)): actions[:, [i]] = agent.map_state_to_actions(state=state) average_action =", "above safety stock in this # agent. buffer_processing_matrix = np.array([[-1, 0, -1, 0],", "both above safety stock. 
buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state =", "= np.ones((1, 3)) env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1, 3))] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env,", "5, 5, 5])[:, None] env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0, 0],", "updated_action) def test_priority_heuristic_agent_sample_random_action_one_possible_action(): priorities = {0: 0, 1: 2, 2: 5} state =", "np.zeros((buffer_processing_matrix.shape[1], num_sim)) for i in np.arange(num_sim): actions[:, [i]] = agent.map_state_to_actions(state=state) average_action = np.sum(actions,", "stations, each one scheduling two buffers. The stations are connected in serial, such", "def test_priority_nonidling_heuristic_agent_multiple_activities_buffers_and_resources(): # Two stations, each one scheduling two buffers. The stations are", "0., 0.], [0., -1., 0., 0., 0., 0., 0.], [0., 0., 0., -1.,", "constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_action = np.array([[1], [0.5], [0.5],", "and np.all(action[2:4] == np.zeros((2, 1))) def test_priority_nonidling_heuristic_agent_starving(): # Single server queue buffer_processing_matrix =", "env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) action = np.array([[1], [0], [1], [0],", "drjg.DeterministicDiscreteReviewJobGenerator( arrival_rate, buffer_processing_matrix, sim_time_interval=time_interval ), \"state_initialiser\": si.DeterministicCRWStateInitialiser(state), \"job_conservation_flag\": True, \"list_boundary_constraint_matrices\": None, } def", "- np.eye(2) safety_stock = 10.0 state = np.array([9, 5])[:, None] env_params = get_null_env_params(", "1))] env = crw.ControlledRandomWalk(**env_params) agent = 
longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert action", "assert np.all(action == np.array([1, 0, 0, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_largest_buffers_multiple_resources_sum_cond(): # Two stations,", "[0], [0], [0], [0]]) expected_action = np.array([[1], [0], [1], [0.25], [0.25], [0.25], [0.25]])", "1)) safety_stock = 4.0 state = 5 * np.ones((1, 1)) env_params = get_null_env_params(", "buffer_processing_matrix, sim_time_interval=time_interval ), \"state_initialiser\": si.DeterministicCRWStateInitialiser(state), \"job_conservation_flag\": True, \"list_boundary_constraint_matrices\": None, } def test_random_heuristic_agent_starving(): #", "condition doesn't matter since the largest buffer has to be above safety stock", "None] env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0, 0], [0, 0, 1,", "def test_priority_heuristic_agent_init_wrong_activity_given(): priorities = {0: 0, 2: 1} state = np.array([[10.], [10.], [10.]])", "crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert action == np.zeros((1, 1))", "0], [0, 1, 0, -1]]) safety_stock = 10 state = np.array([30, 20, 5,", "= random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action) == 1 def test_random_heuristic_agent_multiple_buffers_sum_condition_starving(): #", "1))) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_small_one_starve(): # One station scheduling two buffers, one larger than the", "10 state = np.array([30, 30, 9, 5])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix)", "-1]]) safety_stock = 10 state = np.array([20, 30, 30, 20])[:, None] env_params =", "constituency_matrix=constituency_matrix) env = 
crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) action = np.array([[1], [0], [0],", "sum of their size having to be above safety # stock. safety_stock =", "= np.array([3, 4, 5, 6]) updated_action = agent.sample_random_actions(state=state, action=action, activities=activities) assert np.all(action ==", "None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition(): # One station scheduling two buffers, one larger than the", "= 10 state = np.array([30, 30, 9, 5])[:, None] env_params = get_null_env_params( state,", "1]])] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert (action[0]", "action = agent.map_state_to_actions(state) assert np.sum(action) == 2 def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_1_starve(): # Two stations, each", "np.array([[1], [0], [0], [0], [1], [0], [0]]) expected_action = np.array([[1], [0], [1], [0],", "agent.map_state_to_actions(state) assert np.all(action == np.array([0, 1, 1, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_and_resources_sum_cond_2_starve(): # Two", "[np.ones((1, 1))] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock, name=\"LPAAgent\") action = agent.map_state_to_actions(state)", "= np.array([[-1., 0., 0., -1., -1., 0., -1.], [0., -1., -1., 0., 0.,", "1, 1, 0], [0, 0, 1, 1]]) env_params[\"list_boundary_constraint_matrices\"] = [np.array([[1, 1, 0, 0],", "[np.ones((1, 1))] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert", "1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0, 0], [0, 0, 1,", "np.all(action == np.array([1, 
0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_reverse_order(): # One station scheduling two buffers,", "== np.array([1, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_reverse_order(): # One station scheduling two buffers, one", "stock. buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state = np.array([11, 11])[:, None]", "\\ as drjg import snc.environments.controlled_random_walk as crw import snc.environments.state_initialiser as si import snc.agents.general_heuristics.random_nonidling_agent", "\"job_generator\": drjg.DeterministicDiscreteReviewJobGenerator( arrival_rate, buffer_processing_matrix, sim_time_interval=time_interval ), \"state_initialiser\": si.DeterministicCRWStateInitialiser(state), \"job_conservation_flag\": True, \"list_boundary_constraint_matrices\": None, }", "= get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1, 2)) env_params[\"list_boundary_constraint_matrices\"] = [np.eye(2)] env =", "action == np.ones((1, 1)) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_starving(): # One station scheduling two buffers, one", "buffer_processing_matrix = np.array([[-1., 0., -1., 0., 0., 0., 0.], [0., -1., 0., 0.,", "action=action, activities=activities) assert np.all(expected_action == updated_action) def test_priority_heuristic_agent_sample_random_action_multiple_possible_actions(): np.random.seed(42) priorities = {0: 0,", "np.array([30, 20, 20, 30])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.array([[1,", "env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1, 1))] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock, name=\"LPAAgent\") action", "def 
test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_small_one_starve(): # One station scheduling two buffers, one larger than the other.", "safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action) == 1 def test_priority_nonidling_heuristic_agent_multiple_buffers_multiple_resources_sum_cond(): # Two stations,", "10.0 state = 5 * np.ones((3, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1,", "= np.sum(actions, axis=1) / num_sim np.testing.assert_array_almost_equal(average_action.reshape(-1, 1), expected_average_action, decimal=2) assert np.all(constituency_matrix_original == constituency_matrix)", "= get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 1)) env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1, 1))] env = crw.ControlledRandomWalk(**env_params)", "def test_random_heuristic_agent_multiple_buffers_sum_condition(): # Station scheduling three buffers, the sum of their size having", "= 9.9 state = 10 * np.ones((4, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] =", "1]])] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action[0:2])", "20])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.array([[1, 1, 1, 0],", "None, 2: 5} state = np.array([[10.], [10.], [10.]]) buffer_processing_matrix = np.array([[-1., 0., 0.,", "buffers, one larger than the other, both above safety stock (swap # order", "None]) def test_priority_nonidling_heuristic_agent_multiple_largest_buffers_multiple_resources_sum_cond(): # Two stations, each one scheduling two buffers. 
The stations", "[1.], [0.33], [0.33], [0.], [0.33]]) num_sim = 5e4 actions = np.zeros((buffer_processing_matrix.shape[1], int(num_sim))) for", "decimal=2) def test_priority_heuristic_agent_map_state_to_actions_full_priorities_empty_buffer(): np.random.seed(41) priorities = {0: 0, 1: 2, 2: 5} state", "be above # safety stock. safety_stock = 10 state = 4 * np.ones((4,", "0, 2: 5} expected_priorities = {0: 0, 1: None, 2: 5} state =", "agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) action = np.array([[1], [0], [0], [0], [1], [0], [0]])", "# One station scheduling two buffers, one larger than the other, but both", "= 1.0 state = 1.1 * np.ones((1, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] =", "0], [0, 1, 0, -1]]) safety_stock = 10 state = np.array([20, 30, 30,", "safety_stock = 10.0 state = np.array([11, 10])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix)", "0., 0., 0.], [0., 0., 0., 1., 1., 1., 1.]]) env_params = get_null_env_params(state=state,", "queue safety_stock = 1.0 state = 1.1 * np.ones((1, 1)) env_params = get_null_env_params(state)", "== np.zeros((4, 1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_eye_cond(): # Two stations, each one scheduling two buffers,", "[0], [0]]) activities = np.array([3, 4, 5, 6]) updated_action = agent.sample_random_actions(state=state, action=action, activities=activities)", "one scheduling two buffers, each of them having to be above safety stock.", "= - np.ones((1, 1)) safety_stock = 10.0 state = 5 * np.ones((1, 1))", "np.zeros((1, 1)) def test_priority_nonidling_heuristic_agent(): # Single server queue buffer_processing_matrix = - np.ones((1, 1))", "def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_reverse_order(): # One station scheduling two buffers, one larger than the other,", "0., 0., -1., -1., -1., -1.]]) constituency_matrix = np.array([[1., 0., 0., 0., 
0.,", "agent.map_state_to_actions(state) assert np.sum(action[2:4]) == 1 and np.all(action[0:2] == np.zeros((2, 1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_2_starve(): #", "np.sum(actions, axis=1) / float(num_sim) np.testing.assert_array_almost_equal(average_action.reshape(-1, 1), expected_action, decimal=2) def test_priority_heuristic_agent_map_state_to_actions_full_priorities_empty_buffer(): np.random.seed(41) priorities =", "10.0 state = 5 * np.ones((1, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1,", "agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action[0:2]) == 1 and np.all(action[2:4]", "below safety stock. buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state = np.array([9,", "-1., 0., 0., 0., 0.], [0., 0., 0., 0., 0., -1., 0.]]) constituency_matrix", "# Two stations, each one scheduling two buffers. The stations are connected in", "* np.ones((3, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 3)) env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1,", "agent.map_state_to_actions(state) assert np.all(action == np.array([1, 0, 0, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_largest_buffers_multiple_resources_sum_cond(): # Two", "= random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((4, 1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond():", "agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action[0:2]) == 1 and np.all(action[2:4]", "server queue buffer_processing_matrix = - np.ones((1, 1)) safety_stock = 10.0 state = 5", "= np.array([[10.], [0.], [10.]]) buffer_processing_matrix = np.array([[-1., 0., -1., 
0., 0., 0., 0.],", "\"list_boundary_constraint_matrices\": None, } def test_random_heuristic_agent_starving(): # Single server queue safety_stock = 10.0 state", "[0., 1., 1., 0., 0., 0., 0.], [0., 0., 0., 1., 1., 1.,", "= crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert (action[0] + action[2]", "random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert action == np.zeros((1, 1)) def test_random_heuristic_agent(): #", "test_priority_heuristic_agent_map_state_to_actions_full_priorities_empty_buffer(): np.random.seed(41) priorities = {0: 0, 1: 2, 2: 5} state = np.array([[10.],", "= np.array([1, 2]) updated_action = agent.sample_random_actions(state=state, action=action, activities=activities) assert np.all(expected_action == updated_action) def", "1 and np.all(action[2:4] == np.zeros((2, 1))) def test_priority_nonidling_heuristic_agent_starving(): # Single server queue buffer_processing_matrix", "0., 0., 0., 0., 0.], [0., 0., 0., -1., -1., -1., -1.]]) constituency_matrix", "np.all(constituency_matrix_original == constituency_matrix) assert np.all(constituency_matrix_original == env.constituency_matrix) def test_priority_heuristic_agent_map_state_to_actions_full_priorities_full_buffer(): priorities = {0: 0,", "agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_action = np.array([[1], [0], [1], [0], [0], [1], [0]])", "1))) def test_random_heuristic_agent_multiple_buffers_eye_condition(): # Station scheduling three buffers, each of them having to", "= crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert action == np.ones((1,", "-1]]) safety_stock = 10 state = np.array([30, 30, 9, 5])[:, None] env_params =", "state = 1.1 * np.ones((3, 1)) env_params = 
get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 3))", "agent.map_state_to_actions(state) assert np.all(action == np.array([0, 1])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_largest_buffers_eye_condition(): # One station scheduling", "0., 0., 0.], [0., -1., 0., 0., 0., 0., 0.], [0., 0., 0.,", "np.zeros((2, 1))) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_small_one_starve(): # One station scheduling two buffers, one larger than", "crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_average_action = np.array([[1], [0.], [1.], [0.33], [0.33], [0.],", "5, 6]) updated_action = agent.sample_random_actions(state=state, action=action, activities=activities) assert np.all(action == updated_action) def test_priority_heuristic_agent_sample_random_action_one_possible_action():", "1, 0]]), np.array([[0, 0, 1, 1]])] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock)", "[np.eye(3)] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action", "= - np.eye(2) safety_stock = 10.0 state = np.array([30, 20])[:, None] env_params =", "each of them having to be above safety stock. 
safety_stock = 10.0 state", "20])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0, 0],", "import snc.environments.state_initialiser as si import snc.agents.general_heuristics.random_nonidling_agent \\ as random_nonidling_agent import snc.agents.general_heuristics.longest_buffer_priority_agent \\ as", "action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((3, 1))) def test_random_heuristic_agent_multiple_buffers_eye_condition(): # Station scheduling", "one larger than the other, both above safety stock. buffer_processing_matrix = - np.eye(2)", "0, 1, 1]]) env_params[\"list_boundary_constraint_matrices\"] = [np.array([[1, 1, 0, 0]]), np.array([[0, 0, 1, 1]])]", "safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([0, 1])[:, None]) def test_priority_nonidling_heuristic_agent_multi_buffers_eye_cond_small_one_starve_reverse_ord(): #", "= np.array([3, 4, 5, 6]) num_sim = int(1e4) updated_action = np.zeros((buffer_processing_matrix.shape[1], num_sim)) for", "state = np.array([4, 5, 5, 5])[:, None] env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.array([[1,", "1, 0, -1]]) safety_stock = 10 state = np.array([30, 20, 5, 20])[:, None]", "them having to be above safety stock. 
safety_stock = 9.9 state = 10", "= get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) with pytest.raises(AssertionError): _ = custom_priority_agent.CustomActivityPriorityAgent(env, priorities)", "name=\"LPAAgent\") action = agent.map_state_to_actions(state) assert action == np.ones((1, 1)) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_starving(): # One", "are connected in serial, such that # buffer 1 is connected with buffer", "1 and np.all(action[0:2] == np.zeros((2, 1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_2_starve(): # Two stations, each one", "0, -1]]) safety_stock = 10 state = np.array([30, 20, 5, 20])[:, None] env_params", "np.sum(actions, axis=1) / num_sim np.testing.assert_array_almost_equal(average_action.reshape(-1, 1), expected_average_action, decimal=2) assert np.all(constituency_matrix_original == constituency_matrix) assert", "of them having to be above safety stock safety_stock = 1.0 state =", "assert action == np.ones((1, 1)) def test_random_heuristic_agent_multiple_buffers_eye_condition_starving(): # Station scheduling three buffers, each", "for i in np.arange(num_sim): updated_action[:, [i]] = agent.sample_random_actions(state=state, action=action, activities=activities) average_updated_action = np.sum(updated_action,", "= agent.map_state_to_actions(state) assert np.sum(action) == 1 def test_random_heuristic_agent_multiple_buffers_sum_condition_starving(): # Station scheduling three buffers,", "env_params[\"constituency_matrix\"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]]) env_params[\"list_boundary_constraint_matrices\"] = [np.hstack((np.eye(2),", "env_params[\"list_boundary_constraint_matrices\"] = [np.array([[1, 1, 0, 0]]), np.array([[0, 0, 1, 1]])] env = crw.ControlledRandomWalk(**env_params)", "2)))), np.hstack((np.zeros((2, 2)), np.eye(2)))] env = 
crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action =", "np.array([1, 2]) updated_action = agent.sample_random_actions(state=state, action=action, activities=activities) assert np.all(expected_action == updated_action) def test_priority_heuristic_agent_sample_random_action_multiple_possible_actions():", "30, 20])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0,", "env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action) ==", "action = np.array([[1], [0], [0], [0], [1], [0], [0]]) expected_action = np.array([[1], [0],", "1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 3)) env_params[\"list_boundary_constraint_matrices\"] = [np.eye(3)] env =", "Kind of condition doesn't matter since the largest buffer has to be above", "import snc.environments.job_generators.discrete_review_job_generator \\ as drjg import snc.environments.controlled_random_walk as crw import snc.environments.state_initialiser as si", "= 3 * np.ones((3, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 3)) env_params[\"list_boundary_constraint_matrices\"]", "np.array([[10.], [10.], [0.]]) buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0., 0., 0.], [0.,", "safety stock. 
buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state = np.array([11, 11])[:,", "-1]]) safety_stock = 10 state = np.array([30, 20, 9, 5])[:, None] env_params =", "numpy as np import pytest import snc.environments.job_generators.discrete_review_job_generator \\ as drjg import snc.environments.controlled_random_walk as", "= custom_priority_agent.CustomActivityPriorityAgent(env, priorities) action = np.array([[1], [0], [0], [0], [1], [0], [0]]) expected_action", "/ float(num_sim) np.testing.assert_array_almost_equal(average_updated_action.reshape(-1, 1), expected_action, decimal=2) def test_priority_heuristic_agent_map_state_to_actions_no_priorities(): np.random.seed(42) priorities = {} state", "1: 2, 2: 5} state = np.array([[10.], [10.], [0.]]) buffer_processing_matrix = np.array([[-1., 0.,", "def test_priority_heuristic_agent_map_state_to_actions_full_priorities_full_buffer(): priorities = {0: 0, 1: 2, 2: 5} state = np.array([[10.],", "# Single server queue safety_stock = 10.0 state = 5 * np.ones((1, 1))", "test_priority_heuristic_agent_sample_random_action_one_possible_action(): priorities = {0: 0, 1: 2, 2: 5} state = np.array([[10.], [0.],", "env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock, name=\"LPAAgent\") action = agent.map_state_to_actions(state) assert action", "5, 6]) num_sim = int(1e4) updated_action = np.zeros((buffer_processing_matrix.shape[1], num_sim)) for i in np.arange(num_sim):", "== constituency_matrix) assert np.all(constituency_matrix_original == env.constituency_matrix) def test_priority_heuristic_agent_map_state_to_actions_full_priorities_full_buffer(): priorities = {0: 0, 1:", "0, 1, 0]]), np.array([[0, 0, 1, 1]])] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env,", "env_params[\"list_boundary_constraint_matrices\"] = [np.hstack((np.eye(2), np.zeros((2, 2)))), np.hstack((np.zeros((2, 2)), np.eye(2)))] 
env = crw.ControlledRandomWalk(**env_params) agent =", "Station scheduling three buffers, the sum of their size having to be above", "agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) action = np.array([[1], [0], [1], [0], [0], [0], [0]])", "custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_action = np.array([[1], [0], [1], [0], [0], [1], [0]]) action =", "get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_action = np.array([[1],", "5e4 actions = np.zeros((buffer_processing_matrix.shape[1], int(num_sim))) for i in np.arange(int(num_sim)): actions[:, [i]] = agent.map_state_to_actions(state=state)", "{0: 0, 1: None, 2: 5} state = np.array([[10.], [10.], [10.]]) buffer_processing_matrix =", "stock. safety_stock = 9.9 state = 5 * np.ones((4, 1)) env_params = get_null_env_params(state)", "1 def test_random_heuristic_agent_multiple_buffers_multiple_resources_eye_cond_starving(): # Two stations, each one scheduling two buffers, each of", "np.array([11, 11])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1, 2)) env_params[\"list_boundary_constraint_matrices\"]", "np.all(action == np.array([1, 0, 0, 1])[:, None]) def test_priority_nonidling_heuristic_agent_multi_buffers_and_resources_sum_cond_reverse_order(): # Two stations, each", "[0., 0., 0., -1., -1., -1., -1.]]) constituency_matrix = np.array([[1., 0., 0., 0.,", "10.0 state = 5 * np.ones((4, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.array([[1,", "test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_starving(): # Two stations, each one scheduling two buffers, the sum of their", 
"test_random_heuristic_agent_starving(): # Single server queue safety_stock = 10.0 state = 5 * np.ones((1,", "1, 0, 0]]), np.array([[0, 0, 1, 1]])] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env,", "env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1, 3))] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action =", "matter since the largest buffer has to be above safety stock in this", "float(num_sim) np.testing.assert_array_almost_equal(average_action.reshape(-1, 1), expected_action, decimal=2) def test_priority_heuristic_agent_map_state_to_actions_full_priorities_empty_buffer(): np.random.seed(41) priorities = {0: 0, 1:", "= 1.1 * np.ones((3, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 3)) env_params[\"list_boundary_constraint_matrices\"]", "def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_starving(): # One station scheduling two buffers, one larger than the other,", "== np.array([1, 0, 0, 1])[:, None]) def test_priority_nonidling_heuristic_agent_multi_buffers_and_resources_sum_cond_reverse_order(): # Two stations, each one", "snc.environments.job_generators.discrete_review_job_generator \\ as drjg import snc.environments.controlled_random_walk as crw import snc.environments.state_initialiser as si import", "with respect to previous test. buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state", "one scheduling two buffers, the sum of their size having to be above", "= {0: 0, 2: 1} state = np.array([[10.], [10.], [10.]]) buffer_processing_matrix = np.array([[-1.,", "sum of their size having to be above # safety stock. safety_stock =", "above safety # stock. 
safety_stock = 9.9 state = np.array([4, 5, 5, 5])[:,", "crw import snc.environments.state_initialiser as si import snc.agents.general_heuristics.random_nonidling_agent \\ as random_nonidling_agent import snc.agents.general_heuristics.longest_buffer_priority_agent \\", "np.array([1, 0, 0, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_largest_buffers_multiple_resources_sum_cond(): # Two stations, each one scheduling", "crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action[2:4]) == 1 and", "expected_action = np.array([[1], [0], [1], [0], [0], [1], [0]]) action = agent.map_state_to_actions(state=state) assert", "safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action) == 1 def test_random_heuristic_agent_multiple_buffers_multiple_resources_eye_cond_starving(): # Two stations,", "= random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action) == 1 def test_random_heuristic_agent_multiple_buffers_multiple_resources_eye_cond_starving(): #", "agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert (action[0] + action[2] == 1)", "\"cost_per_buffer\": np.zeros_like(state), \"capacity\": np.zeros_like(state), \"constituency_matrix\": constituency_matrix, \"job_generator\": drjg.DeterministicDiscreteReviewJobGenerator( arrival_rate, buffer_processing_matrix, sim_time_interval=time_interval ), \"state_initialiser\":", "action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((2, 1))) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_small_one_starve(): # One station", "0., 0., 0., 0.], [0., -1., 0., 0., 0., 0., 0.], [0., 0.,", "[0], [0], [0]]) activities = np.array([3, 4, 5, 6]) updated_action = agent.sample_random_actions(state=state, 
action=action,", "random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action) == 1 def test_random_heuristic_agent_multiple_buffers_multiple_resources_eye_cond_starving(): # Two", "state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1, 2)) env_params[\"list_boundary_constraint_matrices\"] = [np.eye(2)] env = crw.ControlledRandomWalk(**env_params) agent", "constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) with pytest.raises(AssertionError): _ = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) def test_priority_heuristic_agent_sample_random_action_empty_possible_actions(): priorities", "state = 5 * np.ones((1, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 1))", "that # buffer 1 is connected with buffer 3, and 2 with 4.", "env_params[\"list_boundary_constraint_matrices\"] = [np.array([[1, 1, 0, 0], [0, 0, 1, 0]]), np.array([[0, 0, 1,", "0., -1., 0.]]) constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.], [0.,", "-1., -1., 0., 0., 0., 0.], [0., 0., 0., 0., 0., -1., 0.]])", "0, -1, 0], [0, 1, 0, -1]]) safety_stock = 10 state = np.array([20,", "== np.array([0, 1])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_largest_buffers_eye_condition(): # One station scheduling two buffers, both", "state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.array([[1, 1, 1, 0], [0, 0, 1, 1]]) env_params[\"list_boundary_constraint_matrices\"]", "20, 30])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0,", "= 1 return { \"cost_per_buffer\": np.zeros_like(state), \"capacity\": np.zeros_like(state), \"constituency_matrix\": constituency_matrix, \"job_generator\": 
drjg.DeterministicDiscreteReviewJobGenerator( arrival_rate,", "custom_priority_agent.CustomActivityPriorityAgent(env, priorities) action = np.array([[1], [0], [1], [0], [0], [0], [0]]) activities =", "= get_null_env_params(state) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]]) env_params[\"list_boundary_constraint_matrices\"]", "num_sim = int(1e4) actions = np.zeros((buffer_processing_matrix.shape[1], num_sim)) for i in np.arange(num_sim): actions[:, [i]]", "longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert (action[0] + action[2] == 1) and (action[1]", "-1., -1., 0., -1.], [0., -1., -1., 0., 0., 0., 0.], [0., 0.,", "env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) assert", "action == np.zeros((1, 1)) def test_priority_nonidling_heuristic_agent(): # Single server queue buffer_processing_matrix = -", "above safety stock in this # agent. 
buffer_processing_matrix = np.array([[-1, 0, 0, 0],", "import snc.agents.general_heuristics.custom_activity_priority_agent \\ as custom_priority_agent def get_null_env_params(state, num_resources=None, buffer_processing_matrix=None, constituency_matrix=None): num_buffers = state.shape[0]", "= custom_priority_agent.CustomActivityPriorityAgent(env, priorities) assert agent.priorities == expected_priorities def test_priority_heuristic_agent_init_wrong_activity_given(): priorities = {0: 0,", "= 4.0 state = 5 * np.ones((1, 1)) env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix)", "crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action) == 1 def", "custom_priority_agent.CustomActivityPriorityAgent(env, priorities) action = np.array([[1], [0], [1], [0], [0], [0], [0]]) expected_action =", "9.9 state = 5 * np.ones((4, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.array([[1,", "-1]]) safety_stock = 10 state = np.array([30, 20, 20, 30])[:, None] env_params =", "average_updated_action = np.sum(updated_action, axis=1) / float(num_sim) np.testing.assert_array_almost_equal(average_updated_action.reshape(-1, 1), expected_action, decimal=2) def test_priority_heuristic_agent_map_state_to_actions_no_priorities(): np.random.seed(42)", "= longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock, name=\"LPAAgent\") action = agent.map_state_to_actions(state) assert action == np.ones((1, 1)) def", "= crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([0,", "np.array([0, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_activities_buffers_and_resources(): # Two stations, each one scheduling two buffers.", "np.sum(action) 
== 2 def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_1_starve(): # Two stations, each one scheduling two buffers,", "[0], [0], [0], [1], [0], [0]]) expected_action = np.array([[1], [0], [1], [0], [1],", "np.array([0, 1, 1, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_and_resources_sum_cond_2_starve(): # Two stations, each one scheduling", "env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_average_action = np.array([[1], [0.], [1.], [0.33],", "Only the large one is above # safety stock, swap order with respect", "since the largest buffer has to be above safety stock in this #", "[10.]]) buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0., 0., 0.], [0., -1., -1.,", "longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([1, 0, 0, 1])[:, None])", "state = np.array([9, 11])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1,", "order with respect to previous test. 
buffer_processing_matrix = - np.eye(2) safety_stock = 10.0", "= get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.array([[1, 1, 1, 0], [0, 0, 1,", "= random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((3, 1))) def test_random_heuristic_agent_multiple_buffers_eye_condition():", "get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) assert agent.priorities ==", "30, 30, 20])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.array([[1, 1,", "= 10.0 state = np.array([11, 11])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"]", "assert np.sum(action) == 2 def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_starving(): # Two stations, each one scheduling two", "test_priority_heuristic_agent_map_state_to_actions_full_priorities_full_buffer(): priorities = {0: 0, 1: 2, 2: 5} state = np.array([[10.], [10.],", "0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_reverse_order(): # One station scheduling two buffers, one larger than", "4 * np.ones((4, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0, 0],", "- np.eye(2) safety_stock = 10.0 state = np.array([9, 11])[:, None] env_params = get_null_env_params(", "4.0 state = 5 * np.ones((1, 1)) env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"]", "= longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action 
= agent.map_state_to_actions(state) assert np.sum(action) == 1 def test_priority_nonidling_heuristic_agent_multiple_buffers_multiple_resources_sum_cond(): #", "to be above safety stock. safety_stock = 10.0 state = 5 * np.ones((4,", "state = np.array([[10.], [10.], [10.]]) buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0., 0.,", "state = np.array([9, 5])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1,", "= custom_priority_agent.CustomActivityPriorityAgent(env, priorities) action = np.array([[1], [0], [1], [0], [0], [0], [0]]) expected_action", "state = 10 * np.ones((4, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.array([[1, 1,", "buffer_processing_matrix = - np.ones((1, 1)) safety_stock = 4.0 state = 5 * np.ones((1,", "above safety stock. safety_stock = 9.9 state = 10 * np.ones((4, 1)) env_params", "2]) updated_action = agent.sample_random_actions(state=state, action=action, activities=activities) assert np.all(expected_action == updated_action) def test_priority_heuristic_agent_sample_random_action_multiple_possible_actions(): np.random.seed(42)", "np.zeros((4, 1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_eye_cond(): # Two stations, each one scheduling two buffers, each", "size having to be above safety # stock. 
safety_stock = 9.9 state =", "1])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_largest_buffers_eye_condition(): # One station scheduling two buffers, both equal and", "= agent.map_state_to_actions(state) assert np.sum(action) == 2 def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_1_starve(): # Two stations, each one", "= crw.ControlledRandomWalk(**env_params) with pytest.raises(AssertionError): _ = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) def test_priority_heuristic_agent_sample_random_action_empty_possible_actions(): priorities = {0:", "= [np.ones((1, 1))] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state)", "constituency_matrix.copy() env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities)", "== np.zeros((1, 1)) def test_priority_nonidling_heuristic_agent(): # Single server queue buffer_processing_matrix = - np.ones((1,", "[0]]) activities = np.array([1, 2]) updated_action = agent.sample_random_actions(state=state, action=action, activities=activities) assert np.all(expected_action ==", "get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 3)) env_params[\"list_boundary_constraint_matrices\"] = [np.eye(3)] env = crw.ControlledRandomWalk(**env_params) agent =", "than the other, both above safety stock (swap # order with previous test).", "of them having to be above safety stock. 
safety_stock = 9.9 state =", "their size having to be above safety stock safety_stock = 10.0 state =", "0, 1, 1]])] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state)", "0.], [0., -1., -1., 0., 0., 0., 0.], [0., 0., 0., -1., -1.,", "= crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert action == np.zeros((1,", "np.eye(2) safety_stock = 10.0 state = np.array([11, 11])[:, None] env_params = get_null_env_params( state,", "num_resources is None: num_resources = num_buffers if buffer_processing_matrix is None: buffer_processing_matrix = -np.triu(np.ones((num_buffers,", "def get_null_env_params(state, num_resources=None, buffer_processing_matrix=None, constituency_matrix=None): num_buffers = state.shape[0] arrival_rate = np.ones_like(state) if num_resources", "[0]]) expected_action = np.array([[1], [0], [1], [0], [1], [0], [0]]) activities = np.array([1,", "= - np.ones((1, 1)) safety_stock = 4.0 state = 5 * np.ones((1, 1))", "stations, each one scheduling two buffers, each of them having to be above", "= crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) assert agent.priorities == expected_priorities def test_priority_heuristic_agent_init_wrong_activity_given(): priorities", "def test_priority_nonidling_heuristic_agent_multiple_largest_buffers_eye_condition(): # One station scheduling two buffers, both equal and above safety", "larger than the other. 
Only the large one is above # safety stock.", "env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action[2:4]) ==", "== np.array([1, 0, 0, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_largest_buffers_multiple_resources_sum_cond(): # Two stations, each one", "their size having to be above safety # stock. safety_stock = 9.9 state", "size having to be above safety stock safety_stock = 10.0 state = 3", "= [np.ones((1, 1))] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock, name=\"LPAAgent\") action =", "random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action[0:2]) == 1 and np.all(action[2:4] == np.zeros((2,", "np.ones_like(state) if num_resources is None: num_resources = num_buffers if buffer_processing_matrix is None: buffer_processing_matrix", "1))] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock, name=\"LPAAgent\") action = agent.map_state_to_actions(state) assert", "of them having to be above safety stock. safety_stock = 10.0 state =", "), \"state_initialiser\": si.DeterministicCRWStateInitialiser(state), \"job_conservation_flag\": True, \"list_boundary_constraint_matrices\": None, } def test_random_heuristic_agent_starving(): # Single server", "action = agent.map_state_to_actions(state) assert (action[0] + action[2] == 1) and (action[1] == 0)", "size having to be above safety stock safety_stock = 10.0 state = 5", "0., 0., 0., -1., 0.]]) constituency_matrix = np.array([[1., 0., 0., 0., 0., 0.,", "and above safety stock. 
buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state =", "np.ones((1, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 1)) env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1, 1))]", "env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action[0:2]) ==", "30])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1, 2)) env_params[\"list_boundary_constraint_matrices\"] =", "# safety stock, swap order with respect to previous test. buffer_processing_matrix = -", "0, 0]]), np.array([[0, 0, 1, 1]])] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock)", "= np.array([[-1., 0., -1., 0., 0., 0., 0.], [0., -1., 0., 0., 0.,", "np.sum(action) == 1 def test_random_heuristic_agent_multiple_buffers_multiple_resources_eye_cond_starving(): # Two stations, each one scheduling two buffers,", "= get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 3)) env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1, 3))] env = crw.ControlledRandomWalk(**env_params)", "10.0 state = np.array([9, 5])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] =", "safety stock. 
buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state = np.array([9, 5])[:,", "= {0: 0, 2: 5} expected_priorities = {0: 0, 1: None, 2: 5}", "action = agent.map_state_to_actions(state) assert np.sum(action[2:4]) == 1 and np.all(action[0:2] == np.zeros((2, 1))) def", "= np.array([[1, 1, 0, 0], [0, 0, 1, 1]]) env_params[\"list_boundary_constraint_matrices\"] = [np.array([[1, 1,", "pytest import snc.environments.job_generators.discrete_review_job_generator \\ as drjg import snc.environments.controlled_random_walk as crw import snc.environments.state_initialiser as", "agent.map_state_to_actions(state) assert np.all(action == np.array([1, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_reverse_order(): # One station scheduling", "station scheduling two buffers, one larger than the other, both above safety stock.", "safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action) == 2 def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_1_starve(): # Two stations,", "= custom_priority_agent.CustomActivityPriorityAgent(env, priorities) def test_priority_heuristic_agent_sample_random_action_empty_possible_actions(): priorities = {0: 0, 1: 2, 2: 5}", "One station scheduling two buffers, one larger than the other, but both below", "env.constituency_matrix) def test_priority_heuristic_agent_map_state_to_actions_full_priorities_full_buffer(): priorities = {0: 0, 1: 2, 2: 5} state =", "[0], [1], [0], [0]]) expected_action = np.array([[1], [0], [1], [0], [1], [0], [0]])", "4, 5, 6]) num_sim = int(1e4) updated_action = np.zeros((buffer_processing_matrix.shape[1], num_sim)) for i in", "# agent. 
buffer_processing_matrix = np.array([[-1, 0, -1, 0], [0, -1, 0, 0], [1,", "buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_action = np.array([[1], [0.5],", "i in np.arange(int(num_sim)): actions[:, [i]] = agent.map_state_to_actions(state=state) average_action = np.sum(actions, axis=1) / num_sim", "priorities) expected_action = np.array([[1], [0], [1], [0], [0], [1], [0]]) action = agent.map_state_to_actions(state=state)", "1: None, 2: 5} state = np.array([[10.], [10.], [10.]]) buffer_processing_matrix = np.array([[-1., 0.,", "= crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) action = np.array([[1], [0], [1], [0], [0],", "is above # safety stock, swap order with respect to previous test. buffer_processing_matrix", "1, 0, 0], [0, 0, 1, 1]]) env_params[\"list_boundary_constraint_matrices\"] = [np.array([[1, 1, 0, 0]]),", "state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]]) env_params[\"list_boundary_constraint_matrices\"]", "env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) assert agent.priorities == expected_priorities def test_priority_heuristic_agent_init_wrong_activity_given():", "def test_priority_heuristic_agent_init_all_resources_given(): priorities = {0: 0, 1: 2, 2: 5} state = np.array([[10.],", "queue buffer_processing_matrix = - np.ones((1, 1)) safety_stock = 10.0 state = 5 *", "[0, 0, 1, 1]]) env_params[\"list_boundary_constraint_matrices\"] = [np.array([[1, 1, 0, 0], [0, 0, 1,", "= crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action[0:2]) == 1", 
"previous test). buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state = np.array([20, 30])[:,", "1), expected_average_action, decimal=2) assert np.all(constituency_matrix_original == constituency_matrix) assert np.all(constituency_matrix_original == env.constituency_matrix) def test_priority_heuristic_agent_map_state_to_actions_full_priorities_full_buffer():", "= np.sum(actions, axis=1) / float(num_sim) np.testing.assert_array_almost_equal(average_action.reshape(-1, 1), expected_action, decimal=2) def test_priority_heuristic_agent_map_state_to_actions_full_priorities_empty_buffer(): np.random.seed(41) priorities", "random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((4, 1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond(): #", "above safety stock (swap # order with previous test). buffer_processing_matrix = - np.eye(2)", "each of them having to be above safety stock safety_stock = 10.0 state", "def test_random_heuristic_agent(): # Single server queue safety_stock = 1.0 state = 1.1 *", "= 1.0 state = 1.1 * np.ones((3, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] =", "assert action == np.zeros((1, 1)) def test_priority_nonidling_heuristic_agent(): # Single server queue buffer_processing_matrix =", "def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition(): # One station scheduling two buffers, one larger than the other,", "0., 0., 0., 0., 0.], [0., 1., 1., 0., 0., 0., 0.], [0.,", "# One station scheduling two buffers, both equal and above safety stock. 
buffer_processing_matrix", "safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((4, 1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_eye_cond(): # Two", "[0], [1], [0], [0], [1], [0]]) action = agent.map_state_to_actions(state=state) assert np.all(action == expected_action)", "[0.33]]) num_sim = 5e4 actions = np.zeros((buffer_processing_matrix.shape[1], int(num_sim))) for i in np.arange(int(num_sim)): actions[:,", "np.array([20, 30, 30, 20])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.array([[1,", "[0]]) activities = np.array([3, 4, 5, 6]) updated_action = agent.sample_random_actions(state=state, action=action, activities=activities) assert", "test_priority_heuristic_agent_map_state_to_actions_no_priorities(): np.random.seed(42) priorities = {} state = np.array([[10.], [10.], [10.]]) buffer_processing_matrix = np.array([[-1.,", "crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action[0:2]) == 1 and", "0], [0, 0, 1, 1]]) env_params[\"list_boundary_constraint_matrices\"] = [np.array([[1, 1, 0, 0], [0, 0,", "\\ as random_nonidling_agent import snc.agents.general_heuristics.longest_buffer_priority_agent \\ as longest_priority_agent import snc.agents.general_heuristics.custom_activity_priority_agent \\ as custom_priority_agent", "crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert action == np.ones((1, 1))", "-1, 0], [0, 1, 0, -1]]) safety_stock = 10 state = np.array([20, 30,", "safety_stock) action = agent.map_state_to_actions(state) assert action == np.zeros((1, 1)) def test_priority_nonidling_heuristic_agent(): # Single", "= agent.sample_random_actions(state=state, action=action, 
activities=activities) assert np.all(action == updated_action) def test_priority_heuristic_agent_sample_random_action_one_possible_action(): priorities = {0:", "np.ones((1, 1)) safety_stock = 4.0 state = 5 * np.ones((1, 1)) env_params =", "3))] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action", "= crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action[0:2]) == 1", "1} state = np.array([[10.], [10.], [10.]]) buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0.,", "== 1 and np.all(action[0:2] == np.zeros((2, 1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_2_starve(): # Two stations, each", "updated_action = np.zeros((buffer_processing_matrix.shape[1], num_sim)) for i in np.arange(num_sim): updated_action[:, [i]] = agent.sample_random_actions(state=state, action=action,", "action = agent.map_state_to_actions(state) assert np.all(action == np.array([1, 0, 0, 1])[:, None]) def test_priority_nonidling_heuristic_agent_multi_buffers_and_resources_sum_cond_reverse_order():", "= {0: 0, 1: 2, 2: 5} state = np.array([[10.], [0.], [10.]]) buffer_processing_matrix", "None]) def test_priority_nonidling_heuristic_agent_multi_buffers_eye_cond_small_one_starve_reverse_ord(): # One station scheduling two buffers, one larger than the", "= 9.9 state = np.array([4, 5, 5, 5])[:, None] env_params = get_null_env_params(state) env_params[\"constituency_matrix\"]", "agent.map_state_to_actions(state) assert np.all(action == np.zeros((2, 1))) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_small_one_starve(): # One station scheduling two", "1 and np.all(action[2:4] == np.array([0, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_activities_buffers_and_resources(): # 
Two stations, each", "is above # safety stock. buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state", "np.zeros((buffer_processing_matrix.shape[1], int(num_sim))) for i in np.arange(int(num_sim)): actions[:, [i]] = agent.map_state_to_actions(state=state) average_action = np.sum(actions,", "buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_action = np.array([[1], [0],", "0], [0, 1, 0, -1]]) safety_stock = 10 state = np.array([30, 20, 20,", "[0.], [10.]]) buffer_processing_matrix = np.array([[-1., 0., -1., 0., 0., 0., 0.], [0., -1.,", "assert np.sum(action) == 1 def test_priority_nonidling_heuristic_agent_multiple_buffers_multiple_resources_sum_cond(): # Two stations, each one scheduling two", "buffers, one larger than the other, both above safety stock. buffer_processing_matrix = -", "float(num_sim) np.testing.assert_array_almost_equal(average_updated_action.reshape(-1, 1), expected_action, decimal=2) def test_priority_heuristic_agent_map_state_to_actions_no_priorities(): np.random.seed(42) priorities = {} state =", "0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_largest_buffers_multiple_resources_sum_cond(): # Two stations, each one scheduling two buffers. 
The", "longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([0, 1])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_largest_buffers_eye_condition():", "== np.ones((1, 1)) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_starving(): # One station scheduling two buffers, one larger", "crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) action = np.array([[1], [0], [1], [0], [0], [0],", "+ action[2] == 1) and (action[1] == 0) and (action[3] == 1) def", "get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1, 1)) env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1, 1))] env =", "np.ones((1, 1)) env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1, 1))] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock,", "{0: 0, 2: 5} expected_priorities = {0: 0, 1: None, 2: 5} state", "assert np.sum(action) == 1 def test_random_heuristic_agent_multiple_buffers_multiple_resources_eye_cond_starving(): # Two stations, each one scheduling two", "np.all(action == np.zeros((4, 1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond(): # Two stations, each one scheduling two", "= get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_average_action =", "state = 4 * np.ones((4, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.array([[1, 1,", "to be above safety stock safety_stock = 1.0 state = 1.1 * np.ones((3,", 
"test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_starving(): # One station scheduling two buffers, one larger than the other, but", "num_sim np.testing.assert_array_almost_equal(average_action.reshape(-1, 1), expected_average_action, decimal=2) assert np.all(constituency_matrix_original == constituency_matrix) assert np.all(constituency_matrix_original == env.constituency_matrix)", "num_resources))) if constituency_matrix is None: constituency_matrix = np.zeros((num_resources, num_resources)) time_interval = 1 return", "0, 0], [0, -1, 0, 0], [1, 0, -1, 0], [0, 1, 0,", "[0, -1, 0, 0], [1, 0, -1, 0], [0, 1, 0, -1]]) safety_stock", "- np.ones((1, 1)) safety_stock = 4.0 state = 5 * np.ones((1, 1)) env_params", "20, 5, 20])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.array([[1, 1,", "[0.25]]) activities = np.array([3, 4, 5, 6]) num_sim = int(1e4) updated_action = np.zeros((buffer_processing_matrix.shape[1],", "= np.array([11, 10])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1, 2))", "np.all(action == np.zeros((3, 1))) def test_random_heuristic_agent_multiple_buffers_sum_condition(): # Station scheduling three buffers, the sum", "1, 1]]) env_params[\"list_boundary_constraint_matrices\"] = [np.array([[1, 1, 0, 0], [0, 0, 1, 0]]), np.array([[0,", "get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) action = np.array([[1],", "constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_average_action = np.array([[1], [0.], [1.],", "priorities = {} state = 
np.array([[10.], [10.], [10.]]) buffer_processing_matrix = np.array([[-1., 0., 0.,", "Only the large one is above # safety stock. buffer_processing_matrix = - np.eye(2)", "1., 1., 1.]]) constituency_matrix_original = constituency_matrix.copy() env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env =", "buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1, 1)) env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1, 1))] env = crw.ControlledRandomWalk(**env_params) agent", "two buffers, one larger than the other. Only the large one is above", "assert np.all(action == np.array([0, 1])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_largest_buffers_eye_condition(): # One station scheduling two", "= 10 state = np.array([20, 30, 30, 20])[:, None] env_params = get_null_env_params( state,", "both equal and above safety stock. buffer_processing_matrix = - np.eye(2) safety_stock = 10.0", "None: num_resources = num_buffers if buffer_processing_matrix is None: buffer_processing_matrix = -np.triu(np.ones((num_buffers, num_resources))) if", "env_params[\"list_boundary_constraint_matrices\"] = [np.eye(2)] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state)", "{0: 0, 1: 2, 2: 5} state = np.array([[10.], [10.], [0.]]) buffer_processing_matrix =", "stock. 
safety_stock = 10.0 state = 5 * np.ones((4, 1)) env_params = get_null_env_params(state)", "= np.ones((1, 3)) env_params[\"list_boundary_constraint_matrices\"] = [np.eye(3)] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock)", "1))) def test_random_heuristic_agent_multiple_buffers_sum_condition(): # Station scheduling three buffers, the sum of their size", "np.array([3, 4, 5, 6]) num_sim = int(1e4) updated_action = np.zeros((buffer_processing_matrix.shape[1], num_sim)) for i", "[np.eye(2)] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action)", "than the other. Only the large one is above # safety stock. buffer_processing_matrix", "i in np.arange(num_sim): actions[:, [i]] = agent.map_state_to_actions(state=state) average_action = np.sum(actions, axis=1) / float(num_sim)", "-1., -1., -1., -1.]]) constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.],", "two buffers, one larger than the other, but both below safety stock. 
buffer_processing_matrix", "action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((4, 1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_eye_cond(): # Two stations,", "2: 5} state = np.array([[10.], [10.], [10.]]) buffer_processing_matrix = np.array([[-1., 0., 0., 0.,", "be above safety stock safety_stock = 10.0 state = 3 * np.ones((3, 1))", "import snc.environments.controlled_random_walk as crw import snc.environments.state_initialiser as si import snc.agents.general_heuristics.random_nonidling_agent \\ as random_nonidling_agent", "assert np.all(action == np.array([1, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_reverse_order(): # One station scheduling two", "= 10.0 state = 5 * np.ones((1, 1)) env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix)", "0., -1.], [0., -1., -1., 0., 0., 0., 0.], [0., 0., 0., 0.,", "= agent.sample_random_actions(state=state, action=action, activities=activities) assert np.all(expected_action == updated_action) def test_priority_heuristic_agent_sample_random_action_multiple_possible_actions(): np.random.seed(42) priorities =", "safety stock safety_stock = 10.0 state = 3 * np.ones((3, 1)) env_params =", "buffers, the sum of their size having to be above safety stock safety_stock", "random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action[2:4]) == 1 and np.all(action[0:2] == np.zeros((2,", "server queue safety_stock = 10.0 state = 5 * np.ones((1, 1)) env_params =", "= 10.0 state = np.array([11, 10])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"]", "random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action) == 1 def test_random_heuristic_agent_multiple_buffers_sum_condition_starving(): # 
Station", "= np.array([[1], [0], [1], [0], [1], [0], [0]]) activities = np.array([1, 2]) updated_action", "np.all(action[0:2] == np.zeros((2, 1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_2_starve(): # Two stations, each one scheduling two", "0., 0.], [0., 0., 0., 1., 1., 1., 1.]]) constituency_matrix_original = constituency_matrix.copy() env_params", "np.array([[1], [0], [1], [0], [1], [0], [0]]) activities = np.array([1, 2]) updated_action =", "[10.]]) buffer_processing_matrix = np.array([[-1., 0., -1., 0., 0., 0., 0.], [0., -1., 0.,", "crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert action == np.zeros((1, 1))", "buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state = np.array([30, 20])[:, None] env_params", "np.array([9, 5])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1, 2)) env_params[\"list_boundary_constraint_matrices\"]", "= agent.map_state_to_actions(state) assert np.all(action == np.zeros((4, 1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond(): # Two stations, each", "np.array([[1, 1, 0, 0], [0, 0, 1, 1]]) env_params[\"list_boundary_constraint_matrices\"] = [np.array([[1, 1, 0,", "of condition doesn't matter since the largest buffer has to be above safety", "(action[3] == 1) def test_priority_heuristic_agent_init_all_resources_given(): priorities = {0: 0, 1: 2, 2: 5}", "= crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([1,", "random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action) == 2 def 
test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_1_starve(): # Two", "np.arange(int(num_sim)): actions[:, [i]] = agent.map_state_to_actions(state=state) average_action = np.sum(actions, axis=1) / num_sim np.testing.assert_array_almost_equal(average_action.reshape(-1, 1),", "2)), np.eye(2)))] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert", "= 10.0 state = np.array([20, 30])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"]", "them having to be above safety stock safety_stock = 10.0 state = 5", "-1., -1., -1.]]) constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.], [0.,", "buffer_processing_matrix = -np.triu(np.ones((num_buffers, num_resources))) if constituency_matrix is None: constituency_matrix = np.zeros((num_resources, num_resources)) time_interval", "one is above # safety stock, swap order with respect to previous test.", "assert np.sum(action[2:4]) == 1 and np.all(action[0:2] == np.zeros((2, 1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_2_starve(): # Two", "for i in np.arange(int(num_sim)): actions[:, [i]] = agent.map_state_to_actions(state=state) average_action = np.sum(actions, axis=1) /", "as longest_priority_agent import snc.agents.general_heuristics.custom_activity_priority_agent \\ as custom_priority_agent def get_null_env_params(state, num_resources=None, buffer_processing_matrix=None, constituency_matrix=None): num_buffers", "random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action) == 2 def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_starving(): # Two", "11])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) 
env_params[\"constituency_matrix\"] = np.ones((1, 2)) env_params[\"list_boundary_constraint_matrices\"] =", "[0.]]) buffer_processing_matrix = np.array([[-1., 0., 0., -1., -1., 0., -1.], [0., -1., -1.,", "= np.array([[1, 1, 0, 0], [0, 0, 1, 1]]) env_params[\"list_boundary_constraint_matrices\"] = [np.hstack((np.eye(2), np.zeros((2,", "activities=activities) assert np.all(expected_action == updated_action) def test_priority_heuristic_agent_sample_random_action_multiple_possible_actions(): np.random.seed(42) priorities = {0: 0, 1:", "[1], [0], [0]]) activities = np.array([1, 2]) updated_action = agent.sample_random_actions(state=state, action=action, activities=activities) assert", "agent.map_state_to_actions(state) assert np.sum(action) == 1 def test_random_heuristic_agent_multiple_buffers_multiple_resources_eye_cond_starving(): # Two stations, each one scheduling", "station scheduling two buffers, one larger than the other, both above safety stock", "= {0: 0, 1: 2, 2: 5} state = np.array([[10.], [10.], [0.]]) buffer_processing_matrix", "test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_1_starve(): # Two stations, each one scheduling two buffers, the sum of their", "1 is connected with buffer 3, and 2 with 4. 
# Kind of", "agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((4, 1))) def", "= agent.map_state_to_actions(state) assert np.all(action == np.array([0, 1, 1, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_and_resources_sum_cond_2_starve(): #", "safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([1, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition(): #", "connected in serial, such that # buffer 1 is connected with buffer 3,", "as random_nonidling_agent import snc.agents.general_heuristics.longest_buffer_priority_agent \\ as longest_priority_agent import snc.agents.general_heuristics.custom_activity_priority_agent \\ as custom_priority_agent def", "-1., 0., -1.], [0., -1., -1., 0., 0., 0., 0.], [0., 0., 0.,", "random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((3, 1))) def test_random_heuristic_agent_multiple_buffers_eye_condition(): #", "buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state = np.array([20, 30])[:, None] env_params", "safety_stock = 10.0 state = 5 * np.ones((1, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"]", "activities=activities) assert np.all(action == updated_action) def test_priority_heuristic_agent_sample_random_action_one_possible_action(): priorities = {0: 0, 1: 2,", "the other. Only the large one is above # safety stock. buffer_processing_matrix =", "1 def test_priority_nonidling_heuristic_agent_multiple_buffers_multiple_resources_sum_cond(): # Two stations, each one scheduling two buffers. 
The stations", "-1, 0], [0, -1, 0, 0], [1, 0, -1, 0], [0, 1, 0,", "= crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((4,", "buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state = np.array([11, 10])[:, None] env_params", "0., 1., 1., 1., 1.]]) env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params)", "0, 2: 1} state = np.array([[10.], [10.], [10.]]) buffer_processing_matrix = np.array([[-1., 0., 0.,", "equal and above safety stock. buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state", "snc.environments.controlled_random_walk as crw import snc.environments.state_initialiser as si import snc.agents.general_heuristics.random_nonidling_agent \\ as random_nonidling_agent import", "0, 1])[:, None]) def test_priority_nonidling_heuristic_agent_multi_buffers_and_resources_sum_cond_reverse_order(): # Two stations, each one scheduling two buffers.", "be above safety stock safety_stock = 1.0 state = 1.1 * np.ones((3, 1))", "action = agent.map_state_to_actions(state) assert np.all(action == np.array([1, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_reverse_order(): # One", "= np.array([9, 5])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1, 2))", "[0, 1, 0, -1]]) safety_stock = 10 state = np.array([30, 30, 9, 5])[:,", "= crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_action = np.array([[1], [0], [1], [0], [0],", "5 * np.ones((4, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0, 0],", "stock in this # agent. 
buffer_processing_matrix = np.array([[-1, 0, -1, 0], [0, -1,", "np.ones((1, 1)) env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1, 1))] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock)", "priorities = {0: 0, 1: 2, 2: 5} state = np.array([[10.], [10.], [0.]])", "= agent.sample_random_actions(state=state, action=action, activities=activities) average_updated_action = np.sum(updated_action, axis=1) / float(num_sim) np.testing.assert_array_almost_equal(average_updated_action.reshape(-1, 1), expected_action,", "1)) def test_random_heuristic_agent(): # Single server queue safety_stock = 1.0 state = 1.1", "np.ones((3, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 3)) env_params[\"list_boundary_constraint_matrices\"] = [np.eye(3)] env", "sim_time_interval=time_interval ), \"state_initialiser\": si.DeterministicCRWStateInitialiser(state), \"job_conservation_flag\": True, \"list_boundary_constraint_matrices\": None, } def test_random_heuristic_agent_starving(): # Single", "buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0., 0., 0.], [0., -1., -1., 0.,", "stock. safety_stock = 9.9 state = 10 * np.ones((4, 1)) env_params = get_null_env_params(state)", "stock safety_stock = 1.0 state = 1.1 * np.ones((3, 1)) env_params = get_null_env_params(state)", "and (action[1] == 0) and (action[3] == 1) def test_priority_heuristic_agent_init_all_resources_given(): priorities = {0:", "safety stock in this # agent. buffer_processing_matrix = np.array([[-1, 0, -1, 0], [0,", "# agent. 
buffer_processing_matrix = np.array([[-1, 0, 0, 0], [0, -1, 0, 0], [1,", "[1], [0.25], [0.25], [0.25], [0.25]]) activities = np.array([3, 4, 5, 6]) num_sim =", "[0, 1, 0, -1]]) safety_stock = 10 state = np.array([20, 30, 30, 20])[:,", "= random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action) == 2 def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_starving(): #", "0, -1, 0], [0, 1, 0, -1]]) safety_stock = 10 state = np.array([30,", "assert np.sum(action) == 1 def test_random_heuristic_agent_multiple_buffers_sum_condition_starving(): # Station scheduling three buffers, the sum", "other, both above safety stock. buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state", "1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_2_starve(): # Two stations, each one scheduling two buffers, the sum", "== 1 def test_priority_nonidling_heuristic_agent_multiple_buffers_multiple_resources_sum_cond(): # Two stations, each one scheduling two buffers. The", "agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action) == 1 def test_random_heuristic_agent_multiple_buffers_sum_condition_starving():", "0., 0., 0.], [0., 0., 0., -1., -1., -1., -1.]]) constituency_matrix = np.array([[1.,", "1]])] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action", "is connected with buffer 3, and 2 with 4. 
# Kind of condition", "= 5 * np.ones((1, 1)) env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1,", "= agent.map_state_to_actions(state) assert action == np.ones((1, 1)) def test_random_heuristic_agent_multiple_buffers_eye_condition_starving(): # Station scheduling three", "1))) def test_priority_nonidling_heuristic_agent_starving(): # Single server queue buffer_processing_matrix = - np.ones((1, 1)) safety_stock", "test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_small_one_starve(): # One station scheduling two buffers, one larger than the other. Only", "= np.array([[1], [0], [1], [0], [0], [1], [0]]) action = agent.map_state_to_actions(state=state) assert np.all(action", "1), expected_action, decimal=2) def test_priority_heuristic_agent_map_state_to_actions_no_priorities(): np.random.seed(42) priorities = {} state = np.array([[10.], [10.],", "0., 0., 0.], [0., -1., -1., 0., 0., 0., 0.], [0., 0., 0.,", "np.eye(2) safety_stock = 10.0 state = np.array([9, 11])[:, None] env_params = get_null_env_params( state,", "np.ones((4, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0, 0], [0, 0,", "2: 5} state = np.array([[10.], [10.], [0.]]) buffer_processing_matrix = np.array([[-1., 0., 0., -1.,", "as np import pytest import snc.environments.job_generators.discrete_review_job_generator \\ as drjg import snc.environments.controlled_random_walk as crw", "= 5 * np.ones((3, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 3)) env_params[\"list_boundary_constraint_matrices\"]", "and np.all(action[0:2] == np.zeros((2, 1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_2_starve(): # Two stations, each one scheduling", "agent.map_state_to_actions(state) assert action == np.zeros((1, 1)) def test_priority_nonidling_heuristic_agent(): # Single server 
queue buffer_processing_matrix", "np.zeros((3, 1))) def test_random_heuristic_agent_multiple_buffers_eye_condition(): # Station scheduling three buffers, each of them having", "0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_activities_buffers_and_resources(): # Two stations, each one scheduling two buffers. The", "def test_random_heuristic_agent_multiple_buffers_multiple_resources_eye_cond_starving(): # Two stations, each one scheduling two buffers, each of them", "= np.array([[-1, 0, -1, 0], [0, -1, 0, 0], [1, 0, -1, 0],", "= longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((2, 1))) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_small_one_starve():", "above safety stock safety_stock = 1.0 state = 1.1 * np.ones((3, 1)) env_params", "actions[:, [i]] = agent.map_state_to_actions(state=state) average_action = np.sum(actions, axis=1) / float(num_sim) np.testing.assert_array_almost_equal(average_action.reshape(-1, 1), expected_action,", "one is above # safety stock. buffer_processing_matrix = - np.eye(2) safety_stock = 10.0", "test_priority_nonidling_heuristic_agent_multiple_largest_buffers_eye_condition(): # One station scheduling two buffers, both equal and above safety stock.", "be above safety # stock. 
safety_stock = 9.9 state = 5 * np.ones((4,", "num_sim = 5e4 actions = np.zeros((buffer_processing_matrix.shape[1], int(num_sim))) for i in np.arange(int(num_sim)): actions[:, [i]]", "env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 3)) env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1, 3))] env =", "is None: num_resources = num_buffers if buffer_processing_matrix is None: buffer_processing_matrix = -np.triu(np.ones((num_buffers, num_resources)))", "Single server queue buffer_processing_matrix = - np.ones((1, 1)) safety_stock = 10.0 state =", "is None: buffer_processing_matrix = -np.triu(np.ones((num_buffers, num_resources))) if constituency_matrix is None: constituency_matrix = np.zeros((num_resources,", "action=action, activities=activities) average_updated_action = np.sum(updated_action, axis=1) / float(num_sim) np.testing.assert_array_almost_equal(average_updated_action.reshape(-1, 1), expected_action, decimal=2) def", "get_null_env_params(state, num_resources=None, buffer_processing_matrix=None, constituency_matrix=None): num_buffers = state.shape[0] arrival_rate = np.ones_like(state) if num_resources is", "10.0 state = np.array([9, 11])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] =", "= [np.array([[1, 1, 0, 0]]), np.array([[0, 0, 1, 1]])] env = crw.ControlledRandomWalk(**env_params) agent", "buffers, each of them having to be above safety stock. safety_stock = 10.0", "crw.ControlledRandomWalk(**env_params) with pytest.raises(AssertionError): _ = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) def test_priority_heuristic_agent_sample_random_action_empty_possible_actions(): priorities = {0: 0,", "test_priority_nonidling_heuristic_agent_multiple_buffers_multiple_resources_sum_cond(): # Two stations, each one scheduling two buffers. 
The stations are connected", "def test_random_heuristic_agent_multiple_buffers_multiple_resources_eye_cond(): # Two stations, each one scheduling two buffers, each of them", "longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action) == 1 def test_priority_nonidling_heuristic_agent_multiple_buffers_multiple_resources_sum_cond(): # Two", "0., 0., 0., 0., 0., 0.], [0., -1., -1., 0., 0., 0., 0.],", "agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert action == np.zeros((1, 1)) def", "= np.array([30, 20])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1, 2))", "np.ones((1, 3)) env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1, 3))] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock)", "0, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_largest_buffers_multiple_resources_sum_cond(): # Two stations, each one scheduling two buffers.", "updated_action) def test_priority_heuristic_agent_sample_random_action_multiple_possible_actions(): np.random.seed(42) priorities = {0: 0, 1: 2, 2: 5} state", "if num_resources is None: num_resources = num_buffers if buffer_processing_matrix is None: buffer_processing_matrix =", "agent.map_state_to_actions(state) assert np.sum(action) == 2 def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_1_starve(): # Two stations, each one scheduling", "9, 5])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0,", "state = 3 * np.ones((3, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 3))", "env_params = get_null_env_params(state=state, 
buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) with pytest.raises(AssertionError): _ = custom_priority_agent.CustomActivityPriorityAgent(env,", "priorities = {0: 0, 1: 2, 2: 5} state = np.array([[10.], [10.], [10.]])", "None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.array([[1, 1, 1, 0], [0,", "None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_and_resources_sum_cond_2_starve(): # Two stations, each one scheduling two buffers. The stations", "custom_priority_agent.CustomActivityPriorityAgent(env, priorities) assert agent.priorities == expected_priorities def test_priority_heuristic_agent_init_wrong_activity_given(): priorities = {0: 0, 2:", "0., 1., 1., 1., 1.]]) constituency_matrix_original = constituency_matrix.copy() env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix)", "/ float(num_sim) np.testing.assert_array_almost_equal(average_action.reshape(-1, 1), expected_action, decimal=2) def test_priority_heuristic_agent_map_state_to_actions_full_priorities_empty_buffer(): np.random.seed(41) priorities = {0: 0,", "safety_stock) action = agent.map_state_to_actions(state) assert action == np.zeros((1, 1)) def test_random_heuristic_agent(): # Single", "get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) with pytest.raises(AssertionError): _ = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) def", "actions[:, [i]] = agent.map_state_to_actions(state=state) average_action = np.sum(actions, axis=1) / num_sim np.testing.assert_array_almost_equal(average_action.reshape(-1, 1), expected_average_action,", "== 1 and np.all(action[2:4] == np.zeros((2, 1))) def 
test_priority_nonidling_heuristic_agent_starving(): # Single server queue", "1., 0., 0., 0., 0.], [0., 0., 0., 1., 1., 1., 1.]]) env_params", "np.zeros((2, 1))) def test_priority_nonidling_heuristic_agent_starving(): # Single server queue buffer_processing_matrix = - np.ones((1, 1))", "[0.], [0.33]]) num_sim = 5e4 actions = np.zeros((buffer_processing_matrix.shape[1], int(num_sim))) for i in np.arange(int(num_sim)):", "= np.ones_like(state) if num_resources is None: num_resources = num_buffers if buffer_processing_matrix is None:", "np.all(action == np.array([1, 0, 0, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_largest_buffers_multiple_resources_sum_cond(): # Two stations, each", "= np.array([[1], [0.], [1.], [0.33], [0.33], [0.], [0.33]]) num_sim = 5e4 actions =", "action = agent.map_state_to_actions(state) assert np.sum(action) == 1 def test_priority_nonidling_heuristic_agent_multiple_buffers_multiple_resources_sum_cond(): # Two stations, each", "# Two stations, each one scheduling two buffers, each of them having to", "[0.25]]) num_sim = int(1e4) actions = np.zeros((buffer_processing_matrix.shape[1], num_sim)) for i in np.arange(num_sim): actions[:,", "def test_priority_nonidling_heuristic_agent_starving(): # Single server queue buffer_processing_matrix = - np.ones((1, 1)) safety_stock =", "safety stock. safety_stock = 10 state = 4 * np.ones((4, 1)) env_params =", "to be above safety stock in this # agent. 
buffer_processing_matrix = np.array([[-1, 0,", "agent.map_state_to_actions(state) assert action == np.ones((1, 1)) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_starving(): # One station scheduling two", "is None: constituency_matrix = np.zeros((num_resources, num_resources)) time_interval = 1 return { \"cost_per_buffer\": np.zeros_like(state),", "1., 1., 1.]]) env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) with pytest.raises(AssertionError):", "1)) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_starving(): # One station scheduling two buffers, one larger than the", "safety_stock = 10 state = np.array([30, 30, 9, 5])[:, None] env_params = get_null_env_params(", "safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([1, 0, 0, 0])[:, None]) def", "two buffers, one larger than the other, both above safety stock. 
buffer_processing_matrix =", "(action[1] == 0) and (action[3] == 1) def test_priority_heuristic_agent_init_all_resources_given(): priorities = {0: 0,", "\"capacity\": np.zeros_like(state), \"constituency_matrix\": constituency_matrix, \"job_generator\": drjg.DeterministicDiscreteReviewJobGenerator( arrival_rate, buffer_processing_matrix, sim_time_interval=time_interval ), \"state_initialiser\": si.DeterministicCRWStateInitialiser(state), \"job_conservation_flag\":", "= longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([1, 0])[:, None]) def", "station scheduling two buffers, one larger than the other, but both below safety", "safety_stock) action = agent.map_state_to_actions(state) assert (action[0] + action[2] == 1) and (action[1] ==", "0., 0., 0., 0.], [0., 0., 0., 1., 1., 1., 1.]]) env_params =", "assert np.all(expected_action == updated_action) def test_priority_heuristic_agent_sample_random_action_multiple_possible_actions(): np.random.seed(42) priorities = {0: 0, 1: 2,", "[1], [0], [1], [0], [0]]) activities = np.array([1, 2]) updated_action = agent.sample_random_actions(state=state, action=action,", "state = 5 * np.ones((4, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.array([[1, 1,", "test_priority_nonidling_heuristic_agent(): # Single server queue buffer_processing_matrix = - np.ones((1, 1)) safety_stock = 4.0", "crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert (action[0] + action[2] ==", "above safety stock. 
buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state = np.array([11,", "= state.shape[0] arrival_rate = np.ones_like(state) if num_resources is None: num_resources = num_buffers if", "[0.5], [0.25], [0.25], [0.25], [0.25]]) num_sim = int(1e4) actions = np.zeros((buffer_processing_matrix.shape[1], num_sim)) for", "other, both above safety stock (swap # order with previous test). buffer_processing_matrix =", "= - np.eye(2) safety_stock = 10.0 state = np.array([9, 11])[:, None] env_params =", "np.all(action == updated_action) def test_priority_heuristic_agent_sample_random_action_one_possible_action(): priorities = {0: 0, 1: 2, 2: 5}", "* np.ones((3, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 3)) env_params[\"list_boundary_constraint_matrices\"] = [np.eye(3)]", "== np.zeros((2, 1))) def test_priority_nonidling_heuristic_agent_starving(): # Single server queue buffer_processing_matrix = - np.ones((1,", "updated_action = agent.sample_random_actions(state=state, action=action, activities=activities) assert np.all(expected_action == updated_action) def test_priority_heuristic_agent_sample_random_action_multiple_possible_actions(): np.random.seed(42) priorities", "safety_stock, name=\"LPAAgent\") action = agent.map_state_to_actions(state) assert action == np.ones((1, 1)) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_starving(): #", "np.ones((1, 1)) env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1, 1))] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock)", "0., 0., 0., 0., 0.], [0., -1., -1., 0., 0., 0., 0.], [0.,", "np.array([[10.], [0.], [10.]]) buffer_processing_matrix = np.array([[-1., 0., -1., 0., 0., 0., 0.], [0.,", "== np.zeros((3, 1))) def test_random_heuristic_agent_multiple_buffers_eye_condition(): # Station scheduling three buffers, each of them", "expected_priorities def 
test_priority_heuristic_agent_init_wrong_activity_given(): priorities = {0: 0, 2: 1} state = np.array([[10.], [10.],", "1, 0, 0], [0, 0, 1, 1]]) env_params[\"list_boundary_constraint_matrices\"] = [np.hstack((np.eye(2), np.zeros((2, 2)))), np.hstack((np.zeros((2,", "= longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert action == np.zeros((1, 1)) def test_priority_nonidling_heuristic_agent():", "env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action[0:2]) ==", "== np.zeros((3, 1))) def test_random_heuristic_agent_multiple_buffers_sum_condition(): # Station scheduling three buffers, the sum of", "assert np.all(action == np.zeros((2, 1))) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_small_one_starve(): # One station scheduling two buffers,", "np.all(action == np.zeros((4, 1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_eye_cond(): # Two stations, each one scheduling two", "test_priority_nonidling_heuristic_agent_multiple_activities_buffers_and_resources(): # Two stations, each one scheduling two buffers. The stations are connected", "safety_stock = 10.0 state = np.array([9, 5])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix)", "-1, 0, 0], [1, 0, -1, 0], [0, 1, 0, -1]]) safety_stock =", "each of them having to be above safety stock. safety_stock = 9.9 state", "4. 
# Kind of condition doesn't matter since the largest buffer has to", "crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action) == 2 def", "def test_random_heuristic_agent_multiple_buffers_eye_condition(): # Station scheduling three buffers, each of them having to be", "np.ones((3, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 3)) env_params[\"list_boundary_constraint_matrices\"] = [np.ones((1, 3))]", "0, 0], [0, 0, 1, 1]]) env_params[\"list_boundary_constraint_matrices\"] = [np.array([[1, 1, 0, 0]]), np.array([[0,", "the sum of their size having to be above # safety stock. safety_stock", "respect to previous test. buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state =", "env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_action = np.array([[1], [0.5], [0.5], [0.25],", "3 * np.ones((3, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 3)) env_params[\"list_boundary_constraint_matrices\"] =", "test_priority_heuristic_agent_sample_random_action_multiple_possible_actions(): np.random.seed(42) priorities = {0: 0, 1: 2, 2: 5} state = np.array([[10.],", "[0], [1], [0], [1], [0], [0]]) activities = np.array([1, 2]) updated_action = agent.sample_random_actions(state=state,", "2)) env_params[\"list_boundary_constraint_matrices\"] = [np.eye(2)] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action =", "np.zeros((num_resources, num_resources)) time_interval = 1 return { \"cost_per_buffer\": np.zeros_like(state), \"capacity\": np.zeros_like(state), \"constituency_matrix\": constituency_matrix,", "# order with previous test). 
buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state", "average_action = np.sum(actions, axis=1) / num_sim np.testing.assert_array_almost_equal(average_action.reshape(-1, 1), expected_average_action, decimal=2) assert np.all(constituency_matrix_original ==", "1, 0, -1]]) safety_stock = 10 state = np.array([20, 30, 30, 20])[:, None]", "safety stock. safety_stock = 10.0 state = 5 * np.ones((4, 1)) env_params =", "constituency_matrix=None): num_buffers = state.shape[0] arrival_rate = np.ones_like(state) if num_resources is None: num_resources =", "10.0 state = np.array([20, 30])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] =", "safety_stock = 10 state = 4 * np.ones((4, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"]", "one larger than the other, both above safety stock (swap # order with", "having to be above safety # stock. safety_stock = 9.9 state = 5", "scheduling three buffers, each of them having to be above safety stock safety_stock", "[0., -1., -1., 0., 0., 0., 0.], [0., 0., 0., -1., -1., -1.,", "[0.5], [0.5], [0.25], [0.25], [0.25], [0.25]]) num_sim = int(1e4) actions = np.zeros((buffer_processing_matrix.shape[1], num_sim))", "= num_buffers if buffer_processing_matrix is None: buffer_processing_matrix = -np.triu(np.ones((num_buffers, num_resources))) if constituency_matrix is", "= 10 state = np.array([30, 20, 9, 5])[:, None] env_params = get_null_env_params( state,", "num_resources=None, buffer_processing_matrix=None, constituency_matrix=None): num_buffers = state.shape[0] arrival_rate = np.ones_like(state) if num_resources is None:", "= get_null_env_params(state) env_params[\"constituency_matrix\"] = np.ones((1, 3)) env_params[\"list_boundary_constraint_matrices\"] = [np.eye(3)] env = crw.ControlledRandomWalk(**env_params) agent", "safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action[0:2]) == 1 and 
np.all(action[2:4] == np.zeros((2, 1)))", "5])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.array([[1, 1, 0, 0],", "large one is above # safety stock. buffer_processing_matrix = - np.eye(2) safety_stock =", "0]]), np.array([[0, 0, 1, 1]])] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action", "= 10.0 state = 5 * np.ones((1, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] =", "test_priority_heuristic_agent_init_not_all_resources_given(): priorities = {0: 0, 2: 5} expected_priorities = {0: 0, 1: None,", "1, 1]])] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert", "-1, 0], [0, 1, 0, -1]]) safety_stock = 10 state = np.array([30, 20,", "crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) assert agent.priorities == expected_priorities def test_priority_heuristic_agent_init_wrong_activity_given(): priorities =", "safety_stock = 10.0 state = 5 * np.ones((4, 1)) env_params = get_null_env_params(state) env_params[\"constituency_matrix\"]", "{0: 0, 1: 2, 2: 5} state = np.array([[10.], [0.], [10.]]) buffer_processing_matrix =", "= agent.map_state_to_actions(state) assert np.sum(action) == 1 def test_priority_nonidling_heuristic_agent_multiple_buffers_multiple_resources_sum_cond(): # Two stations, each one", "random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((3, 1))) def test_random_heuristic_agent_multiple_buffers_sum_condition(): #", "[0., 0., 0., 1., 1., 1., 1.]]) env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env", "= 
agent.map_state_to_actions(state) assert action == np.ones((1, 1)) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_starving(): # One station scheduling", "\\ as longest_priority_agent import snc.agents.general_heuristics.custom_activity_priority_agent \\ as custom_priority_agent def get_null_env_params(state, num_resources=None, buffer_processing_matrix=None, constituency_matrix=None):", "agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock, name=\"LPAAgent\") action = agent.map_state_to_actions(state) assert action == np.ones((1, 1))", "stock. safety_stock = 9.9 state = np.array([4, 5, 5, 5])[:, None] env_params =", "having to be above safety stock safety_stock = 10.0 state = 5 *", "agent.map_state_to_actions(state) assert np.all(action == np.array([1, 0, 0, 1])[:, None]) def test_priority_nonidling_heuristic_agent_multi_buffers_and_resources_sum_cond_reverse_order(): # Two", "1), expected_action, decimal=2) def test_priority_heuristic_agent_map_state_to_actions_full_priorities_empty_buffer(): np.random.seed(41) priorities = {0: 0, 1: 2, 2:", "= 10 state = np.array([30, 20, 5, 20])[:, None] env_params = get_null_env_params( state,", "agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((3, 1))) def", "= agent.map_state_to_actions(state) assert np.sum(action[0:2]) == 1 and np.all(action[2:4] == np.zeros((2, 1))) def test_priority_nonidling_heuristic_agent_starving():", "== 1) and (action[1] == 0) and (action[3] == 1) def test_priority_heuristic_agent_init_all_resources_given(): priorities", "== expected_priorities def test_priority_heuristic_agent_init_wrong_activity_given(): priorities = {0: 0, 2: 1} state = np.array([[10.],", "None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_reverse_order(): # One station scheduling two buffers, one larger than the", "them having to be above safety stock 
safety_stock = 1.0 state = 1.1", "np.sum(action) == 1 def test_random_heuristic_agent_multiple_buffers_sum_condition_starving(): # Station scheduling three buffers, the sum of", "1, 0, -1]]) safety_stock = 10 state = np.array([30, 20, 20, 30])[:, None]", "updated_action[:, [i]] = agent.sample_random_actions(state=state, action=action, activities=activities) average_updated_action = np.sum(updated_action, axis=1) / float(num_sim) np.testing.assert_array_almost_equal(average_updated_action.reshape(-1,", "safety_stock = 10.0 state = np.array([11, 11])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix)", "crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) assert agent.priorities == priorities def test_priority_heuristic_agent_init_not_all_resources_given(): priorities =", "expected_action, decimal=2) def test_priority_heuristic_agent_map_state_to_actions_full_priorities_empty_buffer(): np.random.seed(41) priorities = {0: 0, 1: 2, 2: 5}", "in serial, such that # buffer 1 is connected with buffer 3, and", "test_random_heuristic_agent_multiple_buffers_eye_condition_starving(): # Station scheduling three buffers, each of them having to be above", "None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1, 2)) env_params[\"list_boundary_constraint_matrices\"] = [np.eye(2)]", "* np.ones((1, 1)) env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1, 1)) env_params[\"list_boundary_constraint_matrices\"]", "but both below safety stock. buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state", "having to be above safety stock. 
safety_stock = 9.9 state = 10 *", "0], [0, 1, 0, -1]]) safety_stock = 10 state = np.array([30, 20, 9,", "= longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action[0:2]) == 1 and np.all(action[2:4] ==", "env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) action = np.array([[1], [0], [0], [0],", "such that # buffer 1 is connected with buffer 3, and 2 with", "assert np.sum(action[0:2]) == 1 and np.all(action[2:4] == np.array([0, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_activities_buffers_and_resources(): #", "time_interval = 1 return { \"cost_per_buffer\": np.zeros_like(state), \"capacity\": np.zeros_like(state), \"constituency_matrix\": constituency_matrix, \"job_generator\": drjg.DeterministicDiscreteReviewJobGenerator(", "priorities = {0: 0, 1: 2, 2: 5} state = np.array([[10.], [0.], [10.]])", "to be above safety stock. safety_stock = 9.9 state = 10 * np.ones((4,", "6]) num_sim = int(1e4) updated_action = np.zeros((buffer_processing_matrix.shape[1], num_sim)) for i in np.arange(num_sim): updated_action[:,", "action = agent.map_state_to_actions(state) assert np.sum(action[0:2]) == 1 and np.all(action[2:4] == np.array([0, 0])[:, None])", "test_random_heuristic_agent_multiple_buffers_sum_condition(): # Station scheduling three buffers, the sum of their size having to", "[0, 1, 0, -1]]) safety_stock = 10 state = np.array([30, 20, 9, 5])[:,", "of their size having to be above safety # stock. safety_stock = 9.9", "[0., 0., 0., 0., 0., -1., 0.]]) constituency_matrix = np.array([[1., 0., 0., 0.,", "this # agent. buffer_processing_matrix = np.array([[-1, 0, 0, 0], [0, -1, 0, 0],", "and 2 with 4. 
# Kind of condition doesn't matter since the largest", "= agent.map_state_to_actions(state) assert np.sum(action) == 2 def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_starving(): # Two stations, each one", "be above safety stock in this # agent. buffer_processing_matrix = np.array([[-1, 0, 0,", "the largest buffer has to be above safety stock in this # agent.", "The stations are connected in serial, such that # buffer 1 is connected", "updated_action = agent.sample_random_actions(state=state, action=action, activities=activities) assert np.all(action == updated_action) def test_priority_heuristic_agent_sample_random_action_one_possible_action(): priorities =", "1.]]) constituency_matrix_original = constituency_matrix.copy() env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent", "0., -1., 0., 0., 0., 0.], [0., -1., 0., 0., 0., 0., 0.],", "one larger than the other. Only the large one is above # safety", "to be above safety # stock. 
safety_stock = 9.9 state = np.array([5, 5,", "priorities def test_priority_heuristic_agent_init_not_all_resources_given(): priorities = {0: 0, 2: 5} expected_priorities = {0: 0,", "longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([1, 0, 0, 0])[:, None])", "np.arange(num_sim): actions[:, [i]] = agent.map_state_to_actions(state=state) average_action = np.sum(actions, axis=1) / float(num_sim) np.testing.assert_array_almost_equal(average_action.reshape(-1, 1),", "One station scheduling two buffers, one larger than the other, both above safety", "random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((4, 1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_eye_cond(): #", "None, } def test_random_heuristic_agent_starving(): # Single server queue safety_stock = 10.0 state =", "safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action[2:4]) == 1 and np.all(action[0:2] == np.zeros((2, 1)))", "agent.map_state_to_actions(state=state) average_action = np.sum(actions, axis=1) / float(num_sim) np.testing.assert_array_almost_equal(average_action.reshape(-1, 1), expected_action, decimal=2) def test_priority_heuristic_agent_map_state_to_actions_full_priorities_empty_buffer():", "0., 0., 0., 0.], [0., 0., 0., 0., 0., -1., 0.]]) constituency_matrix =", "= -np.triu(np.ones((num_buffers, num_resources))) if constituency_matrix is None: constituency_matrix = np.zeros((num_resources, num_resources)) time_interval =", "9.9 state = np.array([5, 5, 5, 4])[:, None] env_params = get_null_env_params(state) env_params[\"constituency_matrix\"] =", "pytest.raises(AssertionError): _ = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) def test_priority_heuristic_agent_sample_random_action_empty_possible_actions(): priorities = {0: 0, 1: 2,", "0.], [0., 0., 0., 0., 
0., -1., 0.]]) constituency_matrix = np.array([[1., 0., 0.,", "test_random_heuristic_agent_multiple_buffers_multiple_resources_eye_cond_starving(): # Two stations, each one scheduling two buffers, each of them having", "buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1, 2)) env_params[\"list_boundary_constraint_matrices\"] = [np.eye(2)] env = crw.ControlledRandomWalk(**env_params) agent =", "test_priority_nonidling_heuristic_agent_multiple_largest_buffers_multiple_resources_sum_cond(): # Two stations, each one scheduling two buffers. The stations are connected", "= agent.map_state_to_actions(state) assert np.all(action == np.zeros((3, 1))) def test_random_heuristic_agent_multiple_buffers_eye_condition(): # Station scheduling three", "= np.zeros((buffer_processing_matrix.shape[1], num_sim)) for i in np.arange(num_sim): updated_action[:, [i]] = agent.sample_random_actions(state=state, action=action, activities=activities)", "= [np.eye(3)] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert", "= 9.9 state = np.array([5, 5, 5, 4])[:, None] env_params = get_null_env_params(state) env_params[\"constituency_matrix\"]", "safety_stock = 10.0 state = 5 * np.ones((1, 1)) env_params = get_null_env_params( state,", "int(1e4) updated_action = np.zeros((buffer_processing_matrix.shape[1], num_sim)) for i in np.arange(num_sim): updated_action[:, [i]] = agent.sample_random_actions(state=state,", "np.array([1, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_reverse_order(): # One station scheduling two buffers, one larger", "20, 20, 30])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.array([[1, 1,", "[0], [1], [0], [0]]) activities = np.array([1, 2]) updated_action = 
agent.sample_random_actions(state=state, action=action, activities=activities)", "1)) def test_priority_nonidling_heuristic_agent(): # Single server queue buffer_processing_matrix = - np.ones((1, 1)) safety_stock", "= np.array([[1], [0], [0], [0], [1], [0], [0]]) expected_action = np.array([[1], [0], [1],", "5])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params[\"constituency_matrix\"] = np.ones((1, 2)) env_params[\"list_boundary_constraint_matrices\"] =", "int(num_sim))) for i in np.arange(int(num_sim)): actions[:, [i]] = agent.map_state_to_actions(state=state) average_action = np.sum(actions, axis=1)", "0, -1]]) safety_stock = 10 state = np.array([30, 30, 9, 5])[:, None] env_params", "np.all(action[2:4] == np.zeros((2, 1))) def test_priority_nonidling_heuristic_agent_starving(): # Single server queue buffer_processing_matrix = -", "stock. safety_stock = 10 state = 4 * np.ones((4, 1)) env_params = get_null_env_params(state)", "assert np.all(constituency_matrix_original == env.constituency_matrix) def test_priority_heuristic_agent_map_state_to_actions_full_priorities_full_buffer(): priorities = {0: 0, 1: 2, 2:", "import pytest import snc.environments.job_generators.discrete_review_job_generator \\ as drjg import snc.environments.controlled_random_walk as crw import snc.environments.state_initialiser", "np.all(action == np.array([0, 1])[:, None]) def test_priority_nonidling_heuristic_agent_multi_buffers_eye_cond_small_one_starve_reverse_ord(): # One station scheduling two buffers," ]
[ "torchvision import transforms def get_transforms(): normalize = Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225]) transform", "def get_transforms(): normalize = Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225]) transform = Compose([normalize]) return", "from torchvision.transforms import Compose,Normalize,RandomCrop,RandomResizedCrop,Resize,RandomHorizontalFlip, ToTensor from torchvision import transforms def get_transforms(): normalize =", "get_transforms(): normalize = Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225]) transform = Compose([normalize]) return transform", "torch from torchvision.transforms import Compose,Normalize,RandomCrop,RandomResizedCrop,Resize,RandomHorizontalFlip, ToTensor from torchvision import transforms def get_transforms(): normalize", "ToTensor from torchvision import transforms def get_transforms(): normalize = Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224,", "import torch from torchvision.transforms import Compose,Normalize,RandomCrop,RandomResizedCrop,Resize,RandomHorizontalFlip, ToTensor from torchvision import transforms def get_transforms():", "from torchvision import transforms def get_transforms(): normalize = Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])", "transforms def get_transforms(): normalize = Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225]) transform = Compose([normalize])", "Compose,Normalize,RandomCrop,RandomResizedCrop,Resize,RandomHorizontalFlip, ToTensor from torchvision import transforms def get_transforms(): normalize = Normalize(mean=[0.485, 0.456, 0.406],std=[0.229,", "torchvision.transforms import Compose,Normalize,RandomCrop,RandomResizedCrop,Resize,RandomHorizontalFlip, ToTensor from torchvision import transforms def get_transforms(): normalize = Normalize(mean=[0.485,", "import transforms def get_transforms(): normalize = Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225]) transform =", "import 
Compose,Normalize,RandomCrop,RandomResizedCrop,Resize,RandomHorizontalFlip, ToTensor from torchvision import transforms def get_transforms(): normalize = Normalize(mean=[0.485, 0.456," ]
[ "Check that training and testing sets are disjoint # print(np.intersect1d(trainIndices, testIndices)) Xtrain =", "plt import pprint def missingIsNan(s): return np.nan if s == b'?' else float(s)", "column of 1s XtrainS1 = np.insert(XtrainS, 0, 1, 1) XtestS1 = np.insert(XtestS, 0,", "numbers rows = np.arange(nRows) np.random.shuffle(rows) trainIndices = rows[:nTrain] testIndices = rows[nTrain:] # Check", "# 3. Split it into input (X) and target (T) # Target =", "= X[testIndices, :] Ttest = T[testIndices, :] # 5. Standardize (standardize, unstandardize) =", "as plt import pprint def missingIsNan(s): return np.nan if s == b'?' else", "0) def standardize(origX): return (origX - means) / stds def unstandardize(stdX): return stds", "standardize(Xtrain) XtestS = standardize(Xtest) # 6. Tack column of 1s XtrainS1 = np.insert(XtrainS,", "# print(np.intersect1d(trainIndices, testIndices)) Xtrain = X[trainIndices, :] Ttrain = T[trainIndices, :] Xtest =", "XtrainS = standardize(Xtrain) XtestS = standardize(Xtest) # 6. Tack column of 1s XtrainS1", "= np.insert(XtrainS, 0, 1, 1) XtestS1 = np.insert(XtestS, 0, 1, 1) # 7.", "# 2. 'Clean' the data. Cdata = data[~np.isnan(data).any(axis = 1)] # 3. Split", "np.loadtxt(\"Data\\\\auto-mpg.data\", usecols = range(8), converters = {3: missingIsNan}) # 2. 'Clean' the data.", "are disjoint # print(np.intersect1d(trainIndices, testIndices)) Xtrain = X[trainIndices, :] Ttrain = T[trainIndices, :]", "s == b'?' else float(s) def makeStandardize(X): means = X.mean(axis = 0) stds", "2 to 7 T = Cdata[:, 0:1] X = Cdata[:, 1:] # 4.", "usecols = range(8), converters = {3: missingIsNan}) # 2. 'Clean' the data. Cdata", "{3: missingIsNan}) # 2. 'Clean' the data. 
Cdata = data[~np.isnan(data).any(axis = 1)] #", "np.random.shuffle(rows) trainIndices = rows[:nTrain] testIndices = rows[nTrain:] # Check that training and testing", "testIndices = rows[nTrain:] # Check that training and testing sets are disjoint #", "XtrainS1 = np.insert(XtrainS, 0, 1, 1) XtestS1 = np.insert(XtestS, 0, 1, 1) #", "XtrainS1.T @ Ttrain, rcond = None)[0] # 8. Predict predict = XtestS1 @", "%) nRows = X.shape[0] nTrain = int(round(0.8*nRows)) nTest = nRows - nTrain #", "= np.arange(nRows) np.random.shuffle(rows) trainIndices = rows[:nTrain] testIndices = rows[nTrain:] # Check that training", "rcond = None)[0] # 8. Predict predict = XtestS1 @ w # 9.", "6. Tack column of 1s XtrainS1 = np.insert(XtrainS, 0, 1, 1) XtestS1 =", "1, 1) # 7. Find weights (solve for w) w = np.linalg.lstsq(XtrainS1.T @", "Cdata[:, 1:] # 4. Append column of 1s to X # X1 =", "data into training (80 %) and testing data (20 %) nRows = X.shape[0]", "0, 1, 1) # 7. Find weights (solve for w) w = np.linalg.lstsq(XtrainS1.T", "1) # 7. Find weights (solve for w) w = np.linalg.lstsq(XtrainS1.T @ XtrainS1,", "= T[trainIndices, :] Xtest = X[testIndices, :] Ttest = T[testIndices, :] # 5.", "standardize(origX): return (origX - means) / stds def unstandardize(stdX): return stds * stdX", "Tack column of 1s XtrainS1 = np.insert(XtrainS, 0, 1, 1) XtestS1 = np.insert(XtestS,", "- means) / stds def unstandardize(stdX): return stds * stdX + means return", "def standardize(origX): return (origX - means) / stds def unstandardize(stdX): return stds *", "converters = {3: missingIsNan}) # 2. 'Clean' the data. Cdata = data[~np.isnan(data).any(axis =", "training and testing sets are disjoint # print(np.intersect1d(trainIndices, testIndices)) Xtrain = X[trainIndices, :]", "= {3: missingIsNan}) # 2. 'Clean' the data. 
Cdata = data[~np.isnan(data).any(axis = 1)]", "stds def unstandardize(stdX): return stds * stdX + means return (standardize, unstandardize) if", "T[trainIndices, :] Xtest = X[testIndices, :] Ttest = T[testIndices, :] # 5. Standardize", "return stds * stdX + means return (standardize, unstandardize) if __name__ == '__main__':", "0, 1, 1) XtestS1 = np.insert(XtestS, 0, 1, 1) # 7. Find weights", "float(s) def makeStandardize(X): means = X.mean(axis = 0) stds = X.std(axis = 0)", "X = Cdata[:, 1:] # 4. Append column of 1s to X #", "X[trainIndices, :] Ttrain = T[trainIndices, :] Xtest = X[testIndices, :] Ttest = T[testIndices,", "= XtestS1 @ w # 9. Compute RSME rsme = np.sqrt(np.mean((predict - Ttest)**2))", "predict = XtestS1 @ w # 9. Compute RSME rsme = np.sqrt(np.mean((predict -", "0:1] X = Cdata[:, 1:] # 4. Append column of 1s to X", "= np.insert(X, 0, 1, 1) # 4. Split the data into training (80", "= mpg (first column) # Input = remaining - columns 2 to 7", "= remaining - columns 2 to 7 T = Cdata[:, 0:1] X =", ":] Xtest = X[testIndices, :] Ttest = T[testIndices, :] # 5. Standardize (standardize,", "- nTrain # Shuffle row numbers rows = np.arange(nRows) np.random.shuffle(rows) trainIndices = rows[:nTrain]", "import pprint def missingIsNan(s): return np.nan if s == b'?' else float(s) def", "def unstandardize(stdX): return stds * stdX + means return (standardize, unstandardize) if __name__", "1s to X # X1 = np.insert(X, 0, 1, 1) # 4. Split", "to 7 T = Cdata[:, 0:1] X = Cdata[:, 1:] # 4. Append", "print(np.intersect1d(trainIndices, testIndices)) Xtrain = X[trainIndices, :] Ttrain = T[trainIndices, :] Xtest = X[testIndices,", "Target = mpg (first column) # Input = remaining - columns 2 to", "testIndices)) Xtrain = X[trainIndices, :] Ttrain = T[trainIndices, :] Xtest = X[testIndices, :]", "Xtest = X[testIndices, :] Ttest = T[testIndices, :] # 5. 
Standardize (standardize, unstandardize)", "/ stds def unstandardize(stdX): return stds * stdX + means return (standardize, unstandardize)", "7. Find weights (solve for w) w = np.linalg.lstsq(XtrainS1.T @ XtrainS1, XtrainS1.T @", "target (T) # Target = mpg (first column) # Input = remaining -", "matplotlib.pyplot as plt import pprint def missingIsNan(s): return np.nan if s == b'?'", "Split it into input (X) and target (T) # Target = mpg (first", "1)] # 3. Split it into input (X) and target (T) # Target", "np import matplotlib.pyplot as plt import pprint def missingIsNan(s): return np.nan if s", "(20 %) nRows = X.shape[0] nTrain = int(round(0.8*nRows)) nTest = nRows - nTrain", "- columns 2 to 7 T = Cdata[:, 0:1] X = Cdata[:, 1:]", "@ XtrainS1, XtrainS1.T @ Ttrain, rcond = None)[0] # 8. Predict predict =", "missingIsNan(s): return np.nan if s == b'?' else float(s) def makeStandardize(X): means =", "1, 1) # 4. Split the data into training (80 %) and testing", "= standardize(Xtrain) XtestS = standardize(Xtest) # 6. Tack column of 1s XtrainS1 =", "(origX - means) / stds def unstandardize(stdX): return stds * stdX + means", "and target (T) # Target = mpg (first column) # Input = remaining", "7 T = Cdata[:, 0:1] X = Cdata[:, 1:] # 4. Append column", "standardize(Xtest) # 6. Tack column of 1s XtrainS1 = np.insert(XtrainS, 0, 1, 1)", "columns 2 to 7 T = Cdata[:, 0:1] X = Cdata[:, 1:] #", "into input (X) and target (T) # Target = mpg (first column) #", "data[~np.isnan(data).any(axis = 1)] # 3. Split it into input (X) and target (T)", "to X # X1 = np.insert(X, 0, 1, 1) # 4. Split the", "Shuffle row numbers rows = np.arange(nRows) np.random.shuffle(rows) trainIndices = rows[:nTrain] testIndices = rows[nTrain:]", "X.shape[0] nTrain = int(round(0.8*nRows)) nTest = nRows - nTrain # Shuffle row numbers", "data. Cdata = data[~np.isnan(data).any(axis = 1)] # 3. Split it into input (X)", "1. Load the data. 
data = np.loadtxt(\"Data\\\\auto-mpg.data\", usecols = range(8), converters = {3:", "unstandardize) if __name__ == '__main__': # 1. Load the data. data = np.loadtxt(\"Data\\\\auto-mpg.data\",", "Cdata[:, 0:1] X = Cdata[:, 1:] # 4. Append column of 1s to", "np.insert(X, 0, 1, 1) # 4. Split the data into training (80 %)", "stds = X.std(axis = 0) def standardize(origX): return (origX - means) / stds", "the data. data = np.loadtxt(\"Data\\\\auto-mpg.data\", usecols = range(8), converters = {3: missingIsNan}) #", "T = Cdata[:, 0:1] X = Cdata[:, 1:] # 4. Append column of", "None)[0] # 8. Predict predict = XtestS1 @ w # 9. Compute RSME", "training (80 %) and testing data (20 %) nRows = X.shape[0] nTrain =", "disjoint # print(np.intersect1d(trainIndices, testIndices)) Xtrain = X[trainIndices, :] Ttrain = T[trainIndices, :] Xtest", "mpg (first column) # Input = remaining - columns 2 to 7 T", "= X.mean(axis = 0) stds = X.std(axis = 0) def standardize(origX): return (origX", "1:] # 4. Append column of 1s to X # X1 = np.insert(X,", "into training (80 %) and testing data (20 %) nRows = X.shape[0] nTrain", "# 4. Split the data into training (80 %) and testing data (20", "X[testIndices, :] Ttest = T[testIndices, :] # 5. Standardize (standardize, unstandardize) = makeStandardize(Xtrain)", "np.arange(nRows) np.random.shuffle(rows) trainIndices = rows[:nTrain] testIndices = rows[nTrain:] # Check that training and", "4. Split the data into training (80 %) and testing data (20 %)", "nTrain = int(round(0.8*nRows)) nTest = nRows - nTrain # Shuffle row numbers rows", "Find weights (solve for w) w = np.linalg.lstsq(XtrainS1.T @ XtrainS1, XtrainS1.T @ Ttrain,", "column of 1s to X # X1 = np.insert(X, 0, 1, 1) #", "pprint def missingIsNan(s): return np.nan if s == b'?' else float(s) def makeStandardize(X):", "data = np.loadtxt(\"Data\\\\auto-mpg.data\", usecols = range(8), converters = {3: missingIsNan}) # 2. 'Clean'", "== '__main__': # 1. Load the data. 
data = np.loadtxt(\"Data\\\\auto-mpg.data\", usecols = range(8),", "%) and testing data (20 %) nRows = X.shape[0] nTrain = int(round(0.8*nRows)) nTest", "nRows = X.shape[0] nTrain = int(round(0.8*nRows)) nTest = nRows - nTrain # Shuffle", "testing data (20 %) nRows = X.shape[0] nTrain = int(round(0.8*nRows)) nTest = nRows", "rows = np.arange(nRows) np.random.shuffle(rows) trainIndices = rows[:nTrain] testIndices = rows[nTrain:] # Check that", "if s == b'?' else float(s) def makeStandardize(X): means = X.mean(axis = 0)", "return np.nan if s == b'?' else float(s) def makeStandardize(X): means = X.mean(axis", "XtestS1 = np.insert(XtestS, 0, 1, 1) # 7. Find weights (solve for w)", "(standardize, unstandardize) = makeStandardize(Xtrain) XtrainS = standardize(Xtrain) XtestS = standardize(Xtest) # 6. Tack", "unstandardize(stdX): return stds * stdX + means return (standardize, unstandardize) if __name__ ==", "4. Append column of 1s to X # X1 = np.insert(X, 0, 1,", "(T) # Target = mpg (first column) # Input = remaining - columns", "makeStandardize(X): means = X.mean(axis = 0) stds = X.std(axis = 0) def standardize(origX):", "Ttrain = T[trainIndices, :] Xtest = X[testIndices, :] Ttest = T[testIndices, :] #", "# 5. Standardize (standardize, unstandardize) = makeStandardize(Xtrain) XtrainS = standardize(Xtrain) XtestS = standardize(Xtest)", "XtrainS1, XtrainS1.T @ Ttrain, rcond = None)[0] # 8. Predict predict = XtestS1", "= int(round(0.8*nRows)) nTest = nRows - nTrain # Shuffle row numbers rows =", "* stdX + means return (standardize, unstandardize) if __name__ == '__main__': # 1.", "= np.linalg.lstsq(XtrainS1.T @ XtrainS1, XtrainS1.T @ Ttrain, rcond = None)[0] # 8. Predict", "np.insert(XtrainS, 0, 1, 1) XtestS1 = np.insert(XtestS, 0, 1, 1) # 7. Find", "= np.loadtxt(\"Data\\\\auto-mpg.data\", usecols = range(8), converters = {3: missingIsNan}) # 2. 'Clean' the", "Predict predict = XtestS1 @ w # 9. Compute RSME rsme = np.sqrt(np.mean((predict", "np.nan if s == b'?' 
else float(s) def makeStandardize(X): means = X.mean(axis =", "XtestS = standardize(Xtest) # 6. Tack column of 1s XtrainS1 = np.insert(XtrainS, 0,", "'__main__': # 1. Load the data. data = np.loadtxt(\"Data\\\\auto-mpg.data\", usecols = range(8), converters", "w) w = np.linalg.lstsq(XtrainS1.T @ XtrainS1, XtrainS1.T @ Ttrain, rcond = None)[0] #", "the data into training (80 %) and testing data (20 %) nRows =", "nTrain # Shuffle row numbers rows = np.arange(nRows) np.random.shuffle(rows) trainIndices = rows[:nTrain] testIndices", "Cdata = data[~np.isnan(data).any(axis = 1)] # 3. Split it into input (X) and", "5. Standardize (standardize, unstandardize) = makeStandardize(Xtrain) XtrainS = standardize(Xtrain) XtestS = standardize(Xtest) #", "Input = remaining - columns 2 to 7 T = Cdata[:, 0:1] X", "else float(s) def makeStandardize(X): means = X.mean(axis = 0) stds = X.std(axis =", "column) # Input = remaining - columns 2 to 7 T = Cdata[:,", "weights (solve for w) w = np.linalg.lstsq(XtrainS1.T @ XtrainS1, XtrainS1.T @ Ttrain, rcond", "for w) w = np.linalg.lstsq(XtrainS1.T @ XtrainS1, XtrainS1.T @ Ttrain, rcond = None)[0]", "np.insert(XtestS, 0, 1, 1) # 7. Find weights (solve for w) w =", "w = np.linalg.lstsq(XtrainS1.T @ XtrainS1, XtrainS1.T @ Ttrain, rcond = None)[0] # 8.", "= rows[nTrain:] # Check that training and testing sets are disjoint # print(np.intersect1d(trainIndices,", "# 8. Predict predict = XtestS1 @ w # 9. Compute RSME rsme", "# Input = remaining - columns 2 to 7 T = Cdata[:, 0:1]", "= 1)] # 3. Split it into input (X) and target (T) #", "def makeStandardize(X): means = X.mean(axis = 0) stds = X.std(axis = 0) def", "import numpy as np import matplotlib.pyplot as plt import pprint def missingIsNan(s): return", "= None)[0] # 8. Predict predict = XtestS1 @ w # 9. 
Compute", ":] Ttrain = T[trainIndices, :] Xtest = X[testIndices, :] Ttest = T[testIndices, :]", "Xtrain = X[trainIndices, :] Ttrain = T[trainIndices, :] Xtest = X[testIndices, :] Ttest", "if __name__ == '__main__': # 1. Load the data. data = np.loadtxt(\"Data\\\\auto-mpg.data\", usecols", "def missingIsNan(s): return np.nan if s == b'?' else float(s) def makeStandardize(X): means", "'Clean' the data. Cdata = data[~np.isnan(data).any(axis = 1)] # 3. Split it into", "b'?' else float(s) def makeStandardize(X): means = X.mean(axis = 0) stds = X.std(axis", "# 4. Append column of 1s to X # X1 = np.insert(X, 0,", "rows[nTrain:] # Check that training and testing sets are disjoint # print(np.intersect1d(trainIndices, testIndices))", "1) # 4. Split the data into training (80 %) and testing data", "Split the data into training (80 %) and testing data (20 %) nRows", "that training and testing sets are disjoint # print(np.intersect1d(trainIndices, testIndices)) Xtrain = X[trainIndices,", "np.linalg.lstsq(XtrainS1.T @ XtrainS1, XtrainS1.T @ Ttrain, rcond = None)[0] # 8. Predict predict", "(X) and target (T) # Target = mpg (first column) # Input =", "== b'?' else float(s) def makeStandardize(X): means = X.mean(axis = 0) stds =", "of 1s to X # X1 = np.insert(X, 0, 1, 1) # 4.", "Append column of 1s to X # X1 = np.insert(X, 0, 1, 1)", "= 0) def standardize(origX): return (origX - means) / stds def unstandardize(stdX): return", "= data[~np.isnan(data).any(axis = 1)] # 3. Split it into input (X) and target", "input (X) and target (T) # Target = mpg (first column) # Input", "+ means return (standardize, unstandardize) if __name__ == '__main__': # 1. Load the", "= T[testIndices, :] # 5. Standardize (standardize, unstandardize) = makeStandardize(Xtrain) XtrainS = standardize(Xtrain)", "= Cdata[:, 1:] # 4. Append column of 1s to X # X1", "Load the data. data = np.loadtxt(\"Data\\\\auto-mpg.data\", usecols = range(8), converters = {3: missingIsNan})", "X1 = np.insert(X, 0, 1, 1) # 4. 
Split the data into training", "makeStandardize(Xtrain) XtrainS = standardize(Xtrain) XtestS = standardize(Xtest) # 6. Tack column of 1s", "# 6. Tack column of 1s XtrainS1 = np.insert(XtrainS, 0, 1, 1) XtestS1", "__name__ == '__main__': # 1. Load the data. data = np.loadtxt(\"Data\\\\auto-mpg.data\", usecols =", "trainIndices = rows[:nTrain] testIndices = rows[nTrain:] # Check that training and testing sets", "= standardize(Xtest) # 6. Tack column of 1s XtrainS1 = np.insert(XtrainS, 0, 1,", "XtestS1 @ w # 9. Compute RSME rsme = np.sqrt(np.mean((predict - Ttest)**2)) print(rsme)", "as np import matplotlib.pyplot as plt import pprint def missingIsNan(s): return np.nan if", "X # X1 = np.insert(X, 0, 1, 1) # 4. Split the data", "# X1 = np.insert(X, 0, 1, 1) # 4. Split the data into", "nTest = nRows - nTrain # Shuffle row numbers rows = np.arange(nRows) np.random.shuffle(rows)", "Standardize (standardize, unstandardize) = makeStandardize(Xtrain) XtrainS = standardize(Xtrain) XtestS = standardize(Xtest) # 6.", "unstandardize) = makeStandardize(Xtrain) XtrainS = standardize(Xtrain) XtestS = standardize(Xtest) # 6. Tack column", "= 0) stds = X.std(axis = 0) def standardize(origX): return (origX - means)", "X.std(axis = 0) def standardize(origX): return (origX - means) / stds def unstandardize(stdX):", "# Check that training and testing sets are disjoint # print(np.intersect1d(trainIndices, testIndices)) Xtrain", "sets are disjoint # print(np.intersect1d(trainIndices, testIndices)) Xtrain = X[trainIndices, :] Ttrain = T[trainIndices,", "1, 1) XtestS1 = np.insert(XtestS, 0, 1, 1) # 7. Find weights (solve", "data (20 %) nRows = X.shape[0] nTrain = int(round(0.8*nRows)) nTest = nRows -", "and testing data (20 %) nRows = X.shape[0] nTrain = int(round(0.8*nRows)) nTest =", "# Target = mpg (first column) # Input = remaining - columns 2", "return (standardize, unstandardize) if __name__ == '__main__': # 1. Load the data. 
data", "nRows - nTrain # Shuffle row numbers rows = np.arange(nRows) np.random.shuffle(rows) trainIndices =", "Ttest = T[testIndices, :] # 5. Standardize (standardize, unstandardize) = makeStandardize(Xtrain) XtrainS =", "means = X.mean(axis = 0) stds = X.std(axis = 0) def standardize(origX): return", "(standardize, unstandardize) if __name__ == '__main__': # 1. Load the data. data =", "the data. Cdata = data[~np.isnan(data).any(axis = 1)] # 3. Split it into input", "rows[:nTrain] testIndices = rows[nTrain:] # Check that training and testing sets are disjoint", "= np.insert(XtestS, 0, 1, 1) # 7. Find weights (solve for w) w", "T[testIndices, :] # 5. Standardize (standardize, unstandardize) = makeStandardize(Xtrain) XtrainS = standardize(Xtrain) XtestS", "return (origX - means) / stds def unstandardize(stdX): return stds * stdX +", "1) XtestS1 = np.insert(XtestS, 0, 1, 1) # 7. Find weights (solve for", "testing sets are disjoint # print(np.intersect1d(trainIndices, testIndices)) Xtrain = X[trainIndices, :] Ttrain =", ":] Ttest = T[testIndices, :] # 5. Standardize (standardize, unstandardize) = makeStandardize(Xtrain) XtrainS", "2. 'Clean' the data. Cdata = data[~np.isnan(data).any(axis = 1)] # 3. Split it", "(first column) # Input = remaining - columns 2 to 7 T =", "remaining - columns 2 to 7 T = Cdata[:, 0:1] X = Cdata[:,", "Ttrain, rcond = None)[0] # 8. Predict predict = XtestS1 @ w #", "= Cdata[:, 0:1] X = Cdata[:, 1:] # 4. Append column of 1s", "missingIsNan}) # 2. 'Clean' the data. Cdata = data[~np.isnan(data).any(axis = 1)] # 3.", "# Shuffle row numbers rows = np.arange(nRows) np.random.shuffle(rows) trainIndices = rows[:nTrain] testIndices =", "and testing sets are disjoint # print(np.intersect1d(trainIndices, testIndices)) Xtrain = X[trainIndices, :] Ttrain", "stds * stdX + means return (standardize, unstandardize) if __name__ == '__main__': #", "1s XtrainS1 = np.insert(XtrainS, 0, 1, 1) XtestS1 = np.insert(XtestS, 0, 1, 1)", "# 7. 
Find weights (solve for w) w = np.linalg.lstsq(XtrainS1.T @ XtrainS1, XtrainS1.T", "= X.shape[0] nTrain = int(round(0.8*nRows)) nTest = nRows - nTrain # Shuffle row", "= X[trainIndices, :] Ttrain = T[trainIndices, :] Xtest = X[testIndices, :] Ttest =", "import matplotlib.pyplot as plt import pprint def missingIsNan(s): return np.nan if s ==", "numpy as np import matplotlib.pyplot as plt import pprint def missingIsNan(s): return np.nan", "it into input (X) and target (T) # Target = mpg (first column)", "0) stds = X.std(axis = 0) def standardize(origX): return (origX - means) /", "# 1. Load the data. data = np.loadtxt(\"Data\\\\auto-mpg.data\", usecols = range(8), converters =", "= rows[:nTrain] testIndices = rows[nTrain:] # Check that training and testing sets are", "of 1s XtrainS1 = np.insert(XtrainS, 0, 1, 1) XtestS1 = np.insert(XtestS, 0, 1,", "row numbers rows = np.arange(nRows) np.random.shuffle(rows) trainIndices = rows[:nTrain] testIndices = rows[nTrain:] #", "(solve for w) w = np.linalg.lstsq(XtrainS1.T @ XtrainS1, XtrainS1.T @ Ttrain, rcond =", "means return (standardize, unstandardize) if __name__ == '__main__': # 1. Load the data.", "int(round(0.8*nRows)) nTest = nRows - nTrain # Shuffle row numbers rows = np.arange(nRows)", "(80 %) and testing data (20 %) nRows = X.shape[0] nTrain = int(round(0.8*nRows))", "@ Ttrain, rcond = None)[0] # 8. Predict predict = XtestS1 @ w", "= X.std(axis = 0) def standardize(origX): return (origX - means) / stds def", "means) / stds def unstandardize(stdX): return stds * stdX + means return (standardize,", "8. Predict predict = XtestS1 @ w # 9. Compute RSME rsme =", "range(8), converters = {3: missingIsNan}) # 2. 'Clean' the data. Cdata = data[~np.isnan(data).any(axis", ":] # 5. Standardize (standardize, unstandardize) = makeStandardize(Xtrain) XtrainS = standardize(Xtrain) XtestS =", "stdX + means return (standardize, unstandardize) if __name__ == '__main__': # 1. 
Load", "= nRows - nTrain # Shuffle row numbers rows = np.arange(nRows) np.random.shuffle(rows) trainIndices", "3. Split it into input (X) and target (T) # Target = mpg", "0, 1, 1) # 4. Split the data into training (80 %) and", "= range(8), converters = {3: missingIsNan}) # 2. 'Clean' the data. Cdata =", "X.mean(axis = 0) stds = X.std(axis = 0) def standardize(origX): return (origX -", "= makeStandardize(Xtrain) XtrainS = standardize(Xtrain) XtestS = standardize(Xtest) # 6. Tack column of", "data. data = np.loadtxt(\"Data\\\\auto-mpg.data\", usecols = range(8), converters = {3: missingIsNan}) # 2." ]
[ "if m == 2: m = 12 elif m == 1: m =", "1: m = 11 else: m -= 2 c = year%100 print((day +", "11 else: m -= 2 c = year%100 print((day + ((13*m -1) //5)", "2: m = 12 elif m == 1: m = 11 else: m", "int(input(\": \")) year = int(input(\": \")) if m == 2: m = 12", "= int(input(\": \")) if m == 2: m = 12 elif m ==", "((13*m -1) //5) + year + year + (year //4 + c//4 -", "day = int(input(\": \")) m = int(input(\": \")) year = int(input(\": \")) if", "year%100 print((day + ((13*m -1) //5) + year + year + (year //4", "//4 + c//4 - 2*c + 777)) % 7 ) #not job !!!!", "m = 12 elif m == 1: m = 11 else: m -=", "int(input(\": \")) if m == 2: m = 12 elif m == 1:", "+ (year //4 + c//4 - 2*c + 777)) % 7 ) #not", "m == 2: m = 12 elif m == 1: m = 11", "-1) //5) + year + year + (year //4 + c//4 - 2*c", "m -= 2 c = year%100 print((day + ((13*m -1) //5) + year", "(year //4 + c//4 - 2*c + 777)) % 7 ) #not job", "== 1: m = 11 else: m -= 2 c = year%100 print((day", "year + (year //4 + c//4 - 2*c + 777)) % 7 )", "elif m == 1: m = 11 else: m -= 2 c =", "year + year + (year //4 + c//4 - 2*c + 777)) %", "12 elif m == 1: m = 11 else: m -= 2 c", "== 2: m = 12 elif m == 1: m = 11 else:", "= int(input(\": \")) year = int(input(\": \")) if m == 2: m =", "\")) year = int(input(\": \")) if m == 2: m = 12 elif", "+ year + (year //4 + c//4 - 2*c + 777)) % 7", "= 12 elif m == 1: m = 11 else: m -= 2", "\")) if m == 2: m = 12 elif m == 1: m", "= year%100 print((day + ((13*m -1) //5) + year + year + (year", "print((day + ((13*m -1) //5) + year + year + (year //4 +", "m == 1: m = 11 else: m -= 2 c = year%100", "year = int(input(\": \")) if m == 2: m = 12 elif m", "+ year + year + (year //4 + c//4 - 2*c + 777))", "= int(input(\": \")) m = int(input(\": \")) year = int(input(\": \")) if m", "-= 2 c = year%100 print((day + ((13*m -1) //5) + year +", "= 11 else: m -= 2 c = year%100 print((day + ((13*m -1)", "\")) m = int(input(\": \")) year = int(input(\": \")) if m == 2:", "2 c = year%100 print((day + ((13*m -1) 
//5) + year + year", "else: m -= 2 c = year%100 print((day + ((13*m -1) //5) +", "<gh_stars>0 day = int(input(\": \")) m = int(input(\": \")) year = int(input(\": \"))", "c = year%100 print((day + ((13*m -1) //5) + year + year +", "+ ((13*m -1) //5) + year + year + (year //4 + c//4", "//5) + year + year + (year //4 + c//4 - 2*c +", "m = 11 else: m -= 2 c = year%100 print((day + ((13*m", "m = int(input(\": \")) year = int(input(\": \")) if m == 2: m", "int(input(\": \")) m = int(input(\": \")) year = int(input(\": \")) if m ==" ]
[ "text not null primary key, date text, title text, content text, public integer", "key, date text, title text, content text, public integer db.close_connection() if __name__ ==", "# print(res) # (id text not null primary key, date text, title text,", "is a secret',3) # ) # res = db.select(\"SELECT * FROM blogs WHERE", "'2021-03-07', 'Secret blog' ,'This is a secret',3) # ) # res = db.select(\"SELECT", "* FROM blogs1\") print(res) # for i in range(1): # db.insert( # query=\"INSERT", "text, title text, content text, public integer db.close_connection() if __name__ == \"__main__\": main()", "public >= 3\") # print(res) # (id text not null primary key, date", "FROM blogs1\") print(res) # for i in range(1): # db.insert( # query=\"INSERT INTO", "db.insert( # query=\"INSERT INTO blogs VALUES (?,?,?,?,?);\", # data=(f'private-blog{i+10}', '2021-03-07', 'Secret blog' ,'This", ",'This is a secret',3) # ) # res = db.select(\"SELECT * FROM blogs", "abc_core.utils.logger_client import get_basis_logger_config def main(): logging.basicConfig(**get_basis_logger_config()) db = SQLLite(filename=\"../../data/application.db\") res = db.select(\"SELECT *", "db = SQLLite(filename=\"../../data/application.db\") res = db.select(\"SELECT * FROM blogs1\") print(res) # for i", "import get_basis_logger_config def main(): logging.basicConfig(**get_basis_logger_config()) db = SQLLite(filename=\"../../data/application.db\") res = db.select(\"SELECT * FROM", "for i in range(1): # db.insert( # query=\"INSERT INTO blogs VALUES (?,?,?,?,?);\", #", "print(res) # (id text not null primary key, date text, title text, content", "# res = db.select(\"SELECT * FROM blogs WHERE public >= 3\") # print(res)", "logging from abc_core.database.sqllite_client import SQLLite from abc_core.utils.logger_client import get_basis_logger_config def main(): logging.basicConfig(**get_basis_logger_config()) db", "logging.basicConfig(**get_basis_logger_config()) db = SQLLite(filename=\"../../data/application.db\") res = 
db.select(\"SELECT * FROM blogs1\") print(res) # for", "INTO blogs VALUES (?,?,?,?,?);\", # data=(f'private-blog{i+10}', '2021-03-07', 'Secret blog' ,'This is a secret',3)", "get_basis_logger_config def main(): logging.basicConfig(**get_basis_logger_config()) db = SQLLite(filename=\"../../data/application.db\") res = db.select(\"SELECT * FROM blogs1\")", ") # res = db.select(\"SELECT * FROM blogs WHERE public >= 3\") #", "WHERE public >= 3\") # print(res) # (id text not null primary key,", "# (id text not null primary key, date text, title text, content text,", "query=\"INSERT INTO blogs VALUES (?,?,?,?,?);\", # data=(f'private-blog{i+10}', '2021-03-07', 'Secret blog' ,'This is a", "primary key, date text, title text, content text, public integer db.close_connection() if __name__", "blogs1\") print(res) # for i in range(1): # db.insert( # query=\"INSERT INTO blogs", "from abc_core.database.sqllite_client import SQLLite from abc_core.utils.logger_client import get_basis_logger_config def main(): logging.basicConfig(**get_basis_logger_config()) db =", "'Secret blog' ,'This is a secret',3) # ) # res = db.select(\"SELECT *", "db.select(\"SELECT * FROM blogs WHERE public >= 3\") # print(res) # (id text", "SQLLite from abc_core.utils.logger_client import get_basis_logger_config def main(): logging.basicConfig(**get_basis_logger_config()) db = SQLLite(filename=\"../../data/application.db\") res =", "null primary key, date text, title text, content text, public integer db.close_connection() if", "= db.select(\"SELECT * FROM blogs WHERE public >= 3\") # print(res) # (id", "range(1): # db.insert( # query=\"INSERT INTO blogs VALUES (?,?,?,?,?);\", # data=(f'private-blog{i+10}', '2021-03-07', 'Secret", "* FROM blogs WHERE public >= 3\") # print(res) # (id text not", "import SQLLite from abc_core.utils.logger_client import get_basis_logger_config def main(): logging.basicConfig(**get_basis_logger_config()) db = SQLLite(filename=\"../../data/application.db\") res", "import 
logging from abc_core.database.sqllite_client import SQLLite from abc_core.utils.logger_client import get_basis_logger_config def main(): logging.basicConfig(**get_basis_logger_config())", "= db.select(\"SELECT * FROM blogs1\") print(res) # for i in range(1): # db.insert(", "i in range(1): # db.insert( # query=\"INSERT INTO blogs VALUES (?,?,?,?,?);\", # data=(f'private-blog{i+10}',", "data=(f'private-blog{i+10}', '2021-03-07', 'Secret blog' ,'This is a secret',3) # ) # res =", "blog' ,'This is a secret',3) # ) # res = db.select(\"SELECT * FROM", "# ) # res = db.select(\"SELECT * FROM blogs WHERE public >= 3\")", "date text, title text, content text, public integer db.close_connection() if __name__ == \"__main__\":", "(id text not null primary key, date text, title text, content text, public", "res = db.select(\"SELECT * FROM blogs WHERE public >= 3\") # print(res) #", "= SQLLite(filename=\"../../data/application.db\") res = db.select(\"SELECT * FROM blogs1\") print(res) # for i in", "# data=(f'private-blog{i+10}', '2021-03-07', 'Secret blog' ,'This is a secret',3) # ) # res", "in range(1): # db.insert( # query=\"INSERT INTO blogs VALUES (?,?,?,?,?);\", # data=(f'private-blog{i+10}', '2021-03-07',", "abc_core.database.sqllite_client import SQLLite from abc_core.utils.logger_client import get_basis_logger_config def main(): logging.basicConfig(**get_basis_logger_config()) db = SQLLite(filename=\"../../data/application.db\")", "3\") # print(res) # (id text not null primary key, date text, title", "def main(): logging.basicConfig(**get_basis_logger_config()) db = SQLLite(filename=\"../../data/application.db\") res = db.select(\"SELECT * FROM blogs1\") print(res)", "print(res) # for i in range(1): # db.insert( # query=\"INSERT INTO blogs VALUES", "main(): logging.basicConfig(**get_basis_logger_config()) db = SQLLite(filename=\"../../data/application.db\") res = db.select(\"SELECT * FROM blogs1\") print(res) #", "res = db.select(\"SELECT * FROM blogs1\") print(res) # 
for i in range(1): #", "# for i in range(1): # db.insert( # query=\"INSERT INTO blogs VALUES (?,?,?,?,?);\",", "a secret',3) # ) # res = db.select(\"SELECT * FROM blogs WHERE public", "secret',3) # ) # res = db.select(\"SELECT * FROM blogs WHERE public >=", "blogs VALUES (?,?,?,?,?);\", # data=(f'private-blog{i+10}', '2021-03-07', 'Secret blog' ,'This is a secret',3) #", "# query=\"INSERT INTO blogs VALUES (?,?,?,?,?);\", # data=(f'private-blog{i+10}', '2021-03-07', 'Secret blog' ,'This is", "(?,?,?,?,?);\", # data=(f'private-blog{i+10}', '2021-03-07', 'Secret blog' ,'This is a secret',3) # ) #", "VALUES (?,?,?,?,?);\", # data=(f'private-blog{i+10}', '2021-03-07', 'Secret blog' ,'This is a secret',3) # )", "db.select(\"SELECT * FROM blogs1\") print(res) # for i in range(1): # db.insert( #", "FROM blogs WHERE public >= 3\") # print(res) # (id text not null", "# db.insert( # query=\"INSERT INTO blogs VALUES (?,?,?,?,?);\", # data=(f'private-blog{i+10}', '2021-03-07', 'Secret blog'", "SQLLite(filename=\"../../data/application.db\") res = db.select(\"SELECT * FROM blogs1\") print(res) # for i in range(1):", "from abc_core.utils.logger_client import get_basis_logger_config def main(): logging.basicConfig(**get_basis_logger_config()) db = SQLLite(filename=\"../../data/application.db\") res = db.select(\"SELECT", "blogs WHERE public >= 3\") # print(res) # (id text not null primary", "not null primary key, date text, title text, content text, public integer db.close_connection()", ">= 3\") # print(res) # (id text not null primary key, date text," ]
[ "class ProvisioningContext: def __init__(self): self.runbook = None self.role = None context = ProvisioningContext()", "<gh_stars>0 class ProvisioningContext: def __init__(self): self.runbook = None self.role = None context =" ]
[ "'fcprobforward') and callable(layer.fcprobforward): x, _kl, = layer.fcprobforward(x) kl += _kl else: x =", "self.layers: if hasattr(layer, 'convprobforward') and callable(layer.convprobforward): x, _kl, = layer.convprobforward(x) kl += _kl", "BBBConv2d(256, 48, kernel_size=1) self.squeeze_activation5 = nn.Softplus() self.expand3x3_5 = BBBConv2d(48, 384, kernel_size=3, padding=1) self.expand3x3_activation5", "self.soft1 = nn.Softplus() self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) # Fire module 1 self.squeeze1", "self.squeeze4 = BBBConv2d(256, 32, kernel_size=1) self.squeeze_activation4 = nn.Softplus() self.expand3x3_4 = BBBConv2d(32, 256, kernel_size=3,", "self.squeeze_activation5, self.expand3x3_5, self.expand3x3_activation5, self.squeeze6, self.squeeze_activation6, self.expand3x3_6, self.expand3x3_activation6, self.squeeze7, self.squeeze_activation7, self.expand3x3_7, self.expand3x3_activation7, self.squeeze8, self.squeeze_activation8,", "nn.Softplus() self.expand3x3_8 = BBBConv2d(64, 512, kernel_size=3, padding=1) self.expand3x3_activation8 = nn.Softplus() self.drop1 = nn.Dropout(p=0.5)", "layer.convprobforward(x) kl += _kl elif hasattr(layer, 'fcprobforward') and callable(layer.fcprobforward): x, _kl, = layer.fcprobforward(x)", "self.soft1, self.pool1, self.squeeze1, self.squeeze_activation1, self.expand3x3_1, self.expand3x3_activation1, self.squeeze2, self.squeeze_activation2, self.expand3x3_2, self.expand3x3_activation2, self.pool2, self.squeeze3, self.squeeze_activation3,", "self.squeeze_activation7 = nn.Softplus() self.expand3x3_7 = BBBConv2d(64, 512, kernel_size=3, padding=1) self.expand3x3_activation7 = nn.Softplus() #", "BBBSqueezeNet(nn.Module): \"\"\" SqueezeNet with slightly modified Fire modules and Bayesian layers. 
\"\"\" def", "384, kernel_size=3, padding=1) self.expand3x3_activation5 = nn.Softplus() # Fire module 6 self.squeeze6 = BBBConv2d(384,", "= nn.Softplus() # Fire module 8 self.squeeze8 = BBBConv2d(512, 64, kernel_size=1) self.squeeze_activation8 =", "kernel_size=1) self.squeeze_activation4 = nn.Softplus() self.expand3x3_4 = BBBConv2d(32, 256, kernel_size=3, padding=1) self.expand3x3_activation4 = nn.Softplus()", "with slightly modified Fire modules and Bayesian layers. \"\"\" def __init__(self, outputs, inputs):", "= nn.Softplus() self.expand3x3_3 = BBBConv2d(32, 256, kernel_size=3, padding=1) self.expand3x3_activation3 = nn.Softplus() # Fire", "self.expand3x3_activation4, self.pool3, self.squeeze5, self.squeeze_activation5, self.expand3x3_5, self.expand3x3_activation5, self.squeeze6, self.squeeze_activation6, self.expand3x3_6, self.expand3x3_activation6, self.squeeze7, self.squeeze_activation7, self.expand3x3_7,", "= BBBConv2d(48, 384, kernel_size=3, padding=1) self.expand3x3_activation5 = nn.Softplus() # Fire module 6 self.squeeze6", "padding=1) self.expand3x3_activation8 = nn.Softplus() self.drop1 = nn.Dropout(p=0.5) self.conv2 = BBBConv2d(512, outputs, kernel_size=1) self.soft2", "if hasattr(layer, 'convprobforward') and callable(layer.convprobforward): x, _kl, = layer.convprobforward(x) kl += _kl elif", "nn.Softplus() self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) # Fire module 1 self.squeeze1 = BBBConv2d(64,", "= BBBConv2d(64, 16, kernel_size=1) self.squeeze_activation1 = nn.Softplus() self.expand3x3_1 = BBBConv2d(16, 128, kernel_size=3, padding=1)", "kernel_size=3, padding=1) self.expand3x3_activation4 = nn.Softplus() self.pool3 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) # Fire module", "= BBBConv2d(384, 48, kernel_size=1) self.squeeze_activation6 = nn.Softplus() self.expand3x3_6 = BBBConv2d(48, 384, kernel_size=3, padding=1)", "self.conv2 = BBBConv2d(512, outputs, kernel_size=1) self.soft2 = nn.Softplus() self.flatten = 
FlattenLayer(13 * 13", "128, kernel_size=3, padding=1) self.expand3x3_activation1 = nn.Softplus() # Fire module 2 self.squeeze2 = BBBConv2d(128,", "kl += _kl else: x = layer(x) logits = x print('logits', logits) return", "* 100) self.fc1 = BBBLinearFactorial(13 * 13 * 100, outputs) layers = [self.conv1,", "self.squeeze_activation8 = nn.Softplus() self.expand3x3_8 = BBBConv2d(64, 512, kernel_size=3, padding=1) self.expand3x3_activation8 = nn.Softplus() self.drop1", "self.expand3x3_1 = BBBConv2d(16, 128, kernel_size=3, padding=1) self.expand3x3_activation1 = nn.Softplus() # Fire module 2", "= nn.Softplus() self.pool3 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) # Fire module 5 self.squeeze5 =", "self.expand3x3_activation4 = nn.Softplus() self.pool3 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) # Fire module 5 self.squeeze5", "BBBConv2d(256, 32, kernel_size=1) self.squeeze_activation4 = nn.Softplus() self.expand3x3_4 = BBBConv2d(32, 256, kernel_size=3, padding=1) self.expand3x3_activation4", "= FlattenLayer(13 * 13 * 100) self.fc1 = BBBLinearFactorial(13 * 13 * 100,", "= nn.Softplus() self.drop1 = nn.Dropout(p=0.5) self.conv2 = BBBConv2d(512, outputs, kernel_size=1) self.soft2 = nn.Softplus()", "torch.nn as nn from utils.BBBlayers import BBBConv2d, FlattenLayer, BBBLinearFactorial class BBBSqueezeNet(nn.Module): \"\"\" SqueezeNet", "kernel_size=3, padding=1) self.expand3x3_activation8 = nn.Softplus() self.drop1 = nn.Dropout(p=0.5) self.conv2 = BBBConv2d(512, outputs, kernel_size=1)", "self.pool2, self.squeeze3, self.squeeze_activation3, self.expand3x3_3, self.expand3x3_activation3, self.squeeze4, self.squeeze_activation4, self.expand3x3_4, self.expand3x3_activation4, self.pool3, self.squeeze5, self.squeeze_activation5, self.expand3x3_5,", "self.expand3x3_activation5 = nn.Softplus() # Fire module 6 self.squeeze6 = BBBConv2d(384, 48, kernel_size=1) self.squeeze_activation6", "BBBConv2d(64, 512, kernel_size=3, padding=1) self.expand3x3_activation7 = 
nn.Softplus() # Fire module 8 self.squeeze8 =", "nn.Softplus() self.expand3x3_6 = BBBConv2d(48, 384, kernel_size=3, padding=1) self.expand3x3_activation6 = nn.Softplus() # Fire module", "kernel_size=1) self.squeeze_activation6 = nn.Softplus() self.expand3x3_6 = BBBConv2d(48, 384, kernel_size=3, padding=1) self.expand3x3_activation6 = nn.Softplus()", "nn.Softplus() self.pool3 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) # Fire module 5 self.squeeze5 = BBBConv2d(256,", "nn.Softplus() self.expand3x3_7 = BBBConv2d(64, 512, kernel_size=3, padding=1) self.expand3x3_activation7 = nn.Softplus() # Fire module", "inputs): super(BBBSqueezeNet, self).__init__() self.conv1 = BBBConv2d(inputs, 64, kernel_size=3, stride=2) self.soft1 = nn.Softplus() self.pool1", "Fire module 7 self.squeeze7 = BBBConv2d(384, 64, kernel_size=1) self.squeeze_activation7 = nn.Softplus() self.expand3x3_7 =", "stride=2, ceil_mode=True) # Fire module 1 self.squeeze1 = BBBConv2d(64, 16, kernel_size=1) self.squeeze_activation1 =", "outputs) layers = [self.conv1, self.soft1, self.pool1, self.squeeze1, self.squeeze_activation1, self.expand3x3_1, self.expand3x3_activation1, self.squeeze2, self.squeeze_activation2, self.expand3x3_2,", "self.expand3x3_2, self.expand3x3_activation2, self.pool2, self.squeeze3, self.squeeze_activation3, self.expand3x3_3, self.expand3x3_activation3, self.squeeze4, self.squeeze_activation4, self.expand3x3_4, self.expand3x3_activation4, self.pool3, self.squeeze5,", "BBBConv2d(16, 128, kernel_size=3, padding=1) self.expand3x3_activation2 = nn.Softplus() self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) #", "= nn.Softplus() # Fire module 6 self.squeeze6 = BBBConv2d(384, 48, kernel_size=1) self.squeeze_activation6 =", "padding=1) self.expand3x3_activation3 = nn.Softplus() # Fire module 4 self.squeeze4 = BBBConv2d(256, 32, kernel_size=1)", "= layer.fcprobforward(x) kl += _kl else: x = layer(x) logits = x print('logits',", "= BBBConv2d(16, 128, kernel_size=3, 
padding=1) self.expand3x3_activation2 = nn.Softplus() self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)", "nn.Softplus() # Fire module 8 self.squeeze8 = BBBConv2d(512, 64, kernel_size=1) self.squeeze_activation8 = nn.Softplus()", "with Bayesian weights' kl = 0 for layer in self.layers: if hasattr(layer, 'convprobforward')", "nn.ModuleList(layers) def probforward(self, x): 'Forward pass with Bayesian weights' kl = 0 for", "kernel_size=3, padding=1) self.expand3x3_activation6 = nn.Softplus() # Fire module 7 self.squeeze7 = BBBConv2d(384, 64,", "self.layers = nn.ModuleList(layers) def probforward(self, x): 'Forward pass with Bayesian weights' kl =", "512, kernel_size=3, padding=1) self.expand3x3_activation8 = nn.Softplus() self.drop1 = nn.Dropout(p=0.5) self.conv2 = BBBConv2d(512, outputs,", "self.expand3x3_3, self.expand3x3_activation3, self.squeeze4, self.squeeze_activation4, self.expand3x3_4, self.expand3x3_activation4, self.pool3, self.squeeze5, self.squeeze_activation5, self.expand3x3_5, self.expand3x3_activation5, self.squeeze6, self.squeeze_activation6,", "self.expand3x3_activation3 = nn.Softplus() # Fire module 4 self.squeeze4 = BBBConv2d(256, 32, kernel_size=1) self.squeeze_activation4", "self.pool3, self.squeeze5, self.squeeze_activation5, self.expand3x3_5, self.expand3x3_activation5, self.squeeze6, self.squeeze_activation6, self.expand3x3_6, self.expand3x3_activation6, self.squeeze7, self.squeeze_activation7, self.expand3x3_7, self.expand3x3_activation7,", "self.expand3x3_activation2 = nn.Softplus() self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) # Fire module 3 self.squeeze3", "2 self.squeeze2 = BBBConv2d(128, 16, kernel_size=1) self.squeeze_activation2 = nn.Softplus() self.expand3x3_2 = BBBConv2d(16, 128,", "100, outputs) layers = [self.conv1, self.soft1, self.pool1, self.squeeze1, self.squeeze_activation1, self.expand3x3_1, self.expand3x3_activation1, self.squeeze2, self.squeeze_activation2,", "self.squeeze_activation2 
= nn.Softplus() self.expand3x3_2 = BBBConv2d(16, 128, kernel_size=3, padding=1) self.expand3x3_activation2 = nn.Softplus() self.pool2", "128, kernel_size=3, padding=1) self.expand3x3_activation2 = nn.Softplus() self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) # Fire", "self.expand3x3_activation1 = nn.Softplus() # Fire module 2 self.squeeze2 = BBBConv2d(128, 16, kernel_size=1) self.squeeze_activation2", "256, kernel_size=3, padding=1) self.expand3x3_activation3 = nn.Softplus() # Fire module 4 self.squeeze4 = BBBConv2d(256,", "nn.Softplus() self.expand3x3_2 = BBBConv2d(16, 128, kernel_size=3, padding=1) self.expand3x3_activation2 = nn.Softplus() self.pool2 = nn.MaxPool2d(kernel_size=3,", "+= _kl else: x = layer(x) logits = x print('logits', logits) return logits,", "self.squeeze8 = BBBConv2d(512, 64, kernel_size=1) self.squeeze_activation8 = nn.Softplus() self.expand3x3_8 = BBBConv2d(64, 512, kernel_size=3,", "outputs, inputs): super(BBBSqueezeNet, self).__init__() self.conv1 = BBBConv2d(inputs, 64, kernel_size=3, stride=2) self.soft1 = nn.Softplus()", "self.conv2, self.soft2, self.flatten, self.fc1] self.layers = nn.ModuleList(layers) def probforward(self, x): 'Forward pass with", "# Fire module 4 self.squeeze4 = BBBConv2d(256, 32, kernel_size=1) self.squeeze_activation4 = nn.Softplus() self.expand3x3_4", "BBBConv2d(128, 32, kernel_size=1) self.squeeze_activation3 = nn.Softplus() self.expand3x3_3 = BBBConv2d(32, 256, kernel_size=3, padding=1) self.expand3x3_activation3", "kernel_size=1) self.squeeze_activation3 = nn.Softplus() self.expand3x3_3 = BBBConv2d(32, 256, kernel_size=3, padding=1) self.expand3x3_activation3 = nn.Softplus()", "48, kernel_size=1) self.squeeze_activation6 = nn.Softplus() self.expand3x3_6 = BBBConv2d(48, 384, kernel_size=3, padding=1) self.expand3x3_activation6 =", "kernel_size=1) self.squeeze_activation5 = nn.Softplus() self.expand3x3_5 = BBBConv2d(48, 384, kernel_size=3, padding=1) self.expand3x3_activation5 = 
nn.Softplus()", "kernel_size=1) self.squeeze_activation8 = nn.Softplus() self.expand3x3_8 = BBBConv2d(64, 512, kernel_size=3, padding=1) self.expand3x3_activation8 = nn.Softplus()", "# Fire module 2 self.squeeze2 = BBBConv2d(128, 16, kernel_size=1) self.squeeze_activation2 = nn.Softplus() self.expand3x3_2", "callable(layer.convprobforward): x, _kl, = layer.convprobforward(x) kl += _kl elif hasattr(layer, 'fcprobforward') and callable(layer.fcprobforward):", "= nn.Softplus() self.expand3x3_7 = BBBConv2d(64, 512, kernel_size=3, padding=1) self.expand3x3_activation7 = nn.Softplus() # Fire", "self.squeeze2, self.squeeze_activation2, self.expand3x3_2, self.expand3x3_activation2, self.pool2, self.squeeze3, self.squeeze_activation3, self.expand3x3_3, self.expand3x3_activation3, self.squeeze4, self.squeeze_activation4, self.expand3x3_4, self.expand3x3_activation4,", "class BBBSqueezeNet(nn.Module): \"\"\" SqueezeNet with slightly modified Fire modules and Bayesian layers. \"\"\"", "BBBConv2d(128, 16, kernel_size=1) self.squeeze_activation2 = nn.Softplus() self.expand3x3_2 = BBBConv2d(16, 128, kernel_size=3, padding=1) self.expand3x3_activation2", "= BBBConv2d(256, 48, kernel_size=1) self.squeeze_activation5 = nn.Softplus() self.expand3x3_5 = BBBConv2d(48, 384, kernel_size=3, padding=1)", "nn.Softplus() self.expand3x3_1 = BBBConv2d(16, 128, kernel_size=3, padding=1) self.expand3x3_activation1 = nn.Softplus() # Fire module", "self.expand3x3_activation7, self.squeeze8, self.squeeze_activation8, self.expand3x3_8, self.expand3x3_activation8, self.drop1, self.conv2, self.soft2, self.flatten, self.fc1] self.layers = nn.ModuleList(layers)", "self.expand3x3_4, self.expand3x3_activation4, self.pool3, self.squeeze5, self.squeeze_activation5, self.expand3x3_5, self.expand3x3_activation5, self.squeeze6, self.squeeze_activation6, self.expand3x3_6, self.expand3x3_activation6, self.squeeze7, self.squeeze_activation7,", "nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) # Fire module 5 
self.squeeze5 = BBBConv2d(256, 48, kernel_size=1) self.squeeze_activation5", "pass with Bayesian weights' kl = 0 for layer in self.layers: if hasattr(layer,", "weights' kl = 0 for layer in self.layers: if hasattr(layer, 'convprobforward') and callable(layer.convprobforward):", "def probforward(self, x): 'Forward pass with Bayesian weights' kl = 0 for layer", "self.squeeze_activation5 = nn.Softplus() self.expand3x3_5 = BBBConv2d(48, 384, kernel_size=3, padding=1) self.expand3x3_activation5 = nn.Softplus() #", "nn.Softplus() # Fire module 7 self.squeeze7 = BBBConv2d(384, 64, kernel_size=1) self.squeeze_activation7 = nn.Softplus()", "* 100, outputs) layers = [self.conv1, self.soft1, self.pool1, self.squeeze1, self.squeeze_activation1, self.expand3x3_1, self.expand3x3_activation1, self.squeeze2,", "Bayesian weights' kl = 0 for layer in self.layers: if hasattr(layer, 'convprobforward') and", "BBBConv2d(512, 64, kernel_size=1) self.squeeze_activation8 = nn.Softplus() self.expand3x3_8 = BBBConv2d(64, 512, kernel_size=3, padding=1) self.expand3x3_activation8", "callable(layer.fcprobforward): x, _kl, = layer.fcprobforward(x) kl += _kl else: x = layer(x) logits", "FlattenLayer, BBBLinearFactorial class BBBSqueezeNet(nn.Module): \"\"\" SqueezeNet with slightly modified Fire modules and Bayesian", "FlattenLayer(13 * 13 * 100) self.fc1 = BBBLinearFactorial(13 * 13 * 100, outputs)", "BBBLinearFactorial(13 * 13 * 100, outputs) layers = [self.conv1, self.soft1, self.pool1, self.squeeze1, self.squeeze_activation1,", "= nn.Softplus() # Fire module 7 self.squeeze7 = BBBConv2d(384, 64, kernel_size=1) self.squeeze_activation7 =", "4 self.squeeze4 = BBBConv2d(256, 32, kernel_size=1) self.squeeze_activation4 = nn.Softplus() self.expand3x3_4 = BBBConv2d(32, 256,", "self.expand3x3_activation1, self.squeeze2, self.squeeze_activation2, self.expand3x3_2, self.expand3x3_activation2, self.pool2, self.squeeze3, self.squeeze_activation3, self.expand3x3_3, self.expand3x3_activation3, 
self.squeeze4, self.squeeze_activation4, self.expand3x3_4,", "self.expand3x3_activation8, self.drop1, self.conv2, self.soft2, self.flatten, self.fc1] self.layers = nn.ModuleList(layers) def probforward(self, x): 'Forward", "self.flatten, self.fc1] self.layers = nn.ModuleList(layers) def probforward(self, x): 'Forward pass with Bayesian weights'", "BBBConv2d(32, 256, kernel_size=3, padding=1) self.expand3x3_activation4 = nn.Softplus() self.pool3 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) #", "self.squeeze2 = BBBConv2d(128, 16, kernel_size=1) self.squeeze_activation2 = nn.Softplus() self.expand3x3_2 = BBBConv2d(16, 128, kernel_size=3,", "_kl elif hasattr(layer, 'fcprobforward') and callable(layer.fcprobforward): x, _kl, = layer.fcprobforward(x) kl += _kl", "# Fire module 6 self.squeeze6 = BBBConv2d(384, 48, kernel_size=1) self.squeeze_activation6 = nn.Softplus() self.expand3x3_6", "Fire module 8 self.squeeze8 = BBBConv2d(512, 64, kernel_size=1) self.squeeze_activation8 = nn.Softplus() self.expand3x3_8 =", "64, kernel_size=1) self.squeeze_activation8 = nn.Softplus() self.expand3x3_8 = BBBConv2d(64, 512, kernel_size=3, padding=1) self.expand3x3_activation8 =", "SqueezeNet with slightly modified Fire modules and Bayesian layers. 
\"\"\" def __init__(self, outputs,", "BBBConv2d(16, 128, kernel_size=3, padding=1) self.expand3x3_activation1 = nn.Softplus() # Fire module 2 self.squeeze2 =", "self.squeeze_activation6, self.expand3x3_6, self.expand3x3_activation6, self.squeeze7, self.squeeze_activation7, self.expand3x3_7, self.expand3x3_activation7, self.squeeze8, self.squeeze_activation8, self.expand3x3_8, self.expand3x3_activation8, self.drop1, self.conv2,", "nn.Softplus() # Fire module 6 self.squeeze6 = BBBConv2d(384, 48, kernel_size=1) self.squeeze_activation6 = nn.Softplus()", "self.drop1 = nn.Dropout(p=0.5) self.conv2 = BBBConv2d(512, outputs, kernel_size=1) self.soft2 = nn.Softplus() self.flatten =", "self.fc1 = BBBLinearFactorial(13 * 13 * 100, outputs) layers = [self.conv1, self.soft1, self.pool1,", "# Fire module 3 self.squeeze3 = BBBConv2d(128, 32, kernel_size=1) self.squeeze_activation3 = nn.Softplus() self.expand3x3_3", "= nn.Softplus() self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) # Fire module 1 self.squeeze1 =", "_kl else: x = layer(x) logits = x print('logits', logits) return logits, kl", "= nn.Softplus() self.expand3x3_6 = BBBConv2d(48, 384, kernel_size=3, padding=1) self.expand3x3_activation6 = nn.Softplus() # Fire", "= nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) # Fire module 3 self.squeeze3 = BBBConv2d(128, 32, kernel_size=1)", "module 7 self.squeeze7 = BBBConv2d(384, 64, kernel_size=1) self.squeeze_activation7 = nn.Softplus() self.expand3x3_7 = BBBConv2d(64,", "32, kernel_size=1) self.squeeze_activation3 = nn.Softplus() self.expand3x3_3 = BBBConv2d(32, 256, kernel_size=3, padding=1) self.expand3x3_activation3 =", "kernel_size=3, padding=1) self.expand3x3_activation3 = nn.Softplus() # Fire module 4 self.squeeze4 = BBBConv2d(256, 32,", "ceil_mode=True) # Fire module 1 self.squeeze1 = BBBConv2d(64, 16, kernel_size=1) self.squeeze_activation1 = nn.Softplus()", "self.squeeze_activation3 = nn.Softplus() self.expand3x3_3 = BBBConv2d(32, 256, 
kernel_size=3, padding=1) self.expand3x3_activation3 = nn.Softplus() #", "self.drop1, self.conv2, self.soft2, self.flatten, self.fc1] self.layers = nn.ModuleList(layers) def probforward(self, x): 'Forward pass", "ceil_mode=True) # Fire module 3 self.squeeze3 = BBBConv2d(128, 32, kernel_size=1) self.squeeze_activation3 = nn.Softplus()", "48, kernel_size=1) self.squeeze_activation5 = nn.Softplus() self.expand3x3_5 = BBBConv2d(48, 384, kernel_size=3, padding=1) self.expand3x3_activation5 =", "= BBBConv2d(512, 64, kernel_size=1) self.squeeze_activation8 = nn.Softplus() self.expand3x3_8 = BBBConv2d(64, 512, kernel_size=3, padding=1)", "kernel_size=1) self.squeeze_activation2 = nn.Softplus() self.expand3x3_2 = BBBConv2d(16, 128, kernel_size=3, padding=1) self.expand3x3_activation2 = nn.Softplus()", "= BBBConv2d(512, outputs, kernel_size=1) self.soft2 = nn.Softplus() self.flatten = FlattenLayer(13 * 13 *", "self.squeeze_activation8, self.expand3x3_8, self.expand3x3_activation8, self.drop1, self.conv2, self.soft2, self.flatten, self.fc1] self.layers = nn.ModuleList(layers) def probforward(self,", "self.squeeze_activation1 = nn.Softplus() self.expand3x3_1 = BBBConv2d(16, 128, kernel_size=3, padding=1) self.expand3x3_activation1 = nn.Softplus() #", "nn.Softplus() # Fire module 4 self.squeeze4 = BBBConv2d(256, 32, kernel_size=1) self.squeeze_activation4 = nn.Softplus()", "self.expand3x3_6, self.expand3x3_activation6, self.squeeze7, self.squeeze_activation7, self.expand3x3_7, self.expand3x3_activation7, self.squeeze8, self.squeeze_activation8, self.expand3x3_8, self.expand3x3_activation8, self.drop1, self.conv2, self.soft2,", "x, _kl, = layer.convprobforward(x) kl += _kl elif hasattr(layer, 'fcprobforward') and callable(layer.fcprobforward): x,", "kernel_size=3, padding=1) self.expand3x3_activation2 = nn.Softplus() self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) # Fire module", "self.squeeze_activation4 = nn.Softplus() self.expand3x3_4 = BBBConv2d(32, 256, 
kernel_size=3, padding=1) self.expand3x3_activation4 = nn.Softplus() self.pool3", "= nn.Softplus() # Fire module 2 self.squeeze2 = BBBConv2d(128, 16, kernel_size=1) self.squeeze_activation2 =", "elif hasattr(layer, 'fcprobforward') and callable(layer.fcprobforward): x, _kl, = layer.fcprobforward(x) kl += _kl else:", "module 1 self.squeeze1 = BBBConv2d(64, 16, kernel_size=1) self.squeeze_activation1 = nn.Softplus() self.expand3x3_1 = BBBConv2d(16,", "self.expand3x3_activation2, self.pool2, self.squeeze3, self.squeeze_activation3, self.expand3x3_3, self.expand3x3_activation3, self.squeeze4, self.squeeze_activation4, self.expand3x3_4, self.expand3x3_activation4, self.pool3, self.squeeze5, self.squeeze_activation5,", "self.expand3x3_activation8 = nn.Softplus() self.drop1 = nn.Dropout(p=0.5) self.conv2 = BBBConv2d(512, outputs, kernel_size=1) self.soft2 =", "= BBBConv2d(32, 256, kernel_size=3, padding=1) self.expand3x3_activation3 = nn.Softplus() # Fire module 4 self.squeeze4", "kernel_size=3, padding=1) self.expand3x3_activation7 = nn.Softplus() # Fire module 8 self.squeeze8 = BBBConv2d(512, 64,", "= nn.ModuleList(layers) def probforward(self, x): 'Forward pass with Bayesian weights' kl = 0", "kl += _kl elif hasattr(layer, 'fcprobforward') and callable(layer.fcprobforward): x, _kl, = layer.fcprobforward(x) kl", "256, kernel_size=3, padding=1) self.expand3x3_activation4 = nn.Softplus() self.pool3 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) # Fire", "self.expand3x3_4 = BBBConv2d(32, 256, kernel_size=3, padding=1) self.expand3x3_activation4 = nn.Softplus() self.pool3 = nn.MaxPool2d(kernel_size=3, stride=2,", "BBBConv2d(512, outputs, kernel_size=1) self.soft2 = nn.Softplus() self.flatten = FlattenLayer(13 * 13 * 100)", "slightly modified Fire modules and Bayesian layers. 
\"\"\" def __init__(self, outputs, inputs): super(BBBSqueezeNet,", "self.expand3x3_8, self.expand3x3_activation8, self.drop1, self.conv2, self.soft2, self.flatten, self.fc1] self.layers = nn.ModuleList(layers) def probforward(self, x):", "= nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) # Fire module 1 self.squeeze1 = BBBConv2d(64, 16, kernel_size=1)", "\"\"\" SqueezeNet with slightly modified Fire modules and Bayesian layers. \"\"\" def __init__(self,", "self.squeeze_activation7, self.expand3x3_7, self.expand3x3_activation7, self.squeeze8, self.squeeze_activation8, self.expand3x3_8, self.expand3x3_activation8, self.drop1, self.conv2, self.soft2, self.flatten, self.fc1] self.layers", "self.squeeze_activation6 = nn.Softplus() self.expand3x3_6 = BBBConv2d(48, 384, kernel_size=3, padding=1) self.expand3x3_activation6 = nn.Softplus() #", "nn.Softplus() self.flatten = FlattenLayer(13 * 13 * 100) self.fc1 = BBBLinearFactorial(13 * 13", "self.squeeze4, self.squeeze_activation4, self.expand3x3_4, self.expand3x3_activation4, self.pool3, self.squeeze5, self.squeeze_activation5, self.expand3x3_5, self.expand3x3_activation5, self.squeeze6, self.squeeze_activation6, self.expand3x3_6, self.expand3x3_activation6,", "7 self.squeeze7 = BBBConv2d(384, 64, kernel_size=1) self.squeeze_activation7 = nn.Softplus() self.expand3x3_7 = BBBConv2d(64, 512,", "= BBBLinearFactorial(13 * 13 * 100, outputs) layers = [self.conv1, self.soft1, self.pool1, self.squeeze1,", "nn.Softplus() self.expand3x3_4 = BBBConv2d(32, 256, kernel_size=3, padding=1) self.expand3x3_activation4 = nn.Softplus() self.pool3 = nn.MaxPool2d(kernel_size=3,", "self.squeeze6 = BBBConv2d(384, 48, kernel_size=1) self.squeeze_activation6 = nn.Softplus() self.expand3x3_6 = BBBConv2d(48, 384, kernel_size=3,", "ceil_mode=True) # Fire module 5 self.squeeze5 = BBBConv2d(256, 48, kernel_size=1) self.squeeze_activation5 = nn.Softplus()", "Fire module 5 self.squeeze5 = BBBConv2d(256, 48, kernel_size=1) self.squeeze_activation5 = 
nn.Softplus() self.expand3x3_5 =", "self.expand3x3_activation3, self.squeeze4, self.squeeze_activation4, self.expand3x3_4, self.expand3x3_activation4, self.pool3, self.squeeze5, self.squeeze_activation5, self.expand3x3_5, self.expand3x3_activation5, self.squeeze6, self.squeeze_activation6, self.expand3x3_6,", "3 self.squeeze3 = BBBConv2d(128, 32, kernel_size=1) self.squeeze_activation3 = nn.Softplus() self.expand3x3_3 = BBBConv2d(32, 256,", "= nn.Softplus() self.expand3x3_8 = BBBConv2d(64, 512, kernel_size=3, padding=1) self.expand3x3_activation8 = nn.Softplus() self.drop1 =", "BBBConv2d(384, 48, kernel_size=1) self.squeeze_activation6 = nn.Softplus() self.expand3x3_6 = BBBConv2d(48, 384, kernel_size=3, padding=1) self.expand3x3_activation6", "# Fire module 7 self.squeeze7 = BBBConv2d(384, 64, kernel_size=1) self.squeeze_activation7 = nn.Softplus() self.expand3x3_7", "padding=1) self.expand3x3_activation4 = nn.Softplus() self.pool3 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) # Fire module 5", "and Bayesian layers. 
\"\"\" def __init__(self, outputs, inputs): super(BBBSqueezeNet, self).__init__() self.conv1 = BBBConv2d(inputs,", "kernel_size=1) self.squeeze_activation7 = nn.Softplus() self.expand3x3_7 = BBBConv2d(64, 512, kernel_size=3, padding=1) self.expand3x3_activation7 = nn.Softplus()", "self.expand3x3_2 = BBBConv2d(16, 128, kernel_size=3, padding=1) self.expand3x3_activation2 = nn.Softplus() self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2,", "BBBConv2d(32, 256, kernel_size=3, padding=1) self.expand3x3_activation3 = nn.Softplus() # Fire module 4 self.squeeze4 =", "nn.Dropout(p=0.5) self.conv2 = BBBConv2d(512, outputs, kernel_size=1) self.soft2 = nn.Softplus() self.flatten = FlattenLayer(13 *", "1 self.squeeze1 = BBBConv2d(64, 16, kernel_size=1) self.squeeze_activation1 = nn.Softplus() self.expand3x3_1 = BBBConv2d(16, 128,", "kernel_size=1) self.soft2 = nn.Softplus() self.flatten = FlattenLayer(13 * 13 * 100) self.fc1 =", "Fire module 1 self.squeeze1 = BBBConv2d(64, 16, kernel_size=1) self.squeeze_activation1 = nn.Softplus() self.expand3x3_1 =", "as nn from utils.BBBlayers import BBBConv2d, FlattenLayer, BBBLinearFactorial class BBBSqueezeNet(nn.Module): \"\"\" SqueezeNet with", "BBBLinearFactorial class BBBSqueezeNet(nn.Module): \"\"\" SqueezeNet with slightly modified Fire modules and Bayesian layers.", "kl = 0 for layer in self.layers: if hasattr(layer, 'convprobforward') and callable(layer.convprobforward): x,", "from utils.BBBlayers import BBBConv2d, FlattenLayer, BBBLinearFactorial class BBBSqueezeNet(nn.Module): \"\"\" SqueezeNet with slightly modified", "nn.Softplus() self.expand3x3_3 = BBBConv2d(32, 256, kernel_size=3, padding=1) self.expand3x3_activation3 = nn.Softplus() # Fire module", "nn.Softplus() self.expand3x3_5 = BBBConv2d(48, 384, kernel_size=3, padding=1) self.expand3x3_activation5 = nn.Softplus() # Fire module", "layer.fcprobforward(x) kl += _kl else: x = layer(x) logits = x print('logits', logits)", "self.expand3x3_8 = BBBConv2d(64, 512, 
kernel_size=3, padding=1) self.expand3x3_activation8 = nn.Softplus() self.drop1 = nn.Dropout(p=0.5) self.conv2", "+= _kl elif hasattr(layer, 'fcprobforward') and callable(layer.fcprobforward): x, _kl, = layer.fcprobforward(x) kl +=", "= nn.Softplus() self.expand3x3_5 = BBBConv2d(48, 384, kernel_size=3, padding=1) self.expand3x3_activation5 = nn.Softplus() # Fire", "13 * 100, outputs) layers = [self.conv1, self.soft1, self.pool1, self.squeeze1, self.squeeze_activation1, self.expand3x3_1, self.expand3x3_activation1,", "= BBBConv2d(128, 16, kernel_size=1) self.squeeze_activation2 = nn.Softplus() self.expand3x3_2 = BBBConv2d(16, 128, kernel_size=3, padding=1)", "self.squeeze_activation1, self.expand3x3_1, self.expand3x3_activation1, self.squeeze2, self.squeeze_activation2, self.expand3x3_2, self.expand3x3_activation2, self.pool2, self.squeeze3, self.squeeze_activation3, self.expand3x3_3, self.expand3x3_activation3, self.squeeze4,", "stride=2, ceil_mode=True) # Fire module 3 self.squeeze3 = BBBConv2d(128, 32, kernel_size=1) self.squeeze_activation3 =", "BBBConv2d(inputs, 64, kernel_size=3, stride=2) self.soft1 = nn.Softplus() self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) #", "nn.Softplus() self.drop1 = nn.Dropout(p=0.5) self.conv2 = BBBConv2d(512, outputs, kernel_size=1) self.soft2 = nn.Softplus() self.flatten", "_kl, = layer.fcprobforward(x) kl += _kl else: x = layer(x) logits = x", "_kl, = layer.convprobforward(x) kl += _kl elif hasattr(layer, 'fcprobforward') and callable(layer.fcprobforward): x, _kl,", "hasattr(layer, 'fcprobforward') and callable(layer.fcprobforward): x, _kl, = layer.fcprobforward(x) kl += _kl else: x", "padding=1) self.expand3x3_activation6 = nn.Softplus() # Fire module 7 self.squeeze7 = BBBConv2d(384, 64, kernel_size=1)", "Fire module 2 self.squeeze2 = BBBConv2d(128, 16, kernel_size=1) self.squeeze_activation2 = nn.Softplus() self.expand3x3_2 =", "self).__init__() self.conv1 = BBBConv2d(inputs, 64, kernel_size=3, stride=2) 
self.soft1 = nn.Softplus() self.pool1 = nn.MaxPool2d(kernel_size=3,", "module 5 self.squeeze5 = BBBConv2d(256, 48, kernel_size=1) self.squeeze_activation5 = nn.Softplus() self.expand3x3_5 = BBBConv2d(48,", "# Fire module 1 self.squeeze1 = BBBConv2d(64, 16, kernel_size=1) self.squeeze_activation1 = nn.Softplus() self.expand3x3_1", "self.squeeze8, self.squeeze_activation8, self.expand3x3_8, self.expand3x3_activation8, self.drop1, self.conv2, self.soft2, self.flatten, self.fc1] self.layers = nn.ModuleList(layers) def", "= BBBConv2d(16, 128, kernel_size=3, padding=1) self.expand3x3_activation1 = nn.Softplus() # Fire module 2 self.squeeze2", "layer in self.layers: if hasattr(layer, 'convprobforward') and callable(layer.convprobforward): x, _kl, = layer.convprobforward(x) kl", "nn from utils.BBBlayers import BBBConv2d, FlattenLayer, BBBLinearFactorial class BBBSqueezeNet(nn.Module): \"\"\" SqueezeNet with slightly", "self.expand3x3_6 = BBBConv2d(48, 384, kernel_size=3, padding=1) self.expand3x3_activation6 = nn.Softplus() # Fire module 7", "'convprobforward') and callable(layer.convprobforward): x, _kl, = layer.convprobforward(x) kl += _kl elif hasattr(layer, 'fcprobforward')", "self.expand3x3_7, self.expand3x3_activation7, self.squeeze8, self.squeeze_activation8, self.expand3x3_8, self.expand3x3_activation8, self.drop1, self.conv2, self.soft2, self.flatten, self.fc1] self.layers =", "self.squeeze1, self.squeeze_activation1, self.expand3x3_1, self.expand3x3_activation1, self.squeeze2, self.squeeze_activation2, self.expand3x3_2, self.expand3x3_activation2, self.pool2, self.squeeze3, self.squeeze_activation3, self.expand3x3_3, self.expand3x3_activation3,", "8 self.squeeze8 = BBBConv2d(512, 64, kernel_size=1) self.squeeze_activation8 = nn.Softplus() self.expand3x3_8 = BBBConv2d(64, 512,", "= 0 for layer in self.layers: if hasattr(layer, 'convprobforward') and callable(layer.convprobforward): x, _kl,", "self.expand3x3_7 = BBBConv2d(64, 512, kernel_size=3, padding=1) 
self.expand3x3_activation7 = nn.Softplus() # Fire module 8", "self.squeeze5 = BBBConv2d(256, 48, kernel_size=1) self.squeeze_activation5 = nn.Softplus() self.expand3x3_5 = BBBConv2d(48, 384, kernel_size=3,", "nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) # Fire module 1 self.squeeze1 = BBBConv2d(64, 16, kernel_size=1) self.squeeze_activation1", "import torch.nn as nn from utils.BBBlayers import BBBConv2d, FlattenLayer, BBBLinearFactorial class BBBSqueezeNet(nn.Module): \"\"\"", "layers = [self.conv1, self.soft1, self.pool1, self.squeeze1, self.squeeze_activation1, self.expand3x3_1, self.expand3x3_activation1, self.squeeze2, self.squeeze_activation2, self.expand3x3_2, self.expand3x3_activation2,", "self.expand3x3_activation6, self.squeeze7, self.squeeze_activation7, self.expand3x3_7, self.expand3x3_activation7, self.squeeze8, self.squeeze_activation8, self.expand3x3_8, self.expand3x3_activation8, self.drop1, self.conv2, self.soft2, self.flatten,", "def __init__(self, outputs, inputs): super(BBBSqueezeNet, self).__init__() self.conv1 = BBBConv2d(inputs, 64, kernel_size=3, stride=2) self.soft1", "in self.layers: if hasattr(layer, 'convprobforward') and callable(layer.convprobforward): x, _kl, = layer.convprobforward(x) kl +=", "and callable(layer.convprobforward): x, _kl, = layer.convprobforward(x) kl += _kl elif hasattr(layer, 'fcprobforward') and", "= BBBConv2d(48, 384, kernel_size=3, padding=1) self.expand3x3_activation6 = nn.Softplus() # Fire module 7 self.squeeze7", "self.squeeze_activation4, self.expand3x3_4, self.expand3x3_activation4, self.pool3, self.squeeze5, self.squeeze_activation5, self.expand3x3_5, self.expand3x3_activation5, self.squeeze6, self.squeeze_activation6, self.expand3x3_6, self.expand3x3_activation6, self.squeeze7,", "= BBBConv2d(64, 512, kernel_size=3, padding=1) self.expand3x3_activation8 = nn.Softplus() self.drop1 = nn.Dropout(p=0.5) self.conv2 =", "self.squeeze7 = BBBConv2d(384, 64, kernel_size=1) self.squeeze_activation7 = 
nn.Softplus() self.expand3x3_7 = BBBConv2d(64, 512, kernel_size=3,", "= BBBConv2d(384, 64, kernel_size=1) self.squeeze_activation7 = nn.Softplus() self.expand3x3_7 = BBBConv2d(64, 512, kernel_size=3, padding=1)", "64, kernel_size=3, stride=2) self.soft1 = nn.Softplus() self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) # Fire", "self.squeeze1 = BBBConv2d(64, 16, kernel_size=1) self.squeeze_activation1 = nn.Softplus() self.expand3x3_1 = BBBConv2d(16, 128, kernel_size=3,", "= BBBConv2d(32, 256, kernel_size=3, padding=1) self.expand3x3_activation4 = nn.Softplus() self.pool3 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)", "384, kernel_size=3, padding=1) self.expand3x3_activation6 = nn.Softplus() # Fire module 7 self.squeeze7 = BBBConv2d(384,", "= nn.Softplus() self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) # Fire module 3 self.squeeze3 =", "import BBBConv2d, FlattenLayer, BBBLinearFactorial class BBBSqueezeNet(nn.Module): \"\"\" SqueezeNet with slightly modified Fire modules", "BBBConv2d(48, 384, kernel_size=3, padding=1) self.expand3x3_activation5 = nn.Softplus() # Fire module 6 self.squeeze6 =", "self.squeeze3, self.squeeze_activation3, self.expand3x3_3, self.expand3x3_activation3, self.squeeze4, self.squeeze_activation4, self.expand3x3_4, self.expand3x3_activation4, self.pool3, self.squeeze5, self.squeeze_activation5, self.expand3x3_5, self.expand3x3_activation5,", "6 self.squeeze6 = BBBConv2d(384, 48, kernel_size=1) self.squeeze_activation6 = nn.Softplus() self.expand3x3_6 = BBBConv2d(48, 384,", "= layer.convprobforward(x) kl += _kl elif hasattr(layer, 'fcprobforward') and callable(layer.fcprobforward): x, _kl, =", "BBBConv2d, FlattenLayer, BBBLinearFactorial class BBBSqueezeNet(nn.Module): \"\"\" SqueezeNet with slightly modified Fire modules and", "nn.Softplus() # Fire module 2 self.squeeze2 = BBBConv2d(128, 16, kernel_size=1) self.squeeze_activation2 = nn.Softplus()", "padding=1) self.expand3x3_activation7 = 
nn.Softplus() # Fire module 8 self.squeeze8 = BBBConv2d(512, 64, kernel_size=1)", "13 * 100) self.fc1 = BBBLinearFactorial(13 * 13 * 100, outputs) layers =", "self.expand3x3_activation6 = nn.Softplus() # Fire module 7 self.squeeze7 = BBBConv2d(384, 64, kernel_size=1) self.squeeze_activation7", "* 13 * 100, outputs) layers = [self.conv1, self.soft1, self.pool1, self.squeeze1, self.squeeze_activation1, self.expand3x3_1,", "utils.BBBlayers import BBBConv2d, FlattenLayer, BBBLinearFactorial class BBBSqueezeNet(nn.Module): \"\"\" SqueezeNet with slightly modified Fire", "= nn.Softplus() # Fire module 4 self.squeeze4 = BBBConv2d(256, 32, kernel_size=1) self.squeeze_activation4 =", "= BBBConv2d(64, 512, kernel_size=3, padding=1) self.expand3x3_activation7 = nn.Softplus() # Fire module 8 self.squeeze8", "= nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) # Fire module 5 self.squeeze5 = BBBConv2d(256, 48, kernel_size=1)", "self.squeeze3 = BBBConv2d(128, 32, kernel_size=1) self.squeeze_activation3 = nn.Softplus() self.expand3x3_3 = BBBConv2d(32, 256, kernel_size=3,", "self.squeeze7, self.squeeze_activation7, self.expand3x3_7, self.expand3x3_activation7, self.squeeze8, self.squeeze_activation8, self.expand3x3_8, self.expand3x3_activation8, self.drop1, self.conv2, self.soft2, self.flatten, self.fc1]", "= BBBConv2d(inputs, 64, kernel_size=3, stride=2) self.soft1 = nn.Softplus() self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)", "# Fire module 5 self.squeeze5 = BBBConv2d(256, 48, kernel_size=1) self.squeeze_activation5 = nn.Softplus() self.expand3x3_5", "module 2 self.squeeze2 = BBBConv2d(128, 16, kernel_size=1) self.squeeze_activation2 = nn.Softplus() self.expand3x3_2 = BBBConv2d(16,", "kernel_size=3, padding=1) self.expand3x3_activation1 = nn.Softplus() # Fire module 2 self.squeeze2 = BBBConv2d(128, 16,", "super(BBBSqueezeNet, self).__init__() self.conv1 = BBBConv2d(inputs, 64, kernel_size=3, stride=2) self.soft1 = nn.Softplus() self.pool1 =", 
"self.pool3 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) # Fire module 5 self.squeeze5 = BBBConv2d(256, 48,", "x): 'Forward pass with Bayesian weights' kl = 0 for layer in self.layers:", "layers. \"\"\" def __init__(self, outputs, inputs): super(BBBSqueezeNet, self).__init__() self.conv1 = BBBConv2d(inputs, 64, kernel_size=3,", "Fire modules and Bayesian layers. \"\"\" def __init__(self, outputs, inputs): super(BBBSqueezeNet, self).__init__() self.conv1", "512, kernel_size=3, padding=1) self.expand3x3_activation7 = nn.Softplus() # Fire module 8 self.squeeze8 = BBBConv2d(512,", "module 3 self.squeeze3 = BBBConv2d(128, 32, kernel_size=1) self.squeeze_activation3 = nn.Softplus() self.expand3x3_3 = BBBConv2d(32,", "__init__(self, outputs, inputs): super(BBBSqueezeNet, self).__init__() self.conv1 = BBBConv2d(inputs, 64, kernel_size=3, stride=2) self.soft1 =", "self.expand3x3_5, self.expand3x3_activation5, self.squeeze6, self.squeeze_activation6, self.expand3x3_6, self.expand3x3_activation6, self.squeeze7, self.squeeze_activation7, self.expand3x3_7, self.expand3x3_activation7, self.squeeze8, self.squeeze_activation8, self.expand3x3_8,", "probforward(self, x): 'Forward pass with Bayesian weights' kl = 0 for layer in", "5 self.squeeze5 = BBBConv2d(256, 48, kernel_size=1) self.squeeze_activation5 = nn.Softplus() self.expand3x3_5 = BBBConv2d(48, 384,", "x, _kl, = layer.fcprobforward(x) kl += _kl else: x = layer(x) logits =", "self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) # Fire module 3 self.squeeze3 = BBBConv2d(128, 32,", "outputs, kernel_size=1) self.soft2 = nn.Softplus() self.flatten = FlattenLayer(13 * 13 * 100) self.fc1", "= nn.Softplus() self.expand3x3_2 = BBBConv2d(16, 128, kernel_size=3, padding=1) self.expand3x3_activation2 = nn.Softplus() self.pool2 =", "100) self.fc1 = BBBLinearFactorial(13 * 13 * 100, outputs) layers = [self.conv1, self.soft1,", "BBBConv2d(384, 64, kernel_size=1) self.squeeze_activation7 = nn.Softplus() 
self.expand3x3_7 = BBBConv2d(64, 512, kernel_size=3, padding=1) self.expand3x3_activation7", "self.expand3x3_activation7 = nn.Softplus() # Fire module 8 self.squeeze8 = BBBConv2d(512, 64, kernel_size=1) self.squeeze_activation8", "= [self.conv1, self.soft1, self.pool1, self.squeeze1, self.squeeze_activation1, self.expand3x3_1, self.expand3x3_activation1, self.squeeze2, self.squeeze_activation2, self.expand3x3_2, self.expand3x3_activation2, self.pool2,", "Bayesian layers. \"\"\" def __init__(self, outputs, inputs): super(BBBSqueezeNet, self).__init__() self.conv1 = BBBConv2d(inputs, 64,", "self.expand3x3_5 = BBBConv2d(48, 384, kernel_size=3, padding=1) self.expand3x3_activation5 = nn.Softplus() # Fire module 6", "Fire module 4 self.squeeze4 = BBBConv2d(256, 32, kernel_size=1) self.squeeze_activation4 = nn.Softplus() self.expand3x3_4 =", "= BBBConv2d(256, 32, kernel_size=1) self.squeeze_activation4 = nn.Softplus() self.expand3x3_4 = BBBConv2d(32, 256, kernel_size=3, padding=1)", "kernel_size=3, stride=2) self.soft1 = nn.Softplus() self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) # Fire module", "modified Fire modules and Bayesian layers. 
\"\"\" def __init__(self, outputs, inputs): super(BBBSqueezeNet, self).__init__()", "BBBConv2d(64, 512, kernel_size=3, padding=1) self.expand3x3_activation8 = nn.Softplus() self.drop1 = nn.Dropout(p=0.5) self.conv2 = BBBConv2d(512,", "BBBConv2d(48, 384, kernel_size=3, padding=1) self.expand3x3_activation6 = nn.Softplus() # Fire module 7 self.squeeze7 =", "Fire module 3 self.squeeze3 = BBBConv2d(128, 32, kernel_size=1) self.squeeze_activation3 = nn.Softplus() self.expand3x3_3 =", "= nn.Softplus() self.expand3x3_4 = BBBConv2d(32, 256, kernel_size=3, padding=1) self.expand3x3_activation4 = nn.Softplus() self.pool3 =", "self.squeeze_activation2, self.expand3x3_2, self.expand3x3_activation2, self.pool2, self.squeeze3, self.squeeze_activation3, self.expand3x3_3, self.expand3x3_activation3, self.squeeze4, self.squeeze_activation4, self.expand3x3_4, self.expand3x3_activation4, self.pool3,", "\"\"\" def __init__(self, outputs, inputs): super(BBBSqueezeNet, self).__init__() self.conv1 = BBBConv2d(inputs, 64, kernel_size=3, stride=2)", "self.expand3x3_activation5, self.squeeze6, self.squeeze_activation6, self.expand3x3_6, self.expand3x3_activation6, self.squeeze7, self.squeeze_activation7, self.expand3x3_7, self.expand3x3_activation7, self.squeeze8, self.squeeze_activation8, self.expand3x3_8, self.expand3x3_activation8,", "'Forward pass with Bayesian weights' kl = 0 for layer in self.layers: if", "[self.conv1, self.soft1, self.pool1, self.squeeze1, self.squeeze_activation1, self.expand3x3_1, self.expand3x3_activation1, self.squeeze2, self.squeeze_activation2, self.expand3x3_2, self.expand3x3_activation2, self.pool2, self.squeeze3,", "= BBBConv2d(128, 32, kernel_size=1) self.squeeze_activation3 = nn.Softplus() self.expand3x3_3 = BBBConv2d(32, 256, kernel_size=3, padding=1)", "= nn.Dropout(p=0.5) self.conv2 = BBBConv2d(512, outputs, kernel_size=1) self.soft2 = nn.Softplus() self.flatten = FlattenLayer(13", "64, kernel_size=1) self.squeeze_activation7 = nn.Softplus() 
self.expand3x3_7 = BBBConv2d(64, 512, kernel_size=3, padding=1) self.expand3x3_activation7 =", "nn.Softplus() self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) # Fire module 3 self.squeeze3 = BBBConv2d(128,", "padding=1) self.expand3x3_activation5 = nn.Softplus() # Fire module 6 self.squeeze6 = BBBConv2d(384, 48, kernel_size=1)", "padding=1) self.expand3x3_activation1 = nn.Softplus() # Fire module 2 self.squeeze2 = BBBConv2d(128, 16, kernel_size=1)", "BBBConv2d(64, 16, kernel_size=1) self.squeeze_activation1 = nn.Softplus() self.expand3x3_1 = BBBConv2d(16, 128, kernel_size=3, padding=1) self.expand3x3_activation1", "32, kernel_size=1) self.squeeze_activation4 = nn.Softplus() self.expand3x3_4 = BBBConv2d(32, 256, kernel_size=3, padding=1) self.expand3x3_activation4 =", "kernel_size=1) self.squeeze_activation1 = nn.Softplus() self.expand3x3_1 = BBBConv2d(16, 128, kernel_size=3, padding=1) self.expand3x3_activation1 = nn.Softplus()", "for layer in self.layers: if hasattr(layer, 'convprobforward') and callable(layer.convprobforward): x, _kl, = layer.convprobforward(x)", "= nn.Softplus() self.flatten = FlattenLayer(13 * 13 * 100) self.fc1 = BBBLinearFactorial(13 *", "self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) # Fire module 1 self.squeeze1 = BBBConv2d(64, 16,", "self.expand3x3_3 = BBBConv2d(32, 256, kernel_size=3, padding=1) self.expand3x3_activation3 = nn.Softplus() # Fire module 4", "stride=2, ceil_mode=True) # Fire module 5 self.squeeze5 = BBBConv2d(256, 48, kernel_size=1) self.squeeze_activation5 =", "stride=2) self.soft1 = nn.Softplus() self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) # Fire module 1", "modules and Bayesian layers. 
\"\"\" def __init__(self, outputs, inputs): super(BBBSqueezeNet, self).__init__() self.conv1 =", "self.squeeze_activation3, self.expand3x3_3, self.expand3x3_activation3, self.squeeze4, self.squeeze_activation4, self.expand3x3_4, self.expand3x3_activation4, self.pool3, self.squeeze5, self.squeeze_activation5, self.expand3x3_5, self.expand3x3_activation5, self.squeeze6,", "self.soft2 = nn.Softplus() self.flatten = FlattenLayer(13 * 13 * 100) self.fc1 = BBBLinearFactorial(13", "and callable(layer.fcprobforward): x, _kl, = layer.fcprobforward(x) kl += _kl else: x = layer(x)", "self.squeeze5, self.squeeze_activation5, self.expand3x3_5, self.expand3x3_activation5, self.squeeze6, self.squeeze_activation6, self.expand3x3_6, self.expand3x3_activation6, self.squeeze7, self.squeeze_activation7, self.expand3x3_7, self.expand3x3_activation7, self.squeeze8,", "module 8 self.squeeze8 = BBBConv2d(512, 64, kernel_size=1) self.squeeze_activation8 = nn.Softplus() self.expand3x3_8 = BBBConv2d(64,", "# Fire module 8 self.squeeze8 = BBBConv2d(512, 64, kernel_size=1) self.squeeze_activation8 = nn.Softplus() self.expand3x3_8", "= nn.Softplus() self.expand3x3_1 = BBBConv2d(16, 128, kernel_size=3, padding=1) self.expand3x3_activation1 = nn.Softplus() # Fire", "16, kernel_size=1) self.squeeze_activation2 = nn.Softplus() self.expand3x3_2 = BBBConv2d(16, 128, kernel_size=3, padding=1) self.expand3x3_activation2 =", "16, kernel_size=1) self.squeeze_activation1 = nn.Softplus() self.expand3x3_1 = BBBConv2d(16, 128, kernel_size=3, padding=1) self.expand3x3_activation1 =", "self.fc1] self.layers = nn.ModuleList(layers) def probforward(self, x): 'Forward pass with Bayesian weights' kl", "self.squeeze6, self.squeeze_activation6, self.expand3x3_6, self.expand3x3_activation6, self.squeeze7, self.squeeze_activation7, self.expand3x3_7, self.expand3x3_activation7, self.squeeze8, self.squeeze_activation8, self.expand3x3_8, self.expand3x3_activation8, self.drop1,", "module 6 self.squeeze6 = BBBConv2d(384, 
48, kernel_size=1) self.squeeze_activation6 = nn.Softplus() self.expand3x3_6 = BBBConv2d(48,", "Fire module 6 self.squeeze6 = BBBConv2d(384, 48, kernel_size=1) self.squeeze_activation6 = nn.Softplus() self.expand3x3_6 =", "* 13 * 100) self.fc1 = BBBLinearFactorial(13 * 13 * 100, outputs) layers", "0 for layer in self.layers: if hasattr(layer, 'convprobforward') and callable(layer.convprobforward): x, _kl, =", "self.pool1, self.squeeze1, self.squeeze_activation1, self.expand3x3_1, self.expand3x3_activation1, self.squeeze2, self.squeeze_activation2, self.expand3x3_2, self.expand3x3_activation2, self.pool2, self.squeeze3, self.squeeze_activation3, self.expand3x3_3,", "kernel_size=3, padding=1) self.expand3x3_activation5 = nn.Softplus() # Fire module 6 self.squeeze6 = BBBConv2d(384, 48,", "self.conv1 = BBBConv2d(inputs, 64, kernel_size=3, stride=2) self.soft1 = nn.Softplus() self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2,", "nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) # Fire module 3 self.squeeze3 = BBBConv2d(128, 32, kernel_size=1) self.squeeze_activation3", "module 4 self.squeeze4 = BBBConv2d(256, 32, kernel_size=1) self.squeeze_activation4 = nn.Softplus() self.expand3x3_4 = BBBConv2d(32,", "self.expand3x3_1, self.expand3x3_activation1, self.squeeze2, self.squeeze_activation2, self.expand3x3_2, self.expand3x3_activation2, self.pool2, self.squeeze3, self.squeeze_activation3, self.expand3x3_3, self.expand3x3_activation3, self.squeeze4, self.squeeze_activation4,", "hasattr(layer, 'convprobforward') and callable(layer.convprobforward): x, _kl, = layer.convprobforward(x) kl += _kl elif hasattr(layer,", "padding=1) self.expand3x3_activation2 = nn.Softplus() self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) # Fire module 3", "self.soft2, self.flatten, self.fc1] self.layers = nn.ModuleList(layers) def probforward(self, x): 'Forward pass with Bayesian", "self.flatten = FlattenLayer(13 * 13 * 100) self.fc1 = BBBLinearFactorial(13 * 13 *" ]
[ "class TestEmailValidator(unittest.TestCase): def test_validates_document(self): email_validator = EmailValidator() test_data = [ (\"<EMAIL>\", True), (\"<EMAIL>\",", "import EmailValidator class TestEmailValidator(unittest.TestCase): def test_validates_document(self): email_validator = EmailValidator() test_data = [ (\"<EMAIL>\",", "False), (\"<EMAIL>\", False), (\".<EMAIL>\", False), (\"<EMAIL>.\", False), (\"person@\", False) ] for item in", "True), (\"<EMAIL>\", True), (\"<EMAIL>\", True), (\"<EMAIL>\", False), (\"<EMAIL>\", False), (\"<EMAIL>\", False), (\".<EMAIL>\", False),", "(\"<EMAIL>\", True), (\"<EMAIL>\", True), (\"<EMAIL>\", True), (\"<EMAIL>\", False), (\"<EMAIL>\", False), (\"<EMAIL>\", False), (\".<EMAIL>\",", "email_validator import EmailValidator class TestEmailValidator(unittest.TestCase): def test_validates_document(self): email_validator = EmailValidator() test_data = [", "EmailValidator() test_data = [ (\"<EMAIL>\", True), (\"<EMAIL>\", True), (\"<EMAIL>\", True), (\"<EMAIL>\", True), (\"<EMAIL>\",", "False), (\"<EMAIL>\", False), (\"<EMAIL>\", False), (\".<EMAIL>\", False), (\"<EMAIL>.\", False), (\"person@\", False) ] for", "test_validates_document(self): email_validator = EmailValidator() test_data = [ (\"<EMAIL>\", True), (\"<EMAIL>\", True), (\"<EMAIL>\", True),", "[ (\"<EMAIL>\", True), (\"<EMAIL>\", True), (\"<EMAIL>\", True), (\"<EMAIL>\", True), (\"<EMAIL>\", False), (\"<EMAIL>\", False),", "email_validator = EmailValidator() test_data = [ (\"<EMAIL>\", True), (\"<EMAIL>\", True), (\"<EMAIL>\", True), (\"<EMAIL>\",", "from email_validator import EmailValidator class TestEmailValidator(unittest.TestCase): def test_validates_document(self): email_validator = EmailValidator() test_data =", "(\"<EMAIL>\", False), (\"<EMAIL>\", False), (\".<EMAIL>\", False), (\"<EMAIL>.\", False), (\"person@\", False) ] for item", "test_data = [ (\"<EMAIL>\", True), (\"<EMAIL>\", True), (\"<EMAIL>\", True), (\"<EMAIL>\", True), (\"<EMAIL>\", False),", 
"True), (\"<EMAIL>\", False), (\"<EMAIL>\", False), (\"<EMAIL>\", False), (\".<EMAIL>\", False), (\"<EMAIL>.\", False), (\"person@\", False)", "def test_validates_document(self): email_validator = EmailValidator() test_data = [ (\"<EMAIL>\", True), (\"<EMAIL>\", True), (\"<EMAIL>\",", "True), (\"<EMAIL>\", True), (\"<EMAIL>\", True), (\"<EMAIL>\", True), (\"<EMAIL>\", False), (\"<EMAIL>\", False), (\"<EMAIL>\", False),", "False), (\".<EMAIL>\", False), (\"<EMAIL>.\", False), (\"person@\", False) ] for item in test_data: self.assertEqual(item[1],", "(\"<EMAIL>\", True), (\"<EMAIL>\", True), (\"<EMAIL>\", True), (\"<EMAIL>\", True), (\"<EMAIL>\", False), (\"<EMAIL>\", False), (\"<EMAIL>\",", "TestEmailValidator(unittest.TestCase): def test_validates_document(self): email_validator = EmailValidator() test_data = [ (\"<EMAIL>\", True), (\"<EMAIL>\", True),", "unittest from email_validator import EmailValidator class TestEmailValidator(unittest.TestCase): def test_validates_document(self): email_validator = EmailValidator() test_data", "= EmailValidator() test_data = [ (\"<EMAIL>\", True), (\"<EMAIL>\", True), (\"<EMAIL>\", True), (\"<EMAIL>\", True),", "(\"<EMAIL>\", False), (\".<EMAIL>\", False), (\"<EMAIL>.\", False), (\"person@\", False) ] for item in test_data:", "True), (\"<EMAIL>\", True), (\"<EMAIL>\", False), (\"<EMAIL>\", False), (\"<EMAIL>\", False), (\".<EMAIL>\", False), (\"<EMAIL>.\", False),", "(\".<EMAIL>\", False), (\"<EMAIL>.\", False), (\"person@\", False) ] for item in test_data: self.assertEqual(item[1], email_validator.is_valid(item[0]))", "import unittest from email_validator import EmailValidator class TestEmailValidator(unittest.TestCase): def test_validates_document(self): email_validator = EmailValidator()", "(\"<EMAIL>\", False), (\"<EMAIL>\", False), (\"<EMAIL>\", False), (\".<EMAIL>\", False), (\"<EMAIL>.\", False), (\"person@\", False) ]", "(\"<EMAIL>\", True), (\"<EMAIL>\", False), (\"<EMAIL>\", False), (\"<EMAIL>\", False), 
(\".<EMAIL>\", False), (\"<EMAIL>.\", False), (\"person@\",", "= [ (\"<EMAIL>\", True), (\"<EMAIL>\", True), (\"<EMAIL>\", True), (\"<EMAIL>\", True), (\"<EMAIL>\", False), (\"<EMAIL>\",", "<filename>src/python/validators/test_email_validator.py import unittest from email_validator import EmailValidator class TestEmailValidator(unittest.TestCase): def test_validates_document(self): email_validator =", "EmailValidator class TestEmailValidator(unittest.TestCase): def test_validates_document(self): email_validator = EmailValidator() test_data = [ (\"<EMAIL>\", True),", "(\"<EMAIL>\", True), (\"<EMAIL>\", True), (\"<EMAIL>\", False), (\"<EMAIL>\", False), (\"<EMAIL>\", False), (\".<EMAIL>\", False), (\"<EMAIL>.\"," ]
[ "django.contrib import admin from .models import Balance class BalanceAdmin(admin.ModelAdmin): list_display = ('balance',) admin.site.register(Balance)", "from django.contrib import admin from .models import Balance class BalanceAdmin(admin.ModelAdmin): list_display = ('balance',)" ]
[ "\"\"\" self.set_error_rate(act_type_rate, slot_rate) def set_error_rate(self, act_type_rate, slot_rate): \"\"\" Set error rate parameter for", "self.set_error_rate(act_type_rate, slot_rate) def set_error_rate(self, act_type_rate, slot_rate): \"\"\" Set error rate parameter for error", "Apply the error model on dialog act. Args: dialog_act (tuple): Dialog act. Returns:", "apply(self, dialog_act): \"\"\" Apply the error model on dialog act. Args: dialog_act (tuple):", "= slot_rate def apply(self, dialog_act): \"\"\" Apply the error model on dialog act.", "(c) Microsoft Corporation. # Licensed under the MIT license. \"\"\" \"\"\" class ErrorNLU:", "def apply(self, dialog_act): \"\"\" Apply the error model on dialog act. Args: dialog_act", "slot_rate): \"\"\" Set error rate parameter for error model. Args: act_type_rate (float): The", "Licensed under the MIT license. \"\"\" \"\"\" class ErrorNLU: \"\"\"Base model for generating", "The error rate applied on dialog act type. slot_rate (float): Error rate applied", "dialog act. Args: dialog_act (tuple): Dialog act. Returns: dialog_act (tuple): Dialog act with", "under the MIT license. \"\"\" \"\"\" class ErrorNLU: \"\"\"Base model for generating NLU", "slot_rate=0.0): \"\"\" Args: act_type_rate (float): The error rate applied on dialog act type.", "error.\"\"\" def __init__(self, act_type_rate=0.0, slot_rate=0.0): \"\"\" Args: act_type_rate (float): The error rate applied", "on slots. \"\"\" self.act_type_rate = act_type_rate self.slot_rate = slot_rate def apply(self, dialog_act): \"\"\"", "# Licensed under the MIT license. \"\"\" \"\"\" class ErrorNLU: \"\"\"Base model for", "act_type_rate self.slot_rate = slot_rate def apply(self, dialog_act): \"\"\" Apply the error model on", "(tuple): Dialog act. Returns: dialog_act (tuple): Dialog act with noise. 
\"\"\" #TODO return", "model for generating NLU error.\"\"\" def __init__(self, act_type_rate=0.0, slot_rate=0.0): \"\"\" Args: act_type_rate (float):", "self.act_type_rate = act_type_rate self.slot_rate = slot_rate def apply(self, dialog_act): \"\"\" Apply the error", "on dialog act type. slot_rate (float): Error rate applied on slots. \"\"\" self.act_type_rate", "Error rate applied on slots. \"\"\" self.set_error_rate(act_type_rate, slot_rate) def set_error_rate(self, act_type_rate, slot_rate): \"\"\"", "MIT license. \"\"\" \"\"\" class ErrorNLU: \"\"\"Base model for generating NLU error.\"\"\" def", "applied on dialog act type. slot_rate (float): Error rate applied on slots. \"\"\"", "type. slot_rate (float): Error rate applied on slots. \"\"\" self.set_error_rate(act_type_rate, slot_rate) def set_error_rate(self,", "= act_type_rate self.slot_rate = slot_rate def apply(self, dialog_act): \"\"\" Apply the error model", "dialog act type. slot_rate (float): Error rate applied on slots. \"\"\" self.act_type_rate =", "\"\"\" Set error rate parameter for error model. Args: act_type_rate (float): The error", "Copyright (c) Microsoft Corporation. # Licensed under the MIT license. \"\"\" \"\"\" class", "# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. \"\"\" \"\"\"", "ErrorNLU: \"\"\"Base model for generating NLU error.\"\"\" def __init__(self, act_type_rate=0.0, slot_rate=0.0): \"\"\" Args:", "type. slot_rate (float): Error rate applied on slots. \"\"\" self.act_type_rate = act_type_rate self.slot_rate", "slot_rate def apply(self, dialog_act): \"\"\" Apply the error model on dialog act. Args:", "rate applied on slots. \"\"\" self.set_error_rate(act_type_rate, slot_rate) def set_error_rate(self, act_type_rate, slot_rate): \"\"\" Set", "\"\"\" self.act_type_rate = act_type_rate self.slot_rate = slot_rate def apply(self, dialog_act): \"\"\" Apply the", "on dialog act type. slot_rate (float): Error rate applied on slots. 
\"\"\" self.set_error_rate(act_type_rate,", "Corporation. # Licensed under the MIT license. \"\"\" \"\"\" class ErrorNLU: \"\"\"Base model", "applied on slots. \"\"\" self.set_error_rate(act_type_rate, slot_rate) def set_error_rate(self, act_type_rate, slot_rate): \"\"\" Set error", "Args: act_type_rate (float): The error rate applied on dialog act type. slot_rate (float):", "on slots. \"\"\" self.set_error_rate(act_type_rate, slot_rate) def set_error_rate(self, act_type_rate, slot_rate): \"\"\" Set error rate", "dialog act type. slot_rate (float): Error rate applied on slots. \"\"\" self.set_error_rate(act_type_rate, slot_rate)", "act_type_rate, slot_rate): \"\"\" Set error rate parameter for error model. Args: act_type_rate (float):", "set_error_rate(self, act_type_rate, slot_rate): \"\"\" Set error rate parameter for error model. Args: act_type_rate", "\"\"\" class ErrorNLU: \"\"\"Base model for generating NLU error.\"\"\" def __init__(self, act_type_rate=0.0, slot_rate=0.0):", "error model on dialog act. Args: dialog_act (tuple): Dialog act. Returns: dialog_act (tuple):", "(float): Error rate applied on slots. \"\"\" self.act_type_rate = act_type_rate self.slot_rate = slot_rate", "\"\"\"Base model for generating NLU error.\"\"\" def __init__(self, act_type_rate=0.0, slot_rate=0.0): \"\"\" Args: act_type_rate", "def set_error_rate(self, act_type_rate, slot_rate): \"\"\" Set error rate parameter for error model. Args:", "applied on slots. \"\"\" self.act_type_rate = act_type_rate self.slot_rate = slot_rate def apply(self, dialog_act):", "(float): The error rate applied on dialog act type. slot_rate (float): Error rate", "\"\"\" Apply the error model on dialog act. Args: dialog_act (tuple): Dialog act.", "on dialog act. Args: dialog_act (tuple): Dialog act. 
Returns: dialog_act (tuple): Dialog act", "act_type_rate=0.0, slot_rate=0.0): \"\"\" Args: act_type_rate (float): The error rate applied on dialog act", "def __init__(self, act_type_rate=0.0, slot_rate=0.0): \"\"\" Args: act_type_rate (float): The error rate applied on", "rate applied on dialog act type. slot_rate (float): Error rate applied on slots.", "slots. \"\"\" self.act_type_rate = act_type_rate self.slot_rate = slot_rate def apply(self, dialog_act): \"\"\" Apply", "(float): Error rate applied on slots. \"\"\" self.set_error_rate(act_type_rate, slot_rate) def set_error_rate(self, act_type_rate, slot_rate):", "the MIT license. \"\"\" \"\"\" class ErrorNLU: \"\"\"Base model for generating NLU error.\"\"\"", "error rate applied on dialog act type. slot_rate (float): Error rate applied on", "dialog_act): \"\"\" Apply the error model on dialog act. Args: dialog_act (tuple): Dialog", "act type. slot_rate (float): Error rate applied on slots. \"\"\" self.set_error_rate(act_type_rate, slot_rate) def", "generating NLU error.\"\"\" def __init__(self, act_type_rate=0.0, slot_rate=0.0): \"\"\" Args: act_type_rate (float): The error", "self.slot_rate = slot_rate def apply(self, dialog_act): \"\"\" Apply the error model on dialog", "act type. slot_rate (float): Error rate applied on slots. \"\"\" self.act_type_rate = act_type_rate", "\"\"\" \"\"\" class ErrorNLU: \"\"\"Base model for generating NLU error.\"\"\" def __init__(self, act_type_rate=0.0,", "Error rate applied on slots. \"\"\" self.act_type_rate = act_type_rate self.slot_rate = slot_rate def", "license. \"\"\" \"\"\" class ErrorNLU: \"\"\"Base model for generating NLU error.\"\"\" def __init__(self,", "error rate parameter for error model. Args: act_type_rate (float): The error rate applied", "NLU error.\"\"\" def __init__(self, act_type_rate=0.0, slot_rate=0.0): \"\"\" Args: act_type_rate (float): The error rate", "error model. 
Args: act_type_rate (float): The error rate applied on dialog act type.", "parameter for error model. Args: act_type_rate (float): The error rate applied on dialog", "slot_rate (float): Error rate applied on slots. \"\"\" self.set_error_rate(act_type_rate, slot_rate) def set_error_rate(self, act_type_rate,", "for error model. Args: act_type_rate (float): The error rate applied on dialog act", "rate applied on slots. \"\"\" self.act_type_rate = act_type_rate self.slot_rate = slot_rate def apply(self,", "slot_rate (float): Error rate applied on slots. \"\"\" self.act_type_rate = act_type_rate self.slot_rate =", "class ErrorNLU: \"\"\"Base model for generating NLU error.\"\"\" def __init__(self, act_type_rate=0.0, slot_rate=0.0): \"\"\"", "\"\"\" Args: act_type_rate (float): The error rate applied on dialog act type. slot_rate", "model on dialog act. Args: dialog_act (tuple): Dialog act. Returns: dialog_act (tuple): Dialog", "Microsoft Corporation. # Licensed under the MIT license. \"\"\" \"\"\" class ErrorNLU: \"\"\"Base", "slot_rate) def set_error_rate(self, act_type_rate, slot_rate): \"\"\" Set error rate parameter for error model.", "dialog_act (tuple): Dialog act. Returns: dialog_act (tuple): Dialog act with noise. \"\"\" #TODO", "the error model on dialog act. Args: dialog_act (tuple): Dialog act. Returns: dialog_act", "act. Args: dialog_act (tuple): Dialog act. Returns: dialog_act (tuple): Dialog act with noise.", "for generating NLU error.\"\"\" def __init__(self, act_type_rate=0.0, slot_rate=0.0): \"\"\" Args: act_type_rate (float): The", "Args: dialog_act (tuple): Dialog act. Returns: dialog_act (tuple): Dialog act with noise. \"\"\"", "rate parameter for error model. Args: act_type_rate (float): The error rate applied on", "model. Args: act_type_rate (float): The error rate applied on dialog act type. slot_rate", "__init__(self, act_type_rate=0.0, slot_rate=0.0): \"\"\" Args: act_type_rate (float): The error rate applied on dialog", "slots. 
\"\"\" self.set_error_rate(act_type_rate, slot_rate) def set_error_rate(self, act_type_rate, slot_rate): \"\"\" Set error rate parameter", "Set error rate parameter for error model. Args: act_type_rate (float): The error rate", "act_type_rate (float): The error rate applied on dialog act type. slot_rate (float): Error" ]
[ "saberbot locations: json file containing the openweathermap location data \"\"\" def __init__(self, bot):", "location, country): print(location) for item in self.locations_json: if item[\"name\"] == location: if not", "l.append(country) l.append(location) print(l) location_id = self.get_location_id(location, country) if location_id != None: weather_url=f\"https://api.openweathermap.org/data/2.5/weather?id={location_id}&units=metric&APPID={self.apikey}\" forecast_url=f\"https://api.openweathermap.org/data/2.5/forecast/?id={location_id}&cnt=1&units=metric&APPID={self.apikey}\"", "with open(self.conf[\"citylist\"]) as jsonfile: self.locations_json = json.loads(jsonfile.read()) def parsequery(self, *args): \"\"\"parses list of", "\"\"\"Weather class handles weather using openweather api params: attributes: apikey: api key for", "config_location: configuration location for saberbot locations: json file containing the openweathermap location data", "= self.get_data(location_id, weather_url) forecastdata = self.get_data(location_id, forecast_url) country = weatherdata[\"sys\"][\"country\"] print(weatherdata) relevant[\"today\"] =", "{relevant['today']['desc']} {int(relevant['today']['temp'])} °C / {int(self.CtoF(relevant['today']['temp']))} °F\") await self.bot.send_message(ctx.message.channel, f\"tomorrow: {relevant['tomorrow']['desc']}, {int(relevant['tomorrow']['temp'])} °C /", "= re.sub(regex, \"\", location) #transform location into string with spaces l = []", "requests, io, re class Weather: \"\"\"Weather class handles weather using openweather api params:", "import discord from discord.ext import commands import json, requests, io, re class Weather:", "params: attributes: apikey: api key for openweather config_location: configuration location for saberbot locations:", "locations: json file containing the openweathermap location data \"\"\" def __init__(self, bot): self.bot", "api params: attributes: apikey: api key for openweather config_location: configuration 
location for saberbot", "from discord.ext import commands import json, requests, io, re class Weather: \"\"\"Weather class", "get_location_id(self, location, country): print(location) for item in self.locations_json: if item[\"name\"] == location: if", "or item[\"country\"]== country.upper(): return str(item[\"id\"]) return None def get_data(self, id, url_string): \"\"\"params: id", "*args): \"\"\"parses list of argument to string\"\"\" querystring = \"\" keywords = {}", "specify country=<ID>, for example !weather London country=UK\"\"\" relevant = {} location, keywords =", "country=UK\"\"\" relevant = {} location, keywords = self.parsequery(*args) if keywords: country = keywords[\"country\"]", "\"temp\" : weatherdata[\"main\"][\"temp\"]} relevant[\"tomorrow\"] = {\"desc\" : forecastdata[\"list\"][0][\"weather\"][0][\"description\"], \"temp\" : forecastdata[\"list\"][0][\"main\"][\"temp\"]} await self.bot.send_message(ctx.message.channel,", "weather by city and optionally a country usage: !weather <city>, optionally specify country=<ID>,", "bot): self.bot = bot self.conf = self.bot.config[\"weather\"] self.apikey = self.conf[\"apikey\"] with open(self.conf[\"citylist\"]) as", "country.upper(): return str(item[\"id\"]) return None def get_data(self, id, url_string): \"\"\"params: id - location", "\"\" regex = re.compile(\"([^\\w\\s{1}]|\\d|_|\\s+)\") #\\W_ didn't work in testing for some reason? 
location", "print(l) location_id = self.get_location_id(location, country) if location_id != None: weather_url=f\"https://api.openweathermap.org/data/2.5/weather?id={location_id}&units=metric&APPID={self.apikey}\" forecast_url=f\"https://api.openweathermap.org/data/2.5/forecast/?id={location_id}&cnt=1&units=metric&APPID={self.apikey}\" weatherdata =", "f\"tomorrow: {relevant['tomorrow']['desc']}, {int(relevant['tomorrow']['temp'])} °C / {int(self.CtoF(relevant['tomorrow']['temp']))} °F\") else: await self.bot.send_message(ctx.message.channel, f\"Sorry, I don't", "object containing json response\"\"\" response = requests.get(url_string) data = json.loads(response.text) return data def", "class Weather: \"\"\"Weather class handles weather using openweather api params: attributes: apikey: api", "location into string with spaces l = [] l.append(country) l.append(location) print(l) location_id =", "keywords: country = keywords[\"country\"] else: country = \"\" regex = re.compile(\"([^\\w\\s{1}]|\\d|_|\\s+)\") #\\W_ didn't", "if location_id != None: weather_url=f\"https://api.openweathermap.org/data/2.5/weather?id={location_id}&units=metric&APPID={self.apikey}\" forecast_url=f\"https://api.openweathermap.org/data/2.5/forecast/?id={location_id}&cnt=1&units=metric&APPID={self.apikey}\" weatherdata = self.get_data(location_id, weather_url) forecastdata = self.get_data(location_id,", "list of argument to string\"\"\" querystring = \"\" keywords = {} print(args) for", "arg.split(\"=\") keywords[larg[0]] = larg[1] continue querystring += f\" {str(arg)}\" querystring = querystring.lstrip() return", "re.compile(\"([^\\w\\s{1}]|\\d|_|\\s+)\") #\\W_ didn't work in testing for some reason? 
location = re.sub(regex, \"\",", "querystring += f\" {str(arg)}\" querystring = querystring.lstrip() return querystring, keywords def get_location_id(self, location,", "None: weather_url=f\"https://api.openweathermap.org/data/2.5/weather?id={location_id}&units=metric&APPID={self.apikey}\" forecast_url=f\"https://api.openweathermap.org/data/2.5/forecast/?id={location_id}&cnt=1&units=metric&APPID={self.apikey}\" weatherdata = self.get_data(location_id, weather_url) forecastdata = self.get_data(location_id, forecast_url) country =", "larg = arg.split(\"=\") keywords[larg[0]] = larg[1] continue querystring += f\" {str(arg)}\" querystring =", "- location id returns: data - dictionary object containing json response\"\"\" response =", "for item in self.locations_json: if item[\"name\"] == location: if not country or item[\"country\"]==", "self.get_location_id(location, country) if location_id != None: weather_url=f\"https://api.openweathermap.org/data/2.5/weather?id={location_id}&units=metric&APPID={self.apikey}\" forecast_url=f\"https://api.openweathermap.org/data/2.5/forecast/?id={location_id}&cnt=1&units=metric&APPID={self.apikey}\" weatherdata = self.get_data(location_id, weather_url) forecastdata", "data \"\"\" def __init__(self, bot): self.bot = bot self.conf = self.bot.config[\"weather\"] self.apikey =", "if \"=\" in arg: larg = arg.split(\"=\") keywords[larg[0]] = larg[1] continue querystring +=", "work in testing for some reason? 
location = re.sub(regex, \"\", location) #transform location", "{\"desc\" : forecastdata[\"list\"][0][\"weather\"][0][\"description\"], \"temp\" : forecastdata[\"list\"][0][\"main\"][\"temp\"]} await self.bot.send_message(ctx.message.channel, f\"weather for {location}, {country}: today", "handles weather using openweather api params: attributes: apikey: api key for openweather config_location:", "attributes: apikey: api key for openweather config_location: configuration location for saberbot locations: json", "[] l.append(country) l.append(location) print(l) location_id = self.get_location_id(location, country) if location_id != None: weather_url=f\"https://api.openweathermap.org/data/2.5/weather?id={location_id}&units=metric&APPID={self.apikey}\"", "!weather London country=UK\"\"\" relevant = {} location, keywords = self.parsequery(*args) if keywords: country", "print(args) for arg in args: if \"=\" in arg: larg = arg.split(\"=\") keywords[larg[0]]", "def __init__(self, bot): self.bot = bot self.conf = self.bot.config[\"weather\"] self.apikey = self.conf[\"apikey\"] with", "for arg in args: if \"=\" in arg: larg = arg.split(\"=\") keywords[larg[0]] =", "json.loads(response.text) return data def CtoF(self, c): return (9/5)*c+32 @commands.command(pass_context=True) @commands.cooldown(1, 5.0, commands.BucketType.server) async", "data def CtoF(self, c): return (9/5)*c+32 @commands.command(pass_context=True) @commands.cooldown(1, 5.0, commands.BucketType.server) async def weather(self,", "reason? 
location = re.sub(regex, \"\", location) #transform location into string with spaces l", "commands import json, requests, io, re class Weather: \"\"\"Weather class handles weather using", "in self.locations_json: if item[\"name\"] == location: if not country or item[\"country\"]== country.upper(): return", "optionally a country usage: !weather <city>, optionally specify country=<ID>, for example !weather London", "usage: !weather <city>, optionally specify country=<ID>, for example !weather London country=UK\"\"\" relevant =", "country = \"\" regex = re.compile(\"([^\\w\\s{1}]|\\d|_|\\s+)\") #\\W_ didn't work in testing for some", "forecast_url) country = weatherdata[\"sys\"][\"country\"] print(weatherdata) relevant[\"today\"] = {\"desc\" : weatherdata[\"weather\"][0][\"description\"], \"temp\" : weatherdata[\"main\"][\"temp\"]}", "{} print(args) for arg in args: if \"=\" in arg: larg = arg.split(\"=\")", "country = weatherdata[\"sys\"][\"country\"] print(weatherdata) relevant[\"today\"] = {\"desc\" : weatherdata[\"weather\"][0][\"description\"], \"temp\" : weatherdata[\"main\"][\"temp\"]} relevant[\"tomorrow\"]", "return (9/5)*c+32 @commands.command(pass_context=True) @commands.cooldown(1, 5.0, commands.BucketType.server) async def weather(self, ctx, *args): \"\"\"Search for", "def weather(self, ctx, *args): \"\"\"Search for weather by city and optionally a country", "keywords = {} print(args) for arg in args: if \"=\" in arg: larg", "{country}: today {relevant['today']['desc']} {int(relevant['today']['temp'])} °C / {int(self.CtoF(relevant['today']['temp']))} °F\") await self.bot.send_message(ctx.message.channel, f\"tomorrow: {relevant['tomorrow']['desc']}, {int(relevant['tomorrow']['temp'])}", "{} location, keywords = self.parsequery(*args) if keywords: country = keywords[\"country\"] else: country =", "item[\"name\"] == location: if not country or item[\"country\"]== country.upper(): return str(item[\"id\"]) return None", "= requests.get(url_string) data = 
json.loads(response.text) return data def CtoF(self, c): return (9/5)*c+32 @commands.command(pass_context=True)", "item[\"country\"]== country.upper(): return str(item[\"id\"]) return None def get_data(self, id, url_string): \"\"\"params: id -", "self.apikey = self.conf[\"apikey\"] with open(self.conf[\"citylist\"]) as jsonfile: self.locations_json = json.loads(jsonfile.read()) def parsequery(self, *args):", "api key for openweather config_location: configuration location for saberbot locations: json file containing", "\"\", location) #transform location into string with spaces l = [] l.append(country) l.append(location)", "openweathermap location data \"\"\" def __init__(self, bot): self.bot = bot self.conf = self.bot.config[\"weather\"]", "\"\"\"Search for weather by city and optionally a country usage: !weather <city>, optionally", "\"=\" in arg: larg = arg.split(\"=\") keywords[larg[0]] = larg[1] continue querystring += f\"", "self.bot.send_message(ctx.message.channel, f\"weather for {location}, {country}: today {relevant['today']['desc']} {int(relevant['today']['temp'])} °C / {int(self.CtoF(relevant['today']['temp']))} °F\") await", "today {relevant['today']['desc']} {int(relevant['today']['temp'])} °C / {int(self.CtoF(relevant['today']['temp']))} °F\") await self.bot.send_message(ctx.message.channel, f\"tomorrow: {relevant['tomorrow']['desc']}, {int(relevant['tomorrow']['temp'])} °C", "country=<ID>, for example !weather London country=UK\"\"\" relevant = {} location, keywords = self.parsequery(*args)", "\"\"\" def __init__(self, bot): self.bot = bot self.conf = self.bot.config[\"weather\"] self.apikey = self.conf[\"apikey\"]", "self.locations_json: if item[\"name\"] == location: if not country or item[\"country\"]== country.upper(): return str(item[\"id\"])", "*args): \"\"\"Search for weather by city and optionally a country usage: !weather <city>,", "= self.get_location_id(location, country) if location_id != None: 
weather_url=f\"https://api.openweathermap.org/data/2.5/weather?id={location_id}&units=metric&APPID={self.apikey}\" forecast_url=f\"https://api.openweathermap.org/data/2.5/forecast/?id={location_id}&cnt=1&units=metric&APPID={self.apikey}\" weatherdata = self.get_data(location_id, weather_url)", "= weatherdata[\"sys\"][\"country\"] print(weatherdata) relevant[\"today\"] = {\"desc\" : weatherdata[\"weather\"][0][\"description\"], \"temp\" : weatherdata[\"main\"][\"temp\"]} relevant[\"tomorrow\"] =", "if item[\"name\"] == location: if not country or item[\"country\"]== country.upper(): return str(item[\"id\"]) return", "larg[1] continue querystring += f\" {str(arg)}\" querystring = querystring.lstrip() return querystring, keywords def", "args: if \"=\" in arg: larg = arg.split(\"=\") keywords[larg[0]] = larg[1] continue querystring", "testing for some reason? location = re.sub(regex, \"\", location) #transform location into string", "\"temp\" : forecastdata[\"list\"][0][\"main\"][\"temp\"]} await self.bot.send_message(ctx.message.channel, f\"weather for {location}, {country}: today {relevant['today']['desc']} {int(relevant['today']['temp'])} °C", "item in self.locations_json: if item[\"name\"] == location: if not country or item[\"country\"]== country.upper():", "°C / {int(self.CtoF(relevant['tomorrow']['temp']))} °F\") else: await self.bot.send_message(ctx.message.channel, f\"Sorry, I don't know where {location}", "for some reason? location = re.sub(regex, \"\", location) #transform location into string with", "some reason? 
location = re.sub(regex, \"\", location) #transform location into string with spaces", "return None def get_data(self, id, url_string): \"\"\"params: id - location id returns: data", "openweather config_location: configuration location for saberbot locations: json file containing the openweathermap location", "return str(item[\"id\"]) return None def get_data(self, id, url_string): \"\"\"params: id - location id", "@commands.cooldown(1, 5.0, commands.BucketType.server) async def weather(self, ctx, *args): \"\"\"Search for weather by city", "jsonfile: self.locations_json = json.loads(jsonfile.read()) def parsequery(self, *args): \"\"\"parses list of argument to string\"\"\"", "country or item[\"country\"]== country.upper(): return str(item[\"id\"]) return None def get_data(self, id, url_string): \"\"\"params:", "requests.get(url_string) data = json.loads(response.text) return data def CtoF(self, c): return (9/5)*c+32 @commands.command(pass_context=True) @commands.cooldown(1,", "= json.loads(response.text) return data def CtoF(self, c): return (9/5)*c+32 @commands.command(pass_context=True) @commands.cooldown(1, 5.0, commands.BucketType.server)", "data = json.loads(response.text) return data def CtoF(self, c): return (9/5)*c+32 @commands.command(pass_context=True) @commands.cooldown(1, 5.0,", "keywords = self.parsequery(*args) if keywords: country = keywords[\"country\"] else: country = \"\" regex", "relevant[\"tomorrow\"] = {\"desc\" : forecastdata[\"list\"][0][\"weather\"][0][\"description\"], \"temp\" : forecastdata[\"list\"][0][\"main\"][\"temp\"]} await self.bot.send_message(ctx.message.channel, f\"weather for {location},", "commands.BucketType.server) async def weather(self, ctx, *args): \"\"\"Search for weather by city and optionally", "for {location}, {country}: today {relevant['today']['desc']} {int(relevant['today']['temp'])} °C / {int(self.CtoF(relevant['today']['temp']))} °F\") await self.bot.send_message(ctx.message.channel, f\"tomorrow:", "discord from 
discord.ext import commands import json, requests, io, re class Weather: \"\"\"Weather", "<city>, optionally specify country=<ID>, for example !weather London country=UK\"\"\" relevant = {} location,", "json file containing the openweathermap location data \"\"\" def __init__(self, bot): self.bot =", "response = requests.get(url_string) data = json.loads(response.text) return data def CtoF(self, c): return (9/5)*c+32", "__init__(self, bot): self.bot = bot self.conf = self.bot.config[\"weather\"] self.apikey = self.conf[\"apikey\"] with open(self.conf[\"citylist\"])", "location, keywords = self.parsequery(*args) if keywords: country = keywords[\"country\"] else: country = \"\"", "self.parsequery(*args) if keywords: country = keywords[\"country\"] else: country = \"\" regex = re.compile(\"([^\\w\\s{1}]|\\d|_|\\s+)\")", "forecastdata = self.get_data(location_id, forecast_url) country = weatherdata[\"sys\"][\"country\"] print(weatherdata) relevant[\"today\"] = {\"desc\" : weatherdata[\"weather\"][0][\"description\"],", "location = re.sub(regex, \"\", location) #transform location into string with spaces l =", "= {\"desc\" : forecastdata[\"list\"][0][\"weather\"][0][\"description\"], \"temp\" : forecastdata[\"list\"][0][\"main\"][\"temp\"]} await self.bot.send_message(ctx.message.channel, f\"weather for {location}, {country}:", "json, requests, io, re class Weather: \"\"\"Weather class handles weather using openweather api", "class handles weather using openweather api params: attributes: apikey: api key for openweather", "= keywords[\"country\"] else: country = \"\" regex = re.compile(\"([^\\w\\s{1}]|\\d|_|\\s+)\") #\\W_ didn't work in", "json.loads(jsonfile.read()) def parsequery(self, *args): \"\"\"parses list of argument to string\"\"\" querystring = \"\"", "regex = re.compile(\"([^\\w\\s{1}]|\\d|_|\\s+)\") #\\W_ didn't work in testing for some reason? 
location =", "keywords[larg[0]] = larg[1] continue querystring += f\" {str(arg)}\" querystring = querystring.lstrip() return querystring,", "in arg: larg = arg.split(\"=\") keywords[larg[0]] = larg[1] continue querystring += f\" {str(arg)}\"", "= {} print(args) for arg in args: if \"=\" in arg: larg =", "weatherdata[\"weather\"][0][\"description\"], \"temp\" : weatherdata[\"main\"][\"temp\"]} relevant[\"tomorrow\"] = {\"desc\" : forecastdata[\"list\"][0][\"weather\"][0][\"description\"], \"temp\" : forecastdata[\"list\"][0][\"main\"][\"temp\"]} await", "\"\"\"params: id - location id returns: data - dictionary object containing json response\"\"\"", "by city and optionally a country usage: !weather <city>, optionally specify country=<ID>, for", "weather_url) forecastdata = self.get_data(location_id, forecast_url) country = weatherdata[\"sys\"][\"country\"] print(weatherdata) relevant[\"today\"] = {\"desc\" :", "arg: larg = arg.split(\"=\") keywords[larg[0]] = larg[1] continue querystring += f\" {str(arg)}\" querystring", "!= None: weather_url=f\"https://api.openweathermap.org/data/2.5/weather?id={location_id}&units=metric&APPID={self.apikey}\" forecast_url=f\"https://api.openweathermap.org/data/2.5/forecast/?id={location_id}&cnt=1&units=metric&APPID={self.apikey}\" weatherdata = self.get_data(location_id, weather_url) forecastdata = self.get_data(location_id, forecast_url) country", "id returns: data - dictionary object containing json response\"\"\" response = requests.get(url_string) data", "= re.compile(\"([^\\w\\s{1}]|\\d|_|\\s+)\") #\\W_ didn't work in testing for some reason? 
location = re.sub(regex,", "= bot self.conf = self.bot.config[\"weather\"] self.apikey = self.conf[\"apikey\"] with open(self.conf[\"citylist\"]) as jsonfile: self.locations_json", "/ {int(self.CtoF(relevant['today']['temp']))} °F\") await self.bot.send_message(ctx.message.channel, f\"tomorrow: {relevant['tomorrow']['desc']}, {int(relevant['tomorrow']['temp'])} °C / {int(self.CtoF(relevant['tomorrow']['temp']))} °F\") else:", "file containing the openweathermap location data \"\"\" def __init__(self, bot): self.bot = bot", "self.bot.config[\"weather\"] self.apikey = self.conf[\"apikey\"] with open(self.conf[\"citylist\"]) as jsonfile: self.locations_json = json.loads(jsonfile.read()) def parsequery(self,", "discord.ext import commands import json, requests, io, re class Weather: \"\"\"Weather class handles", "location for saberbot locations: json file containing the openweathermap location data \"\"\" def", "self.conf[\"apikey\"] with open(self.conf[\"citylist\"]) as jsonfile: self.locations_json = json.loads(jsonfile.read()) def parsequery(self, *args): \"\"\"parses list", "location data \"\"\" def __init__(self, bot): self.bot = bot self.conf = self.bot.config[\"weather\"] self.apikey", "querystring, keywords def get_location_id(self, location, country): print(location) for item in self.locations_json: if item[\"name\"]", "None def get_data(self, id, url_string): \"\"\"params: id - location id returns: data -", "io, re class Weather: \"\"\"Weather class handles weather using openweather api params: attributes:", "await self.bot.send_message(ctx.message.channel, f\"tomorrow: {relevant['tomorrow']['desc']}, {int(relevant['tomorrow']['temp'])} °C / {int(self.CtoF(relevant['tomorrow']['temp']))} °F\") else: await self.bot.send_message(ctx.message.channel, f\"Sorry,", "forecastdata[\"list\"][0][\"weather\"][0][\"description\"], \"temp\" : forecastdata[\"list\"][0][\"main\"][\"temp\"]} await self.bot.send_message(ctx.message.channel, f\"weather for {location}, 
{country}: today {relevant['today']['desc']} {int(relevant['today']['temp'])}", "if not country or item[\"country\"]== country.upper(): return str(item[\"id\"]) return None def get_data(self, id,", "London country=UK\"\"\" relevant = {} location, keywords = self.parsequery(*args) if keywords: country =", "with spaces l = [] l.append(country) l.append(location) print(l) location_id = self.get_location_id(location, country) if", "relevant[\"today\"] = {\"desc\" : weatherdata[\"weather\"][0][\"description\"], \"temp\" : weatherdata[\"main\"][\"temp\"]} relevant[\"tomorrow\"] = {\"desc\" : forecastdata[\"list\"][0][\"weather\"][0][\"description\"],", "/ {int(self.CtoF(relevant['tomorrow']['temp']))} °F\") else: await self.bot.send_message(ctx.message.channel, f\"Sorry, I don't know where {location} is\")", "location_id = self.get_location_id(location, country) if location_id != None: weather_url=f\"https://api.openweathermap.org/data/2.5/weather?id={location_id}&units=metric&APPID={self.apikey}\" forecast_url=f\"https://api.openweathermap.org/data/2.5/forecast/?id={location_id}&cnt=1&units=metric&APPID={self.apikey}\" weatherdata = self.get_data(location_id,", "for openweather config_location: configuration location for saberbot locations: json file containing the openweathermap", "self.get_data(location_id, forecast_url) country = weatherdata[\"sys\"][\"country\"] print(weatherdata) relevant[\"today\"] = {\"desc\" : weatherdata[\"weather\"][0][\"description\"], \"temp\" :", "= {\"desc\" : weatherdata[\"weather\"][0][\"description\"], \"temp\" : weatherdata[\"main\"][\"temp\"]} relevant[\"tomorrow\"] = {\"desc\" : forecastdata[\"list\"][0][\"weather\"][0][\"description\"], \"temp\"", "print(location) for item in self.locations_json: if item[\"name\"] == location: if not country or", "url_string): \"\"\"params: id - location id returns: data - dictionary object containing json", "{location}, {country}: today {relevant['today']['desc']} {int(relevant['today']['temp'])} 
°C / {int(self.CtoF(relevant['today']['temp']))} °F\") await self.bot.send_message(ctx.message.channel, f\"tomorrow: {relevant['tomorrow']['desc']},", "def CtoF(self, c): return (9/5)*c+32 @commands.command(pass_context=True) @commands.cooldown(1, 5.0, commands.BucketType.server) async def weather(self, ctx,", "!weather <city>, optionally specify country=<ID>, for example !weather London country=UK\"\"\" relevant = {}", "self.conf = self.bot.config[\"weather\"] self.apikey = self.conf[\"apikey\"] with open(self.conf[\"citylist\"]) as jsonfile: self.locations_json = json.loads(jsonfile.read())", "= larg[1] continue querystring += f\" {str(arg)}\" querystring = querystring.lstrip() return querystring, keywords", "\"\" keywords = {} print(args) for arg in args: if \"=\" in arg:", "weather using openweather api params: attributes: apikey: api key for openweather config_location: configuration", "configuration location for saberbot locations: json file containing the openweathermap location data \"\"\"", "id, url_string): \"\"\"params: id - location id returns: data - dictionary object containing", "location) #transform location into string with spaces l = [] l.append(country) l.append(location) print(l)", "= [] l.append(country) l.append(location) print(l) location_id = self.get_location_id(location, country) if location_id != None:", "as jsonfile: self.locations_json = json.loads(jsonfile.read()) def parsequery(self, *args): \"\"\"parses list of argument to", "- dictionary object containing json response\"\"\" response = requests.get(url_string) data = json.loads(response.text) return", "weatherdata[\"sys\"][\"country\"] print(weatherdata) relevant[\"today\"] = {\"desc\" : weatherdata[\"weather\"][0][\"description\"], \"temp\" : weatherdata[\"main\"][\"temp\"]} relevant[\"tomorrow\"] = {\"desc\"", ": forecastdata[\"list\"][0][\"weather\"][0][\"description\"], \"temp\" : forecastdata[\"list\"][0][\"main\"][\"temp\"]} await self.bot.send_message(ctx.message.channel, 
f\"weather for {location}, {country}: today {relevant['today']['desc']}", "for saberbot locations: json file containing the openweathermap location data \"\"\" def __init__(self,", "location_id != None: weather_url=f\"https://api.openweathermap.org/data/2.5/weather?id={location_id}&units=metric&APPID={self.apikey}\" forecast_url=f\"https://api.openweathermap.org/data/2.5/forecast/?id={location_id}&cnt=1&units=metric&APPID={self.apikey}\" weatherdata = self.get_data(location_id, weather_url) forecastdata = self.get_data(location_id, forecast_url)", "import json, requests, io, re class Weather: \"\"\"Weather class handles weather using openweather", "= \"\" keywords = {} print(args) for arg in args: if \"=\" in", "dictionary object containing json response\"\"\" response = requests.get(url_string) data = json.loads(response.text) return data", "(9/5)*c+32 @commands.command(pass_context=True) @commands.cooldown(1, 5.0, commands.BucketType.server) async def weather(self, ctx, *args): \"\"\"Search for weather", "= {} location, keywords = self.parsequery(*args) if keywords: country = keywords[\"country\"] else: country", "re class Weather: \"\"\"Weather class handles weather using openweather api params: attributes: apikey:", "°F\") else: await self.bot.send_message(ctx.message.channel, f\"Sorry, I don't know where {location} is\") def setup(bot):", "= self.bot.config[\"weather\"] self.apikey = self.conf[\"apikey\"] with open(self.conf[\"citylist\"]) as jsonfile: self.locations_json = json.loads(jsonfile.read()) def", "city and optionally a country usage: !weather <city>, optionally specify country=<ID>, for example", "spaces l = [] l.append(country) l.append(location) print(l) location_id = self.get_location_id(location, country) if location_id", "arg in args: if \"=\" in arg: larg = arg.split(\"=\") keywords[larg[0]] = larg[1]", ": forecastdata[\"list\"][0][\"main\"][\"temp\"]} await self.bot.send_message(ctx.message.channel, f\"weather for {location}, {country}: today 
{relevant['today']['desc']} {int(relevant['today']['temp'])} °C /", "using openweather api params: attributes: apikey: api key for openweather config_location: configuration location", "relevant = {} location, keywords = self.parsequery(*args) if keywords: country = keywords[\"country\"] else:", "{\"desc\" : weatherdata[\"weather\"][0][\"description\"], \"temp\" : weatherdata[\"main\"][\"temp\"]} relevant[\"tomorrow\"] = {\"desc\" : forecastdata[\"list\"][0][\"weather\"][0][\"description\"], \"temp\" :", "apikey: api key for openweather config_location: configuration location for saberbot locations: json file", "if keywords: country = keywords[\"country\"] else: country = \"\" regex = re.compile(\"([^\\w\\s{1}]|\\d|_|\\s+)\") #\\W_", "async def weather(self, ctx, *args): \"\"\"Search for weather by city and optionally a", "in args: if \"=\" in arg: larg = arg.split(\"=\") keywords[larg[0]] = larg[1] continue", "json response\"\"\" response = requests.get(url_string) data = json.loads(response.text) return data def CtoF(self, c):", "= self.conf[\"apikey\"] with open(self.conf[\"citylist\"]) as jsonfile: self.locations_json = json.loads(jsonfile.read()) def parsequery(self, *args): \"\"\"parses", "def parsequery(self, *args): \"\"\"parses list of argument to string\"\"\" querystring = \"\" keywords", "open(self.conf[\"citylist\"]) as jsonfile: self.locations_json = json.loads(jsonfile.read()) def parsequery(self, *args): \"\"\"parses list of argument", "querystring.lstrip() return querystring, keywords def get_location_id(self, location, country): print(location) for item in self.locations_json:", "l.append(location) print(l) location_id = self.get_location_id(location, country) if location_id != None: weather_url=f\"https://api.openweathermap.org/data/2.5/weather?id={location_id}&units=metric&APPID={self.apikey}\" forecast_url=f\"https://api.openweathermap.org/data/2.5/forecast/?id={location_id}&cnt=1&units=metric&APPID={self.apikey}\" weatherdata", 
"self.bot.send_message(ctx.message.channel, f\"tomorrow: {relevant['tomorrow']['desc']}, {int(relevant['tomorrow']['temp'])} °C / {int(self.CtoF(relevant['tomorrow']['temp']))} °F\") else: await self.bot.send_message(ctx.message.channel, f\"Sorry, I", "f\" {str(arg)}\" querystring = querystring.lstrip() return querystring, keywords def get_location_id(self, location, country): print(location)", ": weatherdata[\"weather\"][0][\"description\"], \"temp\" : weatherdata[\"main\"][\"temp\"]} relevant[\"tomorrow\"] = {\"desc\" : forecastdata[\"list\"][0][\"weather\"][0][\"description\"], \"temp\" : forecastdata[\"list\"][0][\"main\"][\"temp\"]}", "forecast_url=f\"https://api.openweathermap.org/data/2.5/forecast/?id={location_id}&cnt=1&units=metric&APPID={self.apikey}\" weatherdata = self.get_data(location_id, weather_url) forecastdata = self.get_data(location_id, forecast_url) country = weatherdata[\"sys\"][\"country\"] print(weatherdata)", "{relevant['tomorrow']['desc']}, {int(relevant['tomorrow']['temp'])} °C / {int(self.CtoF(relevant['tomorrow']['temp']))} °F\") else: await self.bot.send_message(ctx.message.channel, f\"Sorry, I don't know", "country usage: !weather <city>, optionally specify country=<ID>, for example !weather London country=UK\"\"\" relevant", "self.locations_json = json.loads(jsonfile.read()) def parsequery(self, *args): \"\"\"parses list of argument to string\"\"\" querystring", "id - location id returns: data - dictionary object containing json response\"\"\" response", "bot self.conf = self.bot.config[\"weather\"] self.apikey = self.conf[\"apikey\"] with open(self.conf[\"citylist\"]) as jsonfile: self.locations_json =", "location id returns: data - dictionary object containing json response\"\"\" response = requests.get(url_string)", "weatherdata[\"main\"][\"temp\"]} relevant[\"tomorrow\"] = {\"desc\" : forecastdata[\"list\"][0][\"weather\"][0][\"description\"], \"temp\" : forecastdata[\"list\"][0][\"main\"][\"temp\"]} await 
self.bot.send_message(ctx.message.channel, f\"weather for", "+= f\" {str(arg)}\" querystring = querystring.lstrip() return querystring, keywords def get_location_id(self, location, country):", "{int(self.CtoF(relevant['today']['temp']))} °F\") await self.bot.send_message(ctx.message.channel, f\"tomorrow: {relevant['tomorrow']['desc']}, {int(relevant['tomorrow']['temp'])} °C / {int(self.CtoF(relevant['tomorrow']['temp']))} °F\") else: await", "in testing for some reason? location = re.sub(regex, \"\", location) #transform location into", "and optionally a country usage: !weather <city>, optionally specify country=<ID>, for example !weather", "= querystring.lstrip() return querystring, keywords def get_location_id(self, location, country): print(location) for item in", "weatherdata = self.get_data(location_id, weather_url) forecastdata = self.get_data(location_id, forecast_url) country = weatherdata[\"sys\"][\"country\"] print(weatherdata) relevant[\"today\"]", "await self.bot.send_message(ctx.message.channel, f\"weather for {location}, {country}: today {relevant['today']['desc']} {int(relevant['today']['temp'])} °C / {int(self.CtoF(relevant['today']['temp']))} °F\")", "str(item[\"id\"]) return None def get_data(self, id, url_string): \"\"\"params: id - location id returns:", "querystring = \"\" keywords = {} print(args) for arg in args: if \"=\"", "not country or item[\"country\"]== country.upper(): return str(item[\"id\"]) return None def get_data(self, id, url_string):", "for example !weather London country=UK\"\"\" relevant = {} location, keywords = self.parsequery(*args) if", "l = [] l.append(country) l.append(location) print(l) location_id = self.get_location_id(location, country) if location_id !=", "°F\") await self.bot.send_message(ctx.message.channel, f\"tomorrow: {relevant['tomorrow']['desc']}, {int(relevant['tomorrow']['temp'])} °C / {int(self.CtoF(relevant['tomorrow']['temp']))} °F\") else: await self.bot.send_message(ctx.message.channel,", 
"weather_url=f\"https://api.openweathermap.org/data/2.5/weather?id={location_id}&units=metric&APPID={self.apikey}\" forecast_url=f\"https://api.openweathermap.org/data/2.5/forecast/?id={location_id}&cnt=1&units=metric&APPID={self.apikey}\" weatherdata = self.get_data(location_id, weather_url) forecastdata = self.get_data(location_id, forecast_url) country = weatherdata[\"sys\"][\"country\"]", "{int(relevant['tomorrow']['temp'])} °C / {int(self.CtoF(relevant['tomorrow']['temp']))} °F\") else: await self.bot.send_message(ctx.message.channel, f\"Sorry, I don't know where", "\"\"\"parses list of argument to string\"\"\" querystring = \"\" keywords = {} print(args)", "country): print(location) for item in self.locations_json: if item[\"name\"] == location: if not country", "= \"\" regex = re.compile(\"([^\\w\\s{1}]|\\d|_|\\s+)\") #\\W_ didn't work in testing for some reason?", "#transform location into string with spaces l = [] l.append(country) l.append(location) print(l) location_id", "country = keywords[\"country\"] else: country = \"\" regex = re.compile(\"([^\\w\\s{1}]|\\d|_|\\s+)\") #\\W_ didn't work", "containing the openweathermap location data \"\"\" def __init__(self, bot): self.bot = bot self.conf", "data - dictionary object containing json response\"\"\" response = requests.get(url_string) data = json.loads(response.text)", "location: if not country or item[\"country\"]== country.upper(): return str(item[\"id\"]) return None def get_data(self,", "didn't work in testing for some reason? 
location = re.sub(regex, \"\", location) #transform", "forecastdata[\"list\"][0][\"main\"][\"temp\"]} await self.bot.send_message(ctx.message.channel, f\"weather for {location}, {country}: today {relevant['today']['desc']} {int(relevant['today']['temp'])} °C / {int(self.CtoF(relevant['today']['temp']))}", "keywords[\"country\"] else: country = \"\" regex = re.compile(\"([^\\w\\s{1}]|\\d|_|\\s+)\") #\\W_ didn't work in testing", "containing json response\"\"\" response = requests.get(url_string) data = json.loads(response.text) return data def CtoF(self,", "= self.parsequery(*args) if keywords: country = keywords[\"country\"] else: country = \"\" regex =", "self.get_data(location_id, weather_url) forecastdata = self.get_data(location_id, forecast_url) country = weatherdata[\"sys\"][\"country\"] print(weatherdata) relevant[\"today\"] = {\"desc\"", "{str(arg)}\" querystring = querystring.lstrip() return querystring, keywords def get_location_id(self, location, country): print(location) for", "= json.loads(jsonfile.read()) def parsequery(self, *args): \"\"\"parses list of argument to string\"\"\" querystring =", "= self.get_data(location_id, forecast_url) country = weatherdata[\"sys\"][\"country\"] print(weatherdata) relevant[\"today\"] = {\"desc\" : weatherdata[\"weather\"][0][\"description\"], \"temp\"", "print(weatherdata) relevant[\"today\"] = {\"desc\" : weatherdata[\"weather\"][0][\"description\"], \"temp\" : weatherdata[\"main\"][\"temp\"]} relevant[\"tomorrow\"] = {\"desc\" :", "@commands.command(pass_context=True) @commands.cooldown(1, 5.0, commands.BucketType.server) async def weather(self, ctx, *args): \"\"\"Search for weather by", "{int(self.CtoF(relevant['tomorrow']['temp']))} °F\") else: await self.bot.send_message(ctx.message.channel, f\"Sorry, I don't know where {location} is\") def", "f\"weather for {location}, {country}: today {relevant['today']['desc']} {int(relevant['today']['temp'])} °C / {int(self.CtoF(relevant['today']['temp']))} °F\") await 
self.bot.send_message(ctx.message.channel,", "#\\W_ didn't work in testing for some reason? location = re.sub(regex, \"\", location)", "to string\"\"\" querystring = \"\" keywords = {} print(args) for arg in args:", "self.bot = bot self.conf = self.bot.config[\"weather\"] self.apikey = self.conf[\"apikey\"] with open(self.conf[\"citylist\"]) as jsonfile:", "return data def CtoF(self, c): return (9/5)*c+32 @commands.command(pass_context=True) @commands.cooldown(1, 5.0, commands.BucketType.server) async def", "response\"\"\" response = requests.get(url_string) data = json.loads(response.text) return data def CtoF(self, c): return", "CtoF(self, c): return (9/5)*c+32 @commands.command(pass_context=True) @commands.cooldown(1, 5.0, commands.BucketType.server) async def weather(self, ctx, *args):", "keywords def get_location_id(self, location, country): print(location) for item in self.locations_json: if item[\"name\"] ==", "weather(self, ctx, *args): \"\"\"Search for weather by city and optionally a country usage:", "ctx, *args): \"\"\"Search for weather by city and optionally a country usage: !weather", "the openweathermap location data \"\"\" def __init__(self, bot): self.bot = bot self.conf =", "return querystring, keywords def get_location_id(self, location, country): print(location) for item in self.locations_json: if", "into string with spaces l = [] l.append(country) l.append(location) print(l) location_id = self.get_location_id(location,", "string\"\"\" querystring = \"\" keywords = {} print(args) for arg in args: if", "== location: if not country or item[\"country\"]== country.upper(): return str(item[\"id\"]) return None def", "querystring = querystring.lstrip() return querystring, keywords def get_location_id(self, location, country): print(location) for item", "for weather by city and optionally a country usage: !weather <city>, optionally specify", "example !weather London country=UK\"\"\" relevant = {} location, keywords = self.parsequery(*args) if 
keywords:", "else: await self.bot.send_message(ctx.message.channel, f\"Sorry, I don't know where {location} is\") def setup(bot): bot.add_cog(Weather(bot))", "import commands import json, requests, io, re class Weather: \"\"\"Weather class handles weather", "argument to string\"\"\" querystring = \"\" keywords = {} print(args) for arg in", "c): return (9/5)*c+32 @commands.command(pass_context=True) @commands.cooldown(1, 5.0, commands.BucketType.server) async def weather(self, ctx, *args): \"\"\"Search", "continue querystring += f\" {str(arg)}\" querystring = querystring.lstrip() return querystring, keywords def get_location_id(self,", "Weather: \"\"\"Weather class handles weather using openweather api params: attributes: apikey: api key", "parsequery(self, *args): \"\"\"parses list of argument to string\"\"\" querystring = \"\" keywords =", "= arg.split(\"=\") keywords[larg[0]] = larg[1] continue querystring += f\" {str(arg)}\" querystring = querystring.lstrip()", "°C / {int(self.CtoF(relevant['today']['temp']))} °F\") await self.bot.send_message(ctx.message.channel, f\"tomorrow: {relevant['tomorrow']['desc']}, {int(relevant['tomorrow']['temp'])} °C / {int(self.CtoF(relevant['tomorrow']['temp']))} °F\")", "key for openweather config_location: configuration location for saberbot locations: json file containing the", "country) if location_id != None: weather_url=f\"https://api.openweathermap.org/data/2.5/weather?id={location_id}&units=metric&APPID={self.apikey}\" forecast_url=f\"https://api.openweathermap.org/data/2.5/forecast/?id={location_id}&cnt=1&units=metric&APPID={self.apikey}\" weatherdata = self.get_data(location_id, weather_url) forecastdata =", "returns: data - dictionary object containing json response\"\"\" response = requests.get(url_string) data =", "else: country = \"\" regex = re.compile(\"([^\\w\\s{1}]|\\d|_|\\s+)\") #\\W_ didn't work in testing for", "get_data(self, id, url_string): \"\"\"params: id - location id returns: data - dictionary object", 
"openweather api params: attributes: apikey: api key for openweather config_location: configuration location for", "def get_data(self, id, url_string): \"\"\"params: id - location id returns: data - dictionary", "def get_location_id(self, location, country): print(location) for item in self.locations_json: if item[\"name\"] == location:", ": weatherdata[\"main\"][\"temp\"]} relevant[\"tomorrow\"] = {\"desc\" : forecastdata[\"list\"][0][\"weather\"][0][\"description\"], \"temp\" : forecastdata[\"list\"][0][\"main\"][\"temp\"]} await self.bot.send_message(ctx.message.channel, f\"weather", "a country usage: !weather <city>, optionally specify country=<ID>, for example !weather London country=UK\"\"\"", "optionally specify country=<ID>, for example !weather London country=UK\"\"\" relevant = {} location, keywords", "{int(relevant['today']['temp'])} °C / {int(self.CtoF(relevant['today']['temp']))} °F\") await self.bot.send_message(ctx.message.channel, f\"tomorrow: {relevant['tomorrow']['desc']}, {int(relevant['tomorrow']['temp'])} °C / {int(self.CtoF(relevant['tomorrow']['temp']))}", "of argument to string\"\"\" querystring = \"\" keywords = {} print(args) for arg", "re.sub(regex, \"\", location) #transform location into string with spaces l = [] l.append(country)", "5.0, commands.BucketType.server) async def weather(self, ctx, *args): \"\"\"Search for weather by city and", "string with spaces l = [] l.append(country) l.append(location) print(l) location_id = self.get_location_id(location, country)" ]
[]
[ "django.contrib import admin from .models import Article class ArticleAdmin(admin.ModelAdmin): model = Article admin.site.register(Article)", "from django.contrib import admin from .models import Article class ArticleAdmin(admin.ModelAdmin): model = Article" ]
[ "data_file_path: raise Exception(f\"invalid froot: {self.froot}\") self._path = self.froot.value+self._dir print(f\"updating path, will read data", "with these extensions std_image_size = { 'SAXS': (1043, 981), 'WAXS1': (619, 487), 'WAXS2':", "from {fn}: {data.shape}') #, return an empty frame instead.') except: print(f'could not read", "filename template: {template}') for fr in data_file_path: if self._default_path.find(fr.value)==0: self._dir = self._default_path[len(fr.value):] return", "this is used by the CBF file handler from enum import Enum class", "compatibility, fpp was always 1 before Jan 2018 #global pilatus_fpp #pilatus_fpp = 1", "self._template = \"_\".join(tl[:-1])+\".\"+tl[-1] print(\"CBF handler called: start=%d, stop=%d\" % (start, stop)) print(\" \",", "self.trigger_mode in [triggerMode.software_trigger_multi_frame, triggerMode.fly_scan]: for i in range(self._fpp): fn = self._template % (self._path,", "unnecessary, difference is fpp #global pilatus_trigger_mode #global default_data_path_root #global substitute_data_path_root #global CBF_replace_data_path #pilatus_trigger_mode", "froot: {self.froot}\") self._path = self.froot.value+self._dir print(f\"updating path, will read data from {self._path} ...\")", "5 # this is unnecessary, difference is fpp #global pilatus_trigger_mode #global default_data_path_root #global", "'SAXS': (1043, 981), 'WAXS1': (619, 487), 'WAXS2': (1043, 981) # orignal WAXS2 was", "return an empty frame instead.') data = np.zeros(self._image_size) #print(data.shape) return data def __call__(self,", "was (619, 487) } def __init__(self, rpath, template, filename, frame_per_point=1, initial_number=1): print(f'Initializing CBF", "not correctly formatted tl = tplt.replace(\".\", \"_\").split(\"_\") # e.g. 
['%s%s', '%06d', 'SAXS', 'cbf'],", "difference is fpp #global pilatus_trigger_mode #global default_data_path_root #global substitute_data_path_root #global CBF_replace_data_path #pilatus_trigger_mode =", "3 fly_scan = 4 #external_trigger_multi_frame = 5 # this is unnecessary, difference is", "#global CBF_replace_data_path #pilatus_trigger_mode = triggerMode.software_trigger_single_frame # if the cbf files have been moved", "start, point_number+i) ret.append(self.get_data(fn)) elif self.trigger_mode==triggerMode.external_trigger: fn = self._template % (self._path, self._filename, start, point_number)", "triggerMode.software_trigger_single_frame # assuming that the data files always have names with these extensions", "self._fpp = frame_per_point self._filename = filename self._initial_number = initial_number self._image_size = None self._default_path", "= self._default_path[len(fr.value):] return raise Exception(f\"invalid file path: {self._default_path}\") def update_path(self): # this is", "if not self.froot in data_file_path: raise Exception(f\"invalid froot: {self.froot}\") self._path = self.froot.value+self._dir print(f\"updating", "= tl[:-1]+[\"%05d\"]+tl[-1:] elif len(tl)==5: tl = tl[:-2]+tl[-1:] self._template = \"_\".join(tl[:-1])+\".\"+tl[-1] print(\"CBF handler called:", "'SAXS', 'cbf'], ['%s%s', '%06d', 'SAXS', '%05d', 'cbf'] # resulting in file names like", "= [] tplt = self._template.replace(\"6.6d\", \"06d\") # some early templates are not correctly", "self._path = \"\" for k in self.std_image_size: if template.find(k)>=0: self._image_size = self.std_image_size[k] if", "rpath, template, filename, frame_per_point=1, initial_number=1): print(f'Initializing CBF handler for {self.trigger_mode} ...') self._template =", "template.find(k)>=0: self._image_size = self.std_image_size[k] if self._image_size is None: raise Exception(f'Unrecognized data file extension", "self.trigger_mode==triggerMode.external_trigger: fn = self._template % (self._path, 
self._filename, start, point_number) ret.append(self.get_data(fn)) return np.array(ret).squeeze() db.reg.register_handler('AD_CBF',", "{self.trigger_mode} ...') self._template = template self._fpp = frame_per_point self._filename = filename self._initial_number =", "file handler from enum import Enum class triggerMode(Enum): software_trigger_single_frame = 1 software_trigger_multi_frame =", "+ 1 ret = [] tplt = self._template.replace(\"6.6d\", \"06d\") # some early templates", "substitute_data_path_root #global CBF_replace_data_path #pilatus_trigger_mode = triggerMode.software_trigger_single_frame # if the cbf files have been", "= img.data if data.shape!=self._image_size: print(f'got incorrect image size from {fn}: {data.shape}') #, return", "fly_scan = 4 #external_trigger_multi_frame = 5 # this is unnecessary, difference is fpp", "for {self.trigger_mode} ...') self._template = template self._fpp = frame_per_point self._filename = filename self._initial_number", "that the data files always have names with these extensions std_image_size = {", "templates are not correctly formatted tl = tplt.replace(\".\", \"_\").split(\"_\") # e.g. 
['%s%s', '%06d',", "PilatusCBFHandler(HandlerBase): specs = {'AD_CBF'} | HandlerBase.specs froot = data_file_path.gpfs subdir = None trigger_mode", "(619, 487), 'WAXS2': (1043, 981) # orignal WAXS2 was (619, 487) } def", "is None: raise Exception(f'Unrecognized data file extension in filename template: {template}') for fr", "# resulting in file names like test_000125_SAXS.cbf vs test_000125_SAXS_00001.cbf if self.trigger_mode != triggerMode.software_trigger_single_frame", "# if the cbf files have been moved already #CBF_replace_data_path = False class", "pilatus_trigger_mode #global default_data_path_root #global substitute_data_path_root #global CBF_replace_data_path #pilatus_trigger_mode = triggerMode.software_trigger_single_frame # if the", "# some early templates are not correctly formatted tl = tplt.replace(\".\", \"_\").split(\"_\") #", "resulting in file names like test_000125_SAXS.cbf vs test_000125_SAXS_00001.cbf if self.trigger_mode != triggerMode.software_trigger_single_frame and", "pilatus_fpp #pilatus_fpp = 1 # this is used by the CBF file handler", "None: self._path += f\"{self.subdir}/\" if self.trigger_mode == triggerMode.software_trigger_single_frame or self._fpp == 1: fn", "487), 'WAXS2': (1043, 981) # orignal WAXS2 was (619, 487) } def __init__(self,", "image size from {fn}: {data.shape}') #, return an empty frame instead.') except: print(f'could", "== triggerMode.software_trigger_single_frame or self._fpp == 1: fn = self._template % (self._path, self._filename, self._initial_number+point_number)", "self.std_image_size: if template.find(k)>=0: self._image_size = self.std_image_size[k] if self._image_size is None: raise Exception(f'Unrecognized data", "self.std_image_size[k] if self._image_size is None: raise Exception(f'Unrecognized data file extension in filename template:", "1 software_trigger_multi_frame = 2 external_trigger = 3 fly_scan = 4 #external_trigger_multi_frame = 5", "{fn}: {data.shape}') #, return an empty frame instead.') except: 
print(f'could not read {fn},", "class PilatusCBFHandler(HandlerBase): specs = {'AD_CBF'} | HandlerBase.specs froot = data_file_path.gpfs subdir = None", "the file may not exist \"\"\" try: img = fabio.open(fn) data = img.data", "an empty frame instead.') data = np.zeros(self._image_size) #print(data.shape) return data def __call__(self, point_number):", "= start + 1 ret = [] tplt = self._template.replace(\"6.6d\", \"06d\") # some", "2018 #global pilatus_fpp #pilatus_fpp = 1 # this is used by the CBF", "data_file_path: if self._default_path.find(fr.value)==0: self._dir = self._default_path[len(fr.value):] return raise Exception(f\"invalid file path: {self._default_path}\") def", "std_image_size = { 'SAXS': (1043, 981), 'WAXS1': (619, 487), 'WAXS2': (1043, 981) #", "1 ret = [] tplt = self._template.replace(\"6.6d\", \"06d\") # some early templates are", "# assuming that the data files always have names with these extensions std_image_size", "/exp_path then moved to /nsls2/xf16id1/exp_path if not self.froot in data_file_path: raise Exception(f\"invalid froot:", "in range(self._fpp): fn = self._template % (self._path, self._filename, start, point_number+i) ret.append(self.get_data(fn)) elif self.trigger_mode==triggerMode.external_trigger:", "Enum class triggerMode(Enum): software_trigger_single_frame = 1 software_trigger_multi_frame = 2 external_trigger = 3 fly_scan", "data.shape!=self._image_size: print(f'got incorrect image size from {fn}: {data.shape}') #, return an empty frame", "= os.path.join(rpath, '') self._path = \"\" for k in self.std_image_size: if template.find(k)>=0: self._image_size", "elif len(tl)==5: tl = tl[:-2]+tl[-1:] self._template = \"_\".join(tl[:-1])+\".\"+tl[-1] print(\"CBF handler called: start=%d, stop=%d\"", "this is a workaround for data that are save in /exp_path then moved", "% (self._path, self._filename, self._initial_number+point_number) ret.append(self.get_data(fn)) elif self.trigger_mode in [triggerMode.software_trigger_multi_frame, 
triggerMode.fly_scan]: for i in", "+= f\"{self.subdir}/\" if self.trigger_mode == triggerMode.software_trigger_single_frame or self._fpp == 1: fn = self._template", "moved to /nsls2/xf16id1/exp_path if not self.froot in data_file_path: raise Exception(f\"invalid froot: {self.froot}\") self._path", "= {'AD_CBF'} | HandlerBase.specs froot = data_file_path.gpfs subdir = None trigger_mode = triggerMode.software_trigger_single_frame", "raise Exception(f\"invalid file path: {self._default_path}\") def update_path(self): # this is a workaround for", "= filename self._initial_number = initial_number self._image_size = None self._default_path = os.path.join(rpath, '') self._path", "import Enum class triggerMode(Enum): software_trigger_single_frame = 1 software_trigger_multi_frame = 2 external_trigger = 3", "#CBF_replace_data_path = False class PilatusCBFHandler(HandlerBase): specs = {'AD_CBF'} | HandlerBase.specs froot = data_file_path.gpfs", "(1043, 981), 'WAXS1': (619, 487), 'WAXS2': (1043, 981) # orignal WAXS2 was (619,", "{self._default_path}\") def update_path(self): # this is a workaround for data that are save", "def __init__(self, rpath, template, filename, frame_per_point=1, initial_number=1): print(f'Initializing CBF handler for {self.trigger_mode} ...')", "self._template.replace(\"6.6d\", \"06d\") # some early templates are not correctly formatted tl = tplt.replace(\".\",", "{self.froot}\") self._path = self.froot.value+self._dir print(f\"updating path, will read data from {self._path} ...\") def", "\"\"\" try: img = fabio.open(fn) data = img.data if data.shape!=self._image_size: print(f'got incorrect image", "tplt = self._template.replace(\"6.6d\", \"06d\") # some early templates are not correctly formatted tl", "correctly formatted tl = tplt.replace(\".\", \"_\").split(\"_\") # e.g. ['%s%s', '%06d', 'SAXS', 'cbf'], ['%s%s',", "e.g. 
['%s%s', '%06d', 'SAXS', 'cbf'], ['%s%s', '%06d', 'SAXS', '%05d', 'cbf'] # resulting in", "test_000125_SAXS_00001.cbf if self.trigger_mode != triggerMode.software_trigger_single_frame and self._fpp>1: # the template needs to have", "self._dir = self._default_path[len(fr.value):] return raise Exception(f\"invalid file path: {self._default_path}\") def update_path(self): # this", "frame instead.') except: print(f'could not read {fn}, return an empty frame instead.') data", "is used by the CBF file handler from enum import Enum class triggerMode(Enum):", "'WAXS2': (1043, 981) # orignal WAXS2 was (619, 487) } def __init__(self, rpath,", "moved already #CBF_replace_data_path = False class PilatusCBFHandler(HandlerBase): specs = {'AD_CBF'} | HandlerBase.specs froot", "used by the CBF file handler from enum import Enum class triggerMode(Enum): software_trigger_single_frame", "1 # this is used by the CBF file handler from enum import", "from databroker.assets.handlers_base import HandlerBase from databroker.assets.base_registry import DuplicateHandler import fabio # for backward", "trigger_mode = triggerMode.software_trigger_single_frame # assuming that the data files always have names with", "{'AD_CBF'} | HandlerBase.specs froot = data_file_path.gpfs subdir = None trigger_mode = triggerMode.software_trigger_single_frame #", "if data.shape!=self._image_size: print(f'got incorrect image size from {fn}: {data.shape}') #, return an empty", "for data that are save in /exp_path then moved to /nsls2/xf16id1/exp_path if not", "= tplt.replace(\".\", \"_\").split(\"_\") # e.g. 
['%s%s', '%06d', 'SAXS', 'cbf'], ['%s%s', '%06d', 'SAXS', '%05d',", "cbf files have been moved already #CBF_replace_data_path = False class PilatusCBFHandler(HandlerBase): specs =", "Exception(f\"invalid file path: {self._default_path}\") def update_path(self): # this is a workaround for data", "import os from databroker.assets.handlers_base import HandlerBase from databroker.assets.base_registry import DuplicateHandler import fabio #", "self._template = template self._fpp = frame_per_point self._filename = filename self._initial_number = initial_number self._image_size", "i in range(self._fpp): fn = self._template % (self._path, self._filename, start, point_number+i) ret.append(self.get_data(fn)) elif", "{fn}, return an empty frame instead.') data = np.zeros(self._image_size) #print(data.shape) return data def", "if len(tl)==4: tl = tl[:-1]+[\"%05d\"]+tl[-1:] elif len(tl)==5: tl = tl[:-2]+tl[-1:] self._template = \"_\".join(tl[:-1])+\".\"+tl[-1]", "__call__(self, point_number): start = self._initial_number #+ point_number stop = start + 1 ret", "self._fpp) print(\" \", self._template, self._path, self._initial_number) self.update_path() if self.subdir is not None: self._path", "#pilatus_trigger_mode = triggerMode.software_trigger_single_frame # if the cbf files have been moved already #CBF_replace_data_path", "981), 'WAXS1': (619, 487), 'WAXS2': (1043, 981) # orignal WAXS2 was (619, 487)", "raise Exception(f\"invalid froot: {self.froot}\") self._path = self.froot.value+self._dir print(f\"updating path, will read data from", "data files always have names with these extensions std_image_size = { 'SAXS': (1043,", "= 1 software_trigger_multi_frame = 2 external_trigger = 3 fly_scan = 4 #external_trigger_multi_frame =", "are save in /exp_path then moved to /nsls2/xf16id1/exp_path if not self.froot in data_file_path:", "['%s%s', '%06d', 'SAXS', '%05d', 'cbf'] # resulting in file names like test_000125_SAXS.cbf vs", "import HandlerBase from databroker.assets.base_registry 
import DuplicateHandler import fabio # for backward compatibility, fpp", "triggerMode.software_trigger_single_frame or self._fpp == 1: fn = self._template % (self._path, self._filename, self._initial_number+point_number) ret.append(self.get_data(fn))", "= self.std_image_size[k] if self._image_size is None: raise Exception(f'Unrecognized data file extension in filename", "have names with these extensions std_image_size = { 'SAXS': (1043, 981), 'WAXS1': (619,", "% (start, stop)) print(\" \", self._initial_number, point_number, self._fpp) print(\" \", self._template, self._path, self._initial_number)", "(self._path, self._filename, start, point_number+i) ret.append(self.get_data(fn)) elif self.trigger_mode==triggerMode.external_trigger: fn = self._template % (self._path, self._filename,", "self._filename = filename self._initial_number = initial_number self._image_size = None self._default_path = os.path.join(rpath, '')", "number fileds if len(tl)==4: tl = tl[:-1]+[\"%05d\"]+tl[-1:] elif len(tl)==5: tl = tl[:-2]+tl[-1:] self._template", "tl = tl[:-1]+[\"%05d\"]+tl[-1:] elif len(tl)==5: tl = tl[:-2]+tl[-1:] self._template = \"_\".join(tl[:-1])+\".\"+tl[-1] print(\"CBF handler", "that are save in /exp_path then moved to /nsls2/xf16id1/exp_path if not self.froot in", "self.froot in data_file_path: raise Exception(f\"invalid froot: {self.froot}\") self._path = self.froot.value+self._dir print(f\"updating path, will", "by the CBF file handler from enum import Enum class triggerMode(Enum): software_trigger_single_frame =", "class triggerMode(Enum): software_trigger_single_frame = 1 software_trigger_multi_frame = 2 external_trigger = 3 fly_scan =", "# this is unnecessary, difference is fpp #global pilatus_trigger_mode #global default_data_path_root #global substitute_data_path_root", "fabio # for backward compatibility, fpp was always 1 before Jan 2018 #global", "def get_data(self, fn): \"\"\" the file may not exist \"\"\" try: img =", "= 2 external_trigger = 3 fly_scan = 4 
#external_trigger_multi_frame = 5 # this", "instead.') except: print(f'could not read {fn}, return an empty frame instead.') data =", "# orignal WAXS2 was (619, 487) } def __init__(self, rpath, template, filename, frame_per_point=1,", "= self._initial_number #+ point_number stop = start + 1 ret = [] tplt", "2 external_trigger = 3 fly_scan = 4 #external_trigger_multi_frame = 5 # this is", "HandlerBase.specs froot = data_file_path.gpfs subdir = None trigger_mode = triggerMode.software_trigger_single_frame # assuming that", "'WAXS1': (619, 487), 'WAXS2': (1043, 981) # orignal WAXS2 was (619, 487) }", "{template}') for fr in data_file_path: if self._default_path.find(fr.value)==0: self._dir = self._default_path[len(fr.value):] return raise Exception(f\"invalid", "# for backward compatibility, fpp was always 1 before Jan 2018 #global pilatus_fpp", "fileds if len(tl)==4: tl = tl[:-1]+[\"%05d\"]+tl[-1:] elif len(tl)==5: tl = tl[:-2]+tl[-1:] self._template =", "1 before Jan 2018 #global pilatus_fpp #pilatus_fpp = 1 # this is used", "in /exp_path then moved to /nsls2/xf16id1/exp_path if not self.froot in data_file_path: raise Exception(f\"invalid", "not read {fn}, return an empty frame instead.') data = np.zeros(self._image_size) #print(data.shape) return", "self.update_path() if self.subdir is not None: self._path += f\"{self.subdir}/\" if self.trigger_mode == triggerMode.software_trigger_single_frame", "f\"{self.subdir}/\" if self.trigger_mode == triggerMode.software_trigger_single_frame or self._fpp == 1: fn = self._template %", "fn = self._template % (self._path, self._filename, self._initial_number+point_number) ret.append(self.get_data(fn)) elif self.trigger_mode in [triggerMode.software_trigger_multi_frame, triggerMode.fly_scan]:", "databroker.assets.handlers_base import HandlerBase from databroker.assets.base_registry import DuplicateHandler import fabio # for backward compatibility,", "CBF handler for {self.trigger_mode} ...') self._template = template self._fpp = 
class PilatusCBFHandler(HandlerBase):
    """Databroker file handler that reads Pilatus .cbf frames via fabio.

    A resource records a directory (``rpath``), a printf-style filename
    ``template`` and a base ``filename``; ``__call__(point_number)`` maps a
    datum point number to one or more CBF files depending on the class-level
    ``trigger_mode`` and the frames-per-point recorded for the scan, and
    returns the stacked (and squeezed) image data as a numpy array.
    """
    specs = {'AD_CBF'} | HandlerBase.specs
    # data root; may be switched externally (see update_path) when files are moved
    froot = data_file_path.gpfs
    # optional extra subdirectory appended below the resolved path
    subdir = None
    trigger_mode = triggerMode.software_trigger_single_frame
    # assuming that the data files always have names with these detector tags;
    # values are expected image shapes (rows, cols)
    std_image_size = {
        'SAXS': (1043, 981),
        'WAXS1': (619, 487),
        'WAXS2': (1043, 981),   # original WAXS2 was (619, 487)
    }

    def __init__(self, rpath, template, filename, frame_per_point=1, initial_number=1):
        """Record the resource parameters and resolve the data directory.

        Raises
        ------
        Exception
            If no known detector tag appears in ``template``, or if ``rpath``
            is not under any root listed in the project-global ``data_file_path``.
        """
        print(f'Initializing CBF handler for {self.trigger_mode} ...')
        self._template = template
        self._fpp = frame_per_point
        self._filename = filename
        self._initial_number = initial_number
        self._image_size = None
        self._default_path = os.path.join(rpath, '')
        self._path = ""

        # infer the expected image size from the detector tag embedded in the template
        for k in self.std_image_size:
            if k in template:   # was template.find(k)>=0
                self._image_size = self.std_image_size[k]
        if self._image_size is None:
            raise Exception(f'Unrecognized data file extension in filename template: {template}')

        # record the directory relative to whichever known data root it lives
        # under, so update_path() can later re-root it onto self.froot
        for fr in data_file_path:
            if self._default_path.startswith(fr.value):   # was .find(fr.value)==0
                self._dir = self._default_path[len(fr.value):]
                return
        raise Exception(f"invalid file path: {self._default_path}")

    def update_path(self):
        """Recompute ``self._path`` from the current class-level ``froot``.

        This is a workaround for data that are saved in /exp_path and then
        moved to /nsls2/xf16id1/exp_path: switching ``froot`` re-roots the
        relative directory recorded in ``__init__``.
        """
        if self.froot not in data_file_path:
            raise Exception(f"invalid froot: {self.froot}")
        self._path = self.froot.value + self._dir
        print(f"updating path, will read data from {self._path} ...")

    def get_data(self, fn):
        """Read one CBF frame; the file may not exist.

        Best-effort by design: on any read failure an all-zero frame of the
        expected size is returned so a partially written scan still loads.
        """
        try:
            img = fabio.open(fn)
            data = img.data
            if data.shape != self._image_size:
                # deliberately keeps the wrong-sized data and only warns
                print(f'got incorrect image size from {fn}: {data.shape}')  #, return an empty frame instead.')
        except Exception:
            # was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
            # propagate, while keeping the original best-effort fallback
            print(f'could not read {fn}, return an empty frame instead.')
            data = np.zeros(self._image_size)
        return data

    def __call__(self, point_number):
        """Return the frame(s) for ``point_number`` as a squeezed numpy array."""
        start = self._initial_number  #+ point_number
        stop = start + 1
        ret = []

        # normalize the filename template; some early templates are not correctly formatted
        tplt = self._template.replace("6.6d", "06d")
        tl = tplt.replace(".", "_").split("_")
        # e.g. ['%s%s', '%06d', 'SAXS', 'cbf'], ['%s%s', '%06d', 'SAXS', '%05d', 'cbf']
        # resulting in file names like test_000125_SAXS.cbf vs test_000125_SAXS_00001.cbf
        if self.trigger_mode != triggerMode.software_trigger_single_frame and self._fpp > 1:
            # the template needs to have two number fields
            if len(tl) == 4:
                tl = tl[:-1] + ["%05d"] + tl[-1:]
        elif len(tl) == 5:
            # single-frame naming: drop the per-frame number field
            tl = tl[:-2] + tl[-1:]
        # NOTE: mutates state, but the normalization is idempotent on repeat calls
        self._template = "_".join(tl[:-1]) + "." + tl[-1]

        print("CBF handler called: start=%d, stop=%d" % (start, stop))
        print("   ", self._initial_number, point_number, self._fpp)
        print("   ", self._template, self._path, self._initial_number)
        self.update_path()
        if self.subdir is not None:
            self._path += f"{self.subdir}/"

        if self.trigger_mode == triggerMode.software_trigger_single_frame or self._fpp == 1:
            # one file per point; the file number advances with the point number
            fn = self._template % (self._path, self._filename, self._initial_number + point_number)
            ret.append(self.get_data(fn))
        elif self.trigger_mode in [triggerMode.software_trigger_multi_frame, triggerMode.fly_scan]:
            # one file per frame within the point
            for i in range(self._fpp):
                fn = self._template % (self._path, self._filename, start, point_number + i)
                ret.append(self.get_data(fn))
        elif self.trigger_mode == triggerMode.external_trigger:
            fn = self._template % (self._path, self._filename, start, point_number)
            ret.append(self.get_data(fn))

        return np.array(ret).squeeze()
['%s%s', '%06d', 'SAXS',", "\", self._initial_number, point_number, self._fpp) print(\" \", self._template, self._path, self._initial_number) self.update_path() if self.subdir is", "# the template needs to have two number fileds if len(tl)==4: tl =", "orignal WAXS2 was (619, 487) } def __init__(self, rpath, template, filename, frame_per_point=1, initial_number=1):", "print(f\"updating path, will read data from {self._path} ...\") def get_data(self, fn): \"\"\" the", "self._filename, start, point_number+i) ret.append(self.get_data(fn)) elif self.trigger_mode==triggerMode.external_trigger: fn = self._template % (self._path, self._filename, start,", "= triggerMode.software_trigger_single_frame # if the cbf files have been moved already #CBF_replace_data_path =", "triggerMode.software_trigger_single_frame # if the cbf files have been moved already #CBF_replace_data_path = False", "# this is a workaround for data that are save in /exp_path then", "self._path = self.froot.value+self._dir print(f\"updating path, will read data from {self._path} ...\") def get_data(self,", "frame instead.') data = np.zeros(self._image_size) #print(data.shape) return data def __call__(self, point_number): start =", "% (self._path, self._filename, start, point_number+i) ret.append(self.get_data(fn)) elif self.trigger_mode==triggerMode.external_trigger: fn = self._template % (self._path,", "template: {template}') for fr in data_file_path: if self._default_path.find(fr.value)==0: self._dir = self._default_path[len(fr.value):] return raise", "self._image_size = self.std_image_size[k] if self._image_size is None: raise Exception(f'Unrecognized data file extension in", "is a workaround for data that are save in /exp_path then moved to", "(start, stop)) print(\" \", self._initial_number, point_number, self._fpp) print(\" \", self._template, self._path, self._initial_number) self.update_path()", "return raise Exception(f\"invalid file path: {self._default_path}\") def update_path(self): # this is a 
workaround", "= tl[:-2]+tl[-1:] self._template = \"_\".join(tl[:-1])+\".\"+tl[-1] print(\"CBF handler called: start=%d, stop=%d\" % (start, stop))", "...') self._template = template self._fpp = frame_per_point self._filename = filename self._initial_number = initial_number", "fn = self._template % (self._path, self._filename, start, point_number+i) ret.append(self.get_data(fn)) elif self.trigger_mode==triggerMode.external_trigger: fn =", "stop)) print(\" \", self._initial_number, point_number, self._fpp) print(\" \", self._template, self._path, self._initial_number) self.update_path() if", "or self._fpp == 1: fn = self._template % (self._path, self._filename, self._initial_number+point_number) ret.append(self.get_data(fn)) elif", "get_data(self, fn): \"\"\" the file may not exist \"\"\" try: img = fabio.open(fn)", "\"\"\" the file may not exist \"\"\" try: img = fabio.open(fn) data =", "[] tplt = self._template.replace(\"6.6d\", \"06d\") # some early templates are not correctly formatted", "= self._template % (self._path, self._filename, start, point_number+i) ret.append(self.get_data(fn)) elif self.trigger_mode==triggerMode.external_trigger: fn = self._template", "extensions std_image_size = { 'SAXS': (1043, 981), 'WAXS1': (619, 487), 'WAXS2': (1043, 981)", "data_file_path.gpfs subdir = None trigger_mode = triggerMode.software_trigger_single_frame # assuming that the data files", "some early templates are not correctly formatted tl = tplt.replace(\".\", \"_\").split(\"_\") # e.g.", "file path: {self._default_path}\") def update_path(self): # this is a workaround for data that", "external_trigger = 3 fly_scan = 4 #external_trigger_multi_frame = 5 # this is unnecessary,", "data that are save in /exp_path then moved to /nsls2/xf16id1/exp_path if not self.froot", "start=%d, stop=%d\" % (start, stop)) print(\" \", self._initial_number, point_number, self._fpp) print(\" \", self._template,", "template self._fpp = frame_per_point self._filename = filename 
self._initial_number = initial_number self._image_size = None", "#, return an empty frame instead.') except: print(f'could not read {fn}, return an", "an empty frame instead.') except: print(f'could not read {fn}, return an empty frame", "software_trigger_multi_frame = 2 external_trigger = 3 fly_scan = 4 #external_trigger_multi_frame = 5 #", "'%06d', 'SAXS', '%05d', 'cbf'] # resulting in file names like test_000125_SAXS.cbf vs test_000125_SAXS_00001.cbf", "path: {self._default_path}\") def update_path(self): # this is a workaround for data that are", "self._template % (self._path, self._filename, start, point_number+i) ret.append(self.get_data(fn)) elif self.trigger_mode==triggerMode.external_trigger: fn = self._template %", "self._initial_number, point_number, self._fpp) print(\" \", self._template, self._path, self._initial_number) self.update_path() if self.subdir is not", "__init__(self, rpath, template, filename, frame_per_point=1, initial_number=1): print(f'Initializing CBF handler for {self.trigger_mode} ...') self._template", "'%06d', 'SAXS', 'cbf'], ['%s%s', '%06d', 'SAXS', '%05d', 'cbf'] # resulting in file names", "print(\"CBF handler called: start=%d, stop=%d\" % (start, stop)) print(\" \", self._initial_number, point_number, self._fpp)", "Exception(f\"invalid froot: {self.froot}\") self._path = self.froot.value+self._dir print(f\"updating path, will read data from {self._path}", "always have names with these extensions std_image_size = { 'SAXS': (1043, 981), 'WAXS1':", "len(tl)==4: tl = tl[:-1]+[\"%05d\"]+tl[-1:] elif len(tl)==5: tl = tl[:-2]+tl[-1:] self._template = \"_\".join(tl[:-1])+\".\"+tl[-1] print(\"CBF", "#global pilatus_trigger_mode #global default_data_path_root #global substitute_data_path_root #global CBF_replace_data_path #pilatus_trigger_mode = triggerMode.software_trigger_single_frame # if", "== 1: fn = self._template % (self._path, self._filename, self._initial_number+point_number) ret.append(self.get_data(fn)) elif self.trigger_mode 
in", "stop = start + 1 ret = [] tplt = self._template.replace(\"6.6d\", \"06d\") #", "= False class PilatusCBFHandler(HandlerBase): specs = {'AD_CBF'} | HandlerBase.specs froot = data_file_path.gpfs subdir", "fr in data_file_path: if self._default_path.find(fr.value)==0: self._dir = self._default_path[len(fr.value):] return raise Exception(f\"invalid file path:", "#external_trigger_multi_frame = 5 # this is unnecessary, difference is fpp #global pilatus_trigger_mode #global", "frame_per_point=1, initial_number=1): print(f'Initializing CBF handler for {self.trigger_mode} ...') self._template = template self._fpp =", "names like test_000125_SAXS.cbf vs test_000125_SAXS_00001.cbf if self.trigger_mode != triggerMode.software_trigger_single_frame and self._fpp>1: # the", "self._image_size = None self._default_path = os.path.join(rpath, '') self._path = \"\" for k in", "for k in self.std_image_size: if template.find(k)>=0: self._image_size = self.std_image_size[k] if self._image_size is None:", "= self.froot.value+self._dir print(f\"updating path, will read data from {self._path} ...\") def get_data(self, fn):", "if self._default_path.find(fr.value)==0: self._dir = self._default_path[len(fr.value):] return raise Exception(f\"invalid file path: {self._default_path}\") def update_path(self):", "exist \"\"\" try: img = fabio.open(fn) data = img.data if data.shape!=self._image_size: print(f'got incorrect", "backward compatibility, fpp was always 1 before Jan 2018 #global pilatus_fpp #pilatus_fpp =", "in file names like test_000125_SAXS.cbf vs test_000125_SAXS_00001.cbf if self.trigger_mode != triggerMode.software_trigger_single_frame and self._fpp>1:", "= 5 # this is unnecessary, difference is fpp #global pilatus_trigger_mode #global default_data_path_root", "data from {self._path} ...\") def get_data(self, fn): \"\"\" the file may not exist", "handler called: start=%d, stop=%d\" % (start, stop)) print(\" \", self._initial_number, point_number, self._fpp) print(\"", 
"filename, frame_per_point=1, initial_number=1): print(f'Initializing CBF handler for {self.trigger_mode} ...') self._template = template self._fpp", "in data_file_path: raise Exception(f\"invalid froot: {self.froot}\") self._path = self.froot.value+self._dir print(f\"updating path, will read", "then moved to /nsls2/xf16id1/exp_path if not self.froot in data_file_path: raise Exception(f\"invalid froot: {self.froot}\")", "tl[:-2]+tl[-1:] self._template = \"_\".join(tl[:-1])+\".\"+tl[-1] print(\"CBF handler called: start=%d, stop=%d\" % (start, stop)) print(\"", "self._initial_number+point_number) ret.append(self.get_data(fn)) elif self.trigger_mode in [triggerMode.software_trigger_multi_frame, triggerMode.fly_scan]: for i in range(self._fpp): fn =", "is not None: self._path += f\"{self.subdir}/\" if self.trigger_mode == triggerMode.software_trigger_single_frame or self._fpp ==", "vs test_000125_SAXS_00001.cbf if self.trigger_mode != triggerMode.software_trigger_single_frame and self._fpp>1: # the template needs to", "fpp #global pilatus_trigger_mode #global default_data_path_root #global substitute_data_path_root #global CBF_replace_data_path #pilatus_trigger_mode = triggerMode.software_trigger_single_frame #", "'cbf'], ['%s%s', '%06d', 'SAXS', '%05d', 'cbf'] # resulting in file names like test_000125_SAXS.cbf", "Exception(f'Unrecognized data file extension in filename template: {template}') for fr in data_file_path: if", "the template needs to have two number fileds if len(tl)==4: tl = tl[:-1]+[\"%05d\"]+tl[-1:]", "fpp was always 1 before Jan 2018 #global pilatus_fpp #pilatus_fpp = 1 #", "template, filename, frame_per_point=1, initial_number=1): print(f'Initializing CBF handler for {self.trigger_mode} ...') self._template = template", "software_trigger_single_frame = 1 software_trigger_multi_frame = 2 external_trigger = 3 fly_scan = 4 #external_trigger_multi_frame", "\"\" for k in self.std_image_size: if template.find(k)>=0: self._image_size = 
self.std_image_size[k] if self._image_size is", "file may not exist \"\"\" try: img = fabio.open(fn) data = img.data if", "may not exist \"\"\" try: img = fabio.open(fn) data = img.data if data.shape!=self._image_size:", "self._filename, self._initial_number+point_number) ret.append(self.get_data(fn)) elif self.trigger_mode in [triggerMode.software_trigger_multi_frame, triggerMode.fly_scan]: for i in range(self._fpp): fn", "raise Exception(f'Unrecognized data file extension in filename template: {template}') for fr in data_file_path:", "self._default_path.find(fr.value)==0: self._dir = self._default_path[len(fr.value):] return raise Exception(f\"invalid file path: {self._default_path}\") def update_path(self): #", "initial_number=1): print(f'Initializing CBF handler for {self.trigger_mode} ...') self._template = template self._fpp = frame_per_point", "import DuplicateHandler import fabio # for backward compatibility, fpp was always 1 before" ]
[ "PetPhotoSerializer(many=True, read_only=True) class Meta: model = Pet fields = (\"id\", \"name\", \"age\", \"type\",", "serializers.ImageField() class IdsSerializer(serializers.Serializer): \"\"\"Deserialization of pet IDs.\"\"\" ids = serializers.ListField( child=serializers.IntegerField(min_value=1), allow_empty=False )", "\"type\", \"photos\", \"created_at\") class PhotoLoadSerializer(serializers.Serializer): \"\"\"Deserialization of the uploaded pet photo.\"\"\" file =", "import serializers from api.models import Pet, PetPhoto class PetPhotoSerializer(serializers.ModelSerializer): \"\"\"Serialization of pet photos.\"\"\"", "class PetPhotoSerializer(serializers.ModelSerializer): \"\"\"Serialization of pet photos.\"\"\" url = serializers.ImageField(source=\"photo\") class Meta: model =", "fields = (\"id\", \"url\") def to_internal_value(self, data): resource_data = data[\"file\"] return super().to_internal_value(resource_data) class", "pet photos.\"\"\" url = serializers.ImageField(source=\"photo\") class Meta: model = PetPhoto fields = (\"id\",", "\"age\", \"type\", \"photos\", \"created_at\") class PhotoLoadSerializer(serializers.Serializer): \"\"\"Deserialization of the uploaded pet photo.\"\"\" file", "\"\"\"Serialization of pet photos.\"\"\" url = serializers.ImageField(source=\"photo\") class Meta: model = PetPhoto fields", "Pet, PetPhoto class PetPhotoSerializer(serializers.ModelSerializer): \"\"\"Serialization of pet photos.\"\"\" url = serializers.ImageField(source=\"photo\") class Meta:", "<reponame>V-Holodov/pets_accounting from rest_framework import serializers from api.models import Pet, PetPhoto class PetPhotoSerializer(serializers.ModelSerializer): \"\"\"Serialization", "photos = PetPhotoSerializer(many=True, read_only=True) class Meta: model = Pet fields = (\"id\", \"name\",", "model = Pet fields = (\"id\", \"name\", \"age\", \"type\", \"photos\", \"created_at\") class PhotoLoadSerializer(serializers.Serializer):", "\"name\", \"age\", \"type\", 
\"photos\", \"created_at\") class PhotoLoadSerializer(serializers.Serializer): \"\"\"Deserialization of the uploaded pet photo.\"\"\"", "pet photo.\"\"\" file = serializers.ImageField() class IdsSerializer(serializers.Serializer): \"\"\"Deserialization of pet IDs.\"\"\" ids =", "= data[\"file\"] return super().to_internal_value(resource_data) class PetSerializer(serializers.ModelSerializer): \"\"\"Pet sterilization.\"\"\" photos = PetPhotoSerializer(many=True, read_only=True) class", "rest_framework import serializers from api.models import Pet, PetPhoto class PetPhotoSerializer(serializers.ModelSerializer): \"\"\"Serialization of pet", "url = serializers.ImageField(source=\"photo\") class Meta: model = PetPhoto fields = (\"id\", \"url\") def", "data): resource_data = data[\"file\"] return super().to_internal_value(resource_data) class PetSerializer(serializers.ModelSerializer): \"\"\"Pet sterilization.\"\"\" photos = PetPhotoSerializer(many=True,", "class PetSerializer(serializers.ModelSerializer): \"\"\"Pet sterilization.\"\"\" photos = PetPhotoSerializer(many=True, read_only=True) class Meta: model = Pet", "photos.\"\"\" url = serializers.ImageField(source=\"photo\") class Meta: model = PetPhoto fields = (\"id\", \"url\")", "Meta: model = Pet fields = (\"id\", \"name\", \"age\", \"type\", \"photos\", \"created_at\") class", "data[\"file\"] return super().to_internal_value(resource_data) class PetSerializer(serializers.ModelSerializer): \"\"\"Pet sterilization.\"\"\" photos = PetPhotoSerializer(many=True, read_only=True) class Meta:", "fields = (\"id\", \"name\", \"age\", \"type\", \"photos\", \"created_at\") class PhotoLoadSerializer(serializers.Serializer): \"\"\"Deserialization of the", "api.models import Pet, PetPhoto class PetPhotoSerializer(serializers.ModelSerializer): \"\"\"Serialization of pet photos.\"\"\" url = serializers.ImageField(source=\"photo\")", "PetPhotoSerializer(serializers.ModelSerializer): \"\"\"Serialization of pet photos.\"\"\" url = 
serializers.ImageField(source=\"photo\") class Meta: model = PetPhoto", "\"\"\"Pet sterilization.\"\"\" photos = PetPhotoSerializer(many=True, read_only=True) class Meta: model = Pet fields =", "PetPhoto class PetPhotoSerializer(serializers.ModelSerializer): \"\"\"Serialization of pet photos.\"\"\" url = serializers.ImageField(source=\"photo\") class Meta: model", "to_internal_value(self, data): resource_data = data[\"file\"] return super().to_internal_value(resource_data) class PetSerializer(serializers.ModelSerializer): \"\"\"Pet sterilization.\"\"\" photos =", "\"photos\", \"created_at\") class PhotoLoadSerializer(serializers.Serializer): \"\"\"Deserialization of the uploaded pet photo.\"\"\" file = serializers.ImageField()", "import Pet, PetPhoto class PetPhotoSerializer(serializers.ModelSerializer): \"\"\"Serialization of pet photos.\"\"\" url = serializers.ImageField(source=\"photo\") class", "= PetPhotoSerializer(many=True, read_only=True) class Meta: model = Pet fields = (\"id\", \"name\", \"age\",", "class PhotoLoadSerializer(serializers.Serializer): \"\"\"Deserialization of the uploaded pet photo.\"\"\" file = serializers.ImageField() class IdsSerializer(serializers.Serializer):", "Meta: model = PetPhoto fields = (\"id\", \"url\") def to_internal_value(self, data): resource_data =", "PetPhoto fields = (\"id\", \"url\") def to_internal_value(self, data): resource_data = data[\"file\"] return super().to_internal_value(resource_data)", "class Meta: model = Pet fields = (\"id\", \"name\", \"age\", \"type\", \"photos\", \"created_at\")", "super().to_internal_value(resource_data) class PetSerializer(serializers.ModelSerializer): \"\"\"Pet sterilization.\"\"\" photos = PetPhotoSerializer(many=True, read_only=True) class Meta: model =", "Pet fields = (\"id\", \"name\", \"age\", \"type\", \"photos\", \"created_at\") class PhotoLoadSerializer(serializers.Serializer): \"\"\"Deserialization of", "resource_data = data[\"file\"] return 
super().to_internal_value(resource_data) class PetSerializer(serializers.ModelSerializer): \"\"\"Pet sterilization.\"\"\" photos = PetPhotoSerializer(many=True, read_only=True)", "serializers from api.models import Pet, PetPhoto class PetPhotoSerializer(serializers.ModelSerializer): \"\"\"Serialization of pet photos.\"\"\" url", "from api.models import Pet, PetPhoto class PetPhotoSerializer(serializers.ModelSerializer): \"\"\"Serialization of pet photos.\"\"\" url =", "of pet photos.\"\"\" url = serializers.ImageField(source=\"photo\") class Meta: model = PetPhoto fields =", "= serializers.ImageField(source=\"photo\") class Meta: model = PetPhoto fields = (\"id\", \"url\") def to_internal_value(self,", "serializers.ImageField(source=\"photo\") class Meta: model = PetPhoto fields = (\"id\", \"url\") def to_internal_value(self, data):", "read_only=True) class Meta: model = Pet fields = (\"id\", \"name\", \"age\", \"type\", \"photos\",", "of the uploaded pet photo.\"\"\" file = serializers.ImageField() class IdsSerializer(serializers.Serializer): \"\"\"Deserialization of pet", "= serializers.ImageField() class IdsSerializer(serializers.Serializer): \"\"\"Deserialization of pet IDs.\"\"\" ids = serializers.ListField( child=serializers.IntegerField(min_value=1), allow_empty=False", "\"url\") def to_internal_value(self, data): resource_data = data[\"file\"] return super().to_internal_value(resource_data) class PetSerializer(serializers.ModelSerializer): \"\"\"Pet sterilization.\"\"\"", "from rest_framework import serializers from api.models import Pet, PetPhoto class PetPhotoSerializer(serializers.ModelSerializer): \"\"\"Serialization of", "the uploaded pet photo.\"\"\" file = serializers.ImageField() class IdsSerializer(serializers.Serializer): \"\"\"Deserialization of pet IDs.\"\"\"", "PhotoLoadSerializer(serializers.Serializer): \"\"\"Deserialization of the uploaded pet photo.\"\"\" file = serializers.ImageField() class IdsSerializer(serializers.Serializer): 
\"\"\"Deserialization", "PetSerializer(serializers.ModelSerializer): \"\"\"Pet sterilization.\"\"\" photos = PetPhotoSerializer(many=True, read_only=True) class Meta: model = Pet fields", "file = serializers.ImageField() class IdsSerializer(serializers.Serializer): \"\"\"Deserialization of pet IDs.\"\"\" ids = serializers.ListField( child=serializers.IntegerField(min_value=1),", "= Pet fields = (\"id\", \"name\", \"age\", \"type\", \"photos\", \"created_at\") class PhotoLoadSerializer(serializers.Serializer): \"\"\"Deserialization", "return super().to_internal_value(resource_data) class PetSerializer(serializers.ModelSerializer): \"\"\"Pet sterilization.\"\"\" photos = PetPhotoSerializer(many=True, read_only=True) class Meta: model", "photo.\"\"\" file = serializers.ImageField() class IdsSerializer(serializers.Serializer): \"\"\"Deserialization of pet IDs.\"\"\" ids = serializers.ListField(", "sterilization.\"\"\" photos = PetPhotoSerializer(many=True, read_only=True) class Meta: model = Pet fields = (\"id\",", "\"created_at\") class PhotoLoadSerializer(serializers.Serializer): \"\"\"Deserialization of the uploaded pet photo.\"\"\" file = serializers.ImageField() class", "\"\"\"Deserialization of the uploaded pet photo.\"\"\" file = serializers.ImageField() class IdsSerializer(serializers.Serializer): \"\"\"Deserialization of", "class Meta: model = PetPhoto fields = (\"id\", \"url\") def to_internal_value(self, data): resource_data", "= (\"id\", \"url\") def to_internal_value(self, data): resource_data = data[\"file\"] return super().to_internal_value(resource_data) class PetSerializer(serializers.ModelSerializer):", "uploaded pet photo.\"\"\" file = serializers.ImageField() class IdsSerializer(serializers.Serializer): \"\"\"Deserialization of pet IDs.\"\"\" ids", "model = PetPhoto fields = (\"id\", \"url\") def to_internal_value(self, data): resource_data = data[\"file\"]", "(\"id\", \"name\", \"age\", \"type\", \"photos\", \"created_at\") class 
PhotoLoadSerializer(serializers.Serializer): \"\"\"Deserialization of the uploaded pet", "= (\"id\", \"name\", \"age\", \"type\", \"photos\", \"created_at\") class PhotoLoadSerializer(serializers.Serializer): \"\"\"Deserialization of the uploaded", "def to_internal_value(self, data): resource_data = data[\"file\"] return super().to_internal_value(resource_data) class PetSerializer(serializers.ModelSerializer): \"\"\"Pet sterilization.\"\"\" photos", "= PetPhoto fields = (\"id\", \"url\") def to_internal_value(self, data): resource_data = data[\"file\"] return", "(\"id\", \"url\") def to_internal_value(self, data): resource_data = data[\"file\"] return super().to_internal_value(resource_data) class PetSerializer(serializers.ModelSerializer): \"\"\"Pet" ]
[ "self.mean image_tag = prefix + '/' + 'image' self._writer.add_images(image_tag, images, step) reconstruction_tag =", "embeddings.detach().cpu().numpy() sim = cosine_distances(embeddings) self._writer.add_image('cos_sim_vq_embeddings', sim, step, dataformats='HW') def double_soft_orthogonality(weights: torch.Tensor): a =", "= writer def update(self, name: str, val, n=1): self.average_meters[name].update(val, n) def reset(self): for", "= torch.tensor(self.std).reshape(1, 3, 1, 1) def write(self, images, reconstruction, step, prefix): images =", "2, 0).numpy()) plt.show() class AccumulateStats: def __enter__(self): pass def __exit__(self): pass def __call__(self):", "self.avg = self.sum / self.count class MeterLogger: def __init__(self, meters: Tuple[str], writer: SummaryWriter):", "- torch.eye(weights.shape[0]).to(weights.device)) ** 2 b = torch.norm(weights.t() @ weights - torch.eye(weights.shape[1]).to(weights.device)) ** 2", "SummaryWriter CIFAR10_ANNOTATION = { 0: 'airplane', 1: 'automobile', 2: 'bird', 3: 'cat', 4:", "prefix): for name, meter in self.average_meters.items(): tag = prefix + '/' + name", "step): embeddings = embeddings.detach().cpu().numpy() sim = cosine_distances(embeddings) self._writer.add_image('cos_sim_vq_embeddings', sim, step, dataformats='HW') def double_soft_orthogonality(weights:", "and current value \"\"\" def __init__(self): self.val = 0 self.avg = 0 self.sum", "__enter__(self): pass def __exit__(self): pass def __call__(self): pass class AverageMeter(object): \"\"\" Computes and", "self.val = val self.sum += val * n self.count += n self.avg =", "pass def __call__(self): pass class AverageMeter(object): \"\"\" Computes and stores the average and", "AverageMeter] = {k: AverageMeter() for k in meters} self._writer = writer def update(self,", "None: images = images * self.std + self.mean reconstruction = reconstruction * self.std", "mean=None, std=None): self._writer = writer self.mean = mean self.std = std if self.mean", "0 self.count 
= 0 def update(self, val, n=1): self.val = val self.sum +=", "int, cuda: bool = False): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) if cuda: torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.cuda.random.manual_seed(seed)", "def __init__(self, writer: SummaryWriter): self._writer = writer def write(self, embeddings, step): embeddings =", "plt try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter CIFAR10_ANNOTATION", "images = images * self.std + self.mean reconstruction = reconstruction * self.std +", "writer self.mean = mean self.std = std if self.mean is not None: self.mean", "val * n self.count += n self.avg = self.sum / self.count class MeterLogger:", "torch.eye(weights.shape[1]).to(weights.device)) ** 2 return a + b def set_random_seed(seed: int, cuda: bool =", "return a + b def set_random_seed(seed: int, cuda: bool = False): random.seed(seed) np.random.seed(seed)", "not None and self.std is not None: images = images * self.std +", "= 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum", "random import numpy as np import torch from torchvision import datasets, transforms from", "= prefix + '/' + name self._writer.add_scalar(tag, meter.avg, step) class ImageLogger: def __init__(self,", "name, meter in self.average_meters.items(): tag = prefix + '/' + name self._writer.add_scalar(tag, meter.avg,", "torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter CIFAR10_ANNOTATION = { 0:", "__init__(self, writer: SummaryWriter, mean=None, std=None): self._writer = writer self.mean = mean self.std =", "meter.reset() def write(self, step, prefix): for name, meter in self.average_meters.items(): tag = prefix", "torchvision import datasets, transforms from sklearn.metrics.pairwise import cosine_distances from matplotlib import pyplot as", "= val self.sum += val * n self.count += n self.avg = self.sum", "'/' + 'image' 
self._writer.add_images(image_tag, images, step) reconstruction_tag = prefix + '/' + 'reconstruction'", "import Tuple, Dict import random import numpy as np import torch from torchvision", "= prefix + '/' + 'image' self._writer.add_images(image_tag, images, step) reconstruction_tag = prefix +", "step, dataformats='HW') def double_soft_orthogonality(weights: torch.Tensor): a = torch.norm(weights @ weights.t() - torch.eye(weights.shape[0]).to(weights.device)) **", "= reconstruction * self.std + self.mean image_tag = prefix + '/' + 'image'", "Tuple, Dict import random import numpy as np import torch from torchvision import", "import datasets, transforms from sklearn.metrics.pairwise import cosine_distances from matplotlib import pyplot as plt", "is not None and self.std is not None: images = images * self.std", "def __init__(self): self.val = 0 self.avg = 0 self.sum = 0 self.count =", "= torch.norm(weights.t() @ weights - torch.eye(weights.shape[1]).to(weights.device)) ** 2 return a + b def", "weights - torch.eye(weights.shape[1]).to(weights.device)) ** 2 return a + b def set_random_seed(seed: int, cuda:", "if self.mean is not None and self.std is not None: images = images", "double_soft_orthogonality(weights: torch.Tensor): a = torch.norm(weights @ weights.t() - torch.eye(weights.shape[0]).to(weights.device)) ** 2 b =", "prefix + '/' + 'reconstruction' self._writer.add_images(reconstruction_tag, reconstruction, step) class VQEmbeddingLogger: def __init__(self, writer:", "__call__(self): pass class AverageMeter(object): \"\"\" Computes and stores the average and current value", "= 0 self.count = 0 def reset(self): self.val = 0 self.avg = 0", "n self.count += n self.avg = self.sum / self.count class MeterLogger: def __init__(self,", "str, val, n=1): self.average_meters[name].update(val, n) def reset(self): for meter in self.average_meters.values(): meter.reset() def", "writer def update(self, name: str, val, n=1): self.average_meters[name].update(val, n) def 
reset(self): for meter", "def __call__(self): pass class AverageMeter(object): \"\"\" Computes and stores the average and current", "def __init__(self, meters: Tuple[str], writer: SummaryWriter): self.average_meters: Dict[str, AverageMeter] = {k: AverageMeter() for", "+ '/' + 'reconstruction' self._writer.add_images(reconstruction_tag, reconstruction, step) class VQEmbeddingLogger: def __init__(self, writer: SummaryWriter):", "9: 'truck' } def plot_cifar_image(image, label=\"\"): plt.title(label) plt.imshow(image.permute(1, 2, 0).numpy()) plt.show() class AccumulateStats:", "** 2 b = torch.norm(weights.t() @ weights - torch.eye(weights.shape[1]).to(weights.device)) ** 2 return a", "0).numpy()) plt.show() class AccumulateStats: def __enter__(self): pass def __exit__(self): pass def __call__(self): pass", "1, 1) if self.std is not None: self.std = torch.tensor(self.std).reshape(1, 3, 1, 1)", "+= val * n self.count += n self.avg = self.sum / self.count class", "Tuple[str], writer: SummaryWriter): self.average_meters: Dict[str, AverageMeter] = {k: AverageMeter() for k in meters}", "self.mean = torch.tensor(self.mean).reshape(1, 3, 1, 1) if self.std is not None: self.std =", "from torchvision import datasets, transforms from sklearn.metrics.pairwise import cosine_distances from matplotlib import pyplot", "= { 0: 'airplane', 1: 'automobile', 2: 'bird', 3: 'cat', 4: 'deer', 5:", "reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0", "class MeterLogger: def __init__(self, meters: Tuple[str], writer: SummaryWriter): self.average_meters: Dict[str, AverageMeter] = {k:", "= embeddings.detach().cpu().numpy() sim = cosine_distances(embeddings) self._writer.add_image('cos_sim_vq_embeddings', sim, step, dataformats='HW') def double_soft_orthogonality(weights: torch.Tensor): a", "b def set_random_seed(seed: int, cuda: bool = False): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) if cuda:", "np import torch from torchvision import datasets, transforms 
from sklearn.metrics.pairwise import cosine_distances from", "'bird', 3: 'cat', 4: 'deer', 5: 'dog', 6: 'frog', 7: 'horse', 8: 'ship',", "numpy as np import torch from torchvision import datasets, transforms from sklearn.metrics.pairwise import", "= mean self.std = std if self.mean is not None: self.mean = torch.tensor(self.mean).reshape(1,", "pass class AverageMeter(object): \"\"\" Computes and stores the average and current value \"\"\"", "1) if self.std is not None: self.std = torch.tensor(self.std).reshape(1, 3, 1, 1) def", "= 0 self.avg = 0 self.sum = 0 self.count = 0 def reset(self):", "self.count class MeterLogger: def __init__(self, meters: Tuple[str], writer: SummaryWriter): self.average_meters: Dict[str, AverageMeter] =", "'horse', 8: 'ship', 9: 'truck' } def plot_cifar_image(image, label=\"\"): plt.title(label) plt.imshow(image.permute(1, 2, 0).numpy())", "torch.tensor(self.std).reshape(1, 3, 1, 1) def write(self, images, reconstruction, step, prefix): images = images.cpu()", "= False): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) if cuda: torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.cuda.random.manual_seed(seed) torch.backends.cudnn.deterministic = True", "reconstruction * self.std + self.mean image_tag = prefix + '/' + 'image' self._writer.add_images(image_tag,", "'deer', 5: 'dog', 6: 'frog', 7: 'horse', 8: 'ship', 9: 'truck' } def", "from matplotlib import pyplot as plt try: from torch.utils.tensorboard import SummaryWriter except ImportError:", "1) def write(self, images, reconstruction, step, prefix): images = images.cpu() reconstruction = reconstruction.cpu()", "self.std + self.mean image_tag = prefix + '/' + 'image' self._writer.add_images(image_tag, images, step)", "+ self.mean reconstruction = reconstruction * self.std + self.mean image_tag = prefix +", "import SummaryWriter except ImportError: from tensorboardX import SummaryWriter CIFAR10_ANNOTATION = { 0: 'airplane',", "images = images.cpu() 
reconstruction = reconstruction.cpu() if self.mean is not None and self.std", "reconstruction, step, prefix): images = images.cpu() reconstruction = reconstruction.cpu() if self.mean is not", "not None: self.mean = torch.tensor(self.mean).reshape(1, 3, 1, 1) if self.std is not None:", "write(self, images, reconstruction, step, prefix): images = images.cpu() reconstruction = reconstruction.cpu() if self.mean", "2: 'bird', 3: 'cat', 4: 'deer', 5: 'dog', 6: 'frog', 7: 'horse', 8:", "** 2 return a + b def set_random_seed(seed: int, cuda: bool = False):", "* n self.count += n self.avg = self.sum / self.count class MeterLogger: def", "= prefix + '/' + 'reconstruction' self._writer.add_images(reconstruction_tag, reconstruction, step) class VQEmbeddingLogger: def __init__(self,", "in self.average_meters.values(): meter.reset() def write(self, step, prefix): for name, meter in self.average_meters.items(): tag", "writer: SummaryWriter, mean=None, std=None): self._writer = writer self.mean = mean self.std = std", "0 def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count", "writer def write(self, embeddings, step): embeddings = embeddings.detach().cpu().numpy() sim = cosine_distances(embeddings) self._writer.add_image('cos_sim_vq_embeddings', sim,", "{k: AverageMeter() for k in meters} self._writer = writer def update(self, name: str,", "SummaryWriter): self._writer = writer def write(self, embeddings, step): embeddings = embeddings.detach().cpu().numpy() sim =", "CIFAR10_ANNOTATION = { 0: 'airplane', 1: 'automobile', 2: 'bird', 3: 'cat', 4: 'deer',", "torch.norm(weights.t() @ weights - torch.eye(weights.shape[1]).to(weights.device)) ** 2 return a + b def set_random_seed(seed:", "SummaryWriter): self.average_meters: Dict[str, AverageMeter] = {k: AverageMeter() for k in meters} self._writer =", "meter in self.average_meters.values(): meter.reset() def write(self, step, prefix): for name, meter in self.average_meters.items():", "n=1): 
self.average_meters[name].update(val, n) def reset(self): for meter in self.average_meters.values(): meter.reset() def write(self, step,", "+ b def set_random_seed(seed: int, cuda: bool = False): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) if", "'frog', 7: 'horse', 8: 'ship', 9: 'truck' } def plot_cifar_image(image, label=\"\"): plt.title(label) plt.imshow(image.permute(1,", "k in meters} self._writer = writer def update(self, name: str, val, n=1): self.average_meters[name].update(val,", "from sklearn.metrics.pairwise import cosine_distances from matplotlib import pyplot as plt try: from torch.utils.tensorboard", "def update(self, name: str, val, n=1): self.average_meters[name].update(val, n) def reset(self): for meter in", "self._writer.add_image('cos_sim_vq_embeddings', sim, step, dataformats='HW') def double_soft_orthogonality(weights: torch.Tensor): a = torch.norm(weights @ weights.t() -", "def __exit__(self): pass def __call__(self): pass class AverageMeter(object): \"\"\" Computes and stores the", "self._writer = writer def write(self, embeddings, step): embeddings = embeddings.detach().cpu().numpy() sim = cosine_distances(embeddings)", "and stores the average and current value \"\"\" def __init__(self): self.val = 0", "meters} self._writer = writer def update(self, name: str, val, n=1): self.average_meters[name].update(val, n) def", "import torch from torchvision import datasets, transforms from sklearn.metrics.pairwise import cosine_distances from matplotlib", "cosine_distances from matplotlib import pyplot as plt try: from torch.utils.tensorboard import SummaryWriter except", "self.sum = 0 self.count = 0 def reset(self): self.val = 0 self.avg =", "val, n=1): self.val = val self.sum += val * n self.count += n", "meter in self.average_meters.items(): tag = prefix + '/' + name self._writer.add_scalar(tag, meter.avg, step)", "write(self, embeddings, step): embeddings = embeddings.detach().cpu().numpy() sim = cosine_distances(embeddings) 
self._writer.add_image('cos_sim_vq_embeddings', sim, step, dataformats='HW')", "self.count = 0 def update(self, val, n=1): self.val = val self.sum += val", "if self.mean is not None: self.mean = torch.tensor(self.mean).reshape(1, 3, 1, 1) if self.std", "prefix): images = images.cpu() reconstruction = reconstruction.cpu() if self.mean is not None and", "images, reconstruction, step, prefix): images = images.cpu() reconstruction = reconstruction.cpu() if self.mean is", "import SummaryWriter CIFAR10_ANNOTATION = { 0: 'airplane', 1: 'automobile', 2: 'bird', 3: 'cat',", "= 0 self.sum = 0 self.count = 0 def reset(self): self.val = 0", "AverageMeter() for k in meters} self._writer = writer def update(self, name: str, val,", "plt.title(label) plt.imshow(image.permute(1, 2, 0).numpy()) plt.show() class AccumulateStats: def __enter__(self): pass def __exit__(self): pass", "n) def reset(self): for meter in self.average_meters.values(): meter.reset() def write(self, step, prefix): for", "tensorboardX import SummaryWriter CIFAR10_ANNOTATION = { 0: 'airplane', 1: 'automobile', 2: 'bird', 3:", "* self.std + self.mean image_tag = prefix + '/' + 'image' self._writer.add_images(image_tag, images,", "= std if self.mean is not None: self.mean = torch.tensor(self.mean).reshape(1, 3, 1, 1)", "weights.t() - torch.eye(weights.shape[0]).to(weights.device)) ** 2 b = torch.norm(weights.t() @ weights - torch.eye(weights.shape[1]).to(weights.device)) **", "def reset(self): for meter in self.average_meters.values(): meter.reset() def write(self, step, prefix): for name,", "2 return a + b def set_random_seed(seed: int, cuda: bool = False): random.seed(seed)", "val, n=1): self.average_meters[name].update(val, n) def reset(self): for meter in self.average_meters.values(): meter.reset() def write(self,", "mean self.std = std if self.mean is not None: self.mean = torch.tensor(self.mean).reshape(1, 3,", "+ self.mean image_tag = prefix + '/' + 'image' self._writer.add_images(image_tag, images, step) 
reconstruction_tag", "embeddings, step): embeddings = embeddings.detach().cpu().numpy() sim = cosine_distances(embeddings) self._writer.add_image('cos_sim_vq_embeddings', sim, step, dataformats='HW') def", "prefix + '/' + 'image' self._writer.add_images(image_tag, images, step) reconstruction_tag = prefix + '/'", "self._writer.add_scalar(tag, meter.avg, step) class ImageLogger: def __init__(self, writer: SummaryWriter, mean=None, std=None): self._writer =", "= 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val", "0 self.sum = 0 self.count = 0 def reset(self): self.val = 0 self.avg", "None: self.mean = torch.tensor(self.mean).reshape(1, 3, 1, 1) if self.std is not None: self.std", "import pyplot as plt try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX", "in meters} self._writer = writer def update(self, name: str, val, n=1): self.average_meters[name].update(val, n)", "writer: SummaryWriter): self._writer = writer def write(self, embeddings, step): embeddings = embeddings.detach().cpu().numpy() sim", "reconstruction = reconstruction.cpu() if self.mean is not None and self.std is not None:", "'automobile', 2: 'bird', 3: 'cat', 4: 'deer', 5: 'dog', 6: 'frog', 7: 'horse',", "class ImageLogger: def __init__(self, writer: SummaryWriter, mean=None, std=None): self._writer = writer self.mean =", "0 self.count = 0 def reset(self): self.val = 0 self.avg = 0 self.sum", "val self.sum += val * n self.count += n self.avg = self.sum /", "step, prefix): images = images.cpu() reconstruction = reconstruction.cpu() if self.mean is not None", "label=\"\"): plt.title(label) plt.imshow(image.permute(1, 2, 0).numpy()) plt.show() class AccumulateStats: def __enter__(self): pass def __exit__(self):", "meters: Tuple[str], writer: SummaryWriter): self.average_meters: Dict[str, AverageMeter] = {k: AverageMeter() for k in", "reconstruction.cpu() if self.mean is not None and self.std is not None: images =", "__exit__(self): pass def 
__call__(self): pass class AverageMeter(object): \"\"\" Computes and stores the average", "AccumulateStats: def __enter__(self): pass def __exit__(self): pass def __call__(self): pass class AverageMeter(object): \"\"\"", "'/' + name self._writer.add_scalar(tag, meter.avg, step) class ImageLogger: def __init__(self, writer: SummaryWriter, mean=None,", "b = torch.norm(weights.t() @ weights - torch.eye(weights.shape[1]).to(weights.device)) ** 2 return a + b", "self.average_meters.items(): tag = prefix + '/' + name self._writer.add_scalar(tag, meter.avg, step) class ImageLogger:", "self.count = 0 def reset(self): self.val = 0 self.avg = 0 self.sum =", "self.sum / self.count class MeterLogger: def __init__(self, meters: Tuple[str], writer: SummaryWriter): self.average_meters: Dict[str,", "update(self, val, n=1): self.val = val self.sum += val * n self.count +=", "3, 1, 1) if self.std is not None: self.std = torch.tensor(self.std).reshape(1, 3, 1,", "typing import Tuple, Dict import random import numpy as np import torch from", "a + b def set_random_seed(seed: int, cuda: bool = False): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed)", "self.std + self.mean reconstruction = reconstruction * self.std + self.mean image_tag = prefix", "step) class ImageLogger: def __init__(self, writer: SummaryWriter, mean=None, std=None): self._writer = writer self.mean", "self.mean is not None: self.mean = torch.tensor(self.mean).reshape(1, 3, 1, 1) if self.std is", "= writer def write(self, embeddings, step): embeddings = embeddings.detach().cpu().numpy() sim = cosine_distances(embeddings) self._writer.add_image('cos_sim_vq_embeddings',", "n self.avg = self.sum / self.count class MeterLogger: def __init__(self, meters: Tuple[str], writer:", "def write(self, step, prefix): for name, meter in self.average_meters.items(): tag = prefix +", "self.count += n self.avg = self.sum / self.count class MeterLogger: def __init__(self, meters:", "1, 1) def write(self, images, 
reconstruction, step, prefix): images = images.cpu() reconstruction =", "name: str, val, n=1): self.average_meters[name].update(val, n) def reset(self): for meter in self.average_meters.values(): meter.reset()", "pyplot as plt try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import", "try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter CIFAR10_ANNOTATION =", "def __init__(self, writer: SummaryWriter, mean=None, std=None): self._writer = writer self.mean = mean self.std", "= writer self.mean = mean self.std = std if self.mean is not None:", "for name, meter in self.average_meters.items(): tag = prefix + '/' + name self._writer.add_scalar(tag,", "= 0 def update(self, val, n=1): self.val = val self.sum += val *", "'ship', 9: 'truck' } def plot_cifar_image(image, label=\"\"): plt.title(label) plt.imshow(image.permute(1, 2, 0).numpy()) plt.show() class", "def plot_cifar_image(image, label=\"\"): plt.title(label) plt.imshow(image.permute(1, 2, 0).numpy()) plt.show() class AccumulateStats: def __enter__(self): pass", "std=None): self._writer = writer self.mean = mean self.std = std if self.mean is", "name self._writer.add_scalar(tag, meter.avg, step) class ImageLogger: def __init__(self, writer: SummaryWriter, mean=None, std=None): self._writer", "reconstruction = reconstruction * self.std + self.mean image_tag = prefix + '/' +", "image_tag = prefix + '/' + 'image' self._writer.add_images(image_tag, images, step) reconstruction_tag = prefix", "step) reconstruction_tag = prefix + '/' + 'reconstruction' self._writer.add_images(reconstruction_tag, reconstruction, step) class VQEmbeddingLogger:", "+ 'image' self._writer.add_images(image_tag, images, step) reconstruction_tag = prefix + '/' + 'reconstruction' self._writer.add_images(reconstruction_tag,", "ImageLogger: def __init__(self, writer: SummaryWriter, mean=None, std=None): self._writer = writer self.mean = mean", 
"reconstruction_tag = prefix + '/' + 'reconstruction' self._writer.add_images(reconstruction_tag, reconstruction, step) class VQEmbeddingLogger: def", "self.std is not None: self.std = torch.tensor(self.std).reshape(1, 3, 1, 1) def write(self, images,", "'truck' } def plot_cifar_image(image, label=\"\"): plt.title(label) plt.imshow(image.permute(1, 2, 0).numpy()) plt.show() class AccumulateStats: def", "the average and current value \"\"\" def __init__(self): self.val = 0 self.avg =", "value \"\"\" def __init__(self): self.val = 0 self.avg = 0 self.sum = 0", "def set_random_seed(seed: int, cuda: bool = False): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) if cuda: torch.cuda.manual_seed(seed)", "__init__(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0", "for meter in self.average_meters.values(): meter.reset() def write(self, step, prefix): for name, meter in", "+ '/' + name self._writer.add_scalar(tag, meter.avg, step) class ImageLogger: def __init__(self, writer: SummaryWriter,", "'image' self._writer.add_images(image_tag, images, step) reconstruction_tag = prefix + '/' + 'reconstruction' self._writer.add_images(reconstruction_tag, reconstruction,", "4: 'deer', 5: 'dog', 6: 'frog', 7: 'horse', 8: 'ship', 9: 'truck' }", "- torch.eye(weights.shape[1]).to(weights.device)) ** 2 return a + b def set_random_seed(seed: int, cuda: bool", "self._writer = writer self.mean = mean self.std = std if self.mean is not", "SummaryWriter, mean=None, std=None): self._writer = writer self.mean = mean self.std = std if", "SummaryWriter except ImportError: from tensorboardX import SummaryWriter CIFAR10_ANNOTATION = { 0: 'airplane', 1:", "self.avg = 0 self.sum = 0 self.count = 0 def reset(self): self.val =", "self.sum += val * n self.count += n self.avg = self.sum / self.count", "= 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self,", "reset(self): for meter in self.average_meters.values(): meter.reset() def write(self, step, prefix): for name, 
meter", "sim, step, dataformats='HW') def double_soft_orthogonality(weights: torch.Tensor): a = torch.norm(weights @ weights.t() - torch.eye(weights.shape[0]).to(weights.device))", "class VQEmbeddingLogger: def __init__(self, writer: SummaryWriter): self._writer = writer def write(self, embeddings, step):", "def __enter__(self): pass def __exit__(self): pass def __call__(self): pass class AverageMeter(object): \"\"\" Computes", "= torch.norm(weights @ weights.t() - torch.eye(weights.shape[0]).to(weights.device)) ** 2 b = torch.norm(weights.t() @ weights", "= reconstruction.cpu() if self.mean is not None and self.std is not None: images", "self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def", "self.std is not None: images = images * self.std + self.mean reconstruction =", "plt.imshow(image.permute(1, 2, 0).numpy()) plt.show() class AccumulateStats: def __enter__(self): pass def __exit__(self): pass def", "+ 'reconstruction' self._writer.add_images(reconstruction_tag, reconstruction, step) class VQEmbeddingLogger: def __init__(self, writer: SummaryWriter): self._writer =", "'cat', 4: 'deer', 5: 'dog', 6: 'frog', 7: 'horse', 8: 'ship', 9: 'truck'", "cosine_distances(embeddings) self._writer.add_image('cos_sim_vq_embeddings', sim, step, dataformats='HW') def double_soft_orthogonality(weights: torch.Tensor): a = torch.norm(weights @ weights.t()", "a = torch.norm(weights @ weights.t() - torch.eye(weights.shape[0]).to(weights.device)) ** 2 b = torch.norm(weights.t() @", "* self.std + self.mean reconstruction = reconstruction * self.std + self.mean image_tag =", "torch.eye(weights.shape[0]).to(weights.device)) ** 2 b = torch.norm(weights.t() @ weights - torch.eye(weights.shape[1]).to(weights.device)) ** 2 return", "step, prefix): for name, meter in self.average_meters.items(): tag = prefix + '/' +", "from tensorboardX import SummaryWriter CIFAR10_ANNOTATION = { 0: 'airplane', 1: 'automobile', 2: 'bird',", "0 self.sum = 0 self.count = 0 def update(self, val, n=1): 
self.val =", "class AccumulateStats: def __enter__(self): pass def __exit__(self): pass def __call__(self): pass class AverageMeter(object):", "bool = False): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) if cuda: torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.cuda.random.manual_seed(seed) torch.backends.cudnn.deterministic =", "\"\"\" Computes and stores the average and current value \"\"\" def __init__(self): self.val", "except ImportError: from tensorboardX import SummaryWriter CIFAR10_ANNOTATION = { 0: 'airplane', 1: 'automobile',", "transforms from sklearn.metrics.pairwise import cosine_distances from matplotlib import pyplot as plt try: from", "0 self.avg = 0 self.sum = 0 self.count = 0 def reset(self): self.val", "MeterLogger: def __init__(self, meters: Tuple[str], writer: SummaryWriter): self.average_meters: Dict[str, AverageMeter] = {k: AverageMeter()", "and self.std is not None: images = images * self.std + self.mean reconstruction", "+ name self._writer.add_scalar(tag, meter.avg, step) class ImageLogger: def __init__(self, writer: SummaryWriter, mean=None, std=None):", "AverageMeter(object): \"\"\" Computes and stores the average and current value \"\"\" def __init__(self):", "'reconstruction' self._writer.add_images(reconstruction_tag, reconstruction, step) class VQEmbeddingLogger: def __init__(self, writer: SummaryWriter): self._writer = writer", "Dict[str, AverageMeter] = {k: AverageMeter() for k in meters} self._writer = writer def", "def update(self, val, n=1): self.val = val self.sum += val * n self.count", "n=1): self.val = val self.sum += val * n self.count += n self.avg", "+= n self.avg = self.sum / self.count class MeterLogger: def __init__(self, meters: Tuple[str],", "= {k: AverageMeter() for k in meters} self._writer = writer def update(self, name:", "self.mean = mean self.std = std if self.mean is not None: self.mean =", "'dog', 6: 'frog', 7: 'horse', 8: 'ship', 9: 'truck' } def plot_cifar_image(image, 
label=\"\"):", "self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val", "def write(self, images, reconstruction, step, prefix): images = images.cpu() reconstruction = reconstruction.cpu() if", "pass def __exit__(self): pass def __call__(self): pass class AverageMeter(object): \"\"\" Computes and stores", "reconstruction, step) class VQEmbeddingLogger: def __init__(self, writer: SummaryWriter): self._writer = writer def write(self,", "0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val,", "__init__(self, writer: SummaryWriter): self._writer = writer def write(self, embeddings, step): embeddings = embeddings.detach().cpu().numpy()", "@ weights.t() - torch.eye(weights.shape[0]).to(weights.device)) ** 2 b = torch.norm(weights.t() @ weights - torch.eye(weights.shape[1]).to(weights.device))", "= self.sum / self.count class MeterLogger: def __init__(self, meters: Tuple[str], writer: SummaryWriter): self.average_meters:", "stores the average and current value \"\"\" def __init__(self): self.val = 0 self.avg", "5: 'dog', 6: 'frog', 7: 'horse', 8: 'ship', 9: 'truck' } def plot_cifar_image(image,", "None and self.std is not None: images = images * self.std + self.mean", "def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count =", "= images * self.std + self.mean reconstruction = reconstruction * self.std + self.mean", "0 def update(self, val, n=1): self.val = val self.sum += val * n", "self.average_meters.values(): meter.reset() def write(self, step, prefix): for name, meter in self.average_meters.items(): tag =", "self.std = torch.tensor(self.std).reshape(1, 3, 1, 1) def write(self, images, reconstruction, step, prefix): images", "Computes and stores the average and current value \"\"\" def __init__(self): self.val =", "dataformats='HW') def double_soft_orthogonality(weights: torch.Tensor): a = torch.norm(weights @ weights.t() - torch.eye(weights.shape[0]).to(weights.device)) ** 2", "datasets, transforms from sklearn.metrics.pairwise 
import cosine_distances from matplotlib import pyplot as plt try:", "as plt try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter", "0: 'airplane', 1: 'automobile', 2: 'bird', 3: 'cat', 4: 'deer', 5: 'dog', 6:", "/ self.count class MeterLogger: def __init__(self, meters: Tuple[str], writer: SummaryWriter): self.average_meters: Dict[str, AverageMeter]", "not None: images = images * self.std + self.mean reconstruction = reconstruction *", "set_random_seed(seed: int, cuda: bool = False): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) if cuda: torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed)", "self._writer.add_images(reconstruction_tag, reconstruction, step) class VQEmbeddingLogger: def __init__(self, writer: SummaryWriter): self._writer = writer def", "def write(self, embeddings, step): embeddings = embeddings.detach().cpu().numpy() sim = cosine_distances(embeddings) self._writer.add_image('cos_sim_vq_embeddings', sim, step,", "6: 'frog', 7: 'horse', 8: 'ship', 9: 'truck' } def plot_cifar_image(image, label=\"\"): plt.title(label)", "self.average_meters: Dict[str, AverageMeter] = {k: AverageMeter() for k in meters} self._writer = writer", "@ weights - torch.eye(weights.shape[1]).to(weights.device)) ** 2 return a + b def set_random_seed(seed: int,", "plot_cifar_image(image, label=\"\"): plt.title(label) plt.imshow(image.permute(1, 2, 0).numpy()) plt.show() class AccumulateStats: def __enter__(self): pass def", "__init__(self, meters: Tuple[str], writer: SummaryWriter): self.average_meters: Dict[str, AverageMeter] = {k: AverageMeter() for k", "+ '/' + 'image' self._writer.add_images(image_tag, images, step) reconstruction_tag = prefix + '/' +", "\"\"\" def __init__(self): self.val = 0 self.avg = 0 self.sum = 0 self.count", "= images.cpu() reconstruction = reconstruction.cpu() if self.mean is not None and self.std is", "3, 1, 1) def write(self, images, reconstruction, step, prefix): 
images = images.cpu() reconstruction", "sklearn.metrics.pairwise import cosine_distances from matplotlib import pyplot as plt try: from torch.utils.tensorboard import", "writer: SummaryWriter): self.average_meters: Dict[str, AverageMeter] = {k: AverageMeter() for k in meters} self._writer", "class AverageMeter(object): \"\"\" Computes and stores the average and current value \"\"\" def", "torch from torchvision import datasets, transforms from sklearn.metrics.pairwise import cosine_distances from matplotlib import", "self.average_meters[name].update(val, n) def reset(self): for meter in self.average_meters.values(): meter.reset() def write(self, step, prefix):", "current value \"\"\" def __init__(self): self.val = 0 self.avg = 0 self.sum =", "'/' + 'reconstruction' self._writer.add_images(reconstruction_tag, reconstruction, step) class VQEmbeddingLogger: def __init__(self, writer: SummaryWriter): self._writer", "import random import numpy as np import torch from torchvision import datasets, transforms", "not None: self.std = torch.tensor(self.std).reshape(1, 3, 1, 1) def write(self, images, reconstruction, step,", "import numpy as np import torch from torchvision import datasets, transforms from sklearn.metrics.pairwise", "torch.Tensor): a = torch.norm(weights @ weights.t() - torch.eye(weights.shape[0]).to(weights.device)) ** 2 b = torch.norm(weights.t()", "3: 'cat', 4: 'deer', 5: 'dog', 6: 'frog', 7: 'horse', 8: 'ship', 9:", "2 b = torch.norm(weights.t() @ weights - torch.eye(weights.shape[1]).to(weights.device)) ** 2 return a +", "tag = prefix + '/' + name self._writer.add_scalar(tag, meter.avg, step) class ImageLogger: def", "def double_soft_orthogonality(weights: torch.Tensor): a = torch.norm(weights @ weights.t() - torch.eye(weights.shape[0]).to(weights.device)) ** 2 b", "is not None: self.std = torch.tensor(self.std).reshape(1, 3, 1, 1) def write(self, images, reconstruction,", "plt.show() class AccumulateStats: def __enter__(self): pass def __exit__(self): 
pass def __call__(self): pass class", "step) class VQEmbeddingLogger: def __init__(self, writer: SummaryWriter): self._writer = writer def write(self, embeddings,", "None: self.std = torch.tensor(self.std).reshape(1, 3, 1, 1) def write(self, images, reconstruction, step, prefix):", "torch.tensor(self.mean).reshape(1, 3, 1, 1) if self.std is not None: self.std = torch.tensor(self.std).reshape(1, 3,", "is not None: self.mean = torch.tensor(self.mean).reshape(1, 3, 1, 1) if self.std is not", "1: 'automobile', 2: 'bird', 3: 'cat', 4: 'deer', 5: 'dog', 6: 'frog', 7:", "matplotlib import pyplot as plt try: from torch.utils.tensorboard import SummaryWriter except ImportError: from", "self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1):", "'airplane', 1: 'automobile', 2: 'bird', 3: 'cat', 4: 'deer', 5: 'dog', 6: 'frog',", "= cosine_distances(embeddings) self._writer.add_image('cos_sim_vq_embeddings', sim, step, dataformats='HW') def double_soft_orthogonality(weights: torch.Tensor): a = torch.norm(weights @", "std if self.mean is not None: self.mean = torch.tensor(self.mean).reshape(1, 3, 1, 1) if", "} def plot_cifar_image(image, label=\"\"): plt.title(label) plt.imshow(image.permute(1, 2, 0).numpy()) plt.show() class AccumulateStats: def __enter__(self):", "from typing import Tuple, Dict import random import numpy as np import torch", "update(self, name: str, val, n=1): self.average_meters[name].update(val, n) def reset(self): for meter in self.average_meters.values():", "import cosine_distances from matplotlib import pyplot as plt try: from torch.utils.tensorboard import SummaryWriter", "cuda: bool = False): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) if cuda: torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.cuda.random.manual_seed(seed) torch.backends.cudnn.deterministic", "images.cpu() reconstruction = reconstruction.cpu() if self.mean is not None and self.std is not", "7: 'horse', 8: 'ship', 9: 'truck' } def 
plot_cifar_image(image, label=\"\"): plt.title(label) plt.imshow(image.permute(1, 2,", "meter.avg, step) class ImageLogger: def __init__(self, writer: SummaryWriter, mean=None, std=None): self._writer = writer", "= torch.tensor(self.mean).reshape(1, 3, 1, 1) if self.std is not None: self.std = torch.tensor(self.std).reshape(1,", "self._writer.add_images(image_tag, images, step) reconstruction_tag = prefix + '/' + 'reconstruction' self._writer.add_images(reconstruction_tag, reconstruction, step)", "from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter CIFAR10_ANNOTATION = {", "Dict import random import numpy as np import torch from torchvision import datasets,", "= 0 def reset(self): self.val = 0 self.avg = 0 self.sum = 0", "as np import torch from torchvision import datasets, transforms from sklearn.metrics.pairwise import cosine_distances", "torch.norm(weights @ weights.t() - torch.eye(weights.shape[0]).to(weights.device)) ** 2 b = torch.norm(weights.t() @ weights -", "if self.std is not None: self.std = torch.tensor(self.std).reshape(1, 3, 1, 1) def write(self,", "images * self.std + self.mean reconstruction = reconstruction * self.std + self.mean image_tag", "VQEmbeddingLogger: def __init__(self, writer: SummaryWriter): self._writer = writer def write(self, embeddings, step): embeddings", "is not None: images = images * self.std + self.mean reconstruction = reconstruction", "in self.average_meters.items(): tag = prefix + '/' + name self._writer.add_scalar(tag, meter.avg, step) class", "prefix + '/' + name self._writer.add_scalar(tag, meter.avg, step) class ImageLogger: def __init__(self, writer:", "{ 0: 'airplane', 1: 'automobile', 2: 'bird', 3: 'cat', 4: 'deer', 5: 'dog',", "embeddings = embeddings.detach().cpu().numpy() sim = cosine_distances(embeddings) self._writer.add_image('cos_sim_vq_embeddings', sim, step, dataformats='HW') def double_soft_orthogonality(weights: torch.Tensor):", "self._writer = 
writer def update(self, name: str, val, n=1): self.average_meters[name].update(val, n) def reset(self):", "self.std = std if self.mean is not None: self.mean = torch.tensor(self.mean).reshape(1, 3, 1,", "write(self, step, prefix): for name, meter in self.average_meters.items(): tag = prefix + '/'", "self.mean reconstruction = reconstruction * self.std + self.mean image_tag = prefix + '/'", "ImportError: from tensorboardX import SummaryWriter CIFAR10_ANNOTATION = { 0: 'airplane', 1: 'automobile', 2:", "for k in meters} self._writer = writer def update(self, name: str, val, n=1):", "self.mean is not None and self.std is not None: images = images *", "images, step) reconstruction_tag = prefix + '/' + 'reconstruction' self._writer.add_images(reconstruction_tag, reconstruction, step) class", "8: 'ship', 9: 'truck' } def plot_cifar_image(image, label=\"\"): plt.title(label) plt.imshow(image.permute(1, 2, 0).numpy()) plt.show()", "sim = cosine_distances(embeddings) self._writer.add_image('cos_sim_vq_embeddings', sim, step, dataformats='HW') def double_soft_orthogonality(weights: torch.Tensor): a = torch.norm(weights", "average and current value \"\"\" def __init__(self): self.val = 0 self.avg = 0" ]
[ "'data/B/event.txt' # with open(source_file_name, \"r\") as source_file: data = source_file.read() key = 'b'", "= source_file.read() key = 'b' # context = {'b': 2} value = context[key]", "'data/A/event.txt' sink_file_name = 'data/B/event.txt' # with open(source_file_name, \"r\") as source_file: data = source_file.read()", "source_file.read() key = 'b' # context = {'b': 2} value = context[key] #", "# with open(source_file_name, \"r\") as source_file: data = source_file.read() key = 'b' #", "\"r\") as source_file: data = source_file.read() key = 'b' # context = {'b':", "= 'data/A/event.txt' sink_file_name = 'data/B/event.txt' # with open(source_file_name, \"r\") as source_file: data =", "# context = {'b': 2} value = context[key] # print(value) #&&&# print(data) #", "context = {'b': 2} value = context[key] # print(value) #&&&# print(data) # with", "with open(source_file_name, \"r\") as source_file: data = source_file.read() key = 'b' # context", "source_file_name = 'data/A/event.txt' sink_file_name = 'data/B/event.txt' # with open(source_file_name, \"r\") as source_file: data", "#&&&# source_file_name = 'data/A/event.txt' sink_file_name = 'data/B/event.txt' # with open(source_file_name, \"r\") as source_file:", "open(source_file_name, \"r\") as source_file: data = source_file.read() key = 'b' # context =", "{'b': 2} value = context[key] # print(value) #&&&# print(data) # with open(sink_file_name, \"w\")", "key = 'b' # context = {'b': 2} value = context[key] # print(value)", "value = context[key] # print(value) #&&&# print(data) # with open(sink_file_name, \"w\") as sink_file:", "= context[key] # print(value) #&&&# print(data) # with open(sink_file_name, \"w\") as sink_file: sink_file.write(data)", "source_file: data = source_file.read() key = 'b' # context = {'b': 2} value", "2} value = context[key] # print(value) #&&&# print(data) # with open(sink_file_name, \"w\") as", "= 'data/B/event.txt' # with open(source_file_name, \"r\") as source_file: data = 
source_file.read() key =", "as source_file: data = source_file.read() key = 'b' # context = {'b': 2}", "= {'b': 2} value = context[key] # print(value) #&&&# print(data) # with open(sink_file_name,", "'b' # context = {'b': 2} value = context[key] # print(value) #&&&# print(data)", "sink_file_name = 'data/B/event.txt' # with open(source_file_name, \"r\") as source_file: data = source_file.read() key", "data = source_file.read() key = 'b' # context = {'b': 2} value =", "= 'b' # context = {'b': 2} value = context[key] # print(value) #&&&#" ]
[ "Migration(migrations.Migration): dependencies = [ ('councilmatic_core', '0012_auto_20160707_1859'), ] operations = [ migrations.AlterField( model_name='bill', name='extras',", "1.9.7 on 2016-07-07 23:57 from __future__ import unicode_literals from django.db import migrations import", "max_length=255), ), migrations.AlterField( model_name='organization', name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255), ), migrations.AlterField( model_name='person', name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255),", "migrations.AlterField( model_name='bill', name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255), ), migrations.AlterField( model_name='event', name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255), ), migrations.AlterField(", "max_length=255), ), migrations.AlterField( model_name='event', name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255), ), migrations.AlterField( model_name='organization', name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255),", "import jsonfield.fields class Migration(migrations.Migration): dependencies = [ ('councilmatic_core', '0012_auto_20160707_1859'), ] operations = [", "jsonfield.fields class Migration(migrations.Migration): dependencies = [ ('councilmatic_core', '0012_auto_20160707_1859'), ] operations = [ migrations.AlterField(", "__future__ import unicode_literals from django.db import migrations import jsonfield.fields class Migration(migrations.Migration): dependencies =", "migrations.AlterField( model_name='organization', name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255), ), migrations.AlterField( model_name='person', name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255), ), ]", "unicode_literals from django.db import migrations import jsonfield.fields class Migration(migrations.Migration): 
dependencies = [ ('councilmatic_core',", "23:57 from __future__ import unicode_literals from django.db import migrations import jsonfield.fields class Migration(migrations.Migration):", "[ ('councilmatic_core', '0012_auto_20160707_1859'), ] operations = [ migrations.AlterField( model_name='bill', name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255), ),", "on 2016-07-07 23:57 from __future__ import unicode_literals from django.db import migrations import jsonfield.fields", "dependencies = [ ('councilmatic_core', '0012_auto_20160707_1859'), ] operations = [ migrations.AlterField( model_name='bill', name='extras', field=jsonfield.fields.JSONCharField(default='{}',", "Django 1.9.7 on 2016-07-07 23:57 from __future__ import unicode_literals from django.db import migrations", "), migrations.AlterField( model_name='organization', name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255), ), migrations.AlterField( model_name='person', name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255), ),", "migrations import jsonfield.fields class Migration(migrations.Migration): dependencies = [ ('councilmatic_core', '0012_auto_20160707_1859'), ] operations =", "import unicode_literals from django.db import migrations import jsonfield.fields class Migration(migrations.Migration): dependencies = [", "by Django 1.9.7 on 2016-07-07 23:57 from __future__ import unicode_literals from django.db import", "model_name='bill', name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255), ), migrations.AlterField( model_name='event', name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255), ), migrations.AlterField( model_name='organization',", "# -*- coding: utf-8 -*- # Generated by Django 1.9.7 on 2016-07-07 23:57", "# Generated by Django 1.9.7 on 2016-07-07 23:57 from __future__ import unicode_literals from", "name='extras', 
field=jsonfield.fields.JSONCharField(default='{}', max_length=255), ), migrations.AlterField( model_name='organization', name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255), ), migrations.AlterField( model_name='person', name='extras',", "-*- # Generated by Django 1.9.7 on 2016-07-07 23:57 from __future__ import unicode_literals", "Generated by Django 1.9.7 on 2016-07-07 23:57 from __future__ import unicode_literals from django.db", "field=jsonfield.fields.JSONCharField(default='{}', max_length=255), ), migrations.AlterField( model_name='event', name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255), ), migrations.AlterField( model_name='organization', name='extras', field=jsonfield.fields.JSONCharField(default='{}',", "utf-8 -*- # Generated by Django 1.9.7 on 2016-07-07 23:57 from __future__ import", "-*- coding: utf-8 -*- # Generated by Django 1.9.7 on 2016-07-07 23:57 from", "[ migrations.AlterField( model_name='bill', name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255), ), migrations.AlterField( model_name='event', name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255), ),", "from __future__ import unicode_literals from django.db import migrations import jsonfield.fields class Migration(migrations.Migration): dependencies", "migrations.AlterField( model_name='event', name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255), ), migrations.AlterField( model_name='organization', name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255), ), migrations.AlterField(", "('councilmatic_core', '0012_auto_20160707_1859'), ] operations = [ migrations.AlterField( model_name='bill', name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255), ), migrations.AlterField(", "class Migration(migrations.Migration): dependencies = [ ('councilmatic_core', '0012_auto_20160707_1859'), ] operations = [ 
migrations.AlterField( model_name='bill',", "= [ migrations.AlterField( model_name='bill', name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255), ), migrations.AlterField( model_name='event', name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255),", "operations = [ migrations.AlterField( model_name='bill', name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255), ), migrations.AlterField( model_name='event', name='extras', field=jsonfield.fields.JSONCharField(default='{}',", "import migrations import jsonfield.fields class Migration(migrations.Migration): dependencies = [ ('councilmatic_core', '0012_auto_20160707_1859'), ] operations", "field=jsonfield.fields.JSONCharField(default='{}', max_length=255), ), migrations.AlterField( model_name='organization', name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255), ), migrations.AlterField( model_name='person', name='extras', field=jsonfield.fields.JSONCharField(default='{}',", "model_name='event', name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255), ), migrations.AlterField( model_name='organization', name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255), ), migrations.AlterField( model_name='person',", "= [ ('councilmatic_core', '0012_auto_20160707_1859'), ] operations = [ migrations.AlterField( model_name='bill', name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255),", "django.db import migrations import jsonfield.fields class Migration(migrations.Migration): dependencies = [ ('councilmatic_core', '0012_auto_20160707_1859'), ]", "2016-07-07 23:57 from __future__ import unicode_literals from django.db import migrations import jsonfield.fields class", "] operations = [ migrations.AlterField( model_name='bill', name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255), ), migrations.AlterField( 
model_name='event', name='extras',", "from django.db import migrations import jsonfield.fields class Migration(migrations.Migration): dependencies = [ ('councilmatic_core', '0012_auto_20160707_1859'),", "name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255), ), migrations.AlterField( model_name='event', name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255), ), migrations.AlterField( model_name='organization', name='extras',", "'0012_auto_20160707_1859'), ] operations = [ migrations.AlterField( model_name='bill', name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255), ), migrations.AlterField( model_name='event',", "), migrations.AlterField( model_name='event', name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255), ), migrations.AlterField( model_name='organization', name='extras', field=jsonfield.fields.JSONCharField(default='{}', max_length=255), ),", "coding: utf-8 -*- # Generated by Django 1.9.7 on 2016-07-07 23:57 from __future__" ]
[ "version, location, and installed files. Installed files requires a pip generated 'installed-files.txt' in", "package[key] = pkg_info_dict.get(key) # It looks like FeedParser can not deal with repeated", "for l in lines] paths = [os.path.join(dist.location, p) for p in paths] file_list", "Should pkg_resources.Distribution have a # `get_pkg_info` method? feed_parser = FeedParser() feed_parser.feed(metadata) pkg_info_dict =", "args if options.pypi: with self._build_session(options) as session: results = search_packages_info(query, options.index, session) else:", "e in dist.extras: reqs = set(dist.requires([e])) - set(dist.requires()) extras[e] = map(_format_package, reqs) if", "r = next((r for r in p.requires() if r.key == dist.key), None) if", "**kw): super(ShowCommand, self).__init__(*args, **kw) self.cmd_opts.add_option( '-f', '--files', dest='files', action='store_true', default=False, help='Show the full", "for r_ in dist.requires()] package = { 'name': dist.project_name, 'version': dist.version, 'pypi_version': pypi_version,", "logger.info(\"Requires:\") for line in sorted(dist['requires']): logger.info(\" %s\", line) for extra_name, deps in dist['extras'].items():", "if 'entry_points' in dist: logger.info(\"Entry-points:\") for line in dist['entry_points']: logger.info(\" %s\", line.strip()) return", "not deal with repeated headers classifiers = [] for line in metadata.splitlines(): if", "= [os.path.relpath(p, dist.location) for p in paths] if dist.has_metadata('METADATA'): metadata = dist.get_metadata('METADATA') else:", "options, args): if not args: logger.warning('ERROR: Please provide a package name or names.')", "Package Index (default %default)') self.cmd_opts.add_option( '-p', '--pypi', dest='pypi', action='store_true', default=False, help='Show PyPi version')", "required_by.append(\"%s %s\" % (p.project_name, r.specifier)) else: for e in p.extras: r = next(", "packages.' 
def __init__(self, *args, **kw): super(ShowCommand, self).__init__(*args, **kw) self.cmd_opts.add_option( '-f', '--files', dest='files', action='store_true',", "'--pypi', dest='pypi', action='store_true', default=False, help='Show PyPi version') self.parser.insert_option_group(0, self.cmd_opts) def run(self, options, args):", "[l.split(',')[0] for l in lines] paths = [os.path.join(dist.location, p) for p in paths]", "p) for p in paths] file_list = [os.path.relpath(p, dist.location) for p in paths]", "import os from pip.basecommand import Command from pip.status_codes import SUCCESS, ERROR from pip._vendor", "if options.pypi: with self._build_session(options) as session: results = search_packages_info(query, options.index, session) else: results", "except pkg_resources.DistributionNotFound: pass return \"%s [%s]\" % (r, installed_ver) def search_packages_info(query, index_url=None, session=None):", "'name': dist.project_name, 'version': dist.version, 'pypi_version': pypi_version, 'location': dist.location, 'requires': requires, 'required_by': required_by, 'extras':", "import pkg_resources logger = logging.getLogger(__name__) class ShowCommand(Command): \"\"\"Show information about one or more", "in query] distributions = [installed[pkg] for pkg in query_names if pkg in installed]", "else 'UNKNOWN' else: pypi_version = None requires = [_format_package(r_) for r_ in dist.requires()]", "not line: break # Classifier: License :: OSI Approved :: MIT License if", "\"\"\" Print the informations from installed distributions found. 
\"\"\" results_printed = False for", "paths] if dist.has_metadata('METADATA'): metadata = dist.get_metadata('METADATA') else: # Otherwise use pip's log for", "pkg_info_dict.get(key) # It looks like FeedParser can not deal with repeated headers classifiers", "None if dist.has_metadata('INSTALLER'): for line in dist.get_metadata_lines('INSTALLER'): if line.strip(): installer = line.strip() break", "line.strip() break package['installer'] = installer # @todo: Should pkg_resources.Distribution have a # `get_pkg_info`", "default='https://pypi.python.org/pypi', help='Base URL of Python Package Index (default %default)') self.cmd_opts.add_option( '-p', '--pypi', dest='pypi',", "% (p.project_name, r.specifier)) else: for e in p.extras: r = next( (r for", "log for .egg-info's if dist.has_metadata('installed-files.txt'): paths = dist.get_metadata_lines('installed-files.txt') paths = [os.path.join(dist.egg_info, p) for", "if dist['installer'] is not None: logger.info(\"Installer: %s\", dist['installer']) logger.info(\"License: %s\", dist.get('license')) logger.info(\"Location: %s\",", "metavar='URL', default='https://pypi.python.org/pypi', help='Base URL of Python Package Index (default %default)') self.cmd_opts.add_option( '-p', '--pypi',", "(r, installed_ver) def search_packages_info(query, index_url=None, session=None): \"\"\" Gather details from installed distributions. 
Print", "entry_points installer = None if dist.has_metadata('INSTALLER'): for line in dist.get_metadata_lines('INSTALLER'): if line.strip(): installer", "installer = line.strip() break package['installer'] = installer # @todo: Should pkg_resources.Distribution have a", "for r in p.requires() if r.key == dist.key), None) if r: required_by.append(\"%s %s\"", "\"%s [%s]\" % (r, installed_ver) def search_packages_info(query, index_url=None, session=None): \"\"\" Gather details from", "in p.extras: r = next( (r for r in p.requires([e]) if r.key ==", "with self._build_session(options) as session: results = search_packages_info(query, options.index, session) else: results = search_packages_info(query,", "installed] for dist in distributions: required_by = [] for _, p in installed.items():", "if dist.has_metadata('installed-files.txt'): paths = dist.get_metadata_lines('installed-files.txt') paths = [os.path.join(dist.egg_info, p) for p in paths]", "options.pypi: with self._build_session(options) as session: results = search_packages_info(query, options.index, session) else: results =", "FeedParser import logging import os from pip.basecommand import Command from pip.status_codes import SUCCESS,", "of Python Package Index (default %default)') self.cmd_opts.add_option( '-p', '--pypi', dest='pypi', action='store_true', default=False, help='Show", "= feed_parser.close() for key in ('metadata-version', 'summary', 'home-page', 'author', 'author-email', 'license'): package[key] =", "class ShowCommand(Command): \"\"\"Show information about one or more installed packages.\"\"\" name = 'show'", "return ERROR query = args if options.pypi: with self._build_session(options) as session: results =", "extras = {} for e in dist.extras: reqs = set(dist.requires([e])) - set(dist.requires()) extras[e]", "informations from installed distributions found. \"\"\" results_printed = False for dist in distributions:", "found. 
\"\"\" results_printed = False for dist in distributions: results_printed = True logger.info(\"---\")", "'files' in dist: for line in dist['files']: logger.info(\" %s\", line.strip()) else: logger.info(\"Cannot locate", "in distributions: required_by = [] for _, p in installed.items(): r = next((r", "logger = logging.getLogger(__name__) class ShowCommand(Command): \"\"\"Show information about one or more installed packages.\"\"\"", "if r.key == dist.key), None ) if r: required_by.append( \"%s[%s] %s\" % (p.project_name,", "logger.info(\"Classifiers:\") for classifier in dist['classifiers']: logger.info(\" %s\", classifier) logger.info(\"Requires:\") for line in sorted(dist['requires']):", "dist.project_name, 'version': dist.version, 'pypi_version': pypi_version, 'location': dist.location, 'requires': requires, 'required_by': required_by, 'extras': extras", "(default %default)') self.cmd_opts.add_option( '-p', '--pypi', dest='pypi', action='store_true', default=False, help='Show PyPi version') self.parser.insert_option_group(0, self.cmd_opts)", "from pip.status_codes import SUCCESS, ERROR from pip._vendor import pkg_resources logger = logging.getLogger(__name__) class", "dist.get('home-page')) logger.info(\"Author: %s\", dist.get('author')) logger.info(\"Author-email: %s\", dist.get('author-email')) if dist['installer'] is not None: logger.info(\"Installer:", "pip._vendor import pkg_resources logger = logging.getLogger(__name__) class ShowCommand(Command): \"\"\"Show information about one or", "Index (default %default)') self.cmd_opts.add_option( '-p', '--pypi', dest='pypi', action='store_true', default=False, help='Show PyPi version') self.parser.insert_option_group(0,", "import absolute_import from email.parser import FeedParser import logging import os from pip.basecommand import", "Print the informations from installed distributions found. 
\"\"\" results_printed = False for dist", "Command from pip.status_codes import SUCCESS, ERROR from pip._vendor import pkg_resources logger = logging.getLogger(__name__)", "'):]) package['classifiers'] = classifiers if file_list: package['files'] = sorted(file_list) yield package def print_results(distributions,", ":: OSI Approved :: MIT License if line.startswith('Classifier: '): classifiers.append(line[len('Classifier: '):]) package['classifiers'] =", "= map(_format_package, reqs) if session: from pip.download import PipXmlrpcTransport from pip._vendor.six.moves import xmlrpc_client", "import xmlrpc_client transport = PipXmlrpcTransport(index_url, session) pypi = xmlrpc_client.ServerProxy(index_url, transport) pypi_releases = pypi.package_releases(dist.project_name)", "dist.has_metadata('INSTALLER'): for line in dist.get_metadata_lines('INSTALLER'): if line.strip(): installer = line.strip() break package['installer'] =", "= FeedParser() feed_parser.feed(metadata) pkg_info_dict = feed_parser.close() for key in ('metadata-version', 'summary', 'home-page', 'author',", "extra_name, deps in dist['extras'].items(): logger.info(\"Extra Require [%s]:\", extra_name) for line in sorted(deps): logger.info(\"", "@todo: Should pkg_resources.Distribution have a # `get_pkg_info` method? 
feed_parser = FeedParser() feed_parser.feed(metadata) pkg_info_dict", "default=False, help='Show the full list of installed files for each package.') self.cmd_opts.add_option( '--index',", "for p in pkg_resources.working_set]) query_names = [name.lower() for name in query] distributions =", "pypi_releases else 'UNKNOWN' else: pypi_version = None requires = [_format_package(r_) for r_ in", "'pypi_version': pypi_version, 'location': dist.location, 'requires': requires, 'required_by': required_by, 'extras': extras } file_list =", "session: results = search_packages_info(query, options.index, session) else: results = search_packages_info(query, options.index) if not", "p in pkg_resources.working_set]) query_names = [name.lower() for name in query] distributions = [installed[pkg]", "for e in p.extras: r = next( (r for r in p.requires([e]) if", "p in paths] if dist.has_metadata('PKG-INFO'): metadata = dist.get_metadata('PKG-INFO') if dist.has_metadata('entry_points.txt'): entry_points = dist.get_metadata_lines('entry_points.txt')", "classifiers.append(line[len('Classifier: '):]) package['classifiers'] = classifiers if file_list: package['files'] = sorted(file_list) yield package def", "set(dist.requires([e])) - set(dist.requires()) extras[e] = map(_format_package, reqs) if session: from pip.download import PipXmlrpcTransport", "= True logger.info(\"---\") logger.info(\"Metadata-Version: %s\", dist.get('metadata-version')) logger.info(\"Name: %s\", dist['name']) logger.info(\"Version: %s\", dist['version']) if", "PipXmlrpcTransport(index_url, session) pypi = xmlrpc_client.ServerProxy(index_url, transport) pypi_releases = pypi.package_releases(dist.project_name) pypi_version = pypi_releases[0] if", "pypi_releases[0] if pypi_releases else 'UNKNOWN' else: pypi_version = None requires = [_format_package(r_) for", "reqs = set(dist.requires([e])) - set(dist.requires()) extras[e] = map(_format_package, reqs) if session: from pip.download", "('metadata-version', 'summary', 
'home-page', 'author', 'author-email', 'license'): package[key] = pkg_info_dict.get(key) # It looks like", "set(dist.requires()) extras[e] = map(_format_package, reqs) if session: from pip.download import PipXmlrpcTransport from pip._vendor.six.moves", "results_printed = True logger.info(\"---\") logger.info(\"Metadata-Version: %s\", dist.get('metadata-version')) logger.info(\"Name: %s\", dist['name']) logger.info(\"Version: %s\", dist['version'])", "p) for p in pkg_resources.working_set]) query_names = [name.lower() for name in query] distributions", "query_names = [name.lower() for name in query] distributions = [installed[pkg] for pkg in", "pkg_resources.Distribution have a # `get_pkg_info` method? feed_parser = FeedParser() feed_parser.feed(metadata) pkg_info_dict = feed_parser.close()", "is not None: logger.info(\"Installer: %s\", dist['installer']) logger.info(\"License: %s\", dist.get('license')) logger.info(\"Location: %s\", dist['location']) logger.info(\"Classifiers:\")", "logger.info(\"Cannot locate installed-files.txt\") if 'entry_points' in dist: logger.info(\"Entry-points:\") for line in dist['entry_points']: logger.info(\"", "pass return \"%s [%s]\" % (r, installed_ver) def search_packages_info(query, index_url=None, session=None): \"\"\" Gather", "a pip generated 'installed-files.txt' in the distributions '.egg-info' directory. 
\"\"\" installed = dict(", "= next((r for r in p.requires() if r.key == dist.key), None) if r:", "for dist in distributions: required_by = [] for _, p in installed.items(): r", "if line.startswith('Classifier: '): classifiers.append(line[len('Classifier: '):]) package['classifiers'] = classifiers if file_list: package['files'] = sorted(file_list)", "True logger.info(\"---\") logger.info(\"Metadata-Version: %s\", dist.get('metadata-version')) logger.info(\"Name: %s\", dist['name']) logger.info(\"Version: %s\", dist['version']) if dist['pypi_version']:", "files requires a pip generated 'installed-files.txt' in the distributions '.egg-info' directory. \"\"\" installed", "p in paths] file_list = [os.path.relpath(p, dist.location) for p in paths] if dist.has_metadata('PKG-INFO'):", "logger.info(\"Required by(%d):\", len(dist['required_by'])) for line in sorted(dist['required_by']): logger.info(\" %s\", line.strip()) if list_all_files: logger.info(\"Files:\")", "query = args if options.pypi: with self._build_session(options) as session: results = search_packages_info(query, options.index,", "= xmlrpc_client.ServerProxy(index_url, transport) pypi_releases = pypi.package_releases(dist.project_name) pypi_version = pypi_releases[0] if pypi_releases else 'UNKNOWN'", "distributions = [installed[pkg] for pkg in query_names if pkg in installed] for dist", "= classifiers if file_list: package['files'] = sorted(file_list) yield package def print_results(distributions, list_all_files): \"\"\"", "'version': dist.version, 'pypi_version': pypi_version, 'location': dist.location, 'requires': requires, 'required_by': required_by, 'extras': extras }", "installed packages.' 
def __init__(self, *args, **kw): super(ShowCommand, self).__init__(*args, **kw) self.cmd_opts.add_option( '-f', '--files', dest='files',", "p.requires([e]) if r.key == dist.key), None ) if r: required_by.append( \"%s[%s] %s\" %", "extras[e] = map(_format_package, reqs) if session: from pip.download import PipXmlrpcTransport from pip._vendor.six.moves import", "# RECORDs should be part of .dist-info metadatas if dist.has_metadata('RECORD'): lines = dist.get_metadata_lines('RECORD')", "= [os.path.join(dist.location, p) for p in paths] file_list = [os.path.relpath(p, dist.location) for p", "index_url=None, session=None): \"\"\" Gather details from installed distributions. Print distribution name, version, location,", "if dist.has_metadata('PKG-INFO'): metadata = dist.get_metadata('PKG-INFO') if dist.has_metadata('entry_points.txt'): entry_points = dist.get_metadata_lines('entry_points.txt') package['entry_points'] = entry_points", "self.cmd_opts.add_option( '-p', '--pypi', dest='pypi', action='store_true', default=False, help='Show PyPi version') self.parser.insert_option_group(0, self.cmd_opts) def run(self,", "metadata.splitlines(): if not line: break # Classifier: License :: OSI Approved :: MIT", "distributions. Print distribution name, version, location, and installed files. Installed files requires a", "p in paths] if dist.has_metadata('METADATA'): metadata = dist.get_metadata('METADATA') else: # Otherwise use pip's", "SUCCESS, ERROR from pip._vendor import pkg_resources logger = logging.getLogger(__name__) class ShowCommand(Command): \"\"\"Show information", "dist['installer']) logger.info(\"License: %s\", dist.get('license')) logger.info(\"Location: %s\", dist['location']) logger.info(\"Classifiers:\") for classifier in dist['classifiers']: logger.info(\"", "required_by.append( \"%s[%s] %s\" % (p.project_name, e, r.specifier)) extras = {} for e in", "from installed distributions. Print distribution name, version, location, and installed files. 
Installed files", "= line.strip() break package['installer'] = installer # @todo: Should pkg_resources.Distribution have a #", "for p in paths] if dist.has_metadata('PKG-INFO'): metadata = dist.get_metadata('PKG-INFO') if dist.has_metadata('entry_points.txt'): entry_points =", "requires = [_format_package(r_) for r_ in dist.requires()] package = { 'name': dist.project_name, 'version':", "# It looks like FeedParser can not deal with repeated headers classifiers =", "False for dist in distributions: results_printed = True logger.info(\"---\") logger.info(\"Metadata-Version: %s\", dist.get('metadata-version')) logger.info(\"Name:", "dist['extras'].items(): logger.info(\"Extra Require [%s]:\", extra_name) for line in sorted(deps): logger.info(\" %s\", line.strip()) logger.info(\"Required", "[%s]:\", extra_name) for line in sorted(deps): logger.info(\" %s\", line.strip()) logger.info(\"Required by(%d):\", len(dist['required_by'])) for", "r = next( (r for r in p.requires([e]) if r.key == dist.key), None", "== dist.key), None) if r: required_by.append(\"%s %s\" % (p.project_name, r.specifier)) else: for e", "transport) pypi_releases = pypi.package_releases(dist.project_name) pypi_version = pypi_releases[0] if pypi_releases else 'UNKNOWN' else: pypi_version", "default=False, help='Show PyPi version') self.parser.insert_option_group(0, self.cmd_opts) def run(self, options, args): if not args:", "installed_ver) def search_packages_info(query, index_url=None, session=None): \"\"\" Gather details from installed distributions. Print distribution", "email.parser import FeedParser import logging import os from pip.basecommand import Command from pip.status_codes", "p.extras: r = next( (r for r in p.requires([e]) if r.key == dist.key),", "if not print_results(results, options.files): return ERROR return SUCCESS def _format_package(requirement): r = requirement", "\"\"\" %prog [options] <package> ...\"\"\" summary = 'Show information about installed packages.' 
def", "line in dist.get_metadata_lines('INSTALLER'): if line.strip(): installer = line.strip() break package['installer'] = installer #", "%s\", line.strip()) else: logger.info(\"Cannot locate installed-files.txt\") if 'entry_points' in dist: logger.info(\"Entry-points:\") for line", "if r: required_by.append( \"%s[%s] %s\" % (p.project_name, e, r.specifier)) extras = {} for", "'author', 'author-email', 'license'): package[key] = pkg_info_dict.get(key) # It looks like FeedParser can not", "about one or more installed packages.\"\"\" name = 'show' usage = \"\"\" %prog", "in dist['classifiers']: logger.info(\" %s\", classifier) logger.info(\"Requires:\") for line in sorted(dist['requires']): logger.info(\" %s\", line)", "by(%d):\", len(dist['required_by'])) for line in sorted(dist['required_by']): logger.info(\" %s\", line.strip()) if list_all_files: logger.info(\"Files:\") if", "\"\"\" Gather details from installed distributions. Print distribution name, version, location, and installed", ".dist-info metadatas if dist.has_metadata('RECORD'): lines = dist.get_metadata_lines('RECORD') paths = [l.split(',')[0] for l in", "distributions: results_printed = True logger.info(\"---\") logger.info(\"Metadata-Version: %s\", dist.get('metadata-version')) logger.info(\"Name: %s\", dist['name']) logger.info(\"Version: %s\",", "import SUCCESS, ERROR from pip._vendor import pkg_resources logger = logging.getLogger(__name__) class ShowCommand(Command): \"\"\"Show", "paths = dist.get_metadata_lines('installed-files.txt') paths = [os.path.join(dist.egg_info, p) for p in paths] file_list =", "pypi.package_releases(dist.project_name) pypi_version = pypi_releases[0] if pypi_releases else 'UNKNOWN' else: pypi_version = None requires", "if isinstance(dist, pkg_resources.DistInfoDistribution): # RECORDs should be part of .dist-info metadatas if dist.has_metadata('RECORD'):", "pkg_resources.working_set]) query_names = [name.lower() for name in query] distributions = [installed[pkg] for 
pkg", "r: required_by.append(\"%s %s\" % (p.project_name, r.specifier)) else: for e in p.extras: r =", "from pip.basecommand import Command from pip.status_codes import SUCCESS, ERROR from pip._vendor import pkg_resources", "of installed files for each package.') self.cmd_opts.add_option( '--index', dest='index', metavar='URL', default='https://pypi.python.org/pypi', help='Base URL", "next( (r for r in p.requires([e]) if r.key == dist.key), None ) if", "in installed.items(): r = next((r for r in p.requires() if r.key == dist.key),", "feed_parser.feed(metadata) pkg_info_dict = feed_parser.close() for key in ('metadata-version', 'summary', 'home-page', 'author', 'author-email', 'license'):", "else: # Otherwise use pip's log for .egg-info's if dist.has_metadata('installed-files.txt'): paths = dist.get_metadata_lines('installed-files.txt')", "part of .dist-info metadatas if dist.has_metadata('RECORD'): lines = dist.get_metadata_lines('RECORD') paths = [l.split(',')[0] for", "def print_results(distributions, list_all_files): \"\"\" Print the informations from installed distributions found. 
\"\"\" results_printed", "installed_ver = str(d.version) except pkg_resources.DistributionNotFound: pass return \"%s [%s]\" % (r, installed_ver) def", "logger.info(\"Location: %s\", dist['location']) logger.info(\"Classifiers:\") for classifier in dist['classifiers']: logger.info(\" %s\", classifier) logger.info(\"Requires:\") for", "pypi = xmlrpc_client.ServerProxy(index_url, transport) pypi_releases = pypi.package_releases(dist.project_name) pypi_version = pypi_releases[0] if pypi_releases else", "= args if options.pypi: with self._build_session(options) as session: results = search_packages_info(query, options.index, session)", "'summary', 'home-page', 'author', 'author-email', 'license'): package[key] = pkg_info_dict.get(key) # It looks like FeedParser", "self.parser.insert_option_group(0, self.cmd_opts) def run(self, options, args): if not args: logger.warning('ERROR: Please provide a", "logger.info(\"Name: %s\", dist['name']) logger.info(\"Version: %s\", dist['version']) if dist['pypi_version']: logger.info(\"PyPi Version: %s\", dist['pypi_version']) logger.info(\"Summary:", "Classifier: License :: OSI Approved :: MIT License if line.startswith('Classifier: '): classifiers.append(line[len('Classifier: '):])", "self).__init__(*args, **kw) self.cmd_opts.add_option( '-f', '--files', dest='files', action='store_true', default=False, help='Show the full list of", "print_results(results, options.files): return ERROR return SUCCESS def _format_package(requirement): r = requirement installed_ver =", "dest='pypi', action='store_true', default=False, help='Show PyPi version') self.parser.insert_option_group(0, self.cmd_opts) def run(self, options, args): if", "search_packages_info(query, options.index) if not print_results(results, options.files): return ERROR return SUCCESS def _format_package(requirement): r", "== dist.key), None ) if r: required_by.append( \"%s[%s] %s\" % (p.project_name, e, r.specifier))", "if dist.has_metadata('METADATA'): metadata = 
dist.get_metadata('METADATA') else: # Otherwise use pip's log for .egg-info's", "r.specifier)) extras = {} for e in dist.extras: reqs = set(dist.requires([e])) - set(dist.requires())", ".egg-info's if dist.has_metadata('installed-files.txt'): paths = dist.get_metadata_lines('installed-files.txt') paths = [os.path.join(dist.egg_info, p) for p in", "None) if r: required_by.append(\"%s %s\" % (p.project_name, r.specifier)) else: for e in p.extras:", "%s\", dist['installer']) logger.info(\"License: %s\", dist.get('license')) logger.info(\"Location: %s\", dist['location']) logger.info(\"Classifiers:\") for classifier in dist['classifiers']:", "installed = dict( [(p.key, p) for p in pkg_resources.working_set]) query_names = [name.lower() for", "return SUCCESS def _format_package(requirement): r = requirement installed_ver = '-' try: d =", "PipXmlrpcTransport from pip._vendor.six.moves import xmlrpc_client transport = PipXmlrpcTransport(index_url, session) pypi = xmlrpc_client.ServerProxy(index_url, transport)", "action='store_true', default=False, help='Show the full list of installed files for each package.') self.cmd_opts.add_option(", "name or names.') return ERROR query = args if options.pypi: with self._build_session(options) as", "logger.info(\"Author: %s\", dist.get('author')) logger.info(\"Author-email: %s\", dist.get('author-email')) if dist['installer'] is not None: logger.info(\"Installer: %s\",", "looks like FeedParser can not deal with repeated headers classifiers = [] for", "metadatas if dist.has_metadata('RECORD'): lines = dist.get_metadata_lines('RECORD') paths = [l.split(',')[0] for l in lines]", "= pkg_info_dict.get(key) # It looks like FeedParser can not deal with repeated headers", "dist.get('metadata-version')) logger.info(\"Name: %s\", dist['name']) logger.info(\"Version: %s\", dist['version']) if dist['pypi_version']: logger.info(\"PyPi Version: %s\", dist['pypi_version'])", "r = requirement installed_ver = '-' try: d = 
pkg_resources.get_distribution(r.project_name) installed_ver = str(d.version)", "else: results = search_packages_info(query, options.index) if not print_results(results, options.files): return ERROR return SUCCESS", "dest='files', action='store_true', default=False, help='Show the full list of installed files for each package.')", "dict( [(p.key, p) for p in pkg_resources.working_set]) query_names = [name.lower() for name in", "in paths] if dist.has_metadata('PKG-INFO'): metadata = dist.get_metadata('PKG-INFO') if dist.has_metadata('entry_points.txt'): entry_points = dist.get_metadata_lines('entry_points.txt') package['entry_points']", "package.') self.cmd_opts.add_option( '--index', dest='index', metavar='URL', default='https://pypi.python.org/pypi', help='Base URL of Python Package Index (default", "{} for e in dist.extras: reqs = set(dist.requires([e])) - set(dist.requires()) extras[e] = map(_format_package,", "'UNKNOWN' else: pypi_version = None requires = [_format_package(r_) for r_ in dist.requires()] package", "and installed files. Installed files requires a pip generated 'installed-files.txt' in the distributions", "package def print_results(distributions, list_all_files): \"\"\" Print the informations from installed distributions found. 
\"\"\"", "line.strip(): installer = line.strip() break package['installer'] = installer # @todo: Should pkg_resources.Distribution have", "e in p.extras: r = next( (r for r in p.requires([e]) if r.key", "information about one or more installed packages.\"\"\" name = 'show' usage = \"\"\"", "_, p in installed.items(): r = next((r for r in p.requires() if r.key", "in p.requires([e]) if r.key == dist.key), None ) if r: required_by.append( \"%s[%s] %s\"", "p.requires() if r.key == dist.key), None) if r: required_by.append(\"%s %s\" % (p.project_name, r.specifier))", "str(d.version) except pkg_resources.DistributionNotFound: pass return \"%s [%s]\" % (r, installed_ver) def search_packages_info(query, index_url=None,", "= {} for e in dist.extras: reqs = set(dist.requires([e])) - set(dist.requires()) extras[e] =", "dist.get_metadata_lines('installed-files.txt') paths = [os.path.join(dist.egg_info, p) for p in paths] file_list = [os.path.relpath(p, dist.location)", "sorted(dist['required_by']): logger.info(\" %s\", line.strip()) if list_all_files: logger.info(\"Files:\") if 'files' in dist: for line", "installed_ver = '-' try: d = pkg_resources.get_distribution(r.project_name) installed_ver = str(d.version) except pkg_resources.DistributionNotFound: pass", "for line in dist.get_metadata_lines('INSTALLER'): if line.strip(): installer = line.strip() break package['installer'] = installer", "`get_pkg_info` method? feed_parser = FeedParser() feed_parser.feed(metadata) pkg_info_dict = feed_parser.close() for key in ('metadata-version',", "for e in dist.extras: reqs = set(dist.requires([e])) - set(dist.requires()) extras[e] = map(_format_package, reqs)", "dist.get_metadata('METADATA') else: # Otherwise use pip's log for .egg-info's if dist.has_metadata('installed-files.txt'): paths =", "'--index', dest='index', metavar='URL', default='https://pypi.python.org/pypi', help='Base URL of Python Package Index (default %default)') self.cmd_opts.add_option(", "installed distributions. 
Print distribution name, version, location, and installed files. Installed files requires", "r in p.requires() if r.key == dist.key), None) if r: required_by.append(\"%s %s\" %", "location, and installed files. Installed files requires a pip generated 'installed-files.txt' in the", "requires, 'required_by': required_by, 'extras': extras } file_list = None metadata = None if", "= None if isinstance(dist, pkg_resources.DistInfoDistribution): # RECORDs should be part of .dist-info metadatas", "r.key == dist.key), None) if r: required_by.append(\"%s %s\" % (p.project_name, r.specifier)) else: for", "Please provide a package name or names.') return ERROR query = args if", "None ) if r: required_by.append( \"%s[%s] %s\" % (p.project_name, e, r.specifier)) extras =", "None requires = [_format_package(r_) for r_ in dist.requires()] package = { 'name': dist.project_name,", "sorted(dist['requires']): logger.info(\" %s\", line) for extra_name, deps in dist['extras'].items(): logger.info(\"Extra Require [%s]:\", extra_name)", "reqs) if session: from pip.download import PipXmlrpcTransport from pip._vendor.six.moves import xmlrpc_client transport =", "if dist.has_metadata('entry_points.txt'): entry_points = dist.get_metadata_lines('entry_points.txt') package['entry_points'] = entry_points installer = None if dist.has_metadata('INSTALLER'):", "= { 'name': dist.project_name, 'version': dist.version, 'pypi_version': pypi_version, 'location': dist.location, 'requires': requires, 'required_by':", "= set(dist.requires([e])) - set(dist.requires()) extras[e] = map(_format_package, reqs) if session: from pip.download import", "installed.items(): r = next((r for r in p.requires() if r.key == dist.key), None)", "def __init__(self, *args, **kw): super(ShowCommand, self).__init__(*args, **kw) self.cmd_opts.add_option( '-f', '--files', dest='files', action='store_true', default=False,", "dist.get('author')) logger.info(\"Author-email: %s\", dist.get('author-email')) if dist['installer'] is 
not None: logger.info(\"Installer: %s\", dist['installer']) logger.info(\"License:", "for _, p in installed.items(): r = next((r for r in p.requires() if", "pkg_info_dict = feed_parser.close() for key in ('metadata-version', 'summary', 'home-page', 'author', 'author-email', 'license'): package[key]", "= pypi_releases[0] if pypi_releases else 'UNKNOWN' else: pypi_version = None requires = [_format_package(r_)", "dist['pypi_version']: logger.info(\"PyPi Version: %s\", dist['pypi_version']) logger.info(\"Summary: %s\", dist.get('summary')) logger.info(\"Home-page: %s\", dist.get('home-page')) logger.info(\"Author: %s\",", "'requires': requires, 'required_by': required_by, 'extras': extras } file_list = None metadata = None", "dist.get_metadata_lines('entry_points.txt') package['entry_points'] = entry_points installer = None if dist.has_metadata('INSTALLER'): for line in dist.get_metadata_lines('INSTALLER'):", "= 'Show information about installed packages.' def __init__(self, *args, **kw): super(ShowCommand, self).__init__(*args, **kw)", "full list of installed files for each package.') self.cmd_opts.add_option( '--index', dest='index', metavar='URL', default='https://pypi.python.org/pypi',", "dist.key), None ) if r: required_by.append( \"%s[%s] %s\" % (p.project_name, e, r.specifier)) extras", "= None requires = [_format_package(r_) for r_ in dist.requires()] package = { 'name':", "in dist['files']: logger.info(\" %s\", line.strip()) else: logger.info(\"Cannot locate installed-files.txt\") if 'entry_points' in dist:", "line.strip()) else: logger.info(\"Cannot locate installed-files.txt\") if 'entry_points' in dist: logger.info(\"Entry-points:\") for line in", "%default)') self.cmd_opts.add_option( '-p', '--pypi', dest='pypi', action='store_true', default=False, help='Show PyPi version') self.parser.insert_option_group(0, self.cmd_opts) def", "(p.project_name, r.specifier)) else: for e in p.extras: r = next( (r for r", "= [_format_package(r_) for r_ in dist.requires()] 
package = { 'name': dist.project_name, 'version': dist.version,", "file_list = [os.path.relpath(p, dist.location) for p in paths] if dist.has_metadata('METADATA'): metadata = dist.get_metadata('METADATA')", "dist['classifiers']: logger.info(\" %s\", classifier) logger.info(\"Requires:\") for line in sorted(dist['requires']): logger.info(\" %s\", line) for", "paths = [os.path.join(dist.egg_info, p) for p in paths] file_list = [os.path.relpath(p, dist.location) for", "'show' usage = \"\"\" %prog [options] <package> ...\"\"\" summary = 'Show information about", "logger.info(\"Home-page: %s\", dist.get('home-page')) logger.info(\"Author: %s\", dist.get('author')) logger.info(\"Author-email: %s\", dist.get('author-email')) if dist['installer'] is not", "help='Show the full list of installed files for each package.') self.cmd_opts.add_option( '--index', dest='index',", "dist.version, 'pypi_version': pypi_version, 'location': dist.location, 'requires': requires, 'required_by': required_by, 'extras': extras } file_list", "logging import os from pip.basecommand import Command from pip.status_codes import SUCCESS, ERROR from", "with repeated headers classifiers = [] for line in metadata.splitlines(): if not line:", "= pypi.package_releases(dist.project_name) pypi_version = pypi_releases[0] if pypi_releases else 'UNKNOWN' else: pypi_version = None", "in query_names if pkg in installed] for dist in distributions: required_by = []", "logger.info(\"Author-email: %s\", dist.get('author-email')) if dist['installer'] is not None: logger.info(\"Installer: %s\", dist['installer']) logger.info(\"License: %s\",", "help='Base URL of Python Package Index (default %default)') self.cmd_opts.add_option( '-p', '--pypi', dest='pypi', action='store_true',", "search_packages_info(query, options.index, session) else: results = search_packages_info(query, options.index) if not print_results(results, options.files): return", "[_format_package(r_) for r_ in dist.requires()] package = { 'name': 
dist.project_name, 'version': dist.version, 'pypi_version':", "file_list = [os.path.relpath(p, dist.location) for p in paths] if dist.has_metadata('PKG-INFO'): metadata = dist.get_metadata('PKG-INFO')", "pypi_version, 'location': dist.location, 'requires': requires, 'required_by': required_by, 'extras': extras } file_list = None", "for line in dist['files']: logger.info(\" %s\", line.strip()) else: logger.info(\"Cannot locate installed-files.txt\") if 'entry_points'", "None: logger.info(\"Installer: %s\", dist['installer']) logger.info(\"License: %s\", dist.get('license')) logger.info(\"Location: %s\", dist['location']) logger.info(\"Classifiers:\") for classifier", "else: logger.info(\"Cannot locate installed-files.txt\") if 'entry_points' in dist: logger.info(\"Entry-points:\") for line in dist['entry_points']:", "deal with repeated headers classifiers = [] for line in metadata.splitlines(): if not", "if list_all_files: logger.info(\"Files:\") if 'files' in dist: for line in dist['files']: logger.info(\" %s\",", "line.startswith('Classifier: '): classifiers.append(line[len('Classifier: '):]) package['classifiers'] = classifiers if file_list: package['files'] = sorted(file_list) yield", "not print_results(results, options.files): return ERROR return SUCCESS def _format_package(requirement): r = requirement installed_ver", "%s\", dist.get('author')) logger.info(\"Author-email: %s\", dist.get('author-email')) if dist['installer'] is not None: logger.info(\"Installer: %s\", dist['installer'])", "# @todo: Should pkg_resources.Distribution have a # `get_pkg_info` method? 
feed_parser = FeedParser() feed_parser.feed(metadata)", "len(dist['required_by'])) for line in sorted(dist['required_by']): logger.info(\" %s\", line.strip()) if list_all_files: logger.info(\"Files:\") if 'files'", "version') self.parser.insert_option_group(0, self.cmd_opts) def run(self, options, args): if not args: logger.warning('ERROR: Please provide", "= str(d.version) except pkg_resources.DistributionNotFound: pass return \"%s [%s]\" % (r, installed_ver) def search_packages_info(query,", "lines = dist.get_metadata_lines('RECORD') paths = [l.split(',')[0] for l in lines] paths = [os.path.join(dist.location,", "= None metadata = None if isinstance(dist, pkg_resources.DistInfoDistribution): # RECORDs should be part", "'.egg-info' directory. \"\"\" installed = dict( [(p.key, p) for p in pkg_resources.working_set]) query_names", "= sorted(file_list) yield package def print_results(distributions, list_all_files): \"\"\" Print the informations from installed", "logger.info(\"Extra Require [%s]:\", extra_name) for line in sorted(deps): logger.info(\" %s\", line.strip()) logger.info(\"Required by(%d):\",", "OSI Approved :: MIT License if line.startswith('Classifier: '): classifiers.append(line[len('Classifier: '):]) package['classifiers'] = classifiers", "dist['version']) if dist['pypi_version']: logger.info(\"PyPi Version: %s\", dist['pypi_version']) logger.info(\"Summary: %s\", dist.get('summary')) logger.info(\"Home-page: %s\", dist.get('home-page'))", "should be part of .dist-info metadatas if dist.has_metadata('RECORD'): lines = dist.get_metadata_lines('RECORD') paths =", "for line in sorted(deps): logger.info(\" %s\", line.strip()) logger.info(\"Required by(%d):\", len(dist['required_by'])) for line in", "distributions found. 
\"\"\" results_printed = False for dist in distributions: results_printed = True", "[os.path.join(dist.location, p) for p in paths] file_list = [os.path.relpath(p, dist.location) for p in", "for each package.') self.cmd_opts.add_option( '--index', dest='index', metavar='URL', default='https://pypi.python.org/pypi', help='Base URL of Python Package", "if r.key == dist.key), None) if r: required_by.append(\"%s %s\" % (p.project_name, r.specifier)) else:", "each package.') self.cmd_opts.add_option( '--index', dest='index', metavar='URL', default='https://pypi.python.org/pypi', help='Base URL of Python Package Index", "line in sorted(dist['requires']): logger.info(\" %s\", line) for extra_name, deps in dist['extras'].items(): logger.info(\"Extra Require", "options.files): return ERROR return SUCCESS def _format_package(requirement): r = requirement installed_ver = '-'", "absolute_import from email.parser import FeedParser import logging import os from pip.basecommand import Command", "paths] if dist.has_metadata('PKG-INFO'): metadata = dist.get_metadata('PKG-INFO') if dist.has_metadata('entry_points.txt'): entry_points = dist.get_metadata_lines('entry_points.txt') package['entry_points'] =", "query_names if pkg in installed] for dist in distributions: required_by = [] for", "[] for line in metadata.splitlines(): if not line: break # Classifier: License ::", "packages.\"\"\" name = 'show' usage = \"\"\" %prog [options] <package> ...\"\"\" summary =", "pip generated 'installed-files.txt' in the distributions '.egg-info' directory. \"\"\" installed = dict( [(p.key,", "self.cmd_opts.add_option( '-f', '--files', dest='files', action='store_true', default=False, help='Show the full list of installed files", "requires a pip generated 'installed-files.txt' in the distributions '.egg-info' directory. 
\"\"\" installed =", "yield package def print_results(distributions, list_all_files): \"\"\" Print the informations from installed distributions found.", "action='store_true', default=False, help='Show PyPi version') self.parser.insert_option_group(0, self.cmd_opts) def run(self, options, args): if not", "in sorted(dist['required_by']): logger.info(\" %s\", line.strip()) if list_all_files: logger.info(\"Files:\") if 'files' in dist: for", "in metadata.splitlines(): if not line: break # Classifier: License :: OSI Approved ::", "for key in ('metadata-version', 'summary', 'home-page', 'author', 'author-email', 'license'): package[key] = pkg_info_dict.get(key) #", "return \"%s [%s]\" % (r, installed_ver) def search_packages_info(query, index_url=None, session=None): \"\"\" Gather details", "locate installed-files.txt\") if 'entry_points' in dist: logger.info(\"Entry-points:\") for line in dist['entry_points']: logger.info(\" %s\",", "for p in paths] if dist.has_metadata('METADATA'): metadata = dist.get_metadata('METADATA') else: # Otherwise use", "query] distributions = [installed[pkg] for pkg in query_names if pkg in installed] for", "help='Show PyPi version') self.parser.insert_option_group(0, self.cmd_opts) def run(self, options, args): if not args: logger.warning('ERROR:", "logger.info(\" %s\", line.strip()) else: logger.info(\"Cannot locate installed-files.txt\") if 'entry_points' in dist: logger.info(\"Entry-points:\") for", "distributions '.egg-info' directory. 
\"\"\" installed = dict( [(p.key, p) for p in pkg_resources.working_set])", "in dist.extras: reqs = set(dist.requires([e])) - set(dist.requires()) extras[e] = map(_format_package, reqs) if session:", "from pip._vendor.six.moves import xmlrpc_client transport = PipXmlrpcTransport(index_url, session) pypi = xmlrpc_client.ServerProxy(index_url, transport) pypi_releases", "dist['installer'] is not None: logger.info(\"Installer: %s\", dist['installer']) logger.info(\"License: %s\", dist.get('license')) logger.info(\"Location: %s\", dist['location'])", "self.cmd_opts.add_option( '--index', dest='index', metavar='URL', default='https://pypi.python.org/pypi', help='Base URL of Python Package Index (default %default)')", "requirement installed_ver = '-' try: d = pkg_resources.get_distribution(r.project_name) installed_ver = str(d.version) except pkg_resources.DistributionNotFound:", "line in sorted(dist['required_by']): logger.info(\" %s\", line.strip()) if list_all_files: logger.info(\"Files:\") if 'files' in dist:", "list_all_files: logger.info(\"Files:\") if 'files' in dist: for line in dist['files']: logger.info(\" %s\", line.strip())", "[installed[pkg] for pkg in query_names if pkg in installed] for dist in distributions:", "pypi_version = None requires = [_format_package(r_) for r_ in dist.requires()] package = {", "%s\", line.strip()) if list_all_files: logger.info(\"Files:\") if 'files' in dist: for line in dist['files']:", "= [] for line in metadata.splitlines(): if not line: break # Classifier: License", "'entry_points' in dist: logger.info(\"Entry-points:\") for line in dist['entry_points']: logger.info(\" %s\", line.strip()) return results_printed", "from pip._vendor import pkg_resources logger = logging.getLogger(__name__) class ShowCommand(Command): \"\"\"Show information about one", "results_printed = False for dist in distributions: results_printed = True logger.info(\"---\") logger.info(\"Metadata-Version: %s\",", "for line in metadata.splitlines(): if not 
line: break # Classifier: License :: OSI", "= installer # @todo: Should pkg_resources.Distribution have a # `get_pkg_info` method? feed_parser =", "name, version, location, and installed files. Installed files requires a pip generated 'installed-files.txt'", "directory. \"\"\" installed = dict( [(p.key, p) for p in pkg_resources.working_set]) query_names =", "session: from pip.download import PipXmlrpcTransport from pip._vendor.six.moves import xmlrpc_client transport = PipXmlrpcTransport(index_url, session)", "else: for e in p.extras: r = next( (r for r in p.requires([e])", "not None: logger.info(\"Installer: %s\", dist['installer']) logger.info(\"License: %s\", dist.get('license')) logger.info(\"Location: %s\", dist['location']) logger.info(\"Classifiers:\") for", "file_list = None metadata = None if isinstance(dist, pkg_resources.DistInfoDistribution): # RECORDs should be", "if pypi_releases else 'UNKNOWN' else: pypi_version = None requires = [_format_package(r_) for r_", "'location': dist.location, 'requires': requires, 'required_by': required_by, 'extras': extras } file_list = None metadata", "dist['files']: logger.info(\" %s\", line.strip()) else: logger.info(\"Cannot locate installed-files.txt\") if 'entry_points' in dist: logger.info(\"Entry-points:\")", "dist.location) for p in paths] if dist.has_metadata('PKG-INFO'): metadata = dist.get_metadata('PKG-INFO') if dist.has_metadata('entry_points.txt'): entry_points", "results = search_packages_info(query, options.index) if not print_results(results, options.files): return ERROR return SUCCESS def", "paths] file_list = [os.path.relpath(p, dist.location) for p in paths] if dist.has_metadata('METADATA'): metadata =", "next((r for r in p.requires() if r.key == dist.key), None) if r: required_by.append(\"%s", "SUCCESS def _format_package(requirement): r = requirement installed_ver = '-' try: d = pkg_resources.get_distribution(r.project_name)", "metadata = dist.get_metadata('METADATA') else: # Otherwise use pip's 
log for .egg-info's if dist.has_metadata('installed-files.txt'):", "FeedParser can not deal with repeated headers classifiers = [] for line in", "logger.info(\" %s\", line.strip()) if list_all_files: logger.info(\"Files:\") if 'files' in dist: for line in", "ERROR from pip._vendor import pkg_resources logger = logging.getLogger(__name__) class ShowCommand(Command): \"\"\"Show information about", "headers classifiers = [] for line in metadata.splitlines(): if not line: break #", "License :: OSI Approved :: MIT License if line.startswith('Classifier: '): classifiers.append(line[len('Classifier: '):]) package['classifiers']", "print_results(distributions, list_all_files): \"\"\" Print the informations from installed distributions found. \"\"\" results_printed =", "classifiers if file_list: package['files'] = sorted(file_list) yield package def print_results(distributions, list_all_files): \"\"\" Print", "= [installed[pkg] for pkg in query_names if pkg in installed] for dist in", "pkg in installed] for dist in distributions: required_by = [] for _, p", "dist.key), None) if r: required_by.append(\"%s %s\" % (p.project_name, r.specifier)) else: for e in", "pip.status_codes import SUCCESS, ERROR from pip._vendor import pkg_resources logger = logging.getLogger(__name__) class ShowCommand(Command):", "package = { 'name': dist.project_name, 'version': dist.version, 'pypi_version': pypi_version, 'location': dist.location, 'requires': requires,", "names.') return ERROR query = args if options.pypi: with self._build_session(options) as session: results", "\"\"\" results_printed = False for dist in distributions: results_printed = True logger.info(\"---\") logger.info(\"Metadata-Version:", "metadata = None if isinstance(dist, pkg_resources.DistInfoDistribution): # RECORDs should be part of .dist-info", "RECORDs should be part of .dist-info metadatas if dist.has_metadata('RECORD'): lines = dist.get_metadata_lines('RECORD') paths", "[os.path.relpath(p, dist.location) for p in 
paths] if dist.has_metadata('METADATA'): metadata = dist.get_metadata('METADATA') else: #", "%s\", dist['pypi_version']) logger.info(\"Summary: %s\", dist.get('summary')) logger.info(\"Home-page: %s\", dist.get('home-page')) logger.info(\"Author: %s\", dist.get('author')) logger.info(\"Author-email: %s\",", "for line in sorted(dist['requires']): logger.info(\" %s\", line) for extra_name, deps in dist['extras'].items(): logger.info(\"Extra", "dist['name']) logger.info(\"Version: %s\", dist['version']) if dist['pypi_version']: logger.info(\"PyPi Version: %s\", dist['pypi_version']) logger.info(\"Summary: %s\", dist.get('summary'))", "extra_name) for line in sorted(deps): logger.info(\" %s\", line.strip()) logger.info(\"Required by(%d):\", len(dist['required_by'])) for line", "dist.has_metadata('installed-files.txt'): paths = dist.get_metadata_lines('installed-files.txt') paths = [os.path.join(dist.egg_info, p) for p in paths] file_list", "def run(self, options, args): if not args: logger.warning('ERROR: Please provide a package name", "pkg_resources.get_distribution(r.project_name) installed_ver = str(d.version) except pkg_resources.DistributionNotFound: pass return \"%s [%s]\" % (r, installed_ver)", "options.index, session) else: results = search_packages_info(query, options.index) if not print_results(results, options.files): return ERROR", "= [] for _, p in installed.items(): r = next((r for r in", "pkg_resources logger = logging.getLogger(__name__) class ShowCommand(Command): \"\"\"Show information about one or more installed", "%s\", dist['name']) logger.info(\"Version: %s\", dist['version']) if dist['pypi_version']: logger.info(\"PyPi Version: %s\", dist['pypi_version']) logger.info(\"Summary: %s\",", "[(p.key, p) for p in pkg_resources.working_set]) query_names = [name.lower() for name in query]", "paths = [os.path.join(dist.location, p) for p in paths] file_list = [os.path.relpath(p, dist.location) for", "if 'files' in dist: for line in dist['files']: 
logger.info(\" %s\", line.strip()) else: logger.info(\"Cannot", "in the distributions '.egg-info' directory. \"\"\" installed = dict( [(p.key, p) for p", "%prog [options] <package> ...\"\"\" summary = 'Show information about installed packages.' def __init__(self,", "%s\", line.strip()) logger.info(\"Required by(%d):\", len(dist['required_by'])) for line in sorted(dist['required_by']): logger.info(\" %s\", line.strip()) if", "ShowCommand(Command): \"\"\"Show information about one or more installed packages.\"\"\" name = 'show' usage", "session=None): \"\"\" Gather details from installed distributions. Print distribution name, version, location, and", "files for each package.') self.cmd_opts.add_option( '--index', dest='index', metavar='URL', default='https://pypi.python.org/pypi', help='Base URL of Python", "the informations from installed distributions found. \"\"\" results_printed = False for dist in", "else: pypi_version = None requires = [_format_package(r_) for r_ in dist.requires()] package =", "% (p.project_name, e, r.specifier)) extras = {} for e in dist.extras: reqs =", "dist.get('author-email')) if dist['installer'] is not None: logger.info(\"Installer: %s\", dist['installer']) logger.info(\"License: %s\", dist.get('license')) logger.info(\"Location:", "[%s]\" % (r, installed_ver) def search_packages_info(query, index_url=None, session=None): \"\"\" Gather details from installed", "= '-' try: d = pkg_resources.get_distribution(r.project_name) installed_ver = str(d.version) except pkg_resources.DistributionNotFound: pass return", "list_all_files): \"\"\" Print the informations from installed distributions found. 
\"\"\" results_printed = False", "session) pypi = xmlrpc_client.ServerProxy(index_url, transport) pypi_releases = pypi.package_releases(dist.project_name) pypi_version = pypi_releases[0] if pypi_releases", "results = search_packages_info(query, options.index, session) else: results = search_packages_info(query, options.index) if not print_results(results,", "[os.path.join(dist.egg_info, p) for p in paths] file_list = [os.path.relpath(p, dist.location) for p in", "'installed-files.txt' in the distributions '.egg-info' directory. \"\"\" installed = dict( [(p.key, p) for", "logger.info(\"Files:\") if 'files' in dist: for line in dist['files']: logger.info(\" %s\", line.strip()) else:", "<package> ...\"\"\" summary = 'Show information about installed packages.' def __init__(self, *args, **kw):", "installed files. Installed files requires a pip generated 'installed-files.txt' in the distributions '.egg-info'", "line: break # Classifier: License :: OSI Approved :: MIT License if line.startswith('Classifier:", "installed-files.txt\") if 'entry_points' in dist: logger.info(\"Entry-points:\") for line in dist['entry_points']: logger.info(\" %s\", line.strip())", "logger.info(\"Metadata-Version: %s\", dist.get('metadata-version')) logger.info(\"Name: %s\", dist['name']) logger.info(\"Version: %s\", dist['version']) if dist['pypi_version']: logger.info(\"PyPi Version:", "for name in query] distributions = [installed[pkg] for pkg in query_names if pkg", "line in metadata.splitlines(): if not line: break # Classifier: License :: OSI Approved", "dist.has_metadata('entry_points.txt'): entry_points = dist.get_metadata_lines('entry_points.txt') package['entry_points'] = entry_points installer = None if dist.has_metadata('INSTALLER'): for", "%s\", dist.get('summary')) logger.info(\"Home-page: %s\", dist.get('home-page')) logger.info(\"Author: %s\", dist.get('author')) logger.info(\"Author-email: %s\", dist.get('author-email')) if dist['installer']", "dist in distributions: 
required_by = [] for _, p in installed.items(): r =", "= 'show' usage = \"\"\" %prog [options] <package> ...\"\"\" summary = 'Show information", "if session: from pip.download import PipXmlrpcTransport from pip._vendor.six.moves import xmlrpc_client transport = PipXmlrpcTransport(index_url,", "FeedParser() feed_parser.feed(metadata) pkg_info_dict = feed_parser.close() for key in ('metadata-version', 'summary', 'home-page', 'author', 'author-email',", "lines] paths = [os.path.join(dist.location, p) for p in paths] file_list = [os.path.relpath(p, dist.location)", "the full list of installed files for each package.') self.cmd_opts.add_option( '--index', dest='index', metavar='URL',", "information about installed packages.' def __init__(self, *args, **kw): super(ShowCommand, self).__init__(*args, **kw) self.cmd_opts.add_option( '-f',", "transport = PipXmlrpcTransport(index_url, session) pypi = xmlrpc_client.ServerProxy(index_url, transport) pypi_releases = pypi.package_releases(dist.project_name) pypi_version =", "if not line: break # Classifier: License :: OSI Approved :: MIT License", "dist.location, 'requires': requires, 'required_by': required_by, 'extras': extras } file_list = None metadata =", "distribution name, version, location, and installed files. 
Installed files requires a pip generated", "Python Package Index (default %default)') self.cmd_opts.add_option( '-p', '--pypi', dest='pypi', action='store_true', default=False, help='Show PyPi", "package['entry_points'] = entry_points installer = None if dist.has_metadata('INSTALLER'): for line in dist.get_metadata_lines('INSTALLER'): if", "%s\", dist.get('author-email')) if dist['installer'] is not None: logger.info(\"Installer: %s\", dist['installer']) logger.info(\"License: %s\", dist.get('license'))", "import FeedParser import logging import os from pip.basecommand import Command from pip.status_codes import", "installer = None if dist.has_metadata('INSTALLER'): for line in dist.get_metadata_lines('INSTALLER'): if line.strip(): installer =", "= search_packages_info(query, options.index, session) else: results = search_packages_info(query, options.index) if not print_results(results, options.files):", "Approved :: MIT License if line.startswith('Classifier: '): classifiers.append(line[len('Classifier: '):]) package['classifiers'] = classifiers if", "# Otherwise use pip's log for .egg-info's if dist.has_metadata('installed-files.txt'): paths = dist.get_metadata_lines('installed-files.txt') paths", "for .egg-info's if dist.has_metadata('installed-files.txt'): paths = dist.get_metadata_lines('installed-files.txt') paths = [os.path.join(dist.egg_info, p) for p", "%s\", dist['location']) logger.info(\"Classifiers:\") for classifier in dist['classifiers']: logger.info(\" %s\", classifier) logger.info(\"Requires:\") for line", "__future__ import absolute_import from email.parser import FeedParser import logging import os from pip.basecommand", "if line.strip(): installer = line.strip() break package['installer'] = installer # @todo: Should pkg_resources.Distribution", "break # Classifier: License :: OSI Approved :: MIT License if line.startswith('Classifier: '):", "args): if not args: logger.warning('ERROR: Please provide a package name or names.') return", 
"required_by = [] for _, p in installed.items(): r = next((r for r", "d = pkg_resources.get_distribution(r.project_name) installed_ver = str(d.version) except pkg_resources.DistributionNotFound: pass return \"%s [%s]\" %", "more installed packages.\"\"\" name = 'show' usage = \"\"\" %prog [options] <package> ...\"\"\"", "break package['installer'] = installer # @todo: Should pkg_resources.Distribution have a # `get_pkg_info` method?", "in p.requires() if r.key == dist.key), None) if r: required_by.append(\"%s %s\" % (p.project_name,", "Gather details from installed distributions. Print distribution name, version, location, and installed files.", "(p.project_name, e, r.specifier)) extras = {} for e in dist.extras: reqs = set(dist.requires([e]))", "'-f', '--files', dest='files', action='store_true', default=False, help='Show the full list of installed files for", "dist.has_metadata('RECORD'): lines = dist.get_metadata_lines('RECORD') paths = [l.split(',')[0] for l in lines] paths =", "extras } file_list = None metadata = None if isinstance(dist, pkg_resources.DistInfoDistribution): # RECORDs", "in dist.requires()] package = { 'name': dist.project_name, 'version': dist.version, 'pypi_version': pypi_version, 'location': dist.location,", "a # `get_pkg_info` method? feed_parser = FeedParser() feed_parser.feed(metadata) pkg_info_dict = feed_parser.close() for key", "if dist.has_metadata('INSTALLER'): for line in dist.get_metadata_lines('INSTALLER'): if line.strip(): installer = line.strip() break package['installer']", "files. 
Installed files requires a pip generated 'installed-files.txt' in the distributions '.egg-info' directory.", "'required_by': required_by, 'extras': extras } file_list = None metadata = None if isinstance(dist,", "ERROR query = args if options.pypi: with self._build_session(options) as session: results = search_packages_info(query,", "if pkg in installed] for dist in distributions: required_by = [] for _,", "line in dist['files']: logger.info(\" %s\", line.strip()) else: logger.info(\"Cannot locate installed-files.txt\") if 'entry_points' in", "= dist.get_metadata('PKG-INFO') if dist.has_metadata('entry_points.txt'): entry_points = dist.get_metadata_lines('entry_points.txt') package['entry_points'] = entry_points installer = None", "'--files', dest='files', action='store_true', default=False, help='Show the full list of installed files for each", "dest='index', metavar='URL', default='https://pypi.python.org/pypi', help='Base URL of Python Package Index (default %default)') self.cmd_opts.add_option( '-p',", "r_ in dist.requires()] package = { 'name': dist.project_name, 'version': dist.version, 'pypi_version': pypi_version, 'location':", "\"%s[%s] %s\" % (p.project_name, e, r.specifier)) extras = {} for e in dist.extras:", "pip's log for .egg-info's if dist.has_metadata('installed-files.txt'): paths = dist.get_metadata_lines('installed-files.txt') paths = [os.path.join(dist.egg_info, p)", "logger.info(\"Version: %s\", dist['version']) if dist['pypi_version']: logger.info(\"PyPi Version: %s\", dist['pypi_version']) logger.info(\"Summary: %s\", dist.get('summary')) logger.info(\"Home-page:", "dist.get_metadata_lines('INSTALLER'): if line.strip(): installer = line.strip() break package['installer'] = installer # @todo: Should", "classifiers = [] for line in metadata.splitlines(): if not line: break # Classifier:", "in installed] for dist in distributions: required_by = [] for _, p in", "_format_package(requirement): r = requirement installed_ver = '-' try: d = 
pkg_resources.get_distribution(r.project_name) installed_ver =", "= pkg_resources.get_distribution(r.project_name) installed_ver = str(d.version) except pkg_resources.DistributionNotFound: pass return \"%s [%s]\" % (r,", "pypi_releases = pypi.package_releases(dist.project_name) pypi_version = pypi_releases[0] if pypi_releases else 'UNKNOWN' else: pypi_version =", "args: logger.warning('ERROR: Please provide a package name or names.') return ERROR query =", ":: MIT License if line.startswith('Classifier: '): classifiers.append(line[len('Classifier: '):]) package['classifiers'] = classifiers if file_list:", "logger.info(\"---\") logger.info(\"Metadata-Version: %s\", dist.get('metadata-version')) logger.info(\"Name: %s\", dist['name']) logger.info(\"Version: %s\", dist['version']) if dist['pypi_version']: logger.info(\"PyPi", "l in lines] paths = [os.path.join(dist.location, p) for p in paths] file_list =", "dist.has_metadata('METADATA'): metadata = dist.get_metadata('METADATA') else: # Otherwise use pip's log for .egg-info's if", "def _format_package(requirement): r = requirement installed_ver = '-' try: d = pkg_resources.get_distribution(r.project_name) installed_ver", "for pkg in query_names if pkg in installed] for dist in distributions: required_by", "provide a package name or names.') return ERROR query = args if options.pypi:", "MIT License if line.startswith('Classifier: '): classifiers.append(line[len('Classifier: '):]) package['classifiers'] = classifiers if file_list: package['files']", "sorted(deps): logger.info(\" %s\", line.strip()) logger.info(\"Required by(%d):\", len(dist['required_by'])) for line in sorted(dist['required_by']): logger.info(\" %s\",", "'license'): package[key] = pkg_info_dict.get(key) # It looks like FeedParser can not deal with", "It looks like FeedParser can not deal with repeated headers classifiers = []", "pip._vendor.six.moves import xmlrpc_client transport = PipXmlrpcTransport(index_url, session) pypi = 
xmlrpc_client.ServerProxy(index_url, transport) pypi_releases =", "session) else: results = search_packages_info(query, options.index) if not print_results(results, options.files): return ERROR return", "metadata = dist.get_metadata('PKG-INFO') if dist.has_metadata('entry_points.txt'): entry_points = dist.get_metadata_lines('entry_points.txt') package['entry_points'] = entry_points installer =", "p in paths] file_list = [os.path.relpath(p, dist.location) for p in paths] if dist.has_metadata('METADATA'):", "installer # @todo: Should pkg_resources.Distribution have a # `get_pkg_info` method? feed_parser = FeedParser()", "sorted(file_list) yield package def print_results(distributions, list_all_files): \"\"\" Print the informations from installed distributions", "logger.info(\"PyPi Version: %s\", dist['pypi_version']) logger.info(\"Summary: %s\", dist.get('summary')) logger.info(\"Home-page: %s\", dist.get('home-page')) logger.info(\"Author: %s\", dist.get('author'))", "[options] <package> ...\"\"\" summary = 'Show information about installed packages.' 
def __init__(self, *args,", "if dist.has_metadata('RECORD'): lines = dist.get_metadata_lines('RECORD') paths = [l.split(',')[0] for l in lines] paths", "super(ShowCommand, self).__init__(*args, **kw) self.cmd_opts.add_option( '-f', '--files', dest='files', action='store_true', default=False, help='Show the full list", "import logging import os from pip.basecommand import Command from pip.status_codes import SUCCESS, ERROR", "dist['location']) logger.info(\"Classifiers:\") for classifier in dist['classifiers']: logger.info(\" %s\", classifier) logger.info(\"Requires:\") for line in", "'author-email', 'license'): package[key] = pkg_info_dict.get(key) # It looks like FeedParser can not deal", "if dist['pypi_version']: logger.info(\"PyPi Version: %s\", dist['pypi_version']) logger.info(\"Summary: %s\", dist.get('summary')) logger.info(\"Home-page: %s\", dist.get('home-page')) logger.info(\"Author:", "URL of Python Package Index (default %default)') self.cmd_opts.add_option( '-p', '--pypi', dest='pypi', action='store_true', default=False,", "= dist.get_metadata_lines('installed-files.txt') paths = [os.path.join(dist.egg_info, p) for p in paths] file_list = [os.path.relpath(p,", "xmlrpc_client transport = PipXmlrpcTransport(index_url, session) pypi = xmlrpc_client.ServerProxy(index_url, transport) pypi_releases = pypi.package_releases(dist.project_name) pypi_version", "in dist: for line in dist['files']: logger.info(\" %s\", line.strip()) else: logger.info(\"Cannot locate installed-files.txt\")", "pkg_resources.DistInfoDistribution): # RECORDs should be part of .dist-info metadatas if dist.has_metadata('RECORD'): lines =", "= dist.get_metadata_lines('RECORD') paths = [l.split(',')[0] for l in lines] paths = [os.path.join(dist.location, p)", "generated 'installed-files.txt' in the distributions '.egg-info' directory. 
\"\"\" installed = dict( [(p.key, p)", "= [name.lower() for name in query] distributions = [installed[pkg] for pkg in query_names", "installed files for each package.') self.cmd_opts.add_option( '--index', dest='index', metavar='URL', default='https://pypi.python.org/pypi', help='Base URL of", "= False for dist in distributions: results_printed = True logger.info(\"---\") logger.info(\"Metadata-Version: %s\", dist.get('metadata-version'))", "= requirement installed_ver = '-' try: d = pkg_resources.get_distribution(r.project_name) installed_ver = str(d.version) except", "name in query] distributions = [installed[pkg] for pkg in query_names if pkg in", "None metadata = None if isinstance(dist, pkg_resources.DistInfoDistribution): # RECORDs should be part of", "dist.requires()] package = { 'name': dist.project_name, 'version': dist.version, 'pypi_version': pypi_version, 'location': dist.location, 'requires':", "dist.extras: reqs = set(dist.requires([e])) - set(dist.requires()) extras[e] = map(_format_package, reqs) if session: from", "dist.location) for p in paths] if dist.has_metadata('METADATA'): metadata = dist.get_metadata('METADATA') else: # Otherwise", "= next( (r for r in p.requires([e]) if r.key == dist.key), None )", "one or more installed packages.\"\"\" name = 'show' usage = \"\"\" %prog [options]", "# `get_pkg_info` method? 
feed_parser = FeedParser() feed_parser.feed(metadata) pkg_info_dict = feed_parser.close() for key in", "logger.info(\"License: %s\", dist.get('license')) logger.info(\"Location: %s\", dist['location']) logger.info(\"Classifiers:\") for classifier in dist['classifiers']: logger.info(\" %s\",", "in sorted(deps): logger.info(\" %s\", line.strip()) logger.info(\"Required by(%d):\", len(dist['required_by'])) for line in sorted(dist['required_by']): logger.info(\"", "Otherwise use pip's log for .egg-info's if dist.has_metadata('installed-files.txt'): paths = dist.get_metadata_lines('installed-files.txt') paths =", "feed_parser = FeedParser() feed_parser.feed(metadata) pkg_info_dict = feed_parser.close() for key in ('metadata-version', 'summary', 'home-page',", "file_list: package['files'] = sorted(file_list) yield package def print_results(distributions, list_all_files): \"\"\" Print the informations", "= PipXmlrpcTransport(index_url, session) pypi = xmlrpc_client.ServerProxy(index_url, transport) pypi_releases = pypi.package_releases(dist.project_name) pypi_version = pypi_releases[0]", "= [os.path.relpath(p, dist.location) for p in paths] if dist.has_metadata('PKG-INFO'): metadata = dist.get_metadata('PKG-INFO') if", "package['installer'] = installer # @todo: Should pkg_resources.Distribution have a # `get_pkg_info` method? feed_parser", "%s\", dist.get('metadata-version')) logger.info(\"Name: %s\", dist['name']) logger.info(\"Version: %s\", dist['version']) if dist['pypi_version']: logger.info(\"PyPi Version: %s\",", "[name.lower() for name in query] distributions = [installed[pkg] for pkg in query_names if", "logger.info(\" %s\", line) for extra_name, deps in dist['extras'].items(): logger.info(\"Extra Require [%s]:\", extra_name) for", "summary = 'Show information about installed packages.' 
def __init__(self, *args, **kw): super(ShowCommand, self).__init__(*args,", "= dist.get_metadata('METADATA') else: # Otherwise use pip's log for .egg-info's if dist.has_metadata('installed-files.txt'): paths", "Installed files requires a pip generated 'installed-files.txt' in the distributions '.egg-info' directory. \"\"\"", "'): classifiers.append(line[len('Classifier: '):]) package['classifiers'] = classifiers if file_list: package['files'] = sorted(file_list) yield package", "%s\", classifier) logger.info(\"Requires:\") for line in sorted(dist['requires']): logger.info(\" %s\", line) for extra_name, deps", "from installed distributions found. \"\"\" results_printed = False for dist in distributions: results_printed", "import Command from pip.status_codes import SUCCESS, ERROR from pip._vendor import pkg_resources logger =", "% (r, installed_ver) def search_packages_info(query, index_url=None, session=None): \"\"\" Gather details from installed distributions.", "if file_list: package['files'] = sorted(file_list) yield package def print_results(distributions, list_all_files): \"\"\" Print the", "e, r.specifier)) extras = {} for e in dist.extras: reqs = set(dist.requires([e])) -", "= logging.getLogger(__name__) class ShowCommand(Command): \"\"\"Show information about one or more installed packages.\"\"\" name", "logging.getLogger(__name__) class ShowCommand(Command): \"\"\"Show information about one or more installed packages.\"\"\" name =", ") if r: required_by.append( \"%s[%s] %s\" % (p.project_name, e, r.specifier)) extras = {}", "*args, **kw): super(ShowCommand, self).__init__(*args, **kw) self.cmd_opts.add_option( '-f', '--files', dest='files', action='store_true', default=False, help='Show the", "dist in distributions: results_printed = True logger.info(\"---\") logger.info(\"Metadata-Version: %s\", dist.get('metadata-version')) logger.info(\"Name: %s\", dist['name'])", "if r: required_by.append(\"%s %s\" % (p.project_name, r.specifier)) else: for e in 
p.extras: r", "line in sorted(deps): logger.info(\" %s\", line.strip()) logger.info(\"Required by(%d):\", len(dist['required_by'])) for line in sorted(dist['required_by']):", "from pip.download import PipXmlrpcTransport from pip._vendor.six.moves import xmlrpc_client transport = PipXmlrpcTransport(index_url, session) pypi", "in paths] if dist.has_metadata('METADATA'): metadata = dist.get_metadata('METADATA') else: # Otherwise use pip's log", "r.specifier)) else: for e in p.extras: r = next( (r for r in", "installed distributions found. \"\"\" results_printed = False for dist in distributions: results_printed =", "required_by, 'extras': extras } file_list = None metadata = None if isinstance(dist, pkg_resources.DistInfoDistribution):", "logger.info(\"Summary: %s\", dist.get('summary')) logger.info(\"Home-page: %s\", dist.get('home-page')) logger.info(\"Author: %s\", dist.get('author')) logger.info(\"Author-email: %s\", dist.get('author-email')) if", "method? feed_parser = FeedParser() feed_parser.feed(metadata) pkg_info_dict = feed_parser.close() for key in ('metadata-version', 'summary',", "key in ('metadata-version', 'summary', 'home-page', 'author', 'author-email', 'license'): package[key] = pkg_info_dict.get(key) # It", "\"\"\" installed = dict( [(p.key, p) for p in pkg_resources.working_set]) query_names = [name.lower()", "dist.get('summary')) logger.info(\"Home-page: %s\", dist.get('home-page')) logger.info(\"Author: %s\", dist.get('author')) logger.info(\"Author-email: %s\", dist.get('author-email')) if dist['installer'] is", "not args: logger.warning('ERROR: Please provide a package name or names.') return ERROR query", "deps in dist['extras'].items(): logger.info(\"Extra Require [%s]:\", extra_name) for line in sorted(deps): logger.info(\" %s\",", "map(_format_package, reqs) if session: from pip.download import PipXmlrpcTransport from pip._vendor.six.moves import xmlrpc_client transport", "paths] file_list = [os.path.relpath(p, dist.location) for p in 
paths] if dist.has_metadata('PKG-INFO'): metadata =", "in pkg_resources.working_set]) query_names = [name.lower() for name in query] distributions = [installed[pkg] for", "License if line.startswith('Classifier: '): classifiers.append(line[len('Classifier: '):]) package['classifiers'] = classifiers if file_list: package['files'] =", "a package name or names.') return ERROR query = args if options.pypi: with", "line.strip()) if list_all_files: logger.info(\"Files:\") if 'files' in dist: for line in dist['files']: logger.info(\"", "details from installed distributions. Print distribution name, version, location, and installed files. Installed", "r.key == dist.key), None ) if r: required_by.append( \"%s[%s] %s\" % (p.project_name, e,", "for r in p.requires([e]) if r.key == dist.key), None ) if r: required_by.append(", "= [os.path.join(dist.egg_info, p) for p in paths] file_list = [os.path.relpath(p, dist.location) for p", "in sorted(dist['requires']): logger.info(\" %s\", line) for extra_name, deps in dist['extras'].items(): logger.info(\"Extra Require [%s]:\",", "about installed packages.' def __init__(self, *args, **kw): super(ShowCommand, self).__init__(*args, **kw) self.cmd_opts.add_option( '-f', '--files',", "in dist['extras'].items(): logger.info(\"Extra Require [%s]:\", extra_name) for line in sorted(deps): logger.info(\" %s\", line.strip())", "**kw) self.cmd_opts.add_option( '-f', '--files', dest='files', action='store_true', default=False, help='Show the full list of installed", "...\"\"\" summary = 'Show information about installed packages.' 
def __init__(self, *args, **kw): super(ShowCommand,", "r in p.requires([e]) if r.key == dist.key), None ) if r: required_by.append( \"%s[%s]", "} file_list = None metadata = None if isinstance(dist, pkg_resources.DistInfoDistribution): # RECORDs should", "from email.parser import FeedParser import logging import os from pip.basecommand import Command from", "like FeedParser can not deal with repeated headers classifiers = [] for line", "xmlrpc_client.ServerProxy(index_url, transport) pypi_releases = pypi.package_releases(dist.project_name) pypi_version = pypi_releases[0] if pypi_releases else 'UNKNOWN' else:", "if not args: logger.warning('ERROR: Please provide a package name or names.') return ERROR", "'extras': extras } file_list = None metadata = None if isinstance(dist, pkg_resources.DistInfoDistribution): #", "pip.download import PipXmlrpcTransport from pip._vendor.six.moves import xmlrpc_client transport = PipXmlrpcTransport(index_url, session) pypi =", "for dist in distributions: results_printed = True logger.info(\"---\") logger.info(\"Metadata-Version: %s\", dist.get('metadata-version')) logger.info(\"Name: %s\",", "as session: results = search_packages_info(query, options.index, session) else: results = search_packages_info(query, options.index) if", "pkg_resources.DistributionNotFound: pass return \"%s [%s]\" % (r, installed_ver) def search_packages_info(query, index_url=None, session=None): \"\"\"", "can not deal with repeated headers classifiers = [] for line in metadata.splitlines():", "package['files'] = sorted(file_list) yield package def print_results(distributions, list_all_files): \"\"\" Print the informations from", "for extra_name, deps in dist['extras'].items(): logger.info(\"Extra Require [%s]:\", extra_name) for line in sorted(deps):", "%s\", dist.get('home-page')) logger.info(\"Author: %s\", dist.get('author')) logger.info(\"Author-email: %s\", dist.get('author-email')) if dist['installer'] is not None:", "try: d = 
pkg_resources.get_distribution(r.project_name) installed_ver = str(d.version) except pkg_resources.DistributionNotFound: pass return \"%s [%s]\"", "%s\" % (p.project_name, e, r.specifier)) extras = {} for e in dist.extras: reqs", "distributions: required_by = [] for _, p in installed.items(): r = next((r for", "feed_parser.close() for key in ('metadata-version', 'summary', 'home-page', 'author', 'author-email', 'license'): package[key] = pkg_info_dict.get(key)", "dist.has_metadata('PKG-INFO'): metadata = dist.get_metadata('PKG-INFO') if dist.has_metadata('entry_points.txt'): entry_points = dist.get_metadata_lines('entry_points.txt') package['entry_points'] = entry_points installer", "line.strip()) logger.info(\"Required by(%d):\", len(dist['required_by'])) for line in sorted(dist['required_by']): logger.info(\" %s\", line.strip()) if list_all_files:", "return ERROR return SUCCESS def _format_package(requirement): r = requirement installed_ver = '-' try:", "[os.path.relpath(p, dist.location) for p in paths] if dist.has_metadata('PKG-INFO'): metadata = dist.get_metadata('PKG-INFO') if dist.has_metadata('entry_points.txt'):", "logger.info(\" %s\", classifier) logger.info(\"Requires:\") for line in sorted(dist['requires']): logger.info(\" %s\", line) for extra_name,", "logger.warning('ERROR: Please provide a package name or names.') return ERROR query = args", "[] for _, p in installed.items(): r = next((r for r in p.requires()", "isinstance(dist, pkg_resources.DistInfoDistribution): # RECORDs should be part of .dist-info metadatas if dist.has_metadata('RECORD'): lines", "classifier) logger.info(\"Requires:\") for line in sorted(dist['requires']): logger.info(\" %s\", line) for extra_name, deps in", "os from pip.basecommand import Command from pip.status_codes import SUCCESS, ERROR from pip._vendor import", "{ 'name': dist.project_name, 'version': dist.version, 'pypi_version': pypi_version, 'location': dist.location, 'requires': requires, 'required_by': required_by,", 
"for classifier in dist['classifiers']: logger.info(\" %s\", classifier) logger.info(\"Requires:\") for line in sorted(dist['requires']): logger.info(\"", "%s\", line) for extra_name, deps in dist['extras'].items(): logger.info(\"Extra Require [%s]:\", extra_name) for line", "pkg in query_names if pkg in installed] for dist in distributions: required_by =", "= [l.split(',')[0] for l in lines] paths = [os.path.join(dist.location, p) for p in", "= \"\"\" %prog [options] <package> ...\"\"\" summary = 'Show information about installed packages.'", "of .dist-info metadatas if dist.has_metadata('RECORD'): lines = dist.get_metadata_lines('RECORD') paths = [l.split(',')[0] for l", "or more installed packages.\"\"\" name = 'show' usage = \"\"\" %prog [options] <package>", "in ('metadata-version', 'summary', 'home-page', 'author', 'author-email', 'license'): package[key] = pkg_info_dict.get(key) # It looks", "installed packages.\"\"\" name = 'show' usage = \"\"\" %prog [options] <package> ...\"\"\" summary", "dist.get_metadata_lines('RECORD') paths = [l.split(',')[0] for l in lines] paths = [os.path.join(dist.location, p) for", "ERROR return SUCCESS def _format_package(requirement): r = requirement installed_ver = '-' try: d", "'-' try: d = pkg_resources.get_distribution(r.project_name) installed_ver = str(d.version) except pkg_resources.DistributionNotFound: pass return \"%s", "search_packages_info(query, index_url=None, session=None): \"\"\" Gather details from installed distributions. 
Print distribution name, version,", "p in installed.items(): r = next((r for r in p.requires() if r.key ==", "(r for r in p.requires([e]) if r.key == dist.key), None ) if r:", "= entry_points installer = None if dist.has_metadata('INSTALLER'): for line in dist.get_metadata_lines('INSTALLER'): if line.strip():", "dist.get_metadata('PKG-INFO') if dist.has_metadata('entry_points.txt'): entry_points = dist.get_metadata_lines('entry_points.txt') package['entry_points'] = entry_points installer = None if", "%s\", dist.get('license')) logger.info(\"Location: %s\", dist['location']) logger.info(\"Classifiers:\") for classifier in dist['classifiers']: logger.info(\" %s\", classifier)", "package name or names.') return ERROR query = args if options.pypi: with self._build_session(options)", "'Show information about installed packages.' def __init__(self, *args, **kw): super(ShowCommand, self).__init__(*args, **kw) self.cmd_opts.add_option(", "in dist.get_metadata_lines('INSTALLER'): if line.strip(): installer = line.strip() break package['installer'] = installer # @todo:", "be part of .dist-info metadatas if dist.has_metadata('RECORD'): lines = dist.get_metadata_lines('RECORD') paths = [l.split(',')[0]", "%s\" % (p.project_name, r.specifier)) else: for e in p.extras: r = next( (r", "None if isinstance(dist, pkg_resources.DistInfoDistribution): # RECORDs should be part of .dist-info metadatas if", "name = 'show' usage = \"\"\" %prog [options] <package> ...\"\"\" summary = 'Show", "= None if dist.has_metadata('INSTALLER'): for line in dist.get_metadata_lines('INSTALLER'): if line.strip(): installer = line.strip()", "dist.get('license')) logger.info(\"Location: %s\", dist['location']) logger.info(\"Classifiers:\") for classifier in dist['classifiers']: logger.info(\" %s\", classifier) logger.info(\"Requires:\")", "paths = [l.split(',')[0] for l in lines] paths = [os.path.join(dist.location, p) for p", "__init__(self, *args, **kw): super(ShowCommand, self).__init__(*args, **kw) 
self.cmd_opts.add_option( '-f', '--files', dest='files', action='store_true', default=False, help='Show", "- set(dist.requires()) extras[e] = map(_format_package, reqs) if session: from pip.download import PipXmlrpcTransport from", "package['classifiers'] = classifiers if file_list: package['files'] = sorted(file_list) yield package def print_results(distributions, list_all_files):", "line) for extra_name, deps in dist['extras'].items(): logger.info(\"Extra Require [%s]:\", extra_name) for line in", "from __future__ import absolute_import from email.parser import FeedParser import logging import os from", "entry_points = dist.get_metadata_lines('entry_points.txt') package['entry_points'] = entry_points installer = None if dist.has_metadata('INSTALLER'): for line", "PyPi version') self.parser.insert_option_group(0, self.cmd_opts) def run(self, options, args): if not args: logger.warning('ERROR: Please", "options.index) if not print_results(results, options.files): return ERROR return SUCCESS def _format_package(requirement): r =", "Require [%s]:\", extra_name) for line in sorted(deps): logger.info(\" %s\", line.strip()) logger.info(\"Required by(%d):\", len(dist['required_by']))", "pypi_version = pypi_releases[0] if pypi_releases else 'UNKNOWN' else: pypi_version = None requires =", "r: required_by.append( \"%s[%s] %s\" % (p.project_name, e, r.specifier)) extras = {} for e", "Version: %s\", dist['pypi_version']) logger.info(\"Summary: %s\", dist.get('summary')) logger.info(\"Home-page: %s\", dist.get('home-page')) logger.info(\"Author: %s\", dist.get('author')) logger.info(\"Author-email:", "import PipXmlrpcTransport from pip._vendor.six.moves import xmlrpc_client transport = PipXmlrpcTransport(index_url, session) pypi = xmlrpc_client.ServerProxy(index_url,", "= dist.get_metadata_lines('entry_points.txt') package['entry_points'] = entry_points installer = None if dist.has_metadata('INSTALLER'): for line in", "have a # `get_pkg_info` method? 
feed_parser = FeedParser() feed_parser.feed(metadata) pkg_info_dict = feed_parser.close() for", "classifier in dist['classifiers']: logger.info(\" %s\", classifier) logger.info(\"Requires:\") for line in sorted(dist['requires']): logger.info(\" %s\",", "logger.info(\" %s\", line.strip()) logger.info(\"Required by(%d):\", len(dist['required_by'])) for line in sorted(dist['required_by']): logger.info(\" %s\", line.strip())", "dist: for line in dist['files']: logger.info(\" %s\", line.strip()) else: logger.info(\"Cannot locate installed-files.txt\") if", "# Classifier: License :: OSI Approved :: MIT License if line.startswith('Classifier: '): classifiers.append(line[len('Classifier:", "or names.') return ERROR query = args if options.pypi: with self._build_session(options) as session:", "for p in paths] file_list = [os.path.relpath(p, dist.location) for p in paths] if", "for line in sorted(dist['required_by']): logger.info(\" %s\", line.strip()) if list_all_files: logger.info(\"Files:\") if 'files' in", "run(self, options, args): if not args: logger.warning('ERROR: Please provide a package name or", "self.cmd_opts) def run(self, options, args): if not args: logger.warning('ERROR: Please provide a package", "use pip's log for .egg-info's if dist.has_metadata('installed-files.txt'): paths = dist.get_metadata_lines('installed-files.txt') paths = [os.path.join(dist.egg_info,", "list of installed files for each package.') self.cmd_opts.add_option( '--index', dest='index', metavar='URL', default='https://pypi.python.org/pypi', help='Base", "in paths] file_list = [os.path.relpath(p, dist.location) for p in paths] if dist.has_metadata('METADATA'): metadata", "\"\"\"Show information about one or more installed packages.\"\"\" name = 'show' usage =", "Print distribution name, version, location, and installed files. 
Installed files requires a pip", "'home-page', 'author', 'author-email', 'license'): package[key] = pkg_info_dict.get(key) # It looks like FeedParser can", "%s\", dist['version']) if dist['pypi_version']: logger.info(\"PyPi Version: %s\", dist['pypi_version']) logger.info(\"Summary: %s\", dist.get('summary')) logger.info(\"Home-page: %s\",", "logger.info(\"Installer: %s\", dist['installer']) logger.info(\"License: %s\", dist.get('license')) logger.info(\"Location: %s\", dist['location']) logger.info(\"Classifiers:\") for classifier in", "pip.basecommand import Command from pip.status_codes import SUCCESS, ERROR from pip._vendor import pkg_resources logger", "usage = \"\"\" %prog [options] <package> ...\"\"\" summary = 'Show information about installed", "= search_packages_info(query, options.index) if not print_results(results, options.files): return ERROR return SUCCESS def _format_package(requirement):", "in lines] paths = [os.path.join(dist.location, p) for p in paths] file_list = [os.path.relpath(p,", "dist['pypi_version']) logger.info(\"Summary: %s\", dist.get('summary')) logger.info(\"Home-page: %s\", dist.get('home-page')) logger.info(\"Author: %s\", dist.get('author')) logger.info(\"Author-email: %s\", dist.get('author-email'))", "repeated headers classifiers = [] for line in metadata.splitlines(): if not line: break", "= dict( [(p.key, p) for p in pkg_resources.working_set]) query_names = [name.lower() for name", "self._build_session(options) as session: results = search_packages_info(query, options.index, session) else: results = search_packages_info(query, options.index)", "the distributions '.egg-info' directory. \"\"\" installed = dict( [(p.key, p) for p in", "def search_packages_info(query, index_url=None, session=None): \"\"\" Gather details from installed distributions. 
Print distribution name,", "'-p', '--pypi', dest='pypi', action='store_true', default=False, help='Show PyPi version') self.parser.insert_option_group(0, self.cmd_opts) def run(self, options,", "in paths] file_list = [os.path.relpath(p, dist.location) for p in paths] if dist.has_metadata('PKG-INFO'): metadata", "in distributions: results_printed = True logger.info(\"---\") logger.info(\"Metadata-Version: %s\", dist.get('metadata-version')) logger.info(\"Name: %s\", dist['name']) logger.info(\"Version:" ]
[]
[]
[ "before the `EXTENSION` type of the Archived Source Package in the URI. #", "setting up `HTTPD` Dependency components.\", 'OS_SUPPORT' : { 'Unix': ['Solaris'], 'Linux': ['Ubuntu', 'RHEL']", ": \"Build Automation for setting up `HTTPD` Dependency components.\", 'OS_SUPPORT' : { 'Unix':", "store the Build Information # for the `PCRE` source build (i.e., this is", "and install on the target host. # Set the PCRE Download URL. DOWNLOAD_URL", "'--disable-static'] # Have the `PREFIX` set here, and we will include the `CONFIGURE`", "up `HTTPD` Dependency components.\", 'OS_SUPPORT' : { 'Unix': ['Solaris'], 'Linux': ['Ubuntu', 'RHEL'] }", "`TAR` final # extract directory. # Below is the `TAR` extraction directory for", "at run-time # from the `BUILD_SUPERVISOR` Script. INSTALL_TIME_OPTIONS = {'prefix_options': '--prefix='} # Set", "PCRE_DOCDIR_LOCATION = '/usr/share/doc/pcre-' # `PCRE` component name to be used # program wide.", "Set the log location options, to store the Build Information # for the", "the necessary imports here. ############################################################## import helpers.BuildConfig.Logger.LoggerConfig ############################################################## # Get the configuration options", "This is the identifier returned by # the DownloadManager Module. # It references", "# Get the configuration options set for the PCRE build # and install", "build # and install on the target host. # Set the PCRE Download", "`PCRE`. PCRE_DOWNLOADER_THREAD_NAME = 'PCRE::DOWNLOADER::THREAD' # `THREAD` name that executes the untar logic for", "# be followed within the `TAR` final # extract directory. # Below is", "'ftp://ftp.csx.cam.ac.uk/pub/software/programming/pcre/pcre-8.40.tar.gz' # Get this value from the above Downloads URI. # It would", "untar logic for `PCRE`. PCRE_UNTAR_THREAD_NAME = 'PCRE::UNTAR::THREAD' # Package type name convention to", "would be present before the `EXTENSION` type of the Archived Source Package in", "host. 
# Set the PCRE Download URL. DOWNLOAD_URL = 'ftp://ftp.csx.cam.ac.uk/pub/software/programming/pcre/pcre-8.40.tar.gz' # Get this", "suit your environment standards. PCRE_BINARY_LOCATION = '/usr/local/pcre-' # `DOCDIR` location for the `PCRE`", "for the `CONFIGURE`, `MAKE` and `MAKE INSTALL` processes). PCRE_BUILD_PROCESS_LOG_DIRECTORY = 'Pcre_Subprocess_Logs/' PCRE_SUBPROCESS_LOG_LOCATION =", "convention to # be followed within the `TAR` final # extract directory. #", "Make all the necessary imports here. ############################################################## import helpers.BuildConfig.Logger.LoggerConfig ############################################################## # Get the", "for the `PCRE` source build (i.e., this is for the `CONFIGURE`, `MAKE` and", "location parameter to # suit your environment standards. PCRE_BINARY_LOCATION = '/usr/local/pcre-' # `DOCDIR`", "Options that needs to be set. # For more Options, just add the", "Define the Configure time Options that needs to be set. # For more", "and `MAKE INSTALL` processes). PCRE_BUILD_PROCESS_LOG_DIRECTORY = 'Pcre_Subprocess_Logs/' PCRE_SUBPROCESS_LOG_LOCATION = helpers.BuildConfig.Logger.LoggerConfig.LOG_FILE_LOCATION + PCRE_BUILD_PROCESS_LOG_DIRECTORY PCRE_SUBPROCESS_LOG_FILENAME", "directory for `PCRE`. PCRE_TAR_EXTRACT_PACKAGE_TYPE_LOCATION = 'Pcre/' # Location to keep the `PCRE` binary.", "final # extract directory. # Below is the `TAR` extraction directory for `PCRE`.", "= '/usr/local/pcre-' # `DOCDIR` location for the `PCRE` install. PCRE_DOCDIR_LOCATION = '/usr/share/doc/pcre-' #", "environment details. ENVIRONMENT = { 'BUILD_TARGET': '__PCRE__', 'DEPENDENCY' : None, 'BUILD_TYPE' : 'Generic", "'--enable-pcre16', '--enable-pcre32', '--enable-pcregrep-libz', '--disable-static'] # Have the `PREFIX` set here, and we will", "the `CONFIGURE`, `MAKE` and `MAKE INSTALL` processes). 
PCRE_BUILD_PROCESS_LOG_DIRECTORY = 'Pcre_Subprocess_Logs/' PCRE_SUBPROCESS_LOG_LOCATION = helpers.BuildConfig.Logger.LoggerConfig.LOG_FILE_LOCATION", "############################################################## # Get the configuration options set for the PCRE build # and", "for setting up `HTTPD` Dependency components.\", 'OS_SUPPORT' : { 'Unix': ['Solaris'], 'Linux': ['Ubuntu',", "URL. DOWNLOAD_URL = 'ftp://ftp.csx.cam.ac.uk/pub/software/programming/pcre/pcre-8.40.tar.gz' # Get this value from the above Downloads URI.", "directory. # Below is the `TAR` extraction directory for `PCRE`. PCRE_TAR_EXTRACT_PACKAGE_TYPE_LOCATION = 'Pcre/'", "############################################################## import helpers.BuildConfig.Logger.LoggerConfig ############################################################## # Get the configuration options set for the PCRE", "more Options, just add the directive to the below list. ENABLE_OPTIONS_FLAGS = ['--enable-unicode-properties',", "'Pcre' # `PCRE_TAR` component name to be used # program wide. # This", "needs to be set. # For more Options, just add the directive to", "Import Section. # Make all the necessary imports here. ############################################################## import helpers.BuildConfig.Logger.LoggerConfig ##############################################################", ": 'Generic [Automated]', 'DESCRIPTION' : \"Build Automation for setting up `HTTPD` Dependency components.\",", "the directive to the below list. ENABLE_OPTIONS_FLAGS = ['--enable-unicode-properties', '--enable-pcre16', '--enable-pcre32', '--enable-pcregrep-libz', '--disable-static']", "name convention to # be followed within the `TAR` final # extract directory.", "# Set the log location options, to store the Build Information # for", "the `CONFIGURE` line options at run-time # from the `BUILD_SUPERVISOR` Script. INSTALL_TIME_OPTIONS =", "Set the PCRE Download URL. 
DOWNLOAD_URL = 'ftp://ftp.csx.cam.ac.uk/pub/software/programming/pcre/pcre-8.40.tar.gz' # Get this value from", "keep the `PCRE` binary. # Change this location parameter to # suit your", "# Set the `DOCDIR` flag as an entry in the `DOCDIR` dictionary. PCRE_DOCDIR", "here, and we will include the `CONFIGURE` line options at run-time # from", "the `DOCDIR` dictionary. PCRE_DOCDIR = {'docdir_options': '--docdir=' + PCRE_DOCDIR_LOCATION} # Define the Configure", "is the `TAR` extraction directory for `PCRE`. PCRE_TAR_EXTRACT_PACKAGE_TYPE_LOCATION = 'Pcre/' # Location to", "import helpers.BuildConfig.Logger.LoggerConfig ############################################################## # Get the configuration options set for the PCRE build", "# the DownloadManager Module. # It references the `PCRE_TAR` package. PCRE_TAR_COMPONENT_NAME = 'Pcre_Tar'", "be used # program wide. # Used to reference or label `PCRE` #", "standards. PCRE_BINARY_LOCATION = '/usr/local/pcre-' # `DOCDIR` location for the `PCRE` install. PCRE_DOCDIR_LOCATION =", "= 'PCRE::UNTAR::THREAD' # Package type name convention to # be followed within the", "executes the untar logic for `PCRE`. PCRE_UNTAR_THREAD_NAME = 'PCRE::UNTAR::THREAD' # Package type name", "# from the `BUILD_SUPERVISOR` Script. INSTALL_TIME_OPTIONS = {'prefix_options': '--prefix='} # Set the log", "the URI. # Use it to enable the configure time `PREFIX` and `DOC_DIR`", "and we will include the `CONFIGURE` line options at run-time # from the", "list. ENABLE_OPTIONS_FLAGS = ['--enable-unicode-properties', '--enable-pcre16', '--enable-pcre32', '--enable-pcregrep-libz', '--disable-static'] # Have the `PREFIX` set", "PCRE build # and install on the target host. # Set the PCRE", "['Solaris'], 'Linux': ['Ubuntu', 'RHEL'] } } # `THREAD` name that executes the download", "`PREFIX` set here, and we will include the `CONFIGURE` line options at run-time", "the identifier returned by # the DownloadManager Module. 
# It references the `PCRE_TAR`", "type name convention to # be followed within the `TAR` final # extract", "nuances. PCRE_COMPONENT_NAME = 'Pcre' # `PCRE_TAR` component name to be used # program", "component name to be used # program wide. # Used to reference or", "for `PCRE`. PCRE_DOWNLOADER_THREAD_NAME = 'PCRE::DOWNLOADER::THREAD' # `THREAD` name that executes the untar logic", "value from the above Downloads URI. # It would be present before the", "below list. ENABLE_OPTIONS_FLAGS = ['--enable-unicode-properties', '--enable-pcre16', '--enable-pcre32', '--enable-pcregrep-libz', '--disable-static'] # Have the `PREFIX`", "options at run-time # from the `BUILD_SUPERVISOR` Script. INSTALL_TIME_OPTIONS = {'prefix_options': '--prefix='} #", "'OS_SUPPORT' : { 'Unix': ['Solaris'], 'Linux': ['Ubuntu', 'RHEL'] } } # `THREAD` name", "environment standards. PCRE_BINARY_LOCATION = '/usr/local/pcre-' # `DOCDIR` location for the `PCRE` install. PCRE_DOCDIR_LOCATION", "`PCRE` source build (i.e., this is for the `CONFIGURE`, `MAKE` and `MAKE INSTALL`", "ENVIRONMENT = { 'BUILD_TARGET': '__PCRE__', 'DEPENDENCY' : None, 'BUILD_TYPE' : 'Generic [Automated]', 'DESCRIPTION'", "`PCRE` install. PCRE_DOCDIR_LOCATION = '/usr/share/doc/pcre-' # `PCRE` component name to be used #", "PCRE_TAR_EXTRACT_PACKAGE_TYPE_LOCATION = 'Pcre/' # Location to keep the `PCRE` binary. # Change this", "############################################################## # Module Import Section. # Make all the necessary imports here. ##############################################################", "'--prefix='} # Set the log location options, to store the Build Information #", "is the identifier returned by # the DownloadManager Module. # It references the", "`BUILD_SUPERVISOR` Script. INSTALL_TIME_OPTIONS = {'prefix_options': '--prefix='} # Set the log location options, to", "PCRE_VERSION = '8.40' # `PCRE` build environment details. 
ENVIRONMENT = { 'BUILD_TARGET': '__PCRE__',", "`HTTPD` Dependency components.\", 'OS_SUPPORT' : { 'Unix': ['Solaris'], 'Linux': ['Ubuntu', 'RHEL'] } }", "the download logic for `PCRE`. PCRE_DOWNLOADER_THREAD_NAME = 'PCRE::DOWNLOADER::THREAD' # `THREAD` name that executes", "# `DOCDIR` location for the `PCRE` install. PCRE_DOCDIR_LOCATION = '/usr/share/doc/pcre-' # `PCRE` component", "be referenced after the # untarring operation. PCRE_TAR_EXTRACT_COMPONENT_NAME = 'Pcre_Tar_ExtractName' # Set the", "will include the `CONFIGURE` line options at run-time # from the `BUILD_SUPERVISOR` Script.", "references the `PCRE_TAR` package. PCRE_TAR_COMPONENT_NAME = 'Pcre_Tar' # The `PCRE_TAR` extract name to", "= ['--enable-unicode-properties', '--enable-pcre16', '--enable-pcre32', '--enable-pcregrep-libz', '--disable-static'] # Have the `PREFIX` set here, and", "# Get this value from the above Downloads URI. # It would be", "(i.e., this is for the `CONFIGURE`, `MAKE` and `MAKE INSTALL` processes). PCRE_BUILD_PROCESS_LOG_DIRECTORY =", "{ 'Unix': ['Solaris'], 'Linux': ['Ubuntu', 'RHEL'] } } # `THREAD` name that executes", "Options, just add the directive to the below list. ENABLE_OPTIONS_FLAGS = ['--enable-unicode-properties', '--enable-pcre16',", "`EXTENSION` type of the Archived Source Package in the URI. # Use it", "Archived Source Package in the URI. # Use it to enable the configure", "name to be used # program wide. # This is the identifier returned", "Module. # It references the `PCRE_TAR` package. PCRE_TAR_COMPONENT_NAME = 'Pcre_Tar' # The `PCRE_TAR`", "all the necessary imports here. 
############################################################## import helpers.BuildConfig.Logger.LoggerConfig ############################################################## # Get the configuration", "the log location options, to store the Build Information # for the `PCRE`", "None, 'BUILD_TYPE' : 'Generic [Automated]', 'DESCRIPTION' : \"Build Automation for setting up `HTTPD`", "build (i.e., this is for the `CONFIGURE`, `MAKE` and `MAKE INSTALL` processes). PCRE_BUILD_PROCESS_LOG_DIRECTORY", "binary. # Change this location parameter to # suit your environment standards. PCRE_BINARY_LOCATION", "above Downloads URI. # It would be present before the `EXTENSION` type of", "`DOCDIR` location for the `PCRE` install. PCRE_DOCDIR_LOCATION = '/usr/share/doc/pcre-' # `PCRE` component name", "package. PCRE_TAR_COMPONENT_NAME = 'Pcre_Tar' # The `PCRE_TAR` extract name to be referenced after", "build environment details. ENVIRONMENT = { 'BUILD_TARGET': '__PCRE__', 'DEPENDENCY' : None, 'BUILD_TYPE' :", "this location parameter to # suit your environment standards. PCRE_BINARY_LOCATION = '/usr/local/pcre-' #", "{'docdir_options': '--docdir=' + PCRE_DOCDIR_LOCATION} # Define the Configure time Options that needs to", "PCRE_DOCDIR_LOCATION} # Define the Configure time Options that needs to be set. #", "For more Options, just add the directive to the below list. ENABLE_OPTIONS_FLAGS =", "imports here. ############################################################## import helpers.BuildConfig.Logger.LoggerConfig ############################################################## # Get the configuration options set for", "# Have the `PREFIX` set here, and we will include the `CONFIGURE` line", "include the `CONFIGURE` line options at run-time # from the `BUILD_SUPERVISOR` Script. 
INSTALL_TIME_OPTIONS", "Get the configuration options set for the PCRE build # and install on", "PCRE_TAR_EXTRACT_COMPONENT_NAME = 'Pcre_Tar_ExtractName' # Set the `DOCDIR` flag as an entry in the", "name that executes the untar logic for `PCRE`. PCRE_UNTAR_THREAD_NAME = 'PCRE::UNTAR::THREAD' # Package", "Below is the `TAR` extraction directory for `PCRE`. PCRE_TAR_EXTRACT_PACKAGE_TYPE_LOCATION = 'Pcre/' # Location", "time Options that needs to be set. # For more Options, just add", "Get this value from the above Downloads URI. # It would be present", "enable the configure time `PREFIX` and `DOC_DIR` options. PCRE_VERSION = '8.40' # `PCRE`", "used # program wide. # Used to reference or label `PCRE` # related", "logic for `PCRE`. PCRE_DOWNLOADER_THREAD_NAME = 'PCRE::DOWNLOADER::THREAD' # `THREAD` name that executes the untar", "component name to be used # program wide. # This is the identifier", "for `PCRE`. PCRE_UNTAR_THREAD_NAME = 'PCRE::UNTAR::THREAD' # Package type name convention to # be", "install. PCRE_DOCDIR_LOCATION = '/usr/share/doc/pcre-' # `PCRE` component name to be used # program", "the configuration options set for the PCRE build # and install on the", "`PCRE_TAR` extract name to be referenced after the # untarring operation. PCRE_TAR_EXTRACT_COMPONENT_NAME =", "# Set the PCRE Download URL. DOWNLOAD_URL = 'ftp://ftp.csx.cam.ac.uk/pub/software/programming/pcre/pcre-8.40.tar.gz' # Get this value", "extraction directory for `PCRE`. PCRE_TAR_EXTRACT_PACKAGE_TYPE_LOCATION = 'Pcre/' # Location to keep the `PCRE`", "referenced after the # untarring operation. PCRE_TAR_EXTRACT_COMPONENT_NAME = 'Pcre_Tar_ExtractName' # Set the `DOCDIR`", "just add the directive to the below list. ENABLE_OPTIONS_FLAGS = ['--enable-unicode-properties', '--enable-pcre16', '--enable-pcre32',", "Dependency components.\", 'OS_SUPPORT' : { 'Unix': ['Solaris'], 'Linux': ['Ubuntu', 'RHEL'] } } #", "'/usr/share/doc/pcre-' # `PCRE` component name to be used # program wide. 
# Used", "to the below list. ENABLE_OPTIONS_FLAGS = ['--enable-unicode-properties', '--enable-pcre16', '--enable-pcre32', '--enable-pcregrep-libz', '--disable-static'] # Have", "the DownloadManager Module. # It references the `PCRE_TAR` package. PCRE_TAR_COMPONENT_NAME = 'Pcre_Tar' #", "wide. # This is the identifier returned by # the DownloadManager Module. #", "for `PCRE`. PCRE_TAR_EXTRACT_PACKAGE_TYPE_LOCATION = 'Pcre/' # Location to keep the `PCRE` binary. #", "`PCRE_TAR` package. PCRE_TAR_COMPONENT_NAME = 'Pcre_Tar' # The `PCRE_TAR` extract name to be referenced", "dictionary. PCRE_DOCDIR = {'docdir_options': '--docdir=' + PCRE_DOCDIR_LOCATION} # Define the Configure time Options", "parameter to # suit your environment standards. PCRE_BINARY_LOCATION = '/usr/local/pcre-' # `DOCDIR` location", "related nuances. PCRE_COMPONENT_NAME = 'Pcre' # `PCRE_TAR` component name to be used #", "after the # untarring operation. PCRE_TAR_EXTRACT_COMPONENT_NAME = 'Pcre_Tar_ExtractName' # Set the `DOCDIR` flag", "Automation for setting up `HTTPD` Dependency components.\", 'OS_SUPPORT' : { 'Unix': ['Solaris'], 'Linux':", "`PCRE` # related nuances. PCRE_COMPONENT_NAME = 'Pcre' # `PCRE_TAR` component name to be", "'__PCRE__', 'DEPENDENCY' : None, 'BUILD_TYPE' : 'Generic [Automated]', 'DESCRIPTION' : \"Build Automation for", "components.\", 'OS_SUPPORT' : { 'Unix': ['Solaris'], 'Linux': ['Ubuntu', 'RHEL'] } } # `THREAD`", "to be set. # For more Options, just add the directive to the", "Downloads URI. # It would be present before the `EXTENSION` type of the", "The `PCRE_TAR` extract name to be referenced after the # untarring operation. PCRE_TAR_EXTRACT_COMPONENT_NAME", "program wide. # Used to reference or label `PCRE` # related nuances. 
PCRE_COMPONENT_NAME", "'Generic [Automated]', 'DESCRIPTION' : \"Build Automation for setting up `HTTPD` Dependency components.\", 'OS_SUPPORT'", "{'prefix_options': '--prefix='} # Set the log location options, to store the Build Information", "= '8.40' # `PCRE` build environment details. ENVIRONMENT = { 'BUILD_TARGET': '__PCRE__', 'DEPENDENCY'", "flag as an entry in the `DOCDIR` dictionary. PCRE_DOCDIR = {'docdir_options': '--docdir=' +", "= 'Pcre_Tar_ExtractName' # Set the `DOCDIR` flag as an entry in the `DOCDIR`", "to reference or label `PCRE` # related nuances. PCRE_COMPONENT_NAME = 'Pcre' # `PCRE_TAR`", "['--enable-unicode-properties', '--enable-pcre16', '--enable-pcre32', '--enable-pcregrep-libz', '--disable-static'] # Have the `PREFIX` set here, and we", "wide. # Used to reference or label `PCRE` # related nuances. PCRE_COMPONENT_NAME =", "It would be present before the `EXTENSION` type of the Archived Source Package", "to be referenced after the # untarring operation. PCRE_TAR_EXTRACT_COMPONENT_NAME = 'Pcre_Tar_ExtractName' # Set", "to be used # program wide. # Used to reference or label `PCRE`", "set for the PCRE build # and install on the target host. #", "set here, and we will include the `CONFIGURE` line options at run-time #", "} } # `THREAD` name that executes the download logic for `PCRE`. PCRE_DOWNLOADER_THREAD_NAME", "from the above Downloads URI. # It would be present before the `EXTENSION`", "`PCRE_TAR` component name to be used # program wide. # This is the", "in the `DOCDIR` dictionary. PCRE_DOCDIR = {'docdir_options': '--docdir=' + PCRE_DOCDIR_LOCATION} # Define the", "Download URL. DOWNLOAD_URL = 'ftp://ftp.csx.cam.ac.uk/pub/software/programming/pcre/pcre-8.40.tar.gz' # Get this value from the above Downloads", "'RHEL'] } } # `THREAD` name that executes the download logic for `PCRE`.", "'/usr/local/pcre-' # `DOCDIR` location for the `PCRE` install. 
PCRE_DOCDIR_LOCATION = '/usr/share/doc/pcre-' # `PCRE`", "`CONFIGURE` line options at run-time # from the `BUILD_SUPERVISOR` Script. INSTALL_TIME_OPTIONS = {'prefix_options':", "an entry in the `DOCDIR` dictionary. PCRE_DOCDIR = {'docdir_options': '--docdir=' + PCRE_DOCDIR_LOCATION} #", "# program wide. # This is the identifier returned by # the DownloadManager", "'PCRE::DOWNLOADER::THREAD' # `THREAD` name that executes the untar logic for `PCRE`. PCRE_UNTAR_THREAD_NAME =", ": None, 'BUILD_TYPE' : 'Generic [Automated]', 'DESCRIPTION' : \"Build Automation for setting up", "# Make all the necessary imports here. ############################################################## import helpers.BuildConfig.Logger.LoggerConfig ############################################################## # Get", "# Package type name convention to # be followed within the `TAR` final", "Use it to enable the configure time `PREFIX` and `DOC_DIR` options. PCRE_VERSION =", "to # be followed within the `TAR` final # extract directory. # Below", "PCRE Download URL. DOWNLOAD_URL = 'ftp://ftp.csx.cam.ac.uk/pub/software/programming/pcre/pcre-8.40.tar.gz' # Get this value from the above", "'Pcre_Tar_ExtractName' # Set the `DOCDIR` flag as an entry in the `DOCDIR` dictionary.", "`DOCDIR` flag as an entry in the `DOCDIR` dictionary. PCRE_DOCDIR = {'docdir_options': '--docdir='", "Section. # Make all the necessary imports here. ############################################################## import helpers.BuildConfig.Logger.LoggerConfig ############################################################## #", "INSTALL` processes). PCRE_BUILD_PROCESS_LOG_DIRECTORY = 'Pcre_Subprocess_Logs/' PCRE_SUBPROCESS_LOG_LOCATION = helpers.BuildConfig.Logger.LoggerConfig.LOG_FILE_LOCATION + PCRE_BUILD_PROCESS_LOG_DIRECTORY PCRE_SUBPROCESS_LOG_FILENAME = PCRE_SUBPROCESS_LOG_LOCATION", "Build Information # for the `PCRE` source build (i.e., this is for the", "# `PCRE` build environment details. 
ENVIRONMENT = { 'BUILD_TARGET': '__PCRE__', 'DEPENDENCY' : None,", "# It would be present before the `EXTENSION` type of the Archived Source", "present before the `EXTENSION` type of the Archived Source Package in the URI.", "Used to reference or label `PCRE` # related nuances. PCRE_COMPONENT_NAME = 'Pcre' #", "PCRE_UNTAR_THREAD_NAME = 'PCRE::UNTAR::THREAD' # Package type name convention to # be followed within", "'PCRE::UNTAR::THREAD' # Package type name convention to # be followed within the `TAR`", "# Define the Configure time Options that needs to be set. # For", "and `DOC_DIR` options. PCRE_VERSION = '8.40' # `PCRE` build environment details. ENVIRONMENT =", "'DESCRIPTION' : \"Build Automation for setting up `HTTPD` Dependency components.\", 'OS_SUPPORT' : {", "'BUILD_TYPE' : 'Generic [Automated]', 'DESCRIPTION' : \"Build Automation for setting up `HTTPD` Dependency", "DOWNLOAD_URL = 'ftp://ftp.csx.cam.ac.uk/pub/software/programming/pcre/pcre-8.40.tar.gz' # Get this value from the above Downloads URI. #", "`THREAD` name that executes the untar logic for `PCRE`. PCRE_UNTAR_THREAD_NAME = 'PCRE::UNTAR::THREAD' #", "extract name to be referenced after the # untarring operation. PCRE_TAR_EXTRACT_COMPONENT_NAME = 'Pcre_Tar_ExtractName'", "PCRE_COMPONENT_NAME = 'Pcre' # `PCRE_TAR` component name to be used # program wide.", "= 'Pcre' # `PCRE_TAR` component name to be used # program wide. #", "# It references the `PCRE_TAR` package. PCRE_TAR_COMPONENT_NAME = 'Pcre_Tar' # The `PCRE_TAR` extract", "line options at run-time # from the `BUILD_SUPERVISOR` Script. INSTALL_TIME_OPTIONS = {'prefix_options': '--prefix='}", "'Pcre_Tar' # The `PCRE_TAR` extract name to be referenced after the # untarring", "the `PCRE_TAR` package. PCRE_TAR_COMPONENT_NAME = 'Pcre_Tar' # The `PCRE_TAR` extract name to be", "name that executes the download logic for `PCRE`. 
PCRE_DOWNLOADER_THREAD_NAME = 'PCRE::DOWNLOADER::THREAD' # `THREAD`", "Change this location parameter to # suit your environment standards. PCRE_BINARY_LOCATION = '/usr/local/pcre-'", "name to be referenced after the # untarring operation. PCRE_TAR_EXTRACT_COMPONENT_NAME = 'Pcre_Tar_ExtractName' #", "the above Downloads URI. # It would be present before the `EXTENSION` type", "= {'docdir_options': '--docdir=' + PCRE_DOCDIR_LOCATION} # Define the Configure time Options that needs", "`PCRE`. PCRE_TAR_EXTRACT_PACKAGE_TYPE_LOCATION = 'Pcre/' # Location to keep the `PCRE` binary. # Change", "location for the `PCRE` install. PCRE_DOCDIR_LOCATION = '/usr/share/doc/pcre-' # `PCRE` component name to", "'Pcre/' # Location to keep the `PCRE` binary. # Change this location parameter", "# This is the identifier returned by # the DownloadManager Module. # It", "`PCRE` component name to be used # program wide. # Used to reference", "is for the `CONFIGURE`, `MAKE` and `MAKE INSTALL` processes). PCRE_BUILD_PROCESS_LOG_DIRECTORY = 'Pcre_Subprocess_Logs/' PCRE_SUBPROCESS_LOG_LOCATION", "# Used to reference or label `PCRE` # related nuances. PCRE_COMPONENT_NAME = 'Pcre'", "helpers.BuildConfig.Logger.LoggerConfig ############################################################## # Get the configuration options set for the PCRE build #", "`PCRE`. PCRE_UNTAR_THREAD_NAME = 'PCRE::UNTAR::THREAD' # Package type name convention to # be followed", "options, to store the Build Information # for the `PCRE` source build (i.e.,", "the below list. ENABLE_OPTIONS_FLAGS = ['--enable-unicode-properties', '--enable-pcre16', '--enable-pcre32', '--enable-pcregrep-libz', '--disable-static'] # Have the", "source build (i.e., this is for the `CONFIGURE`, `MAKE` and `MAKE INSTALL` processes).", "the Configure time Options that needs to be set. 
# For more Options,", "{ 'BUILD_TARGET': '__PCRE__', 'DEPENDENCY' : None, 'BUILD_TYPE' : 'Generic [Automated]', 'DESCRIPTION' : \"Build", "# For more Options, just add the directive to the below list. ENABLE_OPTIONS_FLAGS", "`CONFIGURE`, `MAKE` and `MAKE INSTALL` processes). PCRE_BUILD_PROCESS_LOG_DIRECTORY = 'Pcre_Subprocess_Logs/' PCRE_SUBPROCESS_LOG_LOCATION = helpers.BuildConfig.Logger.LoggerConfig.LOG_FILE_LOCATION +", "# untarring operation. PCRE_TAR_EXTRACT_COMPONENT_NAME = 'Pcre_Tar_ExtractName' # Set the `DOCDIR` flag as an", "untarring operation. PCRE_TAR_EXTRACT_COMPONENT_NAME = 'Pcre_Tar_ExtractName' # Set the `DOCDIR` flag as an entry", "the target host. # Set the PCRE Download URL. DOWNLOAD_URL = 'ftp://ftp.csx.cam.ac.uk/pub/software/programming/pcre/pcre-8.40.tar.gz' #", "'Linux': ['Ubuntu', 'RHEL'] } } # `THREAD` name that executes the download logic", "'--docdir=' + PCRE_DOCDIR_LOCATION} # Define the Configure time Options that needs to be", "'DEPENDENCY' : None, 'BUILD_TYPE' : 'Generic [Automated]', 'DESCRIPTION' : \"Build Automation for setting", "as an entry in the `DOCDIR` dictionary. PCRE_DOCDIR = {'docdir_options': '--docdir=' + PCRE_DOCDIR_LOCATION}", "PCRE_DOCDIR = {'docdir_options': '--docdir=' + PCRE_DOCDIR_LOCATION} # Define the Configure time Options that", "`PREFIX` and `DOC_DIR` options. PCRE_VERSION = '8.40' # `PCRE` build environment details. ENVIRONMENT", "= 'Pcre_Tar' # The `PCRE_TAR` extract name to be referenced after the #", "# Module Import Section. # Make all the necessary imports here. ############################################################## import", "Configure time Options that needs to be set. # For more Options, just", "set. # For more Options, just add the directive to the below list.", "your environment standards. PCRE_BINARY_LOCATION = '/usr/local/pcre-' # `DOCDIR` location for the `PCRE` install.", "name to be used # program wide. 
# Used to reference or label", "'Unix': ['Solaris'], 'Linux': ['Ubuntu', 'RHEL'] } } # `THREAD` name that executes the", "from the `BUILD_SUPERVISOR` Script. INSTALL_TIME_OPTIONS = {'prefix_options': '--prefix='} # Set the log location", "to enable the configure time `PREFIX` and `DOC_DIR` options. PCRE_VERSION = '8.40' #", "# Use it to enable the configure time `PREFIX` and `DOC_DIR` options. PCRE_VERSION", "this value from the above Downloads URI. # It would be present before", "time `PREFIX` and `DOC_DIR` options. PCRE_VERSION = '8.40' # `PCRE` build environment details.", "to # suit your environment standards. PCRE_BINARY_LOCATION = '/usr/local/pcre-' # `DOCDIR` location for", "followed within the `TAR` final # extract directory. # Below is the `TAR`", "['Ubuntu', 'RHEL'] } } # `THREAD` name that executes the download logic for", "here. ############################################################## import helpers.BuildConfig.Logger.LoggerConfig ############################################################## # Get the configuration options set for the", "target host. # Set the PCRE Download URL. DOWNLOAD_URL = 'ftp://ftp.csx.cam.ac.uk/pub/software/programming/pcre/pcre-8.40.tar.gz' # Get", "program wide. # This is the identifier returned by # the DownloadManager Module.", "# `THREAD` name that executes the untar logic for `PCRE`. PCRE_UNTAR_THREAD_NAME = 'PCRE::UNTAR::THREAD'", "# Below is the `TAR` extraction directory for `PCRE`. PCRE_TAR_EXTRACT_PACKAGE_TYPE_LOCATION = 'Pcre/' #", "add the directive to the below list. ENABLE_OPTIONS_FLAGS = ['--enable-unicode-properties', '--enable-pcre16', '--enable-pcre32', '--enable-pcregrep-libz',", "for the PCRE build # and install on the target host. # Set", "install on the target host. # Set the PCRE Download URL. DOWNLOAD_URL =", "`PCRE` build environment details. ENVIRONMENT = { 'BUILD_TARGET': '__PCRE__', 'DEPENDENCY' : None, 'BUILD_TYPE'", "be used # program wide. 
# This is the identifier returned by #", "be followed within the `TAR` final # extract directory. # Below is the", "Location to keep the `PCRE` binary. # Change this location parameter to #", "the `PCRE` install. PCRE_DOCDIR_LOCATION = '/usr/share/doc/pcre-' # `PCRE` component name to be used", "to be used # program wide. # This is the identifier returned by", "'--enable-pcregrep-libz', '--disable-static'] # Have the `PREFIX` set here, and we will include the", "Script. INSTALL_TIME_OPTIONS = {'prefix_options': '--prefix='} # Set the log location options, to store", "options set for the PCRE build # and install on the target host.", "= 'Pcre/' # Location to keep the `PCRE` binary. # Change this location", "the PCRE Download URL. DOWNLOAD_URL = 'ftp://ftp.csx.cam.ac.uk/pub/software/programming/pcre/pcre-8.40.tar.gz' # Get this value from the", "'BUILD_TARGET': '__PCRE__', 'DEPENDENCY' : None, 'BUILD_TYPE' : 'Generic [Automated]', 'DESCRIPTION' : \"Build Automation", "processes). PCRE_BUILD_PROCESS_LOG_DIRECTORY = 'Pcre_Subprocess_Logs/' PCRE_SUBPROCESS_LOG_LOCATION = helpers.BuildConfig.Logger.LoggerConfig.LOG_FILE_LOCATION + PCRE_BUILD_PROCESS_LOG_DIRECTORY PCRE_SUBPROCESS_LOG_FILENAME = PCRE_SUBPROCESS_LOG_LOCATION +", "= '/usr/share/doc/pcre-' # `PCRE` component name to be used # program wide. #", "'8.40' # `PCRE` build environment details. ENVIRONMENT = { 'BUILD_TARGET': '__PCRE__', 'DEPENDENCY' :", "`DOC_DIR` options. PCRE_VERSION = '8.40' # `PCRE` build environment details. ENVIRONMENT = {", "Package type name convention to # be followed within the `TAR` final #", "configuration options set for the PCRE build # and install on the target", "= {'prefix_options': '--prefix='} # Set the log location options, to store the Build", "Set the `DOCDIR` flag as an entry in the `DOCDIR` dictionary. PCRE_DOCDIR =", "`THREAD` name that executes the download logic for `PCRE`. PCRE_DOWNLOADER_THREAD_NAME = 'PCRE::DOWNLOADER::THREAD' #", "`PCRE` binary. 
# Change this location parameter to # suit your environment standards.", "`DOCDIR` dictionary. PCRE_DOCDIR = {'docdir_options': '--docdir=' + PCRE_DOCDIR_LOCATION} # Define the Configure time", "the PCRE build # and install on the target host. # Set the", "operation. PCRE_TAR_EXTRACT_COMPONENT_NAME = 'Pcre_Tar_ExtractName' # Set the `DOCDIR` flag as an entry in", "# related nuances. PCRE_COMPONENT_NAME = 'Pcre' # `PCRE_TAR` component name to be used", "URI. # It would be present before the `EXTENSION` type of the Archived", "# `THREAD` name that executes the download logic for `PCRE`. PCRE_DOWNLOADER_THREAD_NAME = 'PCRE::DOWNLOADER::THREAD'", "the `TAR` final # extract directory. # Below is the `TAR` extraction directory", "PCRE_DOWNLOADER_THREAD_NAME = 'PCRE::DOWNLOADER::THREAD' # `THREAD` name that executes the untar logic for `PCRE`.", "configure time `PREFIX` and `DOC_DIR` options. PCRE_VERSION = '8.40' # `PCRE` build environment", "to store the Build Information # for the `PCRE` source build (i.e., this", "# suit your environment standards. PCRE_BINARY_LOCATION = '/usr/local/pcre-' # `DOCDIR` location for the", "the Build Information # for the `PCRE` source build (i.e., this is for", ": { 'Unix': ['Solaris'], 'Linux': ['Ubuntu', 'RHEL'] } } # `THREAD` name that", "<filename>AutomationKit_InfrastructureServices@SVU/helpers/BuildConfig/Pcre/PcreConfig.py<gh_stars>0 ############################################################## # Module Import Section. # Make all the necessary imports here.", "the `DOCDIR` flag as an entry in the `DOCDIR` dictionary. PCRE_DOCDIR = {'docdir_options':", "= 'PCRE::DOWNLOADER::THREAD' # `THREAD` name that executes the untar logic for `PCRE`. PCRE_UNTAR_THREAD_NAME", "\"Build Automation for setting up `HTTPD` Dependency components.\", 'OS_SUPPORT' : { 'Unix': ['Solaris'],", "within the `TAR` final # extract directory. # Below is the `TAR` extraction", "used # program wide. 
# This is the identifier returned by # the", "# Change this location parameter to # suit your environment standards. PCRE_BINARY_LOCATION =", "to keep the `PCRE` binary. # Change this location parameter to # suit", "'--enable-pcre32', '--enable-pcregrep-libz', '--disable-static'] # Have the `PREFIX` set here, and we will include", "URI. # Use it to enable the configure time `PREFIX` and `DOC_DIR` options.", "that executes the download logic for `PCRE`. PCRE_DOWNLOADER_THREAD_NAME = 'PCRE::DOWNLOADER::THREAD' # `THREAD` name", "# and install on the target host. # Set the PCRE Download URL.", "ENABLE_OPTIONS_FLAGS = ['--enable-unicode-properties', '--enable-pcre16', '--enable-pcre32', '--enable-pcregrep-libz', '--disable-static'] # Have the `PREFIX` set here,", "the configure time `PREFIX` and `DOC_DIR` options. PCRE_VERSION = '8.40' # `PCRE` build", "entry in the `DOCDIR` dictionary. PCRE_DOCDIR = {'docdir_options': '--docdir=' + PCRE_DOCDIR_LOCATION} # Define", "location options, to store the Build Information # for the `PCRE` source build", "the untar logic for `PCRE`. PCRE_UNTAR_THREAD_NAME = 'PCRE::UNTAR::THREAD' # Package type name convention", "the `PCRE` source build (i.e., this is for the `CONFIGURE`, `MAKE` and `MAKE", "PCRE_TAR_COMPONENT_NAME = 'Pcre_Tar' # The `PCRE_TAR` extract name to be referenced after the", "= { 'BUILD_TARGET': '__PCRE__', 'DEPENDENCY' : None, 'BUILD_TYPE' : 'Generic [Automated]', 'DESCRIPTION' :", "be present before the `EXTENSION` type of the Archived Source Package in the", "`TAR` extraction directory for `PCRE`. PCRE_TAR_EXTRACT_PACKAGE_TYPE_LOCATION = 'Pcre/' # Location to keep the", "Have the `PREFIX` set here, and we will include the `CONFIGURE` line options", "Package in the URI. # Use it to enable the configure time `PREFIX`", "be set. # For more Options, just add the directive to the below", "reference or label `PCRE` # related nuances. 
PCRE_COMPONENT_NAME = 'Pcre' # `PCRE_TAR` component", "# The `PCRE_TAR` extract name to be referenced after the # untarring operation.", "executes the download logic for `PCRE`. PCRE_DOWNLOADER_THREAD_NAME = 'PCRE::DOWNLOADER::THREAD' # `THREAD` name that", "# for the `PCRE` source build (i.e., this is for the `CONFIGURE`, `MAKE`", "run-time # from the `BUILD_SUPERVISOR` Script. INSTALL_TIME_OPTIONS = {'prefix_options': '--prefix='} # Set the", "type of the Archived Source Package in the URI. # Use it to", "options. PCRE_VERSION = '8.40' # `PCRE` build environment details. ENVIRONMENT = { 'BUILD_TARGET':", "identifier returned by # the DownloadManager Module. # It references the `PCRE_TAR` package.", "returned by # the DownloadManager Module. # It references the `PCRE_TAR` package. PCRE_TAR_COMPONENT_NAME", "Source Package in the URI. # Use it to enable the configure time", "it to enable the configure time `PREFIX` and `DOC_DIR` options. PCRE_VERSION = '8.40'", "download logic for `PCRE`. PCRE_DOWNLOADER_THREAD_NAME = 'PCRE::DOWNLOADER::THREAD' # `THREAD` name that executes the", "for the `PCRE` install. PCRE_DOCDIR_LOCATION = '/usr/share/doc/pcre-' # `PCRE` component name to be", "DownloadManager Module. # It references the `PCRE_TAR` package. PCRE_TAR_COMPONENT_NAME = 'Pcre_Tar' # The", "# `PCRE_TAR` component name to be used # program wide. # This is", "this is for the `CONFIGURE`, `MAKE` and `MAKE INSTALL` processes). PCRE_BUILD_PROCESS_LOG_DIRECTORY = 'Pcre_Subprocess_Logs/'", "`MAKE INSTALL` processes). PCRE_BUILD_PROCESS_LOG_DIRECTORY = 'Pcre_Subprocess_Logs/' PCRE_SUBPROCESS_LOG_LOCATION = helpers.BuildConfig.Logger.LoggerConfig.LOG_FILE_LOCATION + PCRE_BUILD_PROCESS_LOG_DIRECTORY PCRE_SUBPROCESS_LOG_FILENAME =", "necessary imports here. 
############################################################## import helpers.BuildConfig.Logger.LoggerConfig ############################################################## # Get the configuration options set", "} # `THREAD` name that executes the download logic for `PCRE`. PCRE_DOWNLOADER_THREAD_NAME =", "It references the `PCRE_TAR` package. PCRE_TAR_COMPONENT_NAME = 'Pcre_Tar' # The `PCRE_TAR` extract name", "= 'ftp://ftp.csx.cam.ac.uk/pub/software/programming/pcre/pcre-8.40.tar.gz' # Get this value from the above Downloads URI. # It", "extract directory. # Below is the `TAR` extraction directory for `PCRE`. PCRE_TAR_EXTRACT_PACKAGE_TYPE_LOCATION =", "the `BUILD_SUPERVISOR` Script. INSTALL_TIME_OPTIONS = {'prefix_options': '--prefix='} # Set the log location options,", "directive to the below list. ENABLE_OPTIONS_FLAGS = ['--enable-unicode-properties', '--enable-pcre16', '--enable-pcre32', '--enable-pcregrep-libz', '--disable-static'] #", "# Location to keep the `PCRE` binary. # Change this location parameter to", "on the target host. # Set the PCRE Download URL. DOWNLOAD_URL = 'ftp://ftp.csx.cam.ac.uk/pub/software/programming/pcre/pcre-8.40.tar.gz'", "logic for `PCRE`. PCRE_UNTAR_THREAD_NAME = 'PCRE::UNTAR::THREAD' # Package type name convention to #", "Information # for the `PCRE` source build (i.e., this is for the `CONFIGURE`,", "or label `PCRE` # related nuances. PCRE_COMPONENT_NAME = 'Pcre' # `PCRE_TAR` component name", "INSTALL_TIME_OPTIONS = {'prefix_options': '--prefix='} # Set the log location options, to store the", "+ PCRE_DOCDIR_LOCATION} # Define the Configure time Options that needs to be set.", "`MAKE` and `MAKE INSTALL` processes). PCRE_BUILD_PROCESS_LOG_DIRECTORY = 'Pcre_Subprocess_Logs/' PCRE_SUBPROCESS_LOG_LOCATION = helpers.BuildConfig.Logger.LoggerConfig.LOG_FILE_LOCATION + PCRE_BUILD_PROCESS_LOG_DIRECTORY", "# extract directory. # Below is the `TAR` extraction directory for `PCRE`. 
PCRE_TAR_EXTRACT_PACKAGE_TYPE_LOCATION", "the `PCRE` binary. # Change this location parameter to # suit your environment", "# program wide. # Used to reference or label `PCRE` # related nuances.", "the Archived Source Package in the URI. # Use it to enable the", "we will include the `CONFIGURE` line options at run-time # from the `BUILD_SUPERVISOR`", "# `PCRE` component name to be used # program wide. # Used to", "that needs to be set. # For more Options, just add the directive", "log location options, to store the Build Information # for the `PCRE` source", "of the Archived Source Package in the URI. # Use it to enable", "details. ENVIRONMENT = { 'BUILD_TARGET': '__PCRE__', 'DEPENDENCY' : None, 'BUILD_TYPE' : 'Generic [Automated]',", "the # untarring operation. PCRE_TAR_EXTRACT_COMPONENT_NAME = 'Pcre_Tar_ExtractName' # Set the `DOCDIR` flag as", "PCRE_BINARY_LOCATION = '/usr/local/pcre-' # `DOCDIR` location for the `PCRE` install. PCRE_DOCDIR_LOCATION = '/usr/share/doc/pcre-'", "label `PCRE` # related nuances. PCRE_COMPONENT_NAME = 'Pcre' # `PCRE_TAR` component name to", "Module Import Section. # Make all the necessary imports here. ############################################################## import helpers.BuildConfig.Logger.LoggerConfig", "in the URI. # Use it to enable the configure time `PREFIX` and", "the `TAR` extraction directory for `PCRE`. PCRE_TAR_EXTRACT_PACKAGE_TYPE_LOCATION = 'Pcre/' # Location to keep", "that executes the untar logic for `PCRE`. PCRE_UNTAR_THREAD_NAME = 'PCRE::UNTAR::THREAD' # Package type", "PCRE_BUILD_PROCESS_LOG_DIRECTORY = 'Pcre_Subprocess_Logs/' PCRE_SUBPROCESS_LOG_LOCATION = helpers.BuildConfig.Logger.LoggerConfig.LOG_FILE_LOCATION + PCRE_BUILD_PROCESS_LOG_DIRECTORY PCRE_SUBPROCESS_LOG_FILENAME = PCRE_SUBPROCESS_LOG_LOCATION + 'Pcre_Subprocess.log'", "the `PREFIX` set here, and we will include the `CONFIGURE` line options at", "by # the DownloadManager Module. # It references the `PCRE_TAR` package. 
PCRE_TAR_COMPONENT_NAME =", "[Automated]', 'DESCRIPTION' : \"Build Automation for setting up `HTTPD` Dependency components.\", 'OS_SUPPORT' :", "the `EXTENSION` type of the Archived Source Package in the URI. # Use" ]
[ "parser = argparse.ArgumentParser(description='Update Positioner data') parser.add_argument('start', help='start date') parser.add_argument('end', help='end date') parser.add_argument('-o','--option', help='option:", "positioners') args = parser.parse_args() start_date = args.start end_date = args.end option = args.option", "= [x for x in all_pos if x not in finished_pos] print('Running for", "args.end option = args.option print(option) positioners = args.positioners print(positioners) if positioners is None:", "positioners'.format(len(all_pos))) start_time = datetime.now() exp_df_base, telem_df, coord_df, ptl_dbs = data_mgt.get_pos_data.get_dfs(start_date, end_date) pool =", "<reponame>desihub/desiperf \"\"\" Get Positioner data \"\"\" import data_mgt.get_pos_data import argparse from itertools import", "from itertools import repeat import os import pandas as pd import numpy as", "'/global/homes/p/parkerf/InstPerf/desiperf/py/desiperf/instperfapp/data_mgt' fiberpos = pd.read_csv(os.path.join(os.environ['DATA_MGT_DIR'],'fiberpos.csv')) parser = argparse.ArgumentParser(description='Update Positioner data') parser.add_argument('start', help='start date') parser.add_argument('end',", "parser.add_argument('-o','--option', help='option: new, update (default)', default = 'update') parser.add_argument(\"-p\", \"--positioners\", help = 'List", "#print(finished_pos) #all_pos = [x for x in all_pos if x not in finished_pos]", "= multiprocessing.Pool(processes=64) pool.starmap(data_mgt.get_pos_data.run, zip(all_pos, repeat(start_date), repeat(end_date), repeat(exp_df_base), repeat(coord_df), repeat(telem_df), repeat(fiberpos), repeat(ptl_dbs), repeat(option))) pool.terminate()", "end_date = args.end option = args.option print(option) positioners = args.positioners print(positioners) if positioners", "help='start date') parser.add_argument('end', help='end date') parser.add_argument('-o','--option', help='option: new, update (default)', default = 'update')", "f in fin] 
#print(finished_pos) #all_pos = [x for x in all_pos if x", "finished_pos] print('Running for {} positioners'.format(len(all_pos))) start_time = datetime.now() exp_df_base, telem_df, coord_df, ptl_dbs =", "import datetime import multiprocessing #os.environ['DATA_DIR'] = '/global/cscratch1/sd/parkerf/data_local' #os.environ['DATA_MGT_DIR'] = '/global/homes/p/parkerf/InstPerf/desiperf/py/desiperf/instperfapp/data_mgt' fiberpos = pd.read_csv(os.path.join(os.environ['DATA_MGT_DIR'],'fiberpos.csv'))", "pandas as pd import numpy as np from datetime import datetime import multiprocessing", "zip(all_pos, repeat(start_date), repeat(end_date), repeat(exp_df_base), repeat(coord_df), repeat(telem_df), repeat(fiberpos), repeat(ptl_dbs), repeat(option))) pool.terminate() print(\"total time: \",(datetime.now()-start_time).total_seconds()/60.)", "import os import pandas as pd import numpy as np from datetime import", "of positioners') args = parser.parse_args() start_date = args.start end_date = args.end option =", "= 'List of positioners') args = parser.parse_args() start_date = args.start end_date = args.end", "update (default)', default = 'update') parser.add_argument(\"-p\", \"--positioners\", help = 'List of positioners') args", "default = 'update') parser.add_argument(\"-p\", \"--positioners\", help = 'List of positioners') args = parser.parse_args()", "fin] #print(finished_pos) #all_pos = [x for x in all_pos if x not in", "argparse.ArgumentParser(description='Update Positioner data') parser.add_argument('start', help='start date') parser.add_argument('end', help='end date') parser.add_argument('-o','--option', help='option: new, update", "#os.environ['DATA_MGT_DIR'] = '/global/homes/p/parkerf/InstPerf/desiperf/py/desiperf/instperfapp/data_mgt' fiberpos = pd.read_csv(os.path.join(os.environ['DATA_MGT_DIR'],'fiberpos.csv')) parser = argparse.ArgumentParser(description='Update Positioner data') parser.add_argument('start', help='start", "= datetime.now() exp_df_base, telem_df, 
coord_df, ptl_dbs = data_mgt.get_pos_data.get_dfs(start_date, end_date) pool = multiprocessing.Pool(processes=64) pool.starmap(data_mgt.get_pos_data.run,", "option = args.option print(option) positioners = args.positioners print(positioners) if positioners is None: all_pos", "print(option) positioners = args.positioners print(positioners) if positioners is None: all_pos = np.unique(fiberpos.CAN_ID) else:", "parser.add_argument('end', help='end date') parser.add_argument('-o','--option', help='option: new, update (default)', default = 'update') parser.add_argument(\"-p\", \"--positioners\",", "import argparse from itertools import repeat import os import pandas as pd import", "import numpy as np from datetime import datetime import multiprocessing #os.environ['DATA_DIR'] = '/global/cscratch1/sd/parkerf/data_local'", "Positioner data') parser.add_argument('start', help='start date') parser.add_argument('end', help='end date') parser.add_argument('-o','--option', help='option: new, update (default)',", "list(finished[0])[:-1] #finished_pos = [int(os.path.splitext(os.path.split(f)[1])[0]) for f in fin] #print(finished_pos) #all_pos = [x for", "datetime import datetime import multiprocessing #os.environ['DATA_DIR'] = '/global/cscratch1/sd/parkerf/data_local' #os.environ['DATA_MGT_DIR'] = '/global/homes/p/parkerf/InstPerf/desiperf/py/desiperf/instperfapp/data_mgt' fiberpos =", "#finished = pd.read_csv('/n/home/desiobserver/parkerf/desiperf/py/desiperf/data_local/positioners/finished.txt',header=None) #fin = list(finished[0])[:-1] #finished_pos = [int(os.path.splitext(os.path.split(f)[1])[0]) for f in fin]", "args = parser.parse_args() start_date = args.start end_date = args.end option = args.option print(option)", "args.start end_date = args.end option = args.option print(option) positioners = args.positioners print(positioners) if", "[x for x in all_pos if x not in finished_pos] print('Running for {}", "date') parser.add_argument('end', help='end date') 
parser.add_argument('-o','--option', help='option: new, update (default)', default = 'update') parser.add_argument(\"-p\",", "= argparse.ArgumentParser(description='Update Positioner data') parser.add_argument('start', help='start date') parser.add_argument('end', help='end date') parser.add_argument('-o','--option', help='option: new,", "= data_mgt.get_pos_data.get_dfs(start_date, end_date) pool = multiprocessing.Pool(processes=64) pool.starmap(data_mgt.get_pos_data.run, zip(all_pos, repeat(start_date), repeat(end_date), repeat(exp_df_base), repeat(coord_df), repeat(telem_df),", "telem_df, coord_df, ptl_dbs = data_mgt.get_pos_data.get_dfs(start_date, end_date) pool = multiprocessing.Pool(processes=64) pool.starmap(data_mgt.get_pos_data.run, zip(all_pos, repeat(start_date), repeat(end_date),", "data_mgt.get_pos_data.get_dfs(start_date, end_date) pool = multiprocessing.Pool(processes=64) pool.starmap(data_mgt.get_pos_data.run, zip(all_pos, repeat(start_date), repeat(end_date), repeat(exp_df_base), repeat(coord_df), repeat(telem_df), repeat(fiberpos),", "= args.start end_date = args.end option = args.option print(option) positioners = args.positioners print(positioners)", "= list(finished[0])[:-1] #finished_pos = [int(os.path.splitext(os.path.split(f)[1])[0]) for f in fin] #print(finished_pos) #all_pos = [x", "datetime import multiprocessing #os.environ['DATA_DIR'] = '/global/cscratch1/sd/parkerf/data_local' #os.environ['DATA_MGT_DIR'] = '/global/homes/p/parkerf/InstPerf/desiperf/py/desiperf/instperfapp/data_mgt' fiberpos = pd.read_csv(os.path.join(os.environ['DATA_MGT_DIR'],'fiberpos.csv')) parser", "= np.unique(fiberpos.CAN_ID) else: all_pos = positioners #finished = pd.read_csv('/n/home/desiobserver/parkerf/desiperf/py/desiperf/data_local/positioners/finished.txt',header=None) #fin = list(finished[0])[:-1] #finished_pos", "in finished_pos] print('Running for {} positioners'.format(len(all_pos))) start_time = datetime.now() exp_df_base, telem_df, coord_df, 
ptl_dbs", "pd.read_csv(os.path.join(os.environ['DATA_MGT_DIR'],'fiberpos.csv')) parser = argparse.ArgumentParser(description='Update Positioner data') parser.add_argument('start', help='start date') parser.add_argument('end', help='end date') parser.add_argument('-o','--option',", "print(positioners) if positioners is None: all_pos = np.unique(fiberpos.CAN_ID) else: all_pos = positioners #finished", "'List of positioners') args = parser.parse_args() start_date = args.start end_date = args.end option", "for x in all_pos if x not in finished_pos] print('Running for {} positioners'.format(len(all_pos)))", "= pd.read_csv('/n/home/desiobserver/parkerf/desiperf/py/desiperf/data_local/positioners/finished.txt',header=None) #fin = list(finished[0])[:-1] #finished_pos = [int(os.path.splitext(os.path.split(f)[1])[0]) for f in fin] #print(finished_pos)", "#fin = list(finished[0])[:-1] #finished_pos = [int(os.path.splitext(os.path.split(f)[1])[0]) for f in fin] #print(finished_pos) #all_pos =", "pool.starmap(data_mgt.get_pos_data.run, zip(all_pos, repeat(start_date), repeat(end_date), repeat(exp_df_base), repeat(coord_df), repeat(telem_df), repeat(fiberpos), repeat(ptl_dbs), repeat(option))) pool.terminate() print(\"total time:", "data \"\"\" import data_mgt.get_pos_data import argparse from itertools import repeat import os import", "from datetime import datetime import multiprocessing #os.environ['DATA_DIR'] = '/global/cscratch1/sd/parkerf/data_local' #os.environ['DATA_MGT_DIR'] = '/global/homes/p/parkerf/InstPerf/desiperf/py/desiperf/instperfapp/data_mgt' fiberpos", "= '/global/homes/p/parkerf/InstPerf/desiperf/py/desiperf/instperfapp/data_mgt' fiberpos = pd.read_csv(os.path.join(os.environ['DATA_MGT_DIR'],'fiberpos.csv')) parser = argparse.ArgumentParser(description='Update Positioner data') parser.add_argument('start', help='start date')", "in fin] #print(finished_pos) #all_pos = [x for x in all_pos if x not", "datetime.now() exp_df_base, telem_df, coord_df, ptl_dbs = 
data_mgt.get_pos_data.get_dfs(start_date, end_date) pool = multiprocessing.Pool(processes=64) pool.starmap(data_mgt.get_pos_data.run, zip(all_pos,", "pool = multiprocessing.Pool(processes=64) pool.starmap(data_mgt.get_pos_data.run, zip(all_pos, repeat(start_date), repeat(end_date), repeat(exp_df_base), repeat(coord_df), repeat(telem_df), repeat(fiberpos), repeat(ptl_dbs), repeat(option)))", "= '/global/cscratch1/sd/parkerf/data_local' #os.environ['DATA_MGT_DIR'] = '/global/homes/p/parkerf/InstPerf/desiperf/py/desiperf/instperfapp/data_mgt' fiberpos = pd.read_csv(os.path.join(os.environ['DATA_MGT_DIR'],'fiberpos.csv')) parser = argparse.ArgumentParser(description='Update Positioner data')", "positioners = args.positioners print(positioners) if positioners is None: all_pos = np.unique(fiberpos.CAN_ID) else: all_pos", "#os.environ['DATA_DIR'] = '/global/cscratch1/sd/parkerf/data_local' #os.environ['DATA_MGT_DIR'] = '/global/homes/p/parkerf/InstPerf/desiperf/py/desiperf/instperfapp/data_mgt' fiberpos = pd.read_csv(os.path.join(os.environ['DATA_MGT_DIR'],'fiberpos.csv')) parser = argparse.ArgumentParser(description='Update Positioner", "np.unique(fiberpos.CAN_ID) else: all_pos = positioners #finished = pd.read_csv('/n/home/desiobserver/parkerf/desiperf/py/desiperf/data_local/positioners/finished.txt',header=None) #fin = list(finished[0])[:-1] #finished_pos =", "help='end date') parser.add_argument('-o','--option', help='option: new, update (default)', default = 'update') parser.add_argument(\"-p\", \"--positioners\", help", "'update') parser.add_argument(\"-p\", \"--positioners\", help = 'List of positioners') args = parser.parse_args() start_date =", "None: all_pos = np.unique(fiberpos.CAN_ID) else: all_pos = positioners #finished = pd.read_csv('/n/home/desiobserver/parkerf/desiperf/py/desiperf/data_local/positioners/finished.txt',header=None) #fin =", "= positioners #finished = 
pd.read_csv('/n/home/desiobserver/parkerf/desiperf/py/desiperf/data_local/positioners/finished.txt',header=None) #fin = list(finished[0])[:-1] #finished_pos = [int(os.path.splitext(os.path.split(f)[1])[0]) for f", "import pandas as pd import numpy as np from datetime import datetime import", "= 'update') parser.add_argument(\"-p\", \"--positioners\", help = 'List of positioners') args = parser.parse_args() start_date", "x not in finished_pos] print('Running for {} positioners'.format(len(all_pos))) start_time = datetime.now() exp_df_base, telem_df,", "pd import numpy as np from datetime import datetime import multiprocessing #os.environ['DATA_DIR'] =", "else: all_pos = positioners #finished = pd.read_csv('/n/home/desiobserver/parkerf/desiperf/py/desiperf/data_local/positioners/finished.txt',header=None) #fin = list(finished[0])[:-1] #finished_pos = [int(os.path.splitext(os.path.split(f)[1])[0])", "print('Running for {} positioners'.format(len(all_pos))) start_time = datetime.now() exp_df_base, telem_df, coord_df, ptl_dbs = data_mgt.get_pos_data.get_dfs(start_date,", "exp_df_base, telem_df, coord_df, ptl_dbs = data_mgt.get_pos_data.get_dfs(start_date, end_date) pool = multiprocessing.Pool(processes=64) pool.starmap(data_mgt.get_pos_data.run, zip(all_pos, repeat(start_date),", "numpy as np from datetime import datetime import multiprocessing #os.environ['DATA_DIR'] = '/global/cscratch1/sd/parkerf/data_local' #os.environ['DATA_MGT_DIR']", "{} positioners'.format(len(all_pos))) start_time = datetime.now() exp_df_base, telem_df, coord_df, ptl_dbs = data_mgt.get_pos_data.get_dfs(start_date, end_date) pool", "import repeat import os import pandas as pd import numpy as np from", "= args.end option = args.option print(option) positioners = args.positioners print(positioners) if positioners is", "'/global/cscratch1/sd/parkerf/data_local' #os.environ['DATA_MGT_DIR'] = '/global/homes/p/parkerf/InstPerf/desiperf/py/desiperf/instperfapp/data_mgt' fiberpos = 
pd.read_csv(os.path.join(os.environ['DATA_MGT_DIR'],'fiberpos.csv')) parser = argparse.ArgumentParser(description='Update Positioner data') parser.add_argument('start',", "help = 'List of positioners') args = parser.parse_args() start_date = args.start end_date =", "= args.option print(option) positioners = args.positioners print(positioners) if positioners is None: all_pos =", "all_pos = np.unique(fiberpos.CAN_ID) else: all_pos = positioners #finished = pd.read_csv('/n/home/desiobserver/parkerf/desiperf/py/desiperf/data_local/positioners/finished.txt',header=None) #fin = list(finished[0])[:-1]", "in all_pos if x not in finished_pos] print('Running for {} positioners'.format(len(all_pos))) start_time =", "parser.parse_args() start_date = args.start end_date = args.end option = args.option print(option) positioners =", "#finished_pos = [int(os.path.splitext(os.path.split(f)[1])[0]) for f in fin] #print(finished_pos) #all_pos = [x for x", "not in finished_pos] print('Running for {} positioners'.format(len(all_pos))) start_time = datetime.now() exp_df_base, telem_df, coord_df,", "= [int(os.path.splitext(os.path.split(f)[1])[0]) for f in fin] #print(finished_pos) #all_pos = [x for x in", "all_pos if x not in finished_pos] print('Running for {} positioners'.format(len(all_pos))) start_time = datetime.now()", "as np from datetime import datetime import multiprocessing #os.environ['DATA_DIR'] = '/global/cscratch1/sd/parkerf/data_local' #os.environ['DATA_MGT_DIR'] =", "data') parser.add_argument('start', help='start date') parser.add_argument('end', help='end date') parser.add_argument('-o','--option', help='option: new, update (default)', default", "coord_df, ptl_dbs = data_mgt.get_pos_data.get_dfs(start_date, end_date) pool = multiprocessing.Pool(processes=64) pool.starmap(data_mgt.get_pos_data.run, zip(all_pos, repeat(start_date), repeat(end_date), repeat(exp_df_base),", "if positioners is None: all_pos = np.unique(fiberpos.CAN_ID) else: all_pos = positioners #finished 
=", "for f in fin] #print(finished_pos) #all_pos = [x for x in all_pos if", "help='option: new, update (default)', default = 'update') parser.add_argument(\"-p\", \"--positioners\", help = 'List of", "= pd.read_csv(os.path.join(os.environ['DATA_MGT_DIR'],'fiberpos.csv')) parser = argparse.ArgumentParser(description='Update Positioner data') parser.add_argument('start', help='start date') parser.add_argument('end', help='end date')", "if x not in finished_pos] print('Running for {} positioners'.format(len(all_pos))) start_time = datetime.now() exp_df_base,", "(default)', default = 'update') parser.add_argument(\"-p\", \"--positioners\", help = 'List of positioners') args =", "multiprocessing #os.environ['DATA_DIR'] = '/global/cscratch1/sd/parkerf/data_local' #os.environ['DATA_MGT_DIR'] = '/global/homes/p/parkerf/InstPerf/desiperf/py/desiperf/instperfapp/data_mgt' fiberpos = pd.read_csv(os.path.join(os.environ['DATA_MGT_DIR'],'fiberpos.csv')) parser = argparse.ArgumentParser(description='Update", "[int(os.path.splitext(os.path.split(f)[1])[0]) for f in fin] #print(finished_pos) #all_pos = [x for x in all_pos", "positioners is None: all_pos = np.unique(fiberpos.CAN_ID) else: all_pos = positioners #finished = pd.read_csv('/n/home/desiobserver/parkerf/desiperf/py/desiperf/data_local/positioners/finished.txt',header=None)", "is None: all_pos = np.unique(fiberpos.CAN_ID) else: all_pos = positioners #finished = pd.read_csv('/n/home/desiobserver/parkerf/desiperf/py/desiperf/data_local/positioners/finished.txt',header=None) #fin", "start_date = args.start end_date = args.end option = args.option print(option) positioners = args.positioners", "multiprocessing.Pool(processes=64) pool.starmap(data_mgt.get_pos_data.run, zip(all_pos, repeat(start_date), repeat(end_date), repeat(exp_df_base), repeat(coord_df), repeat(telem_df), repeat(fiberpos), repeat(ptl_dbs), repeat(option))) pool.terminate() print(\"total", "itertools import repeat import os import pandas as pd import numpy as 
np", "parser.add_argument('start', help='start date') parser.add_argument('end', help='end date') parser.add_argument('-o','--option', help='option: new, update (default)', default =", "positioners #finished = pd.read_csv('/n/home/desiobserver/parkerf/desiperf/py/desiperf/data_local/positioners/finished.txt',header=None) #fin = list(finished[0])[:-1] #finished_pos = [int(os.path.splitext(os.path.split(f)[1])[0]) for f in", "= args.positioners print(positioners) if positioners is None: all_pos = np.unique(fiberpos.CAN_ID) else: all_pos =", "all_pos = positioners #finished = pd.read_csv('/n/home/desiobserver/parkerf/desiperf/py/desiperf/data_local/positioners/finished.txt',header=None) #fin = list(finished[0])[:-1] #finished_pos = [int(os.path.splitext(os.path.split(f)[1])[0]) for", "data_mgt.get_pos_data import argparse from itertools import repeat import os import pandas as pd", "date') parser.add_argument('-o','--option', help='option: new, update (default)', default = 'update') parser.add_argument(\"-p\", \"--positioners\", help =", "\"--positioners\", help = 'List of positioners') args = parser.parse_args() start_date = args.start end_date", "os import pandas as pd import numpy as np from datetime import datetime", "fiberpos = pd.read_csv(os.path.join(os.environ['DATA_MGT_DIR'],'fiberpos.csv')) parser = argparse.ArgumentParser(description='Update Positioner data') parser.add_argument('start', help='start date') parser.add_argument('end', help='end", "args.positioners print(positioners) if positioners is None: all_pos = np.unique(fiberpos.CAN_ID) else: all_pos = positioners", "import data_mgt.get_pos_data import argparse from itertools import repeat import os import pandas as", "x in all_pos if x not in finished_pos] print('Running for {} positioners'.format(len(all_pos))) start_time", "= parser.parse_args() start_date = args.start end_date = args.end option = args.option print(option) positioners", 
"pd.read_csv('/n/home/desiobserver/parkerf/desiperf/py/desiperf/data_local/positioners/finished.txt',header=None) #fin = list(finished[0])[:-1] #finished_pos = [int(os.path.splitext(os.path.split(f)[1])[0]) for f in fin] #print(finished_pos) #all_pos", "\"\"\" Get Positioner data \"\"\" import data_mgt.get_pos_data import argparse from itertools import repeat", "repeat import os import pandas as pd import numpy as np from datetime", "as pd import numpy as np from datetime import datetime import multiprocessing #os.environ['DATA_DIR']", "#all_pos = [x for x in all_pos if x not in finished_pos] print('Running", "ptl_dbs = data_mgt.get_pos_data.get_dfs(start_date, end_date) pool = multiprocessing.Pool(processes=64) pool.starmap(data_mgt.get_pos_data.run, zip(all_pos, repeat(start_date), repeat(end_date), repeat(exp_df_base), repeat(coord_df),", "Positioner data \"\"\" import data_mgt.get_pos_data import argparse from itertools import repeat import os", "parser.add_argument(\"-p\", \"--positioners\", help = 'List of positioners') args = parser.parse_args() start_date = args.start", "args.option print(option) positioners = args.positioners print(positioners) if positioners is None: all_pos = np.unique(fiberpos.CAN_ID)", "np from datetime import datetime import multiprocessing #os.environ['DATA_DIR'] = '/global/cscratch1/sd/parkerf/data_local' #os.environ['DATA_MGT_DIR'] = '/global/homes/p/parkerf/InstPerf/desiperf/py/desiperf/instperfapp/data_mgt'", "new, update (default)', default = 'update') parser.add_argument(\"-p\", \"--positioners\", help = 'List of positioners')", "Get Positioner data \"\"\" import data_mgt.get_pos_data import argparse from itertools import repeat import", "\"\"\" import data_mgt.get_pos_data import argparse from itertools import repeat import os import pandas", "argparse from itertools import repeat import os import pandas as pd import numpy", "import multiprocessing #os.environ['DATA_DIR'] = '/global/cscratch1/sd/parkerf/data_local' 
#os.environ['DATA_MGT_DIR'] = '/global/homes/p/parkerf/InstPerf/desiperf/py/desiperf/instperfapp/data_mgt' fiberpos = pd.read_csv(os.path.join(os.environ['DATA_MGT_DIR'],'fiberpos.csv')) parser =", "end_date) pool = multiprocessing.Pool(processes=64) pool.starmap(data_mgt.get_pos_data.run, zip(all_pos, repeat(start_date), repeat(end_date), repeat(exp_df_base), repeat(coord_df), repeat(telem_df), repeat(fiberpos), repeat(ptl_dbs),", "for {} positioners'.format(len(all_pos))) start_time = datetime.now() exp_df_base, telem_df, coord_df, ptl_dbs = data_mgt.get_pos_data.get_dfs(start_date, end_date)", "start_time = datetime.now() exp_df_base, telem_df, coord_df, ptl_dbs = data_mgt.get_pos_data.get_dfs(start_date, end_date) pool = multiprocessing.Pool(processes=64)" ]
[ "layers to 512 * Adjusted flattening to match CIFAR-10 shapes * Replaced dropout", "v == 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else: if isinstance(v, list): v, kernel_size,", "(epoch_id is not None) and (batch_id is not None): scio.savemat('../checkpoints/inter_features_epoch{}_batch{}.mat'.format(epoch_id, batch_id), {'img': input.detach().squeeze().permute(2,3,1,0).cpu().numpy(),", "= x.view(x.size(0), -1) x = self.classifier(x) x = F.log_softmax(x, dim=1) return x class", "k, 1024 * k), nn.BatchNorm1d(1024 * k), nn.ReLU(inplace=True), nn.Linear(1024 * k, num_classes)) def", "on Caffe's implementation of Lenet-5 and is slightly different from the vanilla LeNet-5.", "def make_layers(config, batch_norm=False, enable_bias=True, in_channels=3): # TODO: BN yes or no? layers =", "'b3': feat_block3.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b4': feat_block4.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b5': x.detach().squeeze().cpu().numpy()}) else: x = self.features(input) x = x.view(x.size(0),", "nn.ReLU(inplace=True), nn.Conv2d(384, 384, kernel_size=3, stride=2, padding=1, bias=enable_bias), nn.BatchNorm2d(384), nn.ReLU(inplace=True), nn.Conv2d(384, 256, kernel_size=3, stride=2,", "as nn import torch.nn.functional as F import scipy.io as scio from torchvision.models import", "LeNet_5_Caffe(nn.Module): \"\"\" This is based on Caffe's implementation of Lenet-5 and is slightly", "LeNet-5. 
\"\"\" def __init__(self, enable_bias=True): super().__init__() self.conv1 = nn.Conv2d(1, 20, 5, padding=0, bias=enable_bias)", "512, 'M'], 'like': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M',", "else: if isinstance(v, list): v, kernel_size, padding = v[0], 1, 0 else: kernel_size,", "'M'], in_channels=3, batch_norm=True, enable_bias=enable_bias) self.features_block2 = self.make_layers([128, 128, 'M'], in_channels=64, batch_norm=True, enable_bias=enable_bias) self.features_block3", "of dropout nn.Linear(512, 512, bias=enable_bias), nn.ReLU(True), nn.BatchNorm1d(512), # instead of dropout nn.Linear(512, num_classes,", "F.max_pool2d(x, 2) x = F.relu(self.conv2(x)) x = F.max_pool2d(x, 2) x = F.relu(self.fc3(x.view(-1, 16", "or no? layers = [] for idx, v in enumerate(config): if v ==", "feat_block2.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b3': feat_block3.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b4': feat_block4.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b5': x.detach().squeeze().cpu().numpy()}) else: x = self.features(input) x =", "'M', 512, 512, 512, 'M'], 'like': [64, 64, 'M', 128, 128, 'M', 256,", "enable_bias=True, in_channels=3): # TODO: BN yes or no? 
layers = [] for idx,", "__init__(self, enable_bias=True): # original code is true super().__init__() self.fc1 = nn.Linear(784, 300, bias=enable_bias)", "\"\"\" def __init__(self, enable_bias=True): super().__init__() self.conv1 = nn.Conv2d(1, 20, 5, padding=0, bias=enable_bias) self.conv2", "v in enumerate(config): if v == 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else: if", "batch_norm=True, enable_bias=enable_bias) else: self.features = self.make_layers(VGG_CONFIGS[config], batch_norm=True, enable_bias=enable_bias) if config in {'C', 'D'}:", "super().__init__() self.fc1 = nn.Linear(784, 300, bias=enable_bias) self.fc2 = nn.Linear(300, 100, bias=enable_bias) self.fc3 =", "torch.nn.functional as F import scipy.io as scio from torchvision.models import vgg19_bn, resnet152, densenet161", "in_channels=64, batch_norm=True, enable_bias=enable_bias) self.features_block3 = self.make_layers([256, 256, [256], 'M'], in_channels=128, batch_norm=True, enable_bias=enable_bias) self.features_block4", "bias=enable_bias) self.fc5 = nn.Linear(84, 10, bias=enable_bias) def forward(self, x): x = F.relu(self.conv1(x)) x", "'b5': x.detach().squeeze().cpu().numpy()}) else: x = self.features(input) x = x.view(x.size(0), -1) x = self.classifier(x)", "(16 layers) 2. VGG-D (16 layers) 3. 
VGG-like Some of the differences: *", "'M'], in_channels=64, batch_norm=True, enable_bias=enable_bias) self.features_block3 = self.make_layers([256, 256, [256], 'M'], in_channels=128, batch_norm=True, enable_bias=enable_bias)", "def forward(self, x): x = F.relu(self.fc1(x.view(-1, 784))) x = F.relu(self.fc2(x)) return F.log_softmax(self.fc3(x), dim=1)", "v[0], 1, 0 else: kernel_size, padding = 3, 1 conv2d = nn.Conv2d(in_channels, v,", "= F.relu(self.fc3(x.view(-1, 16 * 5 * 5))) x = F.relu(self.fc4(x)) x = F.log_softmax(self.fc5(x),", "based on Caffe's implementation of Lenet-5 and is slightly different from the vanilla", "padding and therefore intermediate shapes do not match the official LeNet-5. \"\"\" def", "= nn.Conv2d(6, 16, 5, bias=enable_bias) self.fc3 = nn.Linear(16 * 5 * 5, 120,", "50 * 4 * 4))) x = F.log_softmax(self.fc4(x), dim=1) return x VGG_CONFIGS =", "784))) x = F.relu(self.fc2(x)) return F.log_softmax(self.fc3(x), dim=1) class LeNet_5(nn.Module): def __init__(self, enable_bias=True): super().__init__()", "2) x = F.relu(self.conv2(x)) x = F.max_pool2d(x, 2) x = F.relu(self.fc3(x.view(-1, 16 *", "F.relu(self.fc2(x)) return F.log_softmax(self.fc3(x), dim=1) class LeNet_5(nn.Module): def __init__(self, enable_bias=True): super().__init__() self.conv1 = nn.Conv2d(1,", "stride=2, padding=1, bias=enable_bias), nn.BatchNorm2d(384), nn.ReLU(inplace=True), nn.Conv2d(384, 256, kernel_size=3, stride=2, padding=1, bias=enable_bias), nn.BatchNorm2d(256), nn.ReLU(inplace=True))", "nn.BatchNorm1d(512), # instead of dropout nn.Linear(512, 512, bias=enable_bias), nn.ReLU(True), nn.BatchNorm1d(512), # instead of", "LeNet_5(nn.Module): def __init__(self, enable_bias=True): super().__init__() self.conv1 = nn.Conv2d(1, 6, 5, padding=2, bias=enable_bias) self.conv2", "kernel_size, padding = 3, 1 conv2d = nn.Conv2d(in_channels, v, kernel_size=kernel_size, padding=padding, bias=enable_bias) if", "= F.max_pool2d(x, 2) x = F.relu(self.fc3(x.view(-1, 16 * 5 * 5))) x =", "10, 
bias=enable_bias) def forward(self, x): x = F.relu(self.fc1(x.view(-1, 784))) x = F.relu(self.fc2(x)) return", "0 else: kernel_size, padding = 3, 1 conv2d = nn.Conv2d(in_channels, v, kernel_size=kernel_size, padding=padding,", "2. VGG-D (16 layers) 3. VGG-like Some of the differences: * Reduced size", "= self.features_block1(input) feat_block2 = self.features_block2(feat_block1) feat_block3 = self.features_block3(feat_block2) feat_block4 = self.features_block4(feat_block3) x =", "enable_bias=True): super().__init__() self.conv1 = nn.Conv2d(1, 20, 5, padding=0, bias=enable_bias) self.conv2 = nn.Conv2d(20, 50,", "512, bias=enable_bias), # 512 * 7 * 7 in the original VGG nn.ReLU(True),", "x = F.relu(self.fc1(x.view(-1, 784))) x = F.relu(self.fc2(x)) return F.log_softmax(self.fc3(x), dim=1) class LeNet_5(nn.Module): def", "= self.features_block4(feat_block3) x = self.features_block5(feat_block4) if (epoch_id is not None) and (batch_id is", "from https://medium.com/@kushajreal/training-alexnet-with-tips-and-checks-on-how-to-train-cnns-practical-cnns-in-pytorch-1-61daa679c74a def __init__(self, k=4, num_classes=10, enable_bias=True): super(AlexNet, self).__init__() self.conv_base = nn.Sequential( nn.Conv2d(3,", "nn.ReLU(True), nn.BatchNorm1d(512), # instead of dropout nn.Linear(512, num_classes, bias=enable_bias)) elif config == 'like':", "elif config == 'like': self.classifier = nn.Sequential( nn.Linear(512, 512, bias=enable_bias), # 512 *", "bias=enable_bias) self.conv2 = nn.Conv2d(6, 16, 5, bias=enable_bias) self.fc3 = nn.Linear(16 * 5 *", "= enable_dump_features if enable_dump_features: self.features_block1 = self.make_layers([64, 64, 'M'], in_channels=3, batch_norm=True, enable_bias=enable_bias) self.features_block2", "in_channels=512, batch_norm=True, enable_bias=enable_bias) else: self.features = self.make_layers(VGG_CONFIGS[config], batch_norm=True, enable_bias=enable_bias) if config in {'C',", "self.conv_base(x) x = x.view(x.size(0), -1) x = self.fc_base(x) x = 
F.log_softmax(x, dim=1) return", "is a base class to generate three VGG variants used in SNIP paper:", "self.features_block1 = self.make_layers([64, 64, 'M'], in_channels=3, batch_norm=True, enable_bias=enable_bias) self.features_block2 = self.make_layers([128, 128, 'M'],", "Some of the differences: * Reduced size of FC layers to 512 *", "no? layers = [] for idx, v in enumerate(config): if v == 'M':", "VGG-like Some of the differences: * Reduced size of FC layers to 512", "* 4))) x = F.log_softmax(self.fc4(x), dim=1) return x VGG_CONFIGS = { 'C': [64,", "self.enable_dump_features: feat_block1 = self.features_block1(input) feat_block2 = self.features_block2(feat_block1) feat_block3 = self.features_block3(feat_block2) feat_block4 = self.features_block4(feat_block3)", "64, 'M', 128, 128, 'M', 256, 256, [256], 'M', 512, 512, [512], 'M',", "(batch_id is not None): scio.savemat('../checkpoints/inter_features_epoch{}_batch{}.mat'.format(epoch_id, batch_id), {'img': input.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'gt': gt.detach().squeeze().cpu().numpy(), 'b1': feat_block1.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b2':", "else: kernel_size, padding = 3, 1 conv2d = nn.Conv2d(in_channels, v, kernel_size=kernel_size, padding=padding, bias=enable_bias)", "512, 512, 512, 'M', 512, 512, 512, 'M']} class VGG(nn.Module): \"\"\" This is", "= self.features(input) x = x.view(x.size(0), -1) x = self.classifier(x) x = F.log_softmax(x, dim=1)", "bias=enable_bias) def forward(self, x): x = F.relu(self.conv1(x)) x = F.max_pool2d(x, 2) x =", "x = F.relu(self.fc4(x)) x = F.log_softmax(self.fc5(x), dim=1) return x class LeNet_5_Caffe(nn.Module): \"\"\" This", "of dropout nn.Linear(512, num_classes, bias=enable_bias)) else: assert False @staticmethod def make_layers(config, batch_norm=False, enable_bias=True,", "512, 'M', 512, 512, 512, 'M'], 'like': [64, 64, 'M', 128, 128, 'M',", "* k, num_classes)) def forward(self, x): x = self.conv_base(x) x = x.view(x.size(0), -1)", "= 
nn.Linear(500, 10, bias=enable_bias) def forward(self, x): x = F.relu(self.conv1(x)) x = F.max_pool2d(x,", "enable_bias=True): super().__init__() self.conv1 = nn.Conv2d(1, 6, 5, padding=2, bias=enable_bias) self.conv2 = nn.Conv2d(6, 16,", "num_classes)) def forward(self, x): x = self.conv_base(x) x = x.view(x.size(0), -1) x =", "nn.Linear(256, 1024 * k), nn.BatchNorm1d(1024 * k), nn.ReLU(inplace=True), nn.Linear(1024 * k, 1024 *", "256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], 'like': [64,", "* 5, 120, bias=enable_bias) self.fc4 = nn.Linear(120, 84, bias=enable_bias) self.fc5 = nn.Linear(84, 10,", "padding=2, bias=enable_bias) self.conv2 = nn.Conv2d(6, 16, 5, bias=enable_bias) self.fc3 = nn.Linear(16 * 5", "of the differences: * Reduced size of FC layers to 512 * Adjusted", "first layer does NOT have padding and therefore intermediate shapes do not match", "padding=2, bias=enable_bias), nn.BatchNorm2d(256), nn.ReLU(inplace=True), nn.Conv2d(256, 384, kernel_size=3, stride=2, padding=1, bias=enable_bias), nn.BatchNorm2d(384), nn.ReLU(inplace=True), nn.Conv2d(384,", "F.max_pool2d(x, 2) x = F.relu(self.fc3(x.view(-1, 16 * 5 * 5))) x = F.relu(self.fc4(x))", "= nn.Sequential( nn.Linear(512, 512, bias=enable_bias), # 512 * 7 * 7 in the", "instead of dropout nn.Linear(512, num_classes, bias=enable_bias)) else: assert False @staticmethod def make_layers(config, batch_norm=False,", "super().__init__() self.enable_dump_features = enable_dump_features if enable_dump_features: self.features_block1 = self.make_layers([64, 64, 'M'], in_channels=3, batch_norm=True,", "512, 512, [512], 'M'], 'D': [64, 64, 'M', 128, 128, 'M', 256, 256,", "does NOT have padding and therefore intermediate shapes do not match the official", "nn.ReLU(inplace=True), nn.Conv2d(96, 256, kernel_size=5, stride=2, padding=2, bias=enable_bias), nn.BatchNorm2d(256), nn.ReLU(inplace=True), nn.Conv2d(256, 384, kernel_size=3, stride=2,", "shapes do not match the official LeNet-5. 
\"\"\" def __init__(self, enable_bias=True): super().__init__() self.conv1", "= F.relu(self.conv1(x)) x = F.max_pool2d(x, 2) x = F.relu(self.conv2(x)) x = F.max_pool2d(x, 2)", "nn.Linear(100, 10, bias=enable_bias) def forward(self, x): x = F.relu(self.fc1(x.view(-1, 784))) x = F.relu(self.fc2(x))", "4 * 4))) x = F.log_softmax(self.fc4(x), dim=1) return x VGG_CONFIGS = { 'C':", "= F.relu(self.conv2(x)) x = F.max_pool2d(x, 2) x = F.relu(self.fc3(x.view(-1, 50 * 4 *", "512, 512, 'M'], 'like': [64, 64, 'M', 128, 128, 'M', 256, 256, 256,", "else: self.features = self.make_layers(VGG_CONFIGS[config], batch_norm=True, enable_bias=enable_bias) if config in {'C', 'D'}: self.classifier =", "nn.Linear(16 * 5 * 5, 120, bias=enable_bias) self.fc4 = nn.Linear(120, 84, bias=enable_bias) self.fc5", "self.conv2 = nn.Conv2d(6, 16, 5, bias=enable_bias) self.fc3 = nn.Linear(16 * 5 * 5,", "256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M']} class VGG(nn.Module):", "nn.Linear(84, 10, bias=enable_bias) def forward(self, x): x = F.relu(self.conv1(x)) x = F.max_pool2d(x, 2)", "return x class LeNet_5_Caffe(nn.Module): \"\"\" This is based on Caffe's implementation of Lenet-5", "super().__init__() self.conv1 = nn.Conv2d(1, 20, 5, padding=0, bias=enable_bias) self.conv2 = nn.Conv2d(20, 50, 5,", "512, [512], 'M'], in_channels=512, batch_norm=True, enable_bias=enable_bias) else: self.features = self.make_layers(VGG_CONFIGS[config], batch_norm=True, enable_bias=enable_bias) if", "padding = 3, 1 conv2d = nn.Conv2d(in_channels, v, kernel_size=kernel_size, padding=padding, bias=enable_bias) if batch_norm:", "layers with BatchNorm \"\"\" def __init__(self, config, num_classes=10, enable_bias=True, enable_dump_features=False): super().__init__() self.enable_dump_features =", "not None) and (batch_id is not None): scio.savemat('../checkpoints/inter_features_epoch{}_batch{}.mat'.format(epoch_id, batch_id), {'img': input.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'gt': 
gt.detach().squeeze().cpu().numpy(),", "nn.Linear(512, 512, bias=enable_bias), nn.ReLU(True), nn.BatchNorm1d(512), # instead of dropout nn.Linear(512, num_classes, bias=enable_bias)) elif", "nn.Conv2d(6, 16, 5, bias=enable_bias) self.fc3 = nn.Linear(16 * 5 * 5, 120, bias=enable_bias)", "config, num_classes=10, enable_bias=True, enable_dump_features=False): super().__init__() self.enable_dump_features = enable_dump_features if enable_dump_features: self.features_block1 = self.make_layers([64,", "[64, 64, 'M', 128, 128, 'M', 256, 256, [256], 'M', 512, 512, [512],", "= self.make_layers([128, 128, 'M'], in_channels=64, batch_norm=True, enable_bias=enable_bias) self.features_block3 = self.make_layers([256, 256, [256], 'M'],", "512, bias=enable_bias), nn.ReLU(True), nn.BatchNorm1d(512), # instead of dropout nn.Linear(512, num_classes, bias=enable_bias)) elif config", "x): x = F.relu(self.conv1(x)) x = F.max_pool2d(x, 2) x = F.relu(self.conv2(x)) x =", "84, bias=enable_bias) self.fc5 = nn.Linear(84, 10, bias=enable_bias) def forward(self, x): x = F.relu(self.conv1(x))", "512 * 7 * 7 in the original VGG nn.ReLU(True), nn.BatchNorm1d(512), # instead", "nn.Linear(500, 10, bias=enable_bias) def forward(self, x): x = F.relu(self.conv1(x)) x = F.max_pool2d(x, 2)", "dropout layers with BatchNorm \"\"\" def __init__(self, config, num_classes=10, enable_bias=True, enable_dump_features=False): super().__init__() self.enable_dump_features", "bias=enable_bias), nn.ReLU(True), nn.BatchNorm1d(512), # instead of dropout nn.Linear(512, num_classes, bias=enable_bias)) elif config ==", "bias=enable_bias), nn.BatchNorm2d(384), nn.ReLU(inplace=True), nn.Conv2d(384, 256, kernel_size=3, stride=2, padding=1, bias=enable_bias), nn.BatchNorm2d(256), nn.ReLU(inplace=True)) self.fc_base =", "F.relu(self.fc3(x.view(-1, 16 * 5 * 5))) x = F.relu(self.fc4(x)) x = F.log_softmax(self.fc5(x), dim=1)", "F.relu(self.conv1(x)) x = F.max_pool2d(x, 2) x = F.relu(self.conv2(x)) x = F.max_pool2d(x, 2) x", 
"enable_bias=enable_bias) self.features_block3 = self.make_layers([256, 256, [256], 'M'], in_channels=128, batch_norm=True, enable_bias=enable_bias) self.features_block4 = self.make_layers([512,", "gt.detach().squeeze().cpu().numpy(), 'b1': feat_block1.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b2': feat_block2.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b3': feat_block3.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b4': feat_block4.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b5': x.detach().squeeze().cpu().numpy()}) else: x", "256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M']} class", "official LeNet-5. \"\"\" def __init__(self, enable_bias=True): super().__init__() self.conv1 = nn.Conv2d(1, 20, 5, padding=0,", "256, 256, [256], 'M', 512, 512, [512], 'M', 512, 512, [512], 'M'], 'D':", "k), nn.BatchNorm1d(1024 * k), nn.ReLU(inplace=True), nn.Linear(1024 * k, num_classes)) def forward(self, x): x", "self.fc_base = nn.Sequential( nn.Linear(256, 1024 * k), nn.BatchNorm1d(1024 * k), nn.ReLU(inplace=True), nn.Linear(1024 *", "300, bias=enable_bias) self.fc2 = nn.Linear(300, 100, bias=enable_bias) self.fc3 = nn.Linear(100, 10, bias=enable_bias) def", "+= [nn.MaxPool2d(kernel_size=2, stride=2)] else: if isinstance(v, list): v, kernel_size, padding = v[0], 1,", "nn.Sequential( nn.Linear(512, 512, bias=enable_bias), # 512 * 7 * 7 in the original", "batch_norm=True, enable_bias=enable_bias) self.features_block4 = self.make_layers([512, 512, [512], 'M'], in_channels=256, batch_norm=True, enable_bias=enable_bias) self.features_block5 =", "feat_block4.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b5': x.detach().squeeze().cpu().numpy()}) else: x = self.features(input) x = x.view(x.size(0), -1) x =", "in SNIP paper: 1. VGG-C (16 layers) 2. VGG-D (16 layers) 3. VGG-like", "if v == 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else: if isinstance(v, list): v,", "VGG-D (16 layers) 3. 
VGG-like Some of the differences: * Reduced size of", "layers += [conv2d, nn.ReLU(inplace=True)] in_channels = v return nn.Sequential(*layers) def forward(self, input, epoch_id=None,", "96, kernel_size=11, stride=2, padding=5, bias=enable_bias), nn.BatchNorm2d(96), nn.ReLU(inplace=True), nn.Conv2d(96, 256, kernel_size=5, stride=2, padding=2, bias=enable_bias),", "x = F.max_pool2d(x, 2) x = F.relu(self.conv2(x)) x = F.max_pool2d(x, 2) x =", "256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M']} class VGG(nn.Module): \"\"\"", "x): x = self.conv_base(x) x = x.view(x.size(0), -1) x = self.fc_base(x) x =", "self.fc3 = nn.Linear(100, 10, bias=enable_bias) def forward(self, x): x = F.relu(self.fc1(x.view(-1, 784))) x", "'M', 256, 256, [256], 'M', 512, 512, [512], 'M', 512, 512, [512], 'M'],", "'like': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512,", "batch_norm=True, enable_bias=enable_bias) if config in {'C', 'D'}: self.classifier = nn.Sequential( nn.Linear(512, 512, bias=enable_bias),", "k, num_classes)) def forward(self, x): x = self.conv_base(x) x = x.view(x.size(0), -1) x", "nn import torch.nn.functional as F import scipy.io as scio from torchvision.models import vgg19_bn,", "2) x = F.relu(self.fc3(x.view(-1, 16 * 5 * 5))) x = F.relu(self.fc4(x)) x", "enable_bias=enable_bias) self.features_block5 = self.make_layers([512, 512, [512], 'M'], in_channels=512, batch_norm=True, enable_bias=enable_bias) else: self.features =", "__init__(self, enable_bias=True): super().__init__() self.conv1 = nn.Conv2d(1, 20, 5, padding=0, bias=enable_bias) self.conv2 = nn.Conv2d(20,", "size of FC layers to 512 * Adjusted flattening to match CIFAR-10 shapes", "dropout nn.Linear(512, 512, bias=enable_bias), nn.ReLU(True), nn.BatchNorm1d(512), # instead of dropout nn.Linear(512, num_classes, bias=enable_bias))", "nn.Linear(300, 100, bias=enable_bias) self.fc3 = nn.Linear(100, 10, bias=enable_bias) def forward(self, x): x =", "[conv2d, nn.ReLU(inplace=True)] in_channels = v return 
nn.Sequential(*layers) def forward(self, input, epoch_id=None, batch_id=None, gt=None):", "input, epoch_id=None, batch_id=None, gt=None): if self.enable_dump_features: feat_block1 = self.features_block1(input) feat_block2 = self.features_block2(feat_block1) feat_block3", "100, bias=enable_bias) self.fc3 = nn.Linear(100, 10, bias=enable_bias) def forward(self, x): x = F.relu(self.fc1(x.view(-1,", "to generate three VGG variants used in SNIP paper: 1. VGG-C (16 layers)", "class VGG(nn.Module): \"\"\" This is a base class to generate three VGG variants", "* 7 in the original VGG nn.ReLU(True), nn.BatchNorm1d(512), # instead of dropout nn.Linear(512,", "# instead of dropout nn.Linear(512, 512, bias=enable_bias), nn.ReLU(True), nn.BatchNorm1d(512), # instead of dropout", "nn.Sequential( nn.Conv2d(3, 96, kernel_size=11, stride=2, padding=5, bias=enable_bias), nn.BatchNorm2d(96), nn.ReLU(inplace=True), nn.Conv2d(96, 256, kernel_size=5, stride=2,", "nn.ReLU(inplace=True)) self.fc_base = nn.Sequential( nn.Linear(256, 1024 * k), nn.BatchNorm1d(1024 * k), nn.ReLU(inplace=True), nn.Linear(1024", "bias=enable_bias) self.fc4 = nn.Linear(500, 10, bias=enable_bias) def forward(self, x): x = F.relu(self.conv1(x)) x", "k), nn.ReLU(inplace=True), nn.Linear(1024 * k, num_classes)) def forward(self, x): x = self.conv_base(x) x", "x = F.max_pool2d(x, 2) x = F.relu(self.fc3(x.view(-1, 16 * 5 * 5))) x", "nn.BatchNorm2d(96), nn.ReLU(inplace=True), nn.Conv2d(96, 256, kernel_size=5, stride=2, padding=2, bias=enable_bias), nn.BatchNorm2d(256), nn.ReLU(inplace=True), nn.Conv2d(256, 384, kernel_size=3,", "5, bias=enable_bias) self.fc3 = nn.Linear(50 * 4 * 4, 500, bias=enable_bias) self.fc4 =", "= F.max_pool2d(x, 2) x = F.relu(self.fc3(x.view(-1, 50 * 4 * 4))) x =", "nn.ReLU(True), nn.BatchNorm1d(512), # instead of dropout nn.Linear(512, num_classes, bias=enable_bias)) else: assert False @staticmethod", "AlexNet(nn.Module): # copy from 
https://medium.com/@kushajreal/training-alexnet-with-tips-and-checks-on-how-to-train-cnns-practical-cnns-in-pytorch-1-61daa679c74a def __init__(self, k=4, num_classes=10, enable_bias=True): super(AlexNet, self).__init__() self.conv_base", "super().__init__() self.conv1 = nn.Conv2d(1, 6, 5, padding=2, bias=enable_bias) self.conv2 = nn.Conv2d(6, 16, 5,", "= F.log_softmax(self.fc5(x), dim=1) return x class LeNet_5_Caffe(nn.Module): \"\"\" This is based on Caffe's", "kernel_size, padding = v[0], 1, 0 else: kernel_size, padding = 3, 1 conv2d", "nn.Conv2d(96, 256, kernel_size=5, stride=2, padding=2, bias=enable_bias), nn.BatchNorm2d(256), nn.ReLU(inplace=True), nn.Conv2d(256, 384, kernel_size=3, stride=2, padding=1,", "__init__(self, enable_bias=True): super().__init__() self.conv1 = nn.Conv2d(1, 6, 5, padding=2, bias=enable_bias) self.conv2 = nn.Conv2d(6,", "[512], 'M'], 'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M',", "128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512,", "512, [512], 'M', 512, 512, [512], 'M'], 'D': [64, 64, 'M', 128, 128,", "self.conv_base = nn.Sequential( nn.Conv2d(3, 96, kernel_size=11, stride=2, padding=5, bias=enable_bias), nn.BatchNorm2d(96), nn.ReLU(inplace=True), nn.Conv2d(96, 256,", "nn.Linear(1024 * k, num_classes)) def forward(self, x): x = self.conv_base(x) x = x.view(x.size(0),", "dim=1) return x VGG_CONFIGS = { 'C': [64, 64, 'M', 128, 128, 'M',", "+= [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] else: layers += [conv2d, nn.ReLU(inplace=True)] in_channels = v return", "if enable_dump_features: self.features_block1 = self.make_layers([64, 64, 'M'], in_channels=3, batch_norm=True, enable_bias=enable_bias) self.features_block2 = self.make_layers([128,", "conv2d = nn.Conv2d(in_channels, v, kernel_size=kernel_size, padding=padding, bias=enable_bias) if batch_norm: layers += [conv2d, nn.BatchNorm2d(v),", "dim=1) return x class AlexNet(nn.Module): # copy from 
https://medium.com/@kushajreal/training-alexnet-with-tips-and-checks-on-how-to-train-cnns-practical-cnns-in-pytorch-1-61daa679c74a def __init__(self, k=4, num_classes=10,", "else: assert False @staticmethod def make_layers(config, batch_norm=False, enable_bias=True, in_channels=3): # TODO: BN yes", "x class AlexNet(nn.Module): # copy from https://medium.com/@kushajreal/training-alexnet-with-tips-and-checks-on-how-to-train-cnns-practical-cnns-in-pytorch-1-61daa679c74a def __init__(self, k=4, num_classes=10, enable_bias=True): super(AlexNet,", "7 in the original VGG nn.ReLU(True), nn.BatchNorm1d(512), # instead of dropout nn.Linear(512, num_classes,", "6, 5, padding=2, bias=enable_bias) self.conv2 = nn.Conv2d(6, 16, 5, bias=enable_bias) self.fc3 = nn.Linear(16", "def __init__(self, enable_bias=True): super().__init__() self.conv1 = nn.Conv2d(1, 6, 5, padding=2, bias=enable_bias) self.conv2 =", "'C': [64, 64, 'M', 128, 128, 'M', 256, 256, [256], 'M', 512, 512,", "'M'], 'like': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512,", "torchvision.models import vgg19_bn, resnet152, densenet161 class LeNet_300_100(nn.Module): def __init__(self, enable_bias=True): # original code", "* Replaced dropout layers with BatchNorm \"\"\" def __init__(self, config, num_classes=10, enable_bias=True, enable_dump_features=False):", "dim=1) return x class LeNet_5_Caffe(nn.Module): \"\"\" This is based on Caffe's implementation of", "is true super().__init__() self.fc1 = nn.Linear(784, 300, bias=enable_bias) self.fc2 = nn.Linear(300, 100, bias=enable_bias)", "CIFAR-10 shapes * Replaced dropout layers with BatchNorm \"\"\" def __init__(self, config, num_classes=10,", "nn.ReLU(inplace=True)] in_channels = v return nn.Sequential(*layers) def forward(self, input, epoch_id=None, batch_id=None, gt=None): if", "= nn.Linear(16 * 5 * 5, 120, bias=enable_bias) self.fc4 = nn.Linear(120, 84, bias=enable_bias)", "nn.ReLU(inplace=True), nn.Conv2d(256, 384, kernel_size=3, stride=2, padding=1, 
bias=enable_bias), nn.BatchNorm2d(384), nn.ReLU(inplace=True), nn.Conv2d(384, 384, kernel_size=3, stride=2,", "x = x.view(x.size(0), -1) x = self.fc_base(x) x = F.log_softmax(x, dim=1) return x", "have padding and therefore intermediate shapes do not match the official LeNet-5. \"\"\"", "16 * 5 * 5))) x = F.relu(self.fc4(x)) x = F.log_softmax(self.fc5(x), dim=1) return", "from the vanilla LeNet-5. Note that the first layer does NOT have padding", "batch_id), {'img': input.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'gt': gt.detach().squeeze().cpu().numpy(), 'b1': feat_block1.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b2': feat_block2.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b3': feat_block3.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b4': feat_block4.detach().squeeze().permute(2,3,1,0).cpu().numpy(),", "nn.Linear(512, 512, bias=enable_bias), # 512 * 7 * 7 in the original VGG", "intermediate shapes do not match the official LeNet-5. \"\"\" def __init__(self, enable_bias=True): super().__init__()", "else: layers += [conv2d, nn.ReLU(inplace=True)] in_channels = v return nn.Sequential(*layers) def forward(self, input,", "super(AlexNet, self).__init__() self.conv_base = nn.Sequential( nn.Conv2d(3, 96, kernel_size=11, stride=2, padding=5, bias=enable_bias), nn.BatchNorm2d(96), nn.ReLU(inplace=True),", "self.features(input) x = x.view(x.size(0), -1) x = self.classifier(x) x = F.log_softmax(x, dim=1) return", "256, [256], 'M', 512, 512, [512], 'M', 512, 512, [512], 'M'], 'D': [64,", "the original VGG nn.ReLU(True), nn.BatchNorm1d(512), # instead of dropout nn.Linear(512, 512, bias=enable_bias), nn.ReLU(True),", "in_channels = v return nn.Sequential(*layers) def forward(self, input, epoch_id=None, batch_id=None, gt=None): if self.enable_dump_features:", "config in {'C', 'D'}: self.classifier = nn.Sequential( nn.Linear(512, 512, bias=enable_bias), # 512 *", "bias=enable_bias) self.fc2 = nn.Linear(300, 100, bias=enable_bias) self.fc3 = 
nn.Linear(100, 10, bias=enable_bias) def forward(self,", "F.relu(self.conv2(x)) x = F.max_pool2d(x, 2) x = F.relu(self.fc3(x.view(-1, 16 * 5 * 5)))", "256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], 'like':", "enable_bias=True): # original code is true super().__init__() self.fc1 = nn.Linear(784, 300, bias=enable_bias) self.fc2", "SNIP paper: 1. VGG-C (16 layers) 2. VGG-D (16 layers) 3. VGG-like Some", "= F.relu(self.conv2(x)) x = F.max_pool2d(x, 2) x = F.relu(self.fc3(x.view(-1, 16 * 5 *", "nn.Conv2d(in_channels, v, kernel_size=kernel_size, padding=padding, bias=enable_bias) if batch_norm: layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] else:", "512, [512], 'M'], in_channels=256, batch_norm=True, enable_bias=enable_bias) self.features_block5 = self.make_layers([512, 512, [512], 'M'], in_channels=512,", "x = F.relu(self.fc3(x.view(-1, 16 * 5 * 5))) x = F.relu(self.fc4(x)) x =", "import vgg19_bn, resnet152, densenet161 class LeNet_300_100(nn.Module): def __init__(self, enable_bias=True): # original code is", "self.features_block2(feat_block1) feat_block3 = self.features_block3(feat_block2) feat_block4 = self.features_block4(feat_block3) x = self.features_block5(feat_block4) if (epoch_id is", "torch.nn as nn import torch.nn.functional as F import scipy.io as scio from torchvision.models", "v, kernel_size=kernel_size, padding=padding, bias=enable_bias) if batch_norm: layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] else: layers", "num_classes, bias=enable_bias)) else: assert False @staticmethod def make_layers(config, batch_norm=False, enable_bias=True, in_channels=3): # TODO:", "256, kernel_size=5, stride=2, padding=2, bias=enable_bias), nn.BatchNorm2d(256), nn.ReLU(inplace=True), nn.Conv2d(256, 384, kernel_size=3, stride=2, padding=1, bias=enable_bias),", "self.fc3 = nn.Linear(50 * 4 * 4, 500, bias=enable_bias) self.fc4 = nn.Linear(500, 10,", "x class LeNet_5_Caffe(nn.Module): \"\"\" This is based on Caffe's implementation of 
Lenet-5 and", "nn.BatchNorm1d(1024 * k), nn.ReLU(inplace=True), nn.Linear(1024 * k, num_classes)) def forward(self, x): x =", "5))) x = F.relu(self.fc4(x)) x = F.log_softmax(self.fc5(x), dim=1) return x class LeNet_5_Caffe(nn.Module): \"\"\"", "if batch_norm: layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] else: layers += [conv2d, nn.ReLU(inplace=True)] in_channels", "self.fc1 = nn.Linear(784, 300, bias=enable_bias) self.fc2 = nn.Linear(300, 100, bias=enable_bias) self.fc3 = nn.Linear(100,", "None) and (batch_id is not None): scio.savemat('../checkpoints/inter_features_epoch{}_batch{}.mat'.format(epoch_id, batch_id), {'img': input.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'gt': gt.detach().squeeze().cpu().numpy(), 'b1':", "differences: * Reduced size of FC layers to 512 * Adjusted flattening to", "class LeNet_300_100(nn.Module): def __init__(self, enable_bias=True): # original code is true super().__init__() self.fc1 =", "4, 500, bias=enable_bias) self.fc4 = nn.Linear(500, 10, bias=enable_bias) def forward(self, x): x =", "= self.features_block5(feat_block4) if (epoch_id is not None) and (batch_id is not None): scio.savemat('../checkpoints/inter_features_epoch{}_batch{}.mat'.format(epoch_id,", "= F.log_softmax(x, dim=1) return x class AlexNet(nn.Module): # copy from https://medium.com/@kushajreal/training-alexnet-with-tips-and-checks-on-how-to-train-cnns-practical-cnns-in-pytorch-1-61daa679c74a def __init__(self,", "therefore intermediate shapes do not match the official LeNet-5. 
\"\"\" def __init__(self, enable_bias=True):", "original VGG nn.ReLU(True), nn.BatchNorm1d(512), # instead of dropout nn.Linear(512, 512, bias=enable_bias), nn.ReLU(True), nn.BatchNorm1d(512),", "nn.Conv2d(384, 256, kernel_size=3, stride=2, padding=1, bias=enable_bias), nn.BatchNorm2d(256), nn.ReLU(inplace=True)) self.fc_base = nn.Sequential( nn.Linear(256, 1024", "= nn.Conv2d(in_channels, v, kernel_size=kernel_size, padding=padding, bias=enable_bias) if batch_norm: layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]", "F.max_pool2d(x, 2) x = F.relu(self.fc3(x.view(-1, 50 * 4 * 4))) x = F.log_softmax(self.fc4(x),", "'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512,", "x = F.log_softmax(x, dim=1) return x class AlexNet(nn.Module): # copy from https://medium.com/@kushajreal/training-alexnet-with-tips-and-checks-on-how-to-train-cnns-practical-cnns-in-pytorch-1-61daa679c74a def", "# instead of dropout nn.Linear(512, num_classes, bias=enable_bias)) elif config == 'like': self.classifier =", "not None): scio.savemat('../checkpoints/inter_features_epoch{}_batch{}.mat'.format(epoch_id, batch_id), {'img': input.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'gt': gt.detach().squeeze().cpu().numpy(), 'b1': feat_block1.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b2': feat_block2.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b3':", "5 * 5, 120, bias=enable_bias) self.fc4 = nn.Linear(120, 84, bias=enable_bias) self.fc5 = nn.Linear(84,", "= F.relu(self.fc1(x.view(-1, 784))) x = F.relu(self.fc2(x)) return F.log_softmax(self.fc3(x), dim=1) class LeNet_5(nn.Module): def __init__(self,", "enable_bias=enable_bias) else: self.features = self.make_layers(VGG_CONFIGS[config], batch_norm=True, enable_bias=enable_bias) if config in {'C', 'D'}: self.classifier", "'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512,", "feat_block3.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b4': 
feat_block4.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b5': x.detach().squeeze().cpu().numpy()}) else: x = self.features(input) x = x.view(x.size(0), -1)", "https://medium.com/@kushajreal/training-alexnet-with-tips-and-checks-on-how-to-train-cnns-practical-cnns-in-pytorch-1-61daa679c74a def __init__(self, k=4, num_classes=10, enable_bias=True): super(AlexNet, self).__init__() self.conv_base = nn.Sequential( nn.Conv2d(3, 96,", "* k), nn.ReLU(inplace=True), nn.Linear(1024 * k, 1024 * k), nn.BatchNorm1d(1024 * k), nn.ReLU(inplace=True),", "F.log_softmax(x, dim=1) return x class AlexNet(nn.Module): # copy from https://medium.com/@kushajreal/training-alexnet-with-tips-and-checks-on-how-to-train-cnns-practical-cnns-in-pytorch-1-61daa679c74a def __init__(self, k=4,", "else: x = self.features(input) x = x.view(x.size(0), -1) x = self.classifier(x) x =", "512, 512, 512, 'M']} class VGG(nn.Module): \"\"\" This is a base class to", "self.fc4 = nn.Linear(500, 10, bias=enable_bias) def forward(self, x): x = F.relu(self.conv1(x)) x =", "[512], 'M'], in_channels=256, batch_norm=True, enable_bias=enable_bias) self.features_block5 = self.make_layers([512, 512, [512], 'M'], in_channels=512, batch_norm=True,", "stride=2)] else: if isinstance(v, list): v, kernel_size, padding = v[0], 1, 0 else:", "False @staticmethod def make_layers(config, batch_norm=False, enable_bias=True, in_channels=3): # TODO: BN yes or no?", "layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else: if isinstance(v, list): v, kernel_size, padding = v[0],", "256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], 'like': [64, 64,", "k), nn.BatchNorm1d(1024 * k), nn.ReLU(inplace=True), nn.Linear(1024 * k, 1024 * k), nn.BatchNorm1d(1024 *", "* Reduced size of FC layers to 512 * Adjusted flattening to match", "layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] else: layers += [conv2d, nn.ReLU(inplace=True)] in_channels = v", "feat_block4 = self.features_block4(feat_block3) x = 
self.features_block5(feat_block4) if (epoch_id is not None) and (batch_id", "base class to generate three VGG variants used in SNIP paper: 1. VGG-C", "20, 5, padding=0, bias=enable_bias) self.conv2 = nn.Conv2d(20, 50, 5, bias=enable_bias) self.fc3 = nn.Linear(50", "1. VGG-C (16 layers) 2. VGG-D (16 layers) 3. VGG-like Some of the", "for idx, v in enumerate(config): if v == 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)]", "self.fc4 = nn.Linear(120, 84, bias=enable_bias) self.fc5 = nn.Linear(84, 10, bias=enable_bias) def forward(self, x):", "512, 512, 512, 'M', 512, 512, 512, 'M'], 'like': [64, 64, 'M', 128,", "as scio from torchvision.models import vgg19_bn, resnet152, densenet161 class LeNet_300_100(nn.Module): def __init__(self, enable_bias=True):", "num_classes=10, enable_bias=True, enable_dump_features=False): super().__init__() self.enable_dump_features = enable_dump_features if enable_dump_features: self.features_block1 = self.make_layers([64, 64,", "x.detach().squeeze().cpu().numpy()}) else: x = self.features(input) x = x.view(x.size(0), -1) x = self.classifier(x) x", "self).__init__() self.conv_base = nn.Sequential( nn.Conv2d(3, 96, kernel_size=11, stride=2, padding=5, bias=enable_bias), nn.BatchNorm2d(96), nn.ReLU(inplace=True), nn.Conv2d(96,", "LeNet_300_100(nn.Module): def __init__(self, enable_bias=True): # original code is true super().__init__() self.fc1 = nn.Linear(784,", "do not match the official LeNet-5. 
\"\"\" def __init__(self, enable_bias=True): super().__init__() self.conv1 =", "7 in the original VGG nn.ReLU(True), nn.BatchNorm1d(512), # instead of dropout nn.Linear(512, 512,", "in_channels=256, batch_norm=True, enable_bias=enable_bias) self.features_block5 = self.make_layers([512, 512, [512], 'M'], in_channels=512, batch_norm=True, enable_bias=enable_bias) else:", "feat_block3 = self.features_block3(feat_block2) feat_block4 = self.features_block4(feat_block3) x = self.features_block5(feat_block4) if (epoch_id is not", "self.make_layers([512, 512, [512], 'M'], in_channels=256, batch_norm=True, enable_bias=enable_bias) self.features_block5 = self.make_layers([512, 512, [512], 'M'],", "'like': self.classifier = nn.Sequential( nn.Linear(512, 512, bias=enable_bias), # 512 * 7 * 7", "not match the official LeNet-5. \"\"\" def __init__(self, enable_bias=True): super().__init__() self.conv1 = nn.Conv2d(1,", "self.fc2 = nn.Linear(300, 100, bias=enable_bias) self.fc3 = nn.Linear(100, 10, bias=enable_bias) def forward(self, x):", "= self.features_block2(feat_block1) feat_block3 = self.features_block3(feat_block2) feat_block4 = self.features_block4(feat_block3) x = self.features_block5(feat_block4) if (epoch_id", "kernel_size=kernel_size, padding=padding, bias=enable_bias) if batch_norm: layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] else: layers +=", "slightly different from the vanilla LeNet-5. 
Note that the first layer does NOT", "= nn.Linear(120, 84, bias=enable_bias) self.fc5 = nn.Linear(84, 10, bias=enable_bias) def forward(self, x): x", "4))) x = F.log_softmax(self.fc4(x), dim=1) return x VGG_CONFIGS = { 'C': [64, 64,", "the original VGG nn.ReLU(True), nn.BatchNorm1d(512), # instead of dropout nn.Linear(512, num_classes, bias=enable_bias)) else:", "feat_block2 = self.features_block2(feat_block1) feat_block3 = self.features_block3(feat_block2) feat_block4 = self.features_block4(feat_block3) x = self.features_block5(feat_block4) if", "self.fc3 = nn.Linear(16 * 5 * 5, 120, bias=enable_bias) self.fc4 = nn.Linear(120, 84,", "5, padding=0, bias=enable_bias) self.conv2 = nn.Conv2d(20, 50, 5, bias=enable_bias) self.fc3 = nn.Linear(50 *", "if (epoch_id is not None) and (batch_id is not None): scio.savemat('../checkpoints/inter_features_epoch{}_batch{}.mat'.format(epoch_id, batch_id), {'img':", "kernel_size=3, stride=2, padding=1, bias=enable_bias), nn.BatchNorm2d(384), nn.ReLU(inplace=True), nn.Conv2d(384, 384, kernel_size=3, stride=2, padding=1, bias=enable_bias), nn.BatchNorm2d(384),", "bias=enable_bias)) else: assert False @staticmethod def make_layers(config, batch_norm=False, enable_bias=True, in_channels=3): # TODO: BN", "'M', 512, 512, [512], 'M'], 'D': [64, 64, 'M', 128, 128, 'M', 256,", "enable_dump_features if enable_dump_features: self.features_block1 = self.make_layers([64, 64, 'M'], in_channels=3, batch_norm=True, enable_bias=enable_bias) self.features_block2 =", "nn.BatchNorm2d(256), nn.ReLU(inplace=True)) self.fc_base = nn.Sequential( nn.Linear(256, 1024 * k), nn.BatchNorm1d(1024 * k), nn.ReLU(inplace=True),", "'M'], in_channels=128, batch_norm=True, enable_bias=enable_bias) self.features_block4 = self.make_layers([512, 512, [512], 'M'], in_channels=256, batch_norm=True, enable_bias=enable_bias)", "nn.ReLU(inplace=True), nn.Conv2d(384, 256, kernel_size=3, stride=2, padding=1, bias=enable_bias), nn.BatchNorm2d(256), nn.ReLU(inplace=True)) 
self.fc_base = nn.Sequential( nn.Linear(256,", "256, kernel_size=3, stride=2, padding=1, bias=enable_bias), nn.BatchNorm2d(256), nn.ReLU(inplace=True)) self.fc_base = nn.Sequential( nn.Linear(256, 1024 *", "F.log_softmax(self.fc4(x), dim=1) return x VGG_CONFIGS = { 'C': [64, 64, 'M', 128, 128,", "bias=enable_bias), nn.BatchNorm2d(256), nn.ReLU(inplace=True), nn.Conv2d(256, 384, kernel_size=3, stride=2, padding=1, bias=enable_bias), nn.BatchNorm2d(384), nn.ReLU(inplace=True), nn.Conv2d(384, 384,", "= self.make_layers([64, 64, 'M'], in_channels=3, batch_norm=True, enable_bias=enable_bias) self.features_block2 = self.make_layers([128, 128, 'M'], in_channels=64,", "self.features_block1(input) feat_block2 = self.features_block2(feat_block1) feat_block3 = self.features_block3(feat_block2) feat_block4 = self.features_block4(feat_block3) x = self.features_block5(feat_block4)", "[512], 'M'], in_channels=512, batch_norm=True, enable_bias=enable_bias) else: self.features = self.make_layers(VGG_CONFIGS[config], batch_norm=True, enable_bias=enable_bias) if config", "stride=2, padding=1, bias=enable_bias), nn.BatchNorm2d(256), nn.ReLU(inplace=True)) self.fc_base = nn.Sequential( nn.Linear(256, 1024 * k), nn.BatchNorm1d(1024", "[512], 'M', 512, 512, [512], 'M'], 'D': [64, 64, 'M', 128, 128, 'M',", "== 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else: if isinstance(v, list): v, kernel_size, padding", "resnet152, densenet161 class LeNet_300_100(nn.Module): def __init__(self, enable_bias=True): # original code is true super().__init__()", "Reduced size of FC layers to 512 * Adjusted flattening to match CIFAR-10", "nn.Sequential( nn.Linear(256, 1024 * k), nn.BatchNorm1d(1024 * k), nn.ReLU(inplace=True), nn.Linear(1024 * k, 1024", "nn.BatchNorm1d(1024 * k), nn.ReLU(inplace=True), nn.Linear(1024 * k, 1024 * k), nn.BatchNorm1d(1024 * k),", "= F.relu(self.fc3(x.view(-1, 50 * 4 * 4))) x = F.log_softmax(self.fc4(x), dim=1) return x", "paper: 1. VGG-C (16 layers) 2. 
VGG-D (16 layers) 3. VGG-like Some of", "F.log_softmax(self.fc3(x), dim=1) class LeNet_5(nn.Module): def __init__(self, enable_bias=True): super().__init__() self.conv1 = nn.Conv2d(1, 6, 5,", "self.features_block2 = self.make_layers([128, 128, 'M'], in_channels=64, batch_norm=True, enable_bias=enable_bias) self.features_block3 = self.make_layers([256, 256, [256],", "self.features_block3 = self.make_layers([256, 256, [256], 'M'], in_channels=128, batch_norm=True, enable_bias=enable_bias) self.features_block4 = self.make_layers([512, 512,", "= { 'C': [64, 64, 'M', 128, 128, 'M', 256, 256, [256], 'M',", "of dropout nn.Linear(512, num_classes, bias=enable_bias)) elif config == 'like': self.classifier = nn.Sequential( nn.Linear(512,", "self.classifier = nn.Sequential( nn.Linear(512, 512, bias=enable_bias), # 512 * 7 * 7 in", "and is slightly different from the vanilla LeNet-5. Note that the first layer", "stride=2, padding=5, bias=enable_bias), nn.BatchNorm2d(96), nn.ReLU(inplace=True), nn.Conv2d(96, 256, kernel_size=5, stride=2, padding=2, bias=enable_bias), nn.BatchNorm2d(256), nn.ReLU(inplace=True),", "__init__(self, k=4, num_classes=10, enable_bias=True): super(AlexNet, self).__init__() self.conv_base = nn.Sequential( nn.Conv2d(3, 96, kernel_size=11, stride=2,", "different from the vanilla LeNet-5. Note that the first layer does NOT have", "BN yes or no? 
layers = [] for idx, v in enumerate(config): if", "{'C', 'D'}: self.classifier = nn.Sequential( nn.Linear(512, 512, bias=enable_bias), # 512 * 7 *", "'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], 'like': [64, 64, 'M',", "return nn.Sequential(*layers) def forward(self, input, epoch_id=None, batch_id=None, gt=None): if self.enable_dump_features: feat_block1 = self.features_block1(input)", "= nn.Linear(784, 300, bias=enable_bias) self.fc2 = nn.Linear(300, 100, bias=enable_bias) self.fc3 = nn.Linear(100, 10,", "'M', 512, 512, 512, 'M', 512, 512, 512, 'M']} class VGG(nn.Module): \"\"\" This", "in_channels=3): # TODO: BN yes or no? layers = [] for idx, v", "shapes * Replaced dropout layers with BatchNorm \"\"\" def __init__(self, config, num_classes=10, enable_bias=True,", "self.features = self.make_layers(VGG_CONFIGS[config], batch_norm=True, enable_bias=enable_bias) if config in {'C', 'D'}: self.classifier = nn.Sequential(", "layers = [] for idx, v in enumerate(config): if v == 'M': layers", "from torchvision.models import vgg19_bn, resnet152, densenet161 class LeNet_300_100(nn.Module): def __init__(self, enable_bias=True): # original", "enable_bias=enable_bias) self.features_block2 = self.make_layers([128, 128, 'M'], in_channels=64, batch_norm=True, enable_bias=enable_bias) self.features_block3 = self.make_layers([256, 256,", "x = self.features(input) x = x.view(x.size(0), -1) x = self.classifier(x) x = F.log_softmax(x,", "input.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'gt': gt.detach().squeeze().cpu().numpy(), 'b1': feat_block1.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b2': feat_block2.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b3': feat_block3.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b4': feat_block4.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b5': x.detach().squeeze().cpu().numpy()})", "VGG(nn.Module): \"\"\" This is a base class to generate three VGG variants used", "batch_norm: layers += [conv2d, nn.BatchNorm2d(v), 
nn.ReLU(inplace=True)] else: layers += [conv2d, nn.ReLU(inplace=True)] in_channels =", "epoch_id=None, batch_id=None, gt=None): if self.enable_dump_features: feat_block1 = self.features_block1(input) feat_block2 = self.features_block2(feat_block1) feat_block3 =", "2) x = F.relu(self.conv2(x)) x = F.max_pool2d(x, 2) x = F.relu(self.fc3(x.view(-1, 50 *", "generate three VGG variants used in SNIP paper: 1. VGG-C (16 layers) 2.", "is not None): scio.savemat('../checkpoints/inter_features_epoch{}_batch{}.mat'.format(epoch_id, batch_id), {'img': input.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'gt': gt.detach().squeeze().cpu().numpy(), 'b1': feat_block1.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b2': feat_block2.detach().squeeze().permute(2,3,1,0).cpu().numpy(),", "= self.classifier(x) x = F.log_softmax(x, dim=1) return x class AlexNet(nn.Module): # copy from", "F.relu(self.fc4(x)) x = F.log_softmax(self.fc5(x), dim=1) return x class LeNet_5_Caffe(nn.Module): \"\"\" This is based", "3, 1 conv2d = nn.Conv2d(in_channels, v, kernel_size=kernel_size, padding=padding, bias=enable_bias) if batch_norm: layers +=", "Caffe's implementation of Lenet-5 and is slightly different from the vanilla LeNet-5. Note", "= self.make_layers([512, 512, [512], 'M'], in_channels=256, batch_norm=True, enable_bias=enable_bias) self.features_block5 = self.make_layers([512, 512, [512],", "dropout nn.Linear(512, num_classes, bias=enable_bias)) else: assert False @staticmethod def make_layers(config, batch_norm=False, enable_bias=True, in_channels=3):", "= nn.Linear(50 * 4 * 4, 500, bias=enable_bias) self.fc4 = nn.Linear(500, 10, bias=enable_bias)", "* k), nn.ReLU(inplace=True), nn.Linear(1024 * k, num_classes)) def forward(self, x): x = self.conv_base(x)", "and therefore intermediate shapes do not match the official LeNet-5. \"\"\" def __init__(self,", "512, 512, 'M', 512, 512, 512, 'M']} class VGG(nn.Module): \"\"\" This is a", "the vanilla LeNet-5. 
Note that the first layer does NOT have padding and", "'M', 512, 512, 512, 'M']} class VGG(nn.Module): \"\"\" This is a base class", "enable_dump_features: self.features_block1 = self.make_layers([64, 64, 'M'], in_channels=3, batch_norm=True, enable_bias=enable_bias) self.features_block2 = self.make_layers([128, 128,", "self.features_block5 = self.make_layers([512, 512, [512], 'M'], in_channels=512, batch_norm=True, enable_bias=enable_bias) else: self.features = self.make_layers(VGG_CONFIGS[config],", "* 4, 500, bias=enable_bias) self.fc4 = nn.Linear(500, 10, bias=enable_bias) def forward(self, x): x", "'D'}: self.classifier = nn.Sequential( nn.Linear(512, 512, bias=enable_bias), # 512 * 7 * 7", "stride=2, padding=1, bias=enable_bias), nn.BatchNorm2d(384), nn.ReLU(inplace=True), nn.Conv2d(384, 384, kernel_size=3, stride=2, padding=1, bias=enable_bias), nn.BatchNorm2d(384), nn.ReLU(inplace=True),", "assert False @staticmethod def make_layers(config, batch_norm=False, enable_bias=True, in_channels=3): # TODO: BN yes or", "'M', 128, 128, 'M', 256, 256, [256], 'M', 512, 512, [512], 'M', 512,", "* 7 * 7 in the original VGG nn.ReLU(True), nn.BatchNorm1d(512), # instead of", "nn.Sequential(*layers) def forward(self, input, epoch_id=None, batch_id=None, gt=None): if self.enable_dump_features: feat_block1 = self.features_block1(input) feat_block2", "if self.enable_dump_features: feat_block1 = self.features_block1(input) feat_block2 = self.features_block2(feat_block1) feat_block3 = self.features_block3(feat_block2) feat_block4 =", "dropout nn.Linear(512, num_classes, bias=enable_bias)) elif config == 'like': self.classifier = nn.Sequential( nn.Linear(512, 512,", "= self.make_layers([256, 256, [256], 'M'], in_channels=128, batch_norm=True, enable_bias=enable_bias) self.features_block4 = self.make_layers([512, 512, [512],", "class to generate three VGG variants used in SNIP paper: 1. 
VGG-C (16", "feat_block1.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b2': feat_block2.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b3': feat_block3.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b4': feat_block4.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b5': x.detach().squeeze().cpu().numpy()}) else: x = self.features(input)", "[64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512,", "384, kernel_size=3, stride=2, padding=1, bias=enable_bias), nn.BatchNorm2d(384), nn.ReLU(inplace=True), nn.Conv2d(384, 256, kernel_size=3, stride=2, padding=1, bias=enable_bias),", "5, bias=enable_bias) self.fc3 = nn.Linear(16 * 5 * 5, 120, bias=enable_bias) self.fc4 =", "512, 'M', 512, 512, 512, 'M']} class VGG(nn.Module): \"\"\" This is a base", "5, padding=2, bias=enable_bias) self.conv2 = nn.Conv2d(6, 16, 5, bias=enable_bias) self.fc3 = nn.Linear(16 *", "Replaced dropout layers with BatchNorm \"\"\" def __init__(self, config, num_classes=10, enable_bias=True, enable_dump_features=False): super().__init__()", "'M']} class VGG(nn.Module): \"\"\" This is a base class to generate three VGG", "'M', 512, 512, [512], 'M', 512, 512, [512], 'M'], 'D': [64, 64, 'M',", "LeNet-5. 
Note that the first layer does NOT have padding and therefore intermediate", "128, 128, 'M', 256, 256, [256], 'M', 512, 512, [512], 'M', 512, 512,", "512, [512], 'M'], 'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256,", "import torch.nn as nn import torch.nn.functional as F import scipy.io as scio from", "5 * 5))) x = F.relu(self.fc4(x)) x = F.log_softmax(self.fc5(x), dim=1) return x class", "= nn.Conv2d(20, 50, 5, bias=enable_bias) self.fc3 = nn.Linear(50 * 4 * 4, 500,", "50, 5, bias=enable_bias) self.fc3 = nn.Linear(50 * 4 * 4, 500, bias=enable_bias) self.fc4", "in_channels=128, batch_norm=True, enable_bias=enable_bias) self.features_block4 = self.make_layers([512, 512, [512], 'M'], in_channels=256, batch_norm=True, enable_bias=enable_bias) self.features_block5", "[] for idx, v in enumerate(config): if v == 'M': layers += [nn.MaxPool2d(kernel_size=2,", "-1) x = self.classifier(x) x = F.log_softmax(x, dim=1) return x class AlexNet(nn.Module): #", "self.make_layers([64, 64, 'M'], in_channels=3, batch_norm=True, enable_bias=enable_bias) self.features_block2 = self.make_layers([128, 128, 'M'], in_channels=64, batch_norm=True,", "* 5 * 5))) x = F.relu(self.fc4(x)) x = F.log_softmax(self.fc5(x), dim=1) return x", "= self.conv_base(x) x = x.view(x.size(0), -1) x = self.fc_base(x) x = F.log_softmax(x, dim=1)", "1, 0 else: kernel_size, padding = 3, 1 conv2d = nn.Conv2d(in_channels, v, kernel_size=kernel_size,", "= 3, 1 conv2d = nn.Conv2d(in_channels, v, kernel_size=kernel_size, padding=padding, bias=enable_bias) if batch_norm: layers", "vgg19_bn, resnet152, densenet161 class LeNet_300_100(nn.Module): def __init__(self, enable_bias=True): # original code is true", "x = F.relu(self.conv1(x)) x = F.max_pool2d(x, 2) x = F.relu(self.conv2(x)) x = F.max_pool2d(x,", "Adjusted flattening to match CIFAR-10 shapes * Replaced dropout layers with BatchNorm \"\"\"", "nn.Conv2d(20, 50, 5, bias=enable_bias) self.fc3 = nn.Linear(50 * 4 * 4, 500, bias=enable_bias)", "idx, v in 
enumerate(config): if v == 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else:", "if isinstance(v, list): v, kernel_size, padding = v[0], 1, 0 else: kernel_size, padding", "copy from https://medium.com/@kushajreal/training-alexnet-with-tips-and-checks-on-how-to-train-cnns-practical-cnns-in-pytorch-1-61daa679c74a def __init__(self, k=4, num_classes=10, enable_bias=True): super(AlexNet, self).__init__() self.conv_base = nn.Sequential(", "forward(self, input, epoch_id=None, batch_id=None, gt=None): if self.enable_dump_features: feat_block1 = self.features_block1(input) feat_block2 = self.features_block2(feat_block1)", "x.view(x.size(0), -1) x = self.classifier(x) x = F.log_softmax(x, dim=1) return x class AlexNet(nn.Module):", "dim=1) class LeNet_5(nn.Module): def __init__(self, enable_bias=True): super().__init__() self.conv1 = nn.Conv2d(1, 6, 5, padding=2,", "FC layers to 512 * Adjusted flattening to match CIFAR-10 shapes * Replaced", "F.max_pool2d(x, 2) x = F.relu(self.conv2(x)) x = F.max_pool2d(x, 2) x = F.relu(self.fc3(x.view(-1, 50", "def __init__(self, config, num_classes=10, enable_bias=True, enable_dump_features=False): super().__init__() self.enable_dump_features = enable_dump_features if enable_dump_features: self.features_block1", "k), nn.ReLU(inplace=True), nn.Linear(1024 * k, 1024 * k), nn.BatchNorm1d(1024 * k), nn.ReLU(inplace=True), nn.Linear(1024", "[conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] else: layers += [conv2d, nn.ReLU(inplace=True)] in_channels = v return nn.Sequential(*layers)", "x = F.log_softmax(self.fc5(x), dim=1) return x class LeNet_5_Caffe(nn.Module): \"\"\" This is based on", "scio.savemat('../checkpoints/inter_features_epoch{}_batch{}.mat'.format(epoch_id, batch_id), {'img': input.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'gt': gt.detach().squeeze().cpu().numpy(), 'b1': feat_block1.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b2': feat_block2.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b3': 
feat_block3.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b4':", "nn.Linear(784, 300, bias=enable_bias) self.fc2 = nn.Linear(300, 100, bias=enable_bias) self.fc3 = nn.Linear(100, 10, bias=enable_bias)", "'M'], in_channels=512, batch_norm=True, enable_bias=enable_bias) else: self.features = self.make_layers(VGG_CONFIGS[config], batch_norm=True, enable_bias=enable_bias) if config in", "return x class AlexNet(nn.Module): # copy from https://medium.com/@kushajreal/training-alexnet-with-tips-and-checks-on-how-to-train-cnns-practical-cnns-in-pytorch-1-61daa679c74a def __init__(self, k=4, num_classes=10, enable_bias=True):", "nn.BatchNorm2d(256), nn.ReLU(inplace=True), nn.Conv2d(256, 384, kernel_size=3, stride=2, padding=1, bias=enable_bias), nn.BatchNorm2d(384), nn.ReLU(inplace=True), nn.Conv2d(384, 384, kernel_size=3,", "VGG-C (16 layers) 2. VGG-D (16 layers) 3. VGG-like Some of the differences:", "== 'like': self.classifier = nn.Sequential( nn.Linear(512, 512, bias=enable_bias), # 512 * 7 *", "instead of dropout nn.Linear(512, num_classes, bias=enable_bias)) elif config == 'like': self.classifier = nn.Sequential(", "128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512,", "x): x = F.relu(self.fc1(x.view(-1, 784))) x = F.relu(self.fc2(x)) return F.log_softmax(self.fc3(x), dim=1) class LeNet_5(nn.Module):", "enable_bias=True): super(AlexNet, self).__init__() self.conv_base = nn.Sequential( nn.Conv2d(3, 96, kernel_size=11, stride=2, padding=5, bias=enable_bias), nn.BatchNorm2d(96),", "1024 * k), nn.BatchNorm1d(1024 * k), nn.ReLU(inplace=True), nn.Linear(1024 * k, num_classes)) def forward(self,", "BatchNorm \"\"\" def __init__(self, config, num_classes=10, enable_bias=True, enable_dump_features=False): super().__init__() self.enable_dump_features = enable_dump_features if", "bias=enable_bias) self.fc4 = nn.Linear(120, 84, bias=enable_bias) self.fc5 = nn.Linear(84, 10, bias=enable_bias) def forward(self,", "bias=enable_bias) self.conv2 = nn.Conv2d(20, 50, 
5, bias=enable_bias) self.fc3 = nn.Linear(50 * 4 *", "k=4, num_classes=10, enable_bias=True): super(AlexNet, self).__init__() self.conv_base = nn.Sequential( nn.Conv2d(3, 96, kernel_size=11, stride=2, padding=5,", "forward(self, x): x = F.relu(self.fc1(x.view(-1, 784))) x = F.relu(self.fc2(x)) return F.log_softmax(self.fc3(x), dim=1) class", "bias=enable_bias)) elif config == 'like': self.classifier = nn.Sequential( nn.Linear(512, 512, bias=enable_bias), # 512", "512, 512, 'M', 512, 512, 512, 'M'], 'like': [64, 64, 'M', 128, 128,", "VGG_CONFIGS = { 'C': [64, 64, 'M', 128, 128, 'M', 256, 256, [256],", "This is a base class to generate three VGG variants used in SNIP", "used in SNIP paper: 1. VGG-C (16 layers) 2. VGG-D (16 layers) 3.", "= nn.Linear(100, 10, bias=enable_bias) def forward(self, x): x = F.relu(self.fc1(x.view(-1, 784))) x =", "self.classifier(x) x = F.log_softmax(x, dim=1) return x class AlexNet(nn.Module): # copy from https://medium.com/@kushajreal/training-alexnet-with-tips-and-checks-on-how-to-train-cnns-practical-cnns-in-pytorch-1-61daa679c74a", "nn.Conv2d(384, 384, kernel_size=3, stride=2, padding=1, bias=enable_bias), nn.BatchNorm2d(384), nn.ReLU(inplace=True), nn.Conv2d(384, 256, kernel_size=3, stride=2, padding=1,", "bias=enable_bias), nn.BatchNorm2d(384), nn.ReLU(inplace=True), nn.Conv2d(384, 384, kernel_size=3, stride=2, padding=1, bias=enable_bias), nn.BatchNorm2d(384), nn.ReLU(inplace=True), nn.Conv2d(384, 256,", "nn.ReLU(True), nn.BatchNorm1d(512), # instead of dropout nn.Linear(512, 512, bias=enable_bias), nn.ReLU(True), nn.BatchNorm1d(512), # instead", "to match CIFAR-10 shapes * Replaced dropout layers with BatchNorm \"\"\" def __init__(self,", "1024 * k), nn.BatchNorm1d(1024 * k), nn.ReLU(inplace=True), nn.Linear(1024 * k, 1024 * k),", "with BatchNorm \"\"\" def __init__(self, config, num_classes=10, enable_bias=True, enable_dump_features=False): super().__init__() self.enable_dump_features = enable_dump_features", "flattening to match 
CIFAR-10 shapes * Replaced dropout layers with BatchNorm \"\"\" def", "kernel_size=11, stride=2, padding=5, bias=enable_bias), nn.BatchNorm2d(96), nn.ReLU(inplace=True), nn.Conv2d(96, 256, kernel_size=5, stride=2, padding=2, bias=enable_bias), nn.BatchNorm2d(256),", "= nn.Linear(84, 10, bias=enable_bias) def forward(self, x): x = F.relu(self.conv1(x)) x = F.max_pool2d(x,", "x = F.relu(self.conv2(x)) x = F.max_pool2d(x, 2) x = F.relu(self.fc3(x.view(-1, 50 * 4", "to 512 * Adjusted flattening to match CIFAR-10 shapes * Replaced dropout layers", "128, 'M', 256, 256, [256], 'M', 512, 512, [512], 'M', 512, 512, [512],", "nn.Conv2d(1, 20, 5, padding=0, bias=enable_bias) self.conv2 = nn.Conv2d(20, 50, 5, bias=enable_bias) self.fc3 =", "padding=padding, bias=enable_bias) if batch_norm: layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] else: layers += [conv2d,", "three VGG variants used in SNIP paper: 1. VGG-C (16 layers) 2. VGG-D", "{ 'C': [64, 64, 'M', 128, 128, 'M', 256, 256, [256], 'M', 512,", "self.conv1 = nn.Conv2d(1, 20, 5, padding=0, bias=enable_bias) self.conv2 = nn.Conv2d(20, 50, 5, bias=enable_bias)", "nn.ReLU(inplace=True), nn.Linear(1024 * k, num_classes)) def forward(self, x): x = self.conv_base(x) x =", "= nn.Conv2d(1, 6, 5, padding=2, bias=enable_bias) self.conv2 = nn.Conv2d(6, 16, 5, bias=enable_bias) self.fc3", "= nn.Conv2d(1, 20, 5, padding=0, bias=enable_bias) self.conv2 = nn.Conv2d(20, 50, 5, bias=enable_bias) self.fc3", "x = F.relu(self.conv2(x)) x = F.max_pool2d(x, 2) x = F.relu(self.fc3(x.view(-1, 16 * 5", "nn.Conv2d(1, 6, 5, padding=2, bias=enable_bias) self.conv2 = nn.Conv2d(6, 16, 5, bias=enable_bias) self.fc3 =", "500, bias=enable_bias) self.fc4 = nn.Linear(500, 10, bias=enable_bias) def forward(self, x): x = F.relu(self.conv1(x))", "= self.features_block3(feat_block2) feat_block4 = self.features_block4(feat_block3) x = self.features_block5(feat_block4) if (epoch_id is not None)", "padding = v[0], 1, 0 else: kernel_size, padding = 3, 1 
conv2d =", "* 4 * 4))) x = F.log_softmax(self.fc4(x), dim=1) return x VGG_CONFIGS = {", "x = x.view(x.size(0), -1) x = self.classifier(x) x = F.log_softmax(x, dim=1) return x", "self.make_layers([512, 512, [512], 'M'], in_channels=512, batch_norm=True, enable_bias=enable_bias) else: self.features = self.make_layers(VGG_CONFIGS[config], batch_norm=True, enable_bias=enable_bias)", "Note that the first layer does NOT have padding and therefore intermediate shapes", "padding=1, bias=enable_bias), nn.BatchNorm2d(384), nn.ReLU(inplace=True), nn.Conv2d(384, 256, kernel_size=3, stride=2, padding=1, bias=enable_bias), nn.BatchNorm2d(256), nn.ReLU(inplace=True)) self.fc_base", "is slightly different from the vanilla LeNet-5. Note that the first layer does", "stride=2, padding=2, bias=enable_bias), nn.BatchNorm2d(256), nn.ReLU(inplace=True), nn.Conv2d(256, 384, kernel_size=3, stride=2, padding=1, bias=enable_bias), nn.BatchNorm2d(384), nn.ReLU(inplace=True),", "x = F.relu(self.fc2(x)) return F.log_softmax(self.fc3(x), dim=1) class LeNet_5(nn.Module): def __init__(self, enable_bias=True): super().__init__() self.conv1", "120, bias=enable_bias) self.fc4 = nn.Linear(120, 84, bias=enable_bias) self.fc5 = nn.Linear(84, 10, bias=enable_bias) def", "x = self.features_block5(feat_block4) if (epoch_id is not None) and (batch_id is not None):", "* k), nn.BatchNorm1d(1024 * k), nn.ReLU(inplace=True), nn.Linear(1024 * k, num_classes)) def forward(self, x):", "'M'], in_channels=256, batch_norm=True, enable_bias=enable_bias) self.features_block5 = self.make_layers([512, 512, [512], 'M'], in_channels=512, batch_norm=True, enable_bias=enable_bias)", "TODO: BN yes or no? 
layers = [] for idx, v in enumerate(config):", "+= [conv2d, nn.ReLU(inplace=True)] in_channels = v return nn.Sequential(*layers) def forward(self, input, epoch_id=None, batch_id=None,", "This is based on Caffe's implementation of Lenet-5 and is slightly different from", "nn.Linear(512, num_classes, bias=enable_bias)) elif config == 'like': self.classifier = nn.Sequential( nn.Linear(512, 512, bias=enable_bias),", "nn.ReLU(inplace=True)] else: layers += [conv2d, nn.ReLU(inplace=True)] in_channels = v return nn.Sequential(*layers) def forward(self,", "nn.Linear(120, 84, bias=enable_bias) self.fc5 = nn.Linear(84, 10, bias=enable_bias) def forward(self, x): x =", "in the original VGG nn.ReLU(True), nn.BatchNorm1d(512), # instead of dropout nn.Linear(512, num_classes, bias=enable_bias))", "def forward(self, input, epoch_id=None, batch_id=None, gt=None): if self.enable_dump_features: feat_block1 = self.features_block1(input) feat_block2 =", "16, 5, bias=enable_bias) self.fc3 = nn.Linear(16 * 5 * 5, 120, bias=enable_bias) self.fc4", "x = F.relu(self.fc3(x.view(-1, 50 * 4 * 4))) x = F.log_softmax(self.fc4(x), dim=1) return", "VGG variants used in SNIP paper: 1. VGG-C (16 layers) 2. VGG-D (16", "implementation of Lenet-5 and is slightly different from the vanilla LeNet-5. 
Note that", "1 conv2d = nn.Conv2d(in_channels, v, kernel_size=kernel_size, padding=padding, bias=enable_bias) if batch_norm: layers += [conv2d,", "self.make_layers([128, 128, 'M'], in_channels=64, batch_norm=True, enable_bias=enable_bias) self.features_block3 = self.make_layers([256, 256, [256], 'M'], in_channels=128,", "import scipy.io as scio from torchvision.models import vgg19_bn, resnet152, densenet161 class LeNet_300_100(nn.Module): def", "512, 512, 512, 'M'], 'like': [64, 64, 'M', 128, 128, 'M', 256, 256,", "F.relu(self.fc3(x.view(-1, 50 * 4 * 4))) x = F.log_softmax(self.fc4(x), dim=1) return x VGG_CONFIGS", "that the first layer does NOT have padding and therefore intermediate shapes do", "x = F.log_softmax(self.fc4(x), dim=1) return x VGG_CONFIGS = { 'C': [64, 64, 'M',", "padding=5, bias=enable_bias), nn.BatchNorm2d(96), nn.ReLU(inplace=True), nn.Conv2d(96, 256, kernel_size=5, stride=2, padding=2, bias=enable_bias), nn.BatchNorm2d(256), nn.ReLU(inplace=True), nn.Conv2d(256,", "import torch.nn.functional as F import scipy.io as scio from torchvision.models import vgg19_bn, resnet152,", "bias=enable_bias) self.fc3 = nn.Linear(100, 10, bias=enable_bias) def forward(self, x): x = F.relu(self.fc1(x.view(-1, 784)))", "the first layer does NOT have padding and therefore intermediate shapes do not", "match CIFAR-10 shapes * Replaced dropout layers with BatchNorm \"\"\" def __init__(self, config,", "{'img': input.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'gt': gt.detach().squeeze().cpu().numpy(), 'b1': feat_block1.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b2': feat_block2.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b3': feat_block3.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b4': feat_block4.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b5':", "kernel_size=3, stride=2, padding=1, bias=enable_bias), nn.BatchNorm2d(256), nn.ReLU(inplace=True)) self.fc_base = nn.Sequential( nn.Linear(256, 1024 * k),", "batch_norm=True, 
enable_bias=enable_bias) self.features_block5 = self.make_layers([512, 512, [512], 'M'], in_channels=512, batch_norm=True, enable_bias=enable_bias) else: self.features", "x VGG_CONFIGS = { 'C': [64, 64, 'M', 128, 128, 'M', 256, 256,", "as F import scipy.io as scio from torchvision.models import vgg19_bn, resnet152, densenet161 class", "bias=enable_bias) def forward(self, x): x = F.relu(self.fc1(x.view(-1, 784))) x = F.relu(self.fc2(x)) return F.log_softmax(self.fc3(x),", "'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else: if isinstance(v, list): v, kernel_size, padding =", "enable_bias=True, enable_dump_features=False): super().__init__() self.enable_dump_features = enable_dump_features if enable_dump_features: self.features_block1 = self.make_layers([64, 64, 'M'],", "self.enable_dump_features = enable_dump_features if enable_dump_features: self.features_block1 = self.make_layers([64, 64, 'M'], in_channels=3, batch_norm=True, enable_bias=enable_bias)", "batch_norm=True, enable_bias=enable_bias) self.features_block3 = self.make_layers([256, 256, [256], 'M'], in_channels=128, batch_norm=True, enable_bias=enable_bias) self.features_block4 =", "gt=None): if self.enable_dump_features: feat_block1 = self.features_block1(input) feat_block2 = self.features_block2(feat_block1) feat_block3 = self.features_block3(feat_block2) feat_block4", "code is true super().__init__() self.fc1 = nn.Linear(784, 300, bias=enable_bias) self.fc2 = nn.Linear(300, 100,", "= F.max_pool2d(x, 2) x = F.relu(self.conv2(x)) x = F.max_pool2d(x, 2) x = F.relu(self.fc3(x.view(-1,", "'b2': feat_block2.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b3': feat_block3.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b4': feat_block4.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b5': x.detach().squeeze().cpu().numpy()}) else: x = self.features(input) x", "self.conv1 = nn.Conv2d(1, 6, 5, padding=2, bias=enable_bias) self.conv2 = nn.Conv2d(6, 16, 5, bias=enable_bias)", "* k), 
nn.BatchNorm1d(1024 * k), nn.ReLU(inplace=True), nn.Linear(1024 * k, 1024 * k), nn.BatchNorm1d(1024", "is based on Caffe's implementation of Lenet-5 and is slightly different from the", "the differences: * Reduced size of FC layers to 512 * Adjusted flattening", "512, 'M']} class VGG(nn.Module): \"\"\" This is a base class to generate three", "[nn.MaxPool2d(kernel_size=2, stride=2)] else: if isinstance(v, list): v, kernel_size, padding = v[0], 1, 0", "vanilla LeNet-5. Note that the first layer does NOT have padding and therefore", "= nn.Linear(300, 100, bias=enable_bias) self.fc3 = nn.Linear(100, 10, bias=enable_bias) def forward(self, x): x", "def __init__(self, enable_bias=True): super().__init__() self.conv1 = nn.Conv2d(1, 20, 5, padding=0, bias=enable_bias) self.conv2 =", "= F.relu(self.fc4(x)) x = F.log_softmax(self.fc5(x), dim=1) return x class LeNet_5_Caffe(nn.Module): \"\"\" This is", "10, bias=enable_bias) def forward(self, x): x = F.relu(self.conv1(x)) x = F.max_pool2d(x, 2) x", "\"\"\" This is a base class to generate three VGG variants used in", "is not None) and (batch_id is not None): scio.savemat('../checkpoints/inter_features_epoch{}_batch{}.mat'.format(epoch_id, batch_id), {'img': input.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'gt':", "feat_block1 = self.features_block1(input) feat_block2 = self.features_block2(feat_block1) feat_block3 = self.features_block3(feat_block2) feat_block4 = self.features_block4(feat_block3) x", "# copy from https://medium.com/@kushajreal/training-alexnet-with-tips-and-checks-on-how-to-train-cnns-practical-cnns-in-pytorch-1-61daa679c74a def __init__(self, k=4, num_classes=10, enable_bias=True): super(AlexNet, self).__init__() self.conv_base =", "256, [256], 'M'], in_channels=128, batch_norm=True, enable_bias=enable_bias) self.features_block4 = self.make_layers([512, 512, [512], 'M'], in_channels=256,", "= self.make_layers([512, 512, [512], 'M'], in_channels=512, batch_norm=True, enable_bias=enable_bias) else: 
self.features = self.make_layers(VGG_CONFIGS[config], batch_norm=True,", "F.relu(self.conv2(x)) x = F.max_pool2d(x, 2) x = F.relu(self.fc3(x.view(-1, 50 * 4 * 4)))", "= [] for idx, v in enumerate(config): if v == 'M': layers +=", "original VGG nn.ReLU(True), nn.BatchNorm1d(512), # instead of dropout nn.Linear(512, num_classes, bias=enable_bias)) else: assert", "nn.BatchNorm2d(384), nn.ReLU(inplace=True), nn.Conv2d(384, 256, kernel_size=3, stride=2, padding=1, bias=enable_bias), nn.BatchNorm2d(256), nn.ReLU(inplace=True)) self.fc_base = nn.Sequential(", "a base class to generate three VGG variants used in SNIP paper: 1.", "nn.Linear(50 * 4 * 4, 500, bias=enable_bias) self.fc4 = nn.Linear(500, 10, bias=enable_bias) def", "[256], 'M', 512, 512, [512], 'M', 512, 512, [512], 'M'], 'D': [64, 64,", "class LeNet_5(nn.Module): def __init__(self, enable_bias=True): super().__init__() self.conv1 = nn.Conv2d(1, 6, 5, padding=2, bias=enable_bias)", "x = self.classifier(x) x = F.log_softmax(x, dim=1) return x class AlexNet(nn.Module): # copy", "F import scipy.io as scio from torchvision.models import vgg19_bn, resnet152, densenet161 class LeNet_300_100(nn.Module):", "padding=1, bias=enable_bias), nn.BatchNorm2d(384), nn.ReLU(inplace=True), nn.Conv2d(384, 384, kernel_size=3, stride=2, padding=1, bias=enable_bias), nn.BatchNorm2d(384), nn.ReLU(inplace=True), nn.Conv2d(384,", "self.features_block4 = self.make_layers([512, 512, [512], 'M'], in_channels=256, batch_norm=True, enable_bias=enable_bias) self.features_block5 = self.make_layers([512, 512,", "instead of dropout nn.Linear(512, 512, bias=enable_bias), nn.ReLU(True), nn.BatchNorm1d(512), # instead of dropout nn.Linear(512,", "def __init__(self, enable_bias=True): # original code is true super().__init__() self.fc1 = nn.Linear(784, 300,", "512, 512, 'M']} class VGG(nn.Module): \"\"\" This is a base class to generate", "nn.BatchNorm2d(v), nn.ReLU(inplace=True)] else: layers += [conv2d, nn.ReLU(inplace=True)] in_channels = v 
return nn.Sequential(*layers) def", "kernel_size=5, stride=2, padding=2, bias=enable_bias), nn.BatchNorm2d(256), nn.ReLU(inplace=True), nn.Conv2d(256, 384, kernel_size=3, stride=2, padding=1, bias=enable_bias), nn.BatchNorm2d(384),", "and (batch_id is not None): scio.savemat('../checkpoints/inter_features_epoch{}_batch{}.mat'.format(epoch_id, batch_id), {'img': input.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'gt': gt.detach().squeeze().cpu().numpy(), 'b1': feat_block1.detach().squeeze().permute(2,3,1,0).cpu().numpy(),", "enable_dump_features=False): super().__init__() self.enable_dump_features = enable_dump_features if enable_dump_features: self.features_block1 = self.make_layers([64, 64, 'M'], in_channels=3,", "@staticmethod def make_layers(config, batch_norm=False, enable_bias=True, in_channels=3): # TODO: BN yes or no? layers", "__init__(self, config, num_classes=10, enable_bias=True, enable_dump_features=False): super().__init__() self.enable_dump_features = enable_dump_features if enable_dump_features: self.features_block1 =", "make_layers(config, batch_norm=False, enable_bias=True, in_channels=3): # TODO: BN yes or no? 
layers = []", "# 512 * 7 * 7 in the original VGG nn.ReLU(True), nn.BatchNorm1d(512), #", "F.log_softmax(self.fc5(x), dim=1) return x class LeNet_5_Caffe(nn.Module): \"\"\" This is based on Caffe's implementation", "class LeNet_5_Caffe(nn.Module): \"\"\" This is based on Caffe's implementation of Lenet-5 and is", "# original code is true super().__init__() self.fc1 = nn.Linear(784, 300, bias=enable_bias) self.fc2 =", "return x VGG_CONFIGS = { 'C': [64, 64, 'M', 128, 128, 'M', 256,", "def __init__(self, k=4, num_classes=10, enable_bias=True): super(AlexNet, self).__init__() self.conv_base = nn.Sequential( nn.Conv2d(3, 96, kernel_size=11,", "'b1': feat_block1.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b2': feat_block2.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b3': feat_block3.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b4': feat_block4.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b5': x.detach().squeeze().cpu().numpy()}) else: x =", "VGG nn.ReLU(True), nn.BatchNorm1d(512), # instead of dropout nn.Linear(512, num_classes, bias=enable_bias)) else: assert False", "kernel_size=3, stride=2, padding=1, bias=enable_bias), nn.BatchNorm2d(384), nn.ReLU(inplace=True), nn.Conv2d(384, 256, kernel_size=3, stride=2, padding=1, bias=enable_bias), nn.BatchNorm2d(256),", "variants used in SNIP paper: 1. VGG-C (16 layers) 2. VGG-D (16 layers)", "384, kernel_size=3, stride=2, padding=1, bias=enable_bias), nn.BatchNorm2d(384), nn.ReLU(inplace=True), nn.Conv2d(384, 384, kernel_size=3, stride=2, padding=1, bias=enable_bias),", "* 5))) x = F.relu(self.fc4(x)) x = F.log_softmax(self.fc5(x), dim=1) return x class LeNet_5_Caffe(nn.Module):", "num_classes=10, enable_bias=True): super(AlexNet, self).__init__() self.conv_base = nn.Sequential( nn.Conv2d(3, 96, kernel_size=11, stride=2, padding=5, bias=enable_bias),", "3. VGG-like Some of the differences: * Reduced size of FC layers to", "# TODO: BN yes or no? 
layers = [] for idx, v in", "\"\"\" This is based on Caffe's implementation of Lenet-5 and is slightly different", "return F.log_softmax(self.fc3(x), dim=1) class LeNet_5(nn.Module): def __init__(self, enable_bias=True): super().__init__() self.conv1 = nn.Conv2d(1, 6,", "batch_norm=False, enable_bias=True, in_channels=3): # TODO: BN yes or no? layers = [] for", "bias=enable_bias) if batch_norm: layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] else: layers += [conv2d, nn.ReLU(inplace=True)]", "isinstance(v, list): v, kernel_size, padding = v[0], 1, 0 else: kernel_size, padding =", "= v return nn.Sequential(*layers) def forward(self, input, epoch_id=None, batch_id=None, gt=None): if self.enable_dump_features: feat_block1", "in the original VGG nn.ReLU(True), nn.BatchNorm1d(512), # instead of dropout nn.Linear(512, 512, bias=enable_bias),", "512 * Adjusted flattening to match CIFAR-10 shapes * Replaced dropout layers with", "v return nn.Sequential(*layers) def forward(self, input, epoch_id=None, batch_id=None, gt=None): if self.enable_dump_features: feat_block1 =", "num_classes, bias=enable_bias)) elif config == 'like': self.classifier = nn.Sequential( nn.Linear(512, 512, bias=enable_bias), #", "densenet161 class LeNet_300_100(nn.Module): def __init__(self, enable_bias=True): # original code is true super().__init__() self.fc1", "self.features_block3(feat_block2) feat_block4 = self.features_block4(feat_block3) x = self.features_block5(feat_block4) if (epoch_id is not None) and", "512, 512, [512], 'M', 512, 512, [512], 'M'], 'D': [64, 64, 'M', 128,", "v, kernel_size, padding = v[0], 1, 0 else: kernel_size, padding = 3, 1", "[256], 'M'], in_channels=128, batch_norm=True, enable_bias=enable_bias) self.features_block4 = self.make_layers([512, 512, [512], 'M'], in_channels=256, batch_norm=True,", "enable_bias=enable_bias) self.features_block4 = self.make_layers([512, 512, [512], 'M'], in_channels=256, batch_norm=True, enable_bias=enable_bias) 
self.features_block5 = self.make_layers([512,", "(16 layers) 3. VGG-like Some of the differences: * Reduced size of FC", "self.fc5 = nn.Linear(84, 10, bias=enable_bias) def forward(self, x): x = F.relu(self.conv1(x)) x =", "\"\"\" def __init__(self, config, num_classes=10, enable_bias=True, enable_dump_features=False): super().__init__() self.enable_dump_features = enable_dump_features if enable_dump_features:", "64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M',", "batch_norm=True, enable_bias=enable_bias) self.features_block2 = self.make_layers([128, 128, 'M'], in_channels=64, batch_norm=True, enable_bias=enable_bias) self.features_block3 = self.make_layers([256,", "'b4': feat_block4.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b5': x.detach().squeeze().cpu().numpy()}) else: x = self.features(input) x = x.view(x.size(0), -1) x", "layer does NOT have padding and therefore intermediate shapes do not match the", "self.features_block5(feat_block4) if (epoch_id is not None) and (batch_id is not None): scio.savemat('../checkpoints/inter_features_epoch{}_batch{}.mat'.format(epoch_id, batch_id),", "yes or no? layers = [] for idx, v in enumerate(config): if v", "bias=enable_bias), # 512 * 7 * 7 in the original VGG nn.ReLU(True), nn.BatchNorm1d(512),", "true super().__init__() self.fc1 = nn.Linear(784, 300, bias=enable_bias) self.fc2 = nn.Linear(300, 100, bias=enable_bias) self.fc3", "= F.relu(self.fc2(x)) return F.log_softmax(self.fc3(x), dim=1) class LeNet_5(nn.Module): def __init__(self, enable_bias=True): super().__init__() self.conv1 =", "config == 'like': self.classifier = nn.Sequential( nn.Linear(512, 512, bias=enable_bias), # 512 * 7", "nn.Conv2d(3, 96, kernel_size=11, stride=2, padding=5, bias=enable_bias), nn.BatchNorm2d(96), nn.ReLU(inplace=True), nn.Conv2d(96, 256, kernel_size=5, stride=2, padding=2,", "Lenet-5 and is slightly different from the vanilla LeNet-5. 
Note that the first", "self.conv2 = nn.Conv2d(20, 50, 5, bias=enable_bias) self.fc3 = nn.Linear(50 * 4 * 4,", "forward(self, x): x = F.relu(self.conv1(x)) x = F.max_pool2d(x, 2) x = F.relu(self.conv2(x)) x", "* 4 * 4, 500, bias=enable_bias) self.fc4 = nn.Linear(500, 10, bias=enable_bias) def forward(self,", "* Adjusted flattening to match CIFAR-10 shapes * Replaced dropout layers with BatchNorm", "F.relu(self.fc1(x.view(-1, 784))) x = F.relu(self.fc2(x)) return F.log_softmax(self.fc3(x), dim=1) class LeNet_5(nn.Module): def __init__(self, enable_bias=True):", "scipy.io as scio from torchvision.models import vgg19_bn, resnet152, densenet161 class LeNet_300_100(nn.Module): def __init__(self,", "= F.log_softmax(self.fc4(x), dim=1) return x VGG_CONFIGS = { 'C': [64, 64, 'M', 128,", "enable_bias=enable_bias) if config in {'C', 'D'}: self.classifier = nn.Sequential( nn.Linear(512, 512, bias=enable_bias), #", "nn.BatchNorm1d(512), # instead of dropout nn.Linear(512, num_classes, bias=enable_bias)) elif config == 'like': self.classifier", "layers) 2. VGG-D (16 layers) 3. VGG-like Some of the differences: * Reduced", "batch_id=None, gt=None): if self.enable_dump_features: feat_block1 = self.features_block1(input) feat_block2 = self.features_block2(feat_block1) feat_block3 = self.features_block3(feat_block2)", "4 * 4, 500, bias=enable_bias) self.fc4 = nn.Linear(500, 10, bias=enable_bias) def forward(self, x):", "of Lenet-5 and is slightly different from the vanilla LeNet-5. 
Note that the", "self.make_layers(VGG_CONFIGS[config], batch_norm=True, enable_bias=enable_bias) if config in {'C', 'D'}: self.classifier = nn.Sequential( nn.Linear(512, 512,", "nn.Linear(512, num_classes, bias=enable_bias)) else: assert False @staticmethod def make_layers(config, batch_norm=False, enable_bias=True, in_channels=3): #", "'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],", "64, 'M'], in_channels=3, batch_norm=True, enable_bias=enable_bias) self.features_block2 = self.make_layers([128, 128, 'M'], in_channels=64, batch_norm=True, enable_bias=enable_bias)", "list): v, kernel_size, padding = v[0], 1, 0 else: kernel_size, padding = 3,", "VGG nn.ReLU(True), nn.BatchNorm1d(512), # instead of dropout nn.Linear(512, 512, bias=enable_bias), nn.ReLU(True), nn.BatchNorm1d(512), #", "self.make_layers([256, 256, [256], 'M'], in_channels=128, batch_norm=True, enable_bias=enable_bias) self.features_block4 = self.make_layers([512, 512, [512], 'M'],", "layers) 3. VGG-like Some of the differences: * Reduced size of FC layers", "128, 'M'], in_channels=64, batch_norm=True, enable_bias=enable_bias) self.features_block3 = self.make_layers([256, 256, [256], 'M'], in_channels=128, batch_norm=True,", "x = self.conv_base(x) x = x.view(x.size(0), -1) x = self.fc_base(x) x = F.log_softmax(x,", "padding=1, bias=enable_bias), nn.BatchNorm2d(256), nn.ReLU(inplace=True)) self.fc_base = nn.Sequential( nn.Linear(256, 1024 * k), nn.BatchNorm1d(1024 *", "NOT have padding and therefore intermediate shapes do not match the official LeNet-5.", "class AlexNet(nn.Module): # copy from https://medium.com/@kushajreal/training-alexnet-with-tips-and-checks-on-how-to-train-cnns-practical-cnns-in-pytorch-1-61daa679c74a def __init__(self, k=4, num_classes=10, enable_bias=True): super(AlexNet, self).__init__()", "in_channels=3, batch_norm=True, enable_bias=enable_bias) self.features_block2 = self.make_layers([128, 128, 'M'], in_channels=64, batch_norm=True, enable_bias=enable_bias) 
self.features_block3 =", "def forward(self, x): x = self.conv_base(x) x = x.view(x.size(0), -1) x = self.fc_base(x)", "scio from torchvision.models import vgg19_bn, resnet152, densenet161 class LeNet_300_100(nn.Module): def __init__(self, enable_bias=True): #", "'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M']}", "of FC layers to 512 * Adjusted flattening to match CIFAR-10 shapes *", "match the official LeNet-5. \"\"\" def __init__(self, enable_bias=True): super().__init__() self.conv1 = nn.Conv2d(1, 20,", "nn.Linear(1024 * k, 1024 * k), nn.BatchNorm1d(1024 * k), nn.ReLU(inplace=True), nn.Linear(1024 * k,", "nn.Conv2d(256, 384, kernel_size=3, stride=2, padding=1, bias=enable_bias), nn.BatchNorm2d(384), nn.ReLU(inplace=True), nn.Conv2d(384, 384, kernel_size=3, stride=2, padding=1,", "# instead of dropout nn.Linear(512, num_classes, bias=enable_bias)) else: assert False @staticmethod def make_layers(config,", "bias=enable_bias) self.fc3 = nn.Linear(50 * 4 * 4, 500, bias=enable_bias) self.fc4 = nn.Linear(500,", "original code is true super().__init__() self.fc1 = nn.Linear(784, 300, bias=enable_bias) self.fc2 = nn.Linear(300,", "bias=enable_bias), nn.BatchNorm2d(96), nn.ReLU(inplace=True), nn.Conv2d(96, 256, kernel_size=5, stride=2, padding=2, bias=enable_bias), nn.BatchNorm2d(256), nn.ReLU(inplace=True), nn.Conv2d(256, 384,", "nn.ReLU(inplace=True), nn.Linear(1024 * k, 1024 * k), nn.BatchNorm1d(1024 * k), nn.ReLU(inplace=True), nn.Linear(1024 *", "def forward(self, x): x = F.relu(self.conv1(x)) x = F.max_pool2d(x, 2) x = F.relu(self.conv2(x))", "= nn.Sequential( nn.Conv2d(3, 96, kernel_size=11, stride=2, padding=5, bias=enable_bias), nn.BatchNorm2d(96), nn.ReLU(inplace=True), nn.Conv2d(96, 256, kernel_size=5,", "the official LeNet-5. 
\"\"\" def __init__(self, enable_bias=True): super().__init__() self.conv1 = nn.Conv2d(1, 20, 5,", "enumerate(config): if v == 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else: if isinstance(v, list):", "5, 120, bias=enable_bias) self.fc4 = nn.Linear(120, 84, bias=enable_bias) self.fc5 = nn.Linear(84, 10, bias=enable_bias)", "padding=0, bias=enable_bias) self.conv2 = nn.Conv2d(20, 50, 5, bias=enable_bias) self.fc3 = nn.Linear(50 * 4", "bias=enable_bias) self.fc3 = nn.Linear(16 * 5 * 5, 120, bias=enable_bias) self.fc4 = nn.Linear(120,", "'gt': gt.detach().squeeze().cpu().numpy(), 'b1': feat_block1.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b2': feat_block2.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b3': feat_block3.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b4': feat_block4.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b5': x.detach().squeeze().cpu().numpy()}) else:", "if config in {'C', 'D'}: self.classifier = nn.Sequential( nn.Linear(512, 512, bias=enable_bias), # 512", "in {'C', 'D'}: self.classifier = nn.Sequential( nn.Linear(512, 512, bias=enable_bias), # 512 * 7", "x = F.max_pool2d(x, 2) x = F.relu(self.fc3(x.view(-1, 50 * 4 * 4))) x", "= nn.Sequential( nn.Linear(256, 1024 * k), nn.BatchNorm1d(1024 * k), nn.ReLU(inplace=True), nn.Linear(1024 * k,", "* k, 1024 * k), nn.BatchNorm1d(1024 * k), nn.ReLU(inplace=True), nn.Linear(1024 * k, num_classes))", "forward(self, x): x = self.conv_base(x) x = x.view(x.size(0), -1) x = self.fc_base(x) x", "None): scio.savemat('../checkpoints/inter_features_epoch{}_batch{}.mat'.format(epoch_id, batch_id), {'img': input.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'gt': gt.detach().squeeze().cpu().numpy(), 'b1': feat_block1.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b2': feat_block2.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b3': feat_block3.detach().squeeze().permute(2,3,1,0).cpu().numpy(),", "* 5 * 5, 120, bias=enable_bias) self.fc4 = nn.Linear(120, 84, 
bias=enable_bias) self.fc5 =", "nn.BatchNorm2d(384), nn.ReLU(inplace=True), nn.Conv2d(384, 384, kernel_size=3, stride=2, padding=1, bias=enable_bias), nn.BatchNorm2d(384), nn.ReLU(inplace=True), nn.Conv2d(384, 256, kernel_size=3,", "'M'], 'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512,", "in enumerate(config): if v == 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else: if isinstance(v,", "nn.BatchNorm1d(512), # instead of dropout nn.Linear(512, num_classes, bias=enable_bias)) else: assert False @staticmethod def", "= v[0], 1, 0 else: kernel_size, padding = 3, 1 conv2d = nn.Conv2d(in_channels,", "self.features_block4(feat_block3) x = self.features_block5(feat_block4) if (epoch_id is not None) and (batch_id is not", "bias=enable_bias), nn.BatchNorm2d(256), nn.ReLU(inplace=True)) self.fc_base = nn.Sequential( nn.Linear(256, 1024 * k), nn.BatchNorm1d(1024 * k),", "2) x = F.relu(self.fc3(x.view(-1, 50 * 4 * 4))) x = F.log_softmax(self.fc4(x), dim=1)", "7 * 7 in the original VGG nn.ReLU(True), nn.BatchNorm1d(512), # instead of dropout", "= self.make_layers(VGG_CONFIGS[config], batch_norm=True, enable_bias=enable_bias) if config in {'C', 'D'}: self.classifier = nn.Sequential( nn.Linear(512," ]
[ "test_page_should_contain_string_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"):", "with pytest.raises(Exception, match='The string \"ghi\" was not found'): under_test.page_should_contain_all_strings([\"def\", \"ghi\"]) def test_page_should_contain_all_strings_custom_message( mocker:", "under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_string( \"abc\", error_message=\"my", "test_page_should_contain_string_x_times_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") with pytest.raises(Exception, match=\"my error message\"):", "message\") def test_page_should_not_contain_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_string(\"ABC\") def test_page_should_not_contain_string_ignore_case( mocker: MockerFixture,", "under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", side_effect=[\"AbC\", \"DeF\"]) under_test.page_should_contain_all_strings([\"abc\", \"def\"], ignore_case=True) def test_page_should_contain_all_strings_fails( mocker: MockerFixture,", "appears \"0\" times' ): under_test.page_should_contain_string_x_times(\"b\", 1) def test_page_should_contain_string_x_times_custom_message( mocker: MockerFixture, under_test: x3270 ):", "under_test.page_should_contain_string_x_times( \"b\", 1, error_message=\"my error message\" ) def test_page_should_match_regex(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "MockerFixture, under_test: x3270): 
mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_string(\"ABC\") def test_page_should_not_contain_string_ignore_case( mocker: MockerFixture, under_test: x3270 ):", "): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_string( \"abc\", error_message=\"my error message\"", "import x3270 def test_page_should_contain_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") mocker.patch(\"robot.api.logger.info\") under_test.page_should_contain_string(\"abc\") logger.info.assert_called_with('The string", "message\"): under_test.page_should_contain_match(\"*def*\", error_message=\"my error message\") def test_page_should_not_contain_match(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_match(\"*def*\")", "mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"aBc\") mocker.patch(\"robot.api.logger.info\") under_test.page_should_contain_string(\"abc\", ignore_case=True) logger.info.assert_called_with('The string \"abc\"", "match=\"my error message\"): under_test.page_should_not_contain_any_string( [\"abc\", \"def\"], error_message=\"my error message\" ) def test_page_should_not_contain_all_strings(mocker: MockerFixture,", "return_value=\"abc\") with pytest.raises(Exception, match='The string \"def\" was not found'): under_test.page_should_contain_string(\"def\") def test_page_should_contain_string_custom_message( mocker:", "MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape(\"The strings \\\"['def', 'ghi']\\\" were", "import 
MockerFixture from robot.api import logger from Mainframe3270.x3270 import x3270 def test_page_should_contain_string(mocker: MockerFixture,", "found') def test_page_should_contain_string_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string \"def\"", "\"\\d+\" pattern') ): under_test.page_should_match_regex(r\"\\d+\") def test_page_should_not_match_regex(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_match_regex(r\"\\d+\") def", "return_value=\"abc\") under_test.page_should_not_contain_match(\"*def*\") def test_page_should_not_contain_match_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape('There", "mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", side_effect=[\"AbC\", \"DeF\"]) under_test.page_should_contain_all_strings([\"abc\", \"def\"], ignore_case=True) def test_page_should_contain_all_strings_fails(", "import pytest from pytest_mock import MockerFixture from robot.api import logger from Mainframe3270.x3270 import", "MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_contain_any_string([\"ABC\", \"def\"], ignore_case=True) def test_page_should_contain_any_string_fails(mocker: MockerFixture, under_test:", "x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_any_string( [\"abc\", \"def\"], error_message=\"my", "string \"abc\" was found'): under_test.page_should_not_contain_all_strings([\"abc\", \"def\"]) def 
test_page_should_not_contain_all_strings_custom_message( mocker: MockerFixture, under_test: x3270 ):", "def test_page_should_not_contain_string_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error", "): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_any_string( [\"abc\", \"def\"], error_message=\"my error", "\"abc\", error_message=\"my error message\" ) def test_page_should_contain_any_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_contain_any_string([\"abc\",", "it appears \"24\" times' ): under_test.page_should_contain_string_x_times(\"a\", 1) with pytest.raises( Exception, match='The string \"b\"", "under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string \"abc\" was found'): under_test.page_should_not_contain_any_string([\"abc\",", "with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_match(\"*def*\", error_message=\"my error message\") def test_page_should_not_contain_match(mocker: MockerFixture, under_test:", "MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", side_effect=[\"abc\", \"def\"]) under_test.page_should_contain_all_strings([\"abc\", \"def\"]) def test_page_should_contain_all_strings_ignore_case( mocker: MockerFixture, under_test:", "ignore_case=True) def test_page_should_not_contain_string_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string \"ABC\"", "was found') def 
test_page_should_contain_string_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"aBc\") mocker.patch(\"robot.api.logger.info\") under_test.page_should_contain_string(\"abc\",", "under_test.page_should_contain_match(\"*def*\", error_message=\"my error message\") def test_page_should_not_contain_match(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_match(\"*def*\") def", "def test_page_should_not_contain_any_string_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"ABC\") with pytest.raises(Exception, match='The string", "return_value=\"abc\") under_test.page_should_contain_match(\"*a?c*\") def test_page_should_contain_match_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape('No", "'ghi']\\\" were not found\") ): under_test.page_should_contain_any_string([\"def\", \"ghi\"]) def test_page_should_contain_any_string_custom_message( mocker: MockerFixture, under_test: x3270", "string \"abc\" was found') def test_page_should_contain_string_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception,", "mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_match(", "test_page_should_contain_any_string_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_contain_any_string([\"ABC\", \"def\"], ignore_case=True) def 
test_page_should_contain_any_string_fails(mocker:", "MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_any_string([\"def\", \"ghi\"]) def test_page_should_not_contain_any_string_fails( mocker: MockerFixture, under_test: x3270", "message\"): under_test.page_should_not_contain_string( \"abc\", error_message=\"my error message\" ) def test_page_should_contain_any_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string \"abc\" was found'): under_test.page_should_not_contain_all_strings([\"ABC\", \"def\"], ignore_case=True) def", "ignore_case=True) def test_page_should_contain_match_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my", "found for \"*abc*\" pattern') ): under_test.page_should_not_contain_match(\"*ABC*\", ignore_case=True) def test_page_should_not_contain_match_custom_message( mocker: MockerFixture, under_test: x3270", "under_test.page_should_not_contain_any_string([\"abc\", \"def\"]) def test_page_should_not_contain_any_string_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"ABC\") with pytest.raises(Exception,", "ignore_case=True) def test_page_should_not_contain_any_string_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my", "def test_page_should_contain_any_string_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") 
under_test.page_should_contain_any_string([\"ABC\", \"def\"], ignore_case=True) def", "under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"ABC\") with pytest.raises(Exception, match='The string \"abc\" was found'): under_test.page_should_not_contain_any_string([\"abc\",", "under_test.page_should_not_contain_match(\"*def*\") def test_page_should_not_contain_match_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape('There are", "MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string \"abc\" was found'):", "under_test.page_should_contain_string_x_times(\"b\", 1) def test_page_should_contain_string_x_times_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") with pytest.raises(Exception,", "\"abc\" was found') def test_page_should_contain_string_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"aBc\") mocker.patch(\"robot.api.logger.info\")", "match='The string \"b\" was not found \"1\" times, it appears \"0\" times' ):", "found') def test_page_should_contain_string_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"aBc\") mocker.patch(\"robot.api.logger.info\") under_test.page_should_contain_string(\"abc\", ignore_case=True)", "def test_page_should_not_contain_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_string(\"ABC\") def test_page_should_not_contain_string_ignore_case( mocker: MockerFixture, under_test:", 
"mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", side_effect=[\"abc\", \"def\"]) under_test.page_should_contain_all_strings([\"abc\", \"def\"]) def test_page_should_contain_all_strings_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "): under_test.page_should_not_match_regex(\"[a]+\") def test_page_should_contain_match(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_contain_match(\"*a?c*\") def test_page_should_contain_match_fails(mocker: MockerFixture,", "x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_string(\"def\", ignore_case=True) def test_page_should_not_contain_string_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\")", "1) def test_page_should_contain_string_x_times_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") with pytest.raises(Exception, match=\"my", "message\"): under_test.page_should_not_contain_all_strings( [\"abc\", \"def\"], error_message=\"my error message\" ) def test_page_should_contain_string_x_times(mocker: MockerFixture, under_test: x3270):", "test_page_should_not_contain_string_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"):", "found \"1\" times, it appears \"0\" times' ): under_test.page_should_contain_string_x_times(\"b\", 1) def test_page_should_contain_string_x_times_custom_message( mocker:", "not found\") ): under_test.page_should_contain_any_string([\"def\", \"ghi\"]) def test_page_should_contain_any_string_custom_message( mocker: MockerFixture, under_test: x3270 ): 
mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_all_strings( [\"abc\", \"def\"], error_message=\"my error message\" ) def", "def test_page_should_contain_any_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_contain_any_string([\"abc\", \"def\"]) def test_page_should_contain_any_string_ignore_case( mocker: MockerFixture,", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape('No matches found for \"*e?g*\" pattern') ): under_test.page_should_contain_match(\"*e?g*\")", "error message\"): under_test.page_should_contain_any_string( [\"def\", \"ghi\"], error_message=\"my error message\" ) def test_page_should_contain_all_strings(mocker: MockerFixture, under_test:", "match=\"my error message\"): under_test.page_should_contain_string(\"def\", error_message=\"my error message\") def test_page_should_not_contain_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "match='The string \"abc\" was found'): under_test.page_should_not_contain_all_strings([\"ABC\", \"def\"], ignore_case=True) def test_page_should_not_contain_all_strings_fails( mocker: MockerFixture, under_test:", "x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"ABC\") under_test.page_should_contain_match(\"*a?c*\", ignore_case=True) def test_page_should_contain_match_custom_message( mocker: MockerFixture, under_test: x3270 ):", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_contain_any_string([\"ABC\", \"def\"], ignore_case=True) def test_page_should_contain_any_string_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", 
return_value=\"abc\") with", "test_page_should_contain_match_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"):", "for \"[a]+\" pattern') ): under_test.page_should_not_match_regex(\"[a]+\") def test_page_should_contain_match(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_contain_match(\"*a?c*\")", "return_value=\"a\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_string_x_times( \"b\", 1, error_message=\"my error message\" )", "MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"ABC\") with pytest.raises(Exception, match='The string \"abc\" was found'):", "match=re.escape(r'No matches found for \"\\d+\" pattern') ): under_test.page_should_match_regex(r\"\\d+\") def test_page_should_not_match_regex(mocker: MockerFixture, under_test: x3270):", "appears \"24\" times' ): under_test.page_should_contain_string_x_times(\"a\", 1) with pytest.raises( Exception, match='The string \"b\" was", "from pytest_mock import MockerFixture from robot.api import logger from Mainframe3270.x3270 import x3270 def", "ignore_case=True) def test_page_should_contain_any_string_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape(\"The strings", "x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") with pytest.raises( Exception, match=re.escape('There are matches found for \"[a]+\" pattern')", "): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_all_strings( [\"abc\", 
\"def\"], error_message=\"my error", "message\"): under_test.page_should_contain_string(\"def\", error_message=\"my error message\") def test_page_should_not_contain_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_string(\"ABC\")", "x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_match_regex(r\"\\d+\") def test_page_should_not_match_regex_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") with pytest.raises(", "under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape('No matches found for \"*e?g*\" pattern')", "test_page_should_contain_all_strings_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", side_effect=[\"AbC\", \"DeF\"]) under_test.page_should_contain_all_strings([\"abc\", \"def\"], ignore_case=True) def", "under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_contain_any_string([\"ABC\", \"def\"], ignore_case=True) def test_page_should_contain_any_string_fails(mocker: MockerFixture, under_test: x3270):", "mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"ABC\") with pytest.raises(Exception, match='The string \"abc\" was", "\"def\"]) def test_page_should_not_contain_any_string_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"ABC\") with pytest.raises(Exception, match='The", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") mocker.patch(\"robot.api.logger.info\") 
under_test.page_should_contain_string(\"abc\") logger.info.assert_called_with('The string \"abc\" was found') def test_page_should_contain_string_ignore_case( mocker: MockerFixture,", "): under_test.page_should_contain_match(\"*e?g*\") def test_page_should_contain_match_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"ABC\") under_test.page_should_contain_match(\"*a?c*\", ignore_case=True)", "not found \"1\" times, it appears \"24\" times' ): under_test.page_should_contain_string_x_times(\"a\", 1) with pytest.raises(", "mocker.patch(\"robot.api.logger.info\") under_test.page_should_contain_string(\"abc\") logger.info.assert_called_with('The string \"abc\" was found') def test_page_should_contain_string_ignore_case( mocker: MockerFixture, under_test: x3270", "error message\" ) def test_page_should_contain_any_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_contain_any_string([\"abc\", \"def\"]) def", "MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_string(\"def\", ignore_case=True) def test_page_should_not_contain_string_fails(mocker: MockerFixture, under_test: x3270):", "return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_all_strings( [\"abc\", \"def\"], error_message=\"my error message\" )", "under_test.page_should_contain_string(\"abc\", ignore_case=True) logger.info.assert_called_with('The string \"abc\" was found') def test_page_should_contain_string_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") with pytest.raises( Exception, 
match=re.escape('There are matches found for", "with pytest.raises( Exception, match='The string \"a\" was not found \"1\" times, it appears", "test_page_should_contain_all_strings_fails( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=[\"def\"]) with pytest.raises(Exception, match='The string \"ghi\"", "return_value=\"abc\") under_test.page_should_not_match_regex(r\"\\d+\") def test_page_should_not_match_regex_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") with pytest.raises( Exception, match=re.escape('There", "under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string \"abc\" was found'): under_test.page_should_not_contain_all_strings([\"abc\",", "logger.info.assert_called_with('The string \"abc\" was found') def test_page_should_contain_string_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with", "under_test.page_should_not_contain_string(\"def\", ignore_case=True) def test_page_should_not_contain_string_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string", "pattern') ): under_test.page_should_not_contain_match(\"*ABC*\", ignore_case=True) def test_page_should_not_contain_match_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\")", "from robot.api import logger from Mainframe3270.x3270 import x3270 def test_page_should_contain_string(mocker: MockerFixture, under_test: x3270):", "under_test.page_should_not_contain_string(\"ABC\", ignore_case=True) def test_page_should_not_contain_string_custom_message( mocker: MockerFixture, 
under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception,", "): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_any_string( [\"def\", \"ghi\"], error_message=\"my error", "under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_match(\"*def*\", error_message=\"my error", "pattern') ): under_test.page_should_match_regex(r\"\\d+\") def test_page_should_not_match_regex(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_match_regex(r\"\\d+\") def test_page_should_not_match_regex_fails(mocker:", "\"def\"]) under_test.page_should_contain_all_strings([\"abc\", \"def\"]) def test_page_should_contain_all_strings_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", side_effect=[\"AbC\", \"DeF\"])", "under_test.page_should_contain_any_string([\"abc\", \"def\"]) def test_page_should_contain_any_string_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_contain_any_string([\"ABC\", \"def\"],", "match='The string \"abc\" was found'): under_test.page_should_not_contain_any_string([\"abc\", \"def\"]) def test_page_should_not_contain_any_string_ignore_case( mocker: MockerFixture, under_test: x3270", "match='The string \"ABC\" was found'): under_test.page_should_not_contain_string(\"ABC\", ignore_case=True) def test_page_should_not_contain_string_custom_message( mocker: MockerFixture, under_test: x3270", "are matches found for \"*abc*\" pattern') ): 
under_test.page_should_not_contain_match(\"*ABC*\", ignore_case=True) def test_page_should_not_contain_match_custom_message( mocker: MockerFixture,", "MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_match( \"*abc*\",", "): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string \"abc\" was found'): under_test.page_should_not_contain_all_strings([\"abc\", \"def\"]) def", "return_value=\"abc\") with pytest.raises( Exception, match=re.escape('There are matches found for \"*abc*\" pattern') ): under_test.page_should_not_contain_match(\"*ABC*\",", "under_test.page_should_contain_all_strings( [\"abc\", \"def\"], error_message=\"my error message\" ) def test_page_should_not_contain_any_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_string_x_times(", "MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_string(\"def\", error_message=\"my", "\"abc\" was found') def test_page_should_contain_string_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The", "x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_match_regex(r\"\\w+\") def test_page_should_match_regex_fails(mocker: MockerFixture, under_test: x3270): 
mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_all_strings( [\"abc\", \"def\"], error_message=\"my error message\"", "test_page_should_contain_string_x_times_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") under_test.page_should_contain_string_x_times(\"A\", 24, ignore_case=True) def test_page_should_contain_string_x_times_fails(", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_contain_any_string([\"abc\", \"def\"]) def test_page_should_contain_any_string_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\")", "return_value=\"a\") under_test.page_should_contain_string_x_times(\"A\", 24, ignore_case=True) def test_page_should_contain_string_x_times_fails( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\")", "under_test.page_should_not_contain_any_string([\"def\", \"ghi\"]) def test_page_should_not_contain_any_string_fails( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception,", "test_page_should_match_regex_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape(r'No matches found for", "): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_match(\"*def*\", error_message=\"my error message\") def", 
"error_message=\"my error message\" ) def test_page_should_contain_string_x_times(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") under_test.page_should_contain_string_x_times(\"a\", 24)", "with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_all_strings( [\"abc\", \"def\"], error_message=\"my error message\" ) def", "matches found for \"*abc*\" pattern') ): under_test.page_should_not_contain_match(\"*abc*\") def test_page_should_not_contain_match_ignore_case( mocker: MockerFixture, under_test: x3270", "x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape('There are matches found for \"*abc*\"", "match=\"my error message\"): under_test.page_should_contain_any_string( [\"def\", \"ghi\"], error_message=\"my error message\" ) def test_page_should_contain_all_strings(mocker: MockerFixture,", "was not found \"1\" times, it appears \"24\" times' ): under_test.page_should_contain_string_x_times(\"a\", 1) with", "MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_contain_any_string([\"abc\", \"def\"]) def test_page_should_contain_any_string_ignore_case( mocker: MockerFixture, under_test: x3270", "def test_page_should_not_contain_any_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_any_string([\"def\", \"ghi\"]) def test_page_should_not_contain_any_string_fails( mocker: MockerFixture,", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_all_strings([\"def\", \"ghi\"]) def test_page_should_not_contain_all_strings_ignore_case( mocker: MockerFixture, under_test: x3270 ): 
mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\")", "\"ghi\"]) def test_page_should_not_contain_all_strings_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The", "under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string \"abc\" was found'): under_test.page_should_not_contain_all_strings([\"ABC\",", "def test_page_should_contain_string_x_times_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") with pytest.raises(Exception, match=\"my error", "MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape(r'No matches found for \"\\d+\"", "def test_page_should_contain_match_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape('No matches found", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_contain_match(\"*a?c*\") def test_page_should_contain_match_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception,", "MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_match(\"*def*\") def test_page_should_not_contain_match_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\")", "with pytest.raises( Exception, match=re.escape('There are matches found for \"*abc*\" pattern') ): 
under_test.page_should_not_contain_match(\"*abc*\") def", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_string(\"ABC\") def test_page_should_not_contain_string_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_string(\"def\",", "pytest.raises( Exception, match=re.escape('There are matches found for \"*abc*\" pattern') ): under_test.page_should_not_contain_match(\"*ABC*\", ignore_case=True) def", "for \"*abc*\" pattern') ): under_test.page_should_not_contain_match(\"*ABC*\", ignore_case=True) def test_page_should_not_contain_match_custom_message( mocker: MockerFixture, under_test: x3270 ):", "def test_page_should_contain_match_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"ABC\") under_test.page_should_contain_match(\"*a?c*\", ignore_case=True) def test_page_should_contain_match_custom_message(", "under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_any_string( [\"abc\", \"def\"],", "match='The string \"a\" was not found \"1\" times, it appears \"24\" times' ):", "under_test.page_should_contain_string_x_times(\"a\", 24) def test_page_should_contain_string_x_times_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") under_test.page_should_contain_string_x_times(\"A\", 24,", "match=\"my error message\"): under_test.page_should_contain_string_x_times( \"b\", 1, error_message=\"my error message\" ) def test_page_should_match_regex(mocker: MockerFixture,", "under_test.page_should_match_regex(r\"\\w+\") def test_page_should_match_regex_fails(mocker: MockerFixture, under_test: 
x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape(r'No matches", "x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=[\"def\"]) with pytest.raises(Exception, match='The string \"ghi\" was not found'): under_test.page_should_contain_all_strings([\"def\",", "def test_page_should_contain_any_string_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape(\"The strings \\\"['def',", "\"1\" times, it appears \"0\" times' ): under_test.page_should_contain_string_x_times(\"b\", 1) def test_page_should_contain_string_x_times_custom_message( mocker: MockerFixture,", "for \"\\d+\" pattern') ): under_test.page_should_match_regex(r\"\\d+\") def test_page_should_not_match_regex(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_match_regex(r\"\\d+\")", "def test_page_should_contain_match_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error", "error message\"): under_test.page_should_contain_string_x_times( \"b\", 1, error_message=\"my error message\" ) def test_page_should_match_regex(mocker: MockerFixture, under_test:", "were not found\") ): under_test.page_should_contain_any_string([\"def\", \"ghi\"]) def test_page_should_contain_any_string_custom_message( mocker: MockerFixture, under_test: x3270 ):", "MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_any_string( [\"def\",", "return_value=\"abc\") under_test.page_should_not_contain_all_strings([\"def\", 
\"ghi\"]) def test_page_should_not_contain_all_strings_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with", "Exception, match=re.escape(\"The strings \\\"['def', 'ghi']\\\" were not found\") ): under_test.page_should_contain_any_string([\"def\", \"ghi\"]) def test_page_should_contain_any_string_custom_message(", "\"*abc*\" pattern') ): under_test.page_should_not_contain_match(\"*ABC*\", ignore_case=True) def test_page_should_not_contain_match_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "error message\"): under_test.page_should_contain_string(\"def\", error_message=\"my error message\") def test_page_should_not_contain_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\")", "mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_all_strings(", "with pytest.raises( Exception, match=re.escape('There are matches found for \"[a]+\" pattern') ): under_test.page_should_not_match_regex(\"[a]+\") def", "was not found'): under_test.page_should_contain_all_strings([\"def\", \"ghi\"]) def test_page_should_contain_all_strings_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "error message\"): under_test.page_should_contain_match(\"*def*\", error_message=\"my error message\") def test_page_should_not_contain_match(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\")", "MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"ABC\") under_test.page_should_contain_match(\"*a?c*\", 
ignore_case=True) def test_page_should_contain_match_custom_message( mocker: MockerFixture, under_test:", "for \"*abc*\" pattern') ): under_test.page_should_not_contain_match(\"*abc*\") def test_page_should_not_contain_match_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "): under_test.page_should_match_regex(r\"\\d+\") def test_page_should_not_match_regex(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_match_regex(r\"\\d+\") def test_page_should_not_match_regex_fails(mocker: MockerFixture,", "with pytest.raises( Exception, match=re.escape('No matches found for \"*e?g*\" pattern') ): under_test.page_should_contain_match(\"*e?g*\") def test_page_should_contain_match_ignore_case(", "error_message=\"my error message\" ) def test_page_should_not_contain_all_strings(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_all_strings([\"def\", \"ghi\"])", "under_test.page_should_not_match_regex(r\"\\d+\") def test_page_should_not_match_regex_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") with pytest.raises( Exception, match=re.escape('There are", "[\"def\", \"ghi\"], error_message=\"my error message\" ) def test_page_should_contain_all_strings(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", side_effect=[\"abc\",", "test_page_should_contain_any_string_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape(\"The strings \\\"['def', 'ghi']\\\"", "\"0\" times' ): under_test.page_should_contain_string_x_times(\"b\", 1) def 
test_page_should_contain_string_x_times_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "pytest.raises(Exception, match='The string \"ABC\" was found'): under_test.page_should_not_contain_string(\"ABC\", ignore_case=True) def test_page_should_not_contain_string_custom_message( mocker: MockerFixture, under_test:", "found'): under_test.page_should_not_contain_any_string([\"abc\", \"def\"]) def test_page_should_not_contain_any_string_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"ABC\") with", "test_page_should_not_contain_any_string_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"ABC\") with pytest.raises(Exception, match='The string \"abc\"", "was found'): under_test.page_should_not_contain_all_strings([\"abc\", \"def\"]) def test_page_should_not_contain_all_strings_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\")", "\"def\"]) def test_page_should_contain_any_string_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_contain_any_string([\"ABC\", \"def\"], ignore_case=True)", "for \"*e?g*\" pattern') ): under_test.page_should_contain_match(\"*e?g*\") def test_page_should_contain_match_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "return_value=\"abc\") with pytest.raises( Exception, match=re.escape(r'No matches found for \"\\d+\" pattern') ): under_test.page_should_match_regex(r\"\\d+\") def", "def test_page_should_not_contain_match(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") 
under_test.page_should_not_contain_match(\"*def*\") def test_page_should_not_contain_match_fails(mocker: MockerFixture, under_test: x3270):", "logger.info.assert_called_with('The string \"abc\" was found') def test_page_should_contain_string_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "it appears \"0\" times' ): under_test.page_should_contain_string_x_times(\"b\", 1) def test_page_should_contain_string_x_times_custom_message( mocker: MockerFixture, under_test: x3270", ") def test_page_should_not_contain_all_strings(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_all_strings([\"def\", \"ghi\"]) def test_page_should_not_contain_all_strings_ignore_case( mocker:", "[\"abc\", \"def\"], error_message=\"my error message\" ) def test_page_should_not_contain_any_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\")", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"ABC\") under_test.page_should_contain_match(\"*a?c*\", ignore_case=True) def test_page_should_contain_match_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\")", "x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string \"abc\" was found'): under_test.page_should_not_contain_any_string([\"abc\", \"def\"])", "MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"aBc\") mocker.patch(\"robot.api.logger.info\") under_test.page_should_contain_string(\"abc\", ignore_case=True) logger.info.assert_called_with('The string \"abc\" was", "mocker: MockerFixture, under_test: x3270 ): 
mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_any_string(", "under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") mocker.patch(\"robot.api.logger.info\") under_test.page_should_contain_string(\"abc\") logger.info.assert_called_with('The string \"abc\" was found') def test_page_should_contain_string_ignore_case(", "return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_string(\"def\", error_message=\"my error message\") def test_page_should_not_contain_string(mocker: MockerFixture,", "def test_page_should_not_match_regex_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") with pytest.raises( Exception, match=re.escape('There are matches", "def test_page_should_match_regex(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_match_regex(r\"\\w+\") def test_page_should_match_regex_fails(mocker: MockerFixture, under_test: x3270):", "under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_contain_match(\"*a?c*\") def test_page_should_contain_match_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with", ") def test_page_should_match_regex(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_match_regex(r\"\\w+\") def test_page_should_match_regex_fails(mocker: MockerFixture, under_test:", "return_value=\"abc\") mocker.patch(\"robot.api.logger.info\") under_test.page_should_contain_string(\"abc\") logger.info.assert_called_with('The 
string \"abc\" was found') def test_page_should_contain_string_ignore_case( mocker: MockerFixture, under_test:", "message\" ) def test_page_should_not_contain_any_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_any_string([\"def\", \"ghi\"]) def test_page_should_not_contain_any_string_fails(", "x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape('There are matches found for \"*abc*\" pattern')", "<reponame>MichaelSeeburger/Robot-Framework-Mainframe-3270-Library import re import pytest from pytest_mock import MockerFixture from robot.api import logger", "24, ignore_case=True) def test_page_should_contain_string_x_times_fails( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") with pytest.raises(", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape(r'No matches found for \"\\d+\" pattern') ): under_test.page_should_match_regex(r\"\\d+\")", "def test_page_should_not_contain_match_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error", "MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_string( \"abc\",", "x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", side_effect=[\"AbC\", \"DeF\"]) under_test.page_should_contain_all_strings([\"abc\", \"def\"], ignore_case=True) def test_page_should_contain_all_strings_fails( mocker: MockerFixture, under_test:", "\"ghi\"]) def test_page_should_contain_all_strings_custom_message( 
mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my", "MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_all_strings( [\"abc\",", "test_page_should_contain_match(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_contain_match(\"*a?c*\") def test_page_should_contain_match_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "pytest.raises( Exception, match='The string \"a\" was not found \"1\" times, it appears \"24\"", "under_test.page_should_contain_match(\"*e?g*\") def test_page_should_contain_match_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"ABC\") under_test.page_should_contain_match(\"*a?c*\", ignore_case=True) def", "MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_match(\"*def*\", error_message=\"my", "pytest.raises( Exception, match='The string \"b\" was not found \"1\" times, it appears \"0\"", "error message\") def test_page_should_not_contain_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_string(\"ABC\") def test_page_should_not_contain_string_ignore_case( mocker:", "under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_all_strings([\"def\", \"ghi\"]) def 
test_page_should_not_contain_all_strings_ignore_case( mocker: MockerFixture, under_test: x3270 ):", "string \"ghi\" was not found'): under_test.page_should_contain_all_strings([\"def\", \"ghi\"]) def test_page_should_contain_all_strings_custom_message( mocker: MockerFixture, under_test: x3270", "mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_string(\"def\",", "pattern') ): under_test.page_should_not_contain_match(\"*abc*\") def test_page_should_not_contain_match_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with", "under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_match( \"*abc*\", error_message=\"my", "def test_page_should_not_match_regex(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_match_regex(r\"\\d+\") def test_page_should_not_match_regex_fails(mocker: MockerFixture, under_test: x3270):", "error message\"): under_test.page_should_not_contain_string( \"abc\", error_message=\"my error message\" ) def test_page_should_contain_any_string(mocker: MockerFixture, under_test: x3270):", "under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_match(\"*def*\") def test_page_should_not_contain_match_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with", "was not found \"1\" times, it appears \"0\" times' ): under_test.page_should_contain_string_x_times(\"b\", 1) def", "error message\" ) def 
test_page_should_not_contain_any_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_any_string([\"def\", \"ghi\"]) def", "with pytest.raises(Exception, match='The string \"ABC\" was found'): under_test.page_should_not_contain_string(\"ABC\", ignore_case=True) def test_page_should_not_contain_string_custom_message( mocker: MockerFixture,", "Mainframe3270.x3270 import x3270 def test_page_should_contain_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") mocker.patch(\"robot.api.logger.info\") under_test.page_should_contain_string(\"abc\") logger.info.assert_called_with('The", "mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape('There are matches", "1, error_message=\"my error message\" ) def test_page_should_match_regex(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_match_regex(r\"\\w+\")", "matches found for \"*e?g*\" pattern') ): under_test.page_should_contain_match(\"*e?g*\") def test_page_should_contain_match_ignore_case( mocker: MockerFixture, under_test: x3270", "return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_match(\"*def*\", error_message=\"my error message\") def test_page_should_not_contain_match(mocker: MockerFixture,", "\"def\"], ignore_case=True) def test_page_should_not_contain_any_string_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception,", "under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") 
under_test.page_should_not_contain_any_string([\"def\", \"ghi\"]) def test_page_should_not_contain_any_string_fails( mocker: MockerFixture, under_test: x3270 ):", "with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_string_x_times( \"b\", 1, error_message=\"my error message\" ) def", "logger from Mainframe3270.x3270 import x3270 def test_page_should_contain_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") mocker.patch(\"robot.api.logger.info\")", "under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string \"ABC\" was found'): under_test.page_should_not_contain_string(\"ABC\", ignore_case=True)", "x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_all_strings( [\"abc\", \"def\"], error_message=\"my", "def test_page_should_not_contain_string_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string \"ABC\" was", "pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_any_string( [\"def\", \"ghi\"], error_message=\"my error message\" ) def test_page_should_contain_all_strings(mocker:", "MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=[\"def\"]) with pytest.raises(Exception, match='The string \"ghi\" was not", "pytest_mock import MockerFixture from robot.api import logger from Mainframe3270.x3270 import x3270 def test_page_should_contain_string(mocker:", "): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): 
under_test.page_should_contain_string(\"def\", error_message=\"my error message\") def", "match='The string \"ghi\" was not found'): under_test.page_should_contain_all_strings([\"def\", \"ghi\"]) def test_page_should_contain_all_strings_custom_message( mocker: MockerFixture, under_test:", "\"def\"], ignore_case=True) def test_page_should_not_contain_all_strings_fails( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception,", "x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape(r'No matches found for \"\\d+\" pattern') ):", "under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape(r'No matches found for \"\\d+\" pattern')", "\"*abc*\" pattern') ): under_test.page_should_not_contain_match(\"*abc*\") def test_page_should_not_contain_match_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\")", "ignore_case=True) logger.info.assert_called_with('The string \"abc\" was found') def test_page_should_contain_string_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\")", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_match_regex(r\"\\w+\") def test_page_should_match_regex_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception,", "test_page_should_contain_any_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_contain_any_string([\"abc\", \"def\"]) def 
test_page_should_contain_any_string_ignore_case( mocker: MockerFixture, under_test:", "under_test.page_should_contain_string(\"def\") def test_page_should_contain_string_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my", "x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") under_test.page_should_contain_string_x_times(\"A\", 24, ignore_case=True) def test_page_should_contain_string_x_times_fails( mocker: MockerFixture, under_test: x3270", "return_value=\"abc\") under_test.page_should_not_contain_any_string([\"def\", \"ghi\"]) def test_page_should_not_contain_any_string_fails( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with", "mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_contain_any_string([\"ABC\", \"def\"], ignore_case=True) def test_page_should_contain_any_string_fails(mocker: MockerFixture,", "not found'): under_test.page_should_contain_all_strings([\"def\", \"ghi\"]) def test_page_should_contain_all_strings_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\")", "return_value=\"abc\") with pytest.raises(Exception, match='The string \"abc\" was found'): under_test.page_should_not_contain_any_string([\"abc\", \"def\"]) def test_page_should_not_contain_any_string_ignore_case( mocker:", "string \"a\" was not found \"1\" times, it appears \"24\" times' ): under_test.page_should_contain_string_x_times(\"a\",", "under_test.page_should_not_contain_match(\"*abc*\") def test_page_should_not_contain_match_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", 
return_value=\"abc\") with pytest.raises( Exception,", "): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_match( \"*abc*\", error_message=\"my error message\"", "test_page_should_not_contain_match_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"):", "with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_string( \"abc\", error_message=\"my error message\" ) def test_page_should_contain_any_string(mocker:", "match=\"my error message\"): under_test.page_should_not_contain_string( \"abc\", error_message=\"my error message\" ) def test_page_should_contain_any_string(mocker: MockerFixture, under_test:", "test_page_should_not_contain_match_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape('There are", "found'): under_test.page_should_not_contain_any_string([\"abc\", \"def\"], ignore_case=True) def test_page_should_not_contain_any_string_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\")", "test_page_should_contain_string_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string \"def\" was not", "was found'): under_test.page_should_not_contain_any_string([\"abc\", \"def\"]) def test_page_should_not_contain_any_string_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"ABC\")", "\"ghi\" was not found'): 
under_test.page_should_contain_all_strings([\"def\", \"ghi\"]) def test_page_should_contain_all_strings_custom_message( mocker: MockerFixture, under_test: x3270 ):", "\"*e?g*\" pattern') ): under_test.page_should_contain_match(\"*e?g*\") def test_page_should_contain_match_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"ABC\")", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_any_string( [\"abc\", \"def\"], error_message=\"my error message\"", "pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_all_strings( [\"abc\", \"def\"], error_message=\"my error message\" ) def test_page_should_contain_string_x_times(mocker:", "under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=[\"def\"]) with pytest.raises(Exception, match='The string \"ghi\" was not found'):", "def test_page_should_contain_match(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_contain_match(\"*a?c*\") def test_page_should_contain_match_fails(mocker: MockerFixture, under_test: x3270):", "under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") under_test.page_should_contain_string_x_times(\"a\", 24) def test_page_should_contain_string_x_times_ignore_case( mocker: MockerFixture, under_test: x3270 ):", "\"b\" was not found \"1\" times, it appears \"0\" times' ): under_test.page_should_contain_string_x_times(\"b\", 1)", "found \"1\" times, it appears \"24\" times' ): under_test.page_should_contain_string_x_times(\"a\", 1) with pytest.raises( Exception,", "24) def test_page_should_contain_string_x_times_ignore_case( mocker: MockerFixture, under_test: x3270 ): 
mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") under_test.page_should_contain_string_x_times(\"A\", 24, ignore_case=True)", "under_test.page_should_not_contain_string(\"ABC\") def test_page_should_not_contain_string_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_string(\"def\", ignore_case=True) def", "): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string \"abc\" was found'): under_test.page_should_not_contain_any_string([\"abc\", \"def\"]) def", "pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_any_string( [\"abc\", \"def\"], error_message=\"my error message\" ) def test_page_should_not_contain_all_strings(mocker:", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_match(\"*def*\") def test_page_should_not_contain_match_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception,", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_match_regex(r\"\\d+\") def test_page_should_not_match_regex_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") with pytest.raises( Exception,", "x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_all_strings( [\"abc\", \"def\"], error_message=\"my", "message\" ) def test_page_should_contain_all_strings(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", side_effect=[\"abc\", \"def\"]) 
under_test.page_should_contain_all_strings([\"abc\", \"def\"]) def", "with pytest.raises( Exception, match='The string \"b\" was not found \"1\" times, it appears", "match=re.escape('There are matches found for \"[a]+\" pattern') ): under_test.page_should_not_match_regex(\"[a]+\") def test_page_should_contain_match(mocker: MockerFixture, under_test:", "match=re.escape('There are matches found for \"*abc*\" pattern') ): under_test.page_should_not_contain_match(\"*ABC*\", ignore_case=True) def test_page_should_not_contain_match_custom_message( mocker:", ") def test_page_should_contain_string_x_times(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") under_test.page_should_contain_string_x_times(\"a\", 24) def test_page_should_contain_string_x_times_ignore_case( mocker:", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") under_test.page_should_contain_string_x_times(\"a\", 24) def test_page_should_contain_string_x_times_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\")", "pytest.raises( Exception, match=re.escape(r'No matches found for \"\\d+\" pattern') ): under_test.page_should_match_regex(r\"\\d+\") def test_page_should_not_match_regex(mocker: MockerFixture,", "x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_match(\"*def*\", error_message=\"my error message\")", "): under_test.page_should_contain_string_x_times(\"a\", 1) with pytest.raises( Exception, match='The string \"b\" was not found \"1\"", "return_value=\"abc\") under_test.page_should_not_contain_string(\"def\", ignore_case=True) def test_page_should_not_contain_string_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", 
return_value=\"abc\") with pytest.raises(Exception, match='The", "x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_contain_any_string([\"abc\", \"def\"]) def test_page_should_contain_any_string_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string \"abc\" was found'): under_test.page_should_not_contain_all_strings([\"ABC\", \"def\"],", "\"b\", 1, error_message=\"my error message\" ) def test_page_should_match_regex(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\")", "test_page_should_not_contain_all_strings_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string \"abc\"", "mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_string(\"def\", ignore_case=True) def test_page_should_not_contain_string_fails(mocker: MockerFixture, under_test:", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string \"ABC\" was found'): under_test.page_should_not_contain_string(\"ABC\", ignore_case=True) def test_page_should_not_contain_string_custom_message(", "mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_match(\"*def*\",", "def test_page_should_contain_all_strings_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", 
side_effect=[\"AbC\", \"DeF\"]) under_test.page_should_contain_all_strings([\"abc\", \"def\"], ignore_case=True)", "matches found for \"[a]+\" pattern') ): under_test.page_should_not_match_regex(\"[a]+\") def test_page_should_contain_match(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_string(\"def\", ignore_case=True) def test_page_should_not_contain_string_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", side_effect=[\"AbC\", \"DeF\"]) under_test.page_should_contain_all_strings([\"abc\", \"def\"], ignore_case=True) def test_page_should_contain_all_strings_fails( mocker: MockerFixture, under_test: x3270", "x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_all_strings([\"def\", \"ghi\"]) def test_page_should_not_contain_all_strings_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "def test_page_should_contain_string_x_times_fails( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") with pytest.raises( Exception, match='The", "MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape('No matches found for \"*e?g*\"", "def test_page_should_not_contain_match_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape('There are matches", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with 
pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_match( \"*abc*\", error_message=\"my error message\" )", "x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_string(\"ABC\") def test_page_should_not_contain_string_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\")", "def test_page_should_contain_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") mocker.patch(\"robot.api.logger.info\") under_test.page_should_contain_string(\"abc\") logger.info.assert_called_with('The string \"abc\" was", "\"def\"], ignore_case=True) def test_page_should_contain_all_strings_fails( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=[\"def\"]) with pytest.raises(Exception,", "): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_contain_any_string([\"ABC\", \"def\"], ignore_case=True) def test_page_should_contain_any_string_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\")", "pytest.raises( Exception, match=re.escape('There are matches found for \"[a]+\" pattern') ): under_test.page_should_not_match_regex(\"[a]+\") def test_page_should_contain_match(mocker:", "return_value=\"aBc\") mocker.patch(\"robot.api.logger.info\") under_test.page_should_contain_string(\"abc\", ignore_case=True) logger.info.assert_called_with('The string \"abc\" was found') def test_page_should_contain_string_fails(mocker: MockerFixture, under_test:", "return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_any_string( [\"abc\", \"def\"], error_message=\"my error message\" )", 
"under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape('There are matches found for \"*abc*\"", "test_page_should_contain_all_strings(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", side_effect=[\"abc\", \"def\"]) under_test.page_should_contain_all_strings([\"abc\", \"def\"]) def test_page_should_contain_all_strings_ignore_case( mocker: MockerFixture,", "): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") with pytest.raises( Exception, match='The string \"a\" was not found \"1\"", "MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_string_x_times( \"b\",", "return_value=\"abc\") under_test.page_should_contain_any_string([\"ABC\", \"def\"], ignore_case=True) def test_page_should_contain_any_string_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(", "re import pytest from pytest_mock import MockerFixture from robot.api import logger from Mainframe3270.x3270", "): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"aBc\") mocker.patch(\"robot.api.logger.info\") under_test.page_should_contain_string(\"abc\", ignore_case=True) logger.info.assert_called_with('The string \"abc\" was found') def test_page_should_contain_string_fails(mocker:", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_any_string( [\"def\", \"ghi\"], error_message=\"my error message\"", "test_page_should_contain_string_x_times_fails( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", 
return_value=\"a\") with pytest.raises( Exception, match='The string", "match='The string \"abc\" was found'): under_test.page_should_not_contain_any_string([\"abc\", \"def\"], ignore_case=True) def test_page_should_not_contain_any_string_custom_message( mocker: MockerFixture, under_test:", "test_page_should_contain_string_x_times(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") under_test.page_should_contain_string_x_times(\"a\", 24) def test_page_should_contain_string_x_times_ignore_case( mocker: MockerFixture, under_test:", "\"def\"]) def test_page_should_not_contain_all_strings_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my", "ignore_case=True) def test_page_should_not_contain_match_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my", "under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape('There are matches found for", "pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_string(\"def\", error_message=\"my error message\") def test_page_should_not_contain_string(mocker: MockerFixture, under_test: x3270):", "mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_all_strings(", "error_message=\"my error message\" ) def test_page_should_match_regex(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_match_regex(r\"\\w+\") def", "under_test: x3270): 
mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string \"def\" was not found'): under_test.page_should_contain_string(\"def\")", "MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape('There are matches found", "under_test.page_should_not_contain_any_string( [\"abc\", \"def\"], error_message=\"my error message\" ) def test_page_should_not_contain_all_strings(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") under_test.page_should_contain_string_x_times(\"A\", 24, ignore_case=True) def test_page_should_contain_string_x_times_fails( mocker: MockerFixture,", "test_page_should_not_contain_any_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_any_string([\"def\", \"ghi\"]) def test_page_should_not_contain_any_string_fails( mocker: MockerFixture, under_test:", "): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=[\"def\"]) with pytest.raises(Exception, match='The string \"ghi\" was not found'): under_test.page_should_contain_all_strings([\"def\", \"ghi\"])", "message\"): under_test.page_should_contain_any_string( [\"def\", \"ghi\"], error_message=\"my error message\" ) def test_page_should_contain_all_strings(mocker: MockerFixture, under_test: x3270):", "pytest.raises( Exception, match=re.escape(\"The strings \\\"['def', 'ghi']\\\" were not found\") ): under_test.page_should_contain_any_string([\"def\", \"ghi\"]) def", "under_test.page_should_contain_match(\"*a?c*\", ignore_case=True) def test_page_should_contain_match_custom_message( mocker: MockerFixture, under_test: x3270 ): 
mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception,", "): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"ABC\") under_test.page_should_contain_match(\"*a?c*\", ignore_case=True) def test_page_should_contain_match_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "message\" ) def test_page_should_contain_string_x_times(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") under_test.page_should_contain_string_x_times(\"a\", 24) def test_page_should_contain_string_x_times_ignore_case(", "return_value=\"abc\") with pytest.raises( Exception, match=re.escape(\"The strings \\\"['def', 'ghi']\\\" were not found\") ): under_test.page_should_contain_any_string([\"def\",", "x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string \"def\" was not found'): under_test.page_should_contain_string(\"def\") def", "matches found for \"*abc*\" pattern') ): under_test.page_should_not_contain_match(\"*ABC*\", ignore_case=True) def test_page_should_not_contain_match_custom_message( mocker: MockerFixture, under_test:", "error message\"): under_test.page_should_contain_all_strings( [\"abc\", \"def\"], error_message=\"my error message\" ) def test_page_should_not_contain_any_string(mocker: MockerFixture, under_test:", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_any_string([\"def\", \"ghi\"]) def test_page_should_not_contain_any_string_fails( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\")", "test_page_should_contain_match_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", 
return_value=\"abc\") with pytest.raises( Exception, match=re.escape('No matches found for", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"aBc\") mocker.patch(\"robot.api.logger.info\") under_test.page_should_contain_string(\"abc\", ignore_case=True) logger.info.assert_called_with('The string \"abc\" was found') def test_page_should_contain_string_fails(mocker: MockerFixture,", "was not found'): under_test.page_should_contain_string(\"def\") def test_page_should_contain_string_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\")", "mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"ABC\") under_test.page_should_contain_match(\"*a?c*\", ignore_case=True) def test_page_should_contain_match_custom_message( mocker: MockerFixture,", "string \"def\" was not found'): under_test.page_should_contain_string(\"def\") def test_page_should_contain_string_custom_message( mocker: MockerFixture, under_test: x3270 ):", "found\") ): under_test.page_should_contain_any_string([\"def\", \"ghi\"]) def test_page_should_contain_any_string_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\")", "found for \"\\d+\" pattern') ): under_test.page_should_match_regex(r\"\\d+\") def test_page_should_not_match_regex(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\")", "def test_page_should_not_contain_match_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape('There", "\"abc\" was found'): under_test.page_should_not_contain_any_string([\"abc\", \"def\"], ignore_case=True) def test_page_should_not_contain_any_string_custom_message( 
mocker: MockerFixture, under_test: x3270 ):", "return_value=\"abc\") with pytest.raises(Exception, match='The string \"abc\" was found'): under_test.page_should_not_contain_all_strings([\"ABC\", \"def\"], ignore_case=True) def test_page_should_not_contain_all_strings_fails(", "under_test.page_should_not_contain_string( \"abc\", error_message=\"my error message\" ) def test_page_should_contain_any_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\")", "under_test.page_should_contain_string(\"def\", error_message=\"my error message\") def test_page_should_not_contain_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_string(\"ABC\") def", "Exception, match=re.escape(r'No matches found for \"\\d+\" pattern') ): under_test.page_should_match_regex(r\"\\d+\") def test_page_should_not_match_regex(mocker: MockerFixture, under_test:", "): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") under_test.page_should_contain_string_x_times(\"A\", 24, ignore_case=True) def test_page_should_contain_string_x_times_fails( mocker: MockerFixture, under_test: x3270 ):", "test_page_should_contain_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") mocker.patch(\"robot.api.logger.info\") under_test.page_should_contain_string(\"abc\") logger.info.assert_called_with('The string \"abc\" was found')", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_string( \"abc\", error_message=\"my error message\" )", "side_effect=[\"abc\", \"def\"]) under_test.page_should_contain_all_strings([\"abc\", \"def\"]) def test_page_should_contain_all_strings_ignore_case( mocker: MockerFixture, 
under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", side_effect=[\"AbC\",", "MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_any_string( [\"abc\",", "message\" ) def test_page_should_contain_any_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_contain_any_string([\"abc\", \"def\"]) def test_page_should_contain_any_string_ignore_case(", "was found') def test_page_should_contain_string_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") with pytest.raises( Exception, match=re.escape('There are matches found for \"[a]+\" pattern') ):", "\"def\"], ignore_case=True) def test_page_should_contain_any_string_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape(\"The", "was found'): under_test.page_should_not_contain_string(\"ABC\", ignore_case=True) def test_page_should_not_contain_string_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\")", "string \"abc\" was found') def test_page_should_contain_string_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"aBc\")", ") def test_page_should_not_contain_any_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_any_string([\"def\", 
\"ghi\"]) def test_page_should_not_contain_any_string_fails( mocker:", "under_test.page_should_contain_match(\"*a?c*\") def test_page_should_contain_match_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape('No matches", "under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", side_effect=[\"abc\", \"def\"]) under_test.page_should_contain_all_strings([\"abc\", \"def\"]) def test_page_should_contain_all_strings_ignore_case( mocker: MockerFixture, under_test: x3270", "match=re.escape(\"The strings \\\"['def', 'ghi']\\\" were not found\") ): under_test.page_should_contain_any_string([\"def\", \"ghi\"]) def test_page_should_contain_any_string_custom_message( mocker:", ") def test_page_should_contain_any_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_contain_any_string([\"abc\", \"def\"]) def test_page_should_contain_any_string_ignore_case( mocker:", "return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_all_strings( [\"abc\", \"def\"], error_message=\"my error message\" )", "mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_string(", "error message\"): under_test.page_should_not_contain_any_string( [\"abc\", \"def\"], error_message=\"my error message\" ) def test_page_should_not_contain_all_strings(mocker: MockerFixture, under_test:", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_all_strings( [\"abc\", \"def\"], error_message=\"my error message\"", "\"a\" was not found 
\"1\" times, it appears \"24\" times' ): under_test.page_should_contain_string_x_times(\"a\", 1)", "return_value=\"a\") with pytest.raises( Exception, match=re.escape('There are matches found for \"[a]+\" pattern') ): under_test.page_should_not_match_regex(\"[a]+\")", "under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_any_string( [\"def\", \"ghi\"],", "def test_page_should_contain_all_strings(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", side_effect=[\"abc\", \"def\"]) under_test.page_should_contain_all_strings([\"abc\", \"def\"]) def test_page_should_contain_all_strings_ignore_case( mocker:", "\"DeF\"]) under_test.page_should_contain_all_strings([\"abc\", \"def\"], ignore_case=True) def test_page_should_contain_all_strings_fails( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=[\"def\"])", "pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_string( \"abc\", error_message=\"my error message\" ) def test_page_should_contain_any_string(mocker: MockerFixture,", "x3270 def test_page_should_contain_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") mocker.patch(\"robot.api.logger.info\") under_test.page_should_contain_string(\"abc\") logger.info.assert_called_with('The string \"abc\"", "are matches found for \"[a]+\" pattern') ): under_test.page_should_not_match_regex(\"[a]+\") def test_page_should_contain_match(mocker: MockerFixture, under_test: x3270):", "error message\" ) def test_page_should_contain_all_strings(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", side_effect=[\"abc\", \"def\"]) 
under_test.page_should_contain_all_strings([\"abc\", \"def\"])", "x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") mocker.patch(\"robot.api.logger.info\") under_test.page_should_contain_string(\"abc\") logger.info.assert_called_with('The string \"abc\" was found') def test_page_should_contain_string_ignore_case( mocker:", "string \"abc\" was found'): under_test.page_should_not_contain_all_strings([\"ABC\", \"def\"], ignore_case=True) def test_page_should_not_contain_all_strings_fails( mocker: MockerFixture, under_test: x3270", "def test_page_should_not_contain_all_strings(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_all_strings([\"def\", \"ghi\"]) def test_page_should_not_contain_all_strings_ignore_case( mocker: MockerFixture,", "test_page_should_not_contain_all_strings_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"):", "return_value=\"abc\") with pytest.raises( Exception, match=re.escape('No matches found for \"*e?g*\" pattern') ): under_test.page_should_contain_match(\"*e?g*\") def", "robot.api import logger from Mainframe3270.x3270 import x3270 def test_page_should_contain_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "return_value=\"abc\") with pytest.raises(Exception, match='The string \"abc\" was found'): under_test.page_should_not_contain_all_strings([\"abc\", \"def\"]) def test_page_should_not_contain_all_strings_custom_message( mocker:", "under_test.page_should_contain_all_strings([\"def\", \"ghi\"]) def test_page_should_contain_all_strings_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with 
pytest.raises(Exception,", "return_value=\"abc\") with pytest.raises( Exception, match=re.escape('There are matches found for \"*abc*\" pattern') ): under_test.page_should_not_contain_match(\"*abc*\")", "error message\" ) def test_page_should_not_contain_all_strings(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_all_strings([\"def\", \"ghi\"]) def", "under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_all_strings( [\"abc\", \"def\"],", "x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_contain_any_string([\"ABC\", \"def\"], ignore_case=True) def test_page_should_contain_any_string_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "return_value=\"abc\") under_test.page_should_not_contain_string(\"ABC\") def test_page_should_not_contain_string_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_string(\"def\", ignore_case=True)", "x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_match(\"*def*\") def test_page_should_not_contain_match_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(", "under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape(\"The strings \\\"['def', 'ghi']\\\" were not", "def test_page_should_contain_string_custom_message( mocker: MockerFixture, under_test: x3270 ): 
mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape('There are matches found for \"*abc*\" pattern') ):", "pattern') ): under_test.page_should_contain_match(\"*e?g*\") def test_page_should_contain_match_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"ABC\") under_test.page_should_contain_match(\"*a?c*\",", "with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_string(\"def\", error_message=\"my error message\") def test_page_should_not_contain_string(mocker: MockerFixture, under_test:", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_string(\"def\", ignore_case=True) def test_page_should_not_contain_string_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception,", "pytest.raises( Exception, match=re.escape('There are matches found for \"*abc*\" pattern') ): under_test.page_should_not_contain_match(\"*abc*\") def test_page_should_not_contain_match_ignore_case(", "under_test.page_should_contain_all_strings([\"abc\", \"def\"], ignore_case=True) def test_page_should_contain_all_strings_fails( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=[\"def\"]) with", "pytest.raises(Exception, match='The string \"abc\" was found'): under_test.page_should_not_contain_all_strings([\"ABC\", \"def\"], ignore_case=True) def test_page_should_not_contain_all_strings_fails( mocker: MockerFixture,", "def test_page_should_not_contain_all_strings_custom_message( mocker: MockerFixture, under_test: x3270 ): 
mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error", "\"def\"]) def test_page_should_contain_all_strings_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", side_effect=[\"AbC\", \"DeF\"]) under_test.page_should_contain_all_strings([\"abc\", \"def\"],", "x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_string( \"abc\", error_message=\"my error", "with pytest.raises( Exception, match=re.escape('There are matches found for \"*abc*\" pattern') ): under_test.page_should_not_contain_match(\"*ABC*\", ignore_case=True)", "return_value=\"abc\") with pytest.raises(Exception, match='The string \"ABC\" was found'): under_test.page_should_not_contain_string(\"ABC\", ignore_case=True) def test_page_should_not_contain_string_custom_message( mocker:", "\"ghi\"], error_message=\"my error message\" ) def test_page_should_contain_all_strings(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", side_effect=[\"abc\", \"def\"])", "with pytest.raises(Exception, match='The string \"abc\" was found'): under_test.page_should_not_contain_any_string([\"abc\", \"def\"]) def test_page_should_not_contain_any_string_ignore_case( mocker: MockerFixture,", "found for \"[a]+\" pattern') ): under_test.page_should_not_match_regex(\"[a]+\") def test_page_should_contain_match(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\")", "\"abc\" was found'): under_test.page_should_not_contain_all_strings([\"abc\", \"def\"]) def test_page_should_not_contain_all_strings_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "x3270): 
mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape('No matches found for \"*e?g*\" pattern') ):", "with pytest.raises(Exception, match='The string \"abc\" was found'): under_test.page_should_not_contain_all_strings([\"abc\", \"def\"]) def test_page_should_not_contain_all_strings_custom_message( mocker: MockerFixture,", "): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_string(\"def\", ignore_case=True) def test_page_should_not_contain_string_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with", "under_test.page_should_contain_string_x_times(\"A\", 24, ignore_case=True) def test_page_should_contain_string_x_times_fails( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") with", "test_page_should_contain_any_string_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"):", "under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") under_test.page_should_contain_string_x_times(\"A\", 24, ignore_case=True) def test_page_should_contain_string_x_times_fails( mocker: MockerFixture, under_test:", "test_page_should_not_contain_any_string_fails( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string \"abc\"", "test_page_should_not_contain_any_string_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"):", 
"mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_string_x_times( \"b\", 1, error_message=\"my error message\"", "match=\"my error message\"): under_test.page_should_not_contain_all_strings( [\"abc\", \"def\"], error_message=\"my error message\" ) def test_page_should_contain_string_x_times(mocker: MockerFixture,", "test_page_should_match_regex(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_match_regex(r\"\\w+\") def test_page_should_match_regex_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_contain_match(\"*a?c*\") def test_page_should_contain_match_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\")", "error message\") def test_page_should_not_contain_match(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_match(\"*def*\") def test_page_should_not_contain_match_fails(mocker: MockerFixture,", "with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_any_string( [\"def\", \"ghi\"], error_message=\"my error message\" ) def", "pytest.raises(Exception, match='The string \"abc\" was found'): under_test.page_should_not_contain_any_string([\"abc\", \"def\"], ignore_case=True) def test_page_should_not_contain_any_string_custom_message( mocker: MockerFixture,", "pytest.raises(Exception, match='The string \"abc\" was found'): under_test.page_should_not_contain_any_string([\"abc\", \"def\"]) def 
test_page_should_not_contain_any_string_ignore_case( mocker: MockerFixture, under_test:", "strings \\\"['def', 'ghi']\\\" were not found\") ): under_test.page_should_contain_any_string([\"def\", \"ghi\"]) def test_page_should_contain_any_string_custom_message( mocker: MockerFixture,", "MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_match_regex(r\"\\w+\") def test_page_should_match_regex_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\")", "\"abc\" was found'): under_test.page_should_not_contain_any_string([\"abc\", \"def\"]) def test_page_should_not_contain_any_string_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "\"abc\" was found'): under_test.page_should_not_contain_all_strings([\"ABC\", \"def\"], ignore_case=True) def test_page_should_not_contain_all_strings_fails( mocker: MockerFixture, under_test: x3270 ):", "def test_page_should_not_contain_all_strings_fails( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string", "under_test.page_should_not_match_regex(\"[a]+\") def test_page_should_contain_match(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_contain_match(\"*a?c*\") def test_page_should_contain_match_fails(mocker: MockerFixture, under_test:", "with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_any_string( [\"abc\", \"def\"], error_message=\"my error message\" ) def", "mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=[\"def\"]) with pytest.raises(Exception, match='The string \"ghi\" was", 
"return_value=[\"def\"]) with pytest.raises(Exception, match='The string \"ghi\" was not found'): under_test.page_should_contain_all_strings([\"def\", \"ghi\"]) def test_page_should_contain_all_strings_custom_message(", "pytest.raises(Exception, match='The string \"def\" was not found'): under_test.page_should_contain_string(\"def\") def test_page_should_contain_string_custom_message( mocker: MockerFixture, under_test:", "match=\"my error message\"): under_test.page_should_contain_match(\"*def*\", error_message=\"my error message\") def test_page_should_not_contain_match(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "under_test.page_should_contain_any_string([\"ABC\", \"def\"], ignore_case=True) def test_page_should_contain_any_string_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception,", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") with pytest.raises( Exception, match='The string \"a\" was not found \"1\" times,", "MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string \"def\" was not found'):", "match=re.escape('There are matches found for \"*abc*\" pattern') ): under_test.page_should_not_contain_match(\"*abc*\") def test_page_should_not_contain_match_ignore_case( mocker: MockerFixture,", "match=\"my error message\"): under_test.page_should_contain_all_strings( [\"abc\", \"def\"], error_message=\"my error message\" ) def test_page_should_not_contain_any_string(mocker: MockerFixture,", "MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") under_test.page_should_contain_string_x_times(\"a\", 24) def test_page_should_contain_string_x_times_ignore_case( mocker: MockerFixture, under_test: x3270", 
"pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_match(\"*def*\", error_message=\"my error message\") def test_page_should_not_contain_match(mocker: MockerFixture, under_test: x3270):", "string \"abc\" was found'): under_test.page_should_not_contain_any_string([\"abc\", \"def\"], ignore_case=True) def test_page_should_not_contain_any_string_custom_message( mocker: MockerFixture, under_test: x3270", "test_page_should_not_match_regex_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") with pytest.raises( Exception, match=re.escape('There are matches found", "found'): under_test.page_should_contain_string(\"def\") def test_page_should_contain_string_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception,", "def test_page_should_contain_string_x_times(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") under_test.page_should_contain_string_x_times(\"a\", 24) def test_page_should_contain_string_x_times_ignore_case( mocker: MockerFixture,", "def test_page_should_contain_string_x_times_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") under_test.page_should_contain_string_x_times(\"A\", 24, ignore_case=True) def", "): under_test.page_should_contain_string_x_times(\"b\", 1) def test_page_should_contain_string_x_times_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") with", "with pytest.raises( Exception, match=re.escape(\"The strings \\\"['def', 'ghi']\\\" were not found\") ): under_test.page_should_contain_any_string([\"def\", \"ghi\"])", "\"[a]+\" pattern') ): 
under_test.page_should_not_match_regex(\"[a]+\") def test_page_should_contain_match(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_contain_match(\"*a?c*\") def", "MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string \"ABC\" was found'): under_test.page_should_not_contain_string(\"ABC\",", "under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_string_x_times( \"b\", 1,", "x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_string_x_times( \"b\", 1, error_message=\"my", "test_page_should_not_contain_string_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_string(\"def\", ignore_case=True) def test_page_should_not_contain_string_fails(mocker: MockerFixture,", "return_value=\"a\") with pytest.raises( Exception, match='The string \"a\" was not found \"1\" times, it", "\"def\"], error_message=\"my error message\" ) def test_page_should_not_contain_all_strings(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_all_strings([\"def\",", "x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string \"ABC\" was found'): under_test.page_should_not_contain_string(\"ABC\", ignore_case=True) def", "): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, 
match='The string \"abc\" was found'): under_test.page_should_not_contain_all_strings([\"ABC\", \"def\"], ignore_case=True)", "error message\" ) def test_page_should_contain_string_x_times(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") under_test.page_should_contain_string_x_times(\"a\", 24) def", "mocker.patch(\"robot.api.logger.info\") under_test.page_should_contain_string(\"abc\", ignore_case=True) logger.info.assert_called_with('The string \"abc\" was found') def test_page_should_contain_string_fails(mocker: MockerFixture, under_test: x3270):", "\\\"['def', 'ghi']\\\" were not found\") ): under_test.page_should_contain_any_string([\"def\", \"ghi\"]) def test_page_should_contain_any_string_custom_message( mocker: MockerFixture, under_test:", "was found'): under_test.page_should_not_contain_any_string([\"abc\", \"def\"], ignore_case=True) def test_page_should_not_contain_any_string_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "[\"abc\", \"def\"], error_message=\"my error message\" ) def test_page_should_contain_string_x_times(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\")", "under_test.page_should_contain_string_x_times(\"a\", 1) with pytest.raises( Exception, match='The string \"b\" was not found \"1\" times,", "def test_page_should_contain_all_strings_fails( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=[\"def\"]) with pytest.raises(Exception, match='The string", "MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape('There are matches found for", "under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") 
under_test.page_should_match_regex(r\"\\w+\") def test_page_should_match_regex_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with", "side_effect=[\"AbC\", \"DeF\"]) under_test.page_should_contain_all_strings([\"abc\", \"def\"], ignore_case=True) def test_page_should_contain_all_strings_fails( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "under_test.page_should_not_contain_match(\"*ABC*\", ignore_case=True) def test_page_should_not_contain_match_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception,", "test_page_should_contain_string_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"aBc\") mocker.patch(\"robot.api.logger.info\") under_test.page_should_contain_string(\"abc\", ignore_case=True) logger.info.assert_called_with('The string", "under_test.page_should_contain_any_string([\"def\", \"ghi\"]) def test_page_should_contain_any_string_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception,", "return_value=\"abc\") under_test.page_should_contain_any_string([\"abc\", \"def\"]) def test_page_should_contain_any_string_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_contain_any_string([\"ABC\",", "def test_page_should_not_contain_all_strings_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string", "under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", 
return_value=\"abc\") under_test.page_should_not_contain_string(\"ABC\") def test_page_should_not_contain_string_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "found'): under_test.page_should_contain_all_strings([\"def\", \"ghi\"]) def test_page_should_contain_all_strings_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with", "ignore_case=True) def test_page_should_not_contain_string_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my", "test_page_should_contain_match_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"ABC\") under_test.page_should_contain_match(\"*a?c*\", ignore_case=True) def test_page_should_contain_match_custom_message( mocker:", "ignore_case=True) def test_page_should_contain_all_strings_fails( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=[\"def\"]) with pytest.raises(Exception, match='The", "x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_string(\"def\", error_message=\"my error message\")", "under_test.page_should_not_contain_all_strings([\"def\", \"ghi\"]) def test_page_should_not_contain_all_strings_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception,", "x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"aBc\") mocker.patch(\"robot.api.logger.info\") under_test.page_should_contain_string(\"abc\", ignore_case=True) 
logger.info.assert_called_with('The string \"abc\" was found') def", "ignore_case=True) def test_page_should_not_contain_all_strings_fails( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The", "under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") with pytest.raises( Exception, match=re.escape('There are matches found for \"[a]+\"", "string \"b\" was not found \"1\" times, it appears \"0\" times' ): under_test.page_should_contain_string_x_times(\"b\",", "x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string \"abc\" was found'): under_test.page_should_not_contain_all_strings([\"abc\", \"def\"])", "with pytest.raises(Exception, match='The string \"abc\" was found'): under_test.page_should_not_contain_any_string([\"abc\", \"def\"], ignore_case=True) def test_page_should_not_contain_any_string_custom_message( mocker:", "from Mainframe3270.x3270 import x3270 def test_page_should_contain_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") mocker.patch(\"robot.api.logger.info\") under_test.page_should_contain_string(\"abc\")", "pytest.raises(Exception, match='The string \"ghi\" was not found'): under_test.page_should_contain_all_strings([\"def\", \"ghi\"]) def test_page_should_contain_all_strings_custom_message( mocker: MockerFixture,", "string \"ABC\" was found'): under_test.page_should_not_contain_string(\"ABC\", ignore_case=True) def test_page_should_not_contain_string_custom_message( mocker: MockerFixture, under_test: x3270 ):", "test_page_should_not_contain_all_strings_fails( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string 
\"abc\"", "test_page_should_not_contain_match_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape('There are matches found", "\"24\" times' ): under_test.page_should_contain_string_x_times(\"a\", 1) with pytest.raises( Exception, match='The string \"b\" was not", "return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_any_string( [\"def\", \"ghi\"], error_message=\"my error message\" )", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", side_effect=[\"AbC\", \"DeF\"]) under_test.page_should_contain_all_strings([\"abc\", \"def\"], ignore_case=True) def test_page_should_contain_all_strings_fails( mocker: MockerFixture, under_test: x3270 ):", "was found'): under_test.page_should_not_contain_all_strings([\"ABC\", \"def\"], ignore_case=True) def test_page_should_not_contain_all_strings_fails( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", side_effect=[\"AbC\", \"DeF\"]) under_test.page_should_contain_all_strings([\"abc\", \"def\"], ignore_case=True) def test_page_should_contain_all_strings_fails( mocker:", "ignore_case=True) def test_page_should_contain_string_x_times_fails( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") with pytest.raises( Exception,", "under_test.page_should_match_regex(r\"\\d+\") def test_page_should_not_match_regex(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_match_regex(r\"\\d+\") def test_page_should_not_match_regex_fails(mocker: MockerFixture, under_test:", "x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", 
return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_any_string( [\"def\", \"ghi\"], error_message=\"my", "return_value=\"a\") under_test.page_should_contain_string_x_times(\"a\", 24) def test_page_should_contain_string_x_times_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") under_test.page_should_contain_string_x_times(\"A\",", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") under_test.page_should_contain_string_x_times(\"A\", 24, ignore_case=True) def test_page_should_contain_string_x_times_fails( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "[\"abc\", \"def\"], error_message=\"my error message\" ) def test_page_should_not_contain_all_strings(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\")", "message\"): under_test.page_should_not_contain_any_string( [\"abc\", \"def\"], error_message=\"my error message\" ) def test_page_should_not_contain_all_strings(mocker: MockerFixture, under_test: x3270):", "def test_page_should_match_regex_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape(r'No matches found", "): under_test.page_should_contain_any_string([\"def\", \"ghi\"]) def test_page_should_contain_any_string_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with", "x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"ABC\") with pytest.raises(Exception, match='The string \"abc\" was found'): under_test.page_should_not_contain_any_string([\"abc\", \"def\"],", "matches found for \"\\d+\" pattern') ): 
under_test.page_should_match_regex(r\"\\d+\") def test_page_should_not_match_regex(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", side_effect=[\"abc\", \"def\"]) under_test.page_should_contain_all_strings([\"abc\", \"def\"]) def test_page_should_contain_all_strings_ignore_case( mocker: MockerFixture, under_test: x3270 ):", "x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_match( \"*abc*\", error_message=\"my error", "def test_page_should_not_contain_any_string_fails( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_match(\"*def*\", error_message=\"my error message\") def test_page_should_not_contain_match(mocker:", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"ABC\") with pytest.raises(Exception, match='The string \"abc\" was found'): under_test.page_should_not_contain_any_string([\"abc\", \"def\"], ignore_case=True) def", "def test_page_should_not_contain_string_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_string(\"def\", ignore_case=True) def test_page_should_not_contain_string_fails(mocker:", "Exception, match='The string \"a\" was not found \"1\" times, it appears \"24\" times'", "error_message=\"my error message\" ) def test_page_should_contain_all_strings(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", 
side_effect=[\"abc\", \"def\"]) under_test.page_should_contain_all_strings([\"abc\",", "times' ): under_test.page_should_contain_string_x_times(\"b\", 1) def test_page_should_contain_string_x_times_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\")", "import logger from Mainframe3270.x3270 import x3270 def test_page_should_contain_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\")", "with pytest.raises(Exception, match='The string \"def\" was not found'): under_test.page_should_contain_string(\"def\") def test_page_should_contain_string_custom_message( mocker: MockerFixture,", "return_value=\"abc\") under_test.page_should_match_regex(r\"\\w+\") def test_page_should_match_regex_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape(r'No", "message\") def test_page_should_not_contain_match(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_match(\"*def*\") def test_page_should_not_contain_match_fails(mocker: MockerFixture, under_test:", "def test_page_should_contain_string_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string \"def\" was", "not found \"1\" times, it appears \"0\" times' ): under_test.page_should_contain_string_x_times(\"b\", 1) def test_page_should_contain_string_x_times_custom_message(", "MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_match_regex(r\"\\d+\") def test_page_should_not_match_regex_fails(mocker: MockerFixture, under_test: x3270): 
mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\")", ") def test_page_should_contain_all_strings(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", side_effect=[\"abc\", \"def\"]) under_test.page_should_contain_all_strings([\"abc\", \"def\"]) def test_page_should_contain_all_strings_ignore_case(", "pytest.raises( Exception, match=re.escape('No matches found for \"*e?g*\" pattern') ): under_test.page_should_contain_match(\"*e?g*\") def test_page_should_contain_match_ignore_case( mocker:", "1) with pytest.raises( Exception, match='The string \"b\" was not found \"1\" times, it", "): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape('There are matches found for \"*abc*\" pattern')", "pytest.raises(Exception, match='The string \"abc\" was found'): under_test.page_should_not_contain_all_strings([\"abc\", \"def\"]) def test_page_should_not_contain_all_strings_custom_message( mocker: MockerFixture, under_test:", "): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_string_x_times( \"b\", 1, error_message=\"my error", "string \"abc\" was found'): under_test.page_should_not_contain_any_string([\"abc\", \"def\"]) def test_page_should_not_contain_any_string_ignore_case( mocker: MockerFixture, under_test: x3270 ):", "def test_page_should_contain_any_string_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error", "found'): under_test.page_should_not_contain_all_strings([\"abc\", \"def\"]) def test_page_should_not_contain_all_strings_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") 
with", "pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_string_x_times( \"b\", 1, error_message=\"my error message\" ) def test_page_should_match_regex(mocker:", "def test_page_should_not_contain_any_string_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error", "message\"): under_test.page_should_contain_all_strings( [\"abc\", \"def\"], error_message=\"my error message\" ) def test_page_should_not_contain_any_string(mocker: MockerFixture, under_test: x3270):", "with pytest.raises( Exception, match=re.escape(r'No matches found for \"\\d+\" pattern') ): under_test.page_should_match_regex(r\"\\d+\") def test_page_should_not_match_regex(mocker:", "pytest from pytest_mock import MockerFixture from robot.api import logger from Mainframe3270.x3270 import x3270", "test_page_should_not_match_regex(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_match_regex(r\"\\d+\") def test_page_should_not_match_regex_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "times' ): under_test.page_should_contain_string_x_times(\"a\", 1) with pytest.raises( Exception, match='The string \"b\" was not found", "not found'): under_test.page_should_contain_string(\"def\") def test_page_should_contain_string_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with", "under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"ABC\") under_test.page_should_contain_match(\"*a?c*\", ignore_case=True) def test_page_should_contain_match_custom_message( mocker: MockerFixture, under_test: x3270", "times, it appears \"0\" times' ): 
under_test.page_should_contain_string_x_times(\"b\", 1) def test_page_should_contain_string_x_times_custom_message( mocker: MockerFixture, under_test:", "found for \"*abc*\" pattern') ): under_test.page_should_not_contain_match(\"*abc*\") def test_page_should_not_contain_match_ignore_case( mocker: MockerFixture, under_test: x3270 ):", "): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"ABC\") with pytest.raises(Exception, match='The string \"abc\" was found'): under_test.page_should_not_contain_any_string([\"abc\", \"def\"], ignore_case=True)", "MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_all_strings( [\"abc\",", "x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_any_string([\"def\", \"ghi\"]) def test_page_should_not_contain_any_string_fails( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "error_message=\"my error message\") def test_page_should_not_contain_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_string(\"ABC\") def test_page_should_not_contain_string_ignore_case(", "pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_all_strings( [\"abc\", \"def\"], error_message=\"my error message\" ) def test_page_should_not_contain_any_string(mocker:", "Exception, match=re.escape('There are matches found for \"[a]+\" pattern') ): under_test.page_should_not_match_regex(\"[a]+\") def test_page_should_contain_match(mocker: MockerFixture,", "found'): under_test.page_should_not_contain_all_strings([\"ABC\", \"def\"], ignore_case=True) def test_page_should_not_contain_all_strings_fails( mocker: 
MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\")", "import re import pytest from pytest_mock import MockerFixture from robot.api import logger from", "with pytest.raises(Exception, match='The string \"abc\" was found'): under_test.page_should_not_contain_all_strings([\"ABC\", \"def\"], ignore_case=True) def test_page_should_not_contain_all_strings_fails( mocker:", "under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_contain_any_string([\"abc\", \"def\"]) def test_page_should_contain_any_string_ignore_case( mocker: MockerFixture, under_test: x3270 ):", "): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_all_strings( [\"abc\", \"def\"], error_message=\"my error", "under_test.page_should_not_contain_all_strings( [\"abc\", \"def\"], error_message=\"my error message\" ) def test_page_should_contain_string_x_times(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "\"ghi\"]) def test_page_should_contain_any_string_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my", "test_page_should_not_contain_match(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_match(\"*def*\") def test_page_should_not_contain_match_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "error message\"): under_test.page_should_not_contain_all_strings( [\"abc\", \"def\"], error_message=\"my error message\" ) def test_page_should_contain_string_x_times(mocker: MockerFixture, under_test:", 
"x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape(\"The strings \\\"['def', 'ghi']\\\" were not found\")", "\"ABC\" was found'): under_test.page_should_not_contain_string(\"ABC\", ignore_case=True) def test_page_should_not_contain_string_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "times, it appears \"24\" times' ): under_test.page_should_contain_string_x_times(\"a\", 1) with pytest.raises( Exception, match='The string", "under_test.page_should_not_contain_all_strings([\"abc\", \"def\"]) def test_page_should_not_contain_all_strings_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception,", "mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string \"abc\" was", "under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") with pytest.raises( Exception, match='The string \"a\" was not", "Exception, match='The string \"b\" was not found \"1\" times, it appears \"0\" times'", "): under_test.page_should_not_contain_match(\"*ABC*\", ignore_case=True) def test_page_should_not_contain_match_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with", "message\" ) def test_page_should_not_contain_all_strings(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_all_strings([\"def\", \"ghi\"]) def test_page_should_not_contain_all_strings_ignore_case(", "def test_page_should_contain_all_strings_custom_message( mocker: MockerFixture, under_test: x3270 ): 
mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error", "under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_all_strings( [\"abc\", \"def\"],", "x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") under_test.page_should_contain_string_x_times(\"a\", 24) def test_page_should_contain_string_x_times_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "pattern') ): under_test.page_should_not_match_regex(\"[a]+\") def test_page_should_contain_match(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_contain_match(\"*a?c*\") def test_page_should_contain_match_fails(mocker:", "match='The string \"abc\" was found'): under_test.page_should_not_contain_all_strings([\"abc\", \"def\"]) def test_page_should_not_contain_all_strings_custom_message( mocker: MockerFixture, under_test: x3270", "message\" ) def test_page_should_match_regex(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_match_regex(r\"\\w+\") def test_page_should_match_regex_fails(mocker: MockerFixture,", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises( Exception, match=re.escape(\"The strings \\\"['def', 'ghi']\\\" were not found\") ):", "\"def\" was not found'): under_test.page_should_contain_string(\"def\") def test_page_should_contain_string_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", 
return_value=\"abc\") mocker.patch(\"robot.api.logger.info\") under_test.page_should_contain_string(\"abc\") logger.info.assert_called_with('The string \"abc\" was found') def", "error_message=\"my error message\" ) def test_page_should_not_contain_any_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_any_string([\"def\", \"ghi\"])", "\"1\" times, it appears \"24\" times' ): under_test.page_should_contain_string_x_times(\"a\", 1) with pytest.raises( Exception, match='The", "test_page_should_contain_all_strings_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"):", "def test_page_should_contain_string_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"aBc\") mocker.patch(\"robot.api.logger.info\") under_test.page_should_contain_string(\"abc\", ignore_case=True) logger.info.assert_called_with('The", "match='The string \"def\" was not found'): under_test.page_should_contain_string(\"def\") def test_page_should_contain_string_custom_message( mocker: MockerFixture, under_test: x3270", "error_message=\"my error message\") def test_page_should_not_contain_match(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_match(\"*def*\") def test_page_should_not_contain_match_fails(mocker:", "test_page_should_not_contain_all_strings(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_all_strings([\"def\", \"ghi\"]) def test_page_should_not_contain_all_strings_ignore_case( mocker: MockerFixture, under_test:", 
"under_test.page_should_not_contain_all_strings([\"ABC\", \"def\"], ignore_case=True) def test_page_should_not_contain_all_strings_fails( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with", "test_page_should_not_contain_string_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string \"ABC\" was found'):", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string \"abc\" was found'): under_test.page_should_not_contain_all_strings([\"abc\", \"def\"]) def test_page_should_not_contain_all_strings_custom_message(", "MockerFixture from robot.api import logger from Mainframe3270.x3270 import x3270 def test_page_should_contain_string(mocker: MockerFixture, under_test:", "x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") with pytest.raises( Exception, match='The string \"a\" was not found", "error message\" ) def test_page_should_match_regex(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_match_regex(r\"\\w+\") def test_page_should_match_regex_fails(mocker:", "mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") with pytest.raises( Exception, match='The string \"a\"", "Exception, match=re.escape('There are matches found for \"*abc*\" pattern') ): under_test.page_should_not_contain_match(\"*abc*\") def test_page_should_not_contain_match_ignore_case( mocker:", "under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"aBc\") mocker.patch(\"robot.api.logger.info\") under_test.page_should_contain_string(\"abc\", ignore_case=True) logger.info.assert_called_with('The 
string \"abc\" was found')", "under_test.page_should_contain_any_string( [\"def\", \"ghi\"], error_message=\"my error message\" ) def test_page_should_contain_all_strings(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\",", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_string(\"def\", error_message=\"my error message\") def test_page_should_not_contain_string(mocker:", "found'): under_test.page_should_not_contain_string(\"ABC\", ignore_case=True) def test_page_should_not_contain_string_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with", "error_message=\"my error message\" ) def test_page_should_contain_any_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_contain_any_string([\"abc\", \"def\"])", "return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_not_contain_string( \"abc\", error_message=\"my error message\" ) def", "are matches found for \"*abc*\" pattern') ): under_test.page_should_not_contain_match(\"*abc*\") def test_page_should_not_contain_match_ignore_case( mocker: MockerFixture, under_test:", "test_page_should_not_contain_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_string(\"ABC\") def test_page_should_not_contain_string_ignore_case( mocker: MockerFixture, under_test: x3270", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string \"def\" was not found'): under_test.page_should_contain_string(\"def\") def 
test_page_should_contain_string_custom_message(", "mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") under_test.page_should_contain_string_x_times(\"A\", 24, ignore_case=True) def test_page_should_contain_string_x_times_fails( mocker:", "MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") with pytest.raises( Exception, match='The string \"a\" was", "Exception, match=re.escape('No matches found for \"*e?g*\" pattern') ): under_test.page_should_contain_match(\"*e?g*\") def test_page_should_contain_match_ignore_case( mocker: MockerFixture,", "return_value=\"ABC\") with pytest.raises(Exception, match='The string \"abc\" was found'): under_test.page_should_not_contain_any_string([\"abc\", \"def\"], ignore_case=True) def test_page_should_not_contain_any_string_custom_message(", "\"def\"], error_message=\"my error message\" ) def test_page_should_contain_string_x_times(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") under_test.page_should_contain_string_x_times(\"a\",", "under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_string(\"def\", error_message=\"my error", "under_test.page_should_contain_all_strings([\"abc\", \"def\"]) def test_page_should_contain_all_strings_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", side_effect=[\"AbC\", \"DeF\"]) under_test.page_should_contain_all_strings([\"abc\",", "match=re.escape('No matches found for \"*e?g*\" pattern') ): under_test.page_should_contain_match(\"*e?g*\") def test_page_should_contain_match_ignore_case( mocker: MockerFixture, under_test:", "x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", 
return_value=\"abc\") under_test.page_should_contain_match(\"*a?c*\") def test_page_should_contain_match_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(", "mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match=\"my error message\"): under_test.page_should_contain_any_string(", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=[\"def\"]) with pytest.raises(Exception, match='The string \"ghi\" was not found'): under_test.page_should_contain_all_strings([\"def\", \"ghi\"]) def", "): under_test.page_should_not_contain_match(\"*abc*\") def test_page_should_not_contain_match_ignore_case( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(", "return_value=\"ABC\") under_test.page_should_contain_match(\"*a?c*\", ignore_case=True) def test_page_should_contain_match_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with", "found for \"*e?g*\" pattern') ): under_test.page_should_contain_match(\"*e?g*\") def test_page_should_contain_match_ignore_case( mocker: MockerFixture, under_test: x3270 ):", "under_test.page_should_not_contain_any_string([\"abc\", \"def\"], ignore_case=True) def test_page_should_not_contain_any_string_custom_message( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with", "mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The string \"abc\" was found'): under_test.page_should_not_contain_any_string([\"abc\", \"def\"]) def test_page_should_not_contain_any_string_ignore_case(", "\"ghi\"]) def 
test_page_should_not_contain_any_string_fails( mocker: MockerFixture, under_test: x3270 ): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") with pytest.raises(Exception, match='The", "MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_all_strings([\"def\", \"ghi\"]) def test_page_should_not_contain_all_strings_ignore_case( mocker: MockerFixture, under_test: x3270", "message\"): under_test.page_should_contain_string_x_times( \"b\", 1, error_message=\"my error message\" ) def test_page_should_match_regex(mocker: MockerFixture, under_test: x3270):", "Exception, match=re.escape('There are matches found for \"*abc*\" pattern') ): under_test.page_should_not_contain_match(\"*ABC*\", ignore_case=True) def test_page_should_not_contain_match_custom_message(", "under_test.page_should_contain_string(\"abc\") logger.info.assert_called_with('The string \"abc\" was found') def test_page_should_contain_string_ignore_case( mocker: MockerFixture, under_test: x3270 ):", "under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_match_regex(r\"\\d+\") def test_page_should_not_match_regex_fails(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"a\") with", "\"def\"], error_message=\"my error message\" ) def test_page_should_not_contain_any_string(mocker: MockerFixture, under_test: x3270): mocker.patch(\"Mainframe3270.py3270.Emulator.string_get\", return_value=\"abc\") under_test.page_should_not_contain_any_string([\"def\"," ]
[ "[2, 2]]).translate(1) self.assertEqual(sp[1.5], 0) self.assertEqual(sp[2.5], 1) def test_slice(self): series = DiscreteSeries([[0, 0], [1,", "'(-1;6)') self.assertIsInstance(sa, DiscreteSeries) self.assertEqual(sa.data, [(i, i ** 2) for i in PTS]) sa", "1, 2, 3, 1, 2, 3, 1]) def test_comp_discrete(self): ser1 = ModuloSeries(FunctionSeries(lambda x:", "1], [2, 2]], domain=Interval(0, 3, True, True)) self.assertEqual(s[0], 0) self.assertEqual(s[0.5], 0) self.assertEqual(s[1], 1)", "a, b: a + b) self.assertIsInstance(sc, DiscreteSeries) self.assertEqual(sc.eval_points([0, 1, 2]), [1, 3, 5])", "1) def test_slice_outdomain(self): series = DiscreteSeries([[0, 0], [1, 1], [2, 2]]) self.assertRaises(NotInDomainError, lambda:", "1.5) self.assertRaises(NotInDomainError, lambda: sp[0]) self.assertRaises(NotInDomainError, lambda: sp[2]) self.assertEqual(sp.domain.start, 0.5) self.assertEqual(sp.domain.stop, 1.5) def test_apply(self):", "+ y)) class TestModuloSeries(unittest.TestCase): def test_exceptions(self): self.assertRaises(ValueError, lambda: ModuloSeries( FunctionSeries(NOOP, '(-inf; 0>'))) self.assertRaises(ValueError,", "FunctionSeries(NOOP, '(-inf; 0>'))) self.assertRaises(ValueError, lambda: ModuloSeries( FunctionSeries(NOOP, '(-inf; inf)'))) self.assertRaises(ValueError, lambda: ModuloSeries(FunctionSeries(NOOP, '<0;", "0.5) self.assertEqual(sp[1.5], 1.5) self.assertRaises(NotInDomainError, lambda: sp[0]) self.assertRaises(NotInDomainError, lambda: sp[2]) self.assertEqual(sp.domain.start, 0.5) self.assertEqual(sp.domain.stop, 1.5)", "= ModuloSeries( DiscreteSeries([(0, 1), (1, 2), (2, 3)], '<0;3)')) self.assertEquals(series[3], 1) self.assertEquals(series[4], 2)", "5], '(-1;6)')) PTS = [-1, 0, 1, 2, 3, 4, 5] sa =", "a.join(b, lambda i, x, y: x + y) def test_uncov(self): self.assertRaises(DomainError, lambda: DiscreteSeries([[0,", "1), (1, 3), (2, 5)]) def test_eval2(self): sa = DiscreteSeries([[0, 0], [1, 1],", "self.assertEqual(sc.eval_points([0, 1, 2]), [0, 2, 
4]) self.assertIsInstance(sc, DiscreteSeries) self.assertEqual(sc.data, [(0, 0), (1, 2),", "v: k) self.assertEquals(sb.data, [(0, 0), (1, 1), (2, 2)]) def test_eval3(self): sa =", "y) def test_uncov(self): self.assertRaises(DomainError, lambda: DiscreteSeries([[0, 0], [1, 1], [2, 2]], '<-5;2>')) def", "1) self.assertRaises(NotInDomainError, lambda: sp[0]) self.assertRaises(NotInDomainError, lambda: sp[2]) self.assertEqual(sp.domain.start, 0.5) self.assertEqual(sp.domain.stop, 1.5) def test_eval(self):", "sa.join_discrete(sb, lambda i, a, b: a + b) self.assertEqual(sc.eval_points([0, 1, 2]), [0, 2,", "self.assertIsInstance(sa, DiscreteSeries) self.assertEqual(sa.data, [(i, i ** 2) for i in PTS]) empty =", "[1, 1], [2, 2]]).translate(1) self.assertEqual(sp[1.5], 0) self.assertEqual(sp[2.5], 1) def test_slice(self): series = DiscreteSeries([[0,", "(2, 4)]) def test_eval2i(self): sa = DiscreteSeries([[0, 0], [1, 1], [2, 2]]) sc", "= series[0.5:1.5] self.assertEqual(sp[0.5], 0) self.assertEqual(sp[1.5], 1) self.assertRaises(NotInDomainError, lambda: sp[0]) self.assertRaises(NotInDomainError, lambda: sp[2]) self.assertEqual(sp.domain.start,", "= sa.join_discrete(sb, lambda i, a, b: a + b) self.assertIsInstance(sc, DiscreteSeries) self.assertEqual(sc.eval_points([0, 1,", "1, 1.9] EPTS = [x * x ** 2 for x in PTS]", "DiscreteSeries([[0, 1], [1, 2], [2, 3]]) sc = sa.join_discrete(sb, lambda i, a, b:", "x: x ** 2, '<-10;10)').discretize(PTS) self.assertIsInstance(sa, DiscreteSeries) self.assertEqual(sa.data, [(i, i ** 2) for", "test_eval3(self): sa = FunctionSeries(lambda x: x ** 2, '<-10;10)') sb = FunctionSeries(NOOP, '<0;2)')", "[2, 2]], '<-5;2>')) def test_base(self): s = DiscreteSeries([[0, 0], [1, 1], [2, 2]])", "series = DiscreteSeries([[0, 0], [1, 1], [2, 2]]) sp = series[0.5:1.5] self.assertEqual(sp[0.5], 0)", "2, '<-10;10)') sb = FunctionSeries(NOOP, '<0;2)') sc = sa.join(sb, lambda i, a, b:", "PTS]) empty = FunctionSeries(lambda x: x ** 2, 
'<-10;10)').discretize([]) self.assertTrue(empty.domain.is_empty()) class TestFunctionSeries(unittest.TestCase): def", "self.assertEqual(series[0.5], 1.5) self.assertEqual(series[1], 2) self.assertEqual(series[2.3], 3) def test_conf(self): self.assertRaises(TypeError, lambda: LinearInterpolationSeries( FunctionSeries(NOOP, '<0;3)')))", "data for covering this domain self.assertRaises(DomainError, lambda: FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize(", "= DiscreteSeries([(0, 0), (1, 0)], '<0;5>') a.join(b, lambda i, x, y: x +", "<gh_stars>0 import math import unittest from firanka.exceptions import NotInDomainError, DomainError from firanka.intervals import", "# note the invalid data for covering this domain self.assertRaises(DomainError, lambda: FunctionSeries(lambda x:", "self.assertEqual(s[2.5], 2) def test_translation(self): s = DiscreteSeries([[0, 0], [1, 1], [2, 2]]).translate(3) self.assertEqual(s[3],", "-3, 1, 2, 3] series = FunctionSeries(NOOP, '<-5;5>').apply(lambda k, x: k) self.assertEqual(series.eval_points(PTS), [x", "4)]) def test_eval2i(self): sa = DiscreteSeries([[0, 0], [1, 1], [2, 2]]) sc =", "= ser1.join(ser2, lambda i, x, y: x * y) class TestLinearInterpolation(unittest.TestCase): def test_lin(self):", "** 2, '<-10;10)') sb = FunctionSeries(NOOP, '<0;2)') sc = sa.join(sb, lambda i, a,", "sp[0]) self.assertRaises(NotInDomainError, lambda: sp[2]) self.assertEqual(sp.domain.start, 0.5) self.assertEqual(sp.domain.stop, 1.5) def test_eval(self): sa = DiscreteSeries([[0,", "self.assertEqual(series.eval_points([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]), [3, 1,", "self.assertEqual(sp[1.5], 0) self.assertEqual(sp[2.5], 1) def test_slice(self): series = DiscreteSeries([[0, 0], [1, 1], [2,", "x ** 2, '<-10;10)') sb = FunctionSeries(NOOP, '<0;2)') sc = sa.join(sb, lambda i,", "test_domain_sensitivity(self): logs = FunctionSeries(math.log, '(0;5>') dirs = DiscreteSeries([(0, 1), (1, 2), (3, 4)],", "= FunctionSeries(lambda x: x ** 2, 
'<-10;10)').discretize([]) self.assertTrue(empty.domain.is_empty()) class TestFunctionSeries(unittest.TestCase): def test_slice(self): series", "self.assertIsInstance(sa, DiscreteSeries) self.assertEqual(sa.data, [(i, i ** 2) for i in PTS]) sa =", "-2, -1, 0, 1, 2, 3, 4, 5]), [3, 1, 2, 3, 1,", "series = DiscreteSeries([[0, 0], [1, 1], [2, 2]]) self.assertRaises(NotInDomainError, lambda: series[-1:2]) def test_translate(self):", "lambda: DiscreteSeries([[0, 0], [1, 1], [2, 2]], '<-5;2>')) def test_base(self): s = DiscreteSeries([[0,", "= FunctionSeries(NOOP, '<0;2>') sp = series[0.5:1.5] self.assertEqual(sp[0.5], 0.5) self.assertEqual(sp[1.5], 1.5) self.assertRaises(NotInDomainError, lambda: sp[0])", "sa = FunctionSeries(lambda x: x ** 2, '<-10;10)') sb = FunctionSeries(NOOP, '<0;2)') sc", "for covering this domain self.assertRaises(DomainError, lambda: FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize( [0,", "sp = DiscreteSeries([[0, 0], [1, 1], [2, 2]]).translate(1) self.assertEqual(sp[1.5], 0) self.assertEqual(sp[2.5], 1) def", "self.assertEqual(s[3], 0) self.assertEqual(s[3.5], 0) self.assertEqual(s[4], 1) def test_slice_outdomain(self): series = DiscreteSeries([[0, 0], [1,", "[1, 1], [2, 2]]) self.assertRaises(NotInDomainError, lambda: series[-1:2]) def test_translate(self): sp = DiscreteSeries([[0, 0],", "a, b: a + b) self.assertEqual(sc.eval_points([0, 1, 2]), [0, 2, 4]) self.assertIsInstance(sc, DiscreteSeries)", "0], [1, 1], [2, 2]]) sc = sa.join_discrete(HUGE_IDENTITY, lambda i, a, b: i)", "i, a, b: a * b) PTS = [0, 1, 1.9] EPTS =", "lambda: dirs.join_discrete(logs, lambda i, x, y: x + y)) class TestModuloSeries(unittest.TestCase): def test_exceptions(self):", "self.assertEqual(sp.domain.stop, 1.5) def test_eval(self): sa = DiscreteSeries([[0, 0], [1, 1], [2, 2]]) sb", "x ** 2, '<0;3)')) ser2 = FunctionSeries(NOOP, '<0;3)') ser3 = ser1.join(ser2, lambda i,", "b: i) self.assertEqual(sc.eval_points([0, 1, 2]), [0, 1, 2]) self.assertIsInstance(sc, 
DiscreteSeries) self.assertEqual(sc.data, [(0, 0),", "3, 1]) def test_comp_discrete(self): ser1 = ModuloSeries(FunctionSeries(lambda x: x ** 2, '<0;3)')) ser2", "firanka.series import DiscreteSeries, FunctionSeries, ModuloSeries, \\ LinearInterpolationSeries, Series from .common import NOOP, HUGE_IDENTITY", "FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize( [-100, 0, 1, 2, 3, 4, 5],", "2, 3, 1]) def test_comp_discrete(self): ser1 = ModuloSeries(FunctionSeries(lambda x: x ** 2, '<0;3)'))", "1], [2, 2]]) sb = DiscreteSeries([[0, 1], [1, 2], [2, 3]]) sc =", "lambda: ModuloSeries( FunctionSeries(NOOP, '(-inf; 0>'))) self.assertRaises(ValueError, lambda: ModuloSeries( FunctionSeries(NOOP, '(-inf; inf)'))) self.assertRaises(ValueError, lambda:", "self.assertEqual(sp.domain.start, 0.5) self.assertEqual(sp.domain.stop, 1.5) def test_apply(self): PTS = [-1, -2, -3, 1, 2,", "= DiscreteSeries([[0, 0], [1, 1], [2, 2]]) self.assertEqual(s[0], 0) self.assertEqual(s[0.5], 0) self.assertEqual(s[1], 1)", "TestModuloSeries(unittest.TestCase): def test_exceptions(self): self.assertRaises(ValueError, lambda: ModuloSeries( FunctionSeries(NOOP, '(-inf; 0>'))) self.assertRaises(ValueError, lambda: ModuloSeries( FunctionSeries(NOOP,", "in PTS] self.assertEqual(sc.eval_points(PTS), EPTS) self.assertTrue(Interval('<0;2)') in sc.domain) def test_discretize(self): # note the invalid", "def test_advanced(self): series = ModuloSeries(DiscreteSeries([(-1, 1), (0, 2), (1, 3)], '<-1;2)')) self.assertEqual(series.period, 3.0)", "DiscreteSeries([[0, 0], [1, 1], [2, 2]]) self.assertEqual(s[0], 0) self.assertEqual(s[0.5], 0) self.assertEqual(s[1], 1) self.assertRaises(NotInDomainError,", "import Interval from firanka.series import DiscreteSeries, FunctionSeries, ModuloSeries, \\ LinearInterpolationSeries, Series from .common", "TestBase(unittest.TestCase): def test_abstract(self): self.assertRaises(NotImplementedError, lambda: Series('<-1;1>')[0]) class TestDiscreteSeries(unittest.TestCase): 
def test_redundancy_skip(self): a = DiscreteSeries([(0,", "TestLinearInterpolation(unittest.TestCase): def test_lin(self): series = LinearInterpolationSeries( DiscreteSeries([(0, 1), (1, 2), (2, 3)], '<0;3)'))", "** 2, '<-10;10)').discretize(PTS, '(-1;6)') self.assertIsInstance(sa, DiscreteSeries) self.assertEqual(sa.data, [(i, i ** 2) for i", "2, 3, 1, 2, 3, 1, 2, 3, 1]) def test_comp_discrete(self): ser1 =", "s = DiscreteSeries([[0, 0], [1, 1], [2, 2]]).translate(3) self.assertEqual(s[3], 0) self.assertEqual(s[3.5], 0) self.assertEqual(s[4],", "DiscreteSeries([(0, 0), (1, 0)], '<0;5>') a.join(b, lambda i, x, y: x + y)", "lambda: sp[0]) self.assertRaises(NotInDomainError, lambda: sp[2]) self.assertEqual(sp.domain.start, 0.5) self.assertEqual(sp.domain.stop, 1.5) def test_eval(self): sa =", "def test_slice(self): series = FunctionSeries(NOOP, '<0;2>') sp = series[0.5:1.5] self.assertEqual(sp[0.5], 0.5) self.assertEqual(sp[1.5], 1.5)", "self.assertRaises(DomainError, lambda: FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize( [0, 1, 2, 3, 4,", "the invalid data for covering this domain self.assertRaises(DomainError, lambda: FunctionSeries(lambda x: x **", "dirs.join_discrete(logs, lambda i, x, y: x + y)) class TestModuloSeries(unittest.TestCase): def test_exceptions(self): self.assertRaises(ValueError,", "1) def test_slice(self): series = DiscreteSeries([[0, 0], [1, 1], [2, 2]]) sp =", "test_eval2(self): sa = DiscreteSeries([[0, 0], [1, 1], [2, 2]]) sb = FunctionSeries(NOOP, '<0;2>')", "DiscreteSeries([[0, 0], [1, 1], [2, 2]], '<-5;2>')) def test_base(self): s = DiscreteSeries([[0, 0],", "= FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize(PTS, '(-1;6)') self.assertIsInstance(sa, DiscreteSeries) self.assertEqual(sa.data, [(i, i", "2, 3, 4, 5] sa = FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize(PTS, '(-1;6)')", "= DiscreteSeries([[0, 0], [1, 1], [2, 2]]) sb = FunctionSeries(NOOP, '<0;2>') sc =", "0], [1, 1], [2, 2]]) 
self.assertRaises(NotInDomainError, lambda: series[-1:2]) def test_translate(self): sp = DiscreteSeries([[0,", "lambda i, x, y: x + y) def test_uncov(self): self.assertRaises(DomainError, lambda: DiscreteSeries([[0, 0],", "test_eval2i(self): sa = DiscreteSeries([[0, 0], [1, 1], [2, 2]]) sc = sa.join_discrete(HUGE_IDENTITY, lambda", "lambda: ModuloSeries( FunctionSeries(NOOP, '(-inf; inf)'))) self.assertRaises(ValueError, lambda: ModuloSeries(FunctionSeries(NOOP, '<0; 0>'))) def test_base(self): series", "s[-1]) self.assertEqual(s[2.5], 2) def test_translation(self): s = DiscreteSeries([[0, 0], [1, 1], [2, 2]]).translate(3)", "lambda: series[-1:2]) def test_translate(self): sp = DiscreteSeries([[0, 0], [1, 1], [2, 2]]).translate(1) self.assertEqual(sp[1.5],", "DiscreteSeries) self.assertEqual(sa.data, [(i, i ** 2) for i in PTS]) empty = FunctionSeries(lambda", "[1, 3, 5]) self.assertEqual(sc.data, [(0, 1), (1, 3), (2, 5)]) def test_eval2(self): sa", "i in PTS]) empty = FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize([]) self.assertTrue(empty.domain.is_empty()) class", "1], [2, 2]], '<-5;2>')) def test_base(self): s = DiscreteSeries([[0, 0], [1, 1], [2,", "= DiscreteSeries([[0, 0], [1, 1], [2, 2]]).translate(3) self.assertEqual(s[3], 0) self.assertEqual(s[3.5], 0) self.assertEqual(s[4], 1)", "from firanka.exceptions import NotInDomainError, DomainError from firanka.intervals import Interval from firanka.series import DiscreteSeries,", "0), (1, 0), (2, 0)], '<0;5>') b = DiscreteSeries([(0, 0), (1, 0)], '<0;5>')", "4)], '<0;5>') self.assertRaises(ValueError, lambda: dirs.join_discrete(logs, lambda i, x, y: x + y)) class", "1), (1, 2), (2, 3)], '<0;3)')) self.assertEqual(series[0], 1) self.assertEqual(series[0.5], 1.5) self.assertEqual(series[1], 2) self.assertEqual(series[2.3],", "0), (2, 0)], '<0;5>') b = DiscreteSeries([(0, 0), (1, 0)], '<0;5>') a.join(b, lambda", "True, True)) self.assertEqual(s[0], 0) self.assertEqual(s[0.5], 0) self.assertEqual(s[1], 1) 
self.assertRaises(NotInDomainError, lambda: s[-1]) self.assertEqual(s[2.5], 2)", "\\ LinearInterpolationSeries, Series from .common import NOOP, HUGE_IDENTITY class TestBase(unittest.TestCase): def test_abstract(self): self.assertRaises(NotImplementedError,", "'<-1;2)')) self.assertEqual(series.period, 3.0) self.assertEqual(series.eval_points([-1, 0, 1]), [1, 2, 3]) self.assertEqual(series.eval_points([-5, -4, -3, -2,", "DiscreteSeries) self.assertEqual(sa.data, [(i, i ** 2) for i in PTS]) sa = FunctionSeries(lambda", "PTS = [-1, -2, -3, 1, 2, 3] series = FunctionSeries(NOOP, '<-5;5>').apply(lambda k,", "True)) self.assertEqual(s[0], 0) self.assertEqual(s[0.5], 0) self.assertEqual(s[1], 1) self.assertRaises(NotInDomainError, lambda: s[-1]) self.assertEqual(s[2.5], 2) def", "[0, 1, 2, 3, 4, 5], '(-1;6)')) self.assertRaises(NotInDomainError, lambda: FunctionSeries(lambda x: x **", "= [x * x ** 2 for x in PTS] self.assertEqual(sc.eval_points(PTS), EPTS) self.assertTrue(Interval('<0;2)')", "= ModuloSeries(DiscreteSeries([(-1, 1), (0, 2), (1, 3)], '<-1;2)')) self.assertEqual(series.period, 3.0) self.assertEqual(series.eval_points([-1, 0, 1]),", "FunctionSeries(NOOP, '<-5;5>').apply(dzika) self.assertEqual(series.eval_points(PTS), [x for x in PTS]) def test_domain_sensitivity(self): logs = FunctionSeries(math.log,", "2]]) self.assertEqual(s[0], 0) self.assertEqual(s[0.5], 0) self.assertEqual(s[1], 1) self.assertRaises(NotInDomainError, lambda: s[-1]) self.assertRaises(NotInDomainError, lambda: s[2.5])", "[1, 2], [2, 3]]) sc = sa.join_discrete(sb, lambda i, a, b: a +", "[(0, 1), (1, 3), (2, 5)]) def test_eval2(self): sa = DiscreteSeries([[0, 0], [1,", "i, a, b: a + b) self.assertIsInstance(sc, DiscreteSeries) self.assertEqual(sc.eval_points([0, 1, 2]), [1, 3,", "ModuloSeries( DiscreteSeries([(0, 1), (1, 2), (2, 3)], '<0;3)')) self.assertEquals(series[3], 1) self.assertEquals(series[4], 2) self.assertEquals(series[5],", "1), (2, 2)]) def test_eval3(self): sa = 
FunctionSeries(lambda x: x ** 2, '<-10;10)')", "def test_apply_wild(self): def dzika(k, x, a=5, *args, **kwargs): return k PTS = [-1,", "FunctionSeries, ModuloSeries, \\ LinearInterpolationSeries, Series from .common import NOOP, HUGE_IDENTITY class TestBase(unittest.TestCase): def", "1], [2, 2]]) sp = series[0.5:1.5] self.assertEqual(sp[0.5], 0) self.assertEqual(sp[1.5], 1) self.assertRaises(NotInDomainError, lambda: sp[0])", "1], [2, 2]]).apply( lambda k, v: k) self.assertEquals(sb.data, [(0, 0), (1, 1), (2,", "self.assertRaises(NotInDomainError, lambda: s[-1]) self.assertEqual(s[2.5], 2) def test_translation(self): s = DiscreteSeries([[0, 0], [1, 1],", "x ** 2 for x in PTS] self.assertEqual(sc.eval_points(PTS), EPTS) self.assertTrue(Interval('<0;2)') in sc.domain) def", "y: x + y)) class TestModuloSeries(unittest.TestCase): def test_exceptions(self): self.assertRaises(ValueError, lambda: ModuloSeries( FunctionSeries(NOOP, '(-inf;", "x in PTS]) def test_apply_wild(self): def dzika(k, x, a=5, *args, **kwargs): return k", "lambda: Series('<-1;1>')[0]) class TestDiscreteSeries(unittest.TestCase): def test_redundancy_skip(self): a = DiscreteSeries([(0, 0), (1, 0), (2,", "(3, 4)], '<0;5>') self.assertRaises(ValueError, lambda: dirs.join_discrete(logs, lambda i, x, y: x + y))", "k, v: k) self.assertEquals(sb.data, [(0, 0), (1, 1), (2, 2)]) def test_eval3(self): sa", "PTS]) sa = FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize(PTS) self.assertIsInstance(sa, DiscreteSeries) self.assertEqual(sa.data, [(i,", "test_uncov(self): self.assertRaises(DomainError, lambda: DiscreteSeries([[0, 0], [1, 1], [2, 2]], '<-5;2>')) def test_base(self): s", "i in PTS]) sa = FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize(PTS) self.assertIsInstance(sa, DiscreteSeries)", "sa.join(sb, lambda i, a, b: a * b) PTS = [0, 1, 1.9]", "1), (1, 2), (2, 3)], '<0;3)')) self.assertEquals(series[3], 1) self.assertEquals(series[4], 2) self.assertEquals(series[5], 3) 
self.assertEquals(series[-1],", "b: a * b) PTS = [0, 1, 1.9] EPTS = [x *", "lambda: s[-1]) self.assertEqual(s[2.5], 2) def test_translation(self): s = DiscreteSeries([[0, 0], [1, 1], [2,", "(2, 5)]) def test_eval2(self): sa = DiscreteSeries([[0, 0], [1, 1], [2, 2]]) sb", "self.assertEqual(s[0.5], 0) self.assertEqual(s[1], 1) self.assertRaises(NotInDomainError, lambda: s[-1]) self.assertEqual(s[2.5], 2) def test_translation(self): s =", "** 2) for i in PTS]) empty = FunctionSeries(lambda x: x ** 2,", "y)) class TestModuloSeries(unittest.TestCase): def test_exceptions(self): self.assertRaises(ValueError, lambda: ModuloSeries( FunctionSeries(NOOP, '(-inf; 0>'))) self.assertRaises(ValueError, lambda:", "0) self.assertEqual(s[1], 1) self.assertRaises(NotInDomainError, lambda: s[-1]) self.assertEqual(s[2.5], 2) def test_translation(self): s = DiscreteSeries([[0,", "series = FunctionSeries(NOOP, '<-5;5>').apply(lambda k, x: k) self.assertEqual(series.eval_points(PTS), [x for x in PTS])", "self.assertEquals(series[-1], 3) def test_advanced(self): series = ModuloSeries(DiscreteSeries([(-1, 1), (0, 2), (1, 3)], '<-1;2)'))", "b: a + b) self.assertIsInstance(sc, DiscreteSeries) self.assertEqual(sc.eval_points([0, 1, 2]), [1, 3, 5]) self.assertEqual(sc.data,", "class TestFunctionSeries(unittest.TestCase): def test_slice(self): series = FunctionSeries(NOOP, '<0;2>') sp = series[0.5:1.5] self.assertEqual(sp[0.5], 0.5)", "DiscreteSeries([[0, 0], [1, 1], [2, 2]]) sp = series[0.5:1.5] self.assertEqual(sp[0.5], 0) self.assertEqual(sp[1.5], 1)", "[1, 1], [2, 2]], domain=Interval(0, 3, True, True)) self.assertEqual(s[0], 0) self.assertEqual(s[0.5], 0) self.assertEqual(s[1],", "lambda: FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize( [0, 1, 2, 3, 4, 5],", "FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize([]) self.assertTrue(empty.domain.is_empty()) class TestFunctionSeries(unittest.TestCase): def test_slice(self): series =", "'(-inf; 0>'))) self.assertRaises(ValueError, 
lambda: ModuloSeries( FunctionSeries(NOOP, '(-inf; inf)'))) self.assertRaises(ValueError, lambda: ModuloSeries(FunctionSeries(NOOP, '<0; 0>')))", "2, 3, 1, 2, 3, 1]) def test_comp_discrete(self): ser1 = ModuloSeries(FunctionSeries(lambda x: x", "0) self.assertEqual(s[3.5], 0) self.assertEqual(s[4], 1) def test_slice_outdomain(self): series = DiscreteSeries([[0, 0], [1, 1],", "2) def test_translation(self): s = DiscreteSeries([[0, 0], [1, 1], [2, 2]]).translate(3) self.assertEqual(s[3], 0)", "= FunctionSeries(lambda x: x ** 2, '<-10;10)') sb = FunctionSeries(NOOP, '<0;2)') sc =", "x in PTS]) def test_domain_sensitivity(self): logs = FunctionSeries(math.log, '(0;5>') dirs = DiscreteSeries([(0, 1),", "DiscreteSeries([[0, 0], [1, 1], [2, 2]]).apply( lambda k, v: k) self.assertEquals(sb.data, [(0, 0),", "lambda k, v: k) self.assertEquals(sb.data, [(0, 0), (1, 1), (2, 2)]) def test_eval3(self):", "2, '<-10;10)').discretize( [0, 1, 2, 3, 4, 5], '(-1;6)')) self.assertRaises(NotInDomainError, lambda: FunctionSeries(lambda x:", "ser1 = ModuloSeries(FunctionSeries(lambda x: x ** 2, '<0;3)')) ser2 = FunctionSeries(NOOP, '<0;3)') ser3", "i, a, b: i) self.assertEqual(sc.eval_points([0, 1, 2]), [0, 1, 2]) self.assertIsInstance(sc, DiscreteSeries) self.assertEqual(sc.data,", "k) self.assertEquals(sb.data, [(0, 0), (1, 1), (2, 2)]) def test_eval3(self): sa = FunctionSeries(lambda", "lambda: s[-1]) self.assertRaises(NotInDomainError, lambda: s[2.5]) s = DiscreteSeries([[0, 0], [1, 1], [2, 2]],", "3.0) self.assertEqual(series.eval_points([-1, 0, 1]), [1, 2, 3]) self.assertEqual(series.eval_points([-5, -4, -3, -2, -1, 0,", "'<0;2>') sc = sa.join_discrete(sb, lambda i, a, b: a + b) self.assertEqual(sc.eval_points([0, 1,", "3, 4, 5] sa = FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize(PTS, '(-1;6)') self.assertIsInstance(sa,", "[1, 2, 3]) self.assertEqual(series.eval_points([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4,", "sb = DiscreteSeries([[0, 0], [1, 1], [2, 2]]).apply( lambda k, v: 
k) self.assertEquals(sb.data,", "2, 3, 4, 5], '(-1;6)')) PTS = [-1, 0, 1, 2, 3, 4,", "FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize(PTS, '(-1;6)') self.assertIsInstance(sa, DiscreteSeries) self.assertEqual(sa.data, [(i, i **", "i, x, y: x + y)) class TestModuloSeries(unittest.TestCase): def test_exceptions(self): self.assertRaises(ValueError, lambda: ModuloSeries(", "a + b) self.assertIsInstance(sc, DiscreteSeries) self.assertEqual(sc.eval_points([0, 1, 2]), [1, 3, 5]) self.assertEqual(sc.data, [(0,", "LinearInterpolationSeries, Series from .common import NOOP, HUGE_IDENTITY class TestBase(unittest.TestCase): def test_abstract(self): self.assertRaises(NotImplementedError, lambda:", "NOOP, HUGE_IDENTITY class TestBase(unittest.TestCase): def test_abstract(self): self.assertRaises(NotImplementedError, lambda: Series('<-1;1>')[0]) class TestDiscreteSeries(unittest.TestCase): def test_redundancy_skip(self):", "ModuloSeries, \\ LinearInterpolationSeries, Series from .common import NOOP, HUGE_IDENTITY class TestBase(unittest.TestCase): def test_abstract(self):", "self.assertRaises(NotInDomainError, lambda: series[-1:2]) def test_translate(self): sp = DiscreteSeries([[0, 0], [1, 1], [2, 2]]).translate(1)", "self.assertEqual(sp[1.5], 1) self.assertRaises(NotInDomainError, lambda: sp[0]) self.assertRaises(NotInDomainError, lambda: sp[2]) self.assertEqual(sp.domain.start, 0.5) self.assertEqual(sp.domain.stop, 1.5) def", "b: a + b) self.assertEqual(sc.eval_points([0, 1, 2]), [0, 2, 4]) self.assertIsInstance(sc, DiscreteSeries) self.assertEqual(sc.data,", "0), (1, 0)], '<0;5>') a.join(b, lambda i, x, y: x + y) def", "2, '<-10;10)').discretize(PTS, '(-1;6)') self.assertIsInstance(sa, DiscreteSeries) self.assertEqual(sa.data, [(i, i ** 2) for i in", "x: x ** 2, '<0;3)')) ser2 = FunctionSeries(NOOP, '<0;3)') ser3 = ser1.join(ser2, lambda", "3, 4, 5]), [3, 1, 2, 3, 1, 2, 3, 1, 2, 3,", "ser3 = ser1.join(ser2, lambda i, x, y: x * y) class 
TestLinearInterpolation(unittest.TestCase): def", "2]]) sc = sa.join_discrete(HUGE_IDENTITY, lambda i, a, b: i) self.assertEqual(sc.eval_points([0, 1, 2]), [0,", "1, 2, 3, 4, 5]), [3, 1, 2, 3, 1, 2, 3, 1,", "1) self.assertEqual(series[0.5], 1.5) self.assertEqual(series[1], 2) self.assertEqual(series[2.3], 3) def test_conf(self): self.assertRaises(TypeError, lambda: LinearInterpolationSeries( FunctionSeries(NOOP,", "self.assertIsInstance(sc, DiscreteSeries) self.assertEqual(sc.data, [(0, 0), (1, 2), (2, 4)]) def test_eval2i(self): sa =", "sp = series[0.5:1.5] self.assertEqual(sp[0.5], 0) self.assertEqual(sp[1.5], 1) self.assertRaises(NotInDomainError, lambda: sp[0]) self.assertRaises(NotInDomainError, lambda: sp[2])", "2, 3, 4, 5]), [3, 1, 2, 3, 1, 2, 3, 1, 2,", "series = ModuloSeries(DiscreteSeries([(-1, 1), (0, 2), (1, 3)], '<-1;2)')) self.assertEqual(series.period, 3.0) self.assertEqual(series.eval_points([-1, 0,", "0], [1, 1], [2, 2]], domain=Interval(0, 3, True, True)) self.assertEqual(s[0], 0) self.assertEqual(s[0.5], 0)", "2, 3]) self.assertEqual(series.eval_points([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]),", "** 2, '<-10;10)').discretize(PTS) self.assertIsInstance(sa, DiscreteSeries) self.assertEqual(sa.data, [(i, i ** 2) for i in", "x: k) self.assertEqual(series.eval_points(PTS), [x for x in PTS]) def test_apply_wild(self): def dzika(k, x,", "ser1.join(ser2, lambda i, x, y: x * y) class TestLinearInterpolation(unittest.TestCase): def test_lin(self): series", "b) self.assertIsInstance(sc, DiscreteSeries) self.assertEqual(sc.eval_points([0, 1, 2]), [1, 3, 5]) self.assertEqual(sc.data, [(0, 1), (1,", "(1, 2), (2, 4)]) def test_eval2i(self): sa = DiscreteSeries([[0, 0], [1, 1], [2,", "DiscreteSeries([[0, 0], [1, 1], [2, 2]]) sc = sa.join_discrete(HUGE_IDENTITY, lambda i, a, b:", "0], [1, 1], [2, 2]]).apply( lambda k, v: k) self.assertEquals(sb.data, [(0, 0), (1,", "empty = FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize([]) 
self.assertTrue(empty.domain.is_empty()) class TestFunctionSeries(unittest.TestCase): def test_slice(self):", "0) self.assertEqual(s[0.5], 0) self.assertEqual(s[1], 1) self.assertRaises(NotInDomainError, lambda: s[-1]) self.assertRaises(NotInDomainError, lambda: s[2.5]) s =", "self.assertEqual(s[0], 0) self.assertEqual(s[0.5], 0) self.assertEqual(s[1], 1) self.assertRaises(NotInDomainError, lambda: s[-1]) self.assertEqual(s[2.5], 2) def test_translation(self):", "2], [2, 3]]) sc = sa.join_discrete(sb, lambda i, a, b: a + b)", "def test_redundancy_skip(self): a = DiscreteSeries([(0, 0), (1, 0), (2, 0)], '<0;5>') b =", "ModuloSeries(FunctionSeries(lambda x: x ** 2, '<0;3)')) ser2 = FunctionSeries(NOOP, '<0;3)') ser3 = ser1.join(ser2,", "TestFunctionSeries(unittest.TestCase): def test_slice(self): series = FunctionSeries(NOOP, '<0;2>') sp = series[0.5:1.5] self.assertEqual(sp[0.5], 0.5) self.assertEqual(sp[1.5],", "2 for x in PTS] self.assertEqual(sc.eval_points(PTS), EPTS) self.assertTrue(Interval('<0;2)') in sc.domain) def test_discretize(self): #", "x ** 2, '<-10;10)').discretize(PTS) self.assertIsInstance(sa, DiscreteSeries) self.assertEqual(sa.data, [(i, i ** 2) for i", "-2, -3, 1, 2, 3] series = FunctionSeries(NOOP, '<-5;5>').apply(lambda k, x: k) self.assertEqual(series.eval_points(PTS),", "class TestModuloSeries(unittest.TestCase): def test_exceptions(self): self.assertRaises(ValueError, lambda: ModuloSeries( FunctionSeries(NOOP, '(-inf; 0>'))) self.assertRaises(ValueError, lambda: ModuloSeries(", "x: x ** 2, '<-10;10)') sb = FunctionSeries(NOOP, '<0;2)') sc = sa.join(sb, lambda", "i, a, b: a + b) self.assertEqual(sc.eval_points([0, 1, 2]), [0, 2, 4]) self.assertIsInstance(sc,", "test_redundancy_skip(self): a = DiscreteSeries([(0, 0), (1, 0), (2, 0)], '<0;5>') b = DiscreteSeries([(0,", "k PTS = [-1, -2, -3, 1, 2, 3] series = FunctionSeries(NOOP, '<-5;5>').apply(dzika)", "sb = FunctionSeries(NOOP, '<0;2>') sc = sa.join_discrete(sb, lambda i, a, b: a +", "= 
series[0.5:1.5] self.assertEqual(sp[0.5], 0.5) self.assertEqual(sp[1.5], 1.5) self.assertRaises(NotInDomainError, lambda: sp[0]) self.assertRaises(NotInDomainError, lambda: sp[2]) self.assertEqual(sp.domain.start,", "= DiscreteSeries([[0, 0], [1, 1], [2, 2]]) sc = sa.join_discrete(HUGE_IDENTITY, lambda i, a,", "[-1, -2, -3, 1, 2, 3] series = FunctionSeries(NOOP, '<-5;5>').apply(dzika) self.assertEqual(series.eval_points(PTS), [x for", "ModuloSeries( FunctionSeries(NOOP, '(-inf; 0>'))) self.assertRaises(ValueError, lambda: ModuloSeries( FunctionSeries(NOOP, '(-inf; inf)'))) self.assertRaises(ValueError, lambda: ModuloSeries(FunctionSeries(NOOP,", "from firanka.series import DiscreteSeries, FunctionSeries, ModuloSeries, \\ LinearInterpolationSeries, Series from .common import NOOP,", "'<0;5>') self.assertRaises(ValueError, lambda: dirs.join_discrete(logs, lambda i, x, y: x + y)) class TestModuloSeries(unittest.TestCase):", "self.assertRaises(ValueError, lambda: ModuloSeries( FunctionSeries(NOOP, '(-inf; inf)'))) self.assertRaises(ValueError, lambda: ModuloSeries(FunctionSeries(NOOP, '<0; 0>'))) def test_base(self):", "2]), [1, 3, 5]) self.assertEqual(sc.data, [(0, 1), (1, 3), (2, 5)]) def test_eval2(self):", "self.assertRaises(NotInDomainError, lambda: FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize( [-100, 0, 1, 2, 3,", "dzika(k, x, a=5, *args, **kwargs): return k PTS = [-1, -2, -3, 1,", "2), (3, 4)], '<0;5>') self.assertRaises(ValueError, lambda: dirs.join_discrete(logs, lambda i, x, y: x +", "Series from .common import NOOP, HUGE_IDENTITY class TestBase(unittest.TestCase): def test_abstract(self): self.assertRaises(NotImplementedError, lambda: Series('<-1;1>')[0])", "3) self.assertEquals(series[-1], 3) def test_advanced(self): series = ModuloSeries(DiscreteSeries([(-1, 1), (0, 2), (1, 3)],", "test_comp_discrete(self): ser1 = ModuloSeries(FunctionSeries(lambda x: x ** 2, '<0;3)')) ser2 = FunctionSeries(NOOP, '<0;3)')", "2) for i in PTS]) sa = 
FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize(PTS)", "s[2.5]) s = DiscreteSeries([[0, 0], [1, 1], [2, 2]], domain=Interval(0, 3, True, True))", "k, x: k) self.assertEqual(series.eval_points(PTS), [x for x in PTS]) def test_apply_wild(self): def dzika(k,", "i, x, y: x * y) class TestLinearInterpolation(unittest.TestCase): def test_lin(self): series = LinearInterpolationSeries(", "in PTS]) empty = FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize([]) self.assertTrue(empty.domain.is_empty()) class TestFunctionSeries(unittest.TestCase):", "for x in PTS]) def test_domain_sensitivity(self): logs = FunctionSeries(math.log, '(0;5>') dirs = DiscreteSeries([(0,", "'<-10;10)').discretize(PTS) self.assertIsInstance(sa, DiscreteSeries) self.assertEqual(sa.data, [(i, i ** 2) for i in PTS]) empty", "(2, 3)], '<0;3)')) self.assertEquals(series[3], 1) self.assertEquals(series[4], 2) self.assertEquals(series[5], 3) self.assertEquals(series[-1], 3) def test_advanced(self):", "= FunctionSeries(NOOP, '<-5;5>').apply(dzika) self.assertEqual(series.eval_points(PTS), [x for x in PTS]) def test_domain_sensitivity(self): logs =", "sa.join_discrete(sb, lambda i, a, b: a + b) self.assertIsInstance(sc, DiscreteSeries) self.assertEqual(sc.eval_points([0, 1, 2]),", "2), (1, 3)], '<-1;2)')) self.assertEqual(series.period, 3.0) self.assertEqual(series.eval_points([-1, 0, 1]), [1, 2, 3]) self.assertEqual(series.eval_points([-5,", "[(i, i ** 2) for i in PTS]) sa = FunctionSeries(lambda x: x", "x ** 2, '<-10;10)').discretize( [0, 1, 2, 3, 4, 5], '(-1;6)')) self.assertRaises(NotInDomainError, lambda:", "0)], '<0;5>') b = DiscreteSeries([(0, 0), (1, 0)], '<0;5>') a.join(b, lambda i, x,", "0) self.assertEqual(s[4], 1) def test_slice_outdomain(self): series = DiscreteSeries([[0, 0], [1, 1], [2, 2]])", "'<0;5>') b = DiscreteSeries([(0, 0), (1, 0)], '<0;5>') a.join(b, lambda i, x, y:", "2]], '<-5;2>')) def test_base(self): s = DiscreteSeries([[0, 0], [1, 1], [2, 2]]) 
self.assertEqual(s[0],", "= FunctionSeries(NOOP, '<-5;5>').apply(lambda k, x: k) self.assertEqual(series.eval_points(PTS), [x for x in PTS]) def", "= DiscreteSeries([[0, 1], [1, 2], [2, 3]]) sc = sa.join_discrete(sb, lambda i, a,", "** 2) for i in PTS]) sa = FunctionSeries(lambda x: x ** 2,", "4, 5], '(-1;6)')) PTS = [-1, 0, 1, 2, 3, 4, 5] sa", "ser2 = FunctionSeries(NOOP, '<0;3)') ser3 = ser1.join(ser2, lambda i, x, y: x *", "2]), [0, 1, 2]) self.assertIsInstance(sc, DiscreteSeries) self.assertEqual(sc.data, [(0, 0), (1, 1), (2, 2)])", "2]]).apply( lambda k, v: k) self.assertEquals(sb.data, [(0, 0), (1, 1), (2, 2)]) def", "PTS = [-1, 0, 1, 2, 3, 4, 5] sa = FunctionSeries(lambda x:", "**kwargs): return k PTS = [-1, -2, -3, 1, 2, 3] series =", "5]), [3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1]) def", "= DiscreteSeries([(0, 0), (1, 0), (2, 0)], '<0;5>') b = DiscreteSeries([(0, 0), (1,", "x: x ** 2, '<-10;10)').discretize( [-100, 0, 1, 2, 3, 4, 5], '(-1;6)'))", "[1, 1], [2, 2]], '<-5;2>')) def test_base(self): s = DiscreteSeries([[0, 0], [1, 1],", "1], [2, 2]]) self.assertEqual(s[0], 0) self.assertEqual(s[0.5], 0) self.assertEqual(s[1], 1) self.assertRaises(NotInDomainError, lambda: s[-1]) self.assertRaises(NotInDomainError,", "def test_translation(self): s = DiscreteSeries([[0, 0], [1, 1], [2, 2]]).translate(3) self.assertEqual(s[3], 0) self.assertEqual(s[3.5],", "0], [1, 1], [2, 2]]) sb = DiscreteSeries([[0, 1], [1, 2], [2, 3]])", "'<-10;10)').discretize([]) self.assertTrue(empty.domain.is_empty()) class TestFunctionSeries(unittest.TestCase): def test_slice(self): series = FunctionSeries(NOOP, '<0;2>') sp = series[0.5:1.5]", "'<-10;10)') sb = FunctionSeries(NOOP, '<0;2)') sc = sa.join(sb, lambda i, a, b: a", "[2, 2]]) sp = series[0.5:1.5] self.assertEqual(sp[0.5], 0) self.assertEqual(sp[1.5], 1) self.assertRaises(NotInDomainError, lambda: sp[0]) self.assertRaises(NotInDomainError,", "in PTS]) sa = FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize(PTS) 
self.assertIsInstance(sa, DiscreteSeries) self.assertEqual(sa.data,", "self.assertEqual(sp[0.5], 0) self.assertEqual(sp[1.5], 1) self.assertRaises(NotInDomainError, lambda: sp[0]) self.assertRaises(NotInDomainError, lambda: sp[2]) self.assertEqual(sp.domain.start, 0.5) self.assertEqual(sp.domain.stop,", "PTS]) def test_domain_sensitivity(self): logs = FunctionSeries(math.log, '(0;5>') dirs = DiscreteSeries([(0, 1), (1, 2),", "[(0, 0), (1, 1), (2, 2)]) def test_apply(self): sb = DiscreteSeries([[0, 0], [1,", "= FunctionSeries(NOOP, '<0;2>') sc = sa.join_discrete(sb, lambda i, a, b: a + b)", "lambda i, a, b: i) self.assertEqual(sc.eval_points([0, 1, 2]), [0, 1, 2]) self.assertIsInstance(sc, DiscreteSeries)", "self.assertRaises(NotInDomainError, lambda: sp[0]) self.assertRaises(NotInDomainError, lambda: sp[2]) self.assertEqual(sp.domain.start, 0.5) self.assertEqual(sp.domain.stop, 1.5) def test_apply(self): PTS", "'<-5;5>').apply(lambda k, x: k) self.assertEqual(series.eval_points(PTS), [x for x in PTS]) def test_apply_wild(self): def", "y: x * y) class TestLinearInterpolation(unittest.TestCase): def test_lin(self): series = LinearInterpolationSeries( DiscreteSeries([(0, 1),", "ModuloSeries(DiscreteSeries([(-1, 1), (0, 2), (1, 3)], '<-1;2)')) self.assertEqual(series.period, 3.0) self.assertEqual(series.eval_points([-1, 0, 1]), [1,", "0, 1]), [1, 2, 3]) self.assertEqual(series.eval_points([-5, -4, -3, -2, -1, 0, 1, 2,", "+ b) self.assertEqual(sc.eval_points([0, 1, 2]), [0, 2, 4]) self.assertIsInstance(sc, DiscreteSeries) self.assertEqual(sc.data, [(0, 0),", "test_slice(self): series = DiscreteSeries([[0, 0], [1, 1], [2, 2]]) sp = series[0.5:1.5] self.assertEqual(sp[0.5],", "test_slice(self): series = FunctionSeries(NOOP, '<0;2>') sp = series[0.5:1.5] self.assertEqual(sp[0.5], 0.5) self.assertEqual(sp[1.5], 1.5) self.assertRaises(NotInDomainError,", "self.assertRaises(NotInDomainError, lambda: sp[2]) self.assertEqual(sp.domain.start, 0.5) 
self.assertEqual(sp.domain.stop, 1.5) def test_apply(self): PTS = [-1, -2,", "def test_translate(self): sp = DiscreteSeries([[0, 0], [1, 1], [2, 2]]).translate(1) self.assertEqual(sp[1.5], 0) self.assertEqual(sp[2.5],", "1.9] EPTS = [x * x ** 2 for x in PTS] self.assertEqual(sc.eval_points(PTS),", "self.assertEquals(sb.data, [(0, 0), (1, 1), (2, 2)]) def test_eval3(self): sa = FunctionSeries(lambda x:", "'<0;2)') sc = sa.join(sb, lambda i, a, b: a * b) PTS =", "2]], domain=Interval(0, 3, True, True)) self.assertEqual(s[0], 0) self.assertEqual(s[0.5], 0) self.assertEqual(s[1], 1) self.assertRaises(NotInDomainError, lambda:", "2]]) sp = series[0.5:1.5] self.assertEqual(sp[0.5], 0) self.assertEqual(sp[1.5], 1) self.assertRaises(NotInDomainError, lambda: sp[0]) self.assertRaises(NotInDomainError, lambda:", "a = DiscreteSeries([(0, 0), (1, 0), (2, 0)], '<0;5>') b = DiscreteSeries([(0, 0),", "HUGE_IDENTITY class TestBase(unittest.TestCase): def test_abstract(self): self.assertRaises(NotImplementedError, lambda: Series('<-1;1>')[0]) class TestDiscreteSeries(unittest.TestCase): def test_redundancy_skip(self): a", "2, 4]) self.assertIsInstance(sc, DiscreteSeries) self.assertEqual(sc.data, [(0, 0), (1, 2), (2, 4)]) def test_eval2i(self):", "FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize(PTS) self.assertIsInstance(sa, DiscreteSeries) self.assertEqual(sa.data, [(i, i ** 2)", "'<0;5>') a.join(b, lambda i, x, y: x + y) def test_uncov(self): self.assertRaises(DomainError, lambda:", "PTS = [0, 1, 1.9] EPTS = [x * x ** 2 for", "+ b) self.assertIsInstance(sc, DiscreteSeries) self.assertEqual(sc.eval_points([0, 1, 2]), [1, 3, 5]) self.assertEqual(sc.data, [(0, 1),", "FunctionSeries(NOOP, '<-5;5>').apply(lambda k, x: k) self.assertEqual(series.eval_points(PTS), [x for x in PTS]) def test_apply_wild(self):", "def dzika(k, x, a=5, *args, **kwargs): return k PTS = [-1, -2, -3,", "NotInDomainError, DomainError from firanka.intervals import Interval from firanka.series import 
DiscreteSeries, FunctionSeries, ModuloSeries, \\", "3, 1, 2, 3, 1, 2, 3, 1]) def test_comp_discrete(self): ser1 = ModuloSeries(FunctionSeries(lambda", "0), (1, 2), (2, 4)]) def test_eval2i(self): sa = DiscreteSeries([[0, 0], [1, 1],", "= FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize(PTS) self.assertIsInstance(sa, DiscreteSeries) self.assertEqual(sa.data, [(i, i **", "3)], '<0;3)')) self.assertEquals(series[3], 1) self.assertEquals(series[4], 2) self.assertEquals(series[5], 3) self.assertEquals(series[-1], 3) def test_advanced(self): series", "x * y) class TestLinearInterpolation(unittest.TestCase): def test_lin(self): series = LinearInterpolationSeries( DiscreteSeries([(0, 1), (1,", "s = DiscreteSeries([[0, 0], [1, 1], [2, 2]], domain=Interval(0, 3, True, True)) self.assertEqual(s[0],", "LinearInterpolationSeries( DiscreteSeries([(0, 1), (1, 2), (2, 3)], '<0;3)')) self.assertEqual(series[0], 1) self.assertEqual(series[0.5], 1.5) self.assertEqual(series[1],", "= FunctionSeries(NOOP, '<0;2)') sc = sa.join(sb, lambda i, a, b: a * b)", "[-1, 0, 1, 2, 3, 4, 5] sa = FunctionSeries(lambda x: x **", "test_translation(self): s = DiscreteSeries([[0, 0], [1, 1], [2, 2]]).translate(3) self.assertEqual(s[3], 0) self.assertEqual(s[3.5], 0)", "DiscreteSeries) self.assertEqual(sc.eval_points([0, 1, 2]), [1, 3, 5]) self.assertEqual(sc.data, [(0, 1), (1, 3), (2,", "FunctionSeries(NOOP, '<0;3)') ser3 = ser1.join(ser2, lambda i, x, y: x * y) class", "dirs = DiscreteSeries([(0, 1), (1, 2), (3, 4)], '<0;5>') self.assertRaises(ValueError, lambda: dirs.join_discrete(logs, lambda", "DiscreteSeries([[0, 0], [1, 1], [2, 2]]) self.assertRaises(NotInDomainError, lambda: series[-1:2]) def test_translate(self): sp =", "for x in PTS] self.assertEqual(sc.eval_points(PTS), EPTS) self.assertTrue(Interval('<0;2)') in sc.domain) def test_discretize(self): # note", "[0, 2, 4]) self.assertIsInstance(sc, DiscreteSeries) self.assertEqual(sc.data, [(0, 0), (1, 2), (2, 4)]) def", "sc.domain) def 
test_discretize(self): # note the invalid data for covering this domain self.assertRaises(DomainError,", "1, 2, 3, 4, 5], '(-1;6)')) self.assertRaises(NotInDomainError, lambda: FunctionSeries(lambda x: x ** 2,", "sa = FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize(PTS) self.assertIsInstance(sa, DiscreteSeries) self.assertEqual(sa.data, [(i, i", "1]) def test_comp_discrete(self): ser1 = ModuloSeries(FunctionSeries(lambda x: x ** 2, '<0;3)')) ser2 =", "1, 2, 3, 1, 2, 3, 1, 2, 3, 1]) def test_comp_discrete(self): ser1", "= LinearInterpolationSeries( DiscreteSeries([(0, 1), (1, 2), (2, 3)], '<0;3)')) self.assertEqual(series[0], 1) self.assertEqual(series[0.5], 1.5)", "from firanka.intervals import Interval from firanka.series import DiscreteSeries, FunctionSeries, ModuloSeries, \\ LinearInterpolationSeries, Series", "self.assertRaises(NotImplementedError, lambda: Series('<-1;1>')[0]) class TestDiscreteSeries(unittest.TestCase): def test_redundancy_skip(self): a = DiscreteSeries([(0, 0), (1, 0),", "[x for x in PTS]) def test_apply_wild(self): def dzika(k, x, a=5, *args, **kwargs):", "x ** 2, '<-10;10)').discretize( [-100, 0, 1, 2, 3, 4, 5], '(-1;6)')) PTS", "x, y: x + y)) class TestModuloSeries(unittest.TestCase): def test_exceptions(self): self.assertRaises(ValueError, lambda: ModuloSeries( FunctionSeries(NOOP,", "2, 3] series = FunctionSeries(NOOP, '<-5;5>').apply(lambda k, x: k) self.assertEqual(series.eval_points(PTS), [x for x", "2]]).translate(3) self.assertEqual(s[3], 0) self.assertEqual(s[3.5], 0) self.assertEqual(s[4], 1) def test_slice_outdomain(self): series = DiscreteSeries([[0, 0],", "lambda: s[2.5]) s = DiscreteSeries([[0, 0], [1, 1], [2, 2]], domain=Interval(0, 3, True,", "def test_eval2i(self): sa = DiscreteSeries([[0, 0], [1, 1], [2, 2]]) sc = sa.join_discrete(HUGE_IDENTITY,", "self.assertTrue(Interval('<0;2)') in sc.domain) def test_discretize(self): # note the invalid data for covering this", "(1, 0), (2, 0)], '<0;5>') b = DiscreteSeries([(0, 
0), (1, 0)], '<0;5>') a.join(b,", "import NotInDomainError, DomainError from firanka.intervals import Interval from firanka.series import DiscreteSeries, FunctionSeries, ModuloSeries,", "def test_abstract(self): self.assertRaises(NotImplementedError, lambda: Series('<-1;1>')[0]) class TestDiscreteSeries(unittest.TestCase): def test_redundancy_skip(self): a = DiscreteSeries([(0, 0),", "'<0;3)')) self.assertEqual(series[0], 1) self.assertEqual(series[0.5], 1.5) self.assertEqual(series[1], 2) self.assertEqual(series[2.3], 3) def test_conf(self): self.assertRaises(TypeError, lambda:", "in PTS]) def test_apply_wild(self): def dzika(k, x, a=5, *args, **kwargs): return k PTS", "3), (2, 5)]) def test_eval2(self): sa = DiscreteSeries([[0, 0], [1, 1], [2, 2]])", "'<0;3)')) ser2 = FunctionSeries(NOOP, '<0;3)') ser3 = ser1.join(ser2, lambda i, x, y: x", "self.assertEqual(sa.data, [(i, i ** 2) for i in PTS]) empty = FunctionSeries(lambda x:", "DiscreteSeries([[0, 0], [1, 1], [2, 2]]) sb = FunctionSeries(NOOP, '<0;2>') sc = sa.join_discrete(sb,", "self.assertTrue(empty.domain.is_empty()) class TestFunctionSeries(unittest.TestCase): def test_slice(self): series = FunctionSeries(NOOP, '<0;2>') sp = series[0.5:1.5] self.assertEqual(sp[0.5],", "EPTS) self.assertTrue(Interval('<0;2)') in sc.domain) def test_discretize(self): # note the invalid data for covering", "(1, 2), (2, 3)], '<0;3)')) self.assertEqual(series[0], 1) self.assertEqual(series[0.5], 1.5) self.assertEqual(series[1], 2) self.assertEqual(series[2.3], 3)", "3, 5]) self.assertEqual(sc.data, [(0, 1), (1, 3), (2, 5)]) def test_eval2(self): sa =", "PTS = [-1, -2, -3, 1, 2, 3] series = FunctionSeries(NOOP, '<-5;5>').apply(dzika) self.assertEqual(series.eval_points(PTS),", "firanka.exceptions import NotInDomainError, DomainError from firanka.intervals import Interval from firanka.series import DiscreteSeries, FunctionSeries,", "self.assertEqual(s[3.5], 0) self.assertEqual(s[4], 1) def test_slice_outdomain(self): series = 
DiscreteSeries([[0, 0], [1, 1], [2,", "'<0;3)')) self.assertEquals(series[3], 1) self.assertEquals(series[4], 2) self.assertEquals(series[5], 3) self.assertEquals(series[-1], 3) def test_advanced(self): series =", "2), (2, 3)], '<0;3)')) self.assertEqual(series[0], 1) self.assertEqual(series[0.5], 1.5) self.assertEqual(series[1], 2) self.assertEqual(series[2.3], 3) def", "= sa.join_discrete(sb, lambda i, a, b: a + b) self.assertEqual(sc.eval_points([0, 1, 2]), [0,", "return k PTS = [-1, -2, -3, 1, 2, 3] series = FunctionSeries(NOOP,", "sc = sa.join_discrete(sb, lambda i, a, b: a + b) self.assertIsInstance(sc, DiscreteSeries) self.assertEqual(sc.eval_points([0,", "lambda i, x, y: x * y) class TestLinearInterpolation(unittest.TestCase): def test_lin(self): series =", "self.assertEqual(sa.data, [(i, i ** 2) for i in PTS]) sa = FunctionSeries(lambda x:", "1), (0, 2), (1, 3)], '<-1;2)')) self.assertEqual(series.period, 3.0) self.assertEqual(series.eval_points([-1, 0, 1]), [1, 2,", "** 2, '<-10;10)').discretize( [0, 1, 2, 3, 4, 5], '(-1;6)')) self.assertRaises(NotInDomainError, lambda: FunctionSeries(lambda", "self.assertEqual(series[0], 1) self.assertEqual(series[0.5], 1.5) self.assertEqual(series[1], 2) self.assertEqual(series[2.3], 3) def test_conf(self): self.assertRaises(TypeError, lambda: LinearInterpolationSeries(", "[2, 2]], domain=Interval(0, 3, True, True)) self.assertEqual(s[0], 0) self.assertEqual(s[0.5], 0) self.assertEqual(s[1], 1) self.assertRaises(NotInDomainError,", "math import unittest from firanka.exceptions import NotInDomainError, DomainError from firanka.intervals import Interval from", "DiscreteSeries([(0, 0), (1, 0), (2, 0)], '<0;5>') b = DiscreteSeries([(0, 0), (1, 0)],", "series = FunctionSeries(NOOP, '<-5;5>').apply(dzika) self.assertEqual(series.eval_points(PTS), [x for x in PTS]) def test_domain_sensitivity(self): logs", "for i in PTS]) sa = FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize(PTS) self.assertIsInstance(sa,", 
"ModuloSeries( FunctionSeries(NOOP, '(-inf; inf)'))) self.assertRaises(ValueError, lambda: ModuloSeries(FunctionSeries(NOOP, '<0; 0>'))) def test_base(self): series =", "[-1, -2, -3, 1, 2, 3] series = FunctionSeries(NOOP, '<-5;5>').apply(lambda k, x: k)", "self.assertEqual(sc.eval_points(PTS), EPTS) self.assertTrue(Interval('<0;2)') in sc.domain) def test_discretize(self): # note the invalid data for", "sb = DiscreteSeries([[0, 1], [1, 2], [2, 3]]) sc = sa.join_discrete(sb, lambda i,", "0], [1, 1], [2, 2]]) self.assertEqual(s[0], 0) self.assertEqual(s[0.5], 0) self.assertEqual(s[1], 1) self.assertRaises(NotInDomainError, lambda:", "3]) self.assertEqual(series.eval_points([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]), [3,", "0) self.assertEqual(s[1], 1) self.assertRaises(NotInDomainError, lambda: s[-1]) self.assertRaises(NotInDomainError, lambda: s[2.5]) s = DiscreteSeries([[0, 0],", "test_slice_outdomain(self): series = DiscreteSeries([[0, 0], [1, 1], [2, 2]]) self.assertRaises(NotInDomainError, lambda: series[-1:2]) def", "'<0; 0>'))) def test_base(self): series = ModuloSeries( DiscreteSeries([(0, 1), (1, 2), (2, 3)],", "= DiscreteSeries([(0, 1), (1, 2), (3, 4)], '<0;5>') self.assertRaises(ValueError, lambda: dirs.join_discrete(logs, lambda i,", "0.5) self.assertEqual(sp.domain.stop, 1.5) def test_apply(self): PTS = [-1, -2, -3, 1, 2, 3]", "1, 2, 3, 4, 5] sa = FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize(PTS,", "b) PTS = [0, 1, 1.9] EPTS = [x * x ** 2", "(1, 3), (2, 5)]) def test_eval2(self): sa = DiscreteSeries([[0, 0], [1, 1], [2,", "2) self.assertEquals(series[5], 3) self.assertEquals(series[-1], 3) def test_advanced(self): series = ModuloSeries(DiscreteSeries([(-1, 1), (0, 2),", "sa.join_discrete(HUGE_IDENTITY, lambda i, a, b: i) self.assertEqual(sc.eval_points([0, 1, 2]), [0, 1, 2]) self.assertIsInstance(sc,", "1], [2, 2]]) sb = FunctionSeries(NOOP, '<0;2>') sc = sa.join_discrete(sb, lambda i, a,", "note the invalid data for covering this domain 
self.assertRaises(DomainError, lambda: FunctionSeries(lambda x: x", "s[-1]) self.assertRaises(NotInDomainError, lambda: s[2.5]) s = DiscreteSeries([[0, 0], [1, 1], [2, 2]], domain=Interval(0,", "lambda i, a, b: a + b) self.assertIsInstance(sc, DiscreteSeries) self.assertEqual(sc.eval_points([0, 1, 2]), [1,", "b) self.assertEqual(sc.eval_points([0, 1, 2]), [0, 2, 4]) self.assertIsInstance(sc, DiscreteSeries) self.assertEqual(sc.data, [(0, 0), (1,", "DiscreteSeries) self.assertEqual(sc.data, [(0, 0), (1, 2), (2, 4)]) def test_eval2i(self): sa = DiscreteSeries([[0,", "'<0;3)') ser3 = ser1.join(ser2, lambda i, x, y: x * y) class TestLinearInterpolation(unittest.TestCase):", "x in PTS] self.assertEqual(sc.eval_points(PTS), EPTS) self.assertTrue(Interval('<0;2)') in sc.domain) def test_discretize(self): # note the", "test_discretize(self): # note the invalid data for covering this domain self.assertRaises(DomainError, lambda: FunctionSeries(lambda", "(2, 0)], '<0;5>') b = DiscreteSeries([(0, 0), (1, 0)], '<0;5>') a.join(b, lambda i,", "for i in PTS]) empty = FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize([]) self.assertTrue(empty.domain.is_empty())", "DiscreteSeries([[0, 0], [1, 1], [2, 2]]) sb = DiscreteSeries([[0, 1], [1, 2], [2,", "2, 3] series = FunctionSeries(NOOP, '<-5;5>').apply(dzika) self.assertEqual(series.eval_points(PTS), [x for x in PTS]) def", "self.assertEqual(sc.eval_points([0, 1, 2]), [1, 3, 5]) self.assertEqual(sc.data, [(0, 1), (1, 3), (2, 5)])", "'<-10;10)').discretize( [-100, 0, 1, 2, 3, 4, 5], '(-1;6)')) PTS = [-1, 0,", "[1, 1], [2, 2]]) sb = DiscreteSeries([[0, 1], [1, 2], [2, 3]]) sc", "x, y: x * y) class TestLinearInterpolation(unittest.TestCase): def test_lin(self): series = LinearInterpolationSeries( DiscreteSeries([(0,", "test_base(self): series = ModuloSeries( DiscreteSeries([(0, 1), (1, 2), (2, 3)], '<0;3)')) self.assertEquals(series[3], 1)", "= DiscreteSeries([[0, 0], [1, 1], [2, 2]]).translate(1) self.assertEqual(sp[1.5], 0) 
self.assertEqual(sp[2.5], 1) def test_slice(self):", "sa = DiscreteSeries([[0, 0], [1, 1], [2, 2]]) sb = DiscreteSeries([[0, 1], [1,", "covering this domain self.assertRaises(DomainError, lambda: FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize( [0, 1,", "self.assertEqual(sp[1.5], 1.5) self.assertRaises(NotInDomainError, lambda: sp[0]) self.assertRaises(NotInDomainError, lambda: sp[2]) self.assertEqual(sp.domain.start, 0.5) self.assertEqual(sp.domain.stop, 1.5) def", "[1, 1], [2, 2]]) sc = sa.join_discrete(HUGE_IDENTITY, lambda i, a, b: i) self.assertEqual(sc.eval_points([0,", "import NOOP, HUGE_IDENTITY class TestBase(unittest.TestCase): def test_abstract(self): self.assertRaises(NotImplementedError, lambda: Series('<-1;1>')[0]) class TestDiscreteSeries(unittest.TestCase): def", "self.assertRaises(ValueError, lambda: ModuloSeries( FunctionSeries(NOOP, '(-inf; 0>'))) self.assertRaises(ValueError, lambda: ModuloSeries( FunctionSeries(NOOP, '(-inf; inf)'))) self.assertRaises(ValueError,", "import unittest from firanka.exceptions import NotInDomainError, DomainError from firanka.intervals import Interval from firanka.series", "2]]).translate(1) self.assertEqual(sp[1.5], 0) self.assertEqual(sp[2.5], 1) def test_slice(self): series = DiscreteSeries([[0, 0], [1, 1],", "[1, 1], [2, 2]]) sp = series[0.5:1.5] self.assertEqual(sp[0.5], 0) self.assertEqual(sp[1.5], 1) self.assertRaises(NotInDomainError, lambda:", "[2, 2]]) sb = DiscreteSeries([[0, 1], [1, 2], [2, 3]]) sc = sa.join_discrete(sb,", "FunctionSeries(NOOP, '<0;2>') sp = series[0.5:1.5] self.assertEqual(sp[0.5], 0.5) self.assertEqual(sp[1.5], 1.5) self.assertRaises(NotInDomainError, lambda: sp[0]) self.assertRaises(NotInDomainError,", "= DiscreteSeries([[0, 0], [1, 1], [2, 2]]).apply( lambda k, v: k) self.assertEquals(sb.data, [(0,", "self.assertEquals(series[5], 3) self.assertEquals(series[-1], 3) def test_advanced(self): series = ModuloSeries(DiscreteSeries([(-1, 1), (0, 2), (1,", "class 
TestLinearInterpolation(unittest.TestCase): def test_lin(self): series = LinearInterpolationSeries( DiscreteSeries([(0, 1), (1, 2), (2, 3)],", "self.assertEqual(s[1], 1) self.assertRaises(NotInDomainError, lambda: s[-1]) self.assertEqual(s[2.5], 2) def test_translation(self): s = DiscreteSeries([[0, 0],", "x ** 2, '<-10;10)').discretize(PTS, '(-1;6)') self.assertIsInstance(sa, DiscreteSeries) self.assertEqual(sa.data, [(i, i ** 2) for", "self.assertEqual(s[0.5], 0) self.assertEqual(s[1], 1) self.assertRaises(NotInDomainError, lambda: s[-1]) self.assertRaises(NotInDomainError, lambda: s[2.5]) s = DiscreteSeries([[0,", "-1, 0, 1, 2, 3, 4, 5]), [3, 1, 2, 3, 1, 2,", "firanka.intervals import Interval from firanka.series import DiscreteSeries, FunctionSeries, ModuloSeries, \\ LinearInterpolationSeries, Series from", "x, y: x + y) def test_uncov(self): self.assertRaises(DomainError, lambda: DiscreteSeries([[0, 0], [1, 1],", "0], [1, 1], [2, 2]], '<-5;2>')) def test_base(self): s = DiscreteSeries([[0, 0], [1,", "self.assertEqual(series.eval_points([-1, 0, 1]), [1, 2, 3]) self.assertEqual(series.eval_points([-5, -4, -3, -2, -1, 0, 1,", "test_abstract(self): self.assertRaises(NotImplementedError, lambda: Series('<-1;1>')[0]) class TestDiscreteSeries(unittest.TestCase): def test_redundancy_skip(self): a = DiscreteSeries([(0, 0), (1,", "2, '<0;3)')) ser2 = FunctionSeries(NOOP, '<0;3)') ser3 = ser1.join(ser2, lambda i, x, y:", "[2, 2]]).translate(3) self.assertEqual(s[3], 0) self.assertEqual(s[3.5], 0) self.assertEqual(s[4], 1) def test_slice_outdomain(self): series = DiscreteSeries([[0,", "def test_slice(self): series = DiscreteSeries([[0, 0], [1, 1], [2, 2]]) sp = series[0.5:1.5]", "import math import unittest from firanka.exceptions import NotInDomainError, DomainError from firanka.intervals import Interval", "import DiscreteSeries, FunctionSeries, ModuloSeries, \\ LinearInterpolationSeries, Series from .common import NOOP, HUGE_IDENTITY class", "(1, 1), (2, 2)]) def 
test_apply(self): sb = DiscreteSeries([[0, 0], [1, 1], [2,", "0), (1, 1), (2, 2)]) def test_eval3(self): sa = FunctionSeries(lambda x: x **", "1], [1, 2], [2, 3]]) sc = sa.join_discrete(sb, lambda i, a, b: a", "0)], '<0;5>') a.join(b, lambda i, x, y: x + y) def test_uncov(self): self.assertRaises(DomainError,", "'<0;2>') sp = series[0.5:1.5] self.assertEqual(sp[0.5], 0.5) self.assertEqual(sp[1.5], 1.5) self.assertRaises(NotInDomainError, lambda: sp[0]) self.assertRaises(NotInDomainError, lambda:", "x, a=5, *args, **kwargs): return k PTS = [-1, -2, -3, 1, 2,", "(2, 2)]) def test_apply(self): sb = DiscreteSeries([[0, 0], [1, 1], [2, 2]]).apply( lambda", "4, 5]), [3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1])", "1, 2, 3] series = FunctionSeries(NOOP, '<-5;5>').apply(lambda k, x: k) self.assertEqual(series.eval_points(PTS), [x for", "test_eval(self): sa = DiscreteSeries([[0, 0], [1, 1], [2, 2]]) sb = DiscreteSeries([[0, 1],", "* b) PTS = [0, 1, 1.9] EPTS = [x * x **", "= [-1, 0, 1, 2, 3, 4, 5] sa = FunctionSeries(lambda x: x", "def test_uncov(self): self.assertRaises(DomainError, lambda: DiscreteSeries([[0, 0], [1, 1], [2, 2]], '<-5;2>')) def test_base(self):", "-2, -3, 1, 2, 3] series = FunctionSeries(NOOP, '<-5;5>').apply(dzika) self.assertEqual(series.eval_points(PTS), [x for x", "a, b: i) self.assertEqual(sc.eval_points([0, 1, 2]), [0, 1, 2]) self.assertIsInstance(sc, DiscreteSeries) self.assertEqual(sc.data, [(0,", "sa = FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize(PTS, '(-1;6)') self.assertIsInstance(sa, DiscreteSeries) self.assertEqual(sa.data, [(i,", "'(-1;6)')) self.assertRaises(NotInDomainError, lambda: FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize( [-100, 0, 1, 2,", "0) self.assertEqual(sp[1.5], 1) self.assertRaises(NotInDomainError, lambda: sp[0]) self.assertRaises(NotInDomainError, lambda: sp[2]) self.assertEqual(sp.domain.start, 0.5) self.assertEqual(sp.domain.stop, 1.5)", "2)]) def test_apply(self): sb = DiscreteSeries([[0, 0], [1, 1], [2, 
2]]).apply( lambda k,", "4, 5] sa = FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize(PTS, '(-1;6)') self.assertIsInstance(sa, DiscreteSeries)", "'<-5;2>')) def test_base(self): s = DiscreteSeries([[0, 0], [1, 1], [2, 2]]) self.assertEqual(s[0], 0)", "1], [2, 2]]).translate(3) self.assertEqual(s[3], 0) self.assertEqual(s[3.5], 0) self.assertEqual(s[4], 1) def test_slice_outdomain(self): series =", "test_apply(self): sb = DiscreteSeries([[0, 0], [1, 1], [2, 2]]).apply( lambda k, v: k)", "(1, 2), (3, 4)], '<0;5>') self.assertRaises(ValueError, lambda: dirs.join_discrete(logs, lambda i, x, y: x", "self.assertRaises(DomainError, lambda: DiscreteSeries([[0, 0], [1, 1], [2, 2]], '<-5;2>')) def test_base(self): s =", "lambda: sp[2]) self.assertEqual(sp.domain.start, 0.5) self.assertEqual(sp.domain.stop, 1.5) def test_eval(self): sa = DiscreteSeries([[0, 0], [1,", "= sa.join(sb, lambda i, a, b: a * b) PTS = [0, 1,", "0), (1, 1), (2, 2)]) def test_apply(self): sb = DiscreteSeries([[0, 0], [1, 1],", "0) self.assertEqual(s[0.5], 0) self.assertEqual(s[1], 1) self.assertRaises(NotInDomainError, lambda: s[-1]) self.assertEqual(s[2.5], 2) def test_translation(self): s", "FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize( [0, 1, 2, 3, 4, 5], '(-1;6)'))", "'<-10;10)').discretize( [0, 1, 2, 3, 4, 5], '(-1;6)')) self.assertRaises(NotInDomainError, lambda: FunctionSeries(lambda x: x", "series = FunctionSeries(NOOP, '<0;2>') sp = series[0.5:1.5] self.assertEqual(sp[0.5], 0.5) self.assertEqual(sp[1.5], 1.5) self.assertRaises(NotInDomainError, lambda:", "test_advanced(self): series = ModuloSeries(DiscreteSeries([(-1, 1), (0, 2), (1, 3)], '<-1;2)')) self.assertEqual(series.period, 3.0) self.assertEqual(series.eval_points([-1,", "1], [2, 2]]).translate(1) self.assertEqual(sp[1.5], 0) self.assertEqual(sp[2.5], 1) def test_slice(self): series = DiscreteSeries([[0, 0],", "0, 1, 2, 3, 4, 5]), [3, 1, 2, 3, 1, 2, 3,", "in sc.domain) def test_discretize(self): # note the invalid data 
for covering this domain", "sp = series[0.5:1.5] self.assertEqual(sp[0.5], 0.5) self.assertEqual(sp[1.5], 1.5) self.assertRaises(NotInDomainError, lambda: sp[0]) self.assertRaises(NotInDomainError, lambda: sp[2])", "lambda i, x, y: x + y)) class TestModuloSeries(unittest.TestCase): def test_exceptions(self): self.assertRaises(ValueError, lambda:", "x + y)) class TestModuloSeries(unittest.TestCase): def test_exceptions(self): self.assertRaises(ValueError, lambda: ModuloSeries( FunctionSeries(NOOP, '(-inf; 0>')))", "= FunctionSeries(NOOP, '<0;3)') ser3 = ser1.join(ser2, lambda i, x, y: x * y)", "DomainError from firanka.intervals import Interval from firanka.series import DiscreteSeries, FunctionSeries, ModuloSeries, \\ LinearInterpolationSeries,", "[(0, 0), (1, 1), (2, 2)]) def test_eval3(self): sa = FunctionSeries(lambda x: x", "1], [2, 2]]) self.assertRaises(NotInDomainError, lambda: series[-1:2]) def test_translate(self): sp = DiscreteSeries([[0, 0], [1,", "1.5) def test_apply(self): PTS = [-1, -2, -3, 1, 2, 3] series =", "def test_discretize(self): # note the invalid data for covering this domain self.assertRaises(DomainError, lambda:", "[1, 1], [2, 2]]) self.assertEqual(s[0], 0) self.assertEqual(s[0.5], 0) self.assertEqual(s[1], 1) self.assertRaises(NotInDomainError, lambda: s[-1])", "(2, 3)], '<0;3)')) self.assertEqual(series[0], 1) self.assertEqual(series[0.5], 1.5) self.assertEqual(series[1], 2) self.assertEqual(series[2.3], 3) def test_conf(self):", "4]) self.assertIsInstance(sc, DiscreteSeries) self.assertEqual(sc.data, [(0, 0), (1, 2), (2, 4)]) def test_eval2i(self): sa", "[2, 2]]) self.assertEqual(s[0], 0) self.assertEqual(s[0.5], 0) self.assertEqual(s[1], 1) self.assertRaises(NotInDomainError, lambda: s[-1]) self.assertRaises(NotInDomainError, lambda:", "[x for x in PTS]) def test_domain_sensitivity(self): logs = FunctionSeries(math.log, '(0;5>') dirs =", "2, '<-10;10)').discretize( [-100, 0, 1, 2, 3, 4, 5], '(-1;6)')) PTS = [-1,", "1, 2, 3, 1]) def 
test_comp_discrete(self): ser1 = ModuloSeries(FunctionSeries(lambda x: x ** 2,", "[2, 2]]) sc = sa.join_discrete(HUGE_IDENTITY, lambda i, a, b: i) self.assertEqual(sc.eval_points([0, 1, 2]),", "4, 5], '(-1;6)')) self.assertRaises(NotInDomainError, lambda: FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize( [-100, 0,", "from .common import NOOP, HUGE_IDENTITY class TestBase(unittest.TestCase): def test_abstract(self): self.assertRaises(NotImplementedError, lambda: Series('<-1;1>')[0]) class", "DiscreteSeries) self.assertEqual(sc.data, [(0, 0), (1, 1), (2, 2)]) def test_apply(self): sb = DiscreteSeries([[0,", "[(0, 0), (1, 2), (2, 4)]) def test_eval2i(self): sa = DiscreteSeries([[0, 0], [1,", "'(-inf; inf)'))) self.assertRaises(ValueError, lambda: ModuloSeries(FunctionSeries(NOOP, '<0; 0>'))) def test_base(self): series = ModuloSeries( DiscreteSeries([(0,", "PTS] self.assertEqual(sc.eval_points(PTS), EPTS) self.assertTrue(Interval('<0;2)') in sc.domain) def test_discretize(self): # note the invalid data", "2]) self.assertIsInstance(sc, DiscreteSeries) self.assertEqual(sc.data, [(0, 0), (1, 1), (2, 2)]) def test_apply(self): sb", "logs = FunctionSeries(math.log, '(0;5>') dirs = DiscreteSeries([(0, 1), (1, 2), (3, 4)], '<0;5>')", "sp[2]) self.assertEqual(sp.domain.start, 0.5) self.assertEqual(sp.domain.stop, 1.5) def test_eval(self): sa = DiscreteSeries([[0, 0], [1, 1],", "i ** 2) for i in PTS]) sa = FunctionSeries(lambda x: x **", "def test_apply(self): PTS = [-1, -2, -3, 1, 2, 3] series = FunctionSeries(NOOP,", "1) self.assertRaises(NotInDomainError, lambda: s[-1]) self.assertEqual(s[2.5], 2) def test_translation(self): s = DiscreteSeries([[0, 0], [1,", "def test_base(self): s = DiscreteSeries([[0, 0], [1, 1], [2, 2]]) self.assertEqual(s[0], 0) self.assertEqual(s[0.5],", "lambda: FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize( [-100, 0, 1, 2, 3, 4,", "sa = DiscreteSeries([[0, 0], [1, 1], [2, 2]]) sb = FunctionSeries(NOOP, '<0;2>') sc", "-3, -2, -1, 0, 
1, 2, 3, 4, 5]), [3, 1, 2, 3,", "[2, 3]]) sc = sa.join_discrete(sb, lambda i, a, b: a + b) self.assertIsInstance(sc,", "def test_eval3(self): sa = FunctionSeries(lambda x: x ** 2, '<-10;10)') sb = FunctionSeries(NOOP,", "(0, 2), (1, 3)], '<-1;2)')) self.assertEqual(series.period, 3.0) self.assertEqual(series.eval_points([-1, 0, 1]), [1, 2, 3])", "3, 1, 2, 3, 1]) def test_comp_discrete(self): ser1 = ModuloSeries(FunctionSeries(lambda x: x **", "0.5) self.assertEqual(sp.domain.stop, 1.5) def test_eval(self): sa = DiscreteSeries([[0, 0], [1, 1], [2, 2]])", "= DiscreteSeries([[0, 0], [1, 1], [2, 2]], domain=Interval(0, 3, True, True)) self.assertEqual(s[0], 0)", "a + b) self.assertEqual(sc.eval_points([0, 1, 2]), [0, 2, 4]) self.assertIsInstance(sc, DiscreteSeries) self.assertEqual(sc.data, [(0,", "*args, **kwargs): return k PTS = [-1, -2, -3, 1, 2, 3] series", "self.assertEqual(s[0], 0) self.assertEqual(s[0.5], 0) self.assertEqual(s[1], 1) self.assertRaises(NotInDomainError, lambda: s[-1]) self.assertRaises(NotInDomainError, lambda: s[2.5]) s", "a=5, *args, **kwargs): return k PTS = [-1, -2, -3, 1, 2, 3]", "(1, 0)], '<0;5>') a.join(b, lambda i, x, y: x + y) def test_uncov(self):", "5]) self.assertEqual(sc.data, [(0, 1), (1, 3), (2, 5)]) def test_eval2(self): sa = DiscreteSeries([[0,", "series[0.5:1.5] self.assertEqual(sp[0.5], 0) self.assertEqual(sp[1.5], 1) self.assertRaises(NotInDomainError, lambda: sp[0]) self.assertRaises(NotInDomainError, lambda: sp[2]) self.assertEqual(sp.domain.start, 0.5)", "0], [1, 1], [2, 2]]) sp = series[0.5:1.5] self.assertEqual(sp[0.5], 0) self.assertEqual(sp[1.5], 1) self.assertRaises(NotInDomainError,", "1]), [1, 2, 3]) self.assertEqual(series.eval_points([-5, -4, -3, -2, -1, 0, 1, 2, 3,", "'<-10;10)').discretize(PTS, '(-1;6)') self.assertIsInstance(sa, DiscreteSeries) self.assertEqual(sa.data, [(i, i ** 2) for i in PTS])", "0>'))) def test_base(self): series = ModuloSeries( DiscreteSeries([(0, 1), (1, 2), (2, 3)], '<0;3)'))", "5] sa = 
FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize(PTS, '(-1;6)') self.assertIsInstance(sa, DiscreteSeries) self.assertEqual(sa.data,", "self.assertEqual(sc.data, [(0, 0), (1, 1), (2, 2)]) def test_apply(self): sb = DiscreteSeries([[0, 0],", "1.5) def test_eval(self): sa = DiscreteSeries([[0, 0], [1, 1], [2, 2]]) sb =", "in PTS]) def test_domain_sensitivity(self): logs = FunctionSeries(math.log, '(0;5>') dirs = DiscreteSeries([(0, 1), (1,", "def test_eval2(self): sa = DiscreteSeries([[0, 0], [1, 1], [2, 2]]) sb = FunctionSeries(NOOP,", "DiscreteSeries([(0, 1), (1, 2), (2, 3)], '<0;3)')) self.assertEquals(series[3], 1) self.assertEquals(series[4], 2) self.assertEquals(series[5], 3)", "DiscreteSeries([[0, 0], [1, 1], [2, 2]], domain=Interval(0, 3, True, True)) self.assertEqual(s[0], 0) self.assertEqual(s[0.5],", "[2, 2]]) sb = FunctionSeries(NOOP, '<0;2>') sc = sa.join_discrete(sb, lambda i, a, b:", "self.assertEqual(sc.eval_points([0, 1, 2]), [0, 1, 2]) self.assertIsInstance(sc, DiscreteSeries) self.assertEqual(sc.data, [(0, 0), (1, 1),", "** 2, '<0;3)')) ser2 = FunctionSeries(NOOP, '<0;3)') ser3 = ser1.join(ser2, lambda i, x,", "test_lin(self): series = LinearInterpolationSeries( DiscreteSeries([(0, 1), (1, 2), (2, 3)], '<0;3)')) self.assertEqual(series[0], 1)", "y: x + y) def test_uncov(self): self.assertRaises(DomainError, lambda: DiscreteSeries([[0, 0], [1, 1], [2,", "1], [2, 2]]) sc = sa.join_discrete(HUGE_IDENTITY, lambda i, a, b: i) self.assertEqual(sc.eval_points([0, 1,", "FunctionSeries(lambda x: x ** 2, '<-10;10)') sb = FunctionSeries(NOOP, '<0;2)') sc = sa.join(sb,", "1, 2]) self.assertIsInstance(sc, DiscreteSeries) self.assertEqual(sc.data, [(0, 0), (1, 1), (2, 2)]) def test_apply(self):", "EPTS = [x * x ** 2 for x in PTS] self.assertEqual(sc.eval_points(PTS), EPTS)", "0, 1, 2, 3, 4, 5], '(-1;6)')) PTS = [-1, 0, 1, 2,", "def test_base(self): series = ModuloSeries( DiscreteSeries([(0, 1), (1, 2), (2, 3)], '<0;3)')) self.assertEquals(series[3],", "x 
** 2, '<-10;10)').discretize([]) self.assertTrue(empty.domain.is_empty()) class TestFunctionSeries(unittest.TestCase): def test_slice(self): series = FunctionSeries(NOOP, '<0;2>')", "self.assertEqual(s[4], 1) def test_slice_outdomain(self): series = DiscreteSeries([[0, 0], [1, 1], [2, 2]]) self.assertRaises(NotInDomainError,", "test_translate(self): sp = DiscreteSeries([[0, 0], [1, 1], [2, 2]]).translate(1) self.assertEqual(sp[1.5], 0) self.assertEqual(sp[2.5], 1)", "3] series = FunctionSeries(NOOP, '<-5;5>').apply(lambda k, x: k) self.assertEqual(series.eval_points(PTS), [x for x in", "self.assertEqual(sp.domain.stop, 1.5) def test_apply(self): PTS = [-1, -2, -3, 1, 2, 3] series", "self.assertEqual(series.eval_points(PTS), [x for x in PTS]) def test_apply_wild(self): def dzika(k, x, a=5, *args,", "1) self.assertRaises(NotInDomainError, lambda: s[-1]) self.assertRaises(NotInDomainError, lambda: s[2.5]) s = DiscreteSeries([[0, 0], [1, 1],", "FunctionSeries(NOOP, '<0;2>') sc = sa.join_discrete(sb, lambda i, a, b: a + b) self.assertEqual(sc.eval_points([0,", "[-100, 0, 1, 2, 3, 4, 5], '(-1;6)')) PTS = [-1, 0, 1,", "2]]) self.assertRaises(NotInDomainError, lambda: series[-1:2]) def test_translate(self): sp = DiscreteSeries([[0, 0], [1, 1], [2,", "1), (1, 2), (3, 4)], '<0;5>') self.assertRaises(ValueError, lambda: dirs.join_discrete(logs, lambda i, x, y:", "sc = sa.join_discrete(sb, lambda i, a, b: a + b) self.assertEqual(sc.eval_points([0, 1, 2]),", "def test_exceptions(self): self.assertRaises(ValueError, lambda: ModuloSeries( FunctionSeries(NOOP, '(-inf; 0>'))) self.assertRaises(ValueError, lambda: ModuloSeries( FunctionSeries(NOOP, '(-inf;", "self.assertRaises(NotInDomainError, lambda: s[-1]) self.assertRaises(NotInDomainError, lambda: s[2.5]) s = DiscreteSeries([[0, 0], [1, 1], [2,", "[x * x ** 2 for x in PTS] self.assertEqual(sc.eval_points(PTS), EPTS) self.assertTrue(Interval('<0;2)') in", "self.assertEquals(series[3], 1) self.assertEquals(series[4], 2) 
self.assertEquals(series[5], 3) self.assertEquals(series[-1], 3) def test_advanced(self): series = ModuloSeries(DiscreteSeries([(-1,", "= [-1, -2, -3, 1, 2, 3] series = FunctionSeries(NOOP, '<-5;5>').apply(lambda k, x:", "y) class TestLinearInterpolation(unittest.TestCase): def test_lin(self): series = LinearInterpolationSeries( DiscreteSeries([(0, 1), (1, 2), (2,", "self.assertRaises(NotInDomainError, lambda: sp[2]) self.assertEqual(sp.domain.start, 0.5) self.assertEqual(sp.domain.stop, 1.5) def test_eval(self): sa = DiscreteSeries([[0, 0],", "for x in PTS]) def test_apply_wild(self): def dzika(k, x, a=5, *args, **kwargs): return", "2, 3, 4, 5], '(-1;6)')) self.assertRaises(NotInDomainError, lambda: FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize(", "series = LinearInterpolationSeries( DiscreteSeries([(0, 1), (1, 2), (2, 3)], '<0;3)')) self.assertEqual(series[0], 1) self.assertEqual(series[0.5],", "self.assertIsInstance(sc, DiscreteSeries) self.assertEqual(sc.data, [(0, 0), (1, 1), (2, 2)]) def test_apply(self): sb =", "a, b: a * b) PTS = [0, 1, 1.9] EPTS = [x", "class TestBase(unittest.TestCase): def test_abstract(self): self.assertRaises(NotImplementedError, lambda: Series('<-1;1>')[0]) class TestDiscreteSeries(unittest.TestCase): def test_redundancy_skip(self): a =", "def test_comp_discrete(self): ser1 = ModuloSeries(FunctionSeries(lambda x: x ** 2, '<0;3)')) ser2 = FunctionSeries(NOOP,", "= [0, 1, 1.9] EPTS = [x * x ** 2 for x", "self.assertEquals(series[4], 2) self.assertEquals(series[5], 3) self.assertEquals(series[-1], 3) def test_advanced(self): series = ModuloSeries(DiscreteSeries([(-1, 1), (0,", "inf)'))) self.assertRaises(ValueError, lambda: ModuloSeries(FunctionSeries(NOOP, '<0; 0>'))) def test_base(self): series = ModuloSeries( DiscreteSeries([(0, 1),", "x + y) def test_uncov(self): self.assertRaises(DomainError, lambda: DiscreteSeries([[0, 0], [1, 1], [2, 2]],", "0], [1, 1], [2, 2]]) sb = FunctionSeries(NOOP, '<0;2>') sc = 
sa.join_discrete(sb, lambda", "x: x ** 2, '<-10;10)').discretize(PTS, '(-1;6)') self.assertIsInstance(sa, DiscreteSeries) self.assertEqual(sa.data, [(i, i ** 2)", "self.assertEqual(s[1], 1) self.assertRaises(NotInDomainError, lambda: s[-1]) self.assertRaises(NotInDomainError, lambda: s[2.5]) s = DiscreteSeries([[0, 0], [1,", "a * b) PTS = [0, 1, 1.9] EPTS = [x * x", "** 2 for x in PTS] self.assertEqual(sc.eval_points(PTS), EPTS) self.assertTrue(Interval('<0;2)') in sc.domain) def test_discretize(self):", "2), (2, 3)], '<0;3)')) self.assertEquals(series[3], 1) self.assertEquals(series[4], 2) self.assertEquals(series[5], 3) self.assertEquals(series[-1], 3) def", "1, 2]), [0, 2, 4]) self.assertIsInstance(sc, DiscreteSeries) self.assertEqual(sc.data, [(0, 0), (1, 2), (2,", "* x ** 2 for x in PTS] self.assertEqual(sc.eval_points(PTS), EPTS) self.assertTrue(Interval('<0;2)') in sc.domain)", "series[0.5:1.5] self.assertEqual(sp[0.5], 0.5) self.assertEqual(sp[1.5], 1.5) self.assertRaises(NotInDomainError, lambda: sp[0]) self.assertRaises(NotInDomainError, lambda: sp[2]) self.assertEqual(sp.domain.start, 0.5)", "-4, -3, -2, -1, 0, 1, 2, 3, 4, 5]), [3, 1, 2,", "[0, 1, 2]) self.assertIsInstance(sc, DiscreteSeries) self.assertEqual(sc.data, [(0, 0), (1, 1), (2, 2)]) def", "3] series = FunctionSeries(NOOP, '<-5;5>').apply(dzika) self.assertEqual(series.eval_points(PTS), [x for x in PTS]) def test_domain_sensitivity(self):", "ModuloSeries(FunctionSeries(NOOP, '<0; 0>'))) def test_base(self): series = ModuloSeries( DiscreteSeries([(0, 1), (1, 2), (2,", "[(i, i ** 2) for i in PTS]) empty = FunctionSeries(lambda x: x", "def test_domain_sensitivity(self): logs = FunctionSeries(math.log, '(0;5>') dirs = DiscreteSeries([(0, 1), (1, 2), (3,", "[2, 2]]) self.assertRaises(NotInDomainError, lambda: series[-1:2]) def test_translate(self): sp = DiscreteSeries([[0, 0], [1, 1],", "'(-1;6)')) PTS = [-1, 0, 1, 2, 3, 4, 5] sa = FunctionSeries(lambda", "= DiscreteSeries([[0, 0], [1, 1], [2, 2]]) 
self.assertRaises(NotInDomainError, lambda: series[-1:2]) def test_translate(self): sp", "self.assertEqual(sc.data, [(0, 1), (1, 3), (2, 5)]) def test_eval2(self): sa = DiscreteSeries([[0, 0],", "1, 2, 3, 4, 5], '(-1;6)')) PTS = [-1, 0, 1, 2, 3,", "** 2, '<-10;10)').discretize([]) self.assertTrue(empty.domain.is_empty()) class TestFunctionSeries(unittest.TestCase): def test_slice(self): series = FunctionSeries(NOOP, '<0;2>') sp", "i, x, y: x + y) def test_uncov(self): self.assertRaises(DomainError, lambda: DiscreteSeries([[0, 0], [1,", "* y) class TestLinearInterpolation(unittest.TestCase): def test_lin(self): series = LinearInterpolationSeries( DiscreteSeries([(0, 1), (1, 2),", "1) self.assertEquals(series[4], 2) self.assertEquals(series[5], 3) self.assertEquals(series[-1], 3) def test_advanced(self): series = ModuloSeries(DiscreteSeries([(-1, 1),", "FunctionSeries(NOOP, '<0;2)') sc = sa.join(sb, lambda i, a, b: a * b) PTS", "[1, 1], [2, 2]]).apply( lambda k, v: k) self.assertEquals(sb.data, [(0, 0), (1, 1),", "x: x ** 2, '<-10;10)').discretize([]) self.assertTrue(empty.domain.is_empty()) class TestFunctionSeries(unittest.TestCase): def test_slice(self): series = FunctionSeries(NOOP,", "2]), [0, 2, 4]) self.assertIsInstance(sc, DiscreteSeries) self.assertEqual(sc.data, [(0, 0), (1, 2), (2, 4)])", "series = ModuloSeries( DiscreteSeries([(0, 1), (1, 2), (2, 3)], '<0;3)')) self.assertEquals(series[3], 1) self.assertEquals(series[4],", "i ** 2) for i in PTS]) empty = FunctionSeries(lambda x: x **", "FunctionSeries(math.log, '(0;5>') dirs = DiscreteSeries([(0, 1), (1, 2), (3, 4)], '<0;5>') self.assertRaises(ValueError, lambda:", ".common import NOOP, HUGE_IDENTITY class TestBase(unittest.TestCase): def test_abstract(self): self.assertRaises(NotImplementedError, lambda: Series('<-1;1>')[0]) class TestDiscreteSeries(unittest.TestCase):", "self.assertRaises(NotInDomainError, lambda: s[2.5]) s = DiscreteSeries([[0, 0], [1, 1], [2, 2]], domain=Interval(0, 3,", "1, 2]), [1, 
3, 5]) self.assertEqual(sc.data, [(0, 1), (1, 3), (2, 5)]) def", "0, 1, 2, 3, 4, 5] sa = FunctionSeries(lambda x: x ** 2,", "2)]) def test_eval3(self): sa = FunctionSeries(lambda x: x ** 2, '<-10;10)') sb =", "[0, 1, 1.9] EPTS = [x * x ** 2 for x in", "def test_apply(self): sb = DiscreteSeries([[0, 0], [1, 1], [2, 2]]).apply( lambda k, v:", "= ModuloSeries(FunctionSeries(lambda x: x ** 2, '<0;3)')) ser2 = FunctionSeries(NOOP, '<0;3)') ser3 =", "0], [1, 1], [2, 2]]).translate(1) self.assertEqual(sp[1.5], 0) self.assertEqual(sp[2.5], 1) def test_slice(self): series =", "= FunctionSeries(math.log, '(0;5>') dirs = DiscreteSeries([(0, 1), (1, 2), (3, 4)], '<0;5>') self.assertRaises(ValueError,", "3, 4, 5], '(-1;6)')) self.assertRaises(NotInDomainError, lambda: FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize( [-100,", "domain self.assertRaises(DomainError, lambda: FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize( [0, 1, 2, 3,", "DiscreteSeries([(0, 1), (1, 2), (2, 3)], '<0;3)')) self.assertEqual(series[0], 1) self.assertEqual(series[0.5], 1.5) self.assertEqual(series[1], 2)", "self.assertIsInstance(sc, DiscreteSeries) self.assertEqual(sc.eval_points([0, 1, 2]), [1, 3, 5]) self.assertEqual(sc.data, [(0, 1), (1, 3),", "FunctionSeries(NOOP, '(-inf; inf)'))) self.assertRaises(ValueError, lambda: ModuloSeries(FunctionSeries(NOOP, '<0; 0>'))) def test_base(self): series = ModuloSeries(", "def test_eval(self): sa = DiscreteSeries([[0, 0], [1, 1], [2, 2]]) sb = DiscreteSeries([[0,", "Interval from firanka.series import DiscreteSeries, FunctionSeries, ModuloSeries, \\ LinearInterpolationSeries, Series from .common import", "TestDiscreteSeries(unittest.TestCase): def test_redundancy_skip(self): a = DiscreteSeries([(0, 0), (1, 0), (2, 0)], '<0;5>') b", "1), (2, 2)]) def test_apply(self): sb = DiscreteSeries([[0, 0], [1, 1], [2, 2]]).apply(", "lambda i, a, b: a * b) PTS = [0, 1, 1.9] EPTS", "this domain self.assertRaises(DomainError, lambda: FunctionSeries(lambda x: x 
** 2, '<-10;10)').discretize( [0, 1, 2,", "domain=Interval(0, 3, True, True)) self.assertEqual(s[0], 0) self.assertEqual(s[0.5], 0) self.assertEqual(s[1], 1) self.assertRaises(NotInDomainError, lambda: s[-1])", "def test_slice_outdomain(self): series = DiscreteSeries([[0, 0], [1, 1], [2, 2]]) self.assertRaises(NotInDomainError, lambda: series[-1:2])", "sc = sa.join(sb, lambda i, a, b: a * b) PTS = [0,", "lambda: sp[0]) self.assertRaises(NotInDomainError, lambda: sp[2]) self.assertEqual(sp.domain.start, 0.5) self.assertEqual(sp.domain.stop, 1.5) def test_apply(self): PTS =", "3, True, True)) self.assertEqual(s[0], 0) self.assertEqual(s[0.5], 0) self.assertEqual(s[1], 1) self.assertRaises(NotInDomainError, lambda: s[-1]) self.assertEqual(s[2.5],", "self.assertRaises(ValueError, lambda: ModuloSeries(FunctionSeries(NOOP, '<0; 0>'))) def test_base(self): series = ModuloSeries( DiscreteSeries([(0, 1), (1,", "self.assertEqual(series.period, 3.0) self.assertEqual(series.eval_points([-1, 0, 1]), [1, 2, 3]) self.assertEqual(series.eval_points([-5, -4, -3, -2, -1,", "def test_lin(self): series = LinearInterpolationSeries( DiscreteSeries([(0, 1), (1, 2), (2, 3)], '<0;3)')) self.assertEqual(series[0],", "1, 2, 3] series = FunctionSeries(NOOP, '<-5;5>').apply(dzika) self.assertEqual(series.eval_points(PTS), [x for x in PTS])", "0) self.assertEqual(sp[2.5], 1) def test_slice(self): series = DiscreteSeries([[0, 0], [1, 1], [2, 2]])", "sb = FunctionSeries(NOOP, '<0;2)') sc = sa.join(sb, lambda i, a, b: a *", "lambda: ModuloSeries(FunctionSeries(NOOP, '<0; 0>'))) def test_base(self): series = ModuloSeries( DiscreteSeries([(0, 1), (1, 2),", "5)]) def test_eval2(self): sa = DiscreteSeries([[0, 0], [1, 1], [2, 2]]) sb =", "DiscreteSeries, FunctionSeries, ModuloSeries, \\ LinearInterpolationSeries, Series from .common import NOOP, HUGE_IDENTITY class TestBase(unittest.TestCase):", "= sa.join_discrete(HUGE_IDENTITY, lambda i, a, b: i) self.assertEqual(sc.eval_points([0, 1, 2]), [0, 1, 
2])", "= DiscreteSeries([[0, 0], [1, 1], [2, 2]]) sp = series[0.5:1.5] self.assertEqual(sp[0.5], 0) self.assertEqual(sp[1.5],", "DiscreteSeries([[0, 0], [1, 1], [2, 2]]).translate(3) self.assertEqual(s[3], 0) self.assertEqual(s[3.5], 0) self.assertEqual(s[4], 1) def", "2, '<-10;10)').discretize([]) self.assertTrue(empty.domain.is_empty()) class TestFunctionSeries(unittest.TestCase): def test_slice(self): series = FunctionSeries(NOOP, '<0;2>') sp =", "(1, 3)], '<-1;2)')) self.assertEqual(series.period, 3.0) self.assertEqual(series.eval_points([-1, 0, 1]), [1, 2, 3]) self.assertEqual(series.eval_points([-5, -4,", "class TestDiscreteSeries(unittest.TestCase): def test_redundancy_skip(self): a = DiscreteSeries([(0, 0), (1, 0), (2, 0)], '<0;5>')", "self.assertRaises(NotInDomainError, lambda: sp[0]) self.assertRaises(NotInDomainError, lambda: sp[2]) self.assertEqual(sp.domain.start, 0.5) self.assertEqual(sp.domain.stop, 1.5) def test_eval(self): sa", "test_apply_wild(self): def dzika(k, x, a=5, *args, **kwargs): return k PTS = [-1, -2,", "2) for i in PTS]) empty = FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize([])", "= DiscreteSeries([[0, 0], [1, 1], [2, 2]]) sb = DiscreteSeries([[0, 1], [1, 2],", "k) self.assertEqual(series.eval_points(PTS), [x for x in PTS]) def test_apply_wild(self): def dzika(k, x, a=5,", "invalid data for covering this domain self.assertRaises(DomainError, lambda: FunctionSeries(lambda x: x ** 2,", "+ y) def test_uncov(self): self.assertRaises(DomainError, lambda: DiscreteSeries([[0, 0], [1, 1], [2, 2]], '<-5;2>'))", "(2, 2)]) def test_eval3(self): sa = FunctionSeries(lambda x: x ** 2, '<-10;10)') sb", "-3, 1, 2, 3] series = FunctionSeries(NOOP, '<-5;5>').apply(dzika) self.assertEqual(series.eval_points(PTS), [x for x in", "[2, 2]]).apply( lambda k, v: k) self.assertEquals(sb.data, [(0, 0), (1, 1), (2, 2)])", "DiscreteSeries([[0, 0], [1, 1], [2, 2]]).translate(1) self.assertEqual(sp[1.5], 0) self.assertEqual(sp[2.5], 1) def 
test_slice(self): series", "self.assertEqual(sp[2.5], 1) def test_slice(self): series = DiscreteSeries([[0, 0], [1, 1], [2, 2]]) sp", "sp[0]) self.assertRaises(NotInDomainError, lambda: sp[2]) self.assertEqual(sp.domain.start, 0.5) self.assertEqual(sp.domain.stop, 1.5) def test_apply(self): PTS = [-1,", "2), (2, 4)]) def test_eval2i(self): sa = DiscreteSeries([[0, 0], [1, 1], [2, 2]])", "3) def test_advanced(self): series = ModuloSeries(DiscreteSeries([(-1, 1), (0, 2), (1, 3)], '<-1;2)')) self.assertEqual(series.period,", "sp[2]) self.assertEqual(sp.domain.start, 0.5) self.assertEqual(sp.domain.stop, 1.5) def test_apply(self): PTS = [-1, -2, -3, 1,", "b = DiscreteSeries([(0, 0), (1, 0)], '<0;5>') a.join(b, lambda i, x, y: x", "sc = sa.join_discrete(HUGE_IDENTITY, lambda i, a, b: i) self.assertEqual(sc.eval_points([0, 1, 2]), [0, 1,", "= [-1, -2, -3, 1, 2, 3] series = FunctionSeries(NOOP, '<-5;5>').apply(dzika) self.assertEqual(series.eval_points(PTS), [x", "** 2, '<-10;10)').discretize( [-100, 0, 1, 2, 3, 4, 5], '(-1;6)')) PTS =", "'(0;5>') dirs = DiscreteSeries([(0, 1), (1, 2), (3, 4)], '<0;5>') self.assertRaises(ValueError, lambda: dirs.join_discrete(logs,", "3)], '<0;3)')) self.assertEqual(series[0], 1) self.assertEqual(series[0.5], 1.5) self.assertEqual(series[1], 2) self.assertEqual(series[2.3], 3) def test_conf(self): self.assertRaises(TypeError,", "lambda: sp[2]) self.assertEqual(sp.domain.start, 0.5) self.assertEqual(sp.domain.stop, 1.5) def test_apply(self): PTS = [-1, -2, -3,", "self.assertEqual(sp.domain.start, 0.5) self.assertEqual(sp.domain.stop, 1.5) def test_eval(self): sa = DiscreteSeries([[0, 0], [1, 1], [2,", "3, 4, 5], '(-1;6)')) PTS = [-1, 0, 1, 2, 3, 4, 5]", "3)], '<-1;2)')) self.assertEqual(series.period, 3.0) self.assertEqual(series.eval_points([-1, 0, 1]), [1, 2, 3]) self.assertEqual(series.eval_points([-5, -4, -3,", "self.assertEqual(sc.data, [(0, 0), (1, 2), (2, 4)]) def test_eval2i(self): sa = DiscreteSeries([[0, 0],", 
"self.assertRaises(ValueError, lambda: dirs.join_discrete(logs, lambda i, x, y: x + y)) class TestModuloSeries(unittest.TestCase): def", "i) self.assertEqual(sc.eval_points([0, 1, 2]), [0, 1, 2]) self.assertIsInstance(sc, DiscreteSeries) self.assertEqual(sc.data, [(0, 0), (1,", "series[-1:2]) def test_translate(self): sp = DiscreteSeries([[0, 0], [1, 1], [2, 2]]).translate(1) self.assertEqual(sp[1.5], 0)", "DiscreteSeries([(0, 1), (1, 2), (3, 4)], '<0;5>') self.assertRaises(ValueError, lambda: dirs.join_discrete(logs, lambda i, x,", "PTS]) def test_apply_wild(self): def dzika(k, x, a=5, *args, **kwargs): return k PTS =", "sa = DiscreteSeries([[0, 0], [1, 1], [2, 2]]) sc = sa.join_discrete(HUGE_IDENTITY, lambda i,", "2]]) sb = FunctionSeries(NOOP, '<0;2>') sc = sa.join_discrete(sb, lambda i, a, b: a", "self.assertEqual(sp[0.5], 0.5) self.assertEqual(sp[1.5], 1.5) self.assertRaises(NotInDomainError, lambda: sp[0]) self.assertRaises(NotInDomainError, lambda: sp[2]) self.assertEqual(sp.domain.start, 0.5) self.assertEqual(sp.domain.stop,", "[1, 1], [2, 2]]) sb = FunctionSeries(NOOP, '<0;2>') sc = sa.join_discrete(sb, lambda i,", "2]]) sb = DiscreteSeries([[0, 1], [1, 2], [2, 3]]) sc = sa.join_discrete(sb, lambda", "0], [1, 1], [2, 2]]).translate(3) self.assertEqual(s[3], 0) self.assertEqual(s[3.5], 0) self.assertEqual(s[4], 1) def test_slice_outdomain(self):", "[1, 1], [2, 2]]).translate(3) self.assertEqual(s[3], 0) self.assertEqual(s[3.5], 0) self.assertEqual(s[4], 1) def test_slice_outdomain(self): series", "test_base(self): s = DiscreteSeries([[0, 0], [1, 1], [2, 2]]) self.assertEqual(s[0], 0) self.assertEqual(s[0.5], 0)", "self.assertEqual(series.eval_points(PTS), [x for x in PTS]) def test_domain_sensitivity(self): logs = FunctionSeries(math.log, '(0;5>') dirs", "test_exceptions(self): self.assertRaises(ValueError, lambda: ModuloSeries( FunctionSeries(NOOP, '(-inf; 0>'))) self.assertRaises(ValueError, lambda: ModuloSeries( FunctionSeries(NOOP, '(-inf; inf)')))", 
"(1, 1), (2, 2)]) def test_eval3(self): sa = FunctionSeries(lambda x: x ** 2,", "test_apply(self): PTS = [-1, -2, -3, 1, 2, 3] series = FunctionSeries(NOOP, '<-5;5>').apply(lambda", "Series('<-1;1>')[0]) class TestDiscreteSeries(unittest.TestCase): def test_redundancy_skip(self): a = DiscreteSeries([(0, 0), (1, 0), (2, 0)],", "s = DiscreteSeries([[0, 0], [1, 1], [2, 2]]) self.assertEqual(s[0], 0) self.assertEqual(s[0.5], 0) self.assertEqual(s[1],", "3]]) sc = sa.join_discrete(sb, lambda i, a, b: a + b) self.assertIsInstance(sc, DiscreteSeries)", "(1, 2), (2, 3)], '<0;3)')) self.assertEquals(series[3], 1) self.assertEquals(series[4], 2) self.assertEquals(series[5], 3) self.assertEquals(series[-1], 3)", "x: x ** 2, '<-10;10)').discretize( [0, 1, 2, 3, 4, 5], '(-1;6)')) self.assertRaises(NotInDomainError,", "'<-5;5>').apply(dzika) self.assertEqual(series.eval_points(PTS), [x for x in PTS]) def test_domain_sensitivity(self): logs = FunctionSeries(math.log, '(0;5>')", "5], '(-1;6)')) self.assertRaises(NotInDomainError, lambda: FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize( [-100, 0, 1,", "[3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1]) def test_comp_discrete(self):", "0>'))) self.assertRaises(ValueError, lambda: ModuloSeries( FunctionSeries(NOOP, '(-inf; inf)'))) self.assertRaises(ValueError, lambda: ModuloSeries(FunctionSeries(NOOP, '<0; 0>'))) def", "lambda i, a, b: a + b) self.assertEqual(sc.eval_points([0, 1, 2]), [0, 2, 4])", "1, 2]), [0, 1, 2]) self.assertIsInstance(sc, DiscreteSeries) self.assertEqual(sc.data, [(0, 0), (1, 1), (2,", "unittest from firanka.exceptions import NotInDomainError, DomainError from firanka.intervals import Interval from firanka.series import", "2, '<-10;10)').discretize(PTS) self.assertIsInstance(sa, DiscreteSeries) self.assertEqual(sa.data, [(i, i ** 2) for i in PTS])" ]
[ "Command from werkzeug.utils import send_from_directory api_routes_blueprint = Blueprint('api_routes', __name__) execute: Callable[[Union[Command, str], Optional[Dict[str,", "from src.utils.remote import Command from werkzeug.utils import send_from_directory api_routes_blueprint = Blueprint('api_routes', __name__) execute:", "from werkzeug.utils import send_from_directory api_routes_blueprint = Blueprint('api_routes', __name__) execute: Callable[[Union[Command, str], Optional[Dict[str, Any]]],", "<filename>src/utils/remote/remote_api/routes.py from typing import Any, Callable, Dict, Optional, Union from flask import Blueprint,", "Optional, Union from flask import Blueprint, request from src.utils.remote import Command from werkzeug.utils", "src.utils.remote import Command from werkzeug.utils import send_from_directory api_routes_blueprint = Blueprint('api_routes', __name__) execute: Callable[[Union[Command,", "'icon.ico', ) @api_routes_blueprint.route('/<command>', methods=['POST']) def command(command: str): global execute data = request.get_json() execute(command,", "Optional[Dict[str, Any]]], None] @api_routes_blueprint.route('/') def index(): return 'Bible projector remote API' @api_routes_blueprint.route('/favicon.ico') def", "send_from_directory api_routes_blueprint = Blueprint('api_routes', __name__) execute: Callable[[Union[Command, str], Optional[Dict[str, Any]]], None] @api_routes_blueprint.route('/') def", "projector remote API' @api_routes_blueprint.route('/favicon.ico') def favicon(): return send_from_directory( api_routes_blueprint.root_path, 'icon.ico', ) @api_routes_blueprint.route('/<command>', methods=['POST'])", "@api_routes_blueprint.route('/<command>', methods=['POST']) def command(command: str): global execute data = request.get_json() execute(command, data) return", "str], Optional[Dict[str, Any]]], None] @api_routes_blueprint.route('/') def index(): return 'Bible projector remote API' @api_routes_blueprint.route('/favicon.ico')", 
"werkzeug.utils import send_from_directory api_routes_blueprint = Blueprint('api_routes', __name__) execute: Callable[[Union[Command, str], Optional[Dict[str, Any]]], None]", "@api_routes_blueprint.route('/favicon.ico') def favicon(): return send_from_directory( api_routes_blueprint.root_path, 'icon.ico', ) @api_routes_blueprint.route('/<command>', methods=['POST']) def command(command: str):", "index(): return 'Bible projector remote API' @api_routes_blueprint.route('/favicon.ico') def favicon(): return send_from_directory( api_routes_blueprint.root_path, 'icon.ico',", "api_routes_blueprint = Blueprint('api_routes', __name__) execute: Callable[[Union[Command, str], Optional[Dict[str, Any]]], None] @api_routes_blueprint.route('/') def index():", "send_from_directory( api_routes_blueprint.root_path, 'icon.ico', ) @api_routes_blueprint.route('/<command>', methods=['POST']) def command(command: str): global execute data =", "API' @api_routes_blueprint.route('/favicon.ico') def favicon(): return send_from_directory( api_routes_blueprint.root_path, 'icon.ico', ) @api_routes_blueprint.route('/<command>', methods=['POST']) def command(command:", "from typing import Any, Callable, Dict, Optional, Union from flask import Blueprint, request", "Any, Callable, Dict, Optional, Union from flask import Blueprint, request from src.utils.remote import", "from flask import Blueprint, request from src.utils.remote import Command from werkzeug.utils import send_from_directory", "return send_from_directory( api_routes_blueprint.root_path, 'icon.ico', ) @api_routes_blueprint.route('/<command>', methods=['POST']) def command(command: str): global execute data", "execute: Callable[[Union[Command, str], Optional[Dict[str, Any]]], None] @api_routes_blueprint.route('/') def index(): return 'Bible projector remote", "import Any, Callable, Dict, Optional, Union from flask import Blueprint, request from src.utils.remote", "Callable[[Union[Command, str], Optional[Dict[str, Any]]], None] 
@api_routes_blueprint.route('/') def index(): return 'Bible projector remote API'", "favicon(): return send_from_directory( api_routes_blueprint.root_path, 'icon.ico', ) @api_routes_blueprint.route('/<command>', methods=['POST']) def command(command: str): global execute", "__name__) execute: Callable[[Union[Command, str], Optional[Dict[str, Any]]], None] @api_routes_blueprint.route('/') def index(): return 'Bible projector", "return 'Bible projector remote API' @api_routes_blueprint.route('/favicon.ico') def favicon(): return send_from_directory( api_routes_blueprint.root_path, 'icon.ico', )", "remote API' @api_routes_blueprint.route('/favicon.ico') def favicon(): return send_from_directory( api_routes_blueprint.root_path, 'icon.ico', ) @api_routes_blueprint.route('/<command>', methods=['POST']) def", "methods=['POST']) def command(command: str): global execute data = request.get_json() execute(command, data) return command", "request from src.utils.remote import Command from werkzeug.utils import send_from_directory api_routes_blueprint = Blueprint('api_routes', __name__)", "Union from flask import Blueprint, request from src.utils.remote import Command from werkzeug.utils import", "import Command from werkzeug.utils import send_from_directory api_routes_blueprint = Blueprint('api_routes', __name__) execute: Callable[[Union[Command, str],", "Blueprint('api_routes', __name__) execute: Callable[[Union[Command, str], Optional[Dict[str, Any]]], None] @api_routes_blueprint.route('/') def index(): return 'Bible", "'Bible projector remote API' @api_routes_blueprint.route('/favicon.ico') def favicon(): return send_from_directory( api_routes_blueprint.root_path, 'icon.ico', ) @api_routes_blueprint.route('/<command>',", "def index(): return 'Bible projector remote API' @api_routes_blueprint.route('/favicon.ico') def favicon(): return send_from_directory( api_routes_blueprint.root_path,", ") @api_routes_blueprint.route('/<command>', methods=['POST']) def command(command: 
str): global execute data = request.get_json() execute(command, data)", "Callable, Dict, Optional, Union from flask import Blueprint, request from src.utils.remote import Command", "Blueprint, request from src.utils.remote import Command from werkzeug.utils import send_from_directory api_routes_blueprint = Blueprint('api_routes',", "def favicon(): return send_from_directory( api_routes_blueprint.root_path, 'icon.ico', ) @api_routes_blueprint.route('/<command>', methods=['POST']) def command(command: str): global", "api_routes_blueprint.root_path, 'icon.ico', ) @api_routes_blueprint.route('/<command>', methods=['POST']) def command(command: str): global execute data = request.get_json()", "Dict, Optional, Union from flask import Blueprint, request from src.utils.remote import Command from", "@api_routes_blueprint.route('/') def index(): return 'Bible projector remote API' @api_routes_blueprint.route('/favicon.ico') def favicon(): return send_from_directory(", "typing import Any, Callable, Dict, Optional, Union from flask import Blueprint, request from", "= Blueprint('api_routes', __name__) execute: Callable[[Union[Command, str], Optional[Dict[str, Any]]], None] @api_routes_blueprint.route('/') def index(): return", "import Blueprint, request from src.utils.remote import Command from werkzeug.utils import send_from_directory api_routes_blueprint =", "Any]]], None] @api_routes_blueprint.route('/') def index(): return 'Bible projector remote API' @api_routes_blueprint.route('/favicon.ico') def favicon():", "None] @api_routes_blueprint.route('/') def index(): return 'Bible projector remote API' @api_routes_blueprint.route('/favicon.ico') def favicon(): return", "import send_from_directory api_routes_blueprint = Blueprint('api_routes', __name__) execute: Callable[[Union[Command, str], Optional[Dict[str, Any]]], None] @api_routes_blueprint.route('/')", "flask import Blueprint, request from src.utils.remote import Command from werkzeug.utils import send_from_directory 
api_routes_blueprint" ]
[ "Unless required by applicable law or agreed to in writing, software # distributed", "% {\"wsdir\": self.wsdir, \"env_path\": os.environ[\"PATH\"]}) ) os.environ[\"ROS_PACKAGE_PATH\"] = os.pathsep.join([\"/before\"] + [\"%s/src/%s\" % (self.wsdir,", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode, 0) self.assertIn(\"cannot verify Gitlab private", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--gitlab-login\", \"Test\") self.assertEqual(exitcode, 0) self.assertIn(\"cannot verify Gitlab private", "self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\",", "os.path.join(self.wsdir, \"src\", \"gamma\")) os.symlink(os.path.join(\"..\", \"repos\", \"delta\"), os.path.join(self.wsdir, \"src\", \"delta\")) with open(os.path.join(self.wsdir, \"repos\", \".metainfo\"),", "Config(self.wsdir) cfg[\"version\"] = \"3.0.0a0\" cfg.write() self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") )", "1) self.assertIn(\"cannot resolve dependencies\", stdout) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"--all\")", "helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--clean\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--clean\",", "stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-default\", \"alpha\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\", []), [\"alpha\"]) exitcode,", "\"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\", \"--store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"store_credentials\"), True) exitcode, stdout = 
helper.run_rosrepo(\"config\",", "self.assertIn(\"no such Gitlab server\", stdout) ####################### self.assertEqual(self.get_config_value(\"ros_root\"), self.ros_root_dir) helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-ros-root\") self.assertEqual(self.get_config_value(\"ros_root\"),", "\"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\",", "0) self.assertEqual(self.get_config_value(\"compiler\"), \"clang\") exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-compiler\", \"does_not_exist\") self.assertEqual(exitcode, 1)", "\"NoURL\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot acquire token for Gitlab server without URL\", stdout) exitcode,", "ROS workspaces with multiple Gitlab repositories # # Author: <NAME> # # Copyright", "= PkgInfo() metadata[\"beta\"] = PkgInfo() metadata[\"alpha\"].selected = True metadata[\"beta\"].selected = True metadata[\"beta\"].pinned =", "test_init_failures(self): \"\"\"Test proper behavior of 'rosrepo init'\"\"\" with patch(\"rosrepo.cmd_init.find_ros_root\", lambda x: None): exitcode,", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"1\") self.assertEqual(exitcode, 0) exitcode, stdout =", "stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"does_not_exist\") self.assertEqual(exitcode, 1) self.assertIn(\"no such", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--catkin-lint\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "Gitlab private token in offline mode\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "stdout = helper.run_rosrepo(\"init\", \"-r\", 
self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) with patch(\"rosrepo.cmd_config.find_ros_root\", lambda x: None):", "of 'rosrepo include' and 'rosrepo exclude'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir)", "\"build\")) exitcode, stdout = helper.run_rosrepo(\"clean\", \"-w\", self.wsdir, \"--dry-run\") self.assertEqual(exitcode, 0) self.assertTrue(os.path.isdir(os.path.join(self.wsdir, \"build\"))) exitcode,", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"store_credentials\"), False) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) exitcode, stdout", "dependencies\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-default\") self.assertEqual(exitcode, 1) self.assertIn(\"no packages", "helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--pinned\", \"epsilon\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\", stdout) exitcode, stdout", "\"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\", \"--store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\",", "\"--get-gitlab-url\", \"does_not_exist\"), (0, \"\\n\") ) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--get-gitlab-url\", \"Test\"), (0, \"http://localhost\\n\")", "= helper.run_rosrepo(\"list\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"gamma\", stdout) self.assertIn(\"delta\",", "'rosrepo bash'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"bash\",", "if workspaces from rosrepo 3.x are upgraded to latest 
version\"\"\" exitcode, stdout =", "acquire token for Gitlab server without URL\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "def test_build(self): \"\"\"Test proper behavior of 'rosrepo build'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\",", "[]), []) self.assertEqual(self.get_config_value(\"pinned_build\", []), []) def test_build(self): \"\"\"Test proper behavior of 'rosrepo build'\"\"\"", "self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir, \"--all\") self.assertEqual(exitcode, 0) exitcode, stdout", "= helper.run_rosrepo(\"init\", self.wsdir) self.assertEqual(exitcode, 1) self.assertIn(\"cannot detect ROS distribution\", stdout) os.environ[\"HOME\"] = self.wsdir", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--gitlab-login\", \"Test\") self.assertEqual(exitcode, 0) self.assertIn(\"cannot verify Gitlab", "\"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>token\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\",", "\"does_not_exist\") self.assertEqual(exitcode, 1) self.assertIn(\"no such Gitlab server\", stdout) ####################### self.assertEqual(self.get_config_value(\"ros_root\"), self.ros_root_dir) helper.run_rosrepo(\"config\", \"-w\",", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "\"PATH\", \"UNKNOWN\"), (0, \"ROS_WORKSPACE=%(wsdir)s\\nROS_PACKAGE_PATH=%(wsdir)s/src\\nPATH=%(env_path)s\\n# variable UNKNOWN is not set\\n\" % {\"wsdir\": self.wsdir, \"env_path\":", "clean'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) os.makedirs(os.path.join(self.wsdir, \"build\")) exitcode,", "2.x are migrated properly\"\"\" with open(os.path.join(self.wsdir, \".catkin_workspace\"), \"w\"): pass os.makedirs(os.path.join(self.wsdir, 
\".catkin_tools\", \"profiles\", \"rosrepo\"))", "= helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir, \"--pinned\", \"beta\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\"), [\"beta\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), []) exitcode,", "\"repos\", \"alpha\"), os.path.join(self.wsdir, \"src\", \"alpha\")) os.symlink(os.path.join(\"..\", \"repos\", \"beta\"), os.path.join(self.wsdir, \"src\", \"beta\")) os.symlink(os.path.join(\"..\", \"repos\",", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\":", "= helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"bash\", \"-w\", self.wsdir, \"ROS_WORKSPACE\", \"ROS_PACKAGE_PATH\",", "= helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-Pv\") self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertNotIn(\"beta\", stdout) self.assertIn(\"delta\", stdout)", "\"gamma\")) os.symlink(os.path.join(\"..\", \"repos\", \"delta\"), os.path.join(self.wsdir, \"src\", \"delta\")) with open(os.path.join(self.wsdir, \"repos\", \".metainfo\"), \"w\") as", "from rosrepo 1.x are migrated properly\"\"\" os.rename(os.path.join(self.wsdir, \"src\"), os.path.join(self.wsdir, \"repos\")) os.makedirs(os.path.join(self.wsdir, \"src\")) with", "\"--pinned\", \"beta\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\"), [\"beta\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), []) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\",", "\"-w\", self.wsdir, \"-n\"), (0, \"beta\\ndelta\\n\") ) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\",", "os.environ: del os.environ[blacklisted_key] os.environ[\"HOME\"] = self.homedir 
os.environ[\"XDG_CONFIG_HOME\"] = os.path.join(self.homedir, \".config\") def tearDown(self): shutil.rmtree(self.wsdir,", "bash'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"bash\", \"-w\",", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"install\"), False) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-compiler\", \"clang\")", "pass with open(os.path.join(self.wsdir, \".catkin_workspace\"), \"w\"): pass os.symlink(os.path.join(\"..\", \"repos\", \"alpha\"), os.path.join(self.wsdir, \"src\", \"alpha\")) os.symlink(os.path.join(\"..\",", "{} metadata[\"alpha\"] = PkgInfo() metadata[\"beta\"] = PkgInfo() metadata[\"alpha\"].selected = True metadata[\"beta\"].selected = True", "cfg.write() self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) from rosrepo import __version__", "+ [\"%s/src/%s\" % (self.wsdir, d) for d in [\"alpha\", \"beta\", \"gamma\"]] + [\"/after\"])", "\"--all\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\", stdout) self.assertEqual(self.get_config_value(\"default_build\"), [\"beta\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), []) exitcode, stdout", "sys sys.stderr = sys.stdout from rosrepo.config import Config import test.helper as helper class", "\"-w\", self.wsdir, \"--pinned\", \"beta\") self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\")", "\"--rosclipse\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_rosclipse\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-rosclipse\") self.assertEqual(exitcode,", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", 
\"Test\", \"http://localhost\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\",", "default) def test_bash(self): \"\"\"Test proper behavior of 'rosrepo bash'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\",", "self.assertNotIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-Pv\") self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout)", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "self.assertTrue(os.path.isdir(os.path.join(self.wsdir, \"build\"))) exitcode, stdout = helper.run_rosrepo(\"clean\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0) self.assertFalse(os.path.isdir(os.path.join(self.wsdir, \"build\"))) def", "os.symlink(os.path.join(\"..\", \"repos\", \"beta\"), os.path.join(self.wsdir, \"src\", \"beta\")) os.symlink(os.path.join(\"..\", \"repos\", \"gamma\"), os.path.join(self.wsdir, \"src\", \"gamma\")) os.symlink(os.path.join(\"..\",", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-logout\", \"does_not_exist\") self.assertEqual(exitcode, 1) self.assertIn(\"no such Gitlab server\", stdout)", "0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-gitlab-url\",", "\"--catkin-lint\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-catkin-lint\") self.assertEqual(exitcode,", "\"-w\", self.wsdir, \"--dry-run\") self.assertEqual(exitcode, 0) self.assertTrue(os.path.isdir(os.path.join(self.wsdir, \"build\"))) exitcode, stdout = helper.run_rosrepo(\"clean\", \"-w\", self.wsdir)", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--gitlab-login\", \"Test\") 
self.assertEqual(exitcode, 1) self.assertIn(\"cannot acquire Gitlab", "self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--dry-run\", \"alpha\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\",", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-ros-root\") self.assertEqual(self.get_config_value(\"ros_root\"), None) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-ros-root\",", "mock import patch except ImportError: from unittest.mock import patch import sys sys.stderr =", "stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-PD\") self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\", stdout) self.assertIn(\"beta\",", "from mock import patch except ImportError: from unittest.mock import patch import sys sys.stderr", "\"-w\", self.wsdir, \"--gitlab-login\", \"NoURL\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot acquire token for Gitlab server without", "[], deprecated=True) helper.create_package(self.wsdir, \"ancient2\", [], deprecated=\"Walking Dead\") for blacklisted_key in [\"ROS_WORKSPACE\", \"ROS_PACKAGE_PATH\"]: if", "exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"bash\", \"-w\", self.wsdir,", "stdout) self.assertEqual(self.get_config_value(\"compiler\"), \"clang\") exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-compiler\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"compiler\"),", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "return cfg.get(key, default) def test_bash(self): \"\"\"Test proper behavior of 'rosrepo bash'\"\"\" exitcode, stdout", "os.path.join(self.wsdir, \"repos\")) os.makedirs(os.path.join(self.wsdir, \"src\")) with open(os.path.join(self.wsdir, 
\"src\", \"CMakeLists.txt\"), \"w\"): pass with open(os.path.join(self.wsdir, \"src\",", "####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--install\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"install\"), True) exitcode,", "\"--install\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"install\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-install\") self.assertEqual(exitcode,", "False}, \"beta\": {\"auto\": False, \"pin\": True}, \"gamma\": {\"auto\": True, \"pin\": False}, \"delta\": {\"auto\":", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-logout\", \"Test\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode,", "verify Gitlab private token in offline mode\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-catkin-lint\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"), False) exitcode,", "{\"auto\": False, \"pin\": True}, \"gamma\": {\"auto\": True, \"pin\": False}, \"delta\": {\"auto\": True, \"pin\":", "\"0\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), None) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"8\")", "helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--clean\", \"--dry-run\", \"--offline\", \"--verbose\", \"--no-status\", \"--keep-going\", \"-j2\") self.assertEqual(exitcode, 0) def", "[\"catkin_lint\"] exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--dry-run\", \"alpha\") self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout)", "os.makedirs(os.path.join(self.wsdir, \".rosrepo\")) from rosrepo.common import PkgInfo 
with open(os.path.join(self.wsdir, \".rosrepo\", \"info\"), \"wb\") as f:", "####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"16\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), 16)", "= os.pathsep.join([\"/before\"] + [\"%s/src/%s\" % (self.wsdir, d) for d in [\"alpha\", \"beta\", \"gamma\"]]", "self.wsdir, \"-n\") self.assertEqual(exitcode, 1) self.assertIn(\"newer version\", stdout) def test_buildset(self): \"\"\"Test proper behavior of", "to build\", stdout) helper.failing_programs = [\"catkin_lint\"] exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--dry-run\",", "helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) from rosrepo import __version__ as rosrepo_version", "Gitlab server\", stdout) ####################### self.assertEqual(self.get_config_value(\"ros_root\"), self.ros_root_dir) helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-ros-root\") self.assertEqual(self.get_config_value(\"ros_root\"), None) exitcode,", "= helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"ancient\", \"ancient2\") self.assertEqual(exitcode, 0) self.assertIn(\"is deprecated\", stdout) self.assertIn(\"Walking", "self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"]) def test_upgrade_from_older_version_3(self): \"\"\"Test if workspaces from rosrepo 3.x are upgraded to", "self.assertIn(\"beta\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-W\") self.assertIn(\"alpha\", stdout)", "read_only=True) return cfg.get(key, default) def test_bash(self): \"\"\"Test proper behavior of 'rosrepo bash'\"\"\" exitcode,", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-gitlab-url\", \"Test\") self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, 
\"--show-gitlab-urls\", \"--autocomplete\"),", "\"\"\"Test proper behavior of 'rosrepo build'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir)", "# ROSREPO # Manage ROS workspaces with multiple Gitlab repositories # # Author:", "= helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\"), [\"alpha\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), []) self.assertEqual( helper.run_rosrepo(\"list\",", "(0, \"ROS_WORKSPACE=%(wsdir)s\\nROS_PACKAGE_PATH=%(wsdir)s/src\\nPATH=%(env_path)s\\n# variable UNKNOWN is not set\\n\" % {\"wsdir\": self.wsdir, \"env_path\": os.environ[\"PATH\"]}) )", "self.assertEqual(exitcode, 1) self.assertIn(\"no packages to build\", stdout) helper.failing_programs = [\"catkin_lint\"] exitcode, stdout =", "stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"gamma\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir,", "self.assertEqual(self.get_config_value(\"default_build\"), [\"alpha\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), []) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) exitcode,", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--show-gitlab-urls\") self.assertEqual(exitcode, 0) self.assertIn(\"Test\", stdout) self.assertIn(\"http://localhost\", stdout)", "self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"]) def test_upgrade_from_version_2(self): \"\"\"Test if workspaces from rosrepo 2.x are migrated properly\"\"\"", "\"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"]) exitcode, stdout = helper.run_rosrepo(\"exclude\", \"-w\",", "self.assertIn(\"cannot detect ROS distribution\", stdout) os.environ[\"HOME\"] = 
self.wsdir exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\",", "\"ancient\", \"ancient2\") self.assertEqual(exitcode, 0) self.assertIn(\"is deprecated\", stdout) self.assertIn(\"Walking Dead\", stdout) os.makedirs(os.path.join(self.wsdir, \"build\")) exitcode,", "\"-w\", self.wsdir, \"--no-rosclipse\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_rosclipse\"), False) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "self.assertIn(\"beta\", stdout) self.assertIn(\"epsilon\", stdout) def test_config(self): \"\"\"Test proper behavior of 'rosrepo config'\"\"\" exitcode,", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--rosclipse\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_rosclipse\"), True) exitcode, stdout", "'rosrepo include' and 'rosrepo exclude'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode,", "self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) from rosrepo import __version__ as rosrepo_version self.assertEqual(self.get_config_value(\"version\"), rosrepo_version)", "self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\", stdout) self.assertNotIn(\"gamma\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout =", "\"pin\": False}, \"beta\": {\"auto\": False, \"pin\": True}, \"gamma\": {\"auto\": True, \"pin\": False}, \"delta\":", "= True metadata[\"beta\"].pinned = True f.write(pickle.dumps(metadata)) exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir)", "not set\\n\" % {\"wsdir\": self.wsdir, \"env_path\": os.environ[\"PATH\"]}) ) os.environ[\"ROS_PACKAGE_PATH\"] = os.pathsep.join([\"/before\"] + [\"%s/src/%s\"", "0) self.assertEqual(self.get_config_value(\"job_limit\"), 8) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-job-limit\") 
self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"),", "\"url\": \"http://localhost\", \"private_token\": \"t0ps3cr3t\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--remove-credentials\") self.assertEqual(exitcode, 0)", "helper.run_rosrepo(\"bash\", \"-w\", self.wsdir, \"ROS_WORKSPACE\", \"ROS_PACKAGE_PATH\", \"PATH\", \"UNKNOWN\"), (0, \"ROS_WORKSPACE=%(wsdir)s\\nROS_PACKAGE_PATH=%(wsdir)s/src\\nPATH=%(env_path)s\\n# variable UNKNOWN is not", "\"\"\"Test proper behavior of 'rosrepo include' and 'rosrepo exclude'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\",", "= mkdtemp() self.homedir = mkdtemp() helper.create_fake_ros_root(self.ros_root_dir) helper.create_package(self.wsdir, \"alpha\", [\"beta\", \"gamma\", \"installed-system\"]) helper.create_package(self.wsdir, \"beta\",", "patch(\"rosrepo.cmd_init.find_ros_root\", lambda x: None): exitcode, stdout = helper.run_rosrepo(\"init\", self.wsdir) self.assertEqual(exitcode, 1) self.assertIn(\"cannot detect", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\", \"--store-credentials\") self.assertEqual(exitcode,", "offline mode\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--gitlab-login\", \"Test\") self.assertEqual(exitcode,", "[{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"t0ps3cr3t\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--remove-credentials\")", "3.x are upgraded to latest version\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir)", "\"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"]) exitcode, stdout = helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir, \"-a\")", "\"-w\", self.wsdir, \"--pinned\", \"beta\") 
self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\"), [\"beta\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), []) exitcode, stdout =", "\"private_token\": \"<PASSWORD>token\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\",", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-gitlab-url\", \"Test\") self.assertEqual(exitcode, 0) cfg = Config(self.wsdir)", "not use this file except in compliance with the License. # You may", "self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) with patch(\"rosrepo.cmd_config.find_ros_root\", lambda x: None): exitcode, stdout = helper.run_rosrepo(\"config\",", "self.assertEqual(self.get_config_value(\"ros_root\"), self.ros_root_dir) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-catkin-lint\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"),", "\"-w\", self.wsdir, \"--offline\", \"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot acquire Gitlab private token", "\"-w\", self.wsdir, \"--pinned\", \"epsilon\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\", stdout) exitcode, stdout =", "stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--pinned\", \"epsilon\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\", stdout)", "\"-w\", self.wsdir, \"--remove-credentials\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--set-gitlab-url\",", "0) self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"gamma\", stdout) self.assertIn(\"delta\", stdout) self.assertNotIn(\"epsilon\", stdout) exitcode, stdout", "\"-w\", self.wsdir, 
\"--unset-ros-root\") self.assertEqual(self.get_config_value(\"ros_root\"), None) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-ros-root\", self.ros_root_dir)", "= Config(self.wsdir, read_only=True) return cfg.get(key, default) def test_bash(self): \"\"\"Test proper behavior of 'rosrepo", "helper.failing_programs = [\"catkin_lint\"] exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--dry-run\", \"alpha\") self.assertEqual(exitcode, 0)", "self.wsdir, \"--unset-ros-root\") self.assertEqual(self.get_config_value(\"ros_root\"), None) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-ros-root\", self.ros_root_dir) self.assertEqual(exitcode,", "\"--offline\", \"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot acquire Gitlab private token in offline", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"Test\",", "helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"--all\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\", stdout) self.assertEqual(self.get_config_value(\"default_build\"), [\"beta\"])", "self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\", stdout) self.assertEqual(self.get_config_value(\"default_build\"), [\"beta\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), []) exitcode, stdout =", "ROSREPO # Manage ROS workspaces with multiple Gitlab repositories # # Author: <NAME>", "self.assertEqual(self.get_config_value(\"gitlab_crawl_depth\"), 1) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--store-credentials\")", 
"\"Test\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode, stdout =", "self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--show-gitlab-urls\", \"--autocomplete\"), (0, \"Test\\n\") ) exitcode, stdout = helper.run_rosrepo(\"config\",", "\"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"),", "mkdtemp() self.homedir = mkdtemp() helper.create_fake_ros_root(self.ros_root_dir) helper.create_package(self.wsdir, \"alpha\", [\"beta\", \"gamma\", \"installed-system\"]) helper.create_package(self.wsdir, \"beta\", [\"delta\"])", "agreed to in writing, software # distributed under the License is distributed on", "\"-w\", self.wsdir, \"--no-store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"store_credentials\"), False) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\":", "from unittest.mock import patch import sys sys.stderr = sys.stdout from rosrepo.config import Config", "exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--clean\", \"--dry-run\", \"--offline\", \"--verbose\", \"--no-status\", \"--keep-going\", \"-j2\")", "1) self.assertIn(\"newer version\", stdout) def test_buildset(self): \"\"\"Test proper behavior of 'rosrepo include' and", "\"pin\": False}, \"delta\": {\"auto\": True, \"pin\": False}, }, default_flow_style=False )) exitcode, stdout =", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--remove-credentials\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "[\"alpha\", \"beta\", \"gamma\"]] + [\"/after\"]) 
self.assertEqual( helper.run_rosrepo(\"bash\", \"-w\", self.wsdir), (0, \"ROS_WORKSPACE=%(wsdir)s\\nROS_PACKAGE_PATH=/before%(sep)s%(wsdir)s/src%(sep)s/after\\n\" % {\"wsdir\":", "self.wsdir) self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\", stdout) self.assertNotIn(\"gamma\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout", "def test_upgrade_from_version_2(self): \"\"\"Test if workspaces from rosrepo 2.x are migrated properly\"\"\" with open(os.path.join(self.wsdir,", "server\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-logout\", \"Test\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"),", "\"gamma\", []) helper.create_package(self.wsdir, \"delta\", []) helper.create_package(self.wsdir, \"epsilon\", [\"broken\"]) helper.create_package(self.wsdir, \"broken\", [\"missing\"]) helper.create_package(self.wsdir, \"incomplete\",", "self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-gitlab-url\", \"Test\")", "stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot detect ROS distribution\", stdout)", "stdout) self.assertNotIn(\"gamma\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir,", "self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "\"<PASSWORD>\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode, 0)", "# Copyright 2016 <NAME> # # Licensed under 
the Apache License, Version 2.0", "0) self.assertIn(\"alpha\", stdout) self.assertNotIn(\"beta\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir,", "\"url\": \"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-gitlab-url\", \"Test\") self.assertEqual(exitcode, 0) self.assertEqual(", "\"-w\", self.wsdir, \"--offline\", \"--gitlab-login\", \"Test\") self.assertEqual(exitcode, 0) self.assertIn(\"cannot verify Gitlab private token in", "self.assertIn(\"beta\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"incomplete\") self.assertEqual(exitcode, 1)", "0) self.assertEqual(self.get_config_value(\"use_rosclipse\"), False) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-env-cache\") self.assertEqual(exitcode, 0)", "\"--gitlab-login\", \"Test\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode, stdout", "\"--keep-going\", \"-j2\") self.assertEqual(exitcode, 0) def test_list(self): \"\"\"Test proper behavior of 'rosrepo list'\"\"\" exitcode,", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), 16) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"0\") self.assertEqual(exitcode,", "\"-w\", self.wsdir, \"--dry-run\") self.assertEqual(exitcode, 1) self.assertIn(\"no packages to build\", stdout) helper.failing_programs = [\"catkin_lint\"]", "self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\",", "stdout = helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir, \"--all\") self.assertEqual(exitcode, 0) exitcode, 
stdout = helper.run_rosrepo(\"build\", \"-w\",", "\"ROS_WORKSPACE=%(wsdir)s\\nROS_PACKAGE_PATH=/before%(sep)s%(wsdir)s/src%(sep)s/after\\n\" % {\"wsdir\": self.wsdir, \"sep\": os.pathsep}) ) def test_clean(self): \"\"\"Test proper behavior of", "self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 0) exitcode, stdout", "exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--all\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\", stdout)", "blacklisted_key in os.environ: del os.environ[blacklisted_key] os.environ[\"HOME\"] = self.homedir os.environ[\"XDG_CONFIG_HOME\"] = os.path.join(self.homedir, \".config\") def", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) exitcode,", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"ros_root\"), self.ros_root_dir) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-catkin-lint\") self.assertEqual(exitcode,", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"), False) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-rosclipse\") self.assertEqual(exitcode,", "to in writing, software # distributed under the License is distributed on an", "\"delta\", []) helper.create_package(self.wsdir, \"epsilon\", [\"broken\"]) helper.create_package(self.wsdir, \"broken\", [\"missing\"]) helper.create_package(self.wsdir, \"incomplete\", [\"missing-system\"]) helper.create_package(self.wsdir, \"ancient\",", "\"url\": \"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\")", "0) 
self.assertEqual(self.get_config_value(\"use_rosclipse\"), False) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--rosclipse\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_rosclipse\"),", "implied. # See the License for the specific language governing permissions and #", "helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\")", "= helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--all\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\", stdout) exitcode, stdout", "\"does_not_exist\") self.assertEqual(exitcode, 1) self.assertIn(\"unknown compiler\", stdout) self.assertEqual(self.get_config_value(\"compiler\"), \"clang\") exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "os.symlink(os.path.join(\"..\", \"repos\", \"gamma\"), os.path.join(self.wsdir, \"src\", \"gamma\")) os.symlink(os.path.join(\"..\", \"repos\", \"delta\"), os.path.join(self.wsdir, \"src\", \"delta\")) with", "tempfile import mkdtemp try: from mock import patch except ImportError: from unittest.mock import", "\"-w\", self.wsdir) self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\", stdout) self.assertNotIn(\"gamma\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"delta\", stdout) exitcode,", "\"--set-default\") self.assertEqual(exitcode, 1) self.assertIn(\"no packages given\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir,", "\"gamma\"), os.path.join(self.wsdir, \"src\", \"gamma\")) os.symlink(os.path.join(\"..\", \"repos\", \"delta\"), os.path.join(self.wsdir, \"src\", \"delta\")) with open(os.path.join(self.wsdir, \"repos\",", "stdout) helper.failing_programs = [\"catkin_lint\"] exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--dry-run\", \"alpha\") self.assertEqual(exitcode,", 
"exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-install\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"install\"), False) ####################### exitcode,", "multiple Gitlab repositories # # Author: <NAME> # # Copyright 2016 <NAME> #", "'rosrepo exclude'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) exitcode, stdout", "self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"incomplete\") self.assertEqual(exitcode, 1) self.assertIn(\"missing system", "False) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--env-cache\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_env_cache\"), True) exitcode,", "[\"missing\"]) helper.create_package(self.wsdir, \"incomplete\", [\"missing-system\"]) helper.create_package(self.wsdir, \"ancient\", [], deprecated=True) helper.create_package(self.wsdir, \"ancient2\", [], deprecated=\"Walking Dead\")", "\"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") )", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "self.assertEqual(self.get_config_value(\"job_limit\"), 8) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-job-limit\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), None)", "\"--gitlab-login\", \"NoURL\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot acquire token for Gitlab server without URL\", stdout)", "\"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\":", "# # Copyright 2016 <NAME> # 
# Licensed under the Apache License, Version", "helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-default\") self.assertEqual(exitcode, 1) self.assertIn(\"no packages given\", stdout) exitcode, stdout =", "self.assertIn(\"delta\", stdout) self.assertNotIn(\"epsilon\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-BC\") self.assertEqual(exitcode, 0)", "from rosrepo 2.x are migrated properly\"\"\" with open(os.path.join(self.wsdir, \".catkin_workspace\"), \"w\"): pass os.makedirs(os.path.join(self.wsdir, \".catkin_tools\",", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "self.assertEqual(self.get_config_value(\"install\"), False) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-compiler\", \"clang\") self.assertEqual(exitcode, 0)", "self.wsdir) self.assertEqual(exitcode, 0) self.assertFalse(os.path.isdir(os.path.join(self.wsdir, \"build\"))) def test_upgrade_from_version_1(self): \"\"\"Test if workspaces from rosrepo 1.x", "stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"gamma\", stdout)", "self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--show-gitlab-urls\", \"--autocomplete\"), (0, \"\\n\") ) exitcode, stdout", "shutil.rmtree(self.ros_root_dir, ignore_errors=True) self.ros_root_dir = None self.wsdir = None def get_config_value(self, key, default=None): cfg", "\"clang\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"compiler\"), \"clang\") exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-compiler\", \"does_not_exist\")", "stdout) self.assertNotIn(\"epsilon\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, 
\"-BC\") self.assertEqual(exitcode, 0) self.assertIn(\"search", "0) self.assertEqual(self.get_config_value(\"install\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-install\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"install\"),", "self.assertIn(\"no packages given\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-default\", \"alpha\") self.assertEqual(exitcode,", "= helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--dry-run\") self.assertEqual(exitcode, 1) self.assertIn(\"no packages to build\", stdout) helper.failing_programs", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\"), [\"beta\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), []) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--pinned\",", "you may not use this file except in compliance with the License. 
#", "\"-w\", self.wsdir, \"--job-limit\", \"0\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), None) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "\"<PASSWORD>\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) exitcode, stdout =", "behavior of 'rosrepo list'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0)", "[\"/after\"]) self.assertEqual( helper.run_rosrepo(\"bash\", \"-w\", self.wsdir), (0, \"ROS_WORKSPACE=%(wsdir)s\\nROS_PACKAGE_PATH=/before%(sep)s%(wsdir)s/src%(sep)s/after\\n\" % {\"wsdir\": self.wsdir, \"sep\": os.pathsep}) )", "helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-S\") self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertNotIn(\"beta\", stdout) exitcode, stdout =", "0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"), False) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-rosclipse\") self.assertEqual(exitcode, 0)", "mkdtemp() self.wsdir = mkdtemp() self.homedir = mkdtemp() helper.create_fake_ros_root(self.ros_root_dir) helper.create_package(self.wsdir, \"alpha\", [\"beta\", \"gamma\", \"installed-system\"])", "\"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\"), [\"alpha\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), []) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir,", "0) self.assertEqual(self.get_config_value(\"ros_root\"), self.ros_root_dir) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-catkin-lint\") self.assertEqual(exitcode, 0)", "with open(os.path.join(self.wsdir, \"src\", \"toplevel.cmake\"), \"w\"): pass with open(os.path.join(self.wsdir, 
\".catkin_workspace\"), \"w\"): pass os.symlink(os.path.join(\"..\", \"repos\",", "False}, \"delta\": {\"auto\": True, \"pin\": False}, }, default_flow_style=False )) exitcode, stdout = helper.run_rosrepo(\"init\",", "\"does_not_exist\"), (0, \"\\n\") ) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--get-gitlab-url\", \"Test\"), (0, \"http://localhost\\n\") )", "\"ROS_WORKSPACE=%(wsdir)s\\nROS_PACKAGE_PATH=%(wsdir)s/src\\nPATH=%(env_path)s\\n# variable UNKNOWN is not set\\n\" % {\"wsdir\": self.wsdir, \"env_path\": os.environ[\"PATH\"]}) ) os.environ[\"ROS_PACKAGE_PATH\"]", ") def test_clean(self): \"\"\"Test proper behavior of 'rosrepo clean'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\",", "self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode, stdout", "\"w\"): pass os.makedirs(os.path.join(self.wsdir, \".catkin_tools\", \"profiles\", \"rosrepo\")) os.makedirs(os.path.join(self.wsdir, \".rosrepo\")) from rosrepo.common import PkgInfo with", "self.wsdir, \"-W\") self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"epsilon\", stdout) def test_config(self): \"\"\"Test proper behavior", "self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertNotIn(\"beta\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\",", "\"Test\") self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--show-gitlab-urls\", \"--autocomplete\"), (0, \"\\n\") ) exitcode,", "stdout) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"16\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"),", "0) self.assertFalse(os.path.isdir(os.path.join(self.wsdir, \"build\"))) 
self.assertEqual(self.get_config_value(\"default_build\", []), []) self.assertEqual(self.get_config_value(\"pinned_build\", []), []) def test_build(self): \"\"\"Test proper", "import unittest import os import shutil import yaml import pickle from tempfile import", "\"-w\", self.wsdir, \"--all\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0)", "\"Test\\n\") ) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--show-gitlab-urls\") self.assertEqual(exitcode, 0) self.assertIn(\"Test\", stdout)", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode, 0) self.assertIn(\"cannot verify", "stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-pinned\", \"beta\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"pinned_build\", []), [\"beta\"]) exitcode,", "helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) os.makedirs(os.path.join(self.wsdir, \"build\")) exitcode, stdout = helper.run_rosrepo(\"clean\", \"-w\",", "self.wsdir, \"alpha\") self.assertEqual(exitcode, 1) self.assertIn(\"catkin_lint reported errors\", stdout) helper.failing_programs = [] with patch(\"rosrepo.cmd_build.find_ros_root\",", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "self.wsdir, \"--job-limit\", \"0\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), None) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-compiler\", \"clang\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"compiler\"), \"clang\") exitcode, stdout =", "exitcode, stdout = helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir, \"--pinned\", \"beta\") self.assertEqual(exitcode, 
0) self.assertEqual(self.get_config_value(\"default_build\"), [\"beta\"]) self.assertEqual(self.get_config_value(\"pinned_build\"),", "\"Test\", \"http://localhost\") self.assertEqual(exitcode, 0) self.assertIn(\"cannot verify Gitlab private token in offline mode\", stdout)", "\"http://localhost\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\",", "offline mode\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--remove-credentials\") self.assertEqual(exitcode, 0) exitcode,", "self.assertIn(\"beta\", stdout) self.assertNotIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-Pv\") self.assertEqual(exitcode, 0)", "stdout) self.assertNotIn(\"beta\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-PD\") self.assertEqual(exitcode,", "of 'rosrepo init'\"\"\" with patch(\"rosrepo.cmd_init.find_ros_root\", lambda x: None): exitcode, stdout = helper.run_rosrepo(\"init\", self.wsdir)", "exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 1) self.assertIn(\"$HOME\", stdout) exitcode, stdout", "\"--store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>token\"}]) exitcode, stdout =", "\"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) self.assertEqual(", "exitcode, stdout = helper.run_rosrepo(\"init\", \"--reset\", \"-r\", self.ros_root_dir, self.wsdir) 
self.assertEqual(exitcode, 0) self.assertFalse(os.path.isdir(os.path.join(self.wsdir, \"build\"))) self.assertEqual(self.get_config_value(\"default_build\",", "helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 1) self.assertIn(\"catkin_lint reported errors\", stdout) helper.failing_programs = []", "\"999.0\" cfg.write() exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\") self.assertEqual(exitcode, 1) self.assertIn(\"newer version\",", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>token\"}]) exitcode, stdout = helper.run_rosrepo(\"config\",", "package\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--clean\") self.assertEqual(exitcode, 0) exitcode, stdout", "os.environ[\"XDG_CONFIG_HOME\"] = os.path.join(self.homedir, \".config\") def tearDown(self): shutil.rmtree(self.wsdir, ignore_errors=True) shutil.rmtree(self.homedir, ignore_errors=True) shutil.rmtree(self.ros_root_dir, ignore_errors=True) self.ros_root_dir", "self.wsdir, \"-n\"), (0, \"beta\\ndelta\\n\") ) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"beta\")", "proper behavior of 'rosrepo bash'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode,", "stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"beta\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"exclude\",", "helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--dry-run\", \"alpha\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\", []), []) exitcode, stdout =", "upgraded to latest version\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0)", "\"-S\") 
self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertNotIn(\"beta\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir,", "[]) helper.create_package(self.wsdir, \"epsilon\", [\"broken\"]) helper.create_package(self.wsdir, \"broken\", [\"missing\"]) helper.create_package(self.wsdir, \"incomplete\", [\"missing-system\"]) helper.create_package(self.wsdir, \"ancient\", [],", "# Manage ROS workspaces with multiple Gitlab repositories # # Author: <NAME> #", "\"build\"))) self.assertEqual(self.get_config_value(\"default_build\", []), []) self.assertEqual(self.get_config_value(\"pinned_build\", []), []) def test_build(self): \"\"\"Test proper behavior of", "self.wsdir, \"-PD\") self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout =", "\"-w\", self.wsdir, \"incomplete\") self.assertEqual(exitcode, 1) self.assertIn(\"missing system package\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\",", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-catkin-lint\") self.assertEqual(exitcode, 0)", "1) self.assertIn(\"cannot detect ROS distribution\", stdout) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--gitlab-login\", \"Test\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot", "\"incomplete\", [\"missing-system\"]) helper.create_package(self.wsdir, \"ancient\", [], deprecated=True) helper.create_package(self.wsdir, \"ancient2\", [], deprecated=\"Walking Dead\") for blacklisted_key", "helper class WorkspaceTest(unittest.TestCase): def setUp(self): self.ros_root_dir = mkdtemp() self.wsdir = mkdtemp() self.homedir =", "class WorkspaceTest(unittest.TestCase): def 
setUp(self): self.ros_root_dir = mkdtemp() self.wsdir = mkdtemp() self.homedir = mkdtemp()", "= mkdtemp() helper.create_fake_ros_root(self.ros_root_dir) helper.create_package(self.wsdir, \"alpha\", [\"beta\", \"gamma\", \"installed-system\"]) helper.create_package(self.wsdir, \"beta\", [\"delta\"]) helper.create_package(self.wsdir, \"gamma\",", "self.wsdir, \"--all\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\",", "exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--dry-run\", \"alpha\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\", []), [])", "stdout) def test_config(self): \"\"\"Test proper behavior of 'rosrepo config'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\",", "\"profiles\", \"rosrepo\")) os.makedirs(os.path.join(self.wsdir, \".rosrepo\")) from rosrepo.common import PkgInfo with open(os.path.join(self.wsdir, \".rosrepo\", \"info\"), \"wb\")", "stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"gamma\", stdout)", "self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) self.assertFalse(os.path.isdir(os.path.join(self.wsdir, \"build\"))) self.assertEqual(self.get_config_value(\"default_build\", []), []) self.assertEqual(self.get_config_value(\"pinned_build\", []), []) def", "\"16\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), 16) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"0\")", "stdout) def test_buildset(self): \"\"\"Test proper behavior of 'rosrepo include' and 'rosrepo exclude'\"\"\" exitcode,", "\"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\", \"--store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), 
[{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\":", "0) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode, 1)", "self.wsdir, \"--gitlab-login\", \"Test\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\":", "\"-w\", self.wsdir, \"--default\", \"ancient\", \"ancient2\") self.assertEqual(exitcode, 0) self.assertIn(\"is deprecated\", stdout) self.assertIn(\"Walking Dead\", stdout)", "\"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--get-gitlab-url\", \"does_not_exist\"), (0, \"\\n\")", "shutil.rmtree(self.wsdir, ignore_errors=True) shutil.rmtree(self.homedir, ignore_errors=True) shutil.rmtree(self.ros_root_dir, ignore_errors=True) self.ros_root_dir = None self.wsdir = None def", "\"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"config\",", "open(os.path.join(self.wsdir, \".catkin_workspace\"), \"w\"): pass os.makedirs(os.path.join(self.wsdir, \".catkin_tools\", \"profiles\", \"rosrepo\")) os.makedirs(os.path.join(self.wsdir, \".rosrepo\")) from rosrepo.common import", "metadata[\"beta\"] = PkgInfo() metadata[\"alpha\"].selected = True metadata[\"beta\"].selected = True metadata[\"beta\"].pinned = True f.write(pickle.dumps(metadata))", "helper.create_package(self.wsdir, \"alpha\", [\"beta\", \"gamma\", \"installed-system\"]) helper.create_package(self.wsdir, \"beta\", [\"delta\"]) helper.create_package(self.wsdir, \"gamma\", []) helper.create_package(self.wsdir, \"delta\",", "0) self.assertEqual(self.get_config_value(\"default_build\"), [\"alpha\"]) 
self.assertEqual(self.get_config_value(\"pinned_build\"), []) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") )", "stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"incomplete\") self.assertEqual(exitcode,", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-gitlab-url\", \"Test\") self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--show-gitlab-urls\",", "exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-default\") self.assertEqual(exitcode, 1) self.assertIn(\"no packages given\", stdout)", "1) self.assertIn(\"$HOME\", stdout) exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, os.path.normpath(os.path.join(os.path.dirname(__file__), os.pardir))) self.assertEqual(exitcode, 1)", "\"--pinned\", \"epsilon\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\", stdout) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\",", "\"alpha\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\"), [\"alpha\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), []) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0,", "self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-default\")", "self.assertNotIn(\"alpha\", stdout) self.assertNotIn(\"gamma\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\",", "ROS distribution\", stdout) os.environ[\"HOME\"] = self.wsdir exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir)", "self.ros_root_dir = mkdtemp() self.wsdir = mkdtemp() self.homedir = mkdtemp() 
helper.create_fake_ros_root(self.ros_root_dir) helper.create_package(self.wsdir, \"alpha\", [\"beta\",", "exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"--all\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\",", "\"alpha\") self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"gamma\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout", "self.assertEqual(self.get_config_value(\"pinned_build\"), []) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"incomplete\") self.assertEqual(exitcode, 0) self.assertIn(\"apt-get", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_rosclipse\"), False) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-env-cache\") self.assertEqual(exitcode,", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\", []), [\"alpha\"]) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-pinned\") self.assertEqual(exitcode,", "self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"gamma\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout =", "[\"beta\"]) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout)", "\"-w\", self.wsdir, \"--all\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\",", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--env-cache\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_env_cache\"), True) exitcode, stdout", "helper.create_package(self.wsdir, \"delta\", []) helper.create_package(self.wsdir, \"epsilon\", 
[\"broken\"]) helper.create_package(self.wsdir, \"broken\", [\"missing\"]) helper.create_package(self.wsdir, \"incomplete\", [\"missing-system\"]) helper.create_package(self.wsdir,", "\"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot acquire Gitlab private token in offline mode\",", "self.assertEqual(self.get_config_value(\"gitlab_crawl_depth\"), 2) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-crawl-depth\", \"1\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_crawl_depth\"),", "self.assertEqual(self.get_config_value(\"pinned_build\", []), [\"beta\"]) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout)", "helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 0) cfg = Config(self.wsdir) cfg[\"version\"] = \"3.0.0a0\" cfg.write()", "exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir,", "unittest.mock import patch import sys sys.stderr = sys.stdout from rosrepo.config import Config import", "sys.stderr = sys.stdout from rosrepo.config import Config import test.helper as helper class WorkspaceTest(unittest.TestCase):", "0) self.assertEqual(self.get_config_value(\"default_build\", []), []) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 0)", "helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir, \"--pinned\", \"beta\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\"), [\"beta\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), []) exitcode, stdout", "exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--dry-run\", \"alpha\") self.assertEqual(exitcode, 0) 
self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\",", "self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 0) cfg =", "self.assertEqual(self.get_config_value(\"default_build\"), [\"beta\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), []) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--pinned\", \"epsilon\") self.assertEqual(exitcode,", "0) self.assertEqual(self.get_config_value(\"compiler\"), None) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-crawl-depth\", \"2\") self.assertEqual(exitcode,", ") exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode,", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "= helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"incomplete\") self.assertEqual(exitcode, 0) self.assertIn(\"apt-get install\", stdout) exitcode, stdout", "coding=utf-8 # # ROSREPO # Manage ROS workspaces with multiple Gitlab repositories #", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"),", "self.assertEqual(exitcode, 1) self.assertIn(\"unknown compiler\", stdout) self.assertEqual(self.get_config_value(\"compiler\"), \"clang\") exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "\"--autocomplete\"), (0, \"Test\\n\") ) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--show-gitlab-urls\") self.assertEqual(exitcode, 0)", "patch(\"rosrepo.cmd_build.find_ros_root\", lambda x: None): exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"alpha\") 
self.assertEqual(exitcode, 1)", "[]) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\"), [\"alpha\"]) self.assertEqual(self.get_config_value(\"pinned_build\"),", "[{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\",", "if workspaces from future rosrepo versions are detected\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\",", "stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-S\") self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertNotIn(\"beta\",", "\"--verbose\", \"--no-status\", \"--keep-going\", \"-j2\") self.assertEqual(exitcode, 0) def test_list(self): \"\"\"Test proper behavior of 'rosrepo", "stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"incomplete\") self.assertEqual(exitcode, 1) self.assertIn(\"missing system package\", stdout) exitcode,", "def tearDown(self): shutil.rmtree(self.wsdir, ignore_errors=True) shutil.rmtree(self.homedir, ignore_errors=True) shutil.rmtree(self.ros_root_dir, ignore_errors=True) self.ros_root_dir = None self.wsdir =", "0) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--show-gitlab-urls\", \"--autocomplete\"), (0, \"\\n\") ) exitcode, stdout =", "stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\", stdout) self.assertNotIn(\"gamma\", stdout) self.assertIn(\"beta\", stdout)", "f.write(pickle.dumps(metadata)) exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\",", "\"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) os.makedirs(os.path.join(self.wsdir, \"build\")) exitcode, stdout = 
helper.run_rosrepo(\"clean\", \"-w\", self.wsdir,", "0) self.assertNotIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertNotIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir,", "See the License for the specific language governing permissions and # limitations under", "stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--all\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\", stdout) exitcode,", "d in [\"alpha\", \"beta\", \"gamma\"]] + [\"/after\"]) self.assertEqual( helper.run_rosrepo(\"bash\", \"-w\", self.wsdir), (0, \"ROS_WORKSPACE=%(wsdir)s\\nROS_PACKAGE_PATH=/before%(sep)s%(wsdir)s/src%(sep)s/after\\n\"", "self.wsdir, \"--set-pinned\", \"beta\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"pinned_build\", []), [\"beta\"]) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\",", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-rosclipse\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_rosclipse\"), False) ####################### exitcode, stdout", "repositories # # Author: <NAME> # # Copyright 2016 <NAME> # # Licensed", "\"--remove-credentials\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--set-gitlab-url\", \"Test\", \"http://localhost\")", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--rosclipse\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_rosclipse\"), True) exitcode, stdout = helper.run_rosrepo(\"config\",", "0) self.assertEqual(self.get_config_value(\"pinned_build\", []), [\"beta\"]) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0) self.assertIn(\"alpha\",", "\"Test\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot 
acquire Gitlab private token in offline mode\", stdout) exitcode,", "resolve dependencies\", stdout) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"epsilon\") self.assertEqual(exitcode, 1)", "Dead\") for blacklisted_key in [\"ROS_WORKSPACE\", \"ROS_PACKAGE_PATH\"]: if blacklisted_key in os.environ: del os.environ[blacklisted_key] os.environ[\"HOME\"]", "open(os.path.join(self.wsdir, \".rosrepo\", \"info\"), \"wb\") as f: metadata = {} metadata[\"alpha\"] = PkgInfo() metadata[\"beta\"]", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--get-gitlab-url\", \"does_not_exist\"), (0, \"\\n\") ) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--get-gitlab-url\",", "= [{\"label\": \"NoURL\"}] cfg.write() exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"NoURL\") self.assertEqual(exitcode,", "are upgraded to latest version\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode,", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-compiler\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"compiler\"), None) ####################### exitcode, stdout", "= helper.run_rosrepo(\"clean\", \"-w\", self.wsdir, \"--dry-run\") self.assertEqual(exitcode, 0) self.assertTrue(os.path.isdir(os.path.join(self.wsdir, \"build\"))) exitcode, stdout = helper.run_rosrepo(\"clean\",", "stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-W\") self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"epsilon\", stdout) def", "\"--offline\", \"--gitlab-login\", \"Test\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot acquire Gitlab private token in offline mode\",", "\"-w\", self.wsdir, \"--gitlab-login\", \"Test\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), 
[{\"label\": \"Test\", \"url\": \"http://localhost\",", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-compiler\", \"does_not_exist\") self.assertEqual(exitcode, 1) self.assertIn(\"unknown compiler\", stdout) self.assertEqual(self.get_config_value(\"compiler\"), \"clang\") exitcode,", "self.wsdir, \"--gitlab-login\", \"Test\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) exitcode,", "\".rosrepo\", \"info\"), \"wb\") as f: metadata = {} metadata[\"alpha\"] = PkgInfo() metadata[\"beta\"] =", "\"-w\", self.wsdir, \"--rosclipse\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_rosclipse\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "0) def test_list(self): \"\"\"Test proper behavior of 'rosrepo list'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\",", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir) self.assertEqual(exitcode, 1) self.assertIn(\"cannot detect ROS distribution\", stdout) ####################### exitcode, stdout", "helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"ancient\", \"ancient2\") self.assertEqual(exitcode, 0) self.assertIn(\"is deprecated\", stdout) self.assertIn(\"Walking Dead\",", "def setUp(self): self.ros_root_dir = mkdtemp() self.wsdir = mkdtemp() self.homedir = mkdtemp() helper.create_fake_ros_root(self.ros_root_dir) helper.create_package(self.wsdir,", "\"-w\", self.wsdir, \"--get-gitlab-url\", \"Test\"), (0, \"http://localhost\\n\") ) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--show-gitlab-urls\", \"--autocomplete\"),", "open(os.path.join(self.wsdir, \"repos\", \".metainfo\"), \"w\") as f: f.write(yaml.safe_dump( { \"alpha\": {\"auto\": False, \"pin\": False},", "d) for d in [\"alpha\", \"beta\", \"gamma\"]] + [\"/after\"]) self.assertEqual( helper.run_rosrepo(\"bash\", 
\"-w\", self.wsdir),", "0) self.assertEqual(self.get_config_value(\"use_env_cache\"), False) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--env-cache\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_env_cache\"),", "self.wsdir) self.assertEqual(exitcode, 1) self.assertIn(\"cannot detect ROS distribution\", stdout) os.environ[\"HOME\"] = self.wsdir exitcode, stdout", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "1) self.assertIn(\"no such Gitlab server\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-logout\",", "self.wsdir, \"env_path\": os.environ[\"PATH\"]}) ) os.environ[\"ROS_PACKAGE_PATH\"] = os.pathsep.join([\"/before\"] + [\"%s/src/%s\" % (self.wsdir, d) for", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"16\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), 16) exitcode, stdout", "self.assertIn(\"cannot acquire Gitlab private token in offline mode\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\",", "self.assertNotIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-W\")", "exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"include\",", "detected\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) cfg = Config(self.wsdir)", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"Test\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\",", "patch except ImportError: from unittest.mock import patch import sys sys.stderr = sys.stdout from", 
"\"ROS_PACKAGE_PATH\", \"PATH\", \"UNKNOWN\"), (0, \"ROS_WORKSPACE=%(wsdir)s\\nROS_PACKAGE_PATH=%(wsdir)s/src\\nPATH=%(env_path)s\\n# variable UNKNOWN is not set\\n\" % {\"wsdir\": self.wsdir,", "helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--dry-run\", \"alpha\") self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"gamma\", stdout)", "self.wsdir) self.assertEqual(exitcode, 0) with patch(\"rosrepo.cmd_config.find_ros_root\", lambda x: None): exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "self.wsdir, \"--gitlab-login\", \"NoURL\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot acquire token for Gitlab server without URL\",", "helper.create_package(self.wsdir, \"ancient\", [], deprecated=True) helper.create_package(self.wsdir, \"ancient2\", [], deprecated=\"Walking Dead\") for blacklisted_key in [\"ROS_WORKSPACE\",", "\"private_token\": \"<PASSWORD>\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\")", "False) ####################### def test_init_failures(self): \"\"\"Test proper behavior of 'rosrepo init'\"\"\" with patch(\"rosrepo.cmd_init.find_ros_root\", lambda", "pickle from tempfile import mkdtemp try: from mock import patch except ImportError: from", "\"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"bash\", \"-w\", self.wsdir, \"ROS_WORKSPACE\", \"ROS_PACKAGE_PATH\", \"PATH\", \"UNKNOWN\"),", "\"alpha\\nbeta\\ndelta\\ngamma\\n\") ) self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"]) exitcode, stdout = helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir, \"-a\") self.assertEqual(exitcode, 0)", "helper.run_rosrepo(\"clean\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0) self.assertFalse(os.path.isdir(os.path.join(self.wsdir, \"build\"))) def test_upgrade_from_version_1(self): \"\"\"Test 
if workspaces from", "helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"beta\\ndelta\\n\") ) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir,", "dependencies\", stdout) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"--all\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot", "exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-default\", \"alpha\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\", []), [\"alpha\"])", "0) self.assertIn(\"search filter\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-S\") self.assertEqual(exitcode, 0)", "with open(os.path.join(self.wsdir, \".rosrepo\", \"info\"), \"wb\") as f: metadata = {} metadata[\"alpha\"] = PkgInfo()", "\"sep\": os.pathsep}) ) def test_clean(self): \"\"\"Test proper behavior of 'rosrepo clean'\"\"\" exitcode, stdout", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--remove-credentials\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"config\",", "metadata[\"beta\"].pinned = True f.write(pickle.dumps(metadata)) exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0)", "\"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 1) self.assertIn(\"catkin_lint reported errors\", stdout) helper.failing_programs = [] with", "\"private_token\": \"t0ps3cr3t\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--remove-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\":", "False, \"pin\": False}, \"beta\": {\"auto\": False, \"pin\": True}, \"gamma\": {\"auto\": True, \"pin\": False},", "\"alpha\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot detect ROS distribution\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\",", "stdout) 
self.assertIn(\"delta\", stdout) self.assertNotIn(\"epsilon\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-BC\") self.assertEqual(exitcode,", "1) self.assertIn(\"missing system package\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--clean\") self.assertEqual(exitcode,", "\"-w\", self.wsdir, \"--no-rosclipse\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_rosclipse\"), False) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "distribution\", stdout) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"16\") self.assertEqual(exitcode, 0)", "\"-Pv\") self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertNotIn(\"beta\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\",", "KIND, either express or implied. # See the License for the specific language", "self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--get-gitlab-url\", \"does_not_exist\"),", "\"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\",", "\"incomplete\") self.assertEqual(exitcode, 0) self.assertIn(\"apt-get install\", stdout) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\",", "\"http://localhost\", \"--private-token\", \"<PASSWORD>\", \"--store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"t0ps3cr3t\"}])", "(0, \"Test\\n\") ) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, 
\"--show-gitlab-urls\") self.assertEqual(exitcode, 0) self.assertIn(\"Test\",", "\"Test\") self.assertEqual(exitcode, 0) cfg = Config(self.wsdir) cfg[\"gitlab_servers\"] = [{\"label\": \"NoURL\"}] cfg.write() exitcode, stdout", "1) self.assertIn(\"no packages to build\", stdout) helper.failing_programs = [\"catkin_lint\"] exitcode, stdout = helper.run_rosrepo(\"build\",", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-install\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"install\"), False) ####################### exitcode, stdout =", "deprecated=\"Walking Dead\") for blacklisted_key in [\"ROS_WORKSPACE\", \"ROS_PACKAGE_PATH\"]: if blacklisted_key in os.environ: del os.environ[blacklisted_key]", "0) self.assertEqual(self.get_config_value(\"store_credentials\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"store_credentials\"),", "\"-w\", self.wsdir, \"--gitlab-logout\", \"Test\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode, stdout", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--catkin-lint\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"), True) exitcode, stdout =", "os.path.join(self.wsdir, \"src\", \"delta\")) with open(os.path.join(self.wsdir, \"repos\", \".metainfo\"), \"w\") as f: f.write(yaml.safe_dump( { \"alpha\":", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\":", "detect ROS distribution\", stdout) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"16\")", 
"helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--install\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"install\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"Test\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\":", "2016 <NAME> # # Licensed under the Apache License, Version 2.0 (the \"License\");", "stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"),", "0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"), False) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--catkin-lint\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"),", "\"-w\", self.wsdir, \"--offline\", \"--gitlab-login\", \"Test\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot acquire Gitlab private token in", "\"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 1) self.assertIn(\"$HOME\", stdout) exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir,", "\"http://localhost\", \"private_token\": \"<PASSWORD>token\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\",", "self.assertNotIn(\"gamma\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"incomplete\")", "build\", stdout) helper.failing_programs = [\"catkin_lint\"] exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--dry-run\", \"alpha\")", "helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-pinned\") self.assertEqual(exitcode, 1) self.assertIn(\"no packages 
given\", stdout) exitcode, stdout =", "<NAME> # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "self.assertIn(\"cannot resolve dependencies\", stdout) self.assertEqual(self.get_config_value(\"default_build\"), [\"beta\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), []) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\",", "ANY KIND, either express or implied. # See the License for the specific", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-logout\", \"Test\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\",", "def test_bash(self): \"\"\"Test proper behavior of 'rosrepo bash'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\",", "self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--dry-run\", \"alpha\") self.assertEqual(exitcode,", "self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--clean\", \"--dry-run\", \"--offline\", \"--verbose\", \"--no-status\",", "\"--default\", \"beta\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir, \"--pinned\", \"beta\") self.assertEqual(exitcode,", "self.wsdir, \"--default\", \"incomplete\") self.assertEqual(exitcode, 0) self.assertIn(\"apt-get install\", stdout) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\",", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-compiler\", \"clang\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"compiler\"), \"clang\") exitcode, stdout", "\"broken\", [\"missing\"]) helper.create_package(self.wsdir, \"incomplete\", [\"missing-system\"]) helper.create_package(self.wsdir, \"ancient\", [], deprecated=True) helper.create_package(self.wsdir, \"ancient2\", [], deprecated=\"Walking", "self.assertIn(\"newer version\", 
stdout) def test_buildset(self): \"\"\"Test proper behavior of 'rosrepo include' and 'rosrepo", "1) self.assertIn(\"cannot resolve dependencies\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-default\") self.assertEqual(exitcode,", "exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-BC\") self.assertEqual(exitcode, 0) self.assertIn(\"search filter\", stdout) exitcode,", "helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-W\") self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"epsilon\", stdout) def test_config(self): \"\"\"Test", "token for Gitlab server without URL\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "with open(os.path.join(self.wsdir, \".catkin_workspace\"), \"w\"): pass os.symlink(os.path.join(\"..\", \"repos\", \"alpha\"), os.path.join(self.wsdir, \"src\", \"alpha\")) os.symlink(os.path.join(\"..\", \"repos\",", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See", "+ [\"/after\"]) self.assertEqual( helper.run_rosrepo(\"bash\", \"-w\", self.wsdir), (0, \"ROS_WORKSPACE=%(wsdir)s\\nROS_PACKAGE_PATH=/before%(sep)s%(wsdir)s/src%(sep)s/after\\n\" % {\"wsdir\": self.wsdir, \"sep\": os.pathsep})", "stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--dry-run\", \"alpha\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\", []), []) exitcode,", "list'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) exitcode, stdout =", "= None self.wsdir = None def get_config_value(self, key, default=None): cfg = Config(self.wsdir, read_only=True)", "self.assertEqual(exitcode, 0) self.assertIn(\"is deprecated\", stdout) self.assertIn(\"Walking Dead\", stdout) os.makedirs(os.path.join(self.wsdir, \"build\")) exitcode, stdout =", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"Test\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"),", "self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\", stdout) self.assertNotIn(\"gamma\",", "with patch(\"rosrepo.cmd_init.find_ros_root\", lambda x: None): exitcode, stdout = helper.run_rosrepo(\"init\", self.wsdir) self.assertEqual(exitcode, 1) self.assertIn(\"cannot", "\"-w\", self.wsdir, \"--default\", \"epsilon\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\", stdout) exitcode, stdout =", "####################### self.assertEqual(self.get_config_value(\"ros_root\"), self.ros_root_dir) helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-ros-root\") self.assertEqual(self.get_config_value(\"ros_root\"), None) exitcode, stdout = helper.run_rosrepo(\"config\",", "self.assertIn(\"cannot detect ROS distribution\", stdout) exitcode, 
stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--all\") self.assertEqual(exitcode,", "(0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) self.assertEqual(self.get_config_value(\"default_build\"), [\"alpha\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"]) def test_upgrade_from_older_version_3(self): \"\"\"Test if workspaces from", "self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"]) exitcode, stdout = helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir, \"-a\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\"), [])", "migrated properly\"\"\" os.rename(os.path.join(self.wsdir, \"src\"), os.path.join(self.wsdir, \"repos\")) os.makedirs(os.path.join(self.wsdir, \"src\")) with open(os.path.join(self.wsdir, \"src\", \"CMakeLists.txt\"), \"w\"):", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"0\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), None) exitcode,", "= helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) cfg = Config(self.wsdir) cfg[\"version\"] = \"999.0\"", "0) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--pinned\", \"beta\") self.assertEqual(exitcode, 0) exitcode, stdout", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"does_not_exist\") self.assertEqual(exitcode, 1) self.assertIn(\"no such Gitlab", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-ros-root\", self.ros_root_dir) self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"ros_root\"), self.ros_root_dir) ####################### exitcode, stdout =", "\"--job-limit\", \"8\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), 8) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-job-limit\")", "\"-w\", self.wsdir) 
self.assertEqual(exitcode, 1) self.assertIn(\"cannot detect ROS distribution\", stdout) ####################### exitcode, stdout =", "self.wsdir, \"--unset-gitlab-url\", \"Test\") self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--show-gitlab-urls\", \"--autocomplete\"), (0, \"\\n\")", "workspaces from rosrepo 2.x are migrated properly\"\"\" with open(os.path.join(self.wsdir, \".catkin_workspace\"), \"w\"): pass os.makedirs(os.path.join(self.wsdir,", "helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-pinned\", \"beta\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"pinned_build\", []), [\"beta\"]) exitcode, stdout =", "\"Test\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) exitcode, stdout =", "test_incompatible_new_version(self): \"\"\"Test if workspaces from future rosrepo versions are detected\"\"\" exitcode, stdout =", "0) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--dry-run\") self.assertEqual(exitcode, 1) self.assertIn(\"no packages to", "stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-gitlab-url\", \"Test\") self.assertEqual(exitcode, 0) cfg =", "def test_upgrade_from_version_1(self): \"\"\"Test if workspaces from rosrepo 1.x are migrated properly\"\"\" os.rename(os.path.join(self.wsdir, \"src\"),", "open(os.path.join(self.wsdir, \"src\", \"CMakeLists.txt\"), \"w\"): pass with open(os.path.join(self.wsdir, \"src\", \"toplevel.cmake\"), \"w\"): pass with open(os.path.join(self.wsdir,", "\"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) self.assertEqual(self.get_config_value(\"default_build\"), [\"alpha\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"]) def test_upgrade_from_version_2(self): \"\"\"Test", "exitcode, stdout = 
helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-pinned\") self.assertEqual(exitcode, 1) self.assertIn(\"no packages given\", stdout)", "self.assertEqual(exitcode, 1) self.assertIn(\"no packages given\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-pinned\",", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-rosclipse\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_rosclipse\"), False) ####################### exitcode, stdout =", "are migrated properly\"\"\" os.rename(os.path.join(self.wsdir, \"src\"), os.path.join(self.wsdir, \"repos\")) os.makedirs(os.path.join(self.wsdir, \"src\")) with open(os.path.join(self.wsdir, \"src\", \"CMakeLists.txt\"),", "self.assertFalse(os.path.isdir(os.path.join(self.wsdir, \"build\"))) self.assertEqual(self.get_config_value(\"default_build\", []), []) self.assertEqual(self.get_config_value(\"pinned_build\", []), []) def test_build(self): \"\"\"Test proper behavior", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--gitlab-login\", \"Test\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot acquire Gitlab private token", "\"-w\", self.wsdir, \"--no-env-cache\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_env_cache\"), False) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "\"alpha\\nbeta\\ndelta\\ngamma\\n\") ) self.assertEqual(self.get_config_value(\"default_build\"), [\"alpha\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"]) def test_upgrade_from_older_version_3(self): \"\"\"Test if workspaces from rosrepo", "self.wsdir) self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) self.assertEqual(self.get_config_value(\"default_build\"), [\"alpha\"])", "exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-pinned\", \"beta\") 
self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"pinned_build\", []), [\"beta\"])", "stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"incomplete\") self.assertEqual(exitcode, 1) self.assertIn(\"missing", "stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "\"--store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"t0ps3cr3t\"}]) exitcode, stdout =", "self.assertEqual(self.get_config_value(\"job_limit\"), None) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--install\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"install\"),", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"store_credentials\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-store-credentials\") self.assertEqual(exitcode, 0)", "in offline mode\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--gitlab-login\", \"Test\")", "self.wsdir, \"--default\", \"epsilon\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\", stdout) exitcode, stdout = helper.run_rosrepo(\"include\",", "helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"]) exitcode, stdout = helper.run_rosrepo(\"exclude\",", "0) self.assertIn(\"apt-get install\", stdout) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"ancient\", \"ancient2\")", "self.wsdir, \"--catkin-lint\") self.assertEqual(exitcode, 0) 
self.assertEqual(self.get_config_value(\"use_catkin_lint\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-catkin-lint\")", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-env-cache\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_env_cache\"), False) ####################### def", "self.wsdir, \"--no-rosclipse\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_rosclipse\"), False) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--rosclipse\")", "stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-W\") self.assertIn(\"alpha\",", "helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-default\", \"alpha\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\", []), [\"alpha\"]) exitcode, stdout =", "\"-w\", self.wsdir, \"--set-compiler\", \"clang\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"compiler\"), \"clang\") exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "\"--unset-gitlab-url\", \"Test\") self.assertEqual(exitcode, 0) cfg = Config(self.wsdir) cfg[\"gitlab_servers\"] = [{\"label\": \"NoURL\"}] cfg.write() exitcode,", "patch(\"rosrepo.cmd_config.find_ros_root\", lambda x: None): exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir) self.assertEqual(exitcode, 1) self.assertIn(\"cannot", "self.wsdir) self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"gamma\", stdout) self.assertIn(\"delta\", stdout) self.assertNotIn(\"epsilon\", stdout)", "import pickle from tempfile import mkdtemp try: from mock import patch except ImportError:", "are detected\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", 
\"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) cfg =", "mkdtemp try: from mock import patch except ImportError: from unittest.mock import patch import", "self.wsdir, \"-S\") self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertNotIn(\"beta\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\",", "0) self.assertIn(\"cannot verify Gitlab private token in offline mode\", stdout) exitcode, stdout =", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"does_not_exist\") self.assertEqual(exitcode, 1) self.assertIn(\"no such Gitlab server\",", "self.assertEqual(self.get_config_value(\"ros_root\"), self.ros_root_dir) helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-ros-root\") self.assertEqual(self.get_config_value(\"ros_root\"), None) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "\".rosrepo\")) from rosrepo.common import PkgInfo with open(os.path.join(self.wsdir, \".rosrepo\", \"info\"), \"wb\") as f: metadata", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--env-cache\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_env_cache\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "\"\\n\") ) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\")", "under the License. 
# # import unittest import os import shutil import yaml", "\"-w\", self.wsdir, \"--gitlab-login\", \"Test\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}])", "stdout = helper.run_rosrepo(\"clean\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0) self.assertFalse(os.path.isdir(os.path.join(self.wsdir, \"build\"))) def test_upgrade_from_version_1(self): \"\"\"Test if", "applicable law or agreed to in writing, software # distributed under the License", "of 'rosrepo bash'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) self.assertEqual(", "self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--get-gitlab-url\", \"Test\"), (0, \"http://localhost\\n\") ) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-job-limit\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), None) ####################### exitcode,", "self.assertEqual(exitcode, 0) cfg = Config(self.wsdir) cfg[\"version\"] = \"3.0.0a0\" cfg.write() self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir,", "self.wsdir, \"-P\") self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertNotIn(\"delta\", stdout) exitcode, stdout =", "\"repos\", \".metainfo\"), \"w\") as f: f.write(yaml.safe_dump( { \"alpha\": {\"auto\": False, \"pin\": False}, \"beta\":", "\"-w\", self.wsdir, \"ROS_WORKSPACE\", \"ROS_PACKAGE_PATH\", \"PATH\", \"UNKNOWN\"), (0, \"ROS_WORKSPACE=%(wsdir)s\\nROS_PACKAGE_PATH=%(wsdir)s/src\\nPATH=%(env_path)s\\n# variable UNKNOWN is not set\\n\"", "stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-logout\", \"Test\") self.assertEqual(exitcode, 0) 
self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\":", "\"--set-pinned\") self.assertEqual(exitcode, 1) self.assertIn(\"no packages given\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir,", ") exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"beta\") self.assertEqual(exitcode, 0) exitcode, stdout", "\"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\",", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-env-cache\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_env_cache\"), False) exitcode,", "stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--clean\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\",", "stdout) self.assertIn(\"beta\", stdout) self.assertNotIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-Pv\") self.assertEqual(exitcode,", "[\"alpha\"]) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-pinned\") self.assertEqual(exitcode, 1) self.assertIn(\"no packages given\",", "self.assertEqual(self.get_config_value(\"version\"), rosrepo_version) def test_incompatible_new_version(self): \"\"\"Test if workspaces from future rosrepo versions are detected\"\"\"", "self.wsdir) self.assertEqual(exitcode, 1) self.assertIn(\"$HOME\", stdout) exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, os.path.normpath(os.path.join(os.path.dirname(__file__), os.pardir)))", "False) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--rosclipse\") self.assertEqual(exitcode, 0) 
self.assertEqual(self.get_config_value(\"use_rosclipse\"), True) exitcode,", "self.assertIn(\"epsilon\", stdout) def test_config(self): \"\"\"Test proper behavior of 'rosrepo config'\"\"\" exitcode, stdout =", "\"beta\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"pinned_build\", []), [\"beta\"]) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir) self.assertEqual(exitcode,", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\", \"--store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\":", "self.assertIn(\"cannot verify Gitlab private token in offline mode\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\",", "writing, software # distributed under the License is distributed on an \"AS IS\"", "\"ROS_PACKAGE_PATH\"]: if blacklisted_key in os.environ: del os.environ[blacklisted_key] os.environ[\"HOME\"] = self.homedir os.environ[\"XDG_CONFIG_HOME\"] = os.path.join(self.homedir,", "from rosrepo.config import Config import test.helper as helper class WorkspaceTest(unittest.TestCase): def setUp(self): self.ros_root_dir", "version\", stdout) def test_buildset(self): \"\"\"Test proper behavior of 'rosrepo include' and 'rosrepo exclude'\"\"\"", "1) self.assertIn(\"cannot detect ROS distribution\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--all\")", "self.assertEqual(exitcode, 0) self.assertIn(\"Test\", stdout) self.assertIn(\"http://localhost\", stdout) self.assertIn(\"yes\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "= helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--dry-run\", \"alpha\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\", []), []) exitcode, stdout", "[]), [\"beta\"]) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir) 
self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\",", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--show-gitlab-urls\") self.assertEqual(exitcode, 0) self.assertIn(\"Test\", stdout) self.assertIn(\"http://localhost\", stdout) self.assertIn(\"yes\",", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir) self.assertEqual(exitcode, 1) self.assertIn(\"cannot detect ROS distribution\", stdout)", "Config(self.wsdir) cfg[\"version\"] = \"999.0\" cfg.write() exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\") self.assertEqual(exitcode,", "resolve dependencies\", stdout) self.assertEqual(self.get_config_value(\"default_build\"), [\"beta\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), []) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir,", "\"--private-token\", \"<PASSWORD>\", \"--store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"store_credentials\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "\"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode,", "compliance with the License. 
# You may obtain a copy of the License", "stdout) self.assertNotIn(\"beta\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-P\") self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\",", "\"Test\", \"url\": \"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\",", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-logout\", \"does_not_exist\") self.assertEqual(exitcode, 1) self.assertIn(\"no such Gitlab server\",", "self.assertEqual(self.get_config_value(\"pinned_build\"), []) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) exitcode, stdout =", "\"-n\") self.assertEqual(exitcode, 1) self.assertIn(\"newer version\", stdout) def test_buildset(self): \"\"\"Test proper behavior of 'rosrepo", "0) self.assertIn(\"alpha\", stdout) self.assertNotIn(\"beta\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-P\") self.assertEqual(exitcode,", "given\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-default\", \"alpha\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\",", "self.wsdir, \"--no-job-limit\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), None) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "pass with open(os.path.join(self.wsdir, \"src\", \"toplevel.cmake\"), \"w\"): pass with open(os.path.join(self.wsdir, \".catkin_workspace\"), \"w\"): pass os.symlink(os.path.join(\"..\",", "1) self.assertIn(\"no packages given\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-pinned\", \"beta\")", "self.assertEqual(self.get_config_value(\"job_limit\"), None) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", 
self.wsdir, \"--job-limit\", \"8\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"),", "helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) cfg = Config(self.wsdir) cfg[\"version\"] = \"999.0\" cfg.write()", "= True f.write(pickle.dumps(metadata)) exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) self.assertEqual(", "\"w\"): pass with open(os.path.join(self.wsdir, \".catkin_workspace\"), \"w\"): pass os.symlink(os.path.join(\"..\", \"repos\", \"alpha\"), os.path.join(self.wsdir, \"src\", \"alpha\"))", "compiler\", stdout) self.assertEqual(self.get_config_value(\"compiler\"), \"clang\") exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-compiler\") self.assertEqual(exitcode, 0)", "os.environ[\"PATH\"]}) ) os.environ[\"ROS_PACKAGE_PATH\"] = os.pathsep.join([\"/before\"] + [\"%s/src/%s\" % (self.wsdir, d) for d in", "\"info\"), \"wb\") as f: metadata = {} metadata[\"alpha\"] = PkgInfo() metadata[\"beta\"] = PkgInfo()", "helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--dry-run\") self.assertEqual(exitcode, 1) self.assertIn(\"no packages to build\", stdout) helper.failing_programs =", "\"-w\", self.wsdir, \"--env-cache\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_env_cache\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "\"does_not_exist\") self.assertEqual(exitcode, 1) self.assertIn(\"no such Gitlab server\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "tearDown(self): shutil.rmtree(self.wsdir, ignore_errors=True) shutil.rmtree(self.homedir, ignore_errors=True) shutil.rmtree(self.ros_root_dir, ignore_errors=True) self.ros_root_dir = None self.wsdir = None", "\"rosrepo\")) os.makedirs(os.path.join(self.wsdir, \".rosrepo\")) from rosrepo.common import PkgInfo with open(os.path.join(self.wsdir, 
\".rosrepo\", \"info\"), \"wb\") as", "stdout) ####################### self.assertEqual(self.get_config_value(\"ros_root\"), self.ros_root_dir) helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-ros-root\") self.assertEqual(self.get_config_value(\"ros_root\"), None) exitcode, stdout =", "\"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) self.assertFalse(os.path.isdir(os.path.join(self.wsdir, \"build\"))) self.assertEqual(self.get_config_value(\"default_build\", []), []) self.assertEqual(self.get_config_value(\"pinned_build\", []), [])", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\", \"--store-credentials\") self.assertEqual(exitcode, 0)", "\"Test\", \"http://localhost\", \"--store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>token\"}]) exitcode,", "= helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"--all\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\", stdout) self.assertEqual(self.get_config_value(\"default_build\"),", "None) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--install\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"install\"), True)", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}])", "self.wsdir, \"--no-catkin-lint\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"), False) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--catkin-lint\")", "with multiple Gitlab repositories # # Author: 
<NAME> # # Copyright 2016 <NAME>", "helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir,", "exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-W\") self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"epsilon\", stdout)", "= [] with patch(\"rosrepo.cmd_build.find_ros_root\", lambda x: None): exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir,", "[\"beta\"]) exitcode, stdout = helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir, \"-a\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\"), []) self.assertEqual(", "= helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--dry-run\", \"alpha\") self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"gamma\",", "self.wsdir, \"--all\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\",", "\"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--pinned\", \"beta\")", "\"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "\"alpha\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\", []), [\"alpha\"]) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-pinned\")", "helper.create_package(self.wsdir, \"beta\", [\"delta\"]) helper.create_package(self.wsdir, \"gamma\", []) helper.create_package(self.wsdir, \"delta\", []) helper.create_package(self.wsdir, \"epsilon\", [\"broken\"]) helper.create_package(self.wsdir,", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, 
\"--unset-compiler\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"compiler\"), None) ####################### exitcode, stdout = helper.run_rosrepo(\"config\",", "\"beta\", [\"delta\"]) helper.create_package(self.wsdir, \"gamma\", []) helper.create_package(self.wsdir, \"delta\", []) helper.create_package(self.wsdir, \"epsilon\", [\"broken\"]) helper.create_package(self.wsdir, \"broken\",", "governing permissions and # limitations under the License. # # import unittest import", "stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--gitlab-login\", \"Test\") self.assertEqual(exitcode, 0) self.assertIn(\"cannot", "\"-w\", self.wsdir, \"--default\", \"incomplete\") self.assertEqual(exitcode, 0) self.assertIn(\"apt-get install\", stdout) exitcode, stdout = helper.run_rosrepo(\"include\",", "rosrepo 1.x are migrated properly\"\"\" os.rename(os.path.join(self.wsdir, \"src\"), os.path.join(self.wsdir, \"repos\")) os.makedirs(os.path.join(self.wsdir, \"src\")) with open(os.path.join(self.wsdir,", "stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-S\") self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertNotIn(\"beta\", stdout) exitcode,", "test.helper as helper class WorkspaceTest(unittest.TestCase): def setUp(self): self.ros_root_dir = mkdtemp() self.wsdir = mkdtemp()", "exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--pinned\", \"beta\") self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\",", "self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) self.assertEqual(self.get_config_value(\"default_build\"),", "0) self.assertIn(\"is deprecated\", stdout) self.assertIn(\"Walking Dead\", stdout) os.makedirs(os.path.join(self.wsdir, \"build\")) exitcode, stdout = helper.run_rosrepo(\"init\",", 
"\"Test\"), (0, \"http://localhost\\n\") ) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--show-gitlab-urls\", \"--autocomplete\"), (0, \"Test\\n\") )", "(the \"License\"); # you may not use this file except in compliance with", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-compiler\", \"does_not_exist\") self.assertEqual(exitcode, 1) self.assertIn(\"unknown compiler\", stdout)", "# Unless required by applicable law or agreed to in writing, software #", "by applicable law or agreed to in writing, software # distributed under the", "\"build\"))) def test_upgrade_from_version_1(self): \"\"\"Test if workspaces from rosrepo 1.x are migrated properly\"\"\" os.rename(os.path.join(self.wsdir,", "\"CMakeLists.txt\"), \"w\"): pass with open(os.path.join(self.wsdir, \"src\", \"toplevel.cmake\"), \"w\"): pass with open(os.path.join(self.wsdir, \".catkin_workspace\"), \"w\"):", "\"-w\", self.wsdir, \"--set-pinned\", \"beta\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"pinned_build\", []), [\"beta\"]) exitcode, stdout = helper.run_rosrepo(\"build\",", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), None) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--install\") self.assertEqual(exitcode,", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode, 0) self.assertIn(\"cannot", "from rosrepo 3.x are upgraded to latest version\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\",", "self.assertIn(\"no such Gitlab server\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-logout\", \"Test\")", "\"--remove-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": 
\"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "\"\"\"Test proper behavior of 'rosrepo bash'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir)", "= helper.run_rosrepo(\"build\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\", stdout) self.assertNotIn(\"gamma\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"delta\",", "\"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--set-gitlab-url\", \"Test\", \"http://localhost\")", "\"--no-rosclipse\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_rosclipse\"), False) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-env-cache\")", "build'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) exitcode, stdout =", "file except in compliance with the License. 
# You may obtain a copy", "stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-Pv\") self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertNotIn(\"beta\",", "stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-BC\") self.assertEqual(exitcode, 0) self.assertIn(\"search filter\", stdout)", "1) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--store-credentials\") self.assertEqual(exitcode,", "\"-w\", self.wsdir, \"-Pv\") self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertNotIn(\"beta\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), None) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"8\") self.assertEqual(exitcode,", "\"repos\", \"beta\"), os.path.join(self.wsdir, \"src\", \"beta\")) os.symlink(os.path.join(\"..\", \"repos\", \"gamma\"), os.path.join(self.wsdir, \"src\", \"gamma\")) os.symlink(os.path.join(\"..\", \"repos\",", "dependencies\", stdout) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"epsilon\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot", "self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--pinned\", \"beta\")", "\"w\"): pass with open(os.path.join(self.wsdir, \"src\", \"toplevel.cmake\"), \"w\"): pass with open(os.path.join(self.wsdir, \".catkin_workspace\"), \"w\"): pass", "\"-w\", self.wsdir, \"-S\") self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertNotIn(\"beta\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\",", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-job-limit\") self.assertEqual(exitcode, 0) 
self.assertEqual(self.get_config_value(\"job_limit\"), None) ####################### exitcode, stdout = helper.run_rosrepo(\"config\",", "\"src\", \"CMakeLists.txt\"), \"w\"): pass with open(os.path.join(self.wsdir, \"src\", \"toplevel.cmake\"), \"w\"): pass with open(os.path.join(self.wsdir, \".catkin_workspace\"),", "from rosrepo.common import PkgInfo with open(os.path.join(self.wsdir, \".rosrepo\", \"info\"), \"wb\") as f: metadata =", "helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--dry-run\",", "self.assertEqual(self.get_config_value(\"store_credentials\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"store_credentials\"), False)", "[]) self.assertEqual(self.get_config_value(\"pinned_build\", []), []) def test_build(self): \"\"\"Test proper behavior of 'rosrepo build'\"\"\" exitcode,", "reported errors\", stdout) helper.failing_programs = [] with patch(\"rosrepo.cmd_build.find_ros_root\", lambda x: None): exitcode, stdout", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_crawl_depth\"), 2) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-crawl-depth\", \"1\") self.assertEqual(exitcode,", "\"--private-token\", \"<PASSWORD>\", \"--store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"t0ps3cr3t\"}]) exitcode,", "import test.helper as helper class WorkspaceTest(unittest.TestCase): def setUp(self): self.ros_root_dir = mkdtemp() self.wsdir =", "(0, \"beta\\ndelta\\n\") ) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"beta\") self.assertEqual(exitcode, 0)", "None) exitcode, stdout 
= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-ros-root\", self.ros_root_dir) self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"ros_root\"), self.ros_root_dir)", "self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) from rosrepo import __version__ as", "stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-P\") self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\", stdout) self.assertIn(\"beta\",", "\"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"Test\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0)", "os.environ[\"ROS_PACKAGE_PATH\"] = os.pathsep.join([\"/before\"] + [\"%s/src/%s\" % (self.wsdir, d) for d in [\"alpha\", \"beta\",", "self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"bash\", \"-w\", self.wsdir, \"ROS_WORKSPACE\", \"ROS_PACKAGE_PATH\", \"PATH\", \"UNKNOWN\"), (0, \"ROS_WORKSPACE=%(wsdir)s\\nROS_PACKAGE_PATH=%(wsdir)s/src\\nPATH=%(env_path)s\\n# variable", "\"\"\"Test proper behavior of 'rosrepo config'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir)", "\"-w\", self.wsdir) self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"gamma\", stdout) self.assertIn(\"delta\", stdout) self.assertNotIn(\"epsilon\",", "OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "\"\"\"Test if workspaces from rosrepo 3.x are upgraded to latest version\"\"\" exitcode, stdout", "= [\"catkin_lint\"] exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--dry-run\", \"alpha\") self.assertEqual(exitcode, 0) self.assertIn(\"alpha\",", "\"-w\", self.wsdir, \"--no-env-cache\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_env_cache\"), False) ####################### def test_init_failures(self): \"\"\"Test proper behavior", "as f: f.write(yaml.safe_dump( { \"alpha\": {\"auto\": False, \"pin\": False}, \"beta\": {\"auto\": False, \"pin\":", "\"Test\", \"http://localhost\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\",", "\"gamma\"]] + [\"/after\"]) self.assertEqual( helper.run_rosrepo(\"bash\", \"-w\", self.wsdir), (0, \"ROS_WORKSPACE=%(wsdir)s\\nROS_PACKAGE_PATH=/before%(sep)s%(wsdir)s/src%(sep)s/after\\n\" % {\"wsdir\": self.wsdir, \"sep\":", "0) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 0) cfg = Config(self.wsdir)", "self.assertIn(\"unknown compiler\", stdout) self.assertEqual(self.get_config_value(\"compiler\"), \"clang\") exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-compiler\") self.assertEqual(exitcode,", "[\"beta\"]) def test_upgrade_from_version_2(self): \"\"\"Test if workspaces from rosrepo 2.x are migrated properly\"\"\" with", "{\"auto\": False, \"pin\": False}, \"beta\": {\"auto\": False, \"pin\": True}, \"gamma\": {\"auto\": True, \"pin\":", "test_upgrade_from_version_2(self): \"\"\"Test if workspaces from rosrepo 2.x are migrated properly\"\"\" with open(os.path.join(self.wsdir, \".catkin_workspace\"),", "ImportError: from 
unittest.mock import patch import sys sys.stderr = sys.stdout from rosrepo.config import", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"8\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), 8) exitcode, stdout =", "self.wsdir, \"-a\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\"), []) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"beta\\ndelta\\n\")", "\"--set-pinned\", \"beta\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"pinned_build\", []), [\"beta\"]) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir)", "{\"auto\": True, \"pin\": False}, }, default_flow_style=False )) exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir,", "self.ros_root_dir = None self.wsdir = None def get_config_value(self, key, default=None): cfg = Config(self.wsdir,", "helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"incomplete\") self.assertEqual(exitcode, 1) self.assertIn(\"missing system package\", stdout) exitcode, stdout =", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), 8) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-job-limit\") self.assertEqual(exitcode, 0)", "\"delta\": {\"auto\": True, \"pin\": False}, }, default_flow_style=False )) exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\",", "behavior of 'rosrepo init'\"\"\" with patch(\"rosrepo.cmd_init.find_ros_root\", lambda x: None): exitcode, stdout = helper.run_rosrepo(\"init\",", "self.wsdir exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 1) self.assertIn(\"$HOME\", stdout) exitcode,", "rosrepo_version self.assertEqual(self.get_config_value(\"version\"), rosrepo_version) def test_incompatible_new_version(self): \"\"\"Test if workspaces from future rosrepo versions are", 
"self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertNotIn(\"beta\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-P\")", "helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-Pv\") self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertNotIn(\"beta\", stdout) self.assertIn(\"delta\", stdout) exitcode,", "\"alpha\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\", []), []) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"alpha\")", "(0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--pinned\", \"beta\") self.assertEqual(exitcode, 0)", "f: metadata = {} metadata[\"alpha\"] = PkgInfo() metadata[\"beta\"] = PkgInfo() metadata[\"alpha\"].selected = True", "####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-compiler\", \"clang\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"compiler\"), \"clang\")", "def test_buildset(self): \"\"\"Test proper behavior of 'rosrepo include' and 'rosrepo exclude'\"\"\" exitcode, stdout", "resolve dependencies\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-default\") self.assertEqual(exitcode, 1) self.assertIn(\"no", "exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"gamma\",", "0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\",", "import os import shutil import yaml import pickle from tempfile import mkdtemp try:", "stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--dry-run\") self.assertEqual(exitcode, 1) self.assertIn(\"no 
packages to build\", stdout)", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--gitlab-login\", \"Test\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot acquire", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-rosclipse\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_rosclipse\"), False) exitcode, stdout = helper.run_rosrepo(\"config\",", "stdout) self.assertIn(\"gamma\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir, \"--all\") self.assertEqual(exitcode,", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) exitcode, stdout", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--gitlab-login\", \"Test\") self.assertEqual(exitcode, 0) self.assertIn(\"cannot verify Gitlab private token", "\"env_path\": os.environ[\"PATH\"]}) ) os.environ[\"ROS_PACKAGE_PATH\"] = os.pathsep.join([\"/before\"] + [\"%s/src/%s\" % (self.wsdir, d) for d", "\"--dry-run\") self.assertEqual(exitcode, 0) self.assertTrue(os.path.isdir(os.path.join(self.wsdir, \"build\"))) exitcode, stdout = helper.run_rosrepo(\"clean\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0)", "1) self.assertIn(\"catkin_lint reported errors\", stdout) helper.failing_programs = [] with patch(\"rosrepo.cmd_build.find_ros_root\", lambda x: None):", "\"build\"))) exitcode, stdout = helper.run_rosrepo(\"clean\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0) self.assertFalse(os.path.isdir(os.path.join(self.wsdir, \"build\"))) def test_upgrade_from_version_1(self):", "workspaces from rosrepo 3.x are upgraded to latest version\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\",", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-install\") self.assertEqual(exitcode, 0) 
self.assertEqual(self.get_config_value(\"install\"), False) ####################### exitcode, stdout = helper.run_rosrepo(\"config\",", "self.wsdir, \"--set-default\") self.assertEqual(exitcode, 1) self.assertIn(\"no packages given\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\",", "[\"beta\"]) def test_upgrade_from_older_version_3(self): \"\"\"Test if workspaces from rosrepo 3.x are upgraded to latest", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-crawl-depth\", \"2\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_crawl_depth\"), 2) exitcode, stdout = helper.run_rosrepo(\"config\",", "\".config\") def tearDown(self): shutil.rmtree(self.wsdir, ignore_errors=True) shutil.rmtree(self.homedir, ignore_errors=True) shutil.rmtree(self.ros_root_dir, ignore_errors=True) self.ros_root_dir = None self.wsdir", "\"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\",", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--show-gitlab-urls\") self.assertEqual(exitcode, 0) self.assertIn(\"Test\", stdout) self.assertIn(\"http://localhost\", stdout) self.assertIn(\"yes\", stdout) exitcode,", "\"src\", \"alpha\")) os.symlink(os.path.join(\"..\", \"repos\", \"beta\"), os.path.join(self.wsdir, \"src\", \"beta\")) os.symlink(os.path.join(\"..\", \"repos\", \"gamma\"), os.path.join(self.wsdir, \"src\",", "\"-w\", self.wsdir), (0, \"ROS_WORKSPACE=%(wsdir)s\\nROS_PACKAGE_PATH=/before%(sep)s%(wsdir)s/src%(sep)s/after\\n\" % {\"wsdir\": self.wsdir, \"sep\": os.pathsep}) ) def test_clean(self): \"\"\"Test", "# # import unittest import os import shutil import yaml import pickle from", "self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"epsilon\", stdout) def test_config(self): \"\"\"Test proper behavior of 
'rosrepo", "\"clang\") exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-compiler\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"compiler\"), None) #######################", "'rosrepo config'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) with patch(\"rosrepo.cmd_config.find_ros_root\",", "\"gamma\": {\"auto\": True, \"pin\": False}, \"delta\": {\"auto\": True, \"pin\": False}, }, default_flow_style=False ))", "exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"gamma\",", "helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 1) self.assertIn(\"$HOME\", stdout) exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\",", "\"\"\"Test if workspaces from rosrepo 1.x are migrated properly\"\"\" os.rename(os.path.join(self.wsdir, \"src\"), os.path.join(self.wsdir, \"repos\"))", "0) self.assertEqual(self.get_config_value(\"default_build\"), [\"beta\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), []) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--pinned\", \"epsilon\")", "self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"]) exitcode,", "0) cfg = Config(self.wsdir) cfg[\"version\"] = \"999.0\" cfg.write() exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\",", "0) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--dry-run\", \"alpha\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\", []),", "2) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-crawl-depth\", \"1\") 
self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_crawl_depth\"), 1)", "stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 1) self.assertIn(\"$HOME\", stdout) exitcode, stdout =", "\"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) from rosrepo import __version__ as rosrepo_version self.assertEqual(self.get_config_value(\"version\"),", "ROS distribution\", stdout) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"16\") self.assertEqual(exitcode,", "WorkspaceTest(unittest.TestCase): def setUp(self): self.ros_root_dir = mkdtemp() self.wsdir = mkdtemp() self.homedir = mkdtemp() helper.create_fake_ros_root(self.ros_root_dir)", "metadata = {} metadata[\"alpha\"] = PkgInfo() metadata[\"beta\"] = PkgInfo() metadata[\"alpha\"].selected = True metadata[\"beta\"].selected", "self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\", stdout) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\",", "Copyright 2016 <NAME> # # Licensed under the Apache License, Version 2.0 (the", "\"alpha\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--pinned\", \"beta\") self.assertEqual(exitcode, 0)", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--remove-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode,", "import sys sys.stderr = sys.stdout from rosrepo.config import Config import test.helper as helper", "self.assertIn(\"alpha\", stdout) self.assertNotIn(\"beta\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-P\") self.assertEqual(exitcode, 0)", "= helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) 
os.makedirs(os.path.join(self.wsdir, \"build\")) exitcode, stdout = helper.run_rosrepo(\"clean\",", "\"ancient2\") self.assertEqual(exitcode, 0) self.assertIn(\"is deprecated\", stdout) self.assertIn(\"Walking Dead\", stdout) os.makedirs(os.path.join(self.wsdir, \"build\")) exitcode, stdout", "####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-crawl-depth\", \"2\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_crawl_depth\"), 2)", ") self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--get-gitlab-url\", \"Test\"), (0, \"http://localhost\\n\") ) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\",", "\"--dry-run\", \"alpha\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\", []), []) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir,", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--catkin-lint\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"), True) exitcode, stdout", "\"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--set-gitlab-url\", \"Test\",", "\"--get-gitlab-url\", \"Test\"), (0, \"http://localhost\\n\") ) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--show-gitlab-urls\", \"--autocomplete\"), (0, \"Test\\n\")", "and 'rosrepo exclude'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) exitcode,", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 1) self.assertIn(\"catkin_lint reported errors\",", "self.assertIn(\"delta\", stdout) exitcode, stdout = 
helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 1) self.assertIn(\"catkin_lint reported", "helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir, \"-a\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\"), []) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"),", "####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-rosclipse\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_rosclipse\"), False) exitcode,", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_crawl_depth\"), 1) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\",", "self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}])", "[\"alpha\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), []) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) exitcode, stdout", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-crawl-depth\", \"1\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_crawl_depth\"), 1) ####################### exitcode, stdout", "os.pathsep.join([\"/before\"] + [\"%s/src/%s\" % (self.wsdir, d) for d in [\"alpha\", \"beta\", \"gamma\"]] +", "= helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-PD\") self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"delta\", stdout)", "self.homedir = mkdtemp() helper.create_fake_ros_root(self.ros_root_dir) helper.create_package(self.wsdir, \"alpha\", [\"beta\", \"gamma\", \"installed-system\"]) 
helper.create_package(self.wsdir, \"beta\", [\"delta\"]) helper.create_package(self.wsdir,", "self.wsdir, \"--no-env-cache\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_env_cache\"), False) ####################### def test_init_failures(self): \"\"\"Test proper behavior of", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"NoURL\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot acquire token", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-env-cache\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_env_cache\"), False) exitcode, stdout = helper.run_rosrepo(\"config\",", "= helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot detect ROS distribution\", stdout) exitcode,", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"t0ps3cr3t\"}]) exitcode, stdout = helper.run_rosrepo(\"config\",", "os.symlink(os.path.join(\"..\", \"repos\", \"delta\"), os.path.join(self.wsdir, \"src\", \"delta\")) with open(os.path.join(self.wsdir, \"repos\", \".metainfo\"), \"w\") as f:", "PkgInfo() metadata[\"alpha\"].selected = True metadata[\"beta\"].selected = True metadata[\"beta\"].pinned = True f.write(pickle.dumps(metadata)) exitcode, stdout", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "Dead\", stdout) os.makedirs(os.path.join(self.wsdir, \"build\")) exitcode, stdout = helper.run_rosrepo(\"init\", \"--reset\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode,", "stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-PD\") self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"delta\",", "URL\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, 
\"--gitlab-login\", \"does_not_exist\") self.assertEqual(exitcode, 1) self.assertIn(\"no", "\"-w\", self.wsdir) self.assertEqual(exitcode, 0) self.assertFalse(os.path.isdir(os.path.join(self.wsdir, \"build\"))) def test_upgrade_from_version_1(self): \"\"\"Test if workspaces from rosrepo", "= helper.run_rosrepo(\"build\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"gamma\", stdout) self.assertIn(\"delta\",", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--remove-credentials\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\",", "= PkgInfo() metadata[\"alpha\"].selected = True metadata[\"beta\"].selected = True metadata[\"beta\"].pinned = True f.write(pickle.dumps(metadata)) exitcode,", "ROS distribution\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--all\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot", "self.wsdir, \"-BC\") self.assertEqual(exitcode, 0) self.assertIn(\"search filter\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir,", "\"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) self.assertEqual(self.get_config_value(\"default_build\"), [\"alpha\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"]) def test_upgrade_from_older_version_3(self): \"\"\"Test if workspaces", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"compiler\"), None) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-crawl-depth\", \"2\")", "self.wsdir, \"incomplete\") self.assertEqual(exitcode, 1) self.assertIn(\"missing system package\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\",", "\"w\") as f: f.write(yaml.safe_dump( { \"alpha\": {\"auto\": False, \"pin\": False}, \"beta\": {\"auto\": False,", "cfg = Config(self.wsdir) 
cfg[\"gitlab_servers\"] = [{\"label\": \"NoURL\"}] cfg.write() exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot detect ROS distribution\",", "\"<PASSWORD>\", \"--store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"store_credentials\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-store-credentials\")", "helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot detect ROS distribution\", stdout) exitcode, stdout", "os.environ[\"HOME\"] = self.wsdir exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 1) self.assertIn(\"$HOME\",", "\"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\",", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--env-cache\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_env_cache\"), True) exitcode, stdout =", "self.wsdir, \"--rosclipse\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_rosclipse\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-rosclipse\")", "import shutil import yaml import pickle from tempfile import mkdtemp try: from mock", "\"delta\"), os.path.join(self.wsdir, \"src\", \"delta\")) with open(os.path.join(self.wsdir, \"repos\", \".metainfo\"), \"w\") as f: f.write(yaml.safe_dump( {", "exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, os.path.normpath(os.path.join(os.path.dirname(__file__), os.pardir))) self.assertEqual(exitcode, 1) self.assertIn(\"rosrepo source folder\",", "0) 
self.assertEqual(self.get_config_value(\"default_build\"), []) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"beta\\ndelta\\n\") ) exitcode, stdout", "stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--remove-credentials\") self.assertEqual(exitcode, 0) exitcode, stdout =", "exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"epsilon\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\",", "in [\"alpha\", \"beta\", \"gamma\"]] + [\"/after\"]) self.assertEqual( helper.run_rosrepo(\"bash\", \"-w\", self.wsdir), (0, \"ROS_WORKSPACE=%(wsdir)s\\nROS_PACKAGE_PATH=/before%(sep)s%(wsdir)s/src%(sep)s/after\\n\" %", "\"-w\", self.wsdir, \"--gitlab-logout\", \"does_not_exist\") self.assertEqual(exitcode, 1) self.assertIn(\"no such Gitlab server\", stdout) exitcode, stdout", "self.assertIn(\"gamma\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir, \"--all\") self.assertEqual(exitcode, 0)", "workspaces from future rosrepo versions are detected\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir,", "8) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-job-limit\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), None) #######################", "None self.wsdir = None def get_config_value(self, key, default=None): cfg = Config(self.wsdir, read_only=True) return", "self.wsdir, \"--job-limit\", \"8\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), 8) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "(0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"]) exitcode, stdout = helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir, \"-a\") self.assertEqual(exitcode,", "private token in offline 
mode\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-gitlab-url\",", "if workspaces from rosrepo 1.x are migrated properly\"\"\" os.rename(os.path.join(self.wsdir, \"src\"), os.path.join(self.wsdir, \"repos\")) os.makedirs(os.path.join(self.wsdir,", "import __version__ as rosrepo_version self.assertEqual(self.get_config_value(\"version\"), rosrepo_version) def test_incompatible_new_version(self): \"\"\"Test if workspaces from future", "<NAME> # # Copyright 2016 <NAME> # # Licensed under the Apache License,", "self.wsdir) self.assertEqual(exitcode, 1) self.assertIn(\"cannot detect ROS distribution\", stdout) ####################### exitcode, stdout = helper.run_rosrepo(\"config\",", "the License for the specific language governing permissions and # limitations under the", "helper.create_package(self.wsdir, \"incomplete\", [\"missing-system\"]) helper.create_package(self.wsdir, \"ancient\", [], deprecated=True) helper.create_package(self.wsdir, \"ancient2\", [], deprecated=\"Walking Dead\") for", "\"beta\": {\"auto\": False, \"pin\": True}, \"gamma\": {\"auto\": True, \"pin\": False}, \"delta\": {\"auto\": True,", "\"-w\", self.wsdir, \"--show-gitlab-urls\") self.assertEqual(exitcode, 0) self.assertIn(\"Test\", stdout) self.assertIn(\"http://localhost\", stdout) self.assertIn(\"yes\", stdout) exitcode, stdout", "'rosrepo list'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) exitcode, stdout", "self.assertEqual(exitcode, 1) self.assertIn(\"cannot detect ROS distribution\", stdout) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "self.assertFalse(os.path.isdir(os.path.join(self.wsdir, \"build\"))) def test_upgrade_from_version_1(self): \"\"\"Test if workspaces from rosrepo 1.x are migrated properly\"\"\"", "\"-w\", self.wsdir, \"--catkin-lint\") self.assertEqual(exitcode, 0) 
self.assertEqual(self.get_config_value(\"use_catkin_lint\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "deprecated=True) helper.create_package(self.wsdir, \"ancient2\", [], deprecated=\"Walking Dead\") for blacklisted_key in [\"ROS_WORKSPACE\", \"ROS_PACKAGE_PATH\"]: if blacklisted_key", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"compiler\"), \"clang\") exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-compiler\", \"does_not_exist\") self.assertEqual(exitcode,", "0) self.assertEqual(self.get_config_value(\"job_limit\"), None) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--install\") self.assertEqual(exitcode, 0)", "0) self.assertEqual(self.get_config_value(\"job_limit\"), None) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"8\") self.assertEqual(exitcode, 0)", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"0\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), None) exitcode, stdout = helper.run_rosrepo(\"config\",", "\"--show-gitlab-urls\", \"--autocomplete\"), (0, \"\\n\") ) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\",", "\"-w\", self.wsdir, \"-BC\") self.assertEqual(exitcode, 0) self.assertIn(\"search filter\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\",", "False) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-env-cache\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_env_cache\"), False)", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-rosclipse\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_rosclipse\"), False) exitcode, stdout =", "self.assertIn(\"no packages to build\", stdout) helper.failing_programs = 
[\"catkin_lint\"] exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\",", "self.wsdir, \"--set-gitlab-crawl-depth\", \"1\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_crawl_depth\"), 1) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "self.assertEqual(exitcode, 0) def test_list(self): \"\"\"Test proper behavior of 'rosrepo list'\"\"\" exitcode, stdout =", "helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"beta\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir,", "variable UNKNOWN is not set\\n\" % {\"wsdir\": self.wsdir, \"env_path\": os.environ[\"PATH\"]}) ) os.environ[\"ROS_PACKAGE_PATH\"] =", "without URL\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"does_not_exist\") self.assertEqual(exitcode, 1)", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"1\") self.assertEqual(exitcode,", "\"installed-system\"]) helper.create_package(self.wsdir, \"beta\", [\"delta\"]) helper.create_package(self.wsdir, \"gamma\", []) helper.create_package(self.wsdir, \"delta\", []) helper.create_package(self.wsdir, \"epsilon\", [\"broken\"])", "\"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"Test\", \"--private-token\",", "0) self.assertEqual(self.get_config_value(\"use_env_cache\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-env-cache\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_env_cache\"),", "True metadata[\"beta\"].pinned = True f.write(pickle.dumps(metadata)) exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", 
self.ros_root_dir, self.wsdir) self.assertEqual(exitcode,", "0) self.assertEqual(self.get_config_value(\"gitlab_crawl_depth\"), 2) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-crawl-depth\", \"1\") self.assertEqual(exitcode, 0)", "stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) cfg = Config(self.wsdir) cfg[\"version\"] =", "Config(self.wsdir, read_only=True) return cfg.get(key, default) def test_bash(self): \"\"\"Test proper behavior of 'rosrepo bash'\"\"\"", "offline mode\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-gitlab-url\", \"Test\") self.assertEqual(exitcode, 0)", "self.assertEqual( helper.run_rosrepo(\"bash\", \"-w\", self.wsdir), (0, \"ROS_WORKSPACE=%(wsdir)s\\nROS_PACKAGE_PATH=/before%(sep)s%(wsdir)s/src%(sep)s/after\\n\" % {\"wsdir\": self.wsdir, \"sep\": os.pathsep}) ) def", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-compiler\", \"clang\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"compiler\"), \"clang\") exitcode,", "stdout = helper.run_rosrepo(\"init\", \"--reset\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) self.assertFalse(os.path.isdir(os.path.join(self.wsdir, \"build\"))) self.assertEqual(self.get_config_value(\"default_build\", []),", "exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"incomplete\") self.assertEqual(exitcode, 0) self.assertIn(\"apt-get install\", stdout)", "= helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-default\") self.assertEqual(exitcode, 1) self.assertIn(\"no packages given\", stdout) exitcode, stdout", "\"-n\"), (0, \"beta\\ndelta\\n\") ) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"beta\") self.assertEqual(exitcode,", "[\"alpha\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"]) 
def test_upgrade_from_version_2(self): \"\"\"Test if workspaces from rosrepo 2.x are migrated", "stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\",", "stdout) os.environ[\"HOME\"] = self.wsdir exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 1)", "mode\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-gitlab-url\", \"Test\") self.assertEqual(exitcode, 0) cfg", "is not set\\n\" % {\"wsdir\": self.wsdir, \"env_path\": os.environ[\"PATH\"]}) ) os.environ[\"ROS_PACKAGE_PATH\"] = os.pathsep.join([\"/before\"] +", "detect ROS distribution\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--all\") self.assertEqual(exitcode, 1)", "\"<PASSWORD>\", \"--store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"t0ps3cr3t\"}]) exitcode, stdout", "[]) helper.create_package(self.wsdir, \"delta\", []) helper.create_package(self.wsdir, \"epsilon\", [\"broken\"]) helper.create_package(self.wsdir, \"broken\", [\"missing\"]) helper.create_package(self.wsdir, \"incomplete\", [\"missing-system\"])", "self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"bash\", \"-w\", self.wsdir, \"ROS_WORKSPACE\", \"ROS_PACKAGE_PATH\", \"PATH\", \"UNKNOWN\"), (0,", "self.homedir os.environ[\"XDG_CONFIG_HOME\"] = os.path.join(self.homedir, \".config\") def tearDown(self): shutil.rmtree(self.wsdir, ignore_errors=True) shutil.rmtree(self.homedir, ignore_errors=True) shutil.rmtree(self.ros_root_dir, ignore_errors=True)", "\"-w\", self.wsdir, \"--no-catkin-lint\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"), False) ####################### 
exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "\"-w\", self.wsdir, \"--set-compiler\", \"does_not_exist\") self.assertEqual(exitcode, 1) self.assertIn(\"unknown compiler\", stdout) self.assertEqual(self.get_config_value(\"compiler\"), \"clang\") exitcode, stdout", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot acquire", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-catkin-lint\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"), False) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-logout\", \"does_not_exist\") self.assertEqual(exitcode, 1) self.assertIn(\"no such Gitlab server\", stdout) exitcode,", "\"http://localhost\", \"--private-token\", \"<PASSWORD>\", \"--store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"store_credentials\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "Version 2.0 (the \"License\"); # you may not use this file except in", "\"-w\", self.wsdir, \"--install\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"install\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "[\"beta\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), []) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--pinned\", \"epsilon\") self.assertEqual(exitcode, 1)", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-ros-root\", self.ros_root_dir) self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"ros_root\"), self.ros_root_dir) #######################", "helper.run_rosrepo(\"bash\", \"-w\", self.wsdir), (0, \"ROS_WORKSPACE=%(wsdir)s\\nROS_PACKAGE_PATH=/before%(sep)s%(wsdir)s/src%(sep)s/after\\n\" % 
{\"wsdir\": self.wsdir, \"sep\": os.pathsep}) ) def test_clean(self):", "self.wsdir, \"--show-gitlab-urls\", \"--autocomplete\"), (0, \"Test\\n\") ) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--show-gitlab-urls\")", "(0, \"\\n\") ) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\",", "[]), [\"alpha\"]) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-pinned\") self.assertEqual(exitcode, 1) self.assertIn(\"no packages", "stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"epsilon\", stdout) def test_config(self): \"\"\"Test proper behavior of 'rosrepo config'\"\"\"", "\"pin\": False}, }, default_flow_style=False )) exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode,", "workspaces with multiple Gitlab repositories # # Author: <NAME> # # Copyright 2016", "\"--default\", \"epsilon\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\", stdout) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\",", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "False) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--catkin-lint\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"), True) exitcode,", "stdout) self.assertIn(\"http://localhost\", stdout) self.assertIn(\"yes\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-logout\", \"does_not_exist\")", "\"-w\", self.wsdir, \"--no-install\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"install\"), False) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "\"-w\", 
self.wsdir, \"alpha\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot detect ROS distribution\", stdout) exitcode, stdout =", "cfg.get(key, default) def test_bash(self): \"\"\"Test proper behavior of 'rosrepo bash'\"\"\" exitcode, stdout =", "self.assertIn(\"beta\", stdout) self.assertIn(\"gamma\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir, \"--all\")", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--gitlab-login\", \"Test\") self.assertEqual(exitcode, 0) self.assertIn(\"cannot verify", "lambda x: None): exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot", "False) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-rosclipse\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_rosclipse\"), False)", "blacklisted_key in [\"ROS_WORKSPACE\", \"ROS_PACKAGE_PATH\"]: if blacklisted_key in os.environ: del os.environ[blacklisted_key] os.environ[\"HOME\"] = self.homedir", "\"ROS_WORKSPACE\", \"ROS_PACKAGE_PATH\", \"PATH\", \"UNKNOWN\"), (0, \"ROS_WORKSPACE=%(wsdir)s\\nROS_PACKAGE_PATH=%(wsdir)s/src\\nPATH=%(env_path)s\\n# variable UNKNOWN is not set\\n\" % {\"wsdir\":", "os.rename(os.path.join(self.wsdir, \"src\"), os.path.join(self.wsdir, \"repos\")) os.makedirs(os.path.join(self.wsdir, \"src\")) with open(os.path.join(self.wsdir, \"src\", \"CMakeLists.txt\"), \"w\"): pass with", "\"--no-status\", \"--keep-going\", \"-j2\") self.assertEqual(exitcode, 0) def test_list(self): \"\"\"Test proper behavior of 'rosrepo list'\"\"\"", "token in offline mode\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--remove-credentials\") self.assertEqual(exitcode,", "self.assertIn(\"beta\", stdout) self.assertIn(\"gamma\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = 
helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"alpha\")", "= helper.run_rosrepo(\"init\", \"--reset\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) self.assertFalse(os.path.isdir(os.path.join(self.wsdir, \"build\"))) self.assertEqual(self.get_config_value(\"default_build\", []), [])", "0) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 0) exitcode, stdout =", "os.path.join(self.homedir, \".config\") def tearDown(self): shutil.rmtree(self.wsdir, ignore_errors=True) shutil.rmtree(self.homedir, ignore_errors=True) shutil.rmtree(self.ros_root_dir, ignore_errors=True) self.ros_root_dir = None", "stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"bash\", \"-w\", self.wsdir, \"ROS_WORKSPACE\",", "PkgInfo with open(os.path.join(self.wsdir, \".rosrepo\", \"info\"), \"wb\") as f: metadata = {} metadata[\"alpha\"] =", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--remove-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}])", "cfg.write() exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"NoURL\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot acquire", "[{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\",", "packages given\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-pinned\", \"beta\") self.assertEqual(exitcode, 0)", "\"http://localhost\") self.assertEqual(exitcode, 0) self.assertIn(\"cannot verify Gitlab private token in offline mode\", stdout) exitcode,", "\"--no-catkin-lint\") self.assertEqual(exitcode, 0) 
self.assertEqual(self.get_config_value(\"use_catkin_lint\"), False) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-rosclipse\")", "helper.failing_programs = [] with patch(\"rosrepo.cmd_build.find_ros_root\", lambda x: None): exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\",", "\"3.0.0a0\" cfg.write() self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) from rosrepo import", "False, \"pin\": True}, \"gamma\": {\"auto\": True, \"pin\": False}, \"delta\": {\"auto\": True, \"pin\": False},", "helper.create_package(self.wsdir, \"broken\", [\"missing\"]) helper.create_package(self.wsdir, \"incomplete\", [\"missing-system\"]) helper.create_package(self.wsdir, \"ancient\", [], deprecated=True) helper.create_package(self.wsdir, \"ancient2\", [],", "\"--job-limit\", \"1\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--dry-run\") self.assertEqual(exitcode, 1)", "self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"]) exitcode, stdout = helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir,", "stdout) helper.failing_programs = [] with patch(\"rosrepo.cmd_build.find_ros_root\", lambda x: None): exitcode, stdout = helper.run_rosrepo(\"build\",", "if workspaces from rosrepo 2.x are migrated properly\"\"\" with open(os.path.join(self.wsdir, \".catkin_workspace\"), \"w\"): pass", "self.assertEqual(exitcode, 1) self.assertIn(\"cannot acquire Gitlab private token in offline mode\", stdout) exitcode, stdout", "\"-w\", self.wsdir, \"--set-gitlab-crawl-depth\", \"2\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_crawl_depth\"), 2) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "\"private_token\": \"<PASSWORD>\"}]) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, 
\"--get-gitlab-url\", \"does_not_exist\"), (0, \"\\n\") ) self.assertEqual( helper.run_rosrepo(\"config\",", "cfg[\"version\"] = \"999.0\" cfg.write() exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\") self.assertEqual(exitcode, 1)", "True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-rosclipse\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_rosclipse\"), False) #######################", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"0\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), None) exitcode, stdout =", "self.wsdir, \"--remove-credentials\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--set-gitlab-url\", \"Test\",", "self.assertIn(\"cannot resolve dependencies\", stdout) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"epsilon\") self.assertEqual(exitcode,", "\"-w\", self.wsdir, \"--dry-run\", \"alpha\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\", []), []) exitcode, stdout = helper.run_rosrepo(\"include\",", "self.wsdir), (0, \"ROS_WORKSPACE=%(wsdir)s\\nROS_PACKAGE_PATH=/before%(sep)s%(wsdir)s/src%(sep)s/after\\n\" % {\"wsdir\": self.wsdir, \"sep\": os.pathsep}) ) def test_clean(self): \"\"\"Test proper", "metadata[\"beta\"].selected = True metadata[\"beta\"].pinned = True f.write(pickle.dumps(metadata)) exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir,", "self.assertNotIn(\"beta\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-PD\") self.assertEqual(exitcode, 0)", "}, default_flow_style=False )) exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) self.assertEqual(", "stdout) 
os.makedirs(os.path.join(self.wsdir, \"build\")) exitcode, stdout = helper.run_rosrepo(\"init\", \"--reset\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0)", "self.wsdir) self.assertEqual(exitcode, 0) os.makedirs(os.path.join(self.wsdir, \"build\")) exitcode, stdout = helper.run_rosrepo(\"clean\", \"-w\", self.wsdir, \"--dry-run\") self.assertEqual(exitcode,", "import yaml import pickle from tempfile import mkdtemp try: from mock import patch", "1) self.assertIn(\"cannot acquire Gitlab private token in offline mode\", stdout) exitcode, stdout =", "[\"beta\", \"gamma\", \"installed-system\"]) helper.create_package(self.wsdir, \"beta\", [\"delta\"]) helper.create_package(self.wsdir, \"gamma\", []) helper.create_package(self.wsdir, \"delta\", []) helper.create_package(self.wsdir,", "# coding=utf-8 # # ROSREPO # Manage ROS workspaces with multiple Gitlab repositories", "stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"incomplete\") self.assertEqual(exitcode, 0) self.assertIn(\"apt-get install\", stdout) exitcode,", "self.assertEqual(self.get_config_value(\"use_rosclipse\"), False) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--rosclipse\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_rosclipse\"), True)", "\"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) exitcode, stdout", "helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"incomplete\") self.assertEqual(exitcode, 0) self.assertIn(\"apt-get install\", stdout) exitcode, stdout =", "self.wsdir, \"--get-gitlab-url\", \"does_not_exist\"), (0, \"\\n\") ) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--get-gitlab-url\", \"Test\"), (0,", "self.assertNotIn(\"epsilon\", stdout) exitcode, stdout = 
helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-BC\") self.assertEqual(exitcode, 0) self.assertIn(\"search filter\",", "self.assertEqual(self.get_config_value(\"job_limit\"), 16) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"0\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"),", "self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\", \"--store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\":", "\"-w\", self.wsdir, \"--show-gitlab-urls\", \"--autocomplete\"), (0, \"\\n\") ) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "self.assertEqual(self.get_config_value(\"pinned_build\", []), []) def test_build(self): \"\"\"Test proper behavior of 'rosrepo build'\"\"\" exitcode, stdout", "self.assertEqual(self.get_config_value(\"default_build\"), [\"alpha\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"]) def test_upgrade_from_version_2(self): \"\"\"Test if workspaces from rosrepo 2.x are", "\"-w\", self.wsdir, \"--job-limit\", \"16\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), 16) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "= helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir, \"--all\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir)", "\"-w\", self.wsdir, \"--no-job-limit\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), None) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--clean\", \"--dry-run\", \"--offline\", \"--verbose\", \"--no-status\", \"--keep-going\", \"-j2\") self.assertEqual(exitcode,", "of 'rosrepo list'\"\"\" exitcode, stdout = 
helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) exitcode,", "self.assertEqual(self.get_config_value(\"pinned_build\"), []) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--pinned\", \"epsilon\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot", "helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-PD\") self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"delta\", stdout) exitcode,", "[]) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) exitcode, stdout = helper.run_rosrepo(\"include\",", "properly\"\"\" os.rename(os.path.join(self.wsdir, \"src\"), os.path.join(self.wsdir, \"repos\")) os.makedirs(os.path.join(self.wsdir, \"src\")) with open(os.path.join(self.wsdir, \"src\", \"CMakeLists.txt\"), \"w\"): pass", "\"Test\", \"url\": \"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-gitlab-url\", \"Test\") self.assertEqual(exitcode, 0)", "####################### def test_init_failures(self): \"\"\"Test proper behavior of 'rosrepo init'\"\"\" with patch(\"rosrepo.cmd_init.find_ros_root\", lambda x:", "self.wsdir, \"--default\", \"ancient\", \"ancient2\") self.assertEqual(exitcode, 0) self.assertIn(\"is deprecated\", stdout) self.assertIn(\"Walking Dead\", stdout) os.makedirs(os.path.join(self.wsdir,", "{ \"alpha\": {\"auto\": False, \"pin\": False}, \"beta\": {\"auto\": False, \"pin\": True}, \"gamma\": {\"auto\":", ")) exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\",", "helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-P\") self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertNotIn(\"delta\", stdout) exitcode,", "= 
helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--pinned\", \"beta\") self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"),", "helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--pinned\", \"beta\") self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0,", ") self.assertEqual(self.get_config_value(\"default_build\"), [\"alpha\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"]) def test_upgrade_from_older_version_3(self): \"\"\"Test if workspaces from rosrepo 3.x", "rosrepo import __version__ as rosrepo_version self.assertEqual(self.get_config_value(\"version\"), rosrepo_version) def test_incompatible_new_version(self): \"\"\"Test if workspaces from", "\"-w\", self.wsdir, \"--clean\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--clean\", \"--dry-run\",", "self.assertEqual(exitcode, 0) self.assertIn(\"search filter\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-S\") self.assertEqual(exitcode,", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-catkin-lint\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"), False) exitcode, stdout = helper.run_rosrepo(\"config\",", "\"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"Test\") self.assertEqual(exitcode, 0)", "\"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--pinned\", \"beta\") self.assertEqual(exitcode,", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\":", "= 
helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-pinned\") self.assertEqual(exitcode, 1) self.assertIn(\"no packages given\", stdout) exitcode, stdout", "OF ANY KIND, either express or implied. # See the License for the", "self.wsdir) self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 0) cfg", "\"--default\", \"ancient\", \"ancient2\") self.assertEqual(exitcode, 0) self.assertIn(\"is deprecated\", stdout) self.assertIn(\"Walking Dead\", stdout) os.makedirs(os.path.join(self.wsdir, \"build\"))", "0) self.assertIn(\"Test\", stdout) self.assertIn(\"http://localhost\", stdout) self.assertIn(\"yes\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-ros-root\", self.ros_root_dir) self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"ros_root\"), self.ros_root_dir) ####################### exitcode, stdout", "def get_config_value(self, key, default=None): cfg = Config(self.wsdir, read_only=True) return cfg.get(key, default) def test_bash(self):", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"8\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), 8) exitcode,", "mode\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--gitlab-login\", \"Test\") self.assertEqual(exitcode, 1)", "0) self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"gamma\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\",", "self.wsdir, \"--install\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"install\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-install\")", "0) self.assertEqual(self.get_config_value(\"job_limit\"), 16) exitcode, stdout = 
helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"0\") self.assertEqual(exitcode, 0)", "stdout) self.assertIn(\"Walking Dead\", stdout) os.makedirs(os.path.join(self.wsdir, \"build\")) exitcode, stdout = helper.run_rosrepo(\"init\", \"--reset\", \"-r\", self.ros_root_dir,", "stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir, \"--all\") self.assertEqual(exitcode, 0) exitcode,", "\"Test\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "self.wsdir, \"--gitlab-login\", \"Test\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode,", "\"repos\")) os.makedirs(os.path.join(self.wsdir, \"src\")) with open(os.path.join(self.wsdir, \"src\", \"CMakeLists.txt\"), \"w\"): pass with open(os.path.join(self.wsdir, \"src\", \"toplevel.cmake\"),", "\"-w\", self.wsdir, \"--unset-compiler\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"compiler\"), None) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "test_upgrade_from_version_1(self): \"\"\"Test if workspaces from rosrepo 1.x are migrated properly\"\"\" os.rename(os.path.join(self.wsdir, \"src\"), os.path.join(self.wsdir,", "x: None): exitcode, stdout = helper.run_rosrepo(\"init\", self.wsdir) self.assertEqual(exitcode, 1) self.assertIn(\"cannot detect ROS distribution\",", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"pinned_build\", []), [\"beta\"]) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0)", "exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\") self.assertEqual(exitcode, 1) 
self.assertIn(\"newer version\", stdout) def", "0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>token\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "\"alpha\")) os.symlink(os.path.join(\"..\", \"repos\", \"beta\"), os.path.join(self.wsdir, \"src\", \"beta\")) os.symlink(os.path.join(\"..\", \"repos\", \"gamma\"), os.path.join(self.wsdir, \"src\", \"gamma\"))", "exitcode, stdout = helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir, \"-a\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\"), []) self.assertEqual( helper.run_rosrepo(\"list\",", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-crawl-depth\", \"2\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_crawl_depth\"), 2) exitcode, stdout =", "\"clang\") exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-compiler\", \"does_not_exist\") self.assertEqual(exitcode, 1) self.assertIn(\"unknown compiler\",", "\"beta\\ndelta\\n\") ) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"beta\") self.assertEqual(exitcode, 0) exitcode,", "sys.stdout from rosrepo.config import Config import test.helper as helper class WorkspaceTest(unittest.TestCase): def setUp(self):", "0) with patch(\"rosrepo.cmd_config.find_ros_root\", lambda x: None): exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir) self.assertEqual(exitcode,", "self.assertIn(\"$HOME\", stdout) exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, os.path.normpath(os.path.join(os.path.dirname(__file__), os.pardir))) self.assertEqual(exitcode, 1) self.assertIn(\"rosrepo", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"install\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-install\") 
self.assertEqual(exitcode, 0)", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) exitcode, stdout =", "get_config_value(self, key, default=None): cfg = Config(self.wsdir, read_only=True) return cfg.get(key, default) def test_bash(self): \"\"\"Test", "exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--clean\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"build\",", "self.assertEqual(exitcode, 1) self.assertIn(\"catkin_lint reported errors\", stdout) helper.failing_programs = [] with patch(\"rosrepo.cmd_build.find_ros_root\", lambda x:", "self.ros_root_dir) helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-ros-root\") self.assertEqual(self.get_config_value(\"ros_root\"), None) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "\"src\", \"delta\")) with open(os.path.join(self.wsdir, \"repos\", \".metainfo\"), \"w\") as f: f.write(yaml.safe_dump( { \"alpha\": {\"auto\":", "self.wsdir) self.assertEqual(exitcode, 0) cfg = Config(self.wsdir) cfg[\"version\"] = \"999.0\" cfg.write() exitcode, stdout =", "\"src\"), os.path.join(self.wsdir, \"repos\")) os.makedirs(os.path.join(self.wsdir, \"src\")) with open(os.path.join(self.wsdir, \"src\", \"CMakeLists.txt\"), \"w\"): pass with open(os.path.join(self.wsdir,", "stdout) self.assertEqual(self.get_config_value(\"default_build\"), [\"beta\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), []) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"incomplete\")", "\"Test\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) exitcode,", "self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) 
self.assertIn(\"gamma\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\",", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-logout\", \"Test\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}])", "(0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) from rosrepo import __version__ as rosrepo_version self.assertEqual(self.get_config_value(\"version\"), rosrepo_version) def test_incompatible_new_version(self):", "are migrated properly\"\"\" with open(os.path.join(self.wsdir, \".catkin_workspace\"), \"w\"): pass os.makedirs(os.path.join(self.wsdir, \".catkin_tools\", \"profiles\", \"rosrepo\")) os.makedirs(os.path.join(self.wsdir,", "stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-PD\") self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\",", "with open(os.path.join(self.wsdir, \"repos\", \".metainfo\"), \"w\") as f: f.write(yaml.safe_dump( { \"alpha\": {\"auto\": False, \"pin\":", "0) self.assertEqual(self.get_config_value(\"use_rosclipse\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-rosclipse\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_rosclipse\"),", "self.assertEqual(self.get_config_value(\"default_build\", []), [\"alpha\"]) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-pinned\") self.assertEqual(exitcode, 1) self.assertIn(\"no", "or agreed to in writing, software # distributed under the License is distributed", "stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-W\") self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\",", "\"NoURL\"}] cfg.write() exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"NoURL\") self.assertEqual(exitcode, 1) 
self.assertIn(\"cannot", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\":", "self.ros_root_dir) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-catkin-lint\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"), False)", "self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--get-gitlab-url\", \"does_not_exist\"), (0, \"\\n\") ) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "True metadata[\"beta\"].selected = True metadata[\"beta\"].pinned = True f.write(pickle.dumps(metadata)) exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\",", "= {} metadata[\"alpha\"] = PkgInfo() metadata[\"beta\"] = PkgInfo() metadata[\"alpha\"].selected = True metadata[\"beta\"].selected =", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "self.assertEqual(self.get_config_value(\"install\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-install\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"install\"), False)", "\"-w\", self.wsdir, \"--show-gitlab-urls\", \"--autocomplete\"), (0, \"Test\\n\") ) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) with patch(\"rosrepo.cmd_config.find_ros_root\", lambda x: None): exitcode, stdout", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"store_credentials\"), False) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\":", "\"beta\") self.assertEqual(exitcode, 0) self.assertEqual( 
helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"])", "in offline mode\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--remove-credentials\") self.assertEqual(exitcode, 0)", "License. # You may obtain a copy of the License at # #", "self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 1) self.assertIn(\"$HOME\", stdout) exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, os.path.normpath(os.path.join(os.path.dirname(__file__),", "0) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"]) exitcode, stdout", "self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) cfg = Config(self.wsdir) cfg[\"version\"] = \"999.0\" cfg.write() exitcode, stdout", "self.wsdir, \"--default\", \"beta\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir, \"--pinned\", \"beta\")", "\"-a\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\"), []) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"beta\\ndelta\\n\") )", "self.wsdir, \"--unset-gitlab-url\", \"Test\") self.assertEqual(exitcode, 0) cfg = Config(self.wsdir) cfg[\"gitlab_servers\"] = [{\"label\": \"NoURL\"}] cfg.write()", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-crawl-depth\", \"2\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_crawl_depth\"), 2) exitcode,", "behavior of 'rosrepo bash'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0)", ") os.environ[\"ROS_PACKAGE_PATH\"] = os.pathsep.join([\"/before\"] + [\"%s/src/%s\" % (self.wsdir, d) for d in 
[\"alpha\",", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-env-cache\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_env_cache\"), False) ####################### def test_init_failures(self):", "{\"wsdir\": self.wsdir, \"sep\": os.pathsep}) ) def test_clean(self): \"\"\"Test proper behavior of 'rosrepo clean'\"\"\"", "\"http://localhost\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\",", "[\"beta\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), []) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"incomplete\") self.assertEqual(exitcode, 0)", "1) self.assertIn(\"unknown compiler\", stdout) self.assertEqual(self.get_config_value(\"compiler\"), \"clang\") exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-compiler\")", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-catkin-lint\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"), False) exitcode, stdout", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "self.wsdir, \"sep\": os.pathsep}) ) def test_clean(self): \"\"\"Test proper behavior of 'rosrepo clean'\"\"\" exitcode,", "self.assertNotIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertNotIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-Pv\")", "\"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode,", "Gitlab server\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-logout\", \"Test\") self.assertEqual(exitcode, 0)", "self.wsdir, \"--set-gitlab-url\", \"Test\", 
\"http://localhost\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}])", "include' and 'rosrepo exclude'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0)", "\"ancient2\", [], deprecated=\"Walking Dead\") for blacklisted_key in [\"ROS_WORKSPACE\", \"ROS_PACKAGE_PATH\"]: if blacklisted_key in os.environ:", "\"src\", \"gamma\")) os.symlink(os.path.join(\"..\", \"repos\", \"delta\"), os.path.join(self.wsdir, \"src\", \"delta\")) with open(os.path.join(self.wsdir, \"repos\", \".metainfo\"), \"w\")", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir) self.assertEqual(exitcode, 1) self.assertIn(\"cannot detect ROS distribution\", stdout) #######################", "stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"gamma\", stdout) self.assertIn(\"delta\", stdout) self.assertNotIn(\"epsilon\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\",", "= helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-pinned\", \"beta\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"pinned_build\", []), [\"beta\"]) exitcode, stdout", "\"--all\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\", stdout)", "exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--pinned\", \"epsilon\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\",", "self.assertEqual(exitcode, 1) self.assertIn(\"no packages given\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-default\",", "permissions and # limitations under the License. 
# # import unittest import os", "\"-w\", self.wsdir, \"--set-default\", \"alpha\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\", []), [\"alpha\"]) exitcode, stdout = helper.run_rosrepo(\"build\",", "workspaces from rosrepo 1.x are migrated properly\"\"\" os.rename(os.path.join(self.wsdir, \"src\"), os.path.join(self.wsdir, \"repos\")) os.makedirs(os.path.join(self.wsdir, \"src\"))", ") self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--show-gitlab-urls\", \"--autocomplete\"), (0, \"Test\\n\") ) exitcode, stdout =", "behavior of 'rosrepo include' and 'rosrepo exclude'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir,", "del os.environ[blacklisted_key] os.environ[\"HOME\"] = self.homedir os.environ[\"XDG_CONFIG_HOME\"] = os.path.join(self.homedir, \".config\") def tearDown(self): shutil.rmtree(self.wsdir, ignore_errors=True)", "stdout) self.assertIn(\"gamma\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode,", "stdout) self.assertIn(\"gamma\", stdout) self.assertIn(\"delta\", stdout) self.assertNotIn(\"epsilon\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir,", "and # limitations under the License. 
# # import unittest import os import", "f.write(yaml.safe_dump( { \"alpha\": {\"auto\": False, \"pin\": False}, \"beta\": {\"auto\": False, \"pin\": True}, \"gamma\":", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\",", "License, Version 2.0 (the \"License\"); # you may not use this file except", "exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"incomplete\") self.assertEqual(exitcode, 1) self.assertIn(\"missing system package\", stdout)", "\"-w\", self.wsdir, \"--job-limit\", \"8\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), 8) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "os.makedirs(os.path.join(self.wsdir, \".catkin_tools\", \"profiles\", \"rosrepo\")) os.makedirs(os.path.join(self.wsdir, \".rosrepo\")) from rosrepo.common import PkgInfo with open(os.path.join(self.wsdir, \".rosrepo\",", "self.assertEqual(self.get_config_value(\"use_rosclipse\"), False) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-env-cache\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_env_cache\"),", "\"<PASSWORD>\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode,", "test_buildset(self): \"\"\"Test proper behavior of 'rosrepo include' and 'rosrepo exclude'\"\"\" exitcode, stdout =", "\"-P\") self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertNotIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\",", "self.assertEqual(exitcode, 0) self.assertIn(\"apt-get install\", stdout) exitcode, stdout = helper.run_rosrepo(\"include\", 
\"-w\", self.wsdir, \"--default\", \"ancient\",", "self.wsdir, \"--default\", \"--all\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\", stdout) self.assertEqual(self.get_config_value(\"default_build\"), [\"beta\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), [])", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-rosclipse\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_rosclipse\"), False) ####################### exitcode, stdout = helper.run_rosrepo(\"config\",", "self.wsdir) self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"bash\", \"-w\", self.wsdir, \"ROS_WORKSPACE\", \"ROS_PACKAGE_PATH\", \"PATH\", \"UNKNOWN\"), (0, \"ROS_WORKSPACE=%(wsdir)s\\nROS_PACKAGE_PATH=%(wsdir)s/src\\nPATH=%(env_path)s\\n#", "stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-default\", \"alpha\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\", []),", "\"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode,", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-crawl-depth\", \"2\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_crawl_depth\"), 2) exitcode, stdout", "proper behavior of 'rosrepo clean'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode,", "self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertNotIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\",", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_rosclipse\"), False) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--rosclipse\") 
self.assertEqual(exitcode, 0)", "proper behavior of 'rosrepo build'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode,", "self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\", \"--store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"store_credentials\"), True) exitcode, stdout", "limitations under the License. # # import unittest import os import shutil import", "[\"alpha\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"]) def test_upgrade_from_older_version_3(self): \"\"\"Test if workspaces from rosrepo 3.x are upgraded", "\"-w\", self.wsdir, \"--unset-gitlab-url\", \"Test\") self.assertEqual(exitcode, 0) cfg = Config(self.wsdir) cfg[\"gitlab_servers\"] = [{\"label\": \"NoURL\"}]", "helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--all\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\", stdout) exitcode, stdout =", "\"-j2\") self.assertEqual(exitcode, 0) def test_list(self): \"\"\"Test proper behavior of 'rosrepo list'\"\"\" exitcode, stdout", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "mode\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--gitlab-login\", \"Test\") self.assertEqual(exitcode, 0)", "\"alpha\\nbeta\\ndelta\\ngamma\\n\") ) self.assertEqual(self.get_config_value(\"default_build\"), [\"alpha\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"]) def test_upgrade_from_version_2(self): \"\"\"Test if workspaces from rosrepo", "cfg = Config(self.wsdir) cfg[\"version\"] = \"999.0\" cfg.write() exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir,", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode, 1) 
self.assertIn(\"cannot acquire Gitlab private", "stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 1) self.assertIn(\"catkin_lint", "helper.create_package(self.wsdir, \"gamma\", []) helper.create_package(self.wsdir, \"delta\", []) helper.create_package(self.wsdir, \"epsilon\", [\"broken\"]) helper.create_package(self.wsdir, \"broken\", [\"missing\"]) helper.create_package(self.wsdir,", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-compiler\", \"does_not_exist\") self.assertEqual(exitcode, 1) self.assertIn(\"unknown compiler\", stdout) self.assertEqual(self.get_config_value(\"compiler\"),", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-gitlab-url\", \"Test\") self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\",", "[{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-gitlab-url\", \"Test\") self.assertEqual(exitcode,", "\"ancient\", [], deprecated=True) helper.create_package(self.wsdir, \"ancient2\", [], deprecated=\"Walking Dead\") for blacklisted_key in [\"ROS_WORKSPACE\", \"ROS_PACKAGE_PATH\"]:", "\"--gitlab-logout\", \"Test\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\",", "\"--no-env-cache\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_env_cache\"), False) ####################### def test_init_failures(self): \"\"\"Test proper behavior of 'rosrepo", "\"\"\"Test proper behavior of 'rosrepo init'\"\"\" with patch(\"rosrepo.cmd_init.find_ros_root\", lambda x: None): exitcode, stdout", "\"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--store-credentials\") self.assertEqual(exitcode, 0) 
self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>token\"}])", "\"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 0) cfg = Config(self.wsdir) cfg[\"version\"] = \"3.0.0a0\" cfg.write() self.assertEqual(", "self.wsdir) self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--dry-run\", \"alpha\") self.assertEqual(exitcode, 0)", "\"--job-limit\", \"16\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), 16) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\",", "\"--no-store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"store_credentials\"), False) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) exitcode,", "if blacklisted_key in os.environ: del os.environ[blacklisted_key] os.environ[\"HOME\"] = self.homedir os.environ[\"XDG_CONFIG_HOME\"] = os.path.join(self.homedir, \".config\")", "exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) cfg = Config(self.wsdir) cfg[\"version\"]", "stdout) self.assertIn(\"yes\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-logout\", \"does_not_exist\") self.assertEqual(exitcode, 1)", "\"repos\", \"delta\"), os.path.join(self.wsdir, \"src\", \"delta\")) with open(os.path.join(self.wsdir, \"repos\", \".metainfo\"), \"w\") as f: f.write(yaml.safe_dump(", "\"--default\", \"--all\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\", stdout) self.assertEqual(self.get_config_value(\"default_build\"), [\"beta\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), []) exitcode,", "= Config(self.wsdir) cfg[\"version\"] = \"999.0\" cfg.write() 
exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\")", "stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-default\") self.assertEqual(exitcode, 1) self.assertIn(\"no packages given\",", "rosrepo 3.x are upgraded to latest version\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir,", "self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"gamma\", stdout) self.assertIn(\"delta\", stdout) self.assertNotIn(\"epsilon\", stdout) exitcode, stdout =", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"store_credentials\"), False) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\":", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--rosclipse\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_rosclipse\"), True) exitcode, stdout =", "exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\"), [\"alpha\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), [])", "\"--pinned\", \"beta\") self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) self.assertEqual(self.get_config_value(\"pinned_build\"),", "self.assertEqual(self.get_config_value(\"use_env_cache\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-env-cache\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_env_cache\"), False)", "Gitlab server without URL\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"does_not_exist\")", "self.assertIn(\"missing system package\", stdout) exitcode, stdout = 
helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--clean\") self.assertEqual(exitcode, 0)", "test_upgrade_from_older_version_3(self): \"\"\"Test if workspaces from rosrepo 3.x are upgraded to latest version\"\"\" exitcode,", "helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--pinned\", \"beta\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir)", "= helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-default\", \"alpha\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\", []), [\"alpha\"]) exitcode, stdout", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir) self.assertEqual(exitcode, 1) self.assertIn(\"cannot detect ROS distribution\", stdout) ####################### exitcode,", "\"-w\", self.wsdir, \"--set-pinned\") self.assertEqual(exitcode, 1) self.assertIn(\"no packages given\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\",", "for Gitlab server without URL\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\",", "0) os.makedirs(os.path.join(self.wsdir, \"build\")) exitcode, stdout = helper.run_rosrepo(\"clean\", \"-w\", self.wsdir, \"--dry-run\") self.assertEqual(exitcode, 0) self.assertTrue(os.path.isdir(os.path.join(self.wsdir,", "\"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--pinned\",", "test_list(self): \"\"\"Test proper behavior of 'rosrepo list'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir,", "or implied. 
# See the License for the specific language governing permissions and", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"),", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\"), []) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"beta\\ndelta\\n\") ) exitcode,", "self.wsdir) self.assertEqual(exitcode, 0) self.assertFalse(os.path.isdir(os.path.join(self.wsdir, \"build\"))) self.assertEqual(self.get_config_value(\"default_build\", []), []) self.assertEqual(self.get_config_value(\"pinned_build\", []), []) def test_build(self):", "\"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\", \"--store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"store_credentials\"), True) exitcode,", "\"src\", \"beta\")) os.symlink(os.path.join(\"..\", \"repos\", \"gamma\"), os.path.join(self.wsdir, \"src\", \"gamma\")) os.symlink(os.path.join(\"..\", \"repos\", \"delta\"), os.path.join(self.wsdir, \"src\",", "os.path.join(self.wsdir, \"src\", \"alpha\")) os.symlink(os.path.join(\"..\", \"repos\", \"beta\"), os.path.join(self.wsdir, \"src\", \"beta\")) os.symlink(os.path.join(\"..\", \"repos\", \"gamma\"), os.path.join(self.wsdir,", "stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-logout\", \"does_not_exist\") self.assertEqual(exitcode, 1) self.assertIn(\"no such", "self.wsdir, \"--pinned\", \"epsilon\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\", stdout) exitcode, stdout = helper.run_rosrepo(\"include\",", "\"-w\", self.wsdir, \"--default\", \"--all\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\", stdout) 
self.assertEqual(self.get_config_value(\"default_build\"), [\"beta\"]) self.assertEqual(self.get_config_value(\"pinned_build\"),", "(0, \"\\n\") ) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--get-gitlab-url\", \"Test\"), (0, \"http://localhost\\n\") ) self.assertEqual(", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-ros-root\", self.ros_root_dir) self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"ros_root\"), self.ros_root_dir) ####################### exitcode,", "= helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir, \"-a\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\"), []) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir,", "= Config(self.wsdir) cfg[\"version\"] = \"3.0.0a0\" cfg.write() self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\")", "cfg = Config(self.wsdir) cfg[\"version\"] = \"3.0.0a0\" cfg.write() self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0,", "self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode,", "\"url\": \"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"Test\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode,", "self.wsdir, \"--clean\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--clean\", \"--dry-run\", \"--offline\",", "\"-w\", self.wsdir, \"--unset-gitlab-url\", \"Test\") self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--show-gitlab-urls\", \"--autocomplete\"), (0,", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-compiler\", \"clang\") self.assertEqual(exitcode, 0) 
self.assertEqual(self.get_config_value(\"compiler\"), \"clang\") exitcode, stdout = helper.run_rosrepo(\"config\",", "proper behavior of 'rosrepo list'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode,", "% (self.wsdir, d) for d in [\"alpha\", \"beta\", \"gamma\"]] + [\"/after\"]) self.assertEqual( helper.run_rosrepo(\"bash\",", "0) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--clean\", \"--dry-run\", \"--offline\", \"--verbose\", \"--no-status\", \"--keep-going\",", "\"--unset-ros-root\") self.assertEqual(self.get_config_value(\"ros_root\"), None) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-ros-root\", self.ros_root_dir) self.assertEqual(exitcode, 0)", "private token in offline mode\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\",", "self.assertEqual(exitcode, 1) self.assertIn(\"cannot detect ROS distribution\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir,", "self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 0)", "\"--all\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir,", "self.assertEqual(exitcode, 0) self.assertFalse(os.path.isdir(os.path.join(self.wsdir, \"build\"))) def test_upgrade_from_version_1(self): \"\"\"Test if workspaces from rosrepo 1.x are", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-env-cache\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_env_cache\"), False) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-gitlab-url\", \"Test\") self.assertEqual(exitcode, 0) cfg = Config(self.wsdir) 
cfg[\"gitlab_servers\"] = [{\"label\":", "self.ros_root_dir) self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"ros_root\"), self.ros_root_dir) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-catkin-lint\")", "os.makedirs(os.path.join(self.wsdir, \"build\")) exitcode, stdout = helper.run_rosrepo(\"init\", \"--reset\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) self.assertFalse(os.path.isdir(os.path.join(self.wsdir,", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"Test\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\":", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-crawl-depth\", \"1\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_crawl_depth\"), 1) #######################", "use this file except in compliance with the License. 
# You may obtain", "\"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>token\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\",", "self.assertEqual(exitcode, 0) cfg = Config(self.wsdir) cfg[\"gitlab_servers\"] = [{\"label\": \"NoURL\"}] cfg.write() exitcode, stdout =", "[\"broken\"]) helper.create_package(self.wsdir, \"broken\", [\"missing\"]) helper.create_package(self.wsdir, \"incomplete\", [\"missing-system\"]) helper.create_package(self.wsdir, \"ancient\", [], deprecated=True) helper.create_package(self.wsdir, \"ancient2\",", "\"--default\", \"incomplete\") self.assertEqual(exitcode, 0) self.assertIn(\"apt-get install\", stdout) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir,", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-job-limit\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), None) ####################### exitcode, stdout =", "\"beta\", \"gamma\"]] + [\"/after\"]) self.assertEqual( helper.run_rosrepo(\"bash\", \"-w\", self.wsdir), (0, \"ROS_WORKSPACE=%(wsdir)s\\nROS_PACKAGE_PATH=/before%(sep)s%(wsdir)s/src%(sep)s/after\\n\" % {\"wsdir\": self.wsdir,", "= helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir,", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--install\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"install\"), True) exitcode, stdout", "True}, \"gamma\": {\"auto\": True, \"pin\": False}, \"delta\": {\"auto\": True, \"pin\": False}, }, default_flow_style=False", "os.environ[blacklisted_key] os.environ[\"HOME\"] = self.homedir os.environ[\"XDG_CONFIG_HOME\"] = os.path.join(self.homedir, \".config\") def tearDown(self): shutil.rmtree(self.wsdir, ignore_errors=True) shutil.rmtree(self.homedir,", "self.assertEqual(exitcode, 0) 
exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"Test\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode,", "shutil import yaml import pickle from tempfile import mkdtemp try: from mock import", "1) self.assertIn(\"cannot detect ROS distribution\", stdout) os.environ[\"HOME\"] = self.wsdir exitcode, stdout = helper.run_rosrepo(\"init\",", "self.assertEqual(self.get_config_value(\"default_build\"), []) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"beta\\ndelta\\n\") ) exitcode, stdout =", "cfg = Config(self.wsdir, read_only=True) return cfg.get(key, default) def test_bash(self): \"\"\"Test proper behavior of", "= helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 1) self.assertIn(\"catkin_lint reported errors\", stdout) helper.failing_programs =", "1.x are migrated properly\"\"\" os.rename(os.path.join(self.wsdir, \"src\"), os.path.join(self.wsdir, \"repos\")) os.makedirs(os.path.join(self.wsdir, \"src\")) with open(os.path.join(self.wsdir, \"src\",", "cfg[\"gitlab_servers\"] = [{\"label\": \"NoURL\"}] cfg.write() exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"NoURL\")", "Manage ROS workspaces with multiple Gitlab repositories # # Author: <NAME> # #", "(0, \"http://localhost\\n\") ) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--show-gitlab-urls\", \"--autocomplete\"), (0, \"Test\\n\") ) exitcode,", "False) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "x: None): exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir) self.assertEqual(exitcode, 1) self.assertIn(\"cannot detect ROS", "[\"ROS_WORKSPACE\", \"ROS_PACKAGE_PATH\"]: if blacklisted_key in os.environ: del os.environ[blacklisted_key] 
os.environ[\"HOME\"] = self.homedir os.environ[\"XDG_CONFIG_HOME\"] =", "ignore_errors=True) self.ros_root_dir = None self.wsdir = None def get_config_value(self, key, default=None): cfg =", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--remove-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode, stdout", "stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 1) self.assertIn(\"catkin_lint reported errors\", stdout) helper.failing_programs", "for the specific language governing permissions and # limitations under the License. #", "filter\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-S\") self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout)", "self.wsdir, \"--clean\", \"--dry-run\", \"--offline\", \"--verbose\", \"--no-status\", \"--keep-going\", \"-j2\") self.assertEqual(exitcode, 0) def test_list(self): \"\"\"Test", "of 'rosrepo config'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) with", "stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-pinned\", \"beta\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"pinned_build\", []),", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_rosclipse\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-rosclipse\") self.assertEqual(exitcode, 0)", "# limitations under the License. 
# # import unittest import os import shutil", "self.assertEqual(self.get_config_value(\"ros_root\"), None) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-ros-root\", self.ros_root_dir) self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"ros_root\"),", ") exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--pinned\", \"beta\") self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"list\",", "\"-w\", self.wsdir, \"--remove-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode, stdout =", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) exitcode, stdout = helper.run_rosrepo(\"config\",", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-gitlab-url\", \"Test\") self.assertEqual(exitcode, 0) cfg = Config(self.wsdir) cfg[\"gitlab_servers\"]", "config'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) with patch(\"rosrepo.cmd_config.find_ros_root\", lambda", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_env_cache\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-env-cache\") self.assertEqual(exitcode, 0)", "stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--clean\") self.assertEqual(exitcode, 0) exitcode, stdout =", "None) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-crawl-depth\", \"2\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_crawl_depth\"),", "\"-W\") self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"epsilon\", stdout) def 
test_config(self): \"\"\"Test proper behavior of", "[]) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--pinned\", \"epsilon\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve", "0) self.assertEqual( helper.run_rosrepo(\"bash\", \"-w\", self.wsdir, \"ROS_WORKSPACE\", \"ROS_PACKAGE_PATH\", \"PATH\", \"UNKNOWN\"), (0, \"ROS_WORKSPACE=%(wsdir)s\\nROS_PACKAGE_PATH=%(wsdir)s/src\\nPATH=%(env_path)s\\n# variable UNKNOWN", "0) exitcode, stdout = helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir, \"--pinned\", \"beta\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\"), [\"beta\"])", "License. # # import unittest import os import shutil import yaml import pickle", "\"--unset-compiler\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"compiler\"), None) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-crawl-depth\",", "\"http://localhost\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"Test\",", "from future rosrepo versions are detected\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir)", "= True metadata[\"beta\"].selected = True metadata[\"beta\"].pinned = True f.write(pickle.dumps(metadata)) exitcode, stdout = helper.run_rosrepo(\"init\",", "helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir, \"--all\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir) self.assertEqual(exitcode,", "proper behavior of 'rosrepo config'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode,", "= helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--clean\", \"--dry-run\", \"--offline\", \"--verbose\", \"--no-status\", \"--keep-going\", \"-j2\") 
self.assertEqual(exitcode, 0)", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "self.assertIn(\"cannot resolve dependencies\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-default\") self.assertEqual(exitcode, 1)", "exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\", stdout) self.assertNotIn(\"gamma\", stdout) self.assertIn(\"beta\",", "self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\",", "\"1\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_crawl_depth\"), 1) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\",", "os.symlink(os.path.join(\"..\", \"repos\", \"alpha\"), os.path.join(self.wsdir, \"src\", \"alpha\")) os.symlink(os.path.join(\"..\", \"repos\", \"beta\"), os.path.join(self.wsdir, \"src\", \"beta\")) os.symlink(os.path.join(\"..\",", "exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"ancient\", \"ancient2\") self.assertEqual(exitcode, 0) self.assertIn(\"is deprecated\",", "\"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\", \"--store-credentials\")", "default=None): cfg = Config(self.wsdir, read_only=True) return cfg.get(key, default) def test_bash(self): \"\"\"Test proper behavior", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "0) self.assertEqual(self.get_config_value(\"store_credentials\"), False) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) exitcode, stdout =", "\"--gitlab-login\", 
\"Test\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) exitcode, stdout", "[{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\",", "1) self.assertIn(\"cannot acquire token for Gitlab server without URL\", stdout) exitcode, stdout =", "\"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\", \"--store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\",", "\"<PASSWORD>\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"Test\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\":", "= helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--pinned\", \"beta\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\",", "1) self.assertIn(\"cannot resolve dependencies\", stdout) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"epsilon\")", "open(os.path.join(self.wsdir, \".catkin_workspace\"), \"w\"): pass os.symlink(os.path.join(\"..\", \"repos\", \"alpha\"), os.path.join(self.wsdir, \"src\", \"alpha\")) os.symlink(os.path.join(\"..\", \"repos\", \"beta\"),", "False}, }, default_flow_style=False )) exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0)", "0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-catkin-lint\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"),", "stdout = helper.run_rosrepo(\"list\", \"-w\", 
self.wsdir, \"-n\") self.assertEqual(exitcode, 1) self.assertIn(\"newer version\", stdout) def test_buildset(self):", "\"--store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"store_credentials\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-store-credentials\") self.assertEqual(exitcode,", "def test_init_failures(self): \"\"\"Test proper behavior of 'rosrepo init'\"\"\" with patch(\"rosrepo.cmd_init.find_ros_root\", lambda x: None):", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--gitlab-login\", \"Test\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot acquire Gitlab private", "'rosrepo clean'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) os.makedirs(os.path.join(self.wsdir, \"build\"))", "install\", stdout) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"ancient\", \"ancient2\") self.assertEqual(exitcode, 0)", "'rosrepo build'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) exitcode, stdout", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-rosclipse\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_rosclipse\"), False) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"1\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"build\",", "open(os.path.join(self.wsdir, \"src\", \"toplevel.cmake\"), \"w\"): pass with open(os.path.join(self.wsdir, \".catkin_workspace\"), \"w\"): pass os.symlink(os.path.join(\"..\", \"repos\", \"alpha\"),", "with the License. 
# You may obtain a copy of the License at", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--show-gitlab-urls\", \"--autocomplete\"), (0, \"Test\\n\") ) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "\"--dry-run\", \"alpha\") self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"gamma\", stdout) self.assertIn(\"delta\", stdout) exitcode,", "\"alpha\", [\"beta\", \"gamma\", \"installed-system\"]) helper.create_package(self.wsdir, \"beta\", [\"delta\"]) helper.create_package(self.wsdir, \"gamma\", []) helper.create_package(self.wsdir, \"delta\", [])", "behavior of 'rosrepo build'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0)", "\"--gitlab-login\", \"Test\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot acquire Gitlab private token in offline mode\", stdout)", "= helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"incomplete\") self.assertEqual(exitcode, 1) self.assertIn(\"missing system package\", stdout) exitcode, stdout", "exclude'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) exitcode, stdout =", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"NoURL\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot acquire token for", "\"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--get-gitlab-url\", \"does_not_exist\"), (0, \"\\n\") ) self.assertEqual(", "\"--offline\", \"--verbose\", \"--no-status\", \"--keep-going\", \"-j2\") self.assertEqual(exitcode, 0) def test_list(self): \"\"\"Test proper behavior of", "helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\") self.assertEqual(exitcode, 1) self.assertIn(\"newer version\", stdout) def test_buildset(self): \"\"\"Test proper", "\"--offline\", \"--gitlab-login\", 
\"Test\") self.assertEqual(exitcode, 0) self.assertIn(\"cannot verify Gitlab private token in offline mode\",", "acquire Gitlab private token in offline mode\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "\"alpha\\nbeta\\ndelta\\ngamma\\n\") ) from rosrepo import __version__ as rosrepo_version self.assertEqual(self.get_config_value(\"version\"), rosrepo_version) def test_incompatible_new_version(self): \"\"\"Test", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--install\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"install\"), True) exitcode, stdout =", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"16\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), 16) exitcode, stdout = helper.run_rosrepo(\"config\",", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-catkin-lint\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"), False) ####################### exitcode, stdout = helper.run_rosrepo(\"config\",", "None): exitcode, stdout = helper.run_rosrepo(\"init\", self.wsdir) self.assertEqual(exitcode, 1) self.assertIn(\"cannot detect ROS distribution\", stdout)", "= helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) with patch(\"rosrepo.cmd_config.find_ros_root\", lambda x: None): exitcode,", "law or agreed to in writing, software # distributed under the License is", "# import unittest import os import shutil import yaml import pickle from tempfile", "self.wsdir, \"--no-install\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"install\"), False) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "os import shutil import yaml import pickle from tempfile import mkdtemp try: from", "= helper.run_rosrepo(\"clean\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0) 
self.assertFalse(os.path.isdir(os.path.join(self.wsdir, \"build\"))) def test_upgrade_from_version_1(self): \"\"\"Test if workspaces", "self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"gamma\", stdout) self.assertIn(\"delta\", stdout) self.assertNotIn(\"epsilon\", stdout) exitcode,", "None def get_config_value(self, key, default=None): cfg = Config(self.wsdir, read_only=True) return cfg.get(key, default) def", "os.makedirs(os.path.join(self.wsdir, \"build\")) exitcode, stdout = helper.run_rosrepo(\"clean\", \"-w\", self.wsdir, \"--dry-run\") self.assertEqual(exitcode, 0) self.assertTrue(os.path.isdir(os.path.join(self.wsdir, \"build\")))", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-env-cache\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_env_cache\"), False) ####################### def test_init_failures(self): \"\"\"Test proper", "self.wsdir, \"--offline\", \"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot acquire Gitlab private token in", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"8\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), 8) exitcode, stdout = helper.run_rosrepo(\"config\",", "in os.environ: del os.environ[blacklisted_key] os.environ[\"HOME\"] = self.homedir os.environ[\"XDG_CONFIG_HOME\"] = os.path.join(self.homedir, \".config\") def tearDown(self):", "{\"wsdir\": self.wsdir, \"env_path\": os.environ[\"PATH\"]}) ) os.environ[\"ROS_PACKAGE_PATH\"] = os.pathsep.join([\"/before\"] + [\"%s/src/%s\" % (self.wsdir, d)", "exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-PD\") self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout)", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-install\") self.assertEqual(exitcode, 0) 
self.assertEqual(self.get_config_value(\"install\"), False) ####################### exitcode, stdout", "\"t0ps3cr3t\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--remove-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\",", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-catkin-lint\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"), False) ####################### exitcode, stdout =", "\"--set-default\", \"alpha\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\", []), [\"alpha\"]) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir,", "in compliance with the License. # You may obtain a copy of the", "\"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode, stdout =", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "\"w\"): pass os.symlink(os.path.join(\"..\", \"repos\", \"alpha\"), os.path.join(self.wsdir, \"src\", \"alpha\")) os.symlink(os.path.join(\"..\", \"repos\", \"beta\"), os.path.join(self.wsdir, \"src\",", "self.wsdir, \"--dry-run\") self.assertEqual(exitcode, 1) self.assertIn(\"no packages to build\", stdout) helper.failing_programs = [\"catkin_lint\"] exitcode,", "= helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-BC\") self.assertEqual(exitcode, 0) self.assertIn(\"search filter\", stdout) exitcode, stdout =", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\", \"--store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"store_credentials\"), True)", "True) exitcode, stdout = helper.run_rosrepo(\"config\", 
\"-w\", self.wsdir, \"--no-store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"store_credentials\"), False) self.assertEqual(self.get_config_value(\"gitlab_servers\"),", "self.assertIn(\"Walking Dead\", stdout) os.makedirs(os.path.join(self.wsdir, \"build\")) exitcode, stdout = helper.run_rosrepo(\"init\", \"--reset\", \"-r\", self.ros_root_dir, self.wsdir)", "helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"alpha\")", "self.wsdir, \"-Pv\") self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertNotIn(\"beta\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout =", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"Test\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\",", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-logout\", \"Test\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\":", "\"--gitlab-logout\", \"does_not_exist\") self.assertEqual(exitcode, 1) self.assertIn(\"no such Gitlab server\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\",", "\"--dry-run\", \"--offline\", \"--verbose\", \"--no-status\", \"--keep-going\", \"-j2\") self.assertEqual(exitcode, 0) def test_list(self): \"\"\"Test proper behavior", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_env_cache\"), False) ####################### def test_init_failures(self): \"\"\"Test proper behavior of 'rosrepo init'\"\"\"", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--catkin-lint\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"), True) 
exitcode, stdout = helper.run_rosrepo(\"config\",", "\"--reset\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) self.assertFalse(os.path.isdir(os.path.join(self.wsdir, \"build\"))) self.assertEqual(self.get_config_value(\"default_build\", []), []) self.assertEqual(self.get_config_value(\"pinned_build\", []),", "self.assertEqual(self.get_config_value(\"default_build\", []), []) self.assertEqual(self.get_config_value(\"pinned_build\", []), []) def test_build(self): \"\"\"Test proper behavior of 'rosrepo", "stdout) self.assertNotIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-Pv\") self.assertEqual(exitcode, 0) self.assertIn(\"alpha\",", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "\"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"Test\")", "\"-w\", self.wsdir, \"--set-gitlab-crawl-depth\", \"1\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_crawl_depth\"), 1) ####################### exitcode, stdout = helper.run_rosrepo(\"config\",", "self.wsdir, \"--set-pinned\") self.assertEqual(exitcode, 1) self.assertIn(\"no packages given\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\",", "\"2\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_crawl_depth\"), 2) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-crawl-depth\", \"1\")", "def test_incompatible_new_version(self): \"\"\"Test if workspaces from future rosrepo versions are detected\"\"\" exitcode, stdout", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\",", "self.assertIn(\"cannot resolve 
dependencies\", stdout) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"--all\") self.assertEqual(exitcode,", "self.assertIn(\"alpha\", stdout) self.assertNotIn(\"beta\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-PD\")", "####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--store-credentials\") self.assertEqual(exitcode, 0)", "\"<PASSWORD>\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "self.wsdir, \"--dry-run\") self.assertEqual(exitcode, 0) self.assertTrue(os.path.isdir(os.path.join(self.wsdir, \"build\"))) exitcode, stdout = helper.run_rosrepo(\"clean\", \"-w\", self.wsdir) self.assertEqual(exitcode,", "stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"ancient\", \"ancient2\") self.assertEqual(exitcode, 0) self.assertIn(\"is deprecated\", stdout)", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode, 0) self.assertIn(\"cannot verify Gitlab", "= os.path.join(self.homedir, \".config\") def tearDown(self): shutil.rmtree(self.wsdir, ignore_errors=True) shutil.rmtree(self.homedir, ignore_errors=True) shutil.rmtree(self.ros_root_dir, ignore_errors=True) self.ros_root_dir =", "0) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) self.assertEqual(self.get_config_value(\"default_build\"), [\"alpha\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"])", "= helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"beta\") self.assertEqual(exitcode, 0) exitcode, stdout = 
helper.run_rosrepo(\"exclude\", \"-w\",", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-compiler\", \"does_not_exist\") self.assertEqual(exitcode, 1) self.assertIn(\"unknown compiler\", stdout) self.assertEqual(self.get_config_value(\"compiler\"), \"clang\")", "self.wsdir, \"--job-limit\", \"16\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), 16) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "\"\"\"Test if workspaces from rosrepo 2.x are migrated properly\"\"\" with open(os.path.join(self.wsdir, \".catkin_workspace\"), \"w\"):", "latest version\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) exitcode, stdout", "\".catkin_workspace\"), \"w\"): pass os.makedirs(os.path.join(self.wsdir, \".catkin_tools\", \"profiles\", \"rosrepo\")) os.makedirs(os.path.join(self.wsdir, \".rosrepo\")) from rosrepo.common import PkgInfo", "distribution\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--all\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve", "{\"auto\": True, \"pin\": False}, \"delta\": {\"auto\": True, \"pin\": False}, }, default_flow_style=False )) exitcode,", "self.assertIn(\"yes\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-logout\", \"does_not_exist\") self.assertEqual(exitcode, 1) self.assertIn(\"no", "\"\"\"Test if workspaces from future rosrepo versions are detected\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\",", "<reponame>fkie/rosrepo # coding=utf-8 # # ROSREPO # Manage ROS workspaces with multiple Gitlab", "\"--job-limit\", \"0\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), None) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\",", "1) self.assertIn(\"cannot resolve dependencies\", stdout) 
self.assertEqual(self.get_config_value(\"default_build\"), [\"beta\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), []) exitcode, stdout = helper.run_rosrepo(\"include\",", "\"--clean\", \"--dry-run\", \"--offline\", \"--verbose\", \"--no-status\", \"--keep-going\", \"-j2\") self.assertEqual(exitcode, 0) def test_list(self): \"\"\"Test proper", "\"-w\", self.wsdir, \"--get-gitlab-url\", \"does_not_exist\"), (0, \"\\n\") ) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--get-gitlab-url\", \"Test\"),", "= helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\") self.assertEqual(exitcode, 1) self.assertIn(\"newer version\", stdout) def test_buildset(self): \"\"\"Test", "0) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"Test\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0)", "self.assertIn(\"apt-get install\", stdout) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"ancient\", \"ancient2\") self.assertEqual(exitcode,", "\"-PD\") self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\",", "\"<PASSWORD>\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode,", "exitcode, stdout = helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir, \"--all\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"build\",", "\"alpha\") self.assertEqual(exitcode, 0) cfg = Config(self.wsdir) cfg[\"version\"] = \"3.0.0a0\" cfg.write() self.assertEqual( helper.run_rosrepo(\"list\", \"-w\",", "\"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\", \"--store-credentials\") self.assertEqual(exitcode, 0) 
self.assertEqual(self.get_config_value(\"store_credentials\"), True) exitcode, stdout =", "\"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-gitlab-url\", \"Test\") self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"config\",", "\"Test\") self.assertEqual(exitcode, 0) self.assertIn(\"cannot verify Gitlab private token in offline mode\", stdout) exitcode,", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"store_credentials\"), False) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\",", "lambda x: None): exitcode, stdout = helper.run_rosrepo(\"init\", self.wsdir) self.assertEqual(exitcode, 1) self.assertIn(\"cannot detect ROS", "self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir, \"--pinned\", \"beta\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\"),", "self.assertNotIn(\"beta\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-P\") self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\", stdout)", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--show-gitlab-urls\") self.assertEqual(exitcode, 0) self.assertIn(\"Test\", stdout) self.assertIn(\"http://localhost\", stdout) self.assertIn(\"yes\", stdout)", "ignore_errors=True) shutil.rmtree(self.ros_root_dir, ignore_errors=True) self.ros_root_dir = None self.wsdir = None def get_config_value(self, key, default=None):", "\"-w\", self.wsdir, \"-n\") self.assertEqual(exitcode, 1) self.assertIn(\"newer version\", stdout) def test_buildset(self): \"\"\"Test proper behavior", "stdout) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"--all\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve", "exitcode, stdout = 
helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--dry-run\") self.assertEqual(exitcode, 1) self.assertIn(\"no packages to build\",", "with patch(\"rosrepo.cmd_build.find_ros_root\", lambda x: None): exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode,", "None) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"8\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), 8)", "the specific language governing permissions and # limitations under the License. # #", "self.wsdir, \"--env-cache\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_env_cache\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-env-cache\")", "cfg[\"version\"] = \"3.0.0a0\" cfg.write() self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) from", "patch import sys sys.stderr = sys.stdout from rosrepo.config import Config import test.helper as", "self.wsdir, \"--unset-compiler\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"compiler\"), None) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "0) self.assertEqual(self.get_config_value(\"install\"), False) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-compiler\", \"clang\") self.assertEqual(exitcode,", "[{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--get-gitlab-url\", \"does_not_exist\"), (0,", "proper behavior of 'rosrepo include' and 'rosrepo exclude'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\",", "\"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, 
\"--set-gitlab-url\", \"Test\", \"http://localhost\")", "self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"]) exitcode, stdout =", "[\"%s/src/%s\" % (self.wsdir, d) for d in [\"alpha\", \"beta\", \"gamma\"]] + [\"/after\"]) self.assertEqual(", "from rosrepo import __version__ as rosrepo_version self.assertEqual(self.get_config_value(\"version\"), rosrepo_version) def test_incompatible_new_version(self): \"\"\"Test if workspaces", "helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\"), [\"alpha\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), []) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\",", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"NoURL\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot acquire token for Gitlab", "self.assertIn(\"no packages given\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-pinned\", \"beta\") self.assertEqual(exitcode,", "0) self.assertEqual(self.get_config_value(\"gitlab_crawl_depth\"), 1) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\",", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "self.assertIn(\"Test\", stdout) self.assertIn(\"http://localhost\", stdout) self.assertIn(\"yes\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-logout\",", "UNKNOWN is not set\\n\" % {\"wsdir\": self.wsdir, \"env_path\": os.environ[\"PATH\"]}) ) os.environ[\"ROS_PACKAGE_PATH\"] = os.pathsep.join([\"/before\"]", "= helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"epsilon\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve 
dependencies\", stdout) exitcode,", "= helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0,", "exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-S\") self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertNotIn(\"beta\", stdout)", "exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 1) self.assertIn(\"catkin_lint reported errors\", stdout)", "import Config import test.helper as helper class WorkspaceTest(unittest.TestCase): def setUp(self): self.ros_root_dir = mkdtemp()", "= self.wsdir exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 1) self.assertIn(\"$HOME\", stdout)", "self.assertEqual(self.get_config_value(\"use_env_cache\"), False) ####################### def test_init_failures(self): \"\"\"Test proper behavior of 'rosrepo init'\"\"\" with patch(\"rosrepo.cmd_init.find_ros_root\",", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\",", "\"--dry-run\") self.assertEqual(exitcode, 1) self.assertIn(\"no packages to build\", stdout) helper.failing_programs = [\"catkin_lint\"] exitcode, stdout", "\"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) self.assertEqual(self.get_config_value(\"default_build\"), [\"alpha\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"]) def test_upgrade_from_older_version_3(self): \"\"\"Test", "\"--clean\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--clean\", \"--dry-run\", \"--offline\", \"--verbose\",", "stdout) exitcode, stdout = helper.run_rosrepo(\"build\", 
\"-w\", self.wsdir, \"incomplete\") self.assertEqual(exitcode, 1) self.assertIn(\"missing system package\",", "\".metainfo\"), \"w\") as f: f.write(yaml.safe_dump( { \"alpha\": {\"auto\": False, \"pin\": False}, \"beta\": {\"auto\":", "this file except in compliance with the License. # You may obtain a", "= helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "behavior of 'rosrepo clean'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0)", "helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"bash\", \"-w\", self.wsdir, \"ROS_WORKSPACE\", \"ROS_PACKAGE_PATH\", \"PATH\",", "\"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--get-gitlab-url\", \"does_not_exist\"), (0, \"\\n\") )", "0) self.assertEqual(self.get_config_value(\"default_build\", []), [\"alpha\"]) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-pinned\") self.assertEqual(exitcode, 1)", "self.wsdir = mkdtemp() self.homedir = mkdtemp() helper.create_fake_ros_root(self.ros_root_dir) helper.create_package(self.wsdir, \"alpha\", [\"beta\", \"gamma\", \"installed-system\"]) helper.create_package(self.wsdir,", "\"--show-gitlab-urls\") self.assertEqual(exitcode, 0) self.assertIn(\"Test\", stdout) self.assertIn(\"http://localhost\", stdout) self.assertIn(\"yes\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\",", "self.wsdir, \"--no-store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"store_credentials\"), False) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}])", "\"private_token\": 
\"<PASSWORD>\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"Test\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"),", "\"-w\", self.wsdir, \"--set-ros-root\", self.ros_root_dir) self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"ros_root\"), self.ros_root_dir) ####################### exitcode, stdout = helper.run_rosrepo(\"config\",", "the License. # # import unittest import os import shutil import yaml import", "\"http://localhost\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode, stdout =", "\"-w\", self.wsdir, \"--default\", \"beta\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir, \"--pinned\",", "self.wsdir, \"--job-limit\", \"1\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--dry-run\") self.assertEqual(exitcode,", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"1\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\",", "stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"epsilon\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\", stdout)", "self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"1\") self.assertEqual(exitcode, 0) exitcode,", "\"toplevel.cmake\"), \"w\"): pass with open(os.path.join(self.wsdir, \".catkin_workspace\"), \"w\"): pass os.symlink(os.path.join(\"..\", \"repos\", \"alpha\"), os.path.join(self.wsdir, \"src\",", "self.assertEqual(self.get_config_value(\"use_catkin_lint\"), False) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-rosclipse\") 
self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_rosclipse\"),", "self.assertIn(\"http://localhost\", stdout) self.assertIn(\"yes\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-logout\", \"does_not_exist\") self.assertEqual(exitcode,", "def test_config(self): \"\"\"Test proper behavior of 'rosrepo config'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\",", "self.assertIn(\"cannot detect ROS distribution\", stdout) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\",", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"16\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), 16) exitcode,", "\"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\",", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"does_not_exist\") self.assertEqual(exitcode, 1) self.assertIn(\"no such Gitlab server\", stdout) #######################", "server\", stdout) ####################### self.assertEqual(self.get_config_value(\"ros_root\"), self.ros_root_dir) helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-ros-root\") self.assertEqual(self.get_config_value(\"ros_root\"), None) exitcode, stdout", "0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\",", "rosrepo 2.x are migrated properly\"\"\" with open(os.path.join(self.wsdir, \".catkin_workspace\"), \"w\"): pass os.makedirs(os.path.join(self.wsdir, \".catkin_tools\", \"profiles\",", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-gitlab-url\", \"Test\") self.assertEqual(exitcode, 0) cfg = 
Config(self.wsdir) cfg[\"gitlab_servers\"] =", "\"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--set-gitlab-url\",", "None): exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir) self.assertEqual(exitcode, 1) self.assertIn(\"cannot detect ROS distribution\",", "behavior of 'rosrepo config'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0)", "resolve dependencies\", stdout) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"--all\") self.assertEqual(exitcode, 1)", "[], deprecated=\"Walking Dead\") for blacklisted_key in [\"ROS_WORKSPACE\", \"ROS_PACKAGE_PATH\"]: if blacklisted_key in os.environ: del", "self.wsdir, \"--set-default\", \"alpha\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\", []), [\"alpha\"]) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\",", "stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 0) cfg = Config(self.wsdir) cfg[\"version\"] =", "pass os.makedirs(os.path.join(self.wsdir, \".catkin_tools\", \"profiles\", \"rosrepo\")) os.makedirs(os.path.join(self.wsdir, \".rosrepo\")) from rosrepo.common import PkgInfo with open(os.path.join(self.wsdir,", "stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-Pv\") self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertNotIn(\"beta\", stdout) self.assertIn(\"delta\",", "\"beta\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\"), [\"beta\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), []) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir,", "self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) 
exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"gamma\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir,", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--rosclipse\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_rosclipse\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-logout\", \"does_not_exist\") self.assertEqual(exitcode, 1) self.assertIn(\"no such Gitlab", "for blacklisted_key in [\"ROS_WORKSPACE\", \"ROS_PACKAGE_PATH\"]: if blacklisted_key in os.environ: del os.environ[blacklisted_key] os.environ[\"HOME\"] =", "\"--gitlab-login\", \"Test\") self.assertEqual(exitcode, 0) self.assertIn(\"cannot verify Gitlab private token in offline mode\", stdout)", "\"epsilon\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\", stdout) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir,", "\"--no-env-cache\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_env_cache\"), False) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--env-cache\") self.assertEqual(exitcode,", "yaml import pickle from tempfile import mkdtemp try: from mock import patch except", "\"beta\")) os.symlink(os.path.join(\"..\", \"repos\", \"gamma\"), os.path.join(self.wsdir, \"src\", \"gamma\")) os.symlink(os.path.join(\"..\", \"repos\", \"delta\"), os.path.join(self.wsdir, \"src\", \"delta\"))", "os.makedirs(os.path.join(self.wsdir, \"src\")) with open(os.path.join(self.wsdir, \"src\", \"CMakeLists.txt\"), \"w\"): pass with open(os.path.join(self.wsdir, \"src\", \"toplevel.cmake\"), \"w\"):", "= helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"include\", 
\"-w\", self.wsdir,", "\"-w\", self.wsdir, \"--gitlab-login\", \"does_not_exist\") self.assertEqual(exitcode, 1) self.assertIn(\"no such Gitlab server\", stdout) ####################### self.assertEqual(self.get_config_value(\"ros_root\"),", "import PkgInfo with open(os.path.join(self.wsdir, \".rosrepo\", \"info\"), \"wb\") as f: metadata = {} metadata[\"alpha\"]", "\"http://localhost\", \"private_token\": \"t0ps3cr3t\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--remove-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"),", "with open(os.path.join(self.wsdir, \"src\", \"CMakeLists.txt\"), \"w\"): pass with open(os.path.join(self.wsdir, \"src\", \"toplevel.cmake\"), \"w\"): pass with", "stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--dry-run\", \"alpha\") self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout)", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "stdout = helper.run_rosrepo(\"clean\", \"-w\", self.wsdir, \"--dry-run\") self.assertEqual(exitcode, 0) self.assertTrue(os.path.isdir(os.path.join(self.wsdir, \"build\"))) exitcode, stdout =", "\"url\": \"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode, 0)", "test_build(self): \"\"\"Test proper behavior of 'rosrepo build'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir,", "as rosrepo_version self.assertEqual(self.get_config_value(\"version\"), rosrepo_version) def test_incompatible_new_version(self): \"\"\"Test if workspaces from future rosrepo versions", "self.assertEqual(exitcode, 1) self.assertIn(\"$HOME\", stdout) exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, os.path.normpath(os.path.join(os.path.dirname(__file__), os.pardir))) 
self.assertEqual(exitcode,", "self.wsdir, \"ROS_WORKSPACE\", \"ROS_PACKAGE_PATH\", \"PATH\", \"UNKNOWN\"), (0, \"ROS_WORKSPACE=%(wsdir)s\\nROS_PACKAGE_PATH=%(wsdir)s/src\\nPATH=%(env_path)s\\n# variable UNKNOWN is not set\\n\" %", "\"beta\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout)", "in [\"ROS_WORKSPACE\", \"ROS_PACKAGE_PATH\"]: if blacklisted_key in os.environ: del os.environ[blacklisted_key] os.environ[\"HOME\"] = self.homedir os.environ[\"XDG_CONFIG_HOME\"]", "[]), []) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\"), [\"alpha\"])", "versions are detected\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) cfg", "\"gamma\", \"installed-system\"]) helper.create_package(self.wsdir, \"beta\", [\"delta\"]) helper.create_package(self.wsdir, \"gamma\", []) helper.create_package(self.wsdir, \"delta\", []) helper.create_package(self.wsdir, \"epsilon\",", "\"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) with patch(\"rosrepo.cmd_config.find_ros_root\", lambda x: None): exitcode, stdout =", "helper.create_fake_ros_root(self.ros_root_dir) helper.create_package(self.wsdir, \"alpha\", [\"beta\", \"gamma\", \"installed-system\"]) helper.create_package(self.wsdir, \"beta\", [\"delta\"]) helper.create_package(self.wsdir, \"gamma\", []) helper.create_package(self.wsdir,", "\"-w\", self.wsdir, \"--dry-run\", \"alpha\") self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"gamma\", stdout) self.assertIn(\"delta\",", "self.assertIn(\"gamma\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 1)", "= 
helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--clean\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir,", "init'\"\"\" with patch(\"rosrepo.cmd_init.find_ros_root\", lambda x: None): exitcode, stdout = helper.run_rosrepo(\"init\", self.wsdir) self.assertEqual(exitcode, 1)", "helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--pinned\",", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\", []), []) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode,", "required by applicable law or agreed to in writing, software # distributed under", "\"UNKNOWN\"), (0, \"ROS_WORKSPACE=%(wsdir)s\\nROS_PACKAGE_PATH=%(wsdir)s/src\\nPATH=%(env_path)s\\n# variable UNKNOWN is not set\\n\" % {\"wsdir\": self.wsdir, \"env_path\": os.environ[\"PATH\"]})", "\"http://localhost\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "= Config(self.wsdir) cfg[\"gitlab_servers\"] = [{\"label\": \"NoURL\"}] cfg.write() exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "self.wsdir, \"--pinned\", \"beta\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0)", "stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--pinned\", \"beta\") self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir,", "self.wsdir, \"alpha\") self.assertEqual(exitcode, 0) cfg = Config(self.wsdir) cfg[\"version\"] = \"3.0.0a0\" cfg.write() self.assertEqual( helper.run_rosrepo(\"list\",", "self.assertIn(\"beta\", stdout) self.assertIn(\"gamma\", stdout) 
self.assertIn(\"delta\", stdout) self.assertNotIn(\"epsilon\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\",", "system package\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--clean\") self.assertEqual(exitcode, 0) exitcode,", "helper.run_rosrepo(\"list\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"gamma\", stdout) self.assertIn(\"delta\", stdout)", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-env-cache\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_env_cache\"), False) exitcode, stdout =", "helper.run_rosrepo(\"build\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\", stdout) self.assertNotIn(\"gamma\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"delta\", stdout)", "helper.create_package(self.wsdir, \"ancient2\", [], deprecated=\"Walking Dead\") for blacklisted_key in [\"ROS_WORKSPACE\", \"ROS_PACKAGE_PATH\"]: if blacklisted_key in", "exitcode, stdout = helper.run_rosrepo(\"clean\", \"-w\", self.wsdir, \"--dry-run\") self.assertEqual(exitcode, 0) self.assertTrue(os.path.isdir(os.path.join(self.wsdir, \"build\"))) exitcode, stdout", "rosrepo_version) def test_incompatible_new_version(self): \"\"\"Test if workspaces from future rosrepo versions are detected\"\"\" exitcode,", "\"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) cfg = Config(self.wsdir) cfg[\"version\"] = \"999.0\" cfg.write() exitcode,", "stdout) exitcode, stdout = helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir, \"--all\") self.assertEqual(exitcode, 0) exitcode, stdout =", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-job-limit\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), None) ####################### exitcode, stdout", "exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", 
self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) os.makedirs(os.path.join(self.wsdir, \"build\")) exitcode, stdout", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-rosclipse\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_rosclipse\"), False) ####################### exitcode,", "= helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-W\") self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"epsilon\", stdout) def test_config(self):", "= sys.stdout from rosrepo.config import Config import test.helper as helper class WorkspaceTest(unittest.TestCase): def", "\"-w\", self.wsdir, \"--pinned\", \"beta\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir) self.assertEqual(exitcode,", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"), False) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--catkin-lint\") self.assertEqual(exitcode, 0)", "\"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\",", "\"--no-catkin-lint\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"), False) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--catkin-lint\") self.assertEqual(exitcode,", "self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) self.assertEqual(self.get_config_value(\"default_build\"), [\"alpha\"]) self.assertEqual(self.get_config_value(\"pinned_build\"),", "\"--autocomplete\"), (0, \"\\n\") ) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", 
\"http://localhost\",", "self.wsdir, \"alpha\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\"), [\"alpha\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), []) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"),", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-crawl-depth\", \"1\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_crawl_depth\"), 1) ####################### exitcode,", "mkdtemp() helper.create_fake_ros_root(self.ros_root_dir) helper.create_package(self.wsdir, \"alpha\", [\"beta\", \"gamma\", \"installed-system\"]) helper.create_package(self.wsdir, \"beta\", [\"delta\"]) helper.create_package(self.wsdir, \"gamma\", [])", "helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"epsilon\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\", stdout) exitcode, stdout", "self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) self.assertEqual(self.get_config_value(\"default_build\"), [\"alpha\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"]) def test_upgrade_from_version_2(self): \"\"\"Test if", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"0\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), None) exitcode, stdout", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--show-gitlab-urls\", \"--autocomplete\"), (0, \"\\n\") ) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "self.wsdir) self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"gamma\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout", "stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-P\") self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertNotIn(\"delta\",", "self.wsdir, 
\"alpha\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--pinned\", \"beta\") self.assertEqual(exitcode,", "self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-W\") self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout)", "stdout = helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir, \"--pinned\", \"beta\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\"), [\"beta\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), [])", "for d in [\"alpha\", \"beta\", \"gamma\"]] + [\"/after\"]) self.assertEqual( helper.run_rosrepo(\"bash\", \"-w\", self.wsdir), (0,", "\".catkin_tools\", \"profiles\", \"rosrepo\")) os.makedirs(os.path.join(self.wsdir, \".rosrepo\")) from rosrepo.common import PkgInfo with open(os.path.join(self.wsdir, \".rosrepo\", \"info\"),", "exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 0) cfg = Config(self.wsdir) cfg[\"version\"]", "self.assertEqual(exitcode, 0) cfg = Config(self.wsdir) cfg[\"version\"] = \"999.0\" cfg.write() exitcode, stdout = helper.run_rosrepo(\"list\",", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--get-gitlab-url\", \"Test\"), (0, \"http://localhost\\n\") ) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--show-gitlab-urls\",", "in offline mode\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-gitlab-url\", \"Test\") self.assertEqual(exitcode,", "0) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"),", "packages to build\", stdout) helper.failing_programs = [\"catkin_lint\"] exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir,", "self.assertEqual(exitcode, 0) 
exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--dry-run\") self.assertEqual(exitcode, 1) self.assertIn(\"no packages", "\"pin\": True}, \"gamma\": {\"auto\": True, \"pin\": False}, \"delta\": {\"auto\": True, \"pin\": False}, },", "1) self.assertIn(\"no packages given\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-default\", \"alpha\")", "\"-w\", self.wsdir, \"--job-limit\", \"1\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--dry-run\")", "self.wsdir, \"--dry-run\", \"alpha\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\", []), []) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\",", "exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-P\") self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout)", "of 'rosrepo clean'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) os.makedirs(os.path.join(self.wsdir,", "0) self.assertFalse(os.path.isdir(os.path.join(self.wsdir, \"build\"))) def test_upgrade_from_version_1(self): \"\"\"Test if workspaces from rosrepo 1.x are migrated", "self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"beta\\ndelta\\n\") ) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\",", ") exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--show-gitlab-urls\") self.assertEqual(exitcode, 0) self.assertIn(\"Test\", stdout) self.assertIn(\"http://localhost\",", "exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) with patch(\"rosrepo.cmd_config.find_ros_root\", lambda x:", "Config(self.wsdir) cfg[\"gitlab_servers\"] = [{\"label\": \"NoURL\"}] cfg.write() exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, 
\"--gitlab-login\",", "rosrepo.config import Config import test.helper as helper class WorkspaceTest(unittest.TestCase): def setUp(self): self.ros_root_dir =", "default_flow_style=False )) exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"list\",", "\"--set-compiler\", \"clang\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"compiler\"), \"clang\") exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-compiler\",", "\"http://localhost\\n\") ) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--show-gitlab-urls\", \"--autocomplete\"), (0, \"Test\\n\") ) exitcode, stdout", "import patch import sys sys.stderr = sys.stdout from rosrepo.config import Config import test.helper", "[]) def test_build(self): \"\"\"Test proper behavior of 'rosrepo build'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\",", "exitcode, stdout = helper.run_rosrepo(\"init\", self.wsdir) self.assertEqual(exitcode, 1) self.assertIn(\"cannot detect ROS distribution\", stdout) os.environ[\"HOME\"]", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--install\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"install\"), True) exitcode, stdout = helper.run_rosrepo(\"config\",", "self.assertEqual(exitcode, 1) self.assertIn(\"cannot acquire token for Gitlab server without URL\", stdout) exitcode, stdout", "self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--show-gitlab-urls\", \"--autocomplete\"), (0, \"\\n\") ) exitcode, stdout = helper.run_rosrepo(\"config\",", "# you may not use this file except in compliance with the License.", "specific language governing permissions and # limitations under the License. 
# # import", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "with open(os.path.join(self.wsdir, \".catkin_workspace\"), \"w\"): pass os.makedirs(os.path.join(self.wsdir, \".catkin_tools\", \"profiles\", \"rosrepo\")) os.makedirs(os.path.join(self.wsdir, \".rosrepo\")) from rosrepo.common", "helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-BC\") self.assertEqual(exitcode, 0) self.assertIn(\"search filter\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\",", "__version__ as rosrepo_version self.assertEqual(self.get_config_value(\"version\"), rosrepo_version) def test_incompatible_new_version(self): \"\"\"Test if workspaces from future rosrepo", "0) self.assertNotIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir,", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"8\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), 8) exitcode, stdout", "[]) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"beta\\ndelta\\n\") ) exitcode, stdout = helper.run_rosrepo(\"include\",", "helper.run_rosrepo(\"init\", self.wsdir) self.assertEqual(exitcode, 1) self.assertIn(\"cannot detect ROS distribution\", stdout) os.environ[\"HOME\"] = self.wsdir exitcode,", "\"-w\", self.wsdir) self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"gamma\", stdout) self.assertIn(\"delta\", stdout) exitcode,", "0) self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"gamma\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"exclude\",", "of 'rosrepo build'\"\"\" exitcode, stdout = 
helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) exitcode,", "\"--set-gitlab-crawl-depth\", \"2\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_crawl_depth\"), 2) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-crawl-depth\",", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-catkin-lint\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"), False) exitcode, stdout =", "stdout = helper.run_rosrepo(\"init\", self.wsdir) self.assertEqual(exitcode, 1) self.assertIn(\"cannot detect ROS distribution\", stdout) os.environ[\"HOME\"] =", "self.assertEqual(exitcode, 1) self.assertIn(\"newer version\", stdout) def test_buildset(self): \"\"\"Test proper behavior of 'rosrepo include'", "\".catkin_workspace\"), \"w\"): pass os.symlink(os.path.join(\"..\", \"repos\", \"alpha\"), os.path.join(self.wsdir, \"src\", \"alpha\")) os.symlink(os.path.join(\"..\", \"repos\", \"beta\"), os.path.join(self.wsdir,", "self.assertEqual(self.get_config_value(\"compiler\"), None) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-crawl-depth\", \"2\") self.assertEqual(exitcode, 0)", "self.assertEqual(exitcode, 0) self.assertFalse(os.path.isdir(os.path.join(self.wsdir, \"build\"))) self.assertEqual(self.get_config_value(\"default_build\", []), []) self.assertEqual(self.get_config_value(\"pinned_build\", []), []) def test_build(self): \"\"\"Test", "self.assertEqual( helper.run_rosrepo(\"bash\", \"-w\", self.wsdir, \"ROS_WORKSPACE\", \"ROS_PACKAGE_PATH\", \"PATH\", \"UNKNOWN\"), (0, \"ROS_WORKSPACE=%(wsdir)s\\nROS_PACKAGE_PATH=%(wsdir)s/src\\nPATH=%(env_path)s\\n# variable UNKNOWN is", "try: from mock import patch except ImportError: from unittest.mock import patch import sys", "[]), []) def test_build(self): \"\"\"Test proper behavior of 'rosrepo build'\"\"\" 
exitcode, stdout =", "server without URL\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"does_not_exist\") self.assertEqual(exitcode,", "= None def get_config_value(self, key, default=None): cfg = Config(self.wsdir, read_only=True) return cfg.get(key, default)", "token in offline mode\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--gitlab-login\",", "\"--pinned\", \"beta\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0) self.assertIn(\"alpha\",", "\"-w\", self.wsdir, \"--clean\", \"--dry-run\", \"--offline\", \"--verbose\", \"--no-status\", \"--keep-going\", \"-j2\") self.assertEqual(exitcode, 0) def test_list(self):", "[{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\")", "self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\":", "License for the specific language governing permissions and # limitations under the License.", "\"wb\") as f: metadata = {} metadata[\"alpha\"] = PkgInfo() metadata[\"beta\"] = PkgInfo() metadata[\"alpha\"].selected", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-compiler\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"compiler\"), None) ####################### exitcode, stdout =", "\"--set-compiler\", \"does_not_exist\") self.assertEqual(exitcode, 1) self.assertIn(\"unknown compiler\", stdout) self.assertEqual(self.get_config_value(\"compiler\"), \"clang\") exitcode, stdout = helper.run_rosrepo(\"config\",", "\"<PASSWORD>\") self.assertEqual(exitcode, 0) exitcode, stdout = 
helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"Test\", \"--private-token\", \"<PASSWORD>\")", "\"License\"); # you may not use this file except in compliance with the", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--all\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\",", "self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\",", "self.wsdir, \"--offline\", \"--gitlab-login\", \"Test\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot acquire Gitlab private token in offline", "\"\\n\") ) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--get-gitlab-url\", \"Test\"), (0, \"http://localhost\\n\") ) self.assertEqual( helper.run_rosrepo(\"config\",", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"1\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir,", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0)", "0) cfg = Config(self.wsdir) cfg[\"version\"] = \"3.0.0a0\" cfg.write() self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"),", "True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-env-cache\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_env_cache\"), False) #######################", "0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, 
\"--get-gitlab-url\",", "os.path.join(self.wsdir, \"src\", \"beta\")) os.symlink(os.path.join(\"..\", \"repos\", \"gamma\"), os.path.join(self.wsdir, \"src\", \"gamma\")) os.symlink(os.path.join(\"..\", \"repos\", \"delta\"), os.path.join(self.wsdir,", "self.assertEqual(exitcode, 1) self.assertIn(\"no such Gitlab server\", stdout) ####################### self.assertEqual(self.get_config_value(\"ros_root\"), self.ros_root_dir) helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "deprecated\", stdout) self.assertIn(\"Walking Dead\", stdout) os.makedirs(os.path.join(self.wsdir, \"build\")) exitcode, stdout = helper.run_rosrepo(\"init\", \"--reset\", \"-r\",", "stdout) self.assertIn(\"epsilon\", stdout) def test_config(self): \"\"\"Test proper behavior of 'rosrepo config'\"\"\" exitcode, stdout", "True, \"pin\": False}, \"delta\": {\"auto\": True, \"pin\": False}, }, default_flow_style=False )) exitcode, stdout", "os.environ[\"HOME\"] = self.homedir os.environ[\"XDG_CONFIG_HOME\"] = os.path.join(self.homedir, \".config\") def tearDown(self): shutil.rmtree(self.wsdir, ignore_errors=True) shutil.rmtree(self.homedir, ignore_errors=True)", "\"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) from rosrepo import __version__ as rosrepo_version self.assertEqual(self.get_config_value(\"version\"), rosrepo_version) def", "as helper class WorkspaceTest(unittest.TestCase): def setUp(self): self.ros_root_dir = mkdtemp() self.wsdir = mkdtemp() self.homedir", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "cfg.write() exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\") self.assertEqual(exitcode, 1) self.assertIn(\"newer version\", stdout)", "self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) self.assertEqual(self.get_config_value(\"default_build\"), [\"alpha\"]) 
self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"]) def test_upgrade_from_older_version_3(self): \"\"\"Test if", "\"alpha\") self.assertEqual(exitcode, 1) self.assertIn(\"catkin_lint reported errors\", stdout) helper.failing_programs = [] with patch(\"rosrepo.cmd_build.find_ros_root\", lambda", "\"--no-job-limit\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), None) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--install\")", "\"private_token\": \"<PASSWORD>\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode,", "\"delta\")) with open(os.path.join(self.wsdir, \"repos\", \".metainfo\"), \"w\") as f: f.write(yaml.safe_dump( { \"alpha\": {\"auto\": False,", "\"-w\", self.wsdir, \"-PD\") self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout", "helper.run_rosrepo(\"init\", \"--reset\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) self.assertFalse(os.path.isdir(os.path.join(self.wsdir, \"build\"))) self.assertEqual(self.get_config_value(\"default_build\", []), []) self.assertEqual(self.get_config_value(\"pinned_build\",", "except ImportError: from unittest.mock import patch import sys sys.stderr = sys.stdout from rosrepo.config", "0) self.assertEqual(self.get_config_value(\"use_env_cache\"), False) ####################### def test_init_failures(self): \"\"\"Test proper behavior of 'rosrepo init'\"\"\" with", "setUp(self): self.ros_root_dir = mkdtemp() self.wsdir = mkdtemp() self.homedir = mkdtemp() helper.create_fake_ros_root(self.ros_root_dir) helper.create_package(self.wsdir, \"alpha\",", "0) self.assertTrue(os.path.isdir(os.path.join(self.wsdir, \"build\"))) exitcode, stdout = helper.run_rosrepo(\"clean\", \"-w\", self.wsdir) 
self.assertEqual(exitcode, 0) self.assertFalse(os.path.isdir(os.path.join(self.wsdir, \"build\")))", "helper.run_rosrepo(\"clean\", \"-w\", self.wsdir, \"--dry-run\") self.assertEqual(exitcode, 0) self.assertTrue(os.path.isdir(os.path.join(self.wsdir, \"build\"))) exitcode, stdout = helper.run_rosrepo(\"clean\", \"-w\",", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\"), [\"alpha\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), []) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\")", "test_config(self): \"\"\"Test proper behavior of 'rosrepo config'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir,", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-catkin-lint\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"), False) ####################### exitcode,", "set\\n\" % {\"wsdir\": self.wsdir, \"env_path\": os.environ[\"PATH\"]}) ) os.environ[\"ROS_PACKAGE_PATH\"] = os.pathsep.join([\"/before\"] + [\"%s/src/%s\" %", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--remove-credentials\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "self.wsdir, \"--set-ros-root\", self.ros_root_dir) self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"ros_root\"), self.ros_root_dir) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "\"Test\", \"http://localhost\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot acquire Gitlab private token in offline mode\", stdout)", "helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) self.assertEqual(self.get_config_value(\"default_build\"), [\"alpha\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"]) def test_upgrade_from_version_2(self):", 
"\"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"Test\") self.assertEqual(exitcode,", "self.wsdir, \"--gitlab-login\", \"does_not_exist\") self.assertEqual(exitcode, 1) self.assertIn(\"no such Gitlab server\", stdout) ####################### self.assertEqual(self.get_config_value(\"ros_root\"), self.ros_root_dir)", "stdout) exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, os.path.normpath(os.path.join(os.path.dirname(__file__), os.pardir))) self.assertEqual(exitcode, 1) self.assertIn(\"rosrepo source", "None): exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot detect ROS", "(0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) self.assertEqual(self.get_config_value(\"default_build\"), [\"alpha\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"]) def test_upgrade_from_version_2(self): \"\"\"Test if workspaces from", "self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) self.assertEqual(self.get_config_value(\"default_build\"), [\"alpha\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"]) def", "self.wsdir, \"--pinned\", \"beta\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\"), [\"beta\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), []) exitcode, stdout = helper.run_rosrepo(\"include\",", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-rosclipse\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_rosclipse\"), False) exitcode, stdout", "packages given\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-default\", \"alpha\") self.assertEqual(exitcode, 0)", "f: f.write(yaml.safe_dump( { \"alpha\": {\"auto\": 
False, \"pin\": False}, \"beta\": {\"auto\": False, \"pin\": True},", "self.assertIn(\"catkin_lint reported errors\", stdout) helper.failing_programs = [] with patch(\"rosrepo.cmd_build.find_ros_root\", lambda x: None): exitcode,", "self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>token\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode, 0)", "def test_upgrade_from_older_version_3(self): \"\"\"Test if workspaces from rosrepo 3.x are upgraded to latest version\"\"\"", "self.wsdir, \"--remove-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\",", "def test_clean(self): \"\"\"Test proper behavior of 'rosrepo clean'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\",", "helper.create_package(self.wsdir, \"epsilon\", [\"broken\"]) helper.create_package(self.wsdir, \"broken\", [\"missing\"]) helper.create_package(self.wsdir, \"incomplete\", [\"missing-system\"]) helper.create_package(self.wsdir, \"ancient\", [], deprecated=True)", "self.assertIn(\"cannot acquire token for Gitlab server without URL\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\",", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"Test\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\":", "\"Test\", \"url\": \"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"Test\", \"--private-token\", \"<PASSWORD>\")", "\"Test\", 
\"http://localhost\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) self.assertEqual( helper.run_rosrepo(\"config\",", "\"8\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), 8) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-job-limit\") self.assertEqual(exitcode,", "= mkdtemp() self.wsdir = mkdtemp() self.homedir = mkdtemp() helper.create_fake_ros_root(self.ros_root_dir) helper.create_package(self.wsdir, \"alpha\", [\"beta\", \"gamma\",", "self.wsdir, \"--dry-run\", \"alpha\") self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"gamma\", stdout) self.assertIn(\"delta\", stdout)", "version\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) exitcode, stdout =", "= helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--pinned\", \"epsilon\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\", stdout) exitcode,", "rosrepo.common import PkgInfo with open(os.path.join(self.wsdir, \".rosrepo\", \"info\"), \"wb\") as f: metadata = {}", ") from rosrepo import __version__ as rosrepo_version self.assertEqual(self.get_config_value(\"version\"), rosrepo_version) def test_incompatible_new_version(self): \"\"\"Test if", "ignore_errors=True) shutil.rmtree(self.homedir, ignore_errors=True) shutil.rmtree(self.ros_root_dir, ignore_errors=True) self.ros_root_dir = None self.wsdir = None def get_config_value(self,", "test_clean(self): \"\"\"Test proper behavior of 'rosrepo clean'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir,", "os.pathsep}) ) def test_clean(self): \"\"\"Test proper behavior of 'rosrepo clean'\"\"\" exitcode, stdout =", "\"repos\", \"gamma\"), os.path.join(self.wsdir, \"src\", 
\"gamma\")) os.symlink(os.path.join(\"..\", \"repos\", \"delta\"), os.path.join(self.wsdir, \"src\", \"delta\")) with open(os.path.join(self.wsdir,", "\"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"1\")", "self.wsdir, \"--set-compiler\", \"clang\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"compiler\"), \"clang\") exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "2.0 (the \"License\"); # you may not use this file except in compliance", "test_bash(self): \"\"\"Test proper behavior of 'rosrepo bash'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir,", "Config import test.helper as helper class WorkspaceTest(unittest.TestCase): def setUp(self): self.ros_root_dir = mkdtemp() self.wsdir", "def test_list(self): \"\"\"Test proper behavior of 'rosrepo list'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\",", "\"--offline\", \"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode, 0) self.assertIn(\"cannot verify Gitlab private token in offline", "self.assertEqual(self.get_config_value(\"use_catkin_lint\"), False) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--catkin-lint\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"), True)", "such Gitlab server\", stdout) ####################### self.assertEqual(self.get_config_value(\"ros_root\"), self.ros_root_dir) helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-ros-root\") self.assertEqual(self.get_config_value(\"ros_root\"), None)", "= helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 1) self.assertIn(\"$HOME\", stdout) exitcode, stdout = helper.run_rosrepo(\"init\",", "1) self.assertIn(\"no such Gitlab server\", stdout) ####################### 
self.assertEqual(self.get_config_value(\"ros_root\"), self.ros_root_dir) helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-ros-root\")", "exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"config\",", "# # ROSREPO # Manage ROS workspaces with multiple Gitlab repositories # #", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "stdout) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"ancient\", \"ancient2\") self.assertEqual(exitcode, 0) self.assertIn(\"is", "\"src\")) with open(os.path.join(self.wsdir, \"src\", \"CMakeLists.txt\"), \"w\"): pass with open(os.path.join(self.wsdir, \"src\", \"toplevel.cmake\"), \"w\"): pass", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\", \"--store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"),", "\"alpha\": {\"auto\": False, \"pin\": False}, \"beta\": {\"auto\": False, \"pin\": True}, \"gamma\": {\"auto\": True,", "stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-default\") self.assertEqual(exitcode, 1) self.assertIn(\"no packages given\", stdout) exitcode,", "\"-w\", self.wsdir, \"--no-catkin-lint\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"), False) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", ") self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"]) exitcode, stdout = helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir, \"-a\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\"),", "# # Unless required by applicable law or agreed to in writing, software", "distribution\", stdout) os.environ[\"HOME\"] = self.wsdir exitcode, stdout = 
helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode,", "[{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\",", "stdout = helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir, \"-a\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\"), []) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\",", "express or implied. # See the License for the specific language governing permissions", "exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"beta\") self.assertEqual(exitcode, 0) exitcode, stdout =", "self.wsdir, \"--gitlab-logout\", \"does_not_exist\") self.assertEqual(exitcode, 1) self.assertIn(\"no such Gitlab server\", stdout) exitcode, stdout =", "lambda x: None): exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir) self.assertEqual(exitcode, 1) self.assertIn(\"cannot detect", "pass os.symlink(os.path.join(\"..\", \"repos\", \"alpha\"), os.path.join(self.wsdir, \"src\", \"alpha\")) os.symlink(os.path.join(\"..\", \"repos\", \"beta\"), os.path.join(self.wsdir, \"src\", \"beta\"))", "properly\"\"\" with open(os.path.join(self.wsdir, \".catkin_workspace\"), \"w\"): pass os.makedirs(os.path.join(self.wsdir, \".catkin_tools\", \"profiles\", \"rosrepo\")) os.makedirs(os.path.join(self.wsdir, \".rosrepo\")) from", "either express or implied. 
# See the License for the specific language governing", ") self.assertEqual(self.get_config_value(\"default_build\"), [\"alpha\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"]) def test_upgrade_from_version_2(self): \"\"\"Test if workspaces from rosrepo 2.x", "\"<PASSWORD>\"}]) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--get-gitlab-url\", \"does_not_exist\"), (0, \"\\n\") ) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\",", "\"-w\", self.wsdir, \"-W\") self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"epsilon\", stdout) def test_config(self): \"\"\"Test proper", "0) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\", stdout) self.assertNotIn(\"gamma\", stdout)", "migrated properly\"\"\" with open(os.path.join(self.wsdir, \".catkin_workspace\"), \"w\"): pass os.makedirs(os.path.join(self.wsdir, \".catkin_tools\", \"profiles\", \"rosrepo\")) os.makedirs(os.path.join(self.wsdir, \".rosrepo\"))", "stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\",", "\"beta\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"exclude\", \"-w\", self.wsdir, \"--pinned\", \"beta\") self.assertEqual(exitcode, 0)", "stdout) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"epsilon\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve", "\"\"\"Test proper behavior of 'rosrepo clean'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir)", "self.assertEqual(exitcode, 1) self.assertIn(\"cannot detect ROS distribution\", stdout) os.environ[\"HOME\"] = self.wsdir exitcode, stdout =", "exitcode, stdout = helper.run_rosrepo(\"clean\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0) 
self.assertFalse(os.path.isdir(os.path.join(self.wsdir, \"build\"))) def test_upgrade_from_version_1(self): \"\"\"Test", "\"-w\", self.wsdir, \"--set-default\") self.assertEqual(exitcode, 1) self.assertIn(\"no packages given\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\",", "False) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-compiler\", \"clang\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"compiler\"),", "= helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 0) cfg = Config(self.wsdir) cfg[\"version\"] = \"3.0.0a0\"", "self.wsdir, \"--show-gitlab-urls\", \"--autocomplete\"), (0, \"\\n\") ) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\",", "\"--unset-gitlab-url\", \"Test\") self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--show-gitlab-urls\", \"--autocomplete\"), (0, \"\\n\") )", "mode\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--remove-credentials\") self.assertEqual(exitcode, 0) exitcode, stdout", "the License. 
# You may obtain a copy of the License at #", "\"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) self.assertEqual(self.get_config_value(\"default_build\"), [\"alpha\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"]) def test_upgrade_from_version_2(self): \"\"\"Test if workspaces", "exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--pinned\", \"beta\") self.assertEqual(exitcode, 0) exitcode, stdout =", "such Gitlab server\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-logout\", \"Test\") self.assertEqual(exitcode,", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-catkin-lint\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"), False) ####################### exitcode, stdout", "= helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-P\") self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertNotIn(\"delta\", stdout)", "language governing permissions and # limitations under the License. 
# # import unittest", "exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-Pv\") self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertNotIn(\"beta\", stdout)", "\"--gitlab-login\", \"Test\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}])", "self.wsdir, \"--get-gitlab-url\", \"Test\"), (0, \"http://localhost\\n\") ) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--show-gitlab-urls\", \"--autocomplete\"), (0,", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-env-cache\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_env_cache\"), False) ####################### def test_init_failures(self): \"\"\"Test", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-crawl-depth\", \"1\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_crawl_depth\"), 1) ####################### exitcode, stdout =", "self.assertEqual(exitcode, 0) self.assertTrue(os.path.isdir(os.path.join(self.wsdir, \"build\"))) exitcode, stdout = helper.run_rosrepo(\"clean\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0) self.assertFalse(os.path.isdir(os.path.join(self.wsdir,", "\"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode, 0) self.assertIn(\"cannot verify Gitlab private token in offline mode\",", "self.assertEqual(self.get_config_value(\"default_build\"), [\"beta\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), []) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"incomplete\") self.assertEqual(exitcode,", "self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"t0ps3cr3t\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", 
\"-w\", self.wsdir,", "as f: metadata = {} metadata[\"alpha\"] = PkgInfo() metadata[\"beta\"] = PkgInfo() metadata[\"alpha\"].selected =", "# Author: <NAME> # # Copyright 2016 <NAME> # # Licensed under the", "self.assertEqual(exitcode, 1) self.assertIn(\"missing system package\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--clean\")", "self.assertEqual(exitcode, 0) self.assertIn(\"cannot verify Gitlab private token in offline mode\", stdout) exitcode, stdout", "self.wsdir, \"--no-env-cache\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_env_cache\"), False) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--env-cache\")", "self.assertEqual(self.get_config_value(\"use_rosclipse\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-rosclipse\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_rosclipse\"), False)", "exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"include\",", "[\"missing-system\"]) helper.create_package(self.wsdir, \"ancient\", [], deprecated=True) helper.create_package(self.wsdir, \"ancient2\", [], deprecated=\"Walking Dead\") for blacklisted_key in", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "True f.write(pickle.dumps(metadata)) exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"list\",", "stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, os.path.normpath(os.path.join(os.path.dirname(__file__), os.pardir))) self.assertEqual(exitcode, 1) self.assertIn(\"rosrepo source folder\", stdout)", "\"-w\", self.wsdir, \"-P\") self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertNotIn(\"delta\", stdout) 
exitcode, stdout", "True, \"pin\": False}, }, default_flow_style=False )) exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir)", "0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"t0ps3cr3t\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "self.assertEqual(self.get_config_value(\"default_build\", []), []) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\"),", "Gitlab repositories # # Author: <NAME> # # Copyright 2016 <NAME> # #", "\"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode, stdout", "\"1\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--dry-run\") self.assertEqual(exitcode, 1) self.assertIn(\"no", "\"src\", \"toplevel.cmake\"), \"w\"): pass with open(os.path.join(self.wsdir, \".catkin_workspace\"), \"w\"): pass os.symlink(os.path.join(\"..\", \"repos\", \"alpha\"), os.path.join(self.wsdir,", "self.wsdir, \"alpha\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot detect ROS distribution\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\",", "with patch(\"rosrepo.cmd_config.find_ros_root\", lambda x: None): exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir) self.assertEqual(exitcode, 1)", "shutil.rmtree(self.homedir, ignore_errors=True) shutil.rmtree(self.ros_root_dir, ignore_errors=True) self.ros_root_dir = None self.wsdir = None def get_config_value(self, key,", "rosrepo versions are detected\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0)", "self.wsdir, \"--offline\", 
\"--gitlab-login\", \"Test\") self.assertEqual(exitcode, 0) self.assertIn(\"cannot verify Gitlab private token in offline", "errors\", stdout) helper.failing_programs = [] with patch(\"rosrepo.cmd_build.find_ros_root\", lambda x: None): exitcode, stdout =", "self.wsdir, \"--set-compiler\", \"does_not_exist\") self.assertEqual(exitcode, 1) self.assertIn(\"unknown compiler\", stdout) self.assertEqual(self.get_config_value(\"compiler\"), \"clang\") exitcode, stdout =", "\"beta\"), os.path.join(self.wsdir, \"src\", \"beta\")) os.symlink(os.path.join(\"..\", \"repos\", \"gamma\"), os.path.join(self.wsdir, \"src\", \"gamma\")) os.symlink(os.path.join(\"..\", \"repos\", \"delta\"),", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-env-cache\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_env_cache\"), False) exitcode, stdout", "\"build\")) exitcode, stdout = helper.run_rosrepo(\"init\", \"--reset\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) self.assertFalse(os.path.isdir(os.path.join(self.wsdir, \"build\")))", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"Test\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\",", "self.wsdir, \"--offline\", \"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode, 0) self.assertIn(\"cannot verify Gitlab private token in", "helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) self.assertEqual(self.get_config_value(\"default_build\"), [\"alpha\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"]) def test_upgrade_from_older_version_3(self):", "\"--gitlab-login\", \"does_not_exist\") self.assertEqual(exitcode, 1) self.assertIn(\"no such Gitlab server\", stdout) ####################### self.assertEqual(self.get_config_value(\"ros_root\"), 
self.ros_root_dir) helper.run_rosrepo(\"config\",", "helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"NoURL\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot acquire token for Gitlab server", "\"alpha\\nbeta\\ndelta\\ngamma\\n\") ) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--pinned\", \"beta\") self.assertEqual(exitcode, 0) self.assertEqual(", "\"incomplete\") self.assertEqual(exitcode, 1) self.assertIn(\"missing system package\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir,", "given\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-pinned\", \"beta\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"pinned_build\",", "stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\"), [\"alpha\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), []) self.assertEqual(", "unittest import os import shutil import yaml import pickle from tempfile import mkdtemp", "self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_env_cache\"), False) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--env-cache\") self.assertEqual(exitcode, 0)", "% {\"wsdir\": self.wsdir, \"sep\": os.pathsep}) ) def test_clean(self): \"\"\"Test proper behavior of 'rosrepo", "\"--no-install\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"install\"), False) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-compiler\",", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\":", "self.wsdir) self.assertEqual(exitcode, 0) exitcode, stdout = 
helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 0) exitcode,", "stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"--set-pinned\") self.assertEqual(exitcode, 1) self.assertIn(\"no packages given\", stdout) exitcode,", "metadata[\"alpha\"] = PkgInfo() metadata[\"beta\"] = PkgInfo() metadata[\"alpha\"].selected = True metadata[\"beta\"].selected = True metadata[\"beta\"].pinned", "metadata[\"alpha\"].selected = True metadata[\"beta\"].selected = True metadata[\"beta\"].pinned = True f.write(pickle.dumps(metadata)) exitcode, stdout =", "import patch except ImportError: from unittest.mock import patch import sys sys.stderr = sys.stdout", "private token in offline mode\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--remove-credentials\")", "(self.wsdir, d) for d in [\"alpha\", \"beta\", \"gamma\"]] + [\"/after\"]) self.assertEqual( helper.run_rosrepo(\"bash\", \"-w\",", "\"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\":", "0) cfg = Config(self.wsdir) cfg[\"gitlab_servers\"] = [{\"label\": \"NoURL\"}] cfg.write() exitcode, stdout = helper.run_rosrepo(\"config\",", "self.assertEqual(self.get_config_value(\"default_build\"), [\"alpha\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), [\"beta\"]) def test_upgrade_from_older_version_3(self): \"\"\"Test if workspaces from rosrepo 3.x are", "\"Test\", \"url\": \"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode,", "self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-PD\") self.assertEqual(exitcode, 0) self.assertNotIn(\"alpha\", stdout)", "at # # 
http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "\"epsilon\", [\"broken\"]) helper.create_package(self.wsdir, \"broken\", [\"missing\"]) helper.create_package(self.wsdir, \"incomplete\", [\"missing-system\"]) helper.create_package(self.wsdir, \"ancient\", [], deprecated=True) helper.create_package(self.wsdir,", "\"--show-gitlab-urls\", \"--autocomplete\"), (0, \"Test\\n\") ) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--show-gitlab-urls\") self.assertEqual(exitcode,", "self.assertEqual(self.get_config_value(\"use_catkin_lint\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-catkin-lint\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"), False)", "stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-W\") self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"epsilon\",", "\"-BC\") self.assertEqual(exitcode, 0) self.assertIn(\"search filter\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-S\")", "stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--pinned\", \"beta\") self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"list\",", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot acquire Gitlab", "key, default=None): cfg = Config(self.wsdir, read_only=True) return cfg.get(key, default) def test_bash(self): \"\"\"Test proper", "[{\"label\": \"NoURL\"}] cfg.write() exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"NoURL\") self.assertEqual(exitcode, 1)", "self.wsdir) self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"1\") self.assertEqual(exitcode, 0)", "\"-w\", self.wsdir, 
\"--offline\", \"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode, 0) self.assertIn(\"cannot verify Gitlab private token", "\"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--dry-run\", \"alpha\")", "x: None): exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"alpha\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot detect", "[{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>token\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\",", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\", \"--store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"store_credentials\"),", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--remove-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\":", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"store_credentials\"), False) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\",", "[\"delta\"]) helper.create_package(self.wsdir, \"gamma\", []) helper.create_package(self.wsdir, \"delta\", []) helper.create_package(self.wsdir, \"epsilon\", [\"broken\"]) helper.create_package(self.wsdir, \"broken\", [\"missing\"])", "self.wsdir, \"--no-rosclipse\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_rosclipse\"), False) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-compiler\") 
self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"compiler\"), None) ####################### exitcode,", "[]) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"incomplete\") self.assertEqual(exitcode, 0) self.assertIn(\"apt-get install\",", "self.assertEqual(exitcode, 0) os.makedirs(os.path.join(self.wsdir, \"build\")) exitcode, stdout = helper.run_rosrepo(\"clean\", \"-w\", self.wsdir, \"--dry-run\") self.assertEqual(exitcode, 0)", "self.wsdir = None def get_config_value(self, key, default=None): cfg = Config(self.wsdir, read_only=True) return cfg.get(key,", "\"http://localhost\", \"--store-credentials\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>token\"}]) exitcode, stdout", "to latest version\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) exitcode,", "= \"999.0\" cfg.write() exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\") self.assertEqual(exitcode, 1) self.assertIn(\"newer", "self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--pinned\", \"beta\") self.assertEqual(exitcode, 0) exitcode,", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"does_not_exist\") self.assertEqual(exitcode, 1) self.assertIn(\"no such Gitlab server\", stdout)", "self.wsdir, \"--no-catkin-lint\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"), False) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "self.assertEqual(self.get_config_value(\"use_env_cache\"), False) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--env-cache\") self.assertEqual(exitcode, 0) 
self.assertEqual(self.get_config_value(\"use_env_cache\"), True)", "0) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout)", "exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--offline\", \"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot", "'rosrepo init'\"\"\" with patch(\"rosrepo.cmd_init.find_ros_root\", lambda x: None): exitcode, stdout = helper.run_rosrepo(\"init\", self.wsdir) self.assertEqual(exitcode,", "\"--set-ros-root\", self.ros_root_dir) self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"ros_root\"), self.ros_root_dir) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\",", "self.assertEqual(self.get_config_value(\"compiler\"), \"clang\") exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-compiler\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"compiler\"), None)", "self.assertEqual(self.get_config_value(\"compiler\"), \"clang\") exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-compiler\", \"does_not_exist\") self.assertEqual(exitcode, 1) self.assertIn(\"unknown", "self.wsdir, \"--gitlab-logout\", \"Test\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode, stdout =", "\"Test\", \"url\": \"http://localhost\", \"private_token\": \"t0ps3cr3t\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--remove-credentials\") self.assertEqual(exitcode,", "= helper.run_rosrepo(\"list\", \"-w\", 
self.wsdir, \"-S\") self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertNotIn(\"beta\", stdout) exitcode, stdout", "(0, \"ROS_WORKSPACE=%(wsdir)s\\nROS_PACKAGE_PATH=/before%(sep)s%(wsdir)s/src%(sep)s/after\\n\" % {\"wsdir\": self.wsdir, \"sep\": os.pathsep}) ) def test_clean(self): \"\"\"Test proper behavior", "= \"3.0.0a0\" cfg.write() self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") ) from rosrepo", "\"\"\"Test proper behavior of 'rosrepo list'\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir)", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"Test\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\":", "except in compliance with the License. # You may obtain a copy of", "\"--env-cache\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_env_cache\"), True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-env-cache\") self.assertEqual(exitcode,", "self.assertEqual(self.get_config_value(\"store_credentials\"), False) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}]) exitcode, stdout = helper.run_rosrepo(\"config\",", "\"--no-rosclipse\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_rosclipse\"), False) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--rosclipse\") self.assertEqual(exitcode,", "[] with patch(\"rosrepo.cmd_build.find_ros_root\", lambda x: None): exitcode, stdout = helper.run_rosrepo(\"build\", \"-w\", self.wsdir, \"alpha\")", "may not use this file except in compliance with the License. 
# You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"16\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), 16) exitcode, stdout =", "self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) os.makedirs(os.path.join(self.wsdir, \"build\")) exitcode, stdout = helper.run_rosrepo(\"clean\", \"-w\", self.wsdir, \"--dry-run\")", "\"url\": \"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\",", "from tempfile import mkdtemp try: from mock import patch except ImportError: from unittest.mock", "\"http://localhost\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot acquire Gitlab private token in offline mode\", stdout) exitcode,", "\"alpha\"), os.path.join(self.wsdir, \"src\", \"alpha\")) os.symlink(os.path.join(\"..\", \"repos\", \"beta\"), os.path.join(self.wsdir, \"src\", \"beta\")) os.symlink(os.path.join(\"..\", \"repos\", \"gamma\"),", "\"-w\", self.wsdir, \"--gitlab-login\", \"Test\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\", \"private_token\": \"<PASSWORD>\"}])", "\"-w\", self.wsdir, \"-a\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"default_build\"), []) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0,", "helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode, 0) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\",", "self.assertEqual(exitcode, 0) with patch(\"rosrepo.cmd_config.find_ros_root\", lambda x: None): exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir)", "stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, 
self.wsdir) self.assertEqual(exitcode, 0) os.makedirs(os.path.join(self.wsdir, \"build\")) exitcode, stdout =", "\"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_servers\"), [{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode,", "\"<PASSWORD>token\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--set-gitlab-url\", \"Test\", \"http://localhost\", \"--private-token\", \"<PASSWORD>\", \"--store-credentials\")", "[{\"label\": \"Test\", \"url\": \"http://localhost\"}]) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--gitlab-login\", \"Test\", \"--private-token\",", "stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-BC\") self.assertEqual(exitcode, 0) self.assertIn(\"search filter\", stdout) exitcode, stdout", "# # Author: <NAME> # # Copyright 2016 <NAME> # # Licensed under", "0) self.assertNotIn(\"alpha\", stdout) self.assertNotIn(\"gamma\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"build\",", "self.wsdir, \"--set-gitlab-crawl-depth\", \"2\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_crawl_depth\"), 2) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "PkgInfo() metadata[\"beta\"] = PkgInfo() metadata[\"alpha\"].selected = True metadata[\"beta\"].selected = True metadata[\"beta\"].pinned = True", "future rosrepo versions are detected\"\"\" exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir, self.wsdir) self.assertEqual(exitcode,", "stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-gitlab-url\", \"Test\") self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "proper behavior of 'rosrepo init'\"\"\" with patch(\"rosrepo.cmd_init.find_ros_root\", lambda x: None): exitcode, stdout 
=", "= helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--env-cache\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_env_cache\"), True) exitcode, stdout = helper.run_rosrepo(\"config\",", "self.wsdir, \"--show-gitlab-urls\") self.assertEqual(exitcode, 0) self.assertIn(\"Test\", stdout) self.assertIn(\"http://localhost\", stdout) self.assertIn(\"yes\", stdout) exitcode, stdout =", "True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-catkin-lint\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"use_catkin_lint\"), False) #######################", "dependencies\", stdout) self.assertEqual(self.get_config_value(\"default_build\"), [\"beta\"]) self.assertEqual(self.get_config_value(\"pinned_build\"), []) exitcode, stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\",", "self.assertIn(\"search filter\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-S\") self.assertEqual(exitcode, 0) self.assertIn(\"alpha\",", "Author: <NAME> # # Copyright 2016 <NAME> # # Licensed under the Apache", "= self.homedir os.environ[\"XDG_CONFIG_HOME\"] = os.path.join(self.homedir, \".config\") def tearDown(self): shutil.rmtree(self.wsdir, ignore_errors=True) shutil.rmtree(self.homedir, ignore_errors=True) shutil.rmtree(self.ros_root_dir,", "self.assertIn(\"gamma\", stdout) self.assertIn(\"delta\", stdout) self.assertNotIn(\"epsilon\", stdout) exitcode, stdout = helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-BC\")", "self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"gamma\", stdout) self.assertIn(\"delta\", stdout) exitcode, stdout = helper.run_rosrepo(\"exclude\", \"-w\",", "16) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"0\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"job_limit\"), None)", "self.assertIn(\"is deprecated\", stdout) 
self.assertIn(\"Walking Dead\", stdout) os.makedirs(os.path.join(self.wsdir, \"build\")) exitcode, stdout = helper.run_rosrepo(\"init\", \"--reset\",", "helper.run_rosrepo(\"build\", \"-w\", self.wsdir) self.assertEqual(exitcode, 0) self.assertIn(\"alpha\", stdout) self.assertIn(\"beta\", stdout) self.assertIn(\"gamma\", stdout) self.assertIn(\"delta\", stdout)", "import mkdtemp try: from mock import patch except ImportError: from unittest.mock import patch", "self.assertEqual(exitcode, 1) self.assertIn(\"no such Gitlab server\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "token in offline mode\", stdout) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--unset-gitlab-url\", \"Test\")", "0) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--job-limit\", \"1\") self.assertEqual(exitcode, 0) exitcode, stdout", "detect ROS distribution\", stdout) os.environ[\"HOME\"] = self.wsdir exitcode, stdout = helper.run_rosrepo(\"init\", \"-r\", self.ros_root_dir,", "self.wsdir, \"--pinned\", \"beta\") self.assertEqual(exitcode, 0) self.assertEqual( helper.run_rosrepo(\"list\", \"-w\", self.wsdir, \"-n\"), (0, \"alpha\\nbeta\\ndelta\\ngamma\\n\") )", "True) exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir, \"--no-install\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"install\"), False) #######################", "\"--set-gitlab-crawl-depth\", \"1\") self.assertEqual(exitcode, 0) self.assertEqual(self.get_config_value(\"gitlab_crawl_depth\"), 1) ####################### exitcode, stdout = helper.run_rosrepo(\"config\", \"-w\", self.wsdir,", "stdout = helper.run_rosrepo(\"include\", \"-w\", self.wsdir, \"--default\", \"--all\") self.assertEqual(exitcode, 1) self.assertIn(\"cannot resolve dependencies\", stdout)" ]
[ "{ \"folder\": folder, \"prefix\": Profiler.FILES_PREFIX, \"sufix\": Profiler.FILES_SUFFIX, \"full_path\": True, } self.actuator.cfg(cfg) def profile(self):", "logger.info(\"Could not load profiler output %s\", value) return self.profiles if __name__ == \"__main__\":", "formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') handler = logging.StreamHandler() handler.setFormatter(formatter)", "{} self._outputs = [] self.actuator = Actuator() self.cfg_acts() def cfg_acts(self): logger.info(\"Loading Profile Infos\")", "Profiler.FILES_PREFIX, \"sufix\": Profiler.FILES_SUFFIX, \"full_path\": True, } self.actuator.cfg(cfg) def profile(self): self._outputs = self.actuator.get_acts() for", "self.actuator.get_acts() for value in self._outputs.values(): name = value.get(\"name\", None) if name: self.profiles[name] =", "logging.getLogger(__name__) class Profiler: FILES = 'info' FILES_PREFIX = 'info_' FILES_SUFFIX = 'py' def", "logger = logging.getLogger(__name__) class Profiler: FILES = 'info' FILES_PREFIX = 'info_' FILES_SUFFIX =", "name: self.profiles[name] = value else: logger.info(\"Could not load profiler output %s\", value) return", "self._outputs.values(): name = value.get(\"name\", None) if name: self.profiles[name] = value else: logger.info(\"Could not", "profiler output %s\", value) return self.profiles if __name__ == \"__main__\": level = logging.DEBUG", "cfg_acts(self): logger.info(\"Loading Profile Infos\") folder = os.path.join( os.path.dirname(os.path.abspath(__file__)), Profiler.FILES) cfg = { \"folder\":", "level = logging.DEBUG formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') handler", "def __init__(self): self.profiles = {} self._outputs = [] self.actuator = Actuator() self.cfg_acts() def", "def profile(self): self._outputs = self.actuator.get_acts() for value in self._outputs.values(): name = value.get(\"name\", None)", "import Actuator logger = logging.getLogger(__name__) class 
Profiler: FILES = 'info' FILES_PREFIX = 'info_'", "self._outputs = self.actuator.get_acts() for value in self._outputs.values(): name = value.get(\"name\", None) if name:", "Profile Infos\") folder = os.path.join( os.path.dirname(os.path.abspath(__file__)), Profiler.FILES) cfg = { \"folder\": folder, \"prefix\":", "handler.setLevel(level) logging.getLogger().addHandler(handler) logging.getLogger().setLevel(level) logger = logging.getLogger(__name__) prfl = Profiler() msg = prfl.profile() print(msg)", "= value else: logger.info(\"Could not load profiler output %s\", value) return self.profiles if", "= { \"folder\": folder, \"prefix\": Profiler.FILES_PREFIX, \"sufix\": Profiler.FILES_SUFFIX, \"full_path\": True, } self.actuator.cfg(cfg) def", "\"sufix\": Profiler.FILES_SUFFIX, \"full_path\": True, } self.actuator.cfg(cfg) def profile(self): self._outputs = self.actuator.get_acts() for value", "FILES = 'info' FILES_PREFIX = 'info_' FILES_SUFFIX = 'py' def __init__(self): self.profiles =", "for value in self._outputs.values(): name = value.get(\"name\", None) if name: self.profiles[name] = value", "logging.StreamHandler() handler.setFormatter(formatter) handler.setLevel(level) logging.getLogger().addHandler(handler) logging.getLogger().setLevel(level) logger = logging.getLogger(__name__) prfl = Profiler() msg =", "folder = os.path.join( os.path.dirname(os.path.abspath(__file__)), Profiler.FILES) cfg = { \"folder\": folder, \"prefix\": Profiler.FILES_PREFIX, \"sufix\":", "- %(name)s - %(levelname)s - %(message)s') handler = logging.StreamHandler() handler.setFormatter(formatter) handler.setLevel(level) logging.getLogger().addHandler(handler) logging.getLogger().setLevel(level)", "== \"__main__\": level = logging.DEBUG formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s -", "self.actuator.cfg(cfg) def profile(self): self._outputs = self.actuator.get_acts() for value in self._outputs.values(): name = value.get(\"name\",", "self.actuator = Actuator() 
self.cfg_acts() def cfg_acts(self): logger.info(\"Loading Profile Infos\") folder = os.path.join( os.path.dirname(os.path.abspath(__file__)),", "self.profiles = {} self._outputs = [] self.actuator = Actuator() self.cfg_acts() def cfg_acts(self): logger.info(\"Loading", "= 'py' def __init__(self): self.profiles = {} self._outputs = [] self.actuator = Actuator()", "= logging.StreamHandler() handler.setFormatter(formatter) handler.setLevel(level) logging.getLogger().addHandler(handler) logging.getLogger().setLevel(level) logger = logging.getLogger(__name__) prfl = Profiler() msg", "'info' FILES_PREFIX = 'info_' FILES_SUFFIX = 'py' def __init__(self): self.profiles = {} self._outputs", "Actuator() self.cfg_acts() def cfg_acts(self): logger.info(\"Loading Profile Infos\") folder = os.path.join( os.path.dirname(os.path.abspath(__file__)), Profiler.FILES) cfg", "gym.common.process import Actuator logger = logging.getLogger(__name__) class Profiler: FILES = 'info' FILES_PREFIX =", "in self._outputs.values(): name = value.get(\"name\", None) if name: self.profiles[name] = value else: logger.info(\"Could", "= logging.getLogger(__name__) class Profiler: FILES = 'info' FILES_PREFIX = 'info_' FILES_SUFFIX = 'py'", "def cfg_acts(self): logger.info(\"Loading Profile Infos\") folder = os.path.join( os.path.dirname(os.path.abspath(__file__)), Profiler.FILES) cfg = {", "= value.get(\"name\", None) if name: self.profiles[name] = value else: logger.info(\"Could not load profiler", "load profiler output %s\", value) return self.profiles if __name__ == \"__main__\": level =", "handler.setFormatter(formatter) handler.setLevel(level) logging.getLogger().addHandler(handler) logging.getLogger().setLevel(level) logger = logging.getLogger(__name__) prfl = Profiler() msg = prfl.profile()", "json import logging from gym.common.process import Actuator logger = logging.getLogger(__name__) class Profiler: FILES", "__init__(self): self.profiles = {} self._outputs = [] self.actuator = Actuator() 
self.cfg_acts() def cfg_acts(self):", "= 'info_' FILES_SUFFIX = 'py' def __init__(self): self.profiles = {} self._outputs = []", "if name: self.profiles[name] = value else: logger.info(\"Could not load profiler output %s\", value)", "True, } self.actuator.cfg(cfg) def profile(self): self._outputs = self.actuator.get_acts() for value in self._outputs.values(): name", "folder, \"prefix\": Profiler.FILES_PREFIX, \"sufix\": Profiler.FILES_SUFFIX, \"full_path\": True, } self.actuator.cfg(cfg) def profile(self): self._outputs =", "= self.actuator.get_acts() for value in self._outputs.values(): name = value.get(\"name\", None) if name: self.profiles[name]", "cfg = { \"folder\": folder, \"prefix\": Profiler.FILES_PREFIX, \"sufix\": Profiler.FILES_SUFFIX, \"full_path\": True, } self.actuator.cfg(cfg)", "'info_' FILES_SUFFIX = 'py' def __init__(self): self.profiles = {} self._outputs = [] self.actuator", "Infos\") folder = os.path.join( os.path.dirname(os.path.abspath(__file__)), Profiler.FILES) cfg = { \"folder\": folder, \"prefix\": Profiler.FILES_PREFIX,", "return self.profiles if __name__ == \"__main__\": level = logging.DEBUG formatter = logging.Formatter('%(asctime)s -", "value) return self.profiles if __name__ == \"__main__\": level = logging.DEBUG formatter = logging.Formatter('%(asctime)s", "import json import logging from gym.common.process import Actuator logger = logging.getLogger(__name__) class Profiler:", "} self.actuator.cfg(cfg) def profile(self): self._outputs = self.actuator.get_acts() for value in self._outputs.values(): name =", "FILES_PREFIX = 'info_' FILES_SUFFIX = 'py' def __init__(self): self.profiles = {} self._outputs =", "Profiler: FILES = 'info' FILES_PREFIX = 'info_' FILES_SUFFIX = 'py' def __init__(self): self.profiles", "if __name__ == \"__main__\": level = logging.DEBUG formatter = logging.Formatter('%(asctime)s - %(name)s -", "%(levelname)s - %(message)s') handler = logging.StreamHandler() handler.setFormatter(formatter) 
handler.setLevel(level) logging.getLogger().addHandler(handler) logging.getLogger().setLevel(level) logger = logging.getLogger(__name__)", "self.cfg_acts() def cfg_acts(self): logger.info(\"Loading Profile Infos\") folder = os.path.join( os.path.dirname(os.path.abspath(__file__)), Profiler.FILES) cfg =", "%s\", value) return self.profiles if __name__ == \"__main__\": level = logging.DEBUG formatter =", "__name__ == \"__main__\": level = logging.DEBUG formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s", "= {} self._outputs = [] self.actuator = Actuator() self.cfg_acts() def cfg_acts(self): logger.info(\"Loading Profile", "Profiler.FILES_SUFFIX, \"full_path\": True, } self.actuator.cfg(cfg) def profile(self): self._outputs = self.actuator.get_acts() for value in", "output %s\", value) return self.profiles if __name__ == \"__main__\": level = logging.DEBUG formatter", "\"prefix\": Profiler.FILES_PREFIX, \"sufix\": Profiler.FILES_SUFFIX, \"full_path\": True, } self.actuator.cfg(cfg) def profile(self): self._outputs = self.actuator.get_acts()", "Actuator logger = logging.getLogger(__name__) class Profiler: FILES = 'info' FILES_PREFIX = 'info_' FILES_SUFFIX", "os.path.dirname(os.path.abspath(__file__)), Profiler.FILES) cfg = { \"folder\": folder, \"prefix\": Profiler.FILES_PREFIX, \"sufix\": Profiler.FILES_SUFFIX, \"full_path\": True,", "%(message)s') handler = logging.StreamHandler() handler.setFormatter(formatter) handler.setLevel(level) logging.getLogger().addHandler(handler) logging.getLogger().setLevel(level) logger = logging.getLogger(__name__) prfl =", "FILES_SUFFIX = 'py' def __init__(self): self.profiles = {} self._outputs = [] self.actuator =", "<gh_stars>1-10 import os import json import logging from gym.common.process import Actuator logger =", "\"__main__\": level = logging.DEBUG formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')", "= Actuator() self.cfg_acts() def cfg_acts(self): logger.info(\"Loading 
Profile Infos\") folder = os.path.join( os.path.dirname(os.path.abspath(__file__)), Profiler.FILES)", "'py' def __init__(self): self.profiles = {} self._outputs = [] self.actuator = Actuator() self.cfg_acts()", "not load profiler output %s\", value) return self.profiles if __name__ == \"__main__\": level", "- %(message)s') handler = logging.StreamHandler() handler.setFormatter(formatter) handler.setLevel(level) logging.getLogger().addHandler(handler) logging.getLogger().setLevel(level) logger = logging.getLogger(__name__) prfl", "%(name)s - %(levelname)s - %(message)s') handler = logging.StreamHandler() handler.setFormatter(formatter) handler.setLevel(level) logging.getLogger().addHandler(handler) logging.getLogger().setLevel(level) logger", "Profiler.FILES) cfg = { \"folder\": folder, \"prefix\": Profiler.FILES_PREFIX, \"sufix\": Profiler.FILES_SUFFIX, \"full_path\": True, }", "self._outputs = [] self.actuator = Actuator() self.cfg_acts() def cfg_acts(self): logger.info(\"Loading Profile Infos\") folder", "logging from gym.common.process import Actuator logger = logging.getLogger(__name__) class Profiler: FILES = 'info'", "import os import json import logging from gym.common.process import Actuator logger = logging.getLogger(__name__)", "self.profiles if __name__ == \"__main__\": level = logging.DEBUG formatter = logging.Formatter('%(asctime)s - %(name)s", "import logging from gym.common.process import Actuator logger = logging.getLogger(__name__) class Profiler: FILES =", "\"full_path\": True, } self.actuator.cfg(cfg) def profile(self): self._outputs = self.actuator.get_acts() for value in self._outputs.values():", "= [] self.actuator = Actuator() self.cfg_acts() def cfg_acts(self): logger.info(\"Loading Profile Infos\") folder =", "\"folder\": folder, \"prefix\": Profiler.FILES_PREFIX, \"sufix\": Profiler.FILES_SUFFIX, \"full_path\": True, } self.actuator.cfg(cfg) def profile(self): self._outputs", "name = value.get(\"name\", None) if name: self.profiles[name] = 
value else: logger.info(\"Could not load", "profile(self): self._outputs = self.actuator.get_acts() for value in self._outputs.values(): name = value.get(\"name\", None) if", "class Profiler: FILES = 'info' FILES_PREFIX = 'info_' FILES_SUFFIX = 'py' def __init__(self):", "logging.DEBUG formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') handler = logging.StreamHandler()", "= 'info' FILES_PREFIX = 'info_' FILES_SUFFIX = 'py' def __init__(self): self.profiles = {}", "os.path.join( os.path.dirname(os.path.abspath(__file__)), Profiler.FILES) cfg = { \"folder\": folder, \"prefix\": Profiler.FILES_PREFIX, \"sufix\": Profiler.FILES_SUFFIX, \"full_path\":", "None) if name: self.profiles[name] = value else: logger.info(\"Could not load profiler output %s\",", "= logging.DEBUG formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') handler =", "value.get(\"name\", None) if name: self.profiles[name] = value else: logger.info(\"Could not load profiler output", "self.profiles[name] = value else: logger.info(\"Could not load profiler output %s\", value) return self.profiles", "else: logger.info(\"Could not load profiler output %s\", value) return self.profiles if __name__ ==", "[] self.actuator = Actuator() self.cfg_acts() def cfg_acts(self): logger.info(\"Loading Profile Infos\") folder = os.path.join(", "- %(levelname)s - %(message)s') handler = logging.StreamHandler() handler.setFormatter(formatter) handler.setLevel(level) logging.getLogger().addHandler(handler) logging.getLogger().setLevel(level) logger =", "value else: logger.info(\"Could not load profiler output %s\", value) return self.profiles if __name__", "os import json import logging from gym.common.process import Actuator logger = logging.getLogger(__name__) class", "logger.info(\"Loading Profile Infos\") folder = os.path.join( os.path.dirname(os.path.abspath(__file__)), Profiler.FILES) cfg = { \"folder\": folder,", "from gym.common.process import 
Actuator logger = logging.getLogger(__name__) class Profiler: FILES = 'info' FILES_PREFIX", "= os.path.join( os.path.dirname(os.path.abspath(__file__)), Profiler.FILES) cfg = { \"folder\": folder, \"prefix\": Profiler.FILES_PREFIX, \"sufix\": Profiler.FILES_SUFFIX,", "value in self._outputs.values(): name = value.get(\"name\", None) if name: self.profiles[name] = value else:", "handler = logging.StreamHandler() handler.setFormatter(formatter) handler.setLevel(level) logging.getLogger().addHandler(handler) logging.getLogger().setLevel(level) logger = logging.getLogger(__name__) prfl = Profiler()", "= logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') handler = logging.StreamHandler() handler.setFormatter(formatter) handler.setLevel(level)", "logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') handler = logging.StreamHandler() handler.setFormatter(formatter) handler.setLevel(level) logging.getLogger().addHandler(handler)" ]
[ "EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES", "def __eq__(self, other): \"\"\" NullField is always not equal to any other value.", "this list of conditions and the following disclaimer. # 2. Redistributions in binary", "null field that does not exists. \"\"\" def __new__(cls, *args, **kwargs): global nullField", "assert nullField != 0 assert not (nullField > 1) assert not (nullField <", "without # modification, are permitted provided that the following conditions are met: #", "the # documentation and/or other materials provided with the distribution. # 3. Neither", "All rights reserved. # # Redistribution and use in source and binary forms,", "software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY", "1. Redistributions of source code must retain the above copyright # notice, this", "# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE", "PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS", "OpenThread Authors. # All rights reserved. # # Redistribution and use in source", "AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #", "other value. \"\"\" return False def __ne__(self, other): return True def __lt__(self, other):", "and the following disclaimer. # 2. Redistributions in binary form must reproduce the", "2019, The OpenThread Authors. # All rights reserved. # # Redistribution and use", "THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,", "DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED", "def __bool__(self): \"\"\" NullField is always treated as False. 
\"\"\" return False def", "return False def __le__(self, other): \"\"\" Comparing NullField to any other value gets", "return True def __lt__(self, other): \"\"\" Comparing NullField to any other value gets", "CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR #", "NullField to any other value gets False. \"\"\" return False def __ge__(self, other):", "def __len__(self) -> 0: return 0 def __eq__(self, other): \"\"\" NullField is always", "conditions and the following disclaimer in the # documentation and/or other materials provided", "any other value gets False. \"\"\" return False def __str__(self): return \"nullField\" def", "return 0 def __eq__(self, other): \"\"\" NullField is always not equal to any", "if __name__ == '__main__': assert nullField is NullField() assert not nullField, repr(nullField) assert", "assert not nullField, repr(nullField) assert nullField != nullField, repr(nullField) assert nullField != 0", "NullField itself. \"\"\" return self def __setattr__(self, key, value): pass def __len__(self) ->", "value. \"\"\" return False def __ne__(self, other): return True def __lt__(self, other): \"\"\"", "BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF", "SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS #", "LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR", "notice, this list of conditions and the following disclaimer in the # documentation", "!= 0 assert not (nullField > 1) assert not (nullField < 1) assert", "assert not (nullField > 1) assert not (nullField < 1) assert not (nullField", "this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED", "as False. \"\"\" return False def __getattr__(self, item): \"\"\" Any sub field of", "field of the NullField is NullField itself. 
\"\"\" return self def __setattr__(self, key,", "CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT", "nullField = None class NullField(object): \"\"\" Represents a null field that does not", "python3 # # Copyright (c) 2019, The OpenThread Authors. # All rights reserved.", "other value gets False. \"\"\" return False def __le__(self, other): \"\"\" Comparing NullField", "CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR", "BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN", "(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS", "SUCH DAMAGE. # nullField = None class NullField(object): \"\"\" Represents a null field", "= object.__new__(cls, *args, **kwargs) return nullField def __init__(self): assert self is nullField def", "(c) 2019, The OpenThread Authors. # All rights reserved. # # Redistribution and", "IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING", "TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR", "with or without # modification, are permitted provided that the following conditions are", "return False def __ne__(self, other): return True def __lt__(self, other): \"\"\" Comparing NullField", "NullField is always not equal to any other value. 
\"\"\" return False def", "EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF #", "BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL", "def __ne__(self, other): return True def __lt__(self, other): \"\"\" Comparing NullField to any", "holder nor the # names of its contributors may be used to endorse", "global nullField if nullField is None: nullField = object.__new__(cls, *args, **kwargs) return nullField", "# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS", "nullField if nullField is None: nullField = object.__new__(cls, *args, **kwargs) return nullField def", "# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN", "assert not (nullField > nullField) assert bool(nullField) is False assert nullField != \"\"", "\"\"\" return False def __ne__(self, other): return True def __lt__(self, other): \"\"\" Comparing", "Neither the name of the copyright holder nor the # names of its", "and/or other materials provided with the distribution. # 3. Neither the name of", "OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO", "met: # 1. Redistributions of source code must retain the above copyright #", "OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF", "3. Neither the name of the copyright holder nor the # names of", "'__main__': assert nullField is NullField() assert not nullField, repr(nullField) assert nullField != nullField,", "NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A", "assert self is nullField def __bool__(self): \"\"\" NullField is always treated as False.", "above copyright # notice, this list of conditions and the following disclaimer. #", "rights reserved. # # Redistribution and use in source and binary forms, with", "OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY", "permission. 
# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS", "repr(nullField) assert nullField != nullField, repr(nullField) assert nullField != 0 assert not (nullField", "#!/usr/bin/env python3 # # Copyright (c) 2019, The OpenThread Authors. # All rights", "Redistributions in binary form must reproduce the above copyright # notice, this list", "Comparing NullField to any other value gets False. \"\"\" return False def __le__(self,", "*args, **kwargs): global nullField if nullField is None: nullField = object.__new__(cls, *args, **kwargs)", "contributors may be used to endorse or promote products # derived from this", "the # names of its contributors may be used to endorse or promote", "its contributors may be used to endorse or promote products # derived from", "PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS", "ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT", "field that does not exists. \"\"\" def __new__(cls, *args, **kwargs): global nullField if", "bool(nullField) is False assert nullField != \"\" assert nullField != None # noqa", "__getattr__(self, item): \"\"\" Any sub field of the NullField is NullField itself. \"\"\"", "False def __gt__(self, other): \"\"\" Comparing NullField to any other value gets False.", "HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,", "!= nullField, repr(nullField) assert nullField != 0 assert not (nullField > 1) assert", "nor the # names of its contributors may be used to endorse or", "False def __le__(self, other): \"\"\" Comparing NullField to any other value gets False.", "TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE", "return self def __setattr__(self, key, value): pass def __len__(self) -> 0: return 0", "__le__(self, other): \"\"\" Comparing NullField to any other value gets False. \"\"\" return", "Comparing NullField to any other value gets False. 
\"\"\" return False def __gt__(self,", "OF SUCH DAMAGE. # nullField = None class NullField(object): \"\"\" Represents a null", "any other value gets False. \"\"\" return False def __gt__(self, other): \"\"\" Comparing", "DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY", "IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY", "False. \"\"\" return False def __gt__(self, other): \"\"\" Comparing NullField to any other", "\"\"\" Comparing NullField to any other value gets False. \"\"\" return False def", "or without # modification, are permitted provided that the following conditions are met:", "without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE", "Authors. # All rights reserved. # # Redistribution and use in source and", "INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,", "1) assert not (nullField < nullField) assert not (nullField > nullField) assert bool(nullField)", "distribution. # 3. Neither the name of the copyright holder nor the #", "NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF", "def __le__(self, other): \"\"\" Comparing NullField to any other value gets False. \"\"\"", "code must retain the above copyright # notice, this list of conditions and", "# notice, this list of conditions and the following disclaimer in the #", "HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,", "WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN", "WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) #", "__len__(self) -> 0: return 0 def __eq__(self, other): \"\"\" NullField is always not", "equal to any other value. 
\"\"\" return False def __ne__(self, other): return True", "def __new__(cls, *args, **kwargs): global nullField if nullField is None: nullField = object.__new__(cls,", "copyright # notice, this list of conditions and the following disclaimer. # 2.", "are permitted provided that the following conditions are met: # 1. Redistributions of", "LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING,", "> 1) assert not (nullField < 1) assert not (nullField < nullField) assert", "AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR", "ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE #", "PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;", "False def __getattr__(self, item): \"\"\" Any sub field of the NullField is NullField", "nullField is None: nullField = object.__new__(cls, *args, **kwargs) return nullField def __init__(self): assert", "__lt__(self, other): \"\"\" Comparing NullField to any other value gets False. \"\"\" return", "assert bool(nullField) is False assert nullField != \"\" assert nullField != None #", "the following disclaimer. # 2. Redistributions in binary form must reproduce the above", "with the distribution. # 3. Neither the name of the copyright holder nor", "# notice, this list of conditions and the following disclaimer. # 2. Redistributions", "and use in source and binary forms, with or without # modification, are", "not (nullField < 1) assert not (nullField < nullField) assert not (nullField >", "NullField to any other value gets False. \"\"\" return False def __le__(self, other):", "exists. 
\"\"\" def __new__(cls, *args, **kwargs): global nullField if nullField is None: nullField", "this list of conditions and the following disclaimer in the # documentation and/or", "ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT", "# names of its contributors may be used to endorse or promote products", "to any other value gets False. \"\"\" return False def __ge__(self, other): \"\"\"", "1) assert not (nullField < 1) assert not (nullField < nullField) assert not", "NullField() assert not nullField, repr(nullField) assert nullField != nullField, repr(nullField) assert nullField !=", "\"\"\" Any sub field of the NullField is NullField itself. \"\"\" return self", "HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT", "EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT,", "disclaimer in the # documentation and/or other materials provided with the distribution. #", "NullField to any other value gets False. \"\"\" return False def __str__(self): return", "# nullField = None class NullField(object): \"\"\" Represents a null field that does", "__init__(self): assert self is nullField def __bool__(self): \"\"\" NullField is always treated as", "MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT", "nullField = object.__new__(cls, *args, **kwargs) return nullField def __init__(self): assert self is nullField", "a null field that does not exists. 
\"\"\" def __new__(cls, *args, **kwargs): global", "\"\"\" return False def __getattr__(self, item): \"\"\" Any sub field of the NullField", "nullField, repr(nullField) assert nullField != 0 assert not (nullField > 1) assert not", "LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT", "must retain the above copyright # notice, this list of conditions and the", "INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT,", "OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE.", "gets False. \"\"\" return False def __gt__(self, other): \"\"\" Comparing NullField to any", "nullField != nullField, repr(nullField) assert nullField != 0 assert not (nullField > 1)", "Redistribution and use in source and binary forms, with or without # modification,", "source and binary forms, with or without # modification, are permitted provided that", "BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR", "False. \"\"\" return False def __getattr__(self, item): \"\"\" Any sub field of the", "\"\"\" NullField is always not equal to any other value. \"\"\" return False", "__name__ == '__main__': assert nullField is NullField() assert not nullField, repr(nullField) assert nullField", "SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED", "self def __setattr__(self, key, value): pass def __len__(self) -> 0: return 0 def", "always treated as False. \"\"\" return False def __getattr__(self, item): \"\"\" Any sub", "other value gets False. \"\"\" return False def __gt__(self, other): \"\"\" Comparing NullField", "above copyright # notice, this list of conditions and the following disclaimer in", "FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT", "NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,", "that the following conditions are met: # 1. 
Redistributions of source code must", "IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR", "binary form must reproduce the above copyright # notice, this list of conditions", "(INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE", "Redistributions of source code must retain the above copyright # notice, this list", "form must reproduce the above copyright # notice, this list of conditions and", "PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,", "# modification, are permitted provided that the following conditions are met: # 1.", "of conditions and the following disclaimer. # 2. Redistributions in binary form must", "NullField is NullField itself. \"\"\" return self def __setattr__(self, key, value): pass def", "__setattr__(self, key, value): pass def __len__(self) -> 0: return 0 def __eq__(self, other):", "!= \"\" assert nullField != None # noqa assert nullField is not None", "the name of the copyright holder nor the # names of its contributors", "\"\"\" Represents a null field that does not exists. \"\"\" def __new__(cls, *args,", "return False def __str__(self): return \"nullField\" def __repr__(self): return 'nullField' NullField() if __name__", "used to endorse or promote products # derived from this software without specific", "following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright", "__repr__(self): return 'nullField' NullField() if __name__ == '__main__': assert nullField is NullField() assert", "promote products # derived from this software without specific prior written permission. 
#", "the above copyright # notice, this list of conditions and the following disclaimer", "\"\"\" return False def __str__(self): return \"nullField\" def __repr__(self): return 'nullField' NullField() if", "object.__new__(cls, *args, **kwargs) return nullField def __init__(self): assert self is nullField def __bool__(self):", "COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES,", "to any other value. \"\"\" return False def __ne__(self, other): return True def", "other value gets False. \"\"\" return False def __str__(self): return \"nullField\" def __repr__(self):", "the following conditions are met: # 1. Redistributions of source code must retain", "and the following disclaimer in the # documentation and/or other materials provided with", "False. \"\"\" return False def __ge__(self, other): \"\"\" Comparing NullField to any other", "OF THE # POSSIBILITY OF SUCH DAMAGE. # nullField = None class NullField(object):", "def __str__(self): return \"nullField\" def __repr__(self): return 'nullField' NullField() if __name__ == '__main__':", "forms, with or without # modification, are permitted provided that the following conditions", "> nullField) assert bool(nullField) is False assert nullField != \"\" assert nullField !=", "nullField def __init__(self): assert self is nullField def __bool__(self): \"\"\" NullField is always", "value gets False. \"\"\" return False def __gt__(self, other): \"\"\" Comparing NullField to", "return 'nullField' NullField() if __name__ == '__main__': assert nullField is NullField() assert not", "source code must retain the above copyright # notice, this list of conditions", "conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce", "nullField def __bool__(self): \"\"\" NullField is always treated as False. 
\"\"\" return False", "0 assert not (nullField > 1) assert not (nullField < 1) assert not", "in source and binary forms, with or without # modification, are permitted provided", "# # Copyright (c) 2019, The OpenThread Authors. # All rights reserved. #", "def __getattr__(self, item): \"\"\" Any sub field of the NullField is NullField itself.", "other materials provided with the distribution. # 3. Neither the name of the", "# # Redistribution and use in source and binary forms, with or without", "ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF", "permitted provided that the following conditions are met: # 1. Redistributions of source", "other value gets False. \"\"\" return False def __ge__(self, other): \"\"\" Comparing NullField", "False def __ne__(self, other): return True def __lt__(self, other): \"\"\" Comparing NullField to", "not (nullField < nullField) assert not (nullField > nullField) assert bool(nullField) is False", "False def __ge__(self, other): \"\"\" Comparing NullField to any other value gets False.", "is False assert nullField != \"\" assert nullField != None # noqa assert", "COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,", "POSSIBILITY OF SUCH DAMAGE. # nullField = None class NullField(object): \"\"\" Represents a", "the NullField is NullField itself. \"\"\" return self def __setattr__(self, key, value): pass", "always not equal to any other value. \"\"\" return False def __ne__(self, other):", "False assert nullField != \"\" assert nullField != None # noqa assert nullField", "def __gt__(self, other): \"\"\" Comparing NullField to any other value gets False. \"\"\"", "prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS", "modification, are permitted provided that the following conditions are met: # 1. Redistributions", "in the # documentation and/or other materials provided with the distribution. 
# 3.", "repr(nullField) assert nullField != 0 assert not (nullField > 1) assert not (nullField", "ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # nullField = None class", "not exists. \"\"\" def __new__(cls, *args, **kwargs): global nullField if nullField is None:", "(nullField > 1) assert not (nullField < 1) assert not (nullField < nullField)", "False def __str__(self): return \"nullField\" def __repr__(self): return 'nullField' NullField() if __name__ ==", "Represents a null field that does not exists. \"\"\" def __new__(cls, *args, **kwargs):", "IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED.", "0: return 0 def __eq__(self, other): \"\"\" NullField is always not equal to", "is always not equal to any other value. \"\"\" return False def __ne__(self,", "copyright holder nor the # names of its contributors may be used to", "OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR", "< 1) assert not (nullField < nullField) assert not (nullField > nullField) assert", "not (nullField > nullField) assert bool(nullField) is False assert nullField != \"\" assert", "sub field of the NullField is NullField itself. \"\"\" return self def __setattr__(self,", "# documentation and/or other materials provided with the distribution. # 3. Neither the", "list of conditions and the following disclaimer. # 2. Redistributions in binary form", "products # derived from this software without specific prior written permission. # #", "OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR", "OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE", "IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # nullField = None", "SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
# nullField", "INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT", "ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING", "use in source and binary forms, with or without # modification, are permitted", "endorse or promote products # derived from this software without specific prior written", "nullField != \"\" assert nullField != None # noqa assert nullField is not", "in binary form must reproduce the above copyright # notice, this list of", "== '__main__': assert nullField is NullField() assert not nullField, repr(nullField) assert nullField !=", "THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" #", "is NullField itself. \"\"\" return self def __setattr__(self, key, value): pass def __len__(self)", "# 3. Neither the name of the copyright holder nor the # names", "# All rights reserved. # # Redistribution and use in source and binary", "\"\"\" def __new__(cls, *args, **kwargs): global nullField if nullField is None: nullField =", "False. \"\"\" return False def __str__(self): return \"nullField\" def __repr__(self): return 'nullField' NullField()", "# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN", "that does not exists. \"\"\" def __new__(cls, *args, **kwargs): global nullField if nullField", "of the copyright holder nor the # names of its contributors may be", "is None: nullField = object.__new__(cls, *args, **kwargs) return nullField def __init__(self): assert self", "names of its contributors may be used to endorse or promote products #", "None class NullField(object): \"\"\" Represents a null field that does not exists. \"\"\"", "(nullField < nullField) assert not (nullField > nullField) assert bool(nullField) is False assert", "materials provided with the distribution. # 3. Neither the name of the copyright", "following conditions are met: # 1. 
Redistributions of source code must retain the", "# Copyright (c) 2019, The OpenThread Authors. # All rights reserved. # #", "self is nullField def __bool__(self): \"\"\" NullField is always treated as False. \"\"\"", "SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF", "NullField(object): \"\"\" Represents a null field that does not exists. \"\"\" def __new__(cls,", "GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION)", "pass def __len__(self) -> 0: return 0 def __eq__(self, other): \"\"\" NullField is", "IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED", "AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT", "item): \"\"\" Any sub field of the NullField is NullField itself. \"\"\" return", "provided with the distribution. # 3. Neither the name of the copyright holder", "# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN #", "CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY", "is nullField def __bool__(self): \"\"\" NullField is always treated as False. \"\"\" return", "\"\"\" return self def __setattr__(self, key, value): pass def __len__(self) -> 0: return", "True def __lt__(self, other): \"\"\" Comparing NullField to any other value gets False.", "assert nullField != \"\" assert nullField != None # noqa assert nullField is", "A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER", "assert nullField is NullField() assert not nullField, repr(nullField) assert nullField != nullField, repr(nullField)", "OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF", "\"\"\" return False def __gt__(self, other): \"\"\" Comparing NullField to any other value", "to any other value gets False. \"\"\" return False def __gt__(self, other): \"\"\"", "# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE", "not equal to any other value. 
\"\"\" return False def __ne__(self, other): return", "if nullField is None: nullField = object.__new__(cls, *args, **kwargs) return nullField def __init__(self):", "are met: # 1. Redistributions of source code must retain the above copyright", "THE # POSSIBILITY OF SUCH DAMAGE. # nullField = None class NullField(object): \"\"\"", "2. Redistributions in binary form must reproduce the above copyright # notice, this", "IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,", "0 def __eq__(self, other): \"\"\" NullField is always not equal to any other", "ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF", "IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY", "AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL", "other): \"\"\" Comparing NullField to any other value gets False. \"\"\" return False", "reproduce the above copyright # notice, this list of conditions and the following", "gets False. \"\"\" return False def __le__(self, other): \"\"\" Comparing NullField to any", "value gets False. \"\"\" return False def __ge__(self, other): \"\"\" Comparing NullField to", "nullField != 0 assert not (nullField > 1) assert not (nullField < 1)", "def __repr__(self): return 'nullField' NullField() if __name__ == '__main__': assert nullField is NullField()", "and binary forms, with or without # modification, are permitted provided that the", "to any other value gets False. \"\"\" return False def __le__(self, other): \"\"\"", "assert not (nullField < nullField) assert not (nullField > nullField) assert bool(nullField) is", "itself. 
\"\"\" return self def __setattr__(self, key, value): pass def __len__(self) -> 0:", "is NullField() assert not nullField, repr(nullField) assert nullField != nullField, repr(nullField) assert nullField", "SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND", "return False def __getattr__(self, item): \"\"\" Any sub field of the NullField is", "\"\"\" return False def __le__(self, other): \"\"\" Comparing NullField to any other value", "nullField, repr(nullField) assert nullField != nullField, repr(nullField) assert nullField != 0 assert not", "the copyright holder nor the # names of its contributors may be used", "< nullField) assert not (nullField > nullField) assert bool(nullField) is False assert nullField", "SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT,", "following disclaimer in the # documentation and/or other materials provided with the distribution.", "\"\"\" NullField is always treated as False. \"\"\" return False def __getattr__(self, item):", "to any other value gets False. \"\"\" return False def __str__(self): return \"nullField\"", "ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED", "*args, **kwargs) return nullField def __init__(self): assert self is nullField def __bool__(self): \"\"\"", "WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND", "NullField is always treated as False. \"\"\" return False def __getattr__(self, item): \"\"\"", "assert not (nullField < 1) assert not (nullField < nullField) assert not (nullField", "__ne__(self, other): return True def __lt__(self, other): \"\"\" Comparing NullField to any other", "DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE", "OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR", "Copyright (c) 2019, The OpenThread Authors. # All rights reserved. 
# # Redistribution", "OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,", "def __setattr__(self, key, value): pass def __len__(self) -> 0: return 0 def __eq__(self,", "nullField is NullField() assert not nullField, repr(nullField) assert nullField != nullField, repr(nullField) assert", "of conditions and the following disclaimer in the # documentation and/or other materials", "LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)", "of its contributors may be used to endorse or promote products # derived", "# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES", "OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON", "any other value gets False. \"\"\" return False def __le__(self, other): \"\"\" Comparing", "gets False. \"\"\" return False def __ge__(self, other): \"\"\" Comparing NullField to any", "value gets False. \"\"\" return False def __str__(self): return \"nullField\" def __repr__(self): return", "LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,", "None: nullField = object.__new__(cls, *args, **kwargs) return nullField def __init__(self): assert self is", "The OpenThread Authors. # All rights reserved. # # Redistribution and use in", "__str__(self): return \"nullField\" def __repr__(self): return 'nullField' NullField() if __name__ == '__main__': assert", "(nullField < 1) assert not (nullField < nullField) assert not (nullField > nullField)", "LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND", "DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES;", "assert nullField != nullField, repr(nullField) assert nullField != 0 assert not (nullField >", "return False def __ge__(self, other): \"\"\" Comparing NullField to any other value gets", "def __ge__(self, other): \"\"\" Comparing NullField to any other value gets False. 
\"\"\"", "documentation and/or other materials provided with the distribution. # 3. Neither the name", "\"nullField\" def __repr__(self): return 'nullField' NullField() if __name__ == '__main__': assert nullField is", "disclaimer. # 2. Redistributions in binary form must reproduce the above copyright #", "(nullField > nullField) assert bool(nullField) is False assert nullField != \"\" assert nullField", "__new__(cls, *args, **kwargs): global nullField if nullField is None: nullField = object.__new__(cls, *args,", "THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE #", "gets False. \"\"\" return False def __str__(self): return \"nullField\" def __repr__(self): return 'nullField'", "OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS", "THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE", "not (nullField > 1) assert not (nullField < 1) assert not (nullField <", "\"\"\" return False def __ge__(self, other): \"\"\" Comparing NullField to any other value", "Comparing NullField to any other value gets False. \"\"\" return False def __ge__(self,", "other): \"\"\" NullField is always not equal to any other value. \"\"\" return", "notice, this list of conditions and the following disclaimer. # 2. Redistributions in", "THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF", "the following disclaimer in the # documentation and/or other materials provided with the", "# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS", "must reproduce the above copyright # notice, this list of conditions and the", "class NullField(object): \"\"\" Represents a null field that does not exists. 
\"\"\" def", "OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER", "# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"", "return \"nullField\" def __repr__(self): return 'nullField' NullField() if __name__ == '__main__': assert nullField", "any other value gets False. \"\"\" return False def __ge__(self, other): \"\"\" Comparing", "EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # nullField =", "# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE", "-> 0: return 0 def __eq__(self, other): \"\"\" NullField is always not equal", "not nullField, repr(nullField) assert nullField != nullField, repr(nullField) assert nullField != 0 assert", "return nullField def __init__(self): assert self is nullField def __bool__(self): \"\"\" NullField is", "list of conditions and the following disclaimer in the # documentation and/or other", "THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR IMPLIED", "name of the copyright holder nor the # names of its contributors may", "retain the above copyright # notice, this list of conditions and the following", "TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE", "may be used to endorse or promote products # derived from this software", "# derived from this software without specific prior written permission. # # THIS", "__ge__(self, other): \"\"\" Comparing NullField to any other value gets False. \"\"\" return", "'nullField' NullField() if __name__ == '__main__': assert nullField is NullField() assert not nullField,", "USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY", "STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY", "be used to endorse or promote products # derived from this software without", "to endorse or promote products # derived from this software without specific prior", "reserved. 
# # Redistribution and use in source and binary forms, with or", "False. \"\"\" return False def __le__(self, other): \"\"\" Comparing NullField to any other", "THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. #", "\"AS IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED", "# Redistribution and use in source and binary forms, with or without #", "INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS", "the distribution. # 3. Neither the name of the copyright holder nor the", "= None class NullField(object): \"\"\" Represents a null field that does not exists.", "other): return True def __lt__(self, other): \"\"\" Comparing NullField to any other value", "WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE", "FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT", "def __lt__(self, other): \"\"\" Comparing NullField to any other value gets False. \"\"\"", "DAMAGE. # nullField = None class NullField(object): \"\"\" Represents a null field that", "is always treated as False. \"\"\" return False def __getattr__(self, item): \"\"\" Any", "BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR", "def __init__(self): assert self is nullField def __bool__(self): \"\"\" NullField is always treated", "__bool__(self): \"\"\" NullField is always treated as False. \"\"\" return False def __getattr__(self,", "or promote products # derived from this software without specific prior written permission.", "value): pass def __len__(self) -> 0: return 0 def __eq__(self, other): \"\"\" NullField", "any other value. \"\"\" return False def __ne__(self, other): return True def __lt__(self,", "OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER", "__eq__(self, other): \"\"\" NullField is always not equal to any other value. \"\"\"", "of the NullField is NullField itself. 
\"\"\" return self def __setattr__(self, key, value):", "Any sub field of the NullField is NullField itself. \"\"\" return self def", "return False def __gt__(self, other): \"\"\" Comparing NullField to any other value gets", "Comparing NullField to any other value gets False. \"\"\" return False def __str__(self):", "key, value): pass def __len__(self) -> 0: return 0 def __eq__(self, other): \"\"\"", "value gets False. \"\"\" return False def __le__(self, other): \"\"\" Comparing NullField to", "binary forms, with or without # modification, are permitted provided that the following", "of source code must retain the above copyright # notice, this list of", "OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #", "# 2. Redistributions in binary form must reproduce the above copyright # notice,", "NullField() if __name__ == '__main__': assert nullField is NullField() assert not nullField, repr(nullField)", "from this software without specific prior written permission. # # THIS SOFTWARE IS", "**kwargs) return nullField def __init__(self): assert self is nullField def __bool__(self): \"\"\" NullField", "NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY", "# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS", "provided that the following conditions are met: # 1. Redistributions of source code", "USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH", "CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY,", "NullField to any other value gets False. \"\"\" return False def __gt__(self, other):", "the above copyright # notice, this list of conditions and the following disclaimer.", "FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE", "conditions are met: # 1. Redistributions of source code must retain the above", "# 1. Redistributions of source code must retain the above copyright # notice,", "PARTICULAR PURPOSE # ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR", "derived from this software without specific prior written permission. # # THIS SOFTWARE", "**kwargs): global nullField if nullField is None: nullField = object.__new__(cls, *args, **kwargs) return", "written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND", "treated as False. \"\"\" return False def __getattr__(self, item): \"\"\" Any sub field", "nullField) assert bool(nullField) is False assert nullField != \"\" assert nullField != None", "nullField) assert not (nullField > nullField) assert bool(nullField) is False assert nullField !=", "OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF", "copyright # notice, this list of conditions and the following disclaimer in the", "__gt__(self, other): \"\"\" Comparing NullField to any other value gets False. \"\"\" return", "specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT", "# POSSIBILITY OF SUCH DAMAGE. # nullField = None class NullField(object): \"\"\" Represents", "does not exists. \"\"\" def __new__(cls, *args, **kwargs): global nullField if nullField is" ]
[ "Device) -> str: return device.app_current()['package'] + device.app_current()['activity'] def activity_window(device: Device, activity: str) ->", "\"\"\" from adbutils import adb import uiautomator2 as u2 from uiautomator2 import Device", "#\\\"\") return wins.output.strip().split('\\n') def stop_app(device: Device, package: str): device.app_stop(package) def stop_all(device: Device): device.app_stop_all()", "from uiautomator2 import Device def list_all_devices() -> []: \"\"\" 列出当前所有连接的设备 Returns: 已连接设备的序列号数组, 没有连接设备返回空数组", "uiautomator2 as u2 from uiautomator2 import Device def list_all_devices() -> []: \"\"\" 列出当前所有连接的设备", "没有连接设备返回空数组 \"\"\" device_list = adb.device_list() if len(device_list) < 1: return [] else: return", "adb import uiautomator2 as u2 from uiautomator2 import Device def list_all_devices() -> []:", "------- -------- ----------- 2021/10/25 10:23 zxd 1.0 None \"\"\" from adbutils import adb", "+ \" | grep \\\"Window #\\\"\") return wins.output.strip().split('\\n') def stop_app(device: Device, package: str):", "列出当前所有连接的设备 Returns: 已连接设备的序列号数组, 没有连接设备返回空数组 \"\"\" device_list = adb.device_list() if len(device_list) < 1: return", "\" + activity + \" | grep \\\"Window #\\\"\") return wins.output.strip().split('\\n') def stop_app(device:", "@Version @Description ------------ ------- -------- ----------- 2021/10/25 10:23 zxd 1.0 None \"\"\" from", "\\\"Window #\\\"\") return wins.output.strip().split('\\n') def stop_app(device: Device, package: str): device.app_stop(package) def stop_all(device: Device):", "import adb import uiautomator2 as u2 from uiautomator2 import Device def list_all_devices() ->", "<EMAIL> @License : MIT @Modify Time @Author @Version @Description ------------ ------- -------- -----------", "-*- \"\"\" @File : device_util.py @Contact : <EMAIL> @License : MIT @Modify Time", "def list_all_devices() -> []: \"\"\" 列出当前所有连接的设备 Returns: 已连接设备的序列号数组, 没有连接设备返回空数组 \"\"\" device_list = adb.device_list()", "1.0 None \"\"\" from adbutils 
import adb import uiautomator2 as u2 from uiautomator2", "as u2 from uiautomator2 import Device def list_all_devices() -> []: \"\"\" 列出当前所有连接的设备 Returns:", "u2 from uiautomator2 import Device def list_all_devices() -> []: \"\"\" 列出当前所有连接的设备 Returns: 已连接设备的序列号数组,", "= adb.device_list() if len(device_list) < 1: return [] else: return [device.serial for device", "if len(device_list) < 1: return [] else: return [device.serial for device in device_list]", "uiautomator2 import Device def list_all_devices() -> []: \"\"\" 列出当前所有连接的设备 Returns: 已连接设备的序列号数组, 没有连接设备返回空数组 \"\"\"", "已连接设备的序列号数组, 没有连接设备返回空数组 \"\"\" device_list = adb.device_list() if len(device_list) < 1: return [] else:", "Device: return u2.connect(addr) def top_app(device: Device) -> str: return device.current_app()['package'] def top_activity(device: Device)", "from adbutils import adb import uiautomator2 as u2 from uiautomator2 import Device def", "wins.output.strip().split('\\n') def stop_app(device: Device, package: str): device.app_stop(package) def stop_all(device: Device): device.app_stop_all() if __name__", "\"\"\" 列出当前所有连接的设备 Returns: 已连接设备的序列号数组, 没有连接设备返回空数组 \"\"\" device_list = adb.device_list() if len(device_list) < 1:", "= device.shell(\"dumpsys window \" + activity + \" | grep \\\"Window #\\\"\") return", "+ device.app_current()['activity'] def activity_window(device: Device, activity: str) -> list: wins = device.shell(\"dumpsys window", "-> list: wins = device.shell(\"dumpsys window \" + activity + \" | grep", "list: wins = device.shell(\"dumpsys window \" + activity + \" | grep \\\"Window", "import uiautomator2 as u2 from uiautomator2 import Device def list_all_devices() -> []: \"\"\"", "wins = device.shell(\"dumpsys window \" + activity + \" | grep \\\"Window #\\\"\")", "device.app_current()['activity'] def activity_window(device: Device, activity: str) -> list: wins = device.shell(\"dumpsys window \"", "\"\"\" device_list = adb.device_list() if len(device_list) < 1: return [] else: return 
[device.serial", "return device.current_app()['package'] def top_activity(device: Device) -> str: return device.app_current()['package'] + device.app_current()['activity'] def activity_window(device:", "adbutils import adb import uiautomator2 as u2 from uiautomator2 import Device def list_all_devices()", "-> []: \"\"\" 列出当前所有连接的设备 Returns: 已连接设备的序列号数组, 没有连接设备返回空数组 \"\"\" device_list = adb.device_list() if len(device_list)", "#!/usr/bin/env python # -*- encoding: utf-8 -*- \"\"\" @File : device_util.py @Contact :", "activity: str) -> list: wins = device.shell(\"dumpsys window \" + activity + \"", "-*- encoding: utf-8 -*- \"\"\" @File : device_util.py @Contact : <EMAIL> @License :", "-> Device: return u2.connect(addr) def top_app(device: Device) -> str: return device.current_app()['package'] def top_activity(device:", "\"\"\" @File : device_util.py @Contact : <EMAIL> @License : MIT @Modify Time @Author", "top_app(device: Device) -> str: return device.current_app()['package'] def top_activity(device: Device) -> str: return device.app_current()['package']", "[] else: return [device.serial for device in device_list] def connect(addr: str) -> Device:", "\" | grep \\\"Window #\\\"\") return wins.output.strip().split('\\n') def stop_app(device: Device, package: str): device.app_stop(package)", "device_list] def connect(addr: str) -> Device: return u2.connect(addr) def top_app(device: Device) -> str:", "@Description ------------ ------- -------- ----------- 2021/10/25 10:23 zxd 1.0 None \"\"\" from adbutils", "1: return [] else: return [device.serial for device in device_list] def connect(addr: str)", "connect(addr: str) -> Device: return u2.connect(addr) def top_app(device: Device) -> str: return device.current_app()['package']", ": <EMAIL> @License : MIT @Modify Time @Author @Version @Description ------------ ------- --------", "len(device_list) < 1: return [] else: return [device.serial for device in device_list] def", "zxd 1.0 None \"\"\" from adbutils import adb import 
uiautomator2 as u2 from", "None \"\"\" from adbutils import adb import uiautomator2 as u2 from uiautomator2 import", "Time @Author @Version @Description ------------ ------- -------- ----------- 2021/10/25 10:23 zxd 1.0 None", "-------- ----------- 2021/10/25 10:23 zxd 1.0 None \"\"\" from adbutils import adb import", "for device in device_list] def connect(addr: str) -> Device: return u2.connect(addr) def top_app(device:", "return u2.connect(addr) def top_app(device: Device) -> str: return device.current_app()['package'] def top_activity(device: Device) ->", "<gh_stars>0 #!/usr/bin/env python # -*- encoding: utf-8 -*- \"\"\" @File : device_util.py @Contact", "device.app_current()['package'] + device.app_current()['activity'] def activity_window(device: Device, activity: str) -> list: wins = device.shell(\"dumpsys", "device.current_app()['package'] def top_activity(device: Device) -> str: return device.app_current()['package'] + device.app_current()['activity'] def activity_window(device: Device,", "grep \\\"Window #\\\"\") return wins.output.strip().split('\\n') def stop_app(device: Device, package: str): device.app_stop(package) def stop_all(device:", "----------- 2021/10/25 10:23 zxd 1.0 None \"\"\" from adbutils import adb import uiautomator2", "device.shell(\"dumpsys window \" + activity + \" | grep \\\"Window #\\\"\") return wins.output.strip().split('\\n')", "Returns: 已连接设备的序列号数组, 没有连接设备返回空数组 \"\"\" device_list = adb.device_list() if len(device_list) < 1: return []", "return [] else: return [device.serial for device in device_list] def connect(addr: str) ->", "[device.serial for device in device_list] def connect(addr: str) -> Device: return u2.connect(addr) def", "def stop_app(device: Device, package: str): device.app_stop(package) def stop_all(device: Device): device.app_stop_all() if __name__ ==", "< 1: return [] else: return [device.serial for device in device_list] def connect(addr:", "str) -> list: wins = device.shell(\"dumpsys window \" + activity + 
\" |", "[]: \"\"\" 列出当前所有连接的设备 Returns: 已连接设备的序列号数组, 没有连接设备返回空数组 \"\"\" device_list = adb.device_list() if len(device_list) <", "device in device_list] def connect(addr: str) -> Device: return u2.connect(addr) def top_app(device: Device)", "2021/10/25 10:23 zxd 1.0 None \"\"\" from adbutils import adb import uiautomator2 as", "encoding: utf-8 -*- \"\"\" @File : device_util.py @Contact : <EMAIL> @License : MIT", "return device.app_current()['package'] + device.app_current()['activity'] def activity_window(device: Device, activity: str) -> list: wins =", "------------ ------- -------- ----------- 2021/10/25 10:23 zxd 1.0 None \"\"\" from adbutils import", "adb.device_list() if len(device_list) < 1: return [] else: return [device.serial for device in", "utf-8 -*- \"\"\" @File : device_util.py @Contact : <EMAIL> @License : MIT @Modify", "str: return device.app_current()['package'] + device.app_current()['activity'] def activity_window(device: Device, activity: str) -> list: wins", "Device, package: str): device.app_stop(package) def stop_all(device: Device): device.app_stop_all() if __name__ == '__main__': print(list_all_devices())", "activity + \" | grep \\\"Window #\\\"\") return wins.output.strip().split('\\n') def stop_app(device: Device, package:", "str: return device.current_app()['package'] def top_activity(device: Device) -> str: return device.app_current()['package'] + device.app_current()['activity'] def", "window \" + activity + \" | grep \\\"Window #\\\"\") return wins.output.strip().split('\\n') def", "stop_app(device: Device, package: str): device.app_stop(package) def stop_all(device: Device): device.app_stop_all() if __name__ == '__main__':", ": device_util.py @Contact : <EMAIL> @License : MIT @Modify Time @Author @Version @Description", "# -*- encoding: utf-8 -*- \"\"\" @File : device_util.py @Contact : <EMAIL> @License", "import Device def list_all_devices() -> []: \"\"\" 列出当前所有连接的设备 Returns: 已连接设备的序列号数组, 没有连接设备返回空数组 \"\"\" device_list", "| grep 
\\\"Window #\\\"\") return wins.output.strip().split('\\n') def stop_app(device: Device, package: str): device.app_stop(package) def", "@File : device_util.py @Contact : <EMAIL> @License : MIT @Modify Time @Author @Version", "list_all_devices() -> []: \"\"\" 列出当前所有连接的设备 Returns: 已连接设备的序列号数组, 没有连接设备返回空数组 \"\"\" device_list = adb.device_list() if", "str) -> Device: return u2.connect(addr) def top_app(device: Device) -> str: return device.current_app()['package'] def", "device_list = adb.device_list() if len(device_list) < 1: return [] else: return [device.serial for", "-> str: return device.current_app()['package'] def top_activity(device: Device) -> str: return device.app_current()['package'] + device.app_current()['activity']", "device_util.py @Contact : <EMAIL> @License : MIT @Modify Time @Author @Version @Description ------------", "@Author @Version @Description ------------ ------- -------- ----------- 2021/10/25 10:23 zxd 1.0 None \"\"\"", "@License : MIT @Modify Time @Author @Version @Description ------------ ------- -------- ----------- 2021/10/25", "def top_activity(device: Device) -> str: return device.app_current()['package'] + device.app_current()['activity'] def activity_window(device: Device, activity:", "return wins.output.strip().split('\\n') def stop_app(device: Device, package: str): device.app_stop(package) def stop_all(device: Device): device.app_stop_all() if", "def activity_window(device: Device, activity: str) -> list: wins = device.shell(\"dumpsys window \" +", "Device) -> str: return device.current_app()['package'] def top_activity(device: Device) -> str: return device.app_current()['package'] +", "in device_list] def connect(addr: str) -> Device: return u2.connect(addr) def top_app(device: Device) ->", "Device def list_all_devices() -> []: \"\"\" 列出当前所有连接的设备 Returns: 已连接设备的序列号数组, 没有连接设备返回空数组 \"\"\" device_list =", "top_activity(device: Device) -> str: return device.app_current()['package'] + device.app_current()['activity'] def 
activity_window(device: Device, activity: str)", "python # -*- encoding: utf-8 -*- \"\"\" @File : device_util.py @Contact : <EMAIL>", "activity_window(device: Device, activity: str) -> list: wins = device.shell(\"dumpsys window \" + activity", "MIT @Modify Time @Author @Version @Description ------------ ------- -------- ----------- 2021/10/25 10:23 zxd", "+ activity + \" | grep \\\"Window #\\\"\") return wins.output.strip().split('\\n') def stop_app(device: Device,", "-> str: return device.app_current()['package'] + device.app_current()['activity'] def activity_window(device: Device, activity: str) -> list:", "@Modify Time @Author @Version @Description ------------ ------- -------- ----------- 2021/10/25 10:23 zxd 1.0", "u2.connect(addr) def top_app(device: Device) -> str: return device.current_app()['package'] def top_activity(device: Device) -> str:", "10:23 zxd 1.0 None \"\"\" from adbutils import adb import uiautomator2 as u2", "def connect(addr: str) -> Device: return u2.connect(addr) def top_app(device: Device) -> str: return", "return [device.serial for device in device_list] def connect(addr: str) -> Device: return u2.connect(addr)", "def top_app(device: Device) -> str: return device.current_app()['package'] def top_activity(device: Device) -> str: return", ": MIT @Modify Time @Author @Version @Description ------------ ------- -------- ----------- 2021/10/25 10:23", "else: return [device.serial for device in device_list] def connect(addr: str) -> Device: return", "@Contact : <EMAIL> @License : MIT @Modify Time @Author @Version @Description ------------ -------", "Device, activity: str) -> list: wins = device.shell(\"dumpsys window \" + activity +" ]
[ "executable, '--www-port=' + str(port), '--rsession-which-r=' + _get_r_executable(), ] env = get_r_env() if env.get('LD_LIBRARY_PATH'):", "env def setup_shiny(): '''Manage a Shiny instance.''' def _get_shiny_cmd(port): # server.r_path ??? conf", "os import tempfile import subprocess import getpass import shutil from textwrap import dedent", "if kernel_name: kernel_spec = nbapp.kernel_spec_manager.get_kernel_spec(kernel_name) env.update(kernel_spec.env) executable = kernel_spec.argv[0] # patch LD_LIBRARY_PATH for", "env.update({ # 'LD_LIBRARY_PATH': r_lib_dir + ':' + conda_lib_dir 'LD_LIBRARY_PATH': conda_lib_dir }) except Exception:", "R installation, and let user install stuff in home folder? env.update({ 'R_DOC_DIR': R_DOC_DIR,", "nbapp.kernel_manager.default_kernel_name if kernel_name: kernel_spec = nbapp.kernel_spec_manager.get_kernel_spec(kernel_name) return kernel_spec.argv[0] except Exception: nbapp.log.warning('Error when trying", "rserver in PATH') cmd = [ executable, '--www-port=' + str(port), '--rsession-which-r=' + _get_r_executable(),", "'RSTUDIO_DEFAULT_R_VERSION_HOME': R_HOME, 'RSTUDIO_DEFAULT_R_VERSION': version, }) return env def setup_shiny(): '''Manage a Shiny instance.'''", "if user specific? # make R kernel used configurable? # ... or rather", "_get_rsession_env(port): env = get_r_env() # rserver needs USER to be set to something", "be set to something sensible, # otherwise it'll throw up an authentication page", "to get R executable from kernel') # Detect various environment variables rsession requires", "with java) # e.g. 
would be nice if RStudio terminal starts with correct", "'icons', 'shiny.svg') } } def setup_rstudio(): def _get_rsession_env(port): env = get_r_env() # rserver", "'lib') #r_lib_dir = os.path.join(conda_lib_dir, 'R/lib') env.update({ # 'LD_LIBRARY_PATH': r_lib_dir + ':' + conda_lib_dir", "= getpass.getuser() return env def _get_r_executable(): try: # get notebook app from notebook.notebookapp", "setup_shiny(): '''Manage a Shiny instance.''' def _get_shiny_cmd(port): # server.r_path ??? conf = dedent(\"\"\"", "= [ # When rstudio-server deb is installed '/usr/lib/rstudio-server/bin/rserver', ] if shutil.which('rserver'): executable", "def setup_shiny(): '''Manage a Shiny instance.''' def _get_shiny_cmd(port): # server.r_path ??? conf =", "-> either patch ~/Renviron / Renviron.site # -> user Rprofile.site (if conda env", "{site_dir}; log_dir {site_dir}/logs; directory_index on; }} }} \"\"\").format( user=getpass.getuser(), port=str(port), site_dir=os.getcwd() ) f", "[ # When rstudio-server deb is installed '/usr/lib/rstudio-server/bin/rserver', ] if shutil.which('rserver'): executable =", "env.update({ 'R_DOC_DIR': R_DOC_DIR, 'R_HOME': R_HOME, 'R_INCLUDE_DIR': R_INCLUDE_DIR, 'R_SHARE_DIR': R_SHARE_DIR, 'RSTUDIO_DEFAULT_R_VERSION_HOME': R_HOME, 'RSTUDIO_DEFAULT_R_VERSION': version,", "vars? # e.g. MAXENT, DISPLAY='' (to avoid issues with java) # e.g. would", "f.write(conf) f.close() return ['shiny-server', f.name] def _get_shiny_env(port): env = get_r_env() return env return", "conda env specific?) # -> use ~/.Rprofile ... if user specific? 
# make", "}} \"\"\").format( user=getpass.getuser(), port=str(port), site_dir=os.getcwd() ) f = tempfile.NamedTemporaryFile(mode='w', delete=False) f.write(conf) f.close() return", "kernel_name = nbapp.kernel_manager.default_kernel_name if kernel_name: kernel_spec = nbapp.kernel_spec_manager.get_kernel_spec(kernel_name) return kernel_spec.argv[0] except Exception: nbapp.log.warning('Error", "kernel') return 'R' def _get_rsession_cmd(port): # Other paths rsession maybe in other_paths =", "from kernel') return 'R' def _get_rsession_cmd(port): # Other paths rsession maybe in other_paths", "_get_rsession_cmd(port): # Other paths rsession maybe in other_paths = [ # When rstudio-server", "import os import tempfile import subprocess import getpass import shutil from textwrap import", "java) # e.g. would be nice if RStudio terminal starts with correct conda", "conda env? # -> either patch ~/Renviron / Renviron.site # -> user Rprofile.site", "nbapp.kernel_spec_manager.get_kernel_spec(kernel_name) return kernel_spec.argv[0] except Exception: nbapp.log.warning('Error when trying to get R executable from", "... if user specific? # make R kernel used configurable? # ... 
or", "conda_lib_dir = os.path.join(env['CONDA_PREFIX'], 'lib') #r_lib_dir = os.path.join(conda_lib_dir, 'R/lib') env.update({ # 'LD_LIBRARY_PATH': r_lib_dir +", "import dedent def get_r_env(): env = {} executable = 'R' try: # get", "= [ executable, '--www-port=' + str(port), '--rsession-which-r=' + _get_r_executable(), ] env = get_r_env()", "in PATH') cmd = [ executable, '--www-port=' + str(port), '--rsession-which-r=' + _get_r_executable(), ]", "# get notebook app from notebook.notebookapp import NotebookApp nbapp = NotebookApp.instance() # get", "'--rsession-which-r=' + _get_r_executable(), ] env = get_r_env() if env.get('LD_LIBRARY_PATH'): cmd.append('--rsession-ld-library-path=' + env['LD_LIBRARY_PATH']) return", "nbapp.kernel_manager.default_kernel_name if kernel_name: kernel_spec = nbapp.kernel_spec_manager.get_kernel_spec(kernel_name) env.update(kernel_spec.env) executable = kernel_spec.argv[0] # patch LD_LIBRARY_PATH", "TODO: # maybe set a few more env vars? # e.g. MAXENT, DISPLAY=''", "'--slave', '--vanilla', '-e', 'cat(paste(R.home(\"home\"),R.home(\"share\"),R.home(\"include\"),R.home(\"doc\"),getRversion(),sep=\":\"))'] r_output = subprocess.check_output(cmd) R_HOME, R_SHARE_DIR, R_INCLUDE_DIR, R_DOC_DIR, version =", "maybe set a few more env vars? # e.g. MAXENT, DISPLAY='' (to avoid", "requires to run # Via rstudio's src/cpp/core/r_util/REnvironmentPosix.cpp cmd = [executable, '--slave', '--vanilla', '-e',", "= kernel_spec.argv[0] # patch LD_LIBRARY_PATH for conda env conda_lib_dir = os.path.join(env['CONDA_PREFIX'], 'lib') #r_lib_dir", "{ 'title': 'Shiny', 'icon_path': os.path.join(os.path.dirname(os.path.abspath(__file__)), 'icons', 'shiny.svg') } } def setup_rstudio(): def _get_rsession_env(port):", "import NotebookApp nbapp = NotebookApp.instance() # get R executable: kernel_name = nbapp.kernel_manager.default_kernel_name if", "RStudio terminal starts with correct conda env? # -> either patch ~/Renviron /", "avoid issues with java) # e.g. 
would be nice if RStudio terminal starts", ") f = tempfile.NamedTemporaryFile(mode='w', delete=False) f.write(conf) f.close() return ['shiny-server', f.name] def _get_shiny_env(port): env", "+ env['LD_LIBRARY_PATH']) return cmd return { 'command': _get_rsession_cmd, 'environment': _get_rsession_env, 'launcher_entry': { 'title':", "# When rstudio-server deb is installed '/usr/lib/rstudio-server/bin/rserver', ] if shutil.which('rserver'): executable = 'rserver'", "needs USER to be set to something sensible, # otherwise it'll throw up", "'R_HOME': R_HOME, 'R_INCLUDE_DIR': R_INCLUDE_DIR, 'R_SHARE_DIR': R_SHARE_DIR, 'RSTUDIO_DEFAULT_R_VERSION_HOME': R_HOME, 'RSTUDIO_DEFAULT_R_VERSION': version, }) return env", "# otherwise it'll throw up an authentication page if not os.environ.get('USER', ''): env['USER']", "{{ site_dir {site_dir}; log_dir {site_dir}/logs; directory_index on; }} }} \"\"\").format( user=getpass.getuser(), port=str(port), site_dir=os.getcwd()", "env['LD_LIBRARY_PATH']) return cmd return { 'command': _get_rsession_cmd, 'environment': _get_rsession_env, 'launcher_entry': { 'title': 'RStudio',", "deb is installed '/usr/lib/rstudio-server/bin/rserver', ] if shutil.which('rserver'): executable = 'rserver' else: for op", "# e.g. would be nice if RStudio terminal starts with correct conda env?", "= 'R' try: # get notebook app from notebook.notebookapp import NotebookApp nbapp =", "executable = op break else: raise FileNotFoundError('Can not find rserver in PATH') cmd", "patch ~/Renviron / Renviron.site # -> user Rprofile.site (if conda env specific?) 
#", "[executable, '--slave', '--vanilla', '-e', 'cat(paste(R.home(\"home\"),R.home(\"share\"),R.home(\"include\"),R.home(\"doc\"),getRversion(),sep=\":\"))'] r_output = subprocess.check_output(cmd) R_HOME, R_SHARE_DIR, R_INCLUDE_DIR, R_DOC_DIR, version", "shutil from textwrap import dedent def get_r_env(): env = {} executable = 'R'", "get R executable from kernel') return 'R' def _get_rsession_cmd(port): # Other paths rsession", "in other_paths = [ # When rstudio-server deb is installed '/usr/lib/rstudio-server/bin/rserver', ] if", "get R executable: kernel_name = nbapp.kernel_manager.default_kernel_name if kernel_name: kernel_spec = nbapp.kernel_spec_manager.get_kernel_spec(kernel_name) return kernel_spec.argv[0]", "trying to get R executable from kernel') # Detect various environment variables rsession", "to something sensible, # otherwise it'll throw up an authentication page if not", "os.environ.get('USER', ''): env['USER'] = getpass.getuser() return env def _get_r_executable(): try: # get notebook", "= nbapp.kernel_manager.default_kernel_name if kernel_name: kernel_spec = nbapp.kernel_spec_manager.get_kernel_spec(kernel_name) return kernel_spec.argv[0] except Exception: nbapp.log.warning('Error when", "kernel_name: kernel_spec = nbapp.kernel_spec_manager.get_kernel_spec(kernel_name) env.update(kernel_spec.env) executable = kernel_spec.argv[0] # patch LD_LIBRARY_PATH for conda", "from notebook.notebookapp import NotebookApp nbapp = NotebookApp.instance() kernel_name = nbapp.kernel_manager.default_kernel_name if kernel_name: kernel_spec", "correct conda env? 
# -> either patch ~/Renviron / Renviron.site # -> user", "os.path.join(conda_lib_dir, 'R/lib') env.update({ # 'LD_LIBRARY_PATH': r_lib_dir + ':' + conda_lib_dir 'LD_LIBRARY_PATH': conda_lib_dir })", "maybe in other_paths = [ # When rstudio-server deb is installed '/usr/lib/rstudio-server/bin/rserver', ]", "'environment': _get_shiny_env, 'launcher_entry': { 'title': 'Shiny', 'icon_path': os.path.join(os.path.dirname(os.path.abspath(__file__)), 'icons', 'shiny.svg') } } def", "if os.path.exists(op): executable = op break else: raise FileNotFoundError('Can not find rserver in", "NotebookApp.instance() # get R executable: kernel_name = nbapp.kernel_manager.default_kernel_name if kernel_name: kernel_spec = nbapp.kernel_spec_manager.get_kernel_spec(kernel_name)", "nbapp = NotebookApp.instance() kernel_name = nbapp.kernel_manager.default_kernel_name if kernel_name: kernel_spec = nbapp.kernel_spec_manager.get_kernel_spec(kernel_name) env.update(kernel_spec.env) executable", "= os.path.join(conda_lib_dir, 'R/lib') env.update({ # 'LD_LIBRARY_PATH': r_lib_dir + ':' + conda_lib_dir 'LD_LIBRARY_PATH': conda_lib_dir", "{ 'command': _get_rsession_cmd, 'environment': _get_rsession_env, 'launcher_entry': { 'title': 'RStudio', 'icon_path': os.path.join(os.path.dirname(os.path.abspath(__file__)), 'icons', 'rstudio.svg')", "used configurable? # ... or rather use standard system R installation, and let", "_get_shiny_cmd(port): # server.r_path ??? 
conf = dedent(\"\"\" run_as {user}; server {{ bookmark_state_dir {site_dir}/shiny-server-boomarks;", "'launcher_entry': { 'title': 'Shiny', 'icon_path': os.path.join(os.path.dirname(os.path.abspath(__file__)), 'icons', 'shiny.svg') } } def setup_rstudio(): def", "rsession requires to run # Via rstudio's src/cpp/core/r_util/REnvironmentPosix.cpp cmd = [executable, '--slave', '--vanilla',", "other_paths: if os.path.exists(op): executable = op break else: raise FileNotFoundError('Can not find rserver", "log_dir {site_dir}/logs; directory_index on; }} }} \"\"\").format( user=getpass.getuser(), port=str(port), site_dir=os.getcwd() ) f =", "executable from kernel') # Detect various environment variables rsession requires to run #", "'R_INCLUDE_DIR': R_INCLUDE_DIR, 'R_SHARE_DIR': R_SHARE_DIR, 'RSTUDIO_DEFAULT_R_VERSION_HOME': R_HOME, 'RSTUDIO_DEFAULT_R_VERSION': version, }) return env def setup_shiny():", "set to something sensible, # otherwise it'll throw up an authentication page if", "tempfile import subprocess import getpass import shutil from textwrap import dedent def get_r_env():", "rserver needs USER to be set to something sensible, # otherwise it'll throw", "directory_index on; }} }} \"\"\").format( user=getpass.getuser(), port=str(port), site_dir=os.getcwd() ) f = tempfile.NamedTemporaryFile(mode='w', delete=False)", "# make R kernel used configurable? # ... or rather use standard system", "get notebook app from notebook.notebookapp import NotebookApp nbapp = NotebookApp.instance() # get R", "MAXENT, DISPLAY='' (to avoid issues with java) # e.g. would be nice if", "executable = kernel_spec.argv[0] # patch LD_LIBRARY_PATH for conda env conda_lib_dir = os.path.join(env['CONDA_PREFIX'], 'lib')", "# ... 
or rather use standard system R installation, and let user install", "# rserver needs USER to be set to something sensible, # otherwise it'll", "# Via rstudio's src/cpp/core/r_util/REnvironmentPosix.cpp cmd = [executable, '--slave', '--vanilla', '-e', 'cat(paste(R.home(\"home\"),R.home(\"share\"),R.home(\"include\"),R.home(\"doc\"),getRversion(),sep=\":\"))'] r_output =", "configurable? # ... or rather use standard system R installation, and let user", "textwrap import dedent def get_r_env(): env = {} executable = 'R' try: #", "getpass import shutil from textwrap import dedent def get_r_env(): env = {} executable", "notebook.notebookapp import NotebookApp nbapp = NotebookApp.instance() # get R executable: kernel_name = nbapp.kernel_manager.default_kernel_name", "Via rstudio's src/cpp/core/r_util/REnvironmentPosix.cpp cmd = [executable, '--slave', '--vanilla', '-e', 'cat(paste(R.home(\"home\"),R.home(\"share\"),R.home(\"include\"),R.home(\"doc\"),getRversion(),sep=\":\"))'] r_output = subprocess.check_output(cmd)", "specific? # make R kernel used configurable? # ... 
or rather use standard", "'R_DOC_DIR': R_DOC_DIR, 'R_HOME': R_HOME, 'R_INCLUDE_DIR': R_INCLUDE_DIR, 'R_SHARE_DIR': R_SHARE_DIR, 'RSTUDIO_DEFAULT_R_VERSION_HOME': R_HOME, 'RSTUDIO_DEFAULT_R_VERSION': version, })", "'rserver' else: for op in other_paths: if os.path.exists(op): executable = op break else:", "R_SHARE_DIR, R_INCLUDE_DIR, R_DOC_DIR, version = \\ r_output.decode().split(':') # TODO: # maybe set a", "version = \\ r_output.decode().split(':') # TODO: # maybe set a few more env", "from kernel') # Detect various environment variables rsession requires to run # Via", "tempfile.NamedTemporaryFile(mode='w', delete=False) f.write(conf) f.close() return ['shiny-server', f.name] def _get_shiny_env(port): env = get_r_env() return", "src/cpp/core/r_util/REnvironmentPosix.cpp cmd = [executable, '--slave', '--vanilla', '-e', 'cat(paste(R.home(\"home\"),R.home(\"share\"),R.home(\"include\"),R.home(\"doc\"),getRversion(),sep=\":\"))'] r_output = subprocess.check_output(cmd) R_HOME, R_SHARE_DIR,", "make R kernel used configurable? # ... or rather use standard system R", "_get_shiny_env, 'launcher_entry': { 'title': 'Shiny', 'icon_path': os.path.join(os.path.dirname(os.path.abspath(__file__)), 'icons', 'shiny.svg') } } def setup_rstudio():", "return env return { 'command': _get_shiny_cmd, 'environment': _get_shiny_env, 'launcher_entry': { 'title': 'Shiny', 'icon_path':", "and let user install stuff in home folder? 
env.update({ 'R_DOC_DIR': R_DOC_DIR, 'R_HOME': R_HOME,", "'R/lib') env.update({ # 'LD_LIBRARY_PATH': r_lib_dir + ':' + conda_lib_dir 'LD_LIBRARY_PATH': conda_lib_dir }) except", "site_dir {site_dir}; log_dir {site_dir}/logs; directory_index on; }} }} \"\"\").format( user=getpass.getuser(), port=str(port), site_dir=os.getcwd() )", "'title': 'Shiny', 'icon_path': os.path.join(os.path.dirname(os.path.abspath(__file__)), 'icons', 'shiny.svg') } } def setup_rstudio(): def _get_rsession_env(port): env", "notebook app from notebook.notebookapp import NotebookApp nbapp = NotebookApp.instance() kernel_name = nbapp.kernel_manager.default_kernel_name if", "R_HOME, R_SHARE_DIR, R_INCLUDE_DIR, R_DOC_DIR, version = \\ r_output.decode().split(':') # TODO: # maybe set", "if env.get('LD_LIBRARY_PATH'): cmd.append('--rsession-ld-library-path=' + env['LD_LIBRARY_PATH']) return cmd return { 'command': _get_rsession_cmd, 'environment': _get_rsession_env,", "would be nice if RStudio terminal starts with correct conda env? # ->", "Exception: nbapp.log.warning('Error when trying to get R executable from kernel') return 'R' def", "NotebookApp nbapp = NotebookApp.instance() kernel_name = nbapp.kernel_manager.default_kernel_name if kernel_name: kernel_spec = nbapp.kernel_spec_manager.get_kernel_spec(kernel_name) env.update(kernel_spec.env)", "R kernel used configurable? # ... 
or rather use standard system R installation,", "_get_r_executable(): try: # get notebook app from notebook.notebookapp import NotebookApp nbapp = NotebookApp.instance()", "getpass.getuser() return env def _get_r_executable(): try: # get notebook app from notebook.notebookapp import", "R_HOME, 'R_INCLUDE_DIR': R_INCLUDE_DIR, 'R_SHARE_DIR': R_SHARE_DIR, 'RSTUDIO_DEFAULT_R_VERSION_HOME': R_HOME, 'RSTUDIO_DEFAULT_R_VERSION': version, }) return env def", "R_DOC_DIR, version = \\ r_output.decode().split(':') # TODO: # maybe set a few more", "# -> either patch ~/Renviron / Renviron.site # -> user Rprofile.site (if conda", "Rprofile.site (if conda env specific?) # -> use ~/.Rprofile ... if user specific?", "except Exception: nbapp.log.warning('Error when trying to get R executable from kernel') return 'R'", "except Exception: nbapp.log.warning('Error when trying to get R executable from kernel') # Detect", "(if conda env specific?) # -> use ~/.Rprofile ... if user specific? #", "to run # Via rstudio's src/cpp/core/r_util/REnvironmentPosix.cpp cmd = [executable, '--slave', '--vanilla', '-e', 'cat(paste(R.home(\"home\"),R.home(\"share\"),R.home(\"include\"),R.home(\"doc\"),getRversion(),sep=\":\"))']", "find rserver in PATH') cmd = [ executable, '--www-port=' + str(port), '--rsession-which-r=' +", "nbapp.kernel_spec_manager.get_kernel_spec(kernel_name) env.update(kernel_spec.env) executable = kernel_spec.argv[0] # patch LD_LIBRARY_PATH for conda env conda_lib_dir =", "= nbapp.kernel_spec_manager.get_kernel_spec(kernel_name) return kernel_spec.argv[0] except Exception: nbapp.log.warning('Error when trying to get R executable", "kernel_spec.argv[0] except Exception: nbapp.log.warning('Error when trying to get R executable from kernel') return", "env def _get_r_executable(): try: # get notebook app from notebook.notebookapp import NotebookApp nbapp", "Renviron.site # -> user Rprofile.site (if conda env specific?) 
# -> use ~/.Rprofile", "port=str(port), site_dir=os.getcwd() ) f = tempfile.NamedTemporaryFile(mode='w', delete=False) f.write(conf) f.close() return ['shiny-server', f.name] def", "= nbapp.kernel_manager.default_kernel_name if kernel_name: kernel_spec = nbapp.kernel_spec_manager.get_kernel_spec(kernel_name) env.update(kernel_spec.env) executable = kernel_spec.argv[0] # patch", "= get_r_env() if env.get('LD_LIBRARY_PATH'): cmd.append('--rsession-ld-library-path=' + env['LD_LIBRARY_PATH']) return cmd return { 'command': _get_rsession_cmd,", "# 'LD_LIBRARY_PATH': r_lib_dir + ':' + conda_lib_dir 'LD_LIBRARY_PATH': conda_lib_dir }) except Exception: nbapp.log.warning('Error", "['shiny-server', f.name] def _get_shiny_env(port): env = get_r_env() return env return { 'command': _get_shiny_cmd,", "= 'rserver' else: for op in other_paths: if os.path.exists(op): executable = op break", "R_DOC_DIR, 'R_HOME': R_HOME, 'R_INCLUDE_DIR': R_INCLUDE_DIR, 'R_SHARE_DIR': R_SHARE_DIR, 'RSTUDIO_DEFAULT_R_VERSION_HOME': R_HOME, 'RSTUDIO_DEFAULT_R_VERSION': version, }) return", "+ _get_r_executable(), ] env = get_r_env() if env.get('LD_LIBRARY_PATH'): cmd.append('--rsession-ld-library-path=' + env['LD_LIBRARY_PATH']) return cmd", "+ ':' + conda_lib_dir 'LD_LIBRARY_PATH': conda_lib_dir }) except Exception: nbapp.log.warning('Error when trying to", "user install stuff in home folder? env.update({ 'R_DOC_DIR': R_DOC_DIR, 'R_HOME': R_HOME, 'R_INCLUDE_DIR': R_INCLUDE_DIR,", "subprocess.check_output(cmd) R_HOME, R_SHARE_DIR, R_INCLUDE_DIR, R_DOC_DIR, version = \\ r_output.decode().split(':') # TODO: # maybe", "{} executable = 'R' try: # get notebook app from notebook.notebookapp import NotebookApp", "kernel') # Detect various environment variables rsession requires to run # Via rstudio's", "else: raise FileNotFoundError('Can not find rserver in PATH') cmd = [ executable, '--www-port='", "home folder? 
env.update({ 'R_DOC_DIR': R_DOC_DIR, 'R_HOME': R_HOME, 'R_INCLUDE_DIR': R_INCLUDE_DIR, 'R_SHARE_DIR': R_SHARE_DIR, 'RSTUDIO_DEFAULT_R_VERSION_HOME': R_HOME,", "executable: kernel_name = nbapp.kernel_manager.default_kernel_name if kernel_name: kernel_spec = nbapp.kernel_spec_manager.get_kernel_spec(kernel_name) return kernel_spec.argv[0] except Exception:", "dedent(\"\"\" run_as {user}; server {{ bookmark_state_dir {site_dir}/shiny-server-boomarks; listen {port}; location / {{ site_dir", "os.path.join(env['CONDA_PREFIX'], 'lib') #r_lib_dir = os.path.join(conda_lib_dir, 'R/lib') env.update({ # 'LD_LIBRARY_PATH': r_lib_dir + ':' +", "not find rserver in PATH') cmd = [ executable, '--www-port=' + str(port), '--rsession-which-r='", "= subprocess.check_output(cmd) R_HOME, R_SHARE_DIR, R_INCLUDE_DIR, R_DOC_DIR, version = \\ r_output.decode().split(':') # TODO: #", "env.update(kernel_spec.env) executable = kernel_spec.argv[0] # patch LD_LIBRARY_PATH for conda env conda_lib_dir = os.path.join(env['CONDA_PREFIX'],", "site_dir=os.getcwd() ) f = tempfile.NamedTemporaryFile(mode='w', delete=False) f.write(conf) f.close() return ['shiny-server', f.name] def _get_shiny_env(port):", "run_as {user}; server {{ bookmark_state_dir {site_dir}/shiny-server-boomarks; listen {port}; location / {{ site_dir {site_dir};", "if kernel_name: kernel_spec = nbapp.kernel_spec_manager.get_kernel_spec(kernel_name) return kernel_spec.argv[0] except Exception: nbapp.log.warning('Error when trying to", "= nbapp.kernel_spec_manager.get_kernel_spec(kernel_name) env.update(kernel_spec.env) executable = kernel_spec.argv[0] # patch LD_LIBRARY_PATH for conda env conda_lib_dir", "bookmark_state_dir {site_dir}/shiny-server-boomarks; listen {port}; location / {{ site_dir {site_dir}; log_dir {site_dir}/logs; directory_index on;", "it'll throw up an authentication page if not os.environ.get('USER', ''): env['USER'] = getpass.getuser()", "if shutil.which('rserver'): executable = 'rserver' else: for op in 
other_paths: if os.path.exists(op): executable", "executable = 'rserver' else: for op in other_paths: if os.path.exists(op): executable = op", "with correct conda env? # -> either patch ~/Renviron / Renviron.site # ->", "'''Manage a Shiny instance.''' def _get_shiny_cmd(port): # server.r_path ??? conf = dedent(\"\"\" run_as", "} def setup_rstudio(): def _get_rsession_env(port): env = get_r_env() # rserver needs USER to", "run # Via rstudio's src/cpp/core/r_util/REnvironmentPosix.cpp cmd = [executable, '--slave', '--vanilla', '-e', 'cat(paste(R.home(\"home\"),R.home(\"share\"),R.home(\"include\"),R.home(\"doc\"),getRversion(),sep=\":\"))'] r_output", "+ conda_lib_dir 'LD_LIBRARY_PATH': conda_lib_dir }) except Exception: nbapp.log.warning('Error when trying to get R", "def _get_rsession_cmd(port): # Other paths rsession maybe in other_paths = [ # When", "'R_SHARE_DIR': R_SHARE_DIR, 'RSTUDIO_DEFAULT_R_VERSION_HOME': R_HOME, 'RSTUDIO_DEFAULT_R_VERSION': version, }) return env def setup_shiny(): '''Manage a", "stuff in home folder? env.update({ 'R_DOC_DIR': R_DOC_DIR, 'R_HOME': R_HOME, 'R_INCLUDE_DIR': R_INCLUDE_DIR, 'R_SHARE_DIR': R_SHARE_DIR,", "DISPLAY='' (to avoid issues with java) # e.g. would be nice if RStudio", "}} }} \"\"\").format( user=getpass.getuser(), port=str(port), site_dir=os.getcwd() ) f = tempfile.NamedTemporaryFile(mode='w', delete=False) f.write(conf) f.close()", "Exception: nbapp.log.warning('Error when trying to get R executable from kernel') # Detect various", "be nice if RStudio terminal starts with correct conda env? # -> either", "env specific?) # -> use ~/.Rprofile ... if user specific? # make R", "FileNotFoundError('Can not find rserver in PATH') cmd = [ executable, '--www-port=' + str(port),", "= \\ r_output.decode().split(':') # TODO: # maybe set a few more env vars?", "# get notebook app from notebook.notebookapp import NotebookApp nbapp = NotebookApp.instance() kernel_name =", "e.g. 
would be nice if RStudio terminal starts with correct conda env? #", "conf = dedent(\"\"\" run_as {user}; server {{ bookmark_state_dir {site_dir}/shiny-server-boomarks; listen {port}; location /", "dedent def get_r_env(): env = {} executable = 'R' try: # get notebook", "{{ bookmark_state_dir {site_dir}/shiny-server-boomarks; listen {port}; location / {{ site_dir {site_dir}; log_dir {site_dir}/logs; directory_index", "nice if RStudio terminal starts with correct conda env? # -> either patch", "notebook.notebookapp import NotebookApp nbapp = NotebookApp.instance() kernel_name = nbapp.kernel_manager.default_kernel_name if kernel_name: kernel_spec =", "return ['shiny-server', f.name] def _get_shiny_env(port): env = get_r_env() return env return { 'command':", "return env def _get_r_executable(): try: # get notebook app from notebook.notebookapp import NotebookApp", "rather use standard system R installation, and let user install stuff in home", "R executable from kernel') return 'R' def _get_rsession_cmd(port): # Other paths rsession maybe", "/ Renviron.site # -> user Rprofile.site (if conda env specific?) # -> use", "in other_paths: if os.path.exists(op): executable = op break else: raise FileNotFoundError('Can not find", "env['USER'] = getpass.getuser() return env def _get_r_executable(): try: # get notebook app from", "kernel_spec.argv[0] # patch LD_LIBRARY_PATH for conda env conda_lib_dir = os.path.join(env['CONDA_PREFIX'], 'lib') #r_lib_dir =", "cmd = [executable, '--slave', '--vanilla', '-e', 'cat(paste(R.home(\"home\"),R.home(\"share\"),R.home(\"include\"),R.home(\"doc\"),getRversion(),sep=\":\"))'] r_output = subprocess.check_output(cmd) R_HOME, R_SHARE_DIR, R_INCLUDE_DIR,", "user Rprofile.site (if conda env specific?) # -> use ~/.Rprofile ... if user", "user=getpass.getuser(), port=str(port), site_dir=os.getcwd() ) f = tempfile.NamedTemporaryFile(mode='w', delete=False) f.write(conf) f.close() return ['shiny-server', f.name]", "server.r_path ??? 
conf = dedent(\"\"\" run_as {user}; server {{ bookmark_state_dir {site_dir}/shiny-server-boomarks; listen {port};", "When rstudio-server deb is installed '/usr/lib/rstudio-server/bin/rserver', ] if shutil.which('rserver'): executable = 'rserver' else:", "+ str(port), '--rsession-which-r=' + _get_r_executable(), ] env = get_r_env() if env.get('LD_LIBRARY_PATH'): cmd.append('--rsession-ld-library-path=' +", "= [executable, '--slave', '--vanilla', '-e', 'cat(paste(R.home(\"home\"),R.home(\"share\"),R.home(\"include\"),R.home(\"doc\"),getRversion(),sep=\":\"))'] r_output = subprocess.check_output(cmd) R_HOME, R_SHARE_DIR, R_INCLUDE_DIR, R_DOC_DIR,", "f.close() return ['shiny-server', f.name] def _get_shiny_env(port): env = get_r_env() return env return {", "return cmd return { 'command': _get_rsession_cmd, 'environment': _get_rsession_env, 'launcher_entry': { 'title': 'RStudio', 'icon_path':", "'command': _get_shiny_cmd, 'environment': _get_shiny_env, 'launcher_entry': { 'title': 'Shiny', 'icon_path': os.path.join(os.path.dirname(os.path.abspath(__file__)), 'icons', 'shiny.svg') }", "R_INCLUDE_DIR, R_DOC_DIR, version = \\ r_output.decode().split(':') # TODO: # maybe set a few", "# Other paths rsession maybe in other_paths = [ # When rstudio-server deb", "break else: raise FileNotFoundError('Can not find rserver in PATH') cmd = [ executable,", "str(port), '--rsession-which-r=' + _get_r_executable(), ] env = get_r_env() if env.get('LD_LIBRARY_PATH'): cmd.append('--rsession-ld-library-path=' + env['LD_LIBRARY_PATH'])", "instance.''' def _get_shiny_cmd(port): # server.r_path ??? conf = dedent(\"\"\" run_as {user}; server {{", "variables rsession requires to run # Via rstudio's src/cpp/core/r_util/REnvironmentPosix.cpp cmd = [executable, '--slave',", "-> user Rprofile.site (if conda env specific?) # -> use ~/.Rprofile ... 
if", "server {{ bookmark_state_dir {site_dir}/shiny-server-boomarks; listen {port}; location / {{ site_dir {site_dir}; log_dir {site_dir}/logs;", "/ {{ site_dir {site_dir}; log_dir {site_dir}/logs; directory_index on; }} }} \"\"\").format( user=getpass.getuser(), port=str(port),", "'--www-port=' + str(port), '--rsession-which-r=' + _get_r_executable(), ] env = get_r_env() if env.get('LD_LIBRARY_PATH'): cmd.append('--rsession-ld-library-path='", "up an authentication page if not os.environ.get('USER', ''): env['USER'] = getpass.getuser() return env", "conda_lib_dir 'LD_LIBRARY_PATH': conda_lib_dir }) except Exception: nbapp.log.warning('Error when trying to get R executable", "for conda env conda_lib_dir = os.path.join(env['CONDA_PREFIX'], 'lib') #r_lib_dir = os.path.join(conda_lib_dir, 'R/lib') env.update({ #", "return 'R' def _get_rsession_cmd(port): # Other paths rsession maybe in other_paths = [", "for op in other_paths: if os.path.exists(op): executable = op break else: raise FileNotFoundError('Can", "to get R executable from kernel') return 'R' def _get_rsession_cmd(port): # Other paths", "an authentication page if not os.environ.get('USER', ''): env['USER'] = getpass.getuser() return env def", "trying to get R executable from kernel') return 'R' def _get_rsession_cmd(port): # Other", "'LD_LIBRARY_PATH': r_lib_dir + ':' + conda_lib_dir 'LD_LIBRARY_PATH': conda_lib_dir }) except Exception: nbapp.log.warning('Error when", "cmd.append('--rsession-ld-library-path=' + env['LD_LIBRARY_PATH']) return cmd return { 'command': _get_rsession_cmd, 'environment': _get_rsession_env, 'launcher_entry': {", "f.name] def _get_shiny_env(port): env = get_r_env() return env return { 'command': _get_shiny_cmd, 'environment':", "get R executable from kernel') # Detect various environment variables rsession requires to", "various environment variables rsession requires to run # Via rstudio's src/cpp/core/r_util/REnvironmentPosix.cpp cmd =", "= get_r_env() # rserver needs USER to be 
set to something sensible, #", "# maybe set a few more env vars? # e.g. MAXENT, DISPLAY='' (to", "use ~/.Rprofile ... if user specific? # make R kernel used configurable? #", "import getpass import shutil from textwrap import dedent def get_r_env(): env = {}", "cmd return { 'command': _get_rsession_cmd, 'environment': _get_rsession_env, 'launcher_entry': { 'title': 'RStudio', 'icon_path': os.path.join(os.path.dirname(os.path.abspath(__file__)),", "def setup_rstudio(): def _get_rsession_env(port): env = get_r_env() # rserver needs USER to be", "import NotebookApp nbapp = NotebookApp.instance() kernel_name = nbapp.kernel_manager.default_kernel_name if kernel_name: kernel_spec = nbapp.kernel_spec_manager.get_kernel_spec(kernel_name)", "app from notebook.notebookapp import NotebookApp nbapp = NotebookApp.instance() kernel_name = nbapp.kernel_manager.default_kernel_name if kernel_name:", "or rather use standard system R installation, and let user install stuff in", "PATH') cmd = [ executable, '--www-port=' + str(port), '--rsession-which-r=' + _get_r_executable(), ] env", "get_r_env() return env return { 'command': _get_shiny_cmd, 'environment': _get_shiny_env, 'launcher_entry': { 'title': 'Shiny',", "}) return env def setup_shiny(): '''Manage a Shiny instance.''' def _get_shiny_cmd(port): # server.r_path", "'shiny.svg') } } def setup_rstudio(): def _get_rsession_env(port): env = get_r_env() # rserver needs", "installed '/usr/lib/rstudio-server/bin/rserver', ] if shutil.which('rserver'): executable = 'rserver' else: for op in other_paths:", "get_r_env() # rserver needs USER to be set to something sensible, # otherwise", "else: for op in other_paths: if os.path.exists(op): executable = op break else: raise", "r_output.decode().split(':') # TODO: # maybe set a few more env vars? # e.g.", "~/Renviron / Renviron.site # -> user Rprofile.site (if conda env specific?) # ->", "-> use ~/.Rprofile ... if user specific? 
# make R kernel used configurable?", "R executable: kernel_name = nbapp.kernel_manager.default_kernel_name if kernel_name: kernel_spec = nbapp.kernel_spec_manager.get_kernel_spec(kernel_name) return kernel_spec.argv[0] except", "otherwise it'll throw up an authentication page if not os.environ.get('USER', ''): env['USER'] =", "_get_shiny_cmd, 'environment': _get_shiny_env, 'launcher_entry': { 'title': 'Shiny', 'icon_path': os.path.join(os.path.dirname(os.path.abspath(__file__)), 'icons', 'shiny.svg') } }", "''): env['USER'] = getpass.getuser() return env def _get_r_executable(): try: # get notebook app", "USER to be set to something sensible, # otherwise it'll throw up an", "on; }} }} \"\"\").format( user=getpass.getuser(), port=str(port), site_dir=os.getcwd() ) f = tempfile.NamedTemporaryFile(mode='w', delete=False) f.write(conf)", "# get R executable: kernel_name = nbapp.kernel_manager.default_kernel_name if kernel_name: kernel_spec = nbapp.kernel_spec_manager.get_kernel_spec(kernel_name) return", "os.path.join(os.path.dirname(os.path.abspath(__file__)), 'icons', 'shiny.svg') } } def setup_rstudio(): def _get_rsession_env(port): env = get_r_env() #", "(to avoid issues with java) # e.g. would be nice if RStudio terminal", "= op break else: raise FileNotFoundError('Can not find rserver in PATH') cmd =", "kernel_name: kernel_spec = nbapp.kernel_spec_manager.get_kernel_spec(kernel_name) return kernel_spec.argv[0] except Exception: nbapp.log.warning('Error when trying to get", "'cat(paste(R.home(\"home\"),R.home(\"share\"),R.home(\"include\"),R.home(\"doc\"),getRversion(),sep=\":\"))'] r_output = subprocess.check_output(cmd) R_HOME, R_SHARE_DIR, R_INCLUDE_DIR, R_DOC_DIR, version = \\ r_output.decode().split(':') #", "standard system R installation, and let user install stuff in home folder? env.update({", "??? 
conf = dedent(\"\"\" run_as {user}; server {{ bookmark_state_dir {site_dir}/shiny-server-boomarks; listen {port}; location", "authentication page if not os.environ.get('USER', ''): env['USER'] = getpass.getuser() return env def _get_r_executable():", "r_lib_dir + ':' + conda_lib_dir 'LD_LIBRARY_PATH': conda_lib_dir }) except Exception: nbapp.log.warning('Error when trying", "def get_r_env(): env = {} executable = 'R' try: # get notebook app", "':' + conda_lib_dir 'LD_LIBRARY_PATH': conda_lib_dir }) except Exception: nbapp.log.warning('Error when trying to get", "# Detect various environment variables rsession requires to run # Via rstudio's src/cpp/core/r_util/REnvironmentPosix.cpp", "setup_rstudio(): def _get_rsession_env(port): env = get_r_env() # rserver needs USER to be set", "from notebook.notebookapp import NotebookApp nbapp = NotebookApp.instance() # get R executable: kernel_name =", "NotebookApp.instance() kernel_name = nbapp.kernel_manager.default_kernel_name if kernel_name: kernel_spec = nbapp.kernel_spec_manager.get_kernel_spec(kernel_name) env.update(kernel_spec.env) executable = kernel_spec.argv[0]", "executable from kernel') return 'R' def _get_rsession_cmd(port): # Other paths rsession maybe in", "raise FileNotFoundError('Can not find rserver in PATH') cmd = [ executable, '--www-port=' +", "= dedent(\"\"\" run_as {user}; server {{ bookmark_state_dir {site_dir}/shiny-server-boomarks; listen {port}; location / {{", "let user install stuff in home folder? env.update({ 'R_DOC_DIR': R_DOC_DIR, 'R_HOME': R_HOME, 'R_INCLUDE_DIR':", "when trying to get R executable from kernel') return 'R' def _get_rsession_cmd(port): #", "env conda_lib_dir = os.path.join(env['CONDA_PREFIX'], 'lib') #r_lib_dir = os.path.join(conda_lib_dir, 'R/lib') env.update({ # 'LD_LIBRARY_PATH': r_lib_dir", "env vars? # e.g. MAXENT, DISPLAY='' (to avoid issues with java) # e.g.", "'R' def _get_rsession_cmd(port): # Other paths rsession maybe in other_paths = [ #", "# e.g. 
MAXENT, DISPLAY='' (to avoid issues with java) # e.g. would be", "nbapp.log.warning('Error when trying to get R executable from kernel') return 'R' def _get_rsession_cmd(port):", "R_INCLUDE_DIR, 'R_SHARE_DIR': R_SHARE_DIR, 'RSTUDIO_DEFAULT_R_VERSION_HOME': R_HOME, 'RSTUDIO_DEFAULT_R_VERSION': version, }) return env def setup_shiny(): '''Manage", "listen {port}; location / {{ site_dir {site_dir}; log_dir {site_dir}/logs; directory_index on; }} }}", "either patch ~/Renviron / Renviron.site # -> user Rprofile.site (if conda env specific?)", "get_r_env() if env.get('LD_LIBRARY_PATH'): cmd.append('--rsession-ld-library-path=' + env['LD_LIBRARY_PATH']) return cmd return { 'command': _get_rsession_cmd, 'environment':", "LD_LIBRARY_PATH for conda env conda_lib_dir = os.path.join(env['CONDA_PREFIX'], 'lib') #r_lib_dir = os.path.join(conda_lib_dir, 'R/lib') env.update({", "f = tempfile.NamedTemporaryFile(mode='w', delete=False) f.write(conf) f.close() return ['shiny-server', f.name] def _get_shiny_env(port): env =", "] env = get_r_env() if env.get('LD_LIBRARY_PATH'): cmd.append('--rsession-ld-library-path=' + env['LD_LIBRARY_PATH']) return cmd return {", "#r_lib_dir = os.path.join(conda_lib_dir, 'R/lib') env.update({ # 'LD_LIBRARY_PATH': r_lib_dir + ':' + conda_lib_dir 'LD_LIBRARY_PATH':", "= os.path.join(env['CONDA_PREFIX'], 'lib') #r_lib_dir = os.path.join(conda_lib_dir, 'R/lib') env.update({ # 'LD_LIBRARY_PATH': r_lib_dir + ':'", "installation, and let user install stuff in home folder? 
env.update({ 'R_DOC_DIR': R_DOC_DIR, 'R_HOME':", "subprocess import getpass import shutil from textwrap import dedent def get_r_env(): env =", "r_output = subprocess.check_output(cmd) R_HOME, R_SHARE_DIR, R_INCLUDE_DIR, R_DOC_DIR, version = \\ r_output.decode().split(':') # TODO:", "'icon_path': os.path.join(os.path.dirname(os.path.abspath(__file__)), 'icons', 'shiny.svg') } } def setup_rstudio(): def _get_rsession_env(port): env = get_r_env()", "conda env conda_lib_dir = os.path.join(env['CONDA_PREFIX'], 'lib') #r_lib_dir = os.path.join(conda_lib_dir, 'R/lib') env.update({ # 'LD_LIBRARY_PATH':", "def _get_rsession_env(port): env = get_r_env() # rserver needs USER to be set to", "is installed '/usr/lib/rstudio-server/bin/rserver', ] if shutil.which('rserver'): executable = 'rserver' else: for op in", "import tempfile import subprocess import getpass import shutil from textwrap import dedent def", "cmd = [ executable, '--www-port=' + str(port), '--rsession-which-r=' + _get_r_executable(), ] env =", "a Shiny instance.''' def _get_shiny_cmd(port): # server.r_path ??? conf = dedent(\"\"\" run_as {user};", "starts with correct conda env? # -> either patch ~/Renviron / Renviron.site #", "something sensible, # otherwise it'll throw up an authentication page if not os.environ.get('USER',", "install stuff in home folder? env.update({ 'R_DOC_DIR': R_DOC_DIR, 'R_HOME': R_HOME, 'R_INCLUDE_DIR': R_INCLUDE_DIR, 'R_SHARE_DIR':", "# server.r_path ??? conf = dedent(\"\"\" run_as {user}; server {{ bookmark_state_dir {site_dir}/shiny-server-boomarks; listen", "\"\"\").format( user=getpass.getuser(), port=str(port), site_dir=os.getcwd() ) f = tempfile.NamedTemporaryFile(mode='w', delete=False) f.write(conf) f.close() return ['shiny-server',", "use standard system R installation, and let user install stuff in home folder?", "few more env vars? # e.g. MAXENT, DISPLAY='' (to avoid issues with java)", "specific?) # -> use ~/.Rprofile ... if user specific? 
# make R kernel", "# -> user Rprofile.site (if conda env specific?) # -> use ~/.Rprofile ...", "terminal starts with correct conda env? # -> either patch ~/Renviron / Renviron.site", "def _get_shiny_cmd(port): # server.r_path ??? conf = dedent(\"\"\" run_as {user}; server {{ bookmark_state_dir", "[ executable, '--www-port=' + str(port), '--rsession-which-r=' + _get_r_executable(), ] env = get_r_env() if", "try: # get notebook app from notebook.notebookapp import NotebookApp nbapp = NotebookApp.instance() kernel_name", "when trying to get R executable from kernel') # Detect various environment variables", "executable = 'R' try: # get notebook app from notebook.notebookapp import NotebookApp nbapp", "other_paths = [ # When rstudio-server deb is installed '/usr/lib/rstudio-server/bin/rserver', ] if shutil.which('rserver'):", "}) except Exception: nbapp.log.warning('Error when trying to get R executable from kernel') #", "to be set to something sensible, # otherwise it'll throw up an authentication", "= {} executable = 'R' try: # get notebook app from notebook.notebookapp import", "_get_shiny_env(port): env = get_r_env() return env return { 'command': _get_shiny_cmd, 'environment': _get_shiny_env, 'launcher_entry':", "patch LD_LIBRARY_PATH for conda env conda_lib_dir = os.path.join(env['CONDA_PREFIX'], 'lib') #r_lib_dir = os.path.join(conda_lib_dir, 'R/lib')", "conda_lib_dir }) except Exception: nbapp.log.warning('Error when trying to get R executable from kernel')", "e.g. MAXENT, DISPLAY='' (to avoid issues with java) # e.g. would be nice", "'command': _get_rsession_cmd, 'environment': _get_rsession_env, 'launcher_entry': { 'title': 'RStudio', 'icon_path': os.path.join(os.path.dirname(os.path.abspath(__file__)), 'icons', 'rstudio.svg') }", "} } def setup_rstudio(): def _get_rsession_env(port): env = get_r_env() # rserver needs USER", "folder? 
env.update({ 'R_DOC_DIR': R_DOC_DIR, 'R_HOME': R_HOME, 'R_INCLUDE_DIR': R_INCLUDE_DIR, 'R_SHARE_DIR': R_SHARE_DIR, 'RSTUDIO_DEFAULT_R_VERSION_HOME': R_HOME, 'RSTUDIO_DEFAULT_R_VERSION':", "if not os.environ.get('USER', ''): env['USER'] = getpass.getuser() return env def _get_r_executable(): try: #", "op in other_paths: if os.path.exists(op): executable = op break else: raise FileNotFoundError('Can not", "~/.Rprofile ... if user specific? # make R kernel used configurable? # ...", "delete=False) f.write(conf) f.close() return ['shiny-server', f.name] def _get_shiny_env(port): env = get_r_env() return env", "op break else: raise FileNotFoundError('Can not find rserver in PATH') cmd = [", "_get_rsession_cmd, 'environment': _get_rsession_env, 'launcher_entry': { 'title': 'RStudio', 'icon_path': os.path.join(os.path.dirname(os.path.abspath(__file__)), 'icons', 'rstudio.svg') } }", "'RSTUDIO_DEFAULT_R_VERSION': version, }) return env def setup_shiny(): '''Manage a Shiny instance.''' def _get_shiny_cmd(port):", "{ 'command': _get_shiny_cmd, 'environment': _get_shiny_env, 'launcher_entry': { 'title': 'Shiny', 'icon_path': os.path.join(os.path.dirname(os.path.abspath(__file__)), 'icons', 'shiny.svg')", "R executable from kernel') # Detect various environment variables rsession requires to run", "set a few more env vars? # e.g. MAXENT, DISPLAY='' (to avoid issues", "R_SHARE_DIR, 'RSTUDIO_DEFAULT_R_VERSION_HOME': R_HOME, 'RSTUDIO_DEFAULT_R_VERSION': version, }) return env def setup_shiny(): '''Manage a Shiny", "\\ r_output.decode().split(':') # TODO: # maybe set a few more env vars? 
#", "'Shiny', 'icon_path': os.path.join(os.path.dirname(os.path.abspath(__file__)), 'icons', 'shiny.svg') } } def setup_rstudio(): def _get_rsession_env(port): env =", "nbapp = NotebookApp.instance() # get R executable: kernel_name = nbapp.kernel_manager.default_kernel_name if kernel_name: kernel_spec", "'--vanilla', '-e', 'cat(paste(R.home(\"home\"),R.home(\"share\"),R.home(\"include\"),R.home(\"doc\"),getRversion(),sep=\":\"))'] r_output = subprocess.check_output(cmd) R_HOME, R_SHARE_DIR, R_INCLUDE_DIR, R_DOC_DIR, version = \\", "version, }) return env def setup_shiny(): '''Manage a Shiny instance.''' def _get_shiny_cmd(port): #", "env.get('LD_LIBRARY_PATH'): cmd.append('--rsession-ld-library-path=' + env['LD_LIBRARY_PATH']) return cmd return { 'command': _get_rsession_cmd, 'environment': _get_rsession_env, 'launcher_entry':", "rstudio's src/cpp/core/r_util/REnvironmentPosix.cpp cmd = [executable, '--slave', '--vanilla', '-e', 'cat(paste(R.home(\"home\"),R.home(\"share\"),R.home(\"include\"),R.home(\"doc\"),getRversion(),sep=\":\"))'] r_output = subprocess.check_output(cmd) R_HOME,", "= NotebookApp.instance() # get R executable: kernel_name = nbapp.kernel_manager.default_kernel_name if kernel_name: kernel_spec =", "... or rather use standard system R installation, and let user install stuff", "import subprocess import getpass import shutil from textwrap import dedent def get_r_env(): env", "Shiny instance.''' def _get_shiny_cmd(port): # server.r_path ??? conf = dedent(\"\"\" run_as {user}; server", "{site_dir}/logs; directory_index on; }} }} \"\"\").format( user=getpass.getuser(), port=str(port), site_dir=os.getcwd() ) f = tempfile.NamedTemporaryFile(mode='w',", "return { 'command': _get_shiny_cmd, 'environment': _get_shiny_env, 'launcher_entry': { 'title': 'Shiny', 'icon_path': os.path.join(os.path.dirname(os.path.abspath(__file__)), 'icons',", "kernel used configurable? # ... 
or rather use standard system R installation, and", "sensible, # otherwise it'll throw up an authentication page if not os.environ.get('USER', ''):", "get notebook app from notebook.notebookapp import NotebookApp nbapp = NotebookApp.instance() kernel_name = nbapp.kernel_manager.default_kernel_name", "'/usr/lib/rstudio-server/bin/rserver', ] if shutil.which('rserver'): executable = 'rserver' else: for op in other_paths: if", "in home folder? env.update({ 'R_DOC_DIR': R_DOC_DIR, 'R_HOME': R_HOME, 'R_INCLUDE_DIR': R_INCLUDE_DIR, 'R_SHARE_DIR': R_SHARE_DIR, 'RSTUDIO_DEFAULT_R_VERSION_HOME':", "def _get_r_executable(): try: # get notebook app from notebook.notebookapp import NotebookApp nbapp =", "from textwrap import dedent def get_r_env(): env = {} executable = 'R' try:", "# TODO: # maybe set a few more env vars? # e.g. MAXENT,", "'-e', 'cat(paste(R.home(\"home\"),R.home(\"share\"),R.home(\"include\"),R.home(\"doc\"),getRversion(),sep=\":\"))'] r_output = subprocess.check_output(cmd) R_HOME, R_SHARE_DIR, R_INCLUDE_DIR, R_DOC_DIR, version = \\ r_output.decode().split(':')", "rstudio-server deb is installed '/usr/lib/rstudio-server/bin/rserver', ] if shutil.which('rserver'): executable = 'rserver' else: for", "NotebookApp nbapp = NotebookApp.instance() # get R executable: kernel_name = nbapp.kernel_manager.default_kernel_name if kernel_name:", "env? # -> either patch ~/Renviron / Renviron.site # -> user Rprofile.site (if", "shutil.which('rserver'): executable = 'rserver' else: for op in other_paths: if os.path.exists(op): executable =", "return { 'command': _get_rsession_cmd, 'environment': _get_rsession_env, 'launcher_entry': { 'title': 'RStudio', 'icon_path': os.path.join(os.path.dirname(os.path.abspath(__file__)), 'icons',", "rsession maybe in other_paths = [ # When rstudio-server deb is installed '/usr/lib/rstudio-server/bin/rserver',", "if RStudio terminal starts with correct conda env? 
# -> either patch ~/Renviron", "import shutil from textwrap import dedent def get_r_env(): env = {} executable =", "return kernel_spec.argv[0] except Exception: nbapp.log.warning('Error when trying to get R executable from kernel')", "return env def setup_shiny(): '''Manage a Shiny instance.''' def _get_shiny_cmd(port): # server.r_path ???", "env return { 'command': _get_shiny_cmd, 'environment': _get_shiny_env, 'launcher_entry': { 'title': 'Shiny', 'icon_path': os.path.join(os.path.dirname(os.path.abspath(__file__)),", "kernel_name = nbapp.kernel_manager.default_kernel_name if kernel_name: kernel_spec = nbapp.kernel_spec_manager.get_kernel_spec(kernel_name) env.update(kernel_spec.env) executable = kernel_spec.argv[0] #", "not os.environ.get('USER', ''): env['USER'] = getpass.getuser() return env def _get_r_executable(): try: # get", "_get_r_executable(), ] env = get_r_env() if env.get('LD_LIBRARY_PATH'): cmd.append('--rsession-ld-library-path=' + env['LD_LIBRARY_PATH']) return cmd return", "= NotebookApp.instance() kernel_name = nbapp.kernel_manager.default_kernel_name if kernel_name: kernel_spec = nbapp.kernel_spec_manager.get_kernel_spec(kernel_name) env.update(kernel_spec.env) executable =", "env = get_r_env() if env.get('LD_LIBRARY_PATH'): cmd.append('--rsession-ld-library-path=' + env['LD_LIBRARY_PATH']) return cmd return { 'command':", "'R' try: # get notebook app from notebook.notebookapp import NotebookApp nbapp = NotebookApp.instance()", "kernel_spec = nbapp.kernel_spec_manager.get_kernel_spec(kernel_name) env.update(kernel_spec.env) executable = kernel_spec.argv[0] # patch LD_LIBRARY_PATH for conda env", "get_r_env(): env = {} executable = 'R' try: # get notebook app from", "nbapp.log.warning('Error when trying to get R executable from kernel') # Detect various environment", "{port}; location / {{ site_dir {site_dir}; log_dir {site_dir}/logs; directory_index on; }} }} \"\"\").format(", "'LD_LIBRARY_PATH': conda_lib_dir }) except Exception: 
nbapp.log.warning('Error when trying to get R executable from", "{user}; server {{ bookmark_state_dir {site_dir}/shiny-server-boomarks; listen {port}; location / {{ site_dir {site_dir}; log_dir", "env = get_r_env() # rserver needs USER to be set to something sensible,", "throw up an authentication page if not os.environ.get('USER', ''): env['USER'] = getpass.getuser() return", "kernel_spec = nbapp.kernel_spec_manager.get_kernel_spec(kernel_name) return kernel_spec.argv[0] except Exception: nbapp.log.warning('Error when trying to get R", "R_HOME, 'RSTUDIO_DEFAULT_R_VERSION': version, }) return env def setup_shiny(): '''Manage a Shiny instance.''' def", "= tempfile.NamedTemporaryFile(mode='w', delete=False) f.write(conf) f.close() return ['shiny-server', f.name] def _get_shiny_env(port): env = get_r_env()", "app from notebook.notebookapp import NotebookApp nbapp = NotebookApp.instance() # get R executable: kernel_name", "] if shutil.which('rserver'): executable = 'rserver' else: for op in other_paths: if os.path.exists(op):", "location / {{ site_dir {site_dir}; log_dir {site_dir}/logs; directory_index on; }} }} \"\"\").format( user=getpass.getuser(),", "user specific? # make R kernel used configurable? # ... 
or rather use", "page if not os.environ.get('USER', ''): env['USER'] = getpass.getuser() return env def _get_r_executable(): try:", "notebook app from notebook.notebookapp import NotebookApp nbapp = NotebookApp.instance() # get R executable:", "os.path.exists(op): executable = op break else: raise FileNotFoundError('Can not find rserver in PATH')", "Detect various environment variables rsession requires to run # Via rstudio's src/cpp/core/r_util/REnvironmentPosix.cpp cmd", "Other paths rsession maybe in other_paths = [ # When rstudio-server deb is", "environment variables rsession requires to run # Via rstudio's src/cpp/core/r_util/REnvironmentPosix.cpp cmd = [executable,", "paths rsession maybe in other_paths = [ # When rstudio-server deb is installed", "{site_dir}/shiny-server-boomarks; listen {port}; location / {{ site_dir {site_dir}; log_dir {site_dir}/logs; directory_index on; }}", "system R installation, and let user install stuff in home folder? env.update({ 'R_DOC_DIR':", "a few more env vars? # e.g. MAXENT, DISPLAY='' (to avoid issues with", "# -> use ~/.Rprofile ... if user specific? # make R kernel used", "= get_r_env() return env return { 'command': _get_shiny_cmd, 'environment': _get_shiny_env, 'launcher_entry': { 'title':", "# patch LD_LIBRARY_PATH for conda env conda_lib_dir = os.path.join(env['CONDA_PREFIX'], 'lib') #r_lib_dir = os.path.join(conda_lib_dir,", "env = get_r_env() return env return { 'command': _get_shiny_cmd, 'environment': _get_shiny_env, 'launcher_entry': {", "issues with java) # e.g. would be nice if RStudio terminal starts with", "try: # get notebook app from notebook.notebookapp import NotebookApp nbapp = NotebookApp.instance() #", "more env vars? # e.g. MAXENT, DISPLAY='' (to avoid issues with java) #", "env = {} executable = 'R' try: # get notebook app from notebook.notebookapp", "def _get_shiny_env(port): env = get_r_env() return env return { 'command': _get_shiny_cmd, 'environment': _get_shiny_env," ]
[ "in s: if (i.islower()): res.append(chr(219 - ord(i))) else: res.append(i) return ''.join(res) text =", "- ord(i))) else: res.append(i) return ''.join(res) text = \"chika chika\" a = cipher(text)", "if (i.islower()): res.append(chr(219 - ord(i))) else: res.append(i) return ''.join(res) text = \"chika chika\"", "def cipher(s): res = [] for i in s: if (i.islower()): res.append(chr(219 -", "[] for i in s: if (i.islower()): res.append(chr(219 - ord(i))) else: res.append(i) return", "else: res.append(i) return ''.join(res) text = \"chika chika\" a = cipher(text) print(a) b", "cipher(s): res = [] for i in s: if (i.islower()): res.append(chr(219 - ord(i)))", "i in s: if (i.islower()): res.append(chr(219 - ord(i))) else: res.append(i) return ''.join(res) text", "res.append(chr(219 - ord(i))) else: res.append(i) return ''.join(res) text = \"chika chika\" a =", "ord(i))) else: res.append(i) return ''.join(res) text = \"chika chika\" a = cipher(text) print(a)", "res = [] for i in s: if (i.islower()): res.append(chr(219 - ord(i))) else:", "= [] for i in s: if (i.islower()): res.append(chr(219 - ord(i))) else: res.append(i)", "return ''.join(res) text = \"chika chika\" a = cipher(text) print(a) b = cipher(a)", "(i.islower()): res.append(chr(219 - ord(i))) else: res.append(i) return ''.join(res) text = \"chika chika\" a", "''.join(res) text = \"chika chika\" a = cipher(text) print(a) b = cipher(a) print(b)", "res.append(i) return ''.join(res) text = \"chika chika\" a = cipher(text) print(a) b =", "s: if (i.islower()): res.append(chr(219 - ord(i))) else: res.append(i) return ''.join(res) text = \"chika", "for i in s: if (i.islower()): res.append(chr(219 - ord(i))) else: res.append(i) return ''.join(res)" ]
[ "os.path.join('/nfs/masi/leeh43/MULAN_universal_lesion_analysis/results') file_list= get_files_endswith(data_dir, '.txt') data_list = [] for i in file_list: with open(i)", "file_list file_list = [] dirs_list = [] data_dir = os.path.join('/nfs/masi/leeh43/MULAN_universal_lesion_analysis/results') file_list= get_files_endswith(data_dir, '.txt')", "if tag in row: intermediate_list.append(row) num_list.append(len(intermediate_list)) intermediate_list = [] ################ Plot Graph ###################", "plt.figure(figsize=(10,40)) #y_pos = np.arange(len(tag_list)) #plt.barh(y_pos,num_list, width, align='center') #plt.yticks(y_pos, tag_list) #plt.xlabel('Numbers in Zhoubing datasets')", "width, align='center') #plt.yticks(y_pos, tag_list) #plt.xlabel('Numbers in Zhoubing datasets') #plt.title('Evaluation of DeepLesion on Zhoubing100')", "of DeepLesion on Zhoubing100') #plt.show() #fig.set_size_inches(25, 40) #fig.savefig('evaluation.png', dpi=300) ################# Total number of", "import os import numpy as np import json import matplotlib.pyplot as plt; plt.rcdefaults()", "file) return file_list file_list = [] dirs_list = [] data_dir = os.path.join('/nfs/masi/leeh43/MULAN_universal_lesion_analysis/results') file_list=", "#plt.barh(y_pos,num_list, width, align='center') #plt.yticks(y_pos, tag_list) #plt.xlabel('Numbers in Zhoubing datasets') #plt.title('Evaluation of DeepLesion on", "for item in data_list: for row in item: if 'lesion' in row: num_lesion.append(row)", "tag_class = json_data['tag_dict_list'] tag_list = [] tag = [] for item in tag_class:", "02:29:53 2019 @author: leeh43 \"\"\" import os import numpy as np import json", "################# Total number of lesion ################ num_lesion = [] for item in data_list:", "= 0.5 #fig = plt.figure(figsize=(10,40)) #y_pos = np.arange(len(tag_list)) #plt.barh(y_pos,num_list, width, align='center') #plt.yticks(y_pos, tag_list)", "################# Total number of liver lesion ################ liver_list = [] 
for item in", "= os.path.join('/nfs/masi/leeh43/MULAN_universal_lesion_analysis/program_data/' + 'tags_cache.json') with open(json_file) as f: json_data = json.load(f) tag_class=[] tag_class", "= [x.strip() for x in content] data_list.append(content) json_file = os.path.join('/nfs/masi/leeh43/MULAN_universal_lesion_analysis/program_data/' + 'tags_cache.json') with", "+ '/' + file) return file_list file_list = [] dirs_list = [] data_dir", "import numpy as np import json import matplotlib.pyplot as plt; plt.rcdefaults() import matplotlib.pyplot", "plt.rcdefaults() import matplotlib.pyplot as plt import random def get_files_endswith(src_dir, ext): if not os.path.isdir(src_dir):", "[] dirs_list = [] data_dir = os.path.join('/nfs/masi/leeh43/MULAN_universal_lesion_analysis/results') file_list= get_files_endswith(data_dir, '.txt') data_list = []", "data_list = [] for i in file_list: with open(i) as f: content =", "###################### intermediate_list = [] num_list = [] for tag in tag_list: for item", "Completed ###################### intermediate_list = [] num_list = [] for tag in tag_list: for", "'/' + file) return file_list file_list = [] dirs_list = [] data_dir =", "'lesion' in row: num_lesion.append(row) ################# Total number of liver lesion ################ liver_list =", "plt; plt.rcdefaults() import matplotlib.pyplot as plt import random def get_files_endswith(src_dir, ext): if not", "2019 @author: leeh43 \"\"\" import os import numpy as np import json import", "################ liver_list = [] for item in num_lesion: if 'liver' in item: liver_list.append(item)", "# -*- coding: utf-8 -*- \"\"\" Created on Sun Sep 29 02:29:53 2019", "datasets') #plt.title('Evaluation of DeepLesion on Zhoubing100') #plt.show() #fig.set_size_inches(25, 40) #fig.savefig('evaluation.png', dpi=300) ################# Total", "################## Get Tag List Completed ###################### intermediate_list = [] num_list = [] for", "utf-8 -*- \"\"\" Created on Sun Sep 29 
02:29:53 2019 @author: leeh43 \"\"\"", "get_files_endswith(data_dir, '.txt') data_list = [] for i in file_list: with open(i) as f:", "\"\"\" Created on Sun Sep 29 02:29:53 2019 @author: leeh43 \"\"\" import os", "Sep 29 02:29:53 2019 @author: leeh43 \"\"\" import os import numpy as np", "not exist:' + src_dir) file_list = [] for root, dirs, files in os.walk(src_dir):", "for row in item: if 'lesion' in row: num_lesion.append(row) ################# Total number of", "ext): if not os.path.isdir(src_dir): raise ValueError('Folder does not exist:' + src_dir) file_list =", "for item in tag_class: tag_list.append(item['tag']) ################## Get Tag List Completed ###################### intermediate_list =", "align='center') #plt.yticks(y_pos, tag_list) #plt.xlabel('Numbers in Zhoubing datasets') #plt.title('Evaluation of DeepLesion on Zhoubing100') #plt.show()", "row in item: if 'lesion' in row: num_lesion.append(row) ################# Total number of liver", "[] ################ Plot Graph ################### #width = 0.5 #fig = plt.figure(figsize=(10,40)) #y_pos =", "tag_class: tag_list.append(item['tag']) ################## Get Tag List Completed ###################### intermediate_list = [] num_list =", "file_list: with open(i) as f: content = f.readlines() content = [x.strip() for x", "= plt.figure(figsize=(10,40)) #y_pos = np.arange(len(tag_list)) #plt.barh(y_pos,num_list, width, align='center') #plt.yticks(y_pos, tag_list) #plt.xlabel('Numbers in Zhoubing", "in item: if tag in row: intermediate_list.append(row) num_list.append(len(intermediate_list)) intermediate_list = [] ################ Plot", "for x in content] data_list.append(content) json_file = os.path.join('/nfs/masi/leeh43/MULAN_universal_lesion_analysis/program_data/' + 'tags_cache.json') with open(json_file) as", "leeh43 \"\"\" import os import numpy as np import json import matplotlib.pyplot as", "f.readlines() content = [x.strip() for x in content] data_list.append(content) json_file = 
os.path.join('/nfs/masi/leeh43/MULAN_universal_lesion_analysis/program_data/' +", "= [] for item in data_list: for row in item: if 'lesion' in", "#y_pos = np.arange(len(tag_list)) #plt.barh(y_pos,num_list, width, align='center') #plt.yticks(y_pos, tag_list) #plt.xlabel('Numbers in Zhoubing datasets') #plt.title('Evaluation", "src_dir) file_list = [] for root, dirs, files in os.walk(src_dir): for file in", "#plt.show() #fig.set_size_inches(25, 40) #fig.savefig('evaluation.png', dpi=300) ################# Total number of lesion ################ num_lesion =", "json_data = json.load(f) tag_class=[] tag_class = json_data['tag_dict_list'] tag_list = [] tag = []", "for i in file_list: with open(i) as f: content = f.readlines() content =", "coding: utf-8 -*- \"\"\" Created on Sun Sep 29 02:29:53 2019 @author: leeh43", "item in data_list: for row in item: if tag in row: intermediate_list.append(row) num_list.append(len(intermediate_list))", "import json import matplotlib.pyplot as plt; plt.rcdefaults() import matplotlib.pyplot as plt import random", "in row: num_lesion.append(row) ################# Total number of liver lesion ################ liver_list = []", "= [] data_dir = os.path.join('/nfs/masi/leeh43/MULAN_universal_lesion_analysis/results') file_list= get_files_endswith(data_dir, '.txt') data_list = [] for i", "= os.path.join('/nfs/masi/leeh43/MULAN_universal_lesion_analysis/results') file_list= get_files_endswith(data_dir, '.txt') data_list = [] for i in file_list: with", "[] for item in data_list: for row in item: if 'lesion' in row:", "open(i) as f: content = f.readlines() content = [x.strip() for x in content]", "[] for item in tag_class: tag_list.append(item['tag']) ################## Get Tag List Completed ###################### intermediate_list", "tag in row: intermediate_list.append(row) num_list.append(len(intermediate_list)) intermediate_list = [] ################ Plot Graph ################### #width", "#fig = plt.figure(figsize=(10,40)) #y_pos = 
np.arange(len(tag_list)) #plt.barh(y_pos,num_list, width, align='center') #plt.yticks(y_pos, tag_list) #plt.xlabel('Numbers in", "[x.strip() for x in content] data_list.append(content) json_file = os.path.join('/nfs/masi/leeh43/MULAN_universal_lesion_analysis/program_data/' + 'tags_cache.json') with open(json_file)", "lesion ################ liver_list = [] for item in num_lesion: if 'liver' in item:", "root, dirs, files in os.walk(src_dir): for file in files: if file.lower().endswith(ext.lower()): file_list.append(root +", "row in item: if tag in row: intermediate_list.append(row) num_list.append(len(intermediate_list)) intermediate_list = [] ################", "\"\"\" import os import numpy as np import json import matplotlib.pyplot as plt;", "= np.arange(len(tag_list)) #plt.barh(y_pos,num_list, width, align='center') #plt.yticks(y_pos, tag_list) #plt.xlabel('Numbers in Zhoubing datasets') #plt.title('Evaluation of", "if not os.path.isdir(src_dir): raise ValueError('Folder does not exist:' + src_dir) file_list = []", "################ num_lesion = [] for item in data_list: for row in item: if", "in files: if file.lower().endswith(ext.lower()): file_list.append(root + '/' + file) return file_list file_list =", "with open(i) as f: content = f.readlines() content = [x.strip() for x in", "tag_list = [] tag = [] for item in tag_class: tag_list.append(item['tag']) ################## Get", "tag_list.append(item['tag']) ################## Get Tag List Completed ###################### intermediate_list = [] num_list = []", "of lesion ################ num_lesion = [] for item in data_list: for row in", "data_list: for row in item: if tag in row: intermediate_list.append(row) num_list.append(len(intermediate_list)) intermediate_list =", "data_list: for row in item: if 'lesion' in row: num_lesion.append(row) ################# Total number", "40) #fig.savefig('evaluation.png', dpi=300) ################# Total number of lesion ################ num_lesion = [] for", "for file 
in files: if file.lower().endswith(ext.lower()): file_list.append(root + '/' + file) return file_list", "open(json_file) as f: json_data = json.load(f) tag_class=[] tag_class = json_data['tag_dict_list'] tag_list = []", "-*- \"\"\" Created on Sun Sep 29 02:29:53 2019 @author: leeh43 \"\"\" import", "files in os.walk(src_dir): for file in files: if file.lower().endswith(ext.lower()): file_list.append(root + '/' +", "[] for i in file_list: with open(i) as f: content = f.readlines() content", "row: intermediate_list.append(row) num_list.append(len(intermediate_list)) intermediate_list = [] ################ Plot Graph ################### #width = 0.5", "np import json import matplotlib.pyplot as plt; plt.rcdefaults() import matplotlib.pyplot as plt import", "#plt.yticks(y_pos, tag_list) #plt.xlabel('Numbers in Zhoubing datasets') #plt.title('Evaluation of DeepLesion on Zhoubing100') #plt.show() #fig.set_size_inches(25,", "29 02:29:53 2019 @author: leeh43 \"\"\" import os import numpy as np import", "file_list= get_files_endswith(data_dir, '.txt') data_list = [] for i in file_list: with open(i) as", "= [] num_list = [] for tag in tag_list: for item in data_list:", "intermediate_list.append(row) num_list.append(len(intermediate_list)) intermediate_list = [] ################ Plot Graph ################### #width = 0.5 #fig", "tag = [] for item in tag_class: tag_list.append(item['tag']) ################## Get Tag List Completed", "Total number of lesion ################ num_lesion = [] for item in data_list: for", "tag_class=[] tag_class = json_data['tag_dict_list'] tag_list = [] tag = [] for item in", "f: json_data = json.load(f) tag_class=[] tag_class = json_data['tag_dict_list'] tag_list = [] tag =", "if 'lesion' in row: num_lesion.append(row) ################# Total number of liver lesion ################ liver_list", "number of liver lesion ################ liver_list = [] for item in num_lesion: if", "+ src_dir) file_list = [] for root, dirs, files in 
os.walk(src_dir): for file", "Tag List Completed ###################### intermediate_list = [] num_list = [] for tag in", "matplotlib.pyplot as plt import random def get_files_endswith(src_dir, ext): if not os.path.isdir(src_dir): raise ValueError('Folder", "#fig.set_size_inches(25, 40) #fig.savefig('evaluation.png', dpi=300) ################# Total number of lesion ################ num_lesion = []", "= [] ################ Plot Graph ################### #width = 0.5 #fig = plt.figure(figsize=(10,40)) #y_pos", "[] data_dir = os.path.join('/nfs/masi/leeh43/MULAN_universal_lesion_analysis/results') file_list= get_files_endswith(data_dir, '.txt') data_list = [] for i in", "json_data['tag_dict_list'] tag_list = [] tag = [] for item in tag_class: tag_list.append(item['tag']) ##################", "num_list.append(len(intermediate_list)) intermediate_list = [] ################ Plot Graph ################### #width = 0.5 #fig =", "as f: json_data = json.load(f) tag_class=[] tag_class = json_data['tag_dict_list'] tag_list = [] tag", "in os.walk(src_dir): for file in files: if file.lower().endswith(ext.lower()): file_list.append(root + '/' + file)", "def get_files_endswith(src_dir, ext): if not os.path.isdir(src_dir): raise ValueError('Folder does not exist:' + src_dir)", "import matplotlib.pyplot as plt import random def get_files_endswith(src_dir, ext): if not os.path.isdir(src_dir): raise", "num_lesion.append(row) ################# Total number of liver lesion ################ liver_list = [] for item", "[] for tag in tag_list: for item in data_list: for row in item:", "of liver lesion ################ liver_list = [] for item in num_lesion: if 'liver'", "'tags_cache.json') with open(json_file) as f: json_data = json.load(f) tag_class=[] tag_class = json_data['tag_dict_list'] tag_list", "= [] for i in file_list: with open(i) as f: content = f.readlines()", "if file.lower().endswith(ext.lower()): file_list.append(root + '/' + file) return file_list file_list = [] 
dirs_list", "as np import json import matplotlib.pyplot as plt; plt.rcdefaults() import matplotlib.pyplot as plt", "-*- coding: utf-8 -*- \"\"\" Created on Sun Sep 29 02:29:53 2019 @author:", "raise ValueError('Folder does not exist:' + src_dir) file_list = [] for root, dirs,", "exist:' + src_dir) file_list = [] for root, dirs, files in os.walk(src_dir): for", "[] for root, dirs, files in os.walk(src_dir): for file in files: if file.lower().endswith(ext.lower()):", "file_list.append(root + '/' + file) return file_list file_list = [] dirs_list = []", "content] data_list.append(content) json_file = os.path.join('/nfs/masi/leeh43/MULAN_universal_lesion_analysis/program_data/' + 'tags_cache.json') with open(json_file) as f: json_data =", "List Completed ###################### intermediate_list = [] num_list = [] for tag in tag_list:", "Zhoubing datasets') #plt.title('Evaluation of DeepLesion on Zhoubing100') #plt.show() #fig.set_size_inches(25, 40) #fig.savefig('evaluation.png', dpi=300) #################", "#fig.savefig('evaluation.png', dpi=300) ################# Total number of lesion ################ num_lesion = [] for item", "data_dir = os.path.join('/nfs/masi/leeh43/MULAN_universal_lesion_analysis/results') file_list= get_files_endswith(data_dir, '.txt') data_list = [] for i in file_list:", "= [] tag = [] for item in tag_class: tag_list.append(item['tag']) ################## Get Tag", "#width = 0.5 #fig = plt.figure(figsize=(10,40)) #y_pos = np.arange(len(tag_list)) #plt.barh(y_pos,num_list, width, align='center') #plt.yticks(y_pos,", "item: if tag in row: intermediate_list.append(row) num_list.append(len(intermediate_list)) intermediate_list = [] ################ Plot Graph", "################ Plot Graph ################### #width = 0.5 #fig = plt.figure(figsize=(10,40)) #y_pos = np.arange(len(tag_list))", "get_files_endswith(src_dir, ext): if not os.path.isdir(src_dir): raise ValueError('Folder does not exist:' + src_dir) file_list", "= 
json_data['tag_dict_list'] tag_list = [] tag = [] for item in tag_class: tag_list.append(item['tag'])", "files: if file.lower().endswith(ext.lower()): file_list.append(root + '/' + file) return file_list file_list = []", "+ 'tags_cache.json') with open(json_file) as f: json_data = json.load(f) tag_class=[] tag_class = json_data['tag_dict_list']", "num_lesion = [] for item in data_list: for row in item: if 'lesion'", "json.load(f) tag_class=[] tag_class = json_data['tag_dict_list'] tag_list = [] tag = [] for item", "Zhoubing100') #plt.show() #fig.set_size_inches(25, 40) #fig.savefig('evaluation.png', dpi=300) ################# Total number of lesion ################ num_lesion", "file_list = [] for root, dirs, files in os.walk(src_dir): for file in files:", "item in tag_class: tag_list.append(item['tag']) ################## Get Tag List Completed ###################### intermediate_list = []", "file.lower().endswith(ext.lower()): file_list.append(root + '/' + file) return file_list file_list = [] dirs_list =", "Plot Graph ################### #width = 0.5 #fig = plt.figure(figsize=(10,40)) #y_pos = np.arange(len(tag_list)) #plt.barh(y_pos,num_list,", "DeepLesion on Zhoubing100') #plt.show() #fig.set_size_inches(25, 40) #fig.savefig('evaluation.png', dpi=300) ################# Total number of lesion", "in item: if 'lesion' in row: num_lesion.append(row) ################# Total number of liver lesion", "plt import random def get_files_endswith(src_dir, ext): if not os.path.isdir(src_dir): raise ValueError('Folder does not", "= [] dirs_list = [] data_dir = os.path.join('/nfs/masi/leeh43/MULAN_universal_lesion_analysis/results') file_list= get_files_endswith(data_dir, '.txt') data_list =", "################### #width = 0.5 #fig = plt.figure(figsize=(10,40)) #y_pos = np.arange(len(tag_list)) #plt.barh(y_pos,num_list, width, align='center')", "import random def get_files_endswith(src_dir, ext): if not os.path.isdir(src_dir): raise ValueError('Folder does not exist:'", 
"Total number of liver lesion ################ liver_list = [] for item in num_lesion:", "in data_list: for row in item: if tag in row: intermediate_list.append(row) num_list.append(len(intermediate_list)) intermediate_list", "x in content] data_list.append(content) json_file = os.path.join('/nfs/masi/leeh43/MULAN_universal_lesion_analysis/program_data/' + 'tags_cache.json') with open(json_file) as f:", "os.path.isdir(src_dir): raise ValueError('Folder does not exist:' + src_dir) file_list = [] for root,", "number of lesion ################ num_lesion = [] for item in data_list: for row", "item: if 'lesion' in row: num_lesion.append(row) ################# Total number of liver lesion ################", "content = f.readlines() content = [x.strip() for x in content] data_list.append(content) json_file =", "file in files: if file.lower().endswith(ext.lower()): file_list.append(root + '/' + file) return file_list file_list", "random def get_files_endswith(src_dir, ext): if not os.path.isdir(src_dir): raise ValueError('Folder does not exist:' +", "ValueError('Folder does not exist:' + src_dir) file_list = [] for root, dirs, files", "as f: content = f.readlines() content = [x.strip() for x in content] data_list.append(content)", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\" Created on Sun Sep 29", "with open(json_file) as f: json_data = json.load(f) tag_class=[] tag_class = json_data['tag_dict_list'] tag_list =", "intermediate_list = [] ################ Plot Graph ################### #width = 0.5 #fig = plt.figure(figsize=(10,40))", "tag_list: for item in data_list: for row in item: if tag in row:", "for row in item: if tag in row: intermediate_list.append(row) num_list.append(len(intermediate_list)) intermediate_list = []", "dpi=300) ################# Total number of lesion ################ num_lesion = [] for item in", "<reponame>leeh43/MULAN_universal_lesion_analysis #!/usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\" Created on Sun Sep", "in data_list: 
for row in item: if 'lesion' in row: num_lesion.append(row) ################# Total", "in Zhoubing datasets') #plt.title('Evaluation of DeepLesion on Zhoubing100') #plt.show() #fig.set_size_inches(25, 40) #fig.savefig('evaluation.png', dpi=300)", "matplotlib.pyplot as plt; plt.rcdefaults() import matplotlib.pyplot as plt import random def get_files_endswith(src_dir, ext):", "content = [x.strip() for x in content] data_list.append(content) json_file = os.path.join('/nfs/masi/leeh43/MULAN_universal_lesion_analysis/program_data/' + 'tags_cache.json')", "in row: intermediate_list.append(row) num_list.append(len(intermediate_list)) intermediate_list = [] ################ Plot Graph ################### #width =", "tag_list) #plt.xlabel('Numbers in Zhoubing datasets') #plt.title('Evaluation of DeepLesion on Zhoubing100') #plt.show() #fig.set_size_inches(25, 40)", "Sun Sep 29 02:29:53 2019 @author: leeh43 \"\"\" import os import numpy as", "[] num_list = [] for tag in tag_list: for item in data_list: for", "as plt; plt.rcdefaults() import matplotlib.pyplot as plt import random def get_files_endswith(src_dir, ext): if", "json import matplotlib.pyplot as plt; plt.rcdefaults() import matplotlib.pyplot as plt import random def", "for root, dirs, files in os.walk(src_dir): for file in files: if file.lower().endswith(ext.lower()): file_list.append(root", "= [] for tag in tag_list: for item in data_list: for row in", "on Sun Sep 29 02:29:53 2019 @author: leeh43 \"\"\" import os import numpy", "import matplotlib.pyplot as plt; plt.rcdefaults() import matplotlib.pyplot as plt import random def get_files_endswith(src_dir,", "Get Tag List Completed ###################### intermediate_list = [] num_list = [] for tag", "in content] data_list.append(content) json_file = os.path.join('/nfs/masi/leeh43/MULAN_universal_lesion_analysis/program_data/' + 'tags_cache.json') with open(json_file) as f: json_data", "num_list = [] for tag in tag_list: for item in data_list: for row", "in 
tag_class: tag_list.append(item['tag']) ################## Get Tag List Completed ###################### intermediate_list = [] num_list", "dirs_list = [] data_dir = os.path.join('/nfs/masi/leeh43/MULAN_universal_lesion_analysis/results') file_list= get_files_endswith(data_dir, '.txt') data_list = [] for", "lesion ################ num_lesion = [] for item in data_list: for row in item:", "os.path.join('/nfs/masi/leeh43/MULAN_universal_lesion_analysis/program_data/' + 'tags_cache.json') with open(json_file) as f: json_data = json.load(f) tag_class=[] tag_class =", "tag in tag_list: for item in data_list: for row in item: if tag", "+ file) return file_list file_list = [] dirs_list = [] data_dir = os.path.join('/nfs/masi/leeh43/MULAN_universal_lesion_analysis/results')", "#plt.title('Evaluation of DeepLesion on Zhoubing100') #plt.show() #fig.set_size_inches(25, 40) #fig.savefig('evaluation.png', dpi=300) ################# Total number", "[] tag = [] for item in tag_class: tag_list.append(item['tag']) ################## Get Tag List", "for item in data_list: for row in item: if tag in row: intermediate_list.append(row)", "on Zhoubing100') #plt.show() #fig.set_size_inches(25, 40) #fig.savefig('evaluation.png', dpi=300) ################# Total number of lesion ################", "os.walk(src_dir): for file in files: if file.lower().endswith(ext.lower()): file_list.append(root + '/' + file) return", "Graph ################### #width = 0.5 #fig = plt.figure(figsize=(10,40)) #y_pos = np.arange(len(tag_list)) #plt.barh(y_pos,num_list, width,", "= [] for root, dirs, files in os.walk(src_dir): for file in files: if", "os import numpy as np import json import matplotlib.pyplot as plt; plt.rcdefaults() import", "i in file_list: with open(i) as f: content = f.readlines() content = [x.strip()", "numpy as np import json import matplotlib.pyplot as plt; plt.rcdefaults() import matplotlib.pyplot as", "f: content = f.readlines() content = [x.strip() for x in content] 
data_list.append(content) json_file", "as plt import random def get_files_endswith(src_dir, ext): if not os.path.isdir(src_dir): raise ValueError('Folder does", "np.arange(len(tag_list)) #plt.barh(y_pos,num_list, width, align='center') #plt.yticks(y_pos, tag_list) #plt.xlabel('Numbers in Zhoubing datasets') #plt.title('Evaluation of DeepLesion", "row: num_lesion.append(row) ################# Total number of liver lesion ################ liver_list = [] for", "0.5 #fig = plt.figure(figsize=(10,40)) #y_pos = np.arange(len(tag_list)) #plt.barh(y_pos,num_list, width, align='center') #plt.yticks(y_pos, tag_list) #plt.xlabel('Numbers", "python3 # -*- coding: utf-8 -*- \"\"\" Created on Sun Sep 29 02:29:53", "'.txt') data_list = [] for i in file_list: with open(i) as f: content", "= [] for item in tag_class: tag_list.append(item['tag']) ################## Get Tag List Completed ######################", "dirs, files in os.walk(src_dir): for file in files: if file.lower().endswith(ext.lower()): file_list.append(root + '/'", "data_list.append(content) json_file = os.path.join('/nfs/masi/leeh43/MULAN_universal_lesion_analysis/program_data/' + 'tags_cache.json') with open(json_file) as f: json_data = json.load(f)", "return file_list file_list = [] dirs_list = [] data_dir = os.path.join('/nfs/masi/leeh43/MULAN_universal_lesion_analysis/results') file_list= get_files_endswith(data_dir,", "@author: leeh43 \"\"\" import os import numpy as np import json import matplotlib.pyplot", "not os.path.isdir(src_dir): raise ValueError('Folder does not exist:' + src_dir) file_list = [] for", "= f.readlines() content = [x.strip() for x in content] data_list.append(content) json_file = os.path.join('/nfs/masi/leeh43/MULAN_universal_lesion_analysis/program_data/'", "in file_list: with open(i) as f: content = f.readlines() content = [x.strip() for", "does not exist:' + src_dir) file_list = [] for root, dirs, files in", "= json.load(f) tag_class=[] tag_class = json_data['tag_dict_list'] tag_list 
= [] tag = [] for", "for tag in tag_list: for item in data_list: for row in item: if", "#plt.xlabel('Numbers in Zhoubing datasets') #plt.title('Evaluation of DeepLesion on Zhoubing100') #plt.show() #fig.set_size_inches(25, 40) #fig.savefig('evaluation.png',", "intermediate_list = [] num_list = [] for tag in tag_list: for item in", "in tag_list: for item in data_list: for row in item: if tag in", "file_list = [] dirs_list = [] data_dir = os.path.join('/nfs/masi/leeh43/MULAN_universal_lesion_analysis/results') file_list= get_files_endswith(data_dir, '.txt') data_list", "json_file = os.path.join('/nfs/masi/leeh43/MULAN_universal_lesion_analysis/program_data/' + 'tags_cache.json') with open(json_file) as f: json_data = json.load(f) tag_class=[]", "Created on Sun Sep 29 02:29:53 2019 @author: leeh43 \"\"\" import os import", "item in data_list: for row in item: if 'lesion' in row: num_lesion.append(row) #################", "liver lesion ################ liver_list = [] for item in num_lesion: if 'liver' in" ]
[ "return \"metric\" @property def UnitModules(self): return [ self.Millimeter, self.Centimeter, self.Decimeter, self.Meter ] @property", "return Millimeter @property def Centimeter(self): from .centimeter import Centimeter return Centimeter @property def", "# May need to be \"id\" for combos \"abbreviation\": mod.Abbreviation, \"base_conversion_multiplier\": mod.BaseConversionMultiplier, }", "import SystemToDict return SystemToDict(self) def get_all_unit_combos(self): units = [] for mod in self.UnitModules:", "@property def Meter(self): from .meter import Meter return Meter def ToDict(self): from ..", "Meter(self): from .meter import Meter return Meter def ToDict(self): from .. import SystemToDict", "need to be \"id\" for combos \"abbreviation\": mod.Abbreviation, \"base_conversion_multiplier\": mod.BaseConversionMultiplier, } units.append(unit_combo) return", "def ToDict(self): from .. import SystemToDict return SystemToDict(self) def get_all_unit_combos(self): units = []", "DisplayName(self): return \"Metric\" @property def AssetPath(self): return \"metric\" @property def UnitModules(self): return [", "\"id\" for combos \"abbreviation\": mod.Abbreviation, \"base_conversion_multiplier\": mod.BaseConversionMultiplier, } units.append(unit_combo) return units Metric =", "# <NAME>, <EMAIL> class _Metric: def __init__(self): pass @property def DisplayName(self): return \"Metric\"", "@property def DisplayName(self): return \"Metric\" @property def AssetPath(self): return \"metric\" @property def UnitModules(self):", "return Meter def ToDict(self): from .. 
import SystemToDict return SystemToDict(self) def get_all_unit_combos(self): units", "\"metric\" @property def UnitModules(self): return [ self.Millimeter, self.Centimeter, self.Decimeter, self.Meter ] @property def", "#!/usr/bin/python # # 2022 <NAME>, <EMAIL> # <NAME>, <EMAIL> class _Metric: def __init__(self):", "def Centimeter(self): from .centimeter import Centimeter return Centimeter @property def Decimeter(self): from .decimeter", "] @property def UnitCombos(self): return self.get_all_unit_combos() @property def BaseUnit(self): return self.Meter.ToDict() @property def", "Meter def ToDict(self): from .. import SystemToDict return SystemToDict(self) def get_all_unit_combos(self): units =", "be \"id\" for combos \"abbreviation\": mod.Abbreviation, \"base_conversion_multiplier\": mod.BaseConversionMultiplier, } units.append(unit_combo) return units Metric", "\"Metric\" @property def AssetPath(self): return \"metric\" @property def UnitModules(self): return [ self.Millimeter, self.Centimeter,", "from .meter import Meter return Meter def ToDict(self): from .. import SystemToDict return", ".. 
import SystemToDict return SystemToDict(self) def get_all_unit_combos(self): units = [] for mod in", "self.Meter.ToDict() @property def Millimeter(self): from .millimeter import Millimeter return Millimeter @property def Centimeter(self):", "{ \"display_name\": mod.DisplayName, \"asset_path\": mod.AssetPath, # May need to be \"id\" for combos", "return \"Metric\" @property def AssetPath(self): return \"metric\" @property def UnitModules(self): return [ self.Millimeter,", "from .millimeter import Millimeter return Millimeter @property def Centimeter(self): from .centimeter import Centimeter", "for mod in self.UnitModules: unit_combo = { \"display_name\": mod.DisplayName, \"asset_path\": mod.AssetPath, # May", "<EMAIL> class _Metric: def __init__(self): pass @property def DisplayName(self): return \"Metric\" @property def", "Centimeter(self): from .centimeter import Centimeter return Centimeter @property def Decimeter(self): from .decimeter import", ".centimeter import Centimeter return Centimeter @property def Decimeter(self): from .decimeter import Decimeter return", "self.Decimeter, self.Meter ] @property def UnitCombos(self): return self.get_all_unit_combos() @property def BaseUnit(self): return self.Meter.ToDict()", "AssetPath(self): return \"metric\" @property def UnitModules(self): return [ self.Millimeter, self.Centimeter, self.Decimeter, self.Meter ]", "def DisplayName(self): return \"Metric\" @property def AssetPath(self): return \"metric\" @property def UnitModules(self): return", "def UnitCombos(self): return self.get_all_unit_combos() @property def BaseUnit(self): return self.Meter.ToDict() @property def Millimeter(self): from", "from .decimeter import Decimeter return Decimeter @property def Meter(self): from .meter import Meter", "return self.Meter.ToDict() @property def Millimeter(self): from .millimeter import Millimeter return Millimeter @property def", "def Decimeter(self): from .decimeter import Decimeter return Decimeter @property def 
Meter(self): from .meter", "def Millimeter(self): from .millimeter import Millimeter return Millimeter @property def Centimeter(self): from .centimeter", "\"display_name\": mod.DisplayName, \"asset_path\": mod.AssetPath, # May need to be \"id\" for combos \"abbreviation\":", "def BaseUnit(self): return self.Meter.ToDict() @property def Millimeter(self): from .millimeter import Millimeter return Millimeter", "Decimeter(self): from .decimeter import Decimeter return Decimeter @property def Meter(self): from .meter import", "import Meter return Meter def ToDict(self): from .. import SystemToDict return SystemToDict(self) def", "in self.UnitModules: unit_combo = { \"display_name\": mod.DisplayName, \"asset_path\": mod.AssetPath, # May need to", "mod.DisplayName, \"asset_path\": mod.AssetPath, # May need to be \"id\" for combos \"abbreviation\": mod.Abbreviation,", "get_all_unit_combos(self): units = [] for mod in self.UnitModules: unit_combo = { \"display_name\": mod.DisplayName,", "pass @property def DisplayName(self): return \"Metric\" @property def AssetPath(self): return \"metric\" @property def", "@property def BaseUnit(self): return self.Meter.ToDict() @property def Millimeter(self): from .millimeter import Millimeter return", "self.Millimeter, self.Centimeter, self.Decimeter, self.Meter ] @property def UnitCombos(self): return self.get_all_unit_combos() @property def BaseUnit(self):", "self.get_all_unit_combos() @property def BaseUnit(self): return self.Meter.ToDict() @property def Millimeter(self): from .millimeter import Millimeter", "return Centimeter @property def Decimeter(self): from .decimeter import Decimeter return Decimeter @property def", "to be \"id\" for combos \"abbreviation\": mod.Abbreviation, \"base_conversion_multiplier\": mod.BaseConversionMultiplier, } units.append(unit_combo) return units", "May need to be \"id\" for combos \"abbreviation\": mod.Abbreviation, \"base_conversion_multiplier\": mod.BaseConversionMultiplier, } 
units.append(unit_combo)", ".decimeter import Decimeter return Decimeter @property def Meter(self): from .meter import Meter return", "import Decimeter return Decimeter @property def Meter(self): from .meter import Meter return Meter", "from .. import SystemToDict return SystemToDict(self) def get_all_unit_combos(self): units = [] for mod", "mod in self.UnitModules: unit_combo = { \"display_name\": mod.DisplayName, \"asset_path\": mod.AssetPath, # May need", "[] for mod in self.UnitModules: unit_combo = { \"display_name\": mod.DisplayName, \"asset_path\": mod.AssetPath, #", "units = [] for mod in self.UnitModules: unit_combo = { \"display_name\": mod.DisplayName, \"asset_path\":", "\"asset_path\": mod.AssetPath, # May need to be \"id\" for combos \"abbreviation\": mod.Abbreviation, \"base_conversion_multiplier\":", "import Centimeter return Centimeter @property def Decimeter(self): from .decimeter import Decimeter return Decimeter", "def AssetPath(self): return \"metric\" @property def UnitModules(self): return [ self.Millimeter, self.Centimeter, self.Decimeter, self.Meter", "= { \"display_name\": mod.DisplayName, \"asset_path\": mod.AssetPath, # May need to be \"id\" for", "Millimeter(self): from .millimeter import Millimeter return Millimeter @property def Centimeter(self): from .centimeter import", "<NAME>, <EMAIL> # <NAME>, <EMAIL> class _Metric: def __init__(self): pass @property def DisplayName(self):", "@property def AssetPath(self): return \"metric\" @property def UnitModules(self): return [ self.Millimeter, self.Centimeter, self.Decimeter,", "UnitCombos(self): return self.get_all_unit_combos() @property def BaseUnit(self): return self.Meter.ToDict() @property def Millimeter(self): from .millimeter", ".millimeter import Millimeter return Millimeter @property def Centimeter(self): from .centimeter import Centimeter return", "__init__(self): pass @property def DisplayName(self): return \"Metric\" @property def AssetPath(self): return \"metric\" @property", 
"import Millimeter return Millimeter @property def Centimeter(self): from .centimeter import Centimeter return Centimeter", "unit_combo = { \"display_name\": mod.DisplayName, \"asset_path\": mod.AssetPath, # May need to be \"id\"", "Centimeter @property def Decimeter(self): from .decimeter import Decimeter return Decimeter @property def Meter(self):", "<EMAIL> # <NAME>, <EMAIL> class _Metric: def __init__(self): pass @property def DisplayName(self): return", "def Meter(self): from .meter import Meter return Meter def ToDict(self): from .. import", "self.UnitModules: unit_combo = { \"display_name\": mod.DisplayName, \"asset_path\": mod.AssetPath, # May need to be", "@property def Centimeter(self): from .centimeter import Centimeter return Centimeter @property def Decimeter(self): from", "return Decimeter @property def Meter(self): from .meter import Meter return Meter def ToDict(self):", "@property def UnitModules(self): return [ self.Millimeter, self.Centimeter, self.Decimeter, self.Meter ] @property def UnitCombos(self):", ".meter import Meter return Meter def ToDict(self): from .. 
import SystemToDict return SystemToDict(self)", "for combos \"abbreviation\": mod.Abbreviation, \"base_conversion_multiplier\": mod.BaseConversionMultiplier, } units.append(unit_combo) return units Metric = _Metric()", "def get_all_unit_combos(self): units = [] for mod in self.UnitModules: unit_combo = { \"display_name\":", "_Metric: def __init__(self): pass @property def DisplayName(self): return \"Metric\" @property def AssetPath(self): return", "mod.AssetPath, # May need to be \"id\" for combos \"abbreviation\": mod.Abbreviation, \"base_conversion_multiplier\": mod.BaseConversionMultiplier,", "@property def Millimeter(self): from .millimeter import Millimeter return Millimeter @property def Centimeter(self): from", "return self.get_all_unit_combos() @property def BaseUnit(self): return self.Meter.ToDict() @property def Millimeter(self): from .millimeter import", "Millimeter @property def Centimeter(self): from .centimeter import Centimeter return Centimeter @property def Decimeter(self):", "return [ self.Millimeter, self.Centimeter, self.Decimeter, self.Meter ] @property def UnitCombos(self): return self.get_all_unit_combos() @property", "from .centimeter import Centimeter return Centimeter @property def Decimeter(self): from .decimeter import Decimeter", "def __init__(self): pass @property def DisplayName(self): return \"Metric\" @property def AssetPath(self): return \"metric\"", "class _Metric: def __init__(self): pass @property def DisplayName(self): return \"Metric\" @property def AssetPath(self):", "Decimeter @property def Meter(self): from .meter import Meter return Meter def ToDict(self): from", "Decimeter return Decimeter @property def Meter(self): from .meter import Meter return Meter def", "ToDict(self): from .. 
import SystemToDict return SystemToDict(self) def get_all_unit_combos(self): units = [] for", "UnitModules(self): return [ self.Millimeter, self.Centimeter, self.Decimeter, self.Meter ] @property def UnitCombos(self): return self.get_all_unit_combos()", "def UnitModules(self): return [ self.Millimeter, self.Centimeter, self.Decimeter, self.Meter ] @property def UnitCombos(self): return", "Millimeter return Millimeter @property def Centimeter(self): from .centimeter import Centimeter return Centimeter @property", "return SystemToDict(self) def get_all_unit_combos(self): units = [] for mod in self.UnitModules: unit_combo =", "self.Meter ] @property def UnitCombos(self): return self.get_all_unit_combos() @property def BaseUnit(self): return self.Meter.ToDict() @property", "BaseUnit(self): return self.Meter.ToDict() @property def Millimeter(self): from .millimeter import Millimeter return Millimeter @property", "Meter return Meter def ToDict(self): from .. import SystemToDict return SystemToDict(self) def get_all_unit_combos(self):", "self.Centimeter, self.Decimeter, self.Meter ] @property def UnitCombos(self): return self.get_all_unit_combos() @property def BaseUnit(self): return", "# # 2022 <NAME>, <EMAIL> # <NAME>, <EMAIL> class _Metric: def __init__(self): pass", "# 2022 <NAME>, <EMAIL> # <NAME>, <EMAIL> class _Metric: def __init__(self): pass @property", "[ self.Millimeter, self.Centimeter, self.Decimeter, self.Meter ] @property def UnitCombos(self): return self.get_all_unit_combos() @property def", "@property def UnitCombos(self): return self.get_all_unit_combos() @property def BaseUnit(self): return self.Meter.ToDict() @property def Millimeter(self):", "@property def Decimeter(self): from .decimeter import Decimeter return Decimeter @property def Meter(self): from", "SystemToDict return SystemToDict(self) def get_all_unit_combos(self): units = [] for mod in self.UnitModules: unit_combo", "Centimeter return Centimeter @property def Decimeter(self): from 
.decimeter import Decimeter return Decimeter @property", "2022 <NAME>, <EMAIL> # <NAME>, <EMAIL> class _Metric: def __init__(self): pass @property def", "<NAME>, <EMAIL> class _Metric: def __init__(self): pass @property def DisplayName(self): return \"Metric\" @property", "= [] for mod in self.UnitModules: unit_combo = { \"display_name\": mod.DisplayName, \"asset_path\": mod.AssetPath,", "SystemToDict(self) def get_all_unit_combos(self): units = [] for mod in self.UnitModules: unit_combo = {" ]
[ "= DateTimeCreatedField() modified_at = DateTimeModifiedField() class Meta: get_latest_by = \"modified_at\" ordering = (\"-modified_at\",", "from django.db import models from .fields import DateTimeCreatedField, DateTimeModifiedField class BaseModel(models.Model): created_at =", "DateTimeModifiedField class BaseModel(models.Model): created_at = DateTimeCreatedField() modified_at = DateTimeModifiedField() class Meta: get_latest_by =", "from .fields import DateTimeCreatedField, DateTimeModifiedField class BaseModel(models.Model): created_at = DateTimeCreatedField() modified_at = DateTimeModifiedField()", "DateTimeCreatedField, DateTimeModifiedField class BaseModel(models.Model): created_at = DateTimeCreatedField() modified_at = DateTimeModifiedField() class Meta: get_latest_by", "models from .fields import DateTimeCreatedField, DateTimeModifiedField class BaseModel(models.Model): created_at = DateTimeCreatedField() modified_at =", "= DateTimeModifiedField() class Meta: get_latest_by = \"modified_at\" ordering = (\"-modified_at\", \"-created_at\") abstract =", ".fields import DateTimeCreatedField, DateTimeModifiedField class BaseModel(models.Model): created_at = DateTimeCreatedField() modified_at = DateTimeModifiedField() class", "django.db import models from .fields import DateTimeCreatedField, DateTimeModifiedField class BaseModel(models.Model): created_at = DateTimeCreatedField()", "created_at = DateTimeCreatedField() modified_at = DateTimeModifiedField() class Meta: get_latest_by = \"modified_at\" ordering =", "DateTimeCreatedField() modified_at = DateTimeModifiedField() class Meta: get_latest_by = \"modified_at\" ordering = (\"-modified_at\", \"-created_at\")", "modified_at = DateTimeModifiedField() class Meta: get_latest_by = \"modified_at\" ordering = (\"-modified_at\", \"-created_at\") abstract", "import DateTimeCreatedField, DateTimeModifiedField class BaseModel(models.Model): created_at = DateTimeCreatedField() modified_at = DateTimeModifiedField() class 
Meta:", "DateTimeModifiedField() class Meta: get_latest_by = \"modified_at\" ordering = (\"-modified_at\", \"-created_at\") abstract = True", "class BaseModel(models.Model): created_at = DateTimeCreatedField() modified_at = DateTimeModifiedField() class Meta: get_latest_by = \"modified_at\"", "import models from .fields import DateTimeCreatedField, DateTimeModifiedField class BaseModel(models.Model): created_at = DateTimeCreatedField() modified_at", "BaseModel(models.Model): created_at = DateTimeCreatedField() modified_at = DateTimeModifiedField() class Meta: get_latest_by = \"modified_at\" ordering" ]
[ "hib_sql_connection.queryToHibiscus(connection, \"select * from umsatz where betrag < 0;\") def getIncomingTransfers(): connection =", "0;\") def getIncomingTransfers(): connection = hib_sql_connection.connectToHibiscus() return hib_sql_connection.queryToHibiscus(connection, \"select * from umsatz where", "def getIncomingTransfers(): connection = hib_sql_connection.connectToHibiscus() return hib_sql_connection.queryToHibiscus(connection, \"select * from umsatz where betrag", "def getAllTransfers(): connection = hib_sql_connection.connectToHibiscus() return hib_sql_connection.queryToHibiscus(connection, \"select * from umsatz;\") def getOutgoingTransfers():", "\"select * from umsatz;\") def getOutgoingTransfers(): connection = hib_sql_connection.connectToHibiscus() return hib_sql_connection.queryToHibiscus(connection, \"select *", "hib_sql_connection.connectToHibiscus() return hib_sql_connection.queryToHibiscus(connection, \"select * from umsatz where betrag < 0;\") def getIncomingTransfers():", "= hib_sql_connection.connectToHibiscus() return hib_sql_connection.queryToHibiscus(connection, \"select * from umsatz;\") def getOutgoingTransfers(): connection = hib_sql_connection.connectToHibiscus()", "import hib_sql_connection def getAllTransfers(): connection = hib_sql_connection.connectToHibiscus() return hib_sql_connection.queryToHibiscus(connection, \"select * from umsatz;\")", "'alexanderstolz' import hib_sql_connection def getAllTransfers(): connection = hib_sql_connection.connectToHibiscus() return hib_sql_connection.queryToHibiscus(connection, \"select * from", "connection = hib_sql_connection.connectToHibiscus() return hib_sql_connection.queryToHibiscus(connection, \"select * from umsatz where betrag < 0;\")", "\"select * from umsatz where betrag < 0;\") def getIncomingTransfers(): connection = hib_sql_connection.connectToHibiscus()", "__author__ = 'alexanderstolz' import hib_sql_connection def getAllTransfers(): connection = 
hib_sql_connection.connectToHibiscus() return hib_sql_connection.queryToHibiscus(connection, \"select", "hib_sql_connection.queryToHibiscus(connection, \"select * from umsatz;\") def getOutgoingTransfers(): connection = hib_sql_connection.connectToHibiscus() return hib_sql_connection.queryToHibiscus(connection, \"select", "return hib_sql_connection.queryToHibiscus(connection, \"select * from umsatz where betrag < 0;\") def getIncomingTransfers(): connection", "umsatz where betrag < 0;\") def getIncomingTransfers(): connection = hib_sql_connection.connectToHibiscus() return hib_sql_connection.queryToHibiscus(connection, \"select", "= 'alexanderstolz' import hib_sql_connection def getAllTransfers(): connection = hib_sql_connection.connectToHibiscus() return hib_sql_connection.queryToHibiscus(connection, \"select *", "< 0;\") def getIncomingTransfers(): connection = hib_sql_connection.connectToHibiscus() return hib_sql_connection.queryToHibiscus(connection, \"select * from umsatz", "connection = hib_sql_connection.connectToHibiscus() return hib_sql_connection.queryToHibiscus(connection, \"select * from umsatz;\") def getOutgoingTransfers(): connection =", "connection = hib_sql_connection.connectToHibiscus() return hib_sql_connection.queryToHibiscus(connection, \"select * from umsatz where betrag > 0;\")", "def getOutgoingTransfers(): connection = hib_sql_connection.connectToHibiscus() return hib_sql_connection.queryToHibiscus(connection, \"select * from umsatz where betrag", "from umsatz;\") def getOutgoingTransfers(): connection = hib_sql_connection.connectToHibiscus() return hib_sql_connection.queryToHibiscus(connection, \"select * from umsatz", "hib_sql_connection def getAllTransfers(): connection = hib_sql_connection.connectToHibiscus() return hib_sql_connection.queryToHibiscus(connection, \"select * from umsatz;\") def", "where betrag < 0;\") def getIncomingTransfers(): connection = hib_sql_connection.connectToHibiscus() return 
hib_sql_connection.queryToHibiscus(connection, \"select *", "return hib_sql_connection.queryToHibiscus(connection, \"select * from umsatz;\") def getOutgoingTransfers(): connection = hib_sql_connection.connectToHibiscus() return hib_sql_connection.queryToHibiscus(connection,", "getIncomingTransfers(): connection = hib_sql_connection.connectToHibiscus() return hib_sql_connection.queryToHibiscus(connection, \"select * from umsatz where betrag >", "= hib_sql_connection.connectToHibiscus() return hib_sql_connection.queryToHibiscus(connection, \"select * from umsatz where betrag < 0;\") def", "* from umsatz where betrag < 0;\") def getIncomingTransfers(): connection = hib_sql_connection.connectToHibiscus() return", "getOutgoingTransfers(): connection = hib_sql_connection.connectToHibiscus() return hib_sql_connection.queryToHibiscus(connection, \"select * from umsatz where betrag <", "from umsatz where betrag < 0;\") def getIncomingTransfers(): connection = hib_sql_connection.connectToHibiscus() return hib_sql_connection.queryToHibiscus(connection,", "betrag < 0;\") def getIncomingTransfers(): connection = hib_sql_connection.connectToHibiscus() return hib_sql_connection.queryToHibiscus(connection, \"select * from", "* from umsatz;\") def getOutgoingTransfers(): connection = hib_sql_connection.connectToHibiscus() return hib_sql_connection.queryToHibiscus(connection, \"select * from", "hib_sql_connection.connectToHibiscus() return hib_sql_connection.queryToHibiscus(connection, \"select * from umsatz;\") def getOutgoingTransfers(): connection = hib_sql_connection.connectToHibiscus() return", "getAllTransfers(): connection = hib_sql_connection.connectToHibiscus() return hib_sql_connection.queryToHibiscus(connection, \"select * from umsatz;\") def getOutgoingTransfers(): connection", "umsatz;\") def getOutgoingTransfers(): connection = hib_sql_connection.connectToHibiscus() return hib_sql_connection.queryToHibiscus(connection, \"select * from umsatz where" ]
[ "config[\"label\"] = \"{}{}\".format(config[\"label\"],config[\"period\"]) BaseIndicator.__init__(self,csdata,config) self.chartcolors = [\"mediumslateblue\"] self.data = None self.analysis = None", "= None raise ex return self.data def get_analysis(self ): if self.data is None:", "get_ema(self): if self.csdata is not None: try: smetric = self.scaleup( self.csdata[self.config[\"metric\"]]) data =", "action res[\"analysis\"][\"ema\"] = ema res[\"analysis\"][\"slope\"] = slope res[\"analysis\"][\"order\"] = [\"ema\"] self.analysis = res", "res[\"analysis\"][\"order\"] = [\"ema\"] self.analysis = res return res def format_view(self): newres = dict(self.analysis[\"analysis\"])", "\"#FFF5EE\", \"yAxis\": 1, \"values\": data }] def get_ema(self): if self.csdata is not None:", "}, \"analysis\": OrderedDict() } res[\"analysis\"][\"name\"] = \"{}:{}\".format(self.get_name(),self.get_settings()) res[\"analysis\"][\"signal\"] = action res[\"analysis\"][\"ema\"] = ema", "smetric = self.scaleup( self.csdata[self.config[\"metric\"]]) data = talib.EMA( numpy.array(smetric), self.config[\"period\"]) self.data = self.scaledown(data) #", "= None for k in range(-1,-10,-1): if slope == None: slope = self.data[k-1]", "slope = slope / ( self.data[k-1] / self.data[k] ) last_price = self.csdata[\"closed\"][-1] closing_time", "/ ( self.data[k-1] / self.data[k] ) last_price = self.csdata[\"closed\"][-1] closing_time = self.csdata[\"time\"][-1] action", "BaseIndicator class EMA(BaseIndicator): def __init__(self,csdata, config = {}): config[\"period\"] = config.get(\"period\",30) config[\"metric\"] =", "res[\"analysis\"][\"name\"] = \"{}:{}\".format(self.get_name(),self.get_settings()) res[\"analysis\"][\"signal\"] = action res[\"analysis\"][\"ema\"] = ema res[\"analysis\"][\"slope\"] = slope res[\"analysis\"][\"order\"]", "ema res[\"analysis\"][\"slope\"] = slope res[\"analysis\"][\"order\"] = [\"ema\"] self.analysis = res return res def", "res[\"analysis\"][\"ema\"] = ema 
res[\"analysis\"][\"slope\"] = slope res[\"analysis\"][\"order\"] = [\"ema\"] self.analysis = res return", "} res[\"analysis\"][\"name\"] = \"{}:{}\".format(self.get_name(),self.get_settings()) res[\"analysis\"][\"signal\"] = action res[\"analysis\"][\"ema\"] = ema res[\"analysis\"][\"slope\"] = slope", "res[\"analysis\"][\"slope\"] = slope res[\"analysis\"][\"order\"] = [\"ema\"] self.analysis = res return res def format_view(self):", "return res def format_view(self): newres = dict(self.analysis[\"analysis\"]) newres[\"slope\"] = \"{:.4f}\".format(newres[\"slope\"]) newres[\"ema\"] = \"{:.8f}\".format(newres[\"ema\"])", "= {}): config[\"period\"] = config.get(\"period\",30) config[\"metric\"] = config.get(\"metric\",\"closed\") config[\"label\"] = config.get(\"label\",\"ema\") config[\"label\"] =", "slope = self.data[k-1] / self.data[k] else: slope = slope / ( self.data[k-1] /", "action = None if last_price < ema: action = \"oversold\" res = {", "return \"{}\".format(self.config[\"period\"]) def get_charts(self): data = [] for i in range(0,len(self.csdata[ self.config[\"metric\"] ])):", "from collections import OrderedDict from baseindicator import BaseIndicator class EMA(BaseIndicator): def __init__(self,csdata, config", "\"{}\".format(self.config[\"period\"]) def get_charts(self): data = [] for i in range(0,len(self.csdata[ self.config[\"metric\"] ])): if", "range(0,len(self.csdata[ self.config[\"metric\"] ])): if isinstance(self.data[i],numbers.Number) and self.data[i] > 0: ts = time.mktime(datetime.datetime.strptime(self.csdata[\"time\"][i], \"%Y-%m-%dT%H:%M:%SZ\").timetuple())", "self.data is None: self.get_ema() ema = self.data[-1] ema1 = self.data[-2] slope = None", "ex return self.data def get_analysis(self ): if self.data is None: self.get_ema() ema =", "for k in range(-1,-10,-1): if slope == None: slope = self.data[k-1] / self.data[k]", "self.data[k-1] / self.data[k] ) last_price = self.csdata[\"closed\"][-1] closing_time = self.csdata[\"time\"][-1] 
action = None", "import OrderedDict from baseindicator import BaseIndicator class EMA(BaseIndicator): def __init__(self,csdata, config = {}):", "\"weight\": 2, \"time\": closing_time, \"indicator-data\": { \"ema\": ema }, \"analysis\": OrderedDict() } res[\"analysis\"][\"name\"]", "def __init__(self,csdata, config = {}): config[\"period\"] = config.get(\"period\",30) config[\"metric\"] = config.get(\"metric\",\"closed\") config[\"label\"] =", "= [] for i in range(0,len(self.csdata[ self.config[\"metric\"] ])): if isinstance(self.data[i],numbers.Number) and self.data[i] >", "= self.scaledown(data) # scaledown except Exception as ex: self.data = None raise ex", "# scaledown except Exception as ex: self.data = None raise ex return self.data", "return [{ \"key\": \"{}:{}\".format(self.label,self.config[\"period\"]), \"type\": \"line\", \"color\": \"#FFF5EE\", \"yAxis\": 1, \"values\": data }]", "== None: slope = self.data[k-1] / self.data[k] else: slope = slope / (", "= slope / ( self.data[k-1] / self.data[k] ) last_price = self.csdata[\"closed\"][-1] closing_time =", "else: slope = slope / ( self.data[k-1] / self.data[k] ) last_price = self.csdata[\"closed\"][-1]", "{}): config[\"period\"] = config.get(\"period\",30) config[\"metric\"] = config.get(\"metric\",\"closed\") config[\"label\"] = config.get(\"label\",\"ema\") config[\"label\"] = \"{}{}\".format(config[\"label\"],config[\"period\"])", "\"line\", \"color\": \"#FFF5EE\", \"yAxis\": 1, \"values\": data }] def get_ema(self): if self.csdata is", "except Exception as ex: self.data = None raise ex return self.data def get_analysis(self", "EMA(BaseIndicator): def __init__(self,csdata, config = {}): config[\"period\"] = config.get(\"period\",30) config[\"metric\"] = config.get(\"metric\",\"closed\") config[\"label\"]", "self.data[-1] ema1 = self.data[-2] slope = None for k in range(-1,-10,-1): if slope", "\"y\": self.data[i], }) return [{ \"key\": \"{}:{}\".format(self.label,self.config[\"period\"]), \"type\": 
\"line\", \"color\": \"#FFF5EE\", \"yAxis\": 1,", "data }] def get_ema(self): if self.csdata is not None: try: smetric = self.scaleup(", "ema: action = \"oversold\" res = { \"weight\": 2, \"time\": closing_time, \"indicator-data\": {", "res[\"analysis\"][\"signal\"] = action res[\"analysis\"][\"ema\"] = ema res[\"analysis\"][\"slope\"] = slope res[\"analysis\"][\"order\"] = [\"ema\"] self.analysis", "self.csdata[\"closed\"][-1] closing_time = self.csdata[\"time\"][-1] action = None if last_price < ema: action =", "in range(0,len(self.csdata[ self.config[\"metric\"] ])): if isinstance(self.data[i],numbers.Number) and self.data[i] > 0: ts = time.mktime(datetime.datetime.strptime(self.csdata[\"time\"][i],", "import os,sys,talib,numpy,math,logging,time,datetime,numbers from collections import OrderedDict from baseindicator import BaseIndicator class EMA(BaseIndicator): def", "ema = self.data[-1] ema1 = self.data[-2] slope = None for k in range(-1,-10,-1):", "\"values\": data }] def get_ema(self): if self.csdata is not None: try: smetric =", "res def format_view(self): newres = dict(self.analysis[\"analysis\"]) newres[\"slope\"] = \"{:.4f}\".format(newres[\"slope\"]) newres[\"ema\"] = \"{:.8f}\".format(newres[\"ema\"]) return", "slope res[\"analysis\"][\"order\"] = [\"ema\"] self.analysis = res return res def format_view(self): newres =", "def get_ema(self): if self.csdata is not None: try: smetric = self.scaleup( self.csdata[self.config[\"metric\"]]) data", "self.data[i] > 0: ts = time.mktime(datetime.datetime.strptime(self.csdata[\"time\"][i], \"%Y-%m-%dT%H:%M:%SZ\").timetuple()) data.append({ \"x\": ts, \"y\": self.data[i], })", "}) return [{ \"key\": \"{}:{}\".format(self.label,self.config[\"period\"]), \"type\": \"line\", \"color\": \"#FFF5EE\", \"yAxis\": 1, \"values\": data", "isinstance(self.data[i],numbers.Number) and self.data[i] > 0: ts = time.mktime(datetime.datetime.strptime(self.csdata[\"time\"][i], \"%Y-%m-%dT%H:%M:%SZ\").timetuple()) data.append({ \"x\": 
ts, \"y\":", "ema1 = self.data[-2] slope = None for k in range(-1,-10,-1): if slope ==", "/ self.data[k] ) last_price = self.csdata[\"closed\"][-1] closing_time = self.csdata[\"time\"][-1] action = None if", "closing_time, \"indicator-data\": { \"ema\": ema }, \"analysis\": OrderedDict() } res[\"analysis\"][\"name\"] = \"{}:{}\".format(self.get_name(),self.get_settings()) res[\"analysis\"][\"signal\"]", "= res return res def format_view(self): newres = dict(self.analysis[\"analysis\"]) newres[\"slope\"] = \"{:.4f}\".format(newres[\"slope\"]) newres[\"ema\"]", "])): if isinstance(self.data[i],numbers.Number) and self.data[i] > 0: ts = time.mktime(datetime.datetime.strptime(self.csdata[\"time\"][i], \"%Y-%m-%dT%H:%M:%SZ\").timetuple()) data.append({ \"x\":", "[] for i in range(0,len(self.csdata[ self.config[\"metric\"] ])): if isinstance(self.data[i],numbers.Number) and self.data[i] > 0:", "= ema res[\"analysis\"][\"slope\"] = slope res[\"analysis\"][\"order\"] = [\"ema\"] self.analysis = res return res", "config.get(\"label\",\"ema\") config[\"label\"] = \"{}{}\".format(config[\"label\"],config[\"period\"]) BaseIndicator.__init__(self,csdata,config) self.chartcolors = [\"mediumslateblue\"] self.data = None self.analysis =", "ex: self.data = None raise ex return self.data def get_analysis(self ): if self.data", "is not None: try: smetric = self.scaleup( self.csdata[self.config[\"metric\"]]) data = talib.EMA( numpy.array(smetric), self.config[\"period\"])", "= self.csdata[\"time\"][-1] action = None if last_price < ema: action = \"oversold\" res", "= talib.EMA( numpy.array(smetric), self.config[\"period\"]) self.data = self.scaledown(data) # scaledown except Exception as ex:", "\"ema\": ema }, \"analysis\": OrderedDict() } res[\"analysis\"][\"name\"] = \"{}:{}\".format(self.get_name(),self.get_settings()) res[\"analysis\"][\"signal\"] = action res[\"analysis\"][\"ema\"]", "[\"mediumslateblue\"] self.data = None self.analysis = None self.get_analysis() def 
get_settings(self): return \"{}\".format(self.config[\"period\"]) def", "Exception as ex: self.data = None raise ex return self.data def get_analysis(self ):", "= config.get(\"period\",30) config[\"metric\"] = config.get(\"metric\",\"closed\") config[\"label\"] = config.get(\"label\",\"ema\") config[\"label\"] = \"{}{}\".format(config[\"label\"],config[\"period\"]) BaseIndicator.__init__(self,csdata,config) self.chartcolors", "self.data[k] else: slope = slope / ( self.data[k-1] / self.data[k] ) last_price =", "config[\"label\"] = config.get(\"label\",\"ema\") config[\"label\"] = \"{}{}\".format(config[\"label\"],config[\"period\"]) BaseIndicator.__init__(self,csdata,config) self.chartcolors = [\"mediumslateblue\"] self.data = None", "self.analysis = None self.get_analysis() def get_settings(self): return \"{}\".format(self.config[\"period\"]) def get_charts(self): data = []", "\"type\": \"line\", \"color\": \"#FFF5EE\", \"yAxis\": 1, \"values\": data }] def get_ema(self): if self.csdata", "= None if last_price < ema: action = \"oversold\" res = { \"weight\":", "= [\"ema\"] self.analysis = res return res def format_view(self): newres = dict(self.analysis[\"analysis\"]) newres[\"slope\"]", "self.config[\"metric\"] ])): if isinstance(self.data[i],numbers.Number) and self.data[i] > 0: ts = time.mktime(datetime.datetime.strptime(self.csdata[\"time\"][i], \"%Y-%m-%dT%H:%M:%SZ\").timetuple()) data.append({", "= time.mktime(datetime.datetime.strptime(self.csdata[\"time\"][i], \"%Y-%m-%dT%H:%M:%SZ\").timetuple()) data.append({ \"x\": ts, \"y\": self.data[i], }) return [{ \"key\": \"{}:{}\".format(self.label,self.config[\"period\"]),", "data = [] for i in range(0,len(self.csdata[ self.config[\"metric\"] ])): if isinstance(self.data[i],numbers.Number) and self.data[i]", "( self.data[k-1] / self.data[k] ) last_price = self.csdata[\"closed\"][-1] closing_time = self.csdata[\"time\"][-1] action =", "from baseindicator import BaseIndicator class EMA(BaseIndicator): def 
__init__(self,csdata, config = {}): config[\"period\"] =", "> 0: ts = time.mktime(datetime.datetime.strptime(self.csdata[\"time\"][i], \"%Y-%m-%dT%H:%M:%SZ\").timetuple()) data.append({ \"x\": ts, \"y\": self.data[i], }) return", "ts = time.mktime(datetime.datetime.strptime(self.csdata[\"time\"][i], \"%Y-%m-%dT%H:%M:%SZ\").timetuple()) data.append({ \"x\": ts, \"y\": self.data[i], }) return [{ \"key\":", "}] def get_ema(self): if self.csdata is not None: try: smetric = self.scaleup( self.csdata[self.config[\"metric\"]])", "self.data[-2] slope = None for k in range(-1,-10,-1): if slope == None: slope", "{ \"weight\": 2, \"time\": closing_time, \"indicator-data\": { \"ema\": ema }, \"analysis\": OrderedDict() }", "slope = None for k in range(-1,-10,-1): if slope == None: slope =", "talib.EMA( numpy.array(smetric), self.config[\"period\"]) self.data = self.scaledown(data) # scaledown except Exception as ex: self.data", "\"{}:{}\".format(self.get_name(),self.get_settings()) res[\"analysis\"][\"signal\"] = action res[\"analysis\"][\"ema\"] = ema res[\"analysis\"][\"slope\"] = slope res[\"analysis\"][\"order\"] = [\"ema\"]", "data.append({ \"x\": ts, \"y\": self.data[i], }) return [{ \"key\": \"{}:{}\".format(self.label,self.config[\"period\"]), \"type\": \"line\", \"color\":", "{ \"ema\": ema }, \"analysis\": OrderedDict() } res[\"analysis\"][\"name\"] = \"{}:{}\".format(self.get_name(),self.get_settings()) res[\"analysis\"][\"signal\"] = action", "self.data[i], }) return [{ \"key\": \"{}:{}\".format(self.label,self.config[\"period\"]), \"type\": \"line\", \"color\": \"#FFF5EE\", \"yAxis\": 1, \"values\":", "self.csdata[\"time\"][-1] action = None if last_price < ema: action = \"oversold\" res =", "\"oversold\" res = { \"weight\": 2, \"time\": closing_time, \"indicator-data\": { \"ema\": ema },", "config[\"period\"] = config.get(\"period\",30) config[\"metric\"] = config.get(\"metric\",\"closed\") config[\"label\"] = config.get(\"label\",\"ema\") config[\"label\"] = 
\"{}{}\".format(config[\"label\"],config[\"period\"]) BaseIndicator.__init__(self,csdata,config)", "= config.get(\"metric\",\"closed\") config[\"label\"] = config.get(\"label\",\"ema\") config[\"label\"] = \"{}{}\".format(config[\"label\"],config[\"period\"]) BaseIndicator.__init__(self,csdata,config) self.chartcolors = [\"mediumslateblue\"] self.data", "os,sys,talib,numpy,math,logging,time,datetime,numbers from collections import OrderedDict from baseindicator import BaseIndicator class EMA(BaseIndicator): def __init__(self,csdata,", "def get_settings(self): return \"{}\".format(self.config[\"period\"]) def get_charts(self): data = [] for i in range(0,len(self.csdata[", "None if last_price < ema: action = \"oversold\" res = { \"weight\": 2,", "= config.get(\"label\",\"ema\") config[\"label\"] = \"{}{}\".format(config[\"label\"],config[\"period\"]) BaseIndicator.__init__(self,csdata,config) self.chartcolors = [\"mediumslateblue\"] self.data = None self.analysis", "\"{}{}\".format(config[\"label\"],config[\"period\"]) BaseIndicator.__init__(self,csdata,config) self.chartcolors = [\"mediumslateblue\"] self.data = None self.analysis = None self.get_analysis() def", "self.csdata[self.config[\"metric\"]]) data = talib.EMA( numpy.array(smetric), self.config[\"period\"]) self.data = self.scaledown(data) # scaledown except Exception", "time.mktime(datetime.datetime.strptime(self.csdata[\"time\"][i], \"%Y-%m-%dT%H:%M:%SZ\").timetuple()) data.append({ \"x\": ts, \"y\": self.data[i], }) return [{ \"key\": \"{}:{}\".format(self.label,self.config[\"period\"]), \"type\":", "= \"oversold\" res = { \"weight\": 2, \"time\": closing_time, \"indicator-data\": { \"ema\": ema", "\"indicator-data\": { \"ema\": ema }, \"analysis\": OrderedDict() } res[\"analysis\"][\"name\"] = \"{}:{}\".format(self.get_name(),self.get_settings()) res[\"analysis\"][\"signal\"] =", "raise ex return self.data def get_analysis(self ): if self.data is None: self.get_ema() ema", "__init__(self,csdata, config = 
{}): config[\"period\"] = config.get(\"period\",30) config[\"metric\"] = config.get(\"metric\",\"closed\") config[\"label\"] = config.get(\"label\",\"ema\")", "= self.data[-1] ema1 = self.data[-2] slope = None for k in range(-1,-10,-1): if", "if isinstance(self.data[i],numbers.Number) and self.data[i] > 0: ts = time.mktime(datetime.datetime.strptime(self.csdata[\"time\"][i], \"%Y-%m-%dT%H:%M:%SZ\").timetuple()) data.append({ \"x\": ts,", "None self.get_analysis() def get_settings(self): return \"{}\".format(self.config[\"period\"]) def get_charts(self): data = [] for i", "= self.data[k-1] / self.data[k] else: slope = slope / ( self.data[k-1] / self.data[k]", "def get_charts(self): data = [] for i in range(0,len(self.csdata[ self.config[\"metric\"] ])): if isinstance(self.data[i],numbers.Number)", "= self.data[-2] slope = None for k in range(-1,-10,-1): if slope == None:", "config = {}): config[\"period\"] = config.get(\"period\",30) config[\"metric\"] = config.get(\"metric\",\"closed\") config[\"label\"] = config.get(\"label\",\"ema\") config[\"label\"]", "/ self.data[k] else: slope = slope / ( self.data[k-1] / self.data[k] ) last_price", "res = { \"weight\": 2, \"time\": closing_time, \"indicator-data\": { \"ema\": ema }, \"analysis\":", "= self.csdata[\"closed\"][-1] closing_time = self.csdata[\"time\"][-1] action = None if last_price < ema: action", "action = \"oversold\" res = { \"weight\": 2, \"time\": closing_time, \"indicator-data\": { \"ema\":", "< ema: action = \"oversold\" res = { \"weight\": 2, \"time\": closing_time, \"indicator-data\":", "self.data[k] ) last_price = self.csdata[\"closed\"][-1] closing_time = self.csdata[\"time\"][-1] action = None if last_price", "None: self.get_ema() ema = self.data[-1] ema1 = self.data[-2] slope = None for k", ") last_price = self.csdata[\"closed\"][-1] closing_time = self.csdata[\"time\"][-1] action = None if last_price <", "if self.data is None: self.get_ema() ema = self.data[-1] ema1 = self.data[-2] slope =", 
"slope / ( self.data[k-1] / self.data[k] ) last_price = self.csdata[\"closed\"][-1] closing_time = self.csdata[\"time\"][-1]", "get_charts(self): data = [] for i in range(0,len(self.csdata[ self.config[\"metric\"] ])): if isinstance(self.data[i],numbers.Number) and", "self.data def get_analysis(self ): if self.data is None: self.get_ema() ema = self.data[-1] ema1", "try: smetric = self.scaleup( self.csdata[self.config[\"metric\"]]) data = talib.EMA( numpy.array(smetric), self.config[\"period\"]) self.data = self.scaledown(data)", "not None: try: smetric = self.scaleup( self.csdata[self.config[\"metric\"]]) data = talib.EMA( numpy.array(smetric), self.config[\"period\"]) self.data", "= None self.get_analysis() def get_settings(self): return \"{}\".format(self.config[\"period\"]) def get_charts(self): data = [] for", "= \"{}{}\".format(config[\"label\"],config[\"period\"]) BaseIndicator.__init__(self,csdata,config) self.chartcolors = [\"mediumslateblue\"] self.data = None self.analysis = None self.get_analysis()", "self.csdata is not None: try: smetric = self.scaleup( self.csdata[self.config[\"metric\"]]) data = talib.EMA( numpy.array(smetric),", "def get_analysis(self ): if self.data is None: self.get_ema() ema = self.data[-1] ema1 =", "None self.analysis = None self.get_analysis() def get_settings(self): return \"{}\".format(self.config[\"period\"]) def get_charts(self): data =", "= [\"mediumslateblue\"] self.data = None self.analysis = None self.get_analysis() def get_settings(self): return \"{}\".format(self.config[\"period\"])", "\"time\": closing_time, \"indicator-data\": { \"ema\": ema }, \"analysis\": OrderedDict() } res[\"analysis\"][\"name\"] = \"{}:{}\".format(self.get_name(),self.get_settings())", "res return res def format_view(self): newres = dict(self.analysis[\"analysis\"]) newres[\"slope\"] = \"{:.4f}\".format(newres[\"slope\"]) newres[\"ema\"] =", "\"x\": ts, \"y\": self.data[i], }) return [{ \"key\": 
\"{}:{}\".format(self.label,self.config[\"period\"]), \"type\": \"line\", \"color\": \"#FFF5EE\",", "get_analysis(self ): if self.data is None: self.get_ema() ema = self.data[-1] ema1 = self.data[-2]", "i in range(0,len(self.csdata[ self.config[\"metric\"] ])): if isinstance(self.data[i],numbers.Number) and self.data[i] > 0: ts =", "self.data[k-1] / self.data[k] else: slope = slope / ( self.data[k-1] / self.data[k] )", "for i in range(0,len(self.csdata[ self.config[\"metric\"] ])): if isinstance(self.data[i],numbers.Number) and self.data[i] > 0: ts", "self.scaledown(data) # scaledown except Exception as ex: self.data = None raise ex return", "None raise ex return self.data def get_analysis(self ): if self.data is None: self.get_ema()", "def format_view(self): newres = dict(self.analysis[\"analysis\"]) newres[\"slope\"] = \"{:.4f}\".format(newres[\"slope\"]) newres[\"ema\"] = \"{:.8f}\".format(newres[\"ema\"]) return newres", "\"yAxis\": 1, \"values\": data }] def get_ema(self): if self.csdata is not None: try:", "= slope res[\"analysis\"][\"order\"] = [\"ema\"] self.analysis = res return res def format_view(self): newres", "self.chartcolors = [\"mediumslateblue\"] self.data = None self.analysis = None self.get_analysis() def get_settings(self): return", "None for k in range(-1,-10,-1): if slope == None: slope = self.data[k-1] /", "= \"{}:{}\".format(self.get_name(),self.get_settings()) res[\"analysis\"][\"signal\"] = action res[\"analysis\"][\"ema\"] = ema res[\"analysis\"][\"slope\"] = slope res[\"analysis\"][\"order\"] =", "numpy.array(smetric), self.config[\"period\"]) self.data = self.scaledown(data) # scaledown except Exception as ex: self.data =", "scaledown except Exception as ex: self.data = None raise ex return self.data def", "OrderedDict from baseindicator import BaseIndicator class EMA(BaseIndicator): def __init__(self,csdata, config = {}): config[\"period\"]", "self.data = None raise ex return self.data def get_analysis(self ): if self.data is", 
"config.get(\"period\",30) config[\"metric\"] = config.get(\"metric\",\"closed\") config[\"label\"] = config.get(\"label\",\"ema\") config[\"label\"] = \"{}{}\".format(config[\"label\"],config[\"period\"]) BaseIndicator.__init__(self,csdata,config) self.chartcolors =", "= { \"weight\": 2, \"time\": closing_time, \"indicator-data\": { \"ema\": ema }, \"analysis\": OrderedDict()", "is None: self.get_ema() ema = self.data[-1] ema1 = self.data[-2] slope = None for", "self.data = None self.analysis = None self.get_analysis() def get_settings(self): return \"{}\".format(self.config[\"period\"]) def get_charts(self):", "class EMA(BaseIndicator): def __init__(self,csdata, config = {}): config[\"period\"] = config.get(\"period\",30) config[\"metric\"] = config.get(\"metric\",\"closed\")", "None: try: smetric = self.scaleup( self.csdata[self.config[\"metric\"]]) data = talib.EMA( numpy.array(smetric), self.config[\"period\"]) self.data =", "0: ts = time.mktime(datetime.datetime.strptime(self.csdata[\"time\"][i], \"%Y-%m-%dT%H:%M:%SZ\").timetuple()) data.append({ \"x\": ts, \"y\": self.data[i], }) return [{", "ts, \"y\": self.data[i], }) return [{ \"key\": \"{}:{}\".format(self.label,self.config[\"period\"]), \"type\": \"line\", \"color\": \"#FFF5EE\", \"yAxis\":", "\"color\": \"#FFF5EE\", \"yAxis\": 1, \"values\": data }] def get_ema(self): if self.csdata is not", "BaseIndicator.__init__(self,csdata,config) self.chartcolors = [\"mediumslateblue\"] self.data = None self.analysis = None self.get_analysis() def get_settings(self):", "= None self.analysis = None self.get_analysis() def get_settings(self): return \"{}\".format(self.config[\"period\"]) def get_charts(self): data", "last_price = self.csdata[\"closed\"][-1] closing_time = self.csdata[\"time\"][-1] action = None if last_price < ema:", "get_settings(self): return \"{}\".format(self.config[\"period\"]) def get_charts(self): data = [] for i in range(0,len(self.csdata[ self.config[\"metric\"]", "self.scaleup( 
self.csdata[self.config[\"metric\"]]) data = talib.EMA( numpy.array(smetric), self.config[\"period\"]) self.data = self.scaledown(data) # scaledown except", "OrderedDict() } res[\"analysis\"][\"name\"] = \"{}:{}\".format(self.get_name(),self.get_settings()) res[\"analysis\"][\"signal\"] = action res[\"analysis\"][\"ema\"] = ema res[\"analysis\"][\"slope\"] =", "if last_price < ema: action = \"oversold\" res = { \"weight\": 2, \"time\":", "k in range(-1,-10,-1): if slope == None: slope = self.data[k-1] / self.data[k] else:", "if slope == None: slope = self.data[k-1] / self.data[k] else: slope = slope", "last_price < ema: action = \"oversold\" res = { \"weight\": 2, \"time\": closing_time,", "closing_time = self.csdata[\"time\"][-1] action = None if last_price < ema: action = \"oversold\"", "ema }, \"analysis\": OrderedDict() } res[\"analysis\"][\"name\"] = \"{}:{}\".format(self.get_name(),self.get_settings()) res[\"analysis\"][\"signal\"] = action res[\"analysis\"][\"ema\"] =", "config.get(\"metric\",\"closed\") config[\"label\"] = config.get(\"label\",\"ema\") config[\"label\"] = \"{}{}\".format(config[\"label\"],config[\"period\"]) BaseIndicator.__init__(self,csdata,config) self.chartcolors = [\"mediumslateblue\"] self.data =", "and self.data[i] > 0: ts = time.mktime(datetime.datetime.strptime(self.csdata[\"time\"][i], \"%Y-%m-%dT%H:%M:%SZ\").timetuple()) data.append({ \"x\": ts, \"y\": self.data[i],", "[\"ema\"] self.analysis = res return res def format_view(self): newres = dict(self.analysis[\"analysis\"]) newres[\"slope\"] =", "2, \"time\": closing_time, \"indicator-data\": { \"ema\": ema }, \"analysis\": OrderedDict() } res[\"analysis\"][\"name\"] =", "None: slope = self.data[k-1] / self.data[k] else: slope = slope / ( self.data[k-1]", "config[\"metric\"] = config.get(\"metric\",\"closed\") config[\"label\"] = config.get(\"label\",\"ema\") config[\"label\"] = \"{}{}\".format(config[\"label\"],config[\"period\"]) BaseIndicator.__init__(self,csdata,config) 
self.chartcolors = [\"mediumslateblue\"]", "self.analysis = res return res def format_view(self): newres = dict(self.analysis[\"analysis\"]) newres[\"slope\"] = \"{:.4f}\".format(newres[\"slope\"])", "if self.csdata is not None: try: smetric = self.scaleup( self.csdata[self.config[\"metric\"]]) data = talib.EMA(", "\"key\": \"{}:{}\".format(self.label,self.config[\"period\"]), \"type\": \"line\", \"color\": \"#FFF5EE\", \"yAxis\": 1, \"values\": data }] def get_ema(self):", "): if self.data is None: self.get_ema() ema = self.data[-1] ema1 = self.data[-2] slope", "\"{}:{}\".format(self.label,self.config[\"period\"]), \"type\": \"line\", \"color\": \"#FFF5EE\", \"yAxis\": 1, \"values\": data }] def get_ema(self): if", "self.get_ema() ema = self.data[-1] ema1 = self.data[-2] slope = None for k in", "return self.data def get_analysis(self ): if self.data is None: self.get_ema() ema = self.data[-1]", "in range(-1,-10,-1): if slope == None: slope = self.data[k-1] / self.data[k] else: slope", "data = talib.EMA( numpy.array(smetric), self.config[\"period\"]) self.data = self.scaledown(data) # scaledown except Exception as", "range(-1,-10,-1): if slope == None: slope = self.data[k-1] / self.data[k] else: slope =", "self.get_analysis() def get_settings(self): return \"{}\".format(self.config[\"period\"]) def get_charts(self): data = [] for i in", "\"%Y-%m-%dT%H:%M:%SZ\").timetuple()) data.append({ \"x\": ts, \"y\": self.data[i], }) return [{ \"key\": \"{}:{}\".format(self.label,self.config[\"period\"]), \"type\": \"line\",", "import BaseIndicator class EMA(BaseIndicator): def __init__(self,csdata, config = {}): config[\"period\"] = config.get(\"period\",30) config[\"metric\"]", "self.config[\"period\"]) self.data = self.scaledown(data) # scaledown except Exception as ex: self.data = None", "as ex: self.data = None raise ex return self.data def get_analysis(self ): if", "\"analysis\": OrderedDict() } res[\"analysis\"][\"name\"] = 
\"{}:{}\".format(self.get_name(),self.get_settings()) res[\"analysis\"][\"signal\"] = action res[\"analysis\"][\"ema\"] = ema res[\"analysis\"][\"slope\"]", "= action res[\"analysis\"][\"ema\"] = ema res[\"analysis\"][\"slope\"] = slope res[\"analysis\"][\"order\"] = [\"ema\"] self.analysis =", "[{ \"key\": \"{}:{}\".format(self.label,self.config[\"period\"]), \"type\": \"line\", \"color\": \"#FFF5EE\", \"yAxis\": 1, \"values\": data }] def", "= self.scaleup( self.csdata[self.config[\"metric\"]]) data = talib.EMA( numpy.array(smetric), self.config[\"period\"]) self.data = self.scaledown(data) # scaledown", "baseindicator import BaseIndicator class EMA(BaseIndicator): def __init__(self,csdata, config = {}): config[\"period\"] = config.get(\"period\",30)", "slope == None: slope = self.data[k-1] / self.data[k] else: slope = slope /", "collections import OrderedDict from baseindicator import BaseIndicator class EMA(BaseIndicator): def __init__(self,csdata, config =", "self.data = self.scaledown(data) # scaledown except Exception as ex: self.data = None raise", "1, \"values\": data }] def get_ema(self): if self.csdata is not None: try: smetric" ]