file_name
large_stringlengths
4
140
prefix
large_stringlengths
0
39k
suffix
large_stringlengths
0
36.1k
middle
large_stringlengths
0
29.4k
fim_type
large_stringclasses
4 values
shipping_container.py
# -*- coding: utf-8 -*- # © 2016 Comunitea - Kiko Sanchez <kiko@comunitea.com> # License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0. from odoo import api, fields, models, _ import odoo.addons.decimal_precision as dp class ShippingContainerType(models.Model): _name = "shipping.container.type" name = fields.Char("Container type", required=True) volume = fields.Float("Volumen", help="Container volume (m3)", required=True) length = fields.Float("Length", help="Length(m)") height = fields.Float("Height", help="Height(m)") width = fields.Float("Width", help="Width(m)") @api.onchange('length', 'height', 'width') def onchange_dimensions(self): if self.length and self.height and self.width: self.volume = self.length * self.height * self.width class ShippingContainer(models.Model): _name = "shipping.container" @api.one def _get_moves(self): self.move_ids_count = len(self.move_ids) @api.one def _get_partners(self): self.partner_ids = self.picking_ids.partner_id @api.multi def _available_volume(self):
for move in container.move_ids: volume -= move.product_id.volume * move.product_uom_qty weight += move.product_id.weight * move.product_uom_qty container.available_volume = volume container.weight = weight name = fields.Char("Container Ref.", required=True) date_expected = fields.Date("Date expected", required=True) date_shipment = fields.Date("Shipment date") picking_ids = fields.One2many("stock.picking", "shipping_container_id", "Pickings") company_id = fields. \ Many2one("res.company", "Company", required=True, default=lambda self: self.env['res.company']._company_default_get('shipping.container')) harbor_id = fields.Many2one('res.harbor', string="Harbor", required=True) move_ids = fields.One2many('stock.move', 'shipping_container_id', string="Moves") move_ids_count = fields.Integer('Move ids count', compute="_get_moves") harbor_dest_id = fields.Many2one('res.harbor', string="Dest. harbor") state = fields.Selection([('loading', 'Loading'), ('transit', 'Transit'), ('destination', 'Destination')], default='loading') shipping_container_type_id = fields.Many2one('shipping.container.type', 'Type') available_volume = fields.Float("Available volume (m3)", compute="_available_volume") weight = fields.Float("Weight (kgr.)", compute="_available_volume") incoterm_id = fields.Many2one('stock.incoterms', string='Incoterm') _sql_constraints = [ ('name_uniq', 'unique(name)', 'Container name must be unique') ] @api.multi def action_view_move_ids(self): action = self.env.ref( 'shipping_container.container_picking_tree_action').read()[0] action['domain'] = [('id', 'in', self.move_ids.ids)] return action def set_transit(self): self.state = 'transit' def set_destination(self): self.state = 'destination' def set_loading(self): self.state = 'loading' @api.multi def write(self, vals): if vals.get('date_expected', False): for container in self: if vals['date_expected'] != container.date_expected: for pick in container.picking_ids: pick.min_date = vals['date_expected'] return 
super(ShippingContainer, self).write(vals)
for container in self: volume = container.shipping_container_type_id.volume weight = 0.00
random_line_split
shipping_container.py
# -*- coding: utf-8 -*- # © 2016 Comunitea - Kiko Sanchez <kiko@comunitea.com> # License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0. from odoo import api, fields, models, _ import odoo.addons.decimal_precision as dp class ShippingContainerType(models.Model): _name = "shipping.container.type" name = fields.Char("Container type", required=True) volume = fields.Float("Volumen", help="Container volume (m3)", required=True) length = fields.Float("Length", help="Length(m)") height = fields.Float("Height", help="Height(m)") width = fields.Float("Width", help="Width(m)") @api.onchange('length', 'height', 'width') def onchange_dimensions(self): if self.length and self.height and self.width: self.volume = self.length * self.height * self.width class ShippingContainer(models.Model): _name = "shipping.container" @api.one def _get_moves(self): self.move_ids_count = len(self.move_ids) @api.one def _get_partners(self): self.partner_ids = self.picking_ids.partner_id @api.multi def _available_volume(self): for container in self: volume = container.shipping_container_type_id.volume weight = 0.00 for move in container.move_ids: volume -= move.product_id.volume * move.product_uom_qty weight += move.product_id.weight * move.product_uom_qty container.available_volume = volume container.weight = weight name = fields.Char("Container Ref.", required=True) date_expected = fields.Date("Date expected", required=True) date_shipment = fields.Date("Shipment date") picking_ids = fields.One2many("stock.picking", "shipping_container_id", "Pickings") company_id = fields. \ Many2one("res.company", "Company", required=True, default=lambda self: self.env['res.company']._company_default_get('shipping.container')) harbor_id = fields.Many2one('res.harbor', string="Harbor", required=True) move_ids = fields.One2many('stock.move', 'shipping_container_id', string="Moves") move_ids_count = fields.Integer('Move ids count', compute="_get_moves") harbor_dest_id = fields.Many2one('res.harbor', string="Dest. 
harbor") state = fields.Selection([('loading', 'Loading'), ('transit', 'Transit'), ('destination', 'Destination')], default='loading') shipping_container_type_id = fields.Many2one('shipping.container.type', 'Type') available_volume = fields.Float("Available volume (m3)", compute="_available_volume") weight = fields.Float("Weight (kgr.)", compute="_available_volume") incoterm_id = fields.Many2one('stock.incoterms', string='Incoterm') _sql_constraints = [ ('name_uniq', 'unique(name)', 'Container name must be unique') ] @api.multi def action_view_move_ids(self): action = self.env.ref( 'shipping_container.container_picking_tree_action').read()[0] action['domain'] = [('id', 'in', self.move_ids.ids)] return action def set_transit(self): self.state = 'transit' def set_destination(self): self.state = 'destination' def set_loading(self): self.state = 'loading' @api.multi def write(self, vals): if vals.get('date_expected', False): for container in self: if vals['date_expected'] != container.date_expected: f
return super(ShippingContainer, self).write(vals)
or pick in container.picking_ids: pick.min_date = vals['date_expected']
conditional_block
shipping_container.py
# -*- coding: utf-8 -*- # © 2016 Comunitea - Kiko Sanchez <kiko@comunitea.com> # License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0. from odoo import api, fields, models, _ import odoo.addons.decimal_precision as dp class ShippingContainerType(models.Model): _name = "shipping.container.type" name = fields.Char("Container type", required=True) volume = fields.Float("Volumen", help="Container volume (m3)", required=True) length = fields.Float("Length", help="Length(m)") height = fields.Float("Height", help="Height(m)") width = fields.Float("Width", help="Width(m)") @api.onchange('length', 'height', 'width') def onchange_dimensions(self): if self.length and self.height and self.width: self.volume = self.length * self.height * self.width class ShippingContainer(models.Model): _name = "shipping.container" @api.one def _get_moves(self): self.move_ids_count = len(self.move_ids) @api.one def _get_partners(self): self.partner_ids = self.picking_ids.partner_id @api.multi def _available_volume(self): for container in self: volume = container.shipping_container_type_id.volume weight = 0.00 for move in container.move_ids: volume -= move.product_id.volume * move.product_uom_qty weight += move.product_id.weight * move.product_uom_qty container.available_volume = volume container.weight = weight name = fields.Char("Container Ref.", required=True) date_expected = fields.Date("Date expected", required=True) date_shipment = fields.Date("Shipment date") picking_ids = fields.One2many("stock.picking", "shipping_container_id", "Pickings") company_id = fields. \ Many2one("res.company", "Company", required=True, default=lambda self: self.env['res.company']._company_default_get('shipping.container')) harbor_id = fields.Many2one('res.harbor', string="Harbor", required=True) move_ids = fields.One2many('stock.move', 'shipping_container_id', string="Moves") move_ids_count = fields.Integer('Move ids count', compute="_get_moves") harbor_dest_id = fields.Many2one('res.harbor', string="Dest. 
harbor") state = fields.Selection([('loading', 'Loading'), ('transit', 'Transit'), ('destination', 'Destination')], default='loading') shipping_container_type_id = fields.Many2one('shipping.container.type', 'Type') available_volume = fields.Float("Available volume (m3)", compute="_available_volume") weight = fields.Float("Weight (kgr.)", compute="_available_volume") incoterm_id = fields.Many2one('stock.incoterms', string='Incoterm') _sql_constraints = [ ('name_uniq', 'unique(name)', 'Container name must be unique') ] @api.multi def action_view_move_ids(self): action = self.env.ref( 'shipping_container.container_picking_tree_action').read()[0] action['domain'] = [('id', 'in', self.move_ids.ids)] return action def set_transit(self): self.state = 'transit' def s
self): self.state = 'destination' def set_loading(self): self.state = 'loading' @api.multi def write(self, vals): if vals.get('date_expected', False): for container in self: if vals['date_expected'] != container.date_expected: for pick in container.picking_ids: pick.min_date = vals['date_expected'] return super(ShippingContainer, self).write(vals)
et_destination(
identifier_name
messageevent.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use dom::bindings::codegen::Bindings::EventBinding::EventMethods; use dom::bindings::codegen::Bindings::MessageEventBinding; use dom::bindings::codegen::Bindings::MessageEventBinding::MessageEventMethods; use dom::bindings::error::Fallible; use dom::bindings::inheritance::Castable; use dom::bindings::js::Root; use dom::bindings::reflector::reflect_dom_object; use dom::bindings::str::DOMString; use dom::event::Event; use dom::eventtarget::EventTarget; use dom::globalscope::GlobalScope; use js::jsapi::{HandleValue, Heap, JSContext}; use js::jsval::JSVal; use servo_atoms::Atom; #[dom_struct] pub struct MessageEvent { event: Event, data: Heap<JSVal>, origin: DOMString, lastEventId: DOMString, } impl MessageEvent { pub fn new_uninitialized(global: &GlobalScope) -> Root<MessageEvent> { MessageEvent::new_initialized(global, HandleValue::undefined(), DOMString::new(), DOMString::new()) } pub fn new_initialized(global: &GlobalScope, data: HandleValue, origin: DOMString, lastEventId: DOMString) -> Root<MessageEvent> { let ev = box MessageEvent { event: Event::new_inherited(), data: Heap::new(data.get()), origin: origin, lastEventId: lastEventId, }; reflect_dom_object(ev, global, MessageEventBinding::Wrap) } pub fn new(global: &GlobalScope, type_: Atom, bubbles: bool, cancelable: bool, data: HandleValue, origin: DOMString, lastEventId: DOMString) -> Root<MessageEvent> { let ev = MessageEvent::new_initialized(global, data, origin, lastEventId); { let event = ev.upcast::<Event>(); event.init_event(type_, bubbles, cancelable); } ev } pub fn Constructor(global: &GlobalScope, type_: DOMString, init: &MessageEventBinding::MessageEventInit) -> Fallible<Root<MessageEvent>> { // Dictionaries need to be rooted // https://github.com/servo/servo/issues/6381 rooted!(in(global.get_cx()) let 
data = init.data); let ev = MessageEvent::new(global, Atom::from(type_), init.parent.bubbles, init.parent.cancelable, data.handle(), init.origin.clone(), init.lastEventId.clone()); Ok(ev) } } impl MessageEvent { pub fn dispatch_jsval(target: &EventTarget, scope: &GlobalScope, message: HandleValue) { let messageevent = MessageEvent::new( scope,
false, false, message, DOMString::new(), DOMString::new()); messageevent.upcast::<Event>().fire(target); } } impl MessageEventMethods for MessageEvent { #[allow(unsafe_code)] // https://html.spec.whatwg.org/multipage/#dom-messageevent-data unsafe fn Data(&self, _cx: *mut JSContext) -> JSVal { self.data.get() } // https://html.spec.whatwg.org/multipage/#dom-messageevent-origin fn Origin(&self) -> DOMString { self.origin.clone() } // https://html.spec.whatwg.org/multipage/#dom-messageevent-lasteventid fn LastEventId(&self) -> DOMString { self.lastEventId.clone() } // https://dom.spec.whatwg.org/#dom-event-istrusted fn IsTrusted(&self) -> bool { self.event.IsTrusted() } }
atom!("message"),
random_line_split
messageevent.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use dom::bindings::codegen::Bindings::EventBinding::EventMethods; use dom::bindings::codegen::Bindings::MessageEventBinding; use dom::bindings::codegen::Bindings::MessageEventBinding::MessageEventMethods; use dom::bindings::error::Fallible; use dom::bindings::inheritance::Castable; use dom::bindings::js::Root; use dom::bindings::reflector::reflect_dom_object; use dom::bindings::str::DOMString; use dom::event::Event; use dom::eventtarget::EventTarget; use dom::globalscope::GlobalScope; use js::jsapi::{HandleValue, Heap, JSContext}; use js::jsval::JSVal; use servo_atoms::Atom; #[dom_struct] pub struct MessageEvent { event: Event, data: Heap<JSVal>, origin: DOMString, lastEventId: DOMString, } impl MessageEvent { pub fn new_uninitialized(global: &GlobalScope) -> Root<MessageEvent> { MessageEvent::new_initialized(global, HandleValue::undefined(), DOMString::new(), DOMString::new()) } pub fn
(global: &GlobalScope, data: HandleValue, origin: DOMString, lastEventId: DOMString) -> Root<MessageEvent> { let ev = box MessageEvent { event: Event::new_inherited(), data: Heap::new(data.get()), origin: origin, lastEventId: lastEventId, }; reflect_dom_object(ev, global, MessageEventBinding::Wrap) } pub fn new(global: &GlobalScope, type_: Atom, bubbles: bool, cancelable: bool, data: HandleValue, origin: DOMString, lastEventId: DOMString) -> Root<MessageEvent> { let ev = MessageEvent::new_initialized(global, data, origin, lastEventId); { let event = ev.upcast::<Event>(); event.init_event(type_, bubbles, cancelable); } ev } pub fn Constructor(global: &GlobalScope, type_: DOMString, init: &MessageEventBinding::MessageEventInit) -> Fallible<Root<MessageEvent>> { // Dictionaries need to be rooted // https://github.com/servo/servo/issues/6381 rooted!(in(global.get_cx()) let data = init.data); let ev = MessageEvent::new(global, Atom::from(type_), init.parent.bubbles, init.parent.cancelable, data.handle(), init.origin.clone(), init.lastEventId.clone()); Ok(ev) } } impl MessageEvent { pub fn dispatch_jsval(target: &EventTarget, scope: &GlobalScope, message: HandleValue) { let messageevent = MessageEvent::new( scope, atom!("message"), false, false, message, DOMString::new(), DOMString::new()); messageevent.upcast::<Event>().fire(target); } } impl MessageEventMethods for MessageEvent { #[allow(unsafe_code)] // https://html.spec.whatwg.org/multipage/#dom-messageevent-data unsafe fn Data(&self, _cx: *mut JSContext) -> JSVal { self.data.get() } // https://html.spec.whatwg.org/multipage/#dom-messageevent-origin fn Origin(&self) -> DOMString { self.origin.clone() } // https://html.spec.whatwg.org/multipage/#dom-messageevent-lasteventid fn LastEventId(&self) -> DOMString { self.lastEventId.clone() } // https://dom.spec.whatwg.org/#dom-event-istrusted fn IsTrusted(&self) -> bool { self.event.IsTrusted() } }
new_initialized
identifier_name
messageevent.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use dom::bindings::codegen::Bindings::EventBinding::EventMethods; use dom::bindings::codegen::Bindings::MessageEventBinding; use dom::bindings::codegen::Bindings::MessageEventBinding::MessageEventMethods; use dom::bindings::error::Fallible; use dom::bindings::inheritance::Castable; use dom::bindings::js::Root; use dom::bindings::reflector::reflect_dom_object; use dom::bindings::str::DOMString; use dom::event::Event; use dom::eventtarget::EventTarget; use dom::globalscope::GlobalScope; use js::jsapi::{HandleValue, Heap, JSContext}; use js::jsval::JSVal; use servo_atoms::Atom; #[dom_struct] pub struct MessageEvent { event: Event, data: Heap<JSVal>, origin: DOMString, lastEventId: DOMString, } impl MessageEvent { pub fn new_uninitialized(global: &GlobalScope) -> Root<MessageEvent> { MessageEvent::new_initialized(global, HandleValue::undefined(), DOMString::new(), DOMString::new()) } pub fn new_initialized(global: &GlobalScope, data: HandleValue, origin: DOMString, lastEventId: DOMString) -> Root<MessageEvent> { let ev = box MessageEvent { event: Event::new_inherited(), data: Heap::new(data.get()), origin: origin, lastEventId: lastEventId, }; reflect_dom_object(ev, global, MessageEventBinding::Wrap) } pub fn new(global: &GlobalScope, type_: Atom, bubbles: bool, cancelable: bool, data: HandleValue, origin: DOMString, lastEventId: DOMString) -> Root<MessageEvent> { let ev = MessageEvent::new_initialized(global, data, origin, lastEventId); { let event = ev.upcast::<Event>(); event.init_event(type_, bubbles, cancelable); } ev } pub fn Constructor(global: &GlobalScope, type_: DOMString, init: &MessageEventBinding::MessageEventInit) -> Fallible<Root<MessageEvent>> { // Dictionaries need to be rooted // https://github.com/servo/servo/issues/6381 rooted!(in(global.get_cx()) let 
data = init.data); let ev = MessageEvent::new(global, Atom::from(type_), init.parent.bubbles, init.parent.cancelable, data.handle(), init.origin.clone(), init.lastEventId.clone()); Ok(ev) } } impl MessageEvent { pub fn dispatch_jsval(target: &EventTarget, scope: &GlobalScope, message: HandleValue) { let messageevent = MessageEvent::new( scope, atom!("message"), false, false, message, DOMString::new(), DOMString::new()); messageevent.upcast::<Event>().fire(target); } } impl MessageEventMethods for MessageEvent { #[allow(unsafe_code)] // https://html.spec.whatwg.org/multipage/#dom-messageevent-data unsafe fn Data(&self, _cx: *mut JSContext) -> JSVal { self.data.get() } // https://html.spec.whatwg.org/multipage/#dom-messageevent-origin fn Origin(&self) -> DOMString { self.origin.clone() } // https://html.spec.whatwg.org/multipage/#dom-messageevent-lasteventid fn LastEventId(&self) -> DOMString
// https://dom.spec.whatwg.org/#dom-event-istrusted fn IsTrusted(&self) -> bool { self.event.IsTrusted() } }
{ self.lastEventId.clone() }
identifier_body
schemas-plugin.ts
/// <reference path="../../typings/node/node.d.ts"/> /// <reference path="../../typings/schemas-files-service/schemas-plugin.d.ts"/> /// <reference path="../../typings/schemas-files-service/schemas-protocol.d.ts"/> /// <reference path="../../typings/tv4-via-typenames-node/tv4-via-typenames-node.d.ts"/> // Assume express is using validation of the msg via json-schema import fs = require('fs'); import tv4vtn = require('tv4-via-typenames-node'); import SchemaFiles = tv4vtn.SchemaFiles; import SchemasPlugin = require('schemas-plugin'); function schemas( options: SchemasPlugin.Options ) { var schema_files: SchemaFiles; this.add( 'init:schemas', init ); function
(msg, respond) { schema_files = new SchemaFiles({ schemasDir: options.schemasDir}); var init_promise = schema_files.init(); var schemas_ready_promise = init_promise.then(function (result) { let filenames = fs.readdirSync(options.schemasDir); let typenames = []; filenames.forEach((filename) => { var typename = schema_files.test.getTypenameFromSchemaFilename(filename); if (typename) { typenames.push(typename); } }); if (typenames.length == 0) { respond(new Error('No schema found in schemasDir=' + options.schemasDir)); } return schema_files.loadRequiredSchema(typenames); }); schemas_ready_promise.then((result) => { respond(); }) .catch((error) => { respond(error); }) } this.add( 'role:schemas,action:read', (msg : SchemasProtocol.SchemasRequest, respond) => { if (!('typename' in msg)) { respond(null, {error: 'expected msg.typename'}); } else { let schema = schema_files.test.getLoadedSchema(msg.typename); if (schema) { respond(null, {schema}); } else { respond(null, {error: 'no schema for typename=' + msg.typename}); } } }) } export = schemas
init
identifier_name
schemas-plugin.ts
/// <reference path="../../typings/node/node.d.ts"/> /// <reference path="../../typings/schemas-files-service/schemas-plugin.d.ts"/> /// <reference path="../../typings/schemas-files-service/schemas-protocol.d.ts"/> /// <reference path="../../typings/tv4-via-typenames-node/tv4-via-typenames-node.d.ts"/> // Assume express is using validation of the msg via json-schema import fs = require('fs'); import tv4vtn = require('tv4-via-typenames-node'); import SchemaFiles = tv4vtn.SchemaFiles; import SchemasPlugin = require('schemas-plugin'); function schemas( options: SchemasPlugin.Options )
export = schemas
{ var schema_files: SchemaFiles; this.add( 'init:schemas', init ); function init(msg, respond) { schema_files = new SchemaFiles({ schemasDir: options.schemasDir}); var init_promise = schema_files.init(); var schemas_ready_promise = init_promise.then(function (result) { let filenames = fs.readdirSync(options.schemasDir); let typenames = []; filenames.forEach((filename) => { var typename = schema_files.test.getTypenameFromSchemaFilename(filename); if (typename) { typenames.push(typename); } }); if (typenames.length == 0) { respond(new Error('No schema found in schemasDir=' + options.schemasDir)); } return schema_files.loadRequiredSchema(typenames); }); schemas_ready_promise.then((result) => { respond(); }) .catch((error) => { respond(error); }) } this.add( 'role:schemas,action:read', (msg : SchemasProtocol.SchemasRequest, respond) => { if (!('typename' in msg)) { respond(null, {error: 'expected msg.typename'}); } else { let schema = schema_files.test.getLoadedSchema(msg.typename); if (schema) { respond(null, {schema}); } else { respond(null, {error: 'no schema for typename=' + msg.typename}); } } }) }
identifier_body
schemas-plugin.ts
/// <reference path="../../typings/node/node.d.ts"/> /// <reference path="../../typings/schemas-files-service/schemas-plugin.d.ts"/> /// <reference path="../../typings/schemas-files-service/schemas-protocol.d.ts"/>
import tv4vtn = require('tv4-via-typenames-node'); import SchemaFiles = tv4vtn.SchemaFiles; import SchemasPlugin = require('schemas-plugin'); function schemas( options: SchemasPlugin.Options ) { var schema_files: SchemaFiles; this.add( 'init:schemas', init ); function init(msg, respond) { schema_files = new SchemaFiles({ schemasDir: options.schemasDir}); var init_promise = schema_files.init(); var schemas_ready_promise = init_promise.then(function (result) { let filenames = fs.readdirSync(options.schemasDir); let typenames = []; filenames.forEach((filename) => { var typename = schema_files.test.getTypenameFromSchemaFilename(filename); if (typename) { typenames.push(typename); } }); if (typenames.length == 0) { respond(new Error('No schema found in schemasDir=' + options.schemasDir)); } return schema_files.loadRequiredSchema(typenames); }); schemas_ready_promise.then((result) => { respond(); }) .catch((error) => { respond(error); }) } this.add( 'role:schemas,action:read', (msg : SchemasProtocol.SchemasRequest, respond) => { if (!('typename' in msg)) { respond(null, {error: 'expected msg.typename'}); } else { let schema = schema_files.test.getLoadedSchema(msg.typename); if (schema) { respond(null, {schema}); } else { respond(null, {error: 'no schema for typename=' + msg.typename}); } } }) } export = schemas
/// <reference path="../../typings/tv4-via-typenames-node/tv4-via-typenames-node.d.ts"/> // Assume express is using validation of the msg via json-schema import fs = require('fs');
random_line_split
schemas-plugin.ts
/// <reference path="../../typings/node/node.d.ts"/> /// <reference path="../../typings/schemas-files-service/schemas-plugin.d.ts"/> /// <reference path="../../typings/schemas-files-service/schemas-protocol.d.ts"/> /// <reference path="../../typings/tv4-via-typenames-node/tv4-via-typenames-node.d.ts"/> // Assume express is using validation of the msg via json-schema import fs = require('fs'); import tv4vtn = require('tv4-via-typenames-node'); import SchemaFiles = tv4vtn.SchemaFiles; import SchemasPlugin = require('schemas-plugin'); function schemas( options: SchemasPlugin.Options ) { var schema_files: SchemaFiles; this.add( 'init:schemas', init ); function init(msg, respond) { schema_files = new SchemaFiles({ schemasDir: options.schemasDir}); var init_promise = schema_files.init(); var schemas_ready_promise = init_promise.then(function (result) { let filenames = fs.readdirSync(options.schemasDir); let typenames = []; filenames.forEach((filename) => { var typename = schema_files.test.getTypenameFromSchemaFilename(filename); if (typename) { typenames.push(typename); } }); if (typenames.length == 0) { respond(new Error('No schema found in schemasDir=' + options.schemasDir)); } return schema_files.loadRequiredSchema(typenames); }); schemas_ready_promise.then((result) => { respond(); }) .catch((error) => { respond(error); }) } this.add( 'role:schemas,action:read', (msg : SchemasProtocol.SchemasRequest, respond) => { if (!('typename' in msg)) { respond(null, {error: 'expected msg.typename'}); } else
}) } export = schemas
{ let schema = schema_files.test.getLoadedSchema(msg.typename); if (schema) { respond(null, {schema}); } else { respond(null, {error: 'no schema for typename=' + msg.typename}); } }
conditional_block
test_currency.py
# -*- coding: utf-8 -*- import pytest import six from sqlalchemy_utils import Currency, i18n @pytest.fixture def set_get_locale(): i18n.get_locale = lambda: i18n.babel.Locale('en') @pytest.mark.skipif('i18n.babel is None') @pytest.mark.usefixtures('set_get_locale') class TestCurrency(object): def test_init(self): assert Currency('USD') == Currency(Currency('USD')) def test_hashability(self): assert len(set([Currency('USD'), Currency('USD')])) == 1 def test_invalid_currency_code(self): with pytest.raises(ValueError): Currency('Unknown code') def test_invalid_currency_code_type(self): with pytest.raises(TypeError): Currency(None) @pytest.mark.parametrize( ('code', 'name'), ( ('USD', 'US Dollar'), ('EUR', 'Euro') ) ) def test_name_property(self, code, name): assert Currency(code).name == name @pytest.mark.parametrize( ('code', 'symbol'), ( ('USD', u'$'), ('EUR', u'€') ) ) def test_symbol_property(self, code, symbol): as
def test_equality_operator(self): assert Currency('USD') == 'USD' assert 'USD' == Currency('USD') assert Currency('USD') == Currency('USD') def test_non_equality_operator(self): assert Currency('USD') != 'EUR' assert not (Currency('USD') != 'USD') def test_unicode(self): currency = Currency('USD') assert six.text_type(currency) == u'USD' def test_str(self): currency = Currency('USD') assert str(currency) == 'USD' def test_representation(self): currency = Currency('USD') assert repr(currency) == "Currency('USD')"
sert Currency(code).symbol == symbol
identifier_body
test_currency.py
# -*- coding: utf-8 -*- import pytest import six from sqlalchemy_utils import Currency, i18n @pytest.fixture def set_get_locale(): i18n.get_locale = lambda: i18n.babel.Locale('en') @pytest.mark.skipif('i18n.babel is None') @pytest.mark.usefixtures('set_get_locale') class TestCurrency(object): def test_init(self): assert Currency('USD') == Currency(Currency('USD')) def test_hashability(self): assert len(set([Currency('USD'), Currency('USD')])) == 1 def test_invalid_currency_code(self): with pytest.raises(ValueError): Currency('Unknown code') def test_invalid_currency_code_type(self): with pytest.raises(TypeError): Currency(None) @pytest.mark.parametrize( ('code', 'name'), ( ('USD', 'US Dollar'), ('EUR', 'Euro') ) ) def test_name_property(self, code, name): assert Currency(code).name == name @pytest.mark.parametrize( ('code', 'symbol'),
('USD', u'$'), ('EUR', u'€') ) ) def test_symbol_property(self, code, symbol): assert Currency(code).symbol == symbol def test_equality_operator(self): assert Currency('USD') == 'USD' assert 'USD' == Currency('USD') assert Currency('USD') == Currency('USD') def test_non_equality_operator(self): assert Currency('USD') != 'EUR' assert not (Currency('USD') != 'USD') def test_unicode(self): currency = Currency('USD') assert six.text_type(currency) == u'USD' def test_str(self): currency = Currency('USD') assert str(currency) == 'USD' def test_representation(self): currency = Currency('USD') assert repr(currency) == "Currency('USD')"
(
random_line_split
test_currency.py
# -*- coding: utf-8 -*- import pytest import six from sqlalchemy_utils import Currency, i18n @pytest.fixture def set_get_locale(): i18n.get_locale = lambda: i18n.babel.Locale('en') @pytest.mark.skipif('i18n.babel is None') @pytest.mark.usefixtures('set_get_locale') class TestCurrency(object): def test_init(self): assert Currency('USD') == Currency(Currency('USD')) def test_hashability(self): assert len(set([Currency('USD'), Currency('USD')])) == 1 def test_invalid_currency_code(self): with pytest.raises(ValueError): Currency('Unknown code') def test_invalid_currency_code_type(self): with pytest.raises(TypeError): Currency(None) @pytest.mark.parametrize( ('code', 'name'), ( ('USD', 'US Dollar'), ('EUR', 'Euro') ) ) def test_name_property(self, code, name): assert Currency(code).name == name @pytest.mark.parametrize( ('code', 'symbol'), ( ('USD', u'$'), ('EUR', u'€') ) ) def test_symbol_property(self, code, symbol): assert Currency(code).symbol == symbol def te
elf): assert Currency('USD') == 'USD' assert 'USD' == Currency('USD') assert Currency('USD') == Currency('USD') def test_non_equality_operator(self): assert Currency('USD') != 'EUR' assert not (Currency('USD') != 'USD') def test_unicode(self): currency = Currency('USD') assert six.text_type(currency) == u'USD' def test_str(self): currency = Currency('USD') assert str(currency) == 'USD' def test_representation(self): currency = Currency('USD') assert repr(currency) == "Currency('USD')"
st_equality_operator(s
identifier_name
buffer.rs
use crate::parsing::ParsingContext; use amq_protocol::frame::{BackToTheBuffer, GenError, GenResult, WriteContext}; use futures_lite::io::{AsyncRead, AsyncWrite}; use std::{ cmp, io::{self, IoSlice, IoSliceMut}, pin::Pin, task::{Context, Poll}, }; #[derive(Debug, PartialEq, Clone)] pub(crate) struct Buffer { memory: Vec<u8>, capacity: usize, position: usize, end: usize, available_data: usize, } pub(crate) struct Checkpoint { end: usize, backwards: bool, } impl Buffer { pub(crate) fn with_capacity(capacity: usize) -> Buffer { Buffer { memory: vec![0; capacity], capacity, position: 0, end: 0, available_data: 0, } } pub(crate) fn checkpoint(&self) -> Checkpoint { Checkpoint { end: self.end, backwards: true, } } pub(crate) fn rollback(&mut self, checkpoint: Checkpoint) { if checkpoint.end == self.end { return; } if checkpoint.backwards { if self.end > checkpoint.end { self.available_data -= self.end - checkpoint.end; } else { self.available_data -= self.end + (self.capacity - checkpoint.end); } } else if self.end > checkpoint.end { self.available_data += (self.capacity - self.end) + checkpoint.end; } else { self.available_data += checkpoint.end - self.end; } self.end = checkpoint.end; } pub(crate) fn grow(&mut self, new_size: usize) -> bool { if self.capacity >= new_size { return false; } let old_capacity = self.capacity; let growth = new_size - old_capacity; self.memory.resize(new_size, 0); self.capacity = new_size; if self.end <= self.position && self.available_data > 0 { // We have data and the "tail" was at the beginning of the buffer. // We need to move it in the new end. let (old, new) = self.memory.split_at_mut(old_capacity); if self.end < growth { // There is enough room in the new end for this whole "tail". new[..].copy_from_slice(&old[..self.end]); self.end += old_capacity; } else { // Fill the new end with as much data as we can. // We also update the end pointer to the future right location. 
// We still have [growth..old_end] to move into [..new_end] new[..].copy_from_slice(&old[..growth]); self.end -= growth; if self.end < growth { // Less than half the data is yet to be moved, we can split + copy. let (start, data) = self.memory.split_at_mut(growth); start[..].copy_from_slice(&data[..self.end]) } else { // Not enough room to split + copy, we copy each byte one at a time. for i in 0..=self.end { self.memory[i] = self.memory[i + growth]; } } } } true } pub(crate) fn available_data(&self) -> usize { self.available_data } pub(crate) fn available_space(&self) -> usize { self.capacity - self.available_data } pub(crate) fn consume(&mut self, count: usize) -> usize { let cnt = cmp::min(count, self.available_data()); self.position += cnt; self.position %= self.capacity; self.available_data -= cnt; cnt } pub(crate) fn fill(&mut self, count: usize) -> usize { let cnt = cmp::min(count, self.available_space()); self.end += cnt; self.end %= self.capacity; self.available_data += cnt; cnt } pub(crate) fn poll_write_to<T: AsyncWrite>( &self, cx: &mut Context<'_>, writer: Pin<&mut T>, ) -> Poll<io::Result<usize>> { if self.available_data() == 0 { Poll::Ready(Ok(0)) } else if self.end > self.position { writer.poll_write(cx, &self.memory[self.position..self.end]) } else { writer.poll_write_vectored( cx, &[ IoSlice::new(&self.memory[self.position..]), IoSlice::new(&self.memory[..self.end]), ], ) } } pub(crate) fn poll_read_from<T: AsyncRead>( &mut self, cx: &mut Context<'_>, reader: Pin<&mut T>, ) -> Poll<io::Result<usize>> { if self.available_space() == 0 { Poll::Ready(Ok(0)) } else if self.end >= self.position { let (start, end) = self.memory.split_at_mut(self.end); reader.poll_read_vectored( cx, &mut [ IoSliceMut::new(&mut end[..]), IoSliceMut::new(&mut start[..self.position]), ][..], ) } else { reader.poll_read(cx, &mut self.memory[self.end..self.position]) } } pub(crate) fn offset(&self, buf: ParsingContext<'_>) -> usize { let data = 
&self.memory[self.position..self.position]; let dataptr = data.as_ptr() as usize; let bufptr = buf.as_ptr() as usize; if dataptr < bufptr { bufptr - dataptr } else { let start = &self.memory[..0]; let startptr = start.as_ptr() as usize; bufptr + self.capacity - self.position - startptr } } pub(crate) fn parsing_context(&self) -> ParsingContext<'_> { if self.available_data() == 0 { self.memory[self.end..self.end].into() } else if self.end > self.position { self.memory[self.position..self.end].into() } else { [&self.memory[self.position..], &self.memory[..self.end]].into() } } } impl io::Write for &mut Buffer { fn write(&mut self, data: &[u8]) -> io::Result<usize> { let amt = if self.available_space() == 0 { 0 } else if self.end >= self.position { let mut space = &mut self.memory[self.end..]; let mut amt = space.write(data)?; if amt == self.capacity - self.end { let mut space = &mut self.memory[..self.position]; amt += space.write(&data[amt..])?; } amt } else
; self.fill(amt); Ok(amt) } fn flush(&mut self) -> io::Result<()> { Ok(()) } } impl BackToTheBuffer for &mut Buffer { fn reserve_write_use< Tmp, Gen: Fn(WriteContext<Self>) -> Result<(WriteContext<Self>, Tmp), GenError>, Before: Fn(WriteContext<Self>, Tmp) -> GenResult<Self>, >( s: WriteContext<Self>, reserved: usize, gen: &Gen, before: &Before, ) -> Result<WriteContext<Self>, GenError> { if s.write.available_space() < reserved { return Err(GenError::BufferTooSmall( reserved - s.write.available_space(), )); } let start = s.write.checkpoint(); s.write.fill(reserved); gen(s).and_then(|(s, tmp)| { let mut end = s.write.checkpoint(); end.backwards = false; s.write.rollback(start); before(s, tmp).map(|s| { s.write.rollback(end); s }) }) } }
{ let mut space = &mut self.memory[self.end..self.position]; space.write(data)? }
conditional_block
buffer.rs
use crate::parsing::ParsingContext; use amq_protocol::frame::{BackToTheBuffer, GenError, GenResult, WriteContext}; use futures_lite::io::{AsyncRead, AsyncWrite}; use std::{ cmp, io::{self, IoSlice, IoSliceMut}, pin::Pin, task::{Context, Poll}, }; #[derive(Debug, PartialEq, Clone)] pub(crate) struct Buffer { memory: Vec<u8>, capacity: usize, position: usize, end: usize, available_data: usize, } pub(crate) struct Checkpoint { end: usize, backwards: bool, } impl Buffer { pub(crate) fn with_capacity(capacity: usize) -> Buffer { Buffer { memory: vec![0; capacity], capacity, position: 0, end: 0, available_data: 0, } } pub(crate) fn checkpoint(&self) -> Checkpoint { Checkpoint { end: self.end, backwards: true, } } pub(crate) fn rollback(&mut self, checkpoint: Checkpoint) { if checkpoint.end == self.end { return; } if checkpoint.backwards { if self.end > checkpoint.end { self.available_data -= self.end - checkpoint.end; } else { self.available_data -= self.end + (self.capacity - checkpoint.end); } } else if self.end > checkpoint.end { self.available_data += (self.capacity - self.end) + checkpoint.end; } else { self.available_data += checkpoint.end - self.end; } self.end = checkpoint.end; } pub(crate) fn grow(&mut self, new_size: usize) -> bool { if self.capacity >= new_size { return false; } let old_capacity = self.capacity; let growth = new_size - old_capacity; self.memory.resize(new_size, 0); self.capacity = new_size; if self.end <= self.position && self.available_data > 0 { // We have data and the "tail" was at the beginning of the buffer. // We need to move it in the new end. let (old, new) = self.memory.split_at_mut(old_capacity); if self.end < growth { // There is enough room in the new end for this whole "tail". new[..].copy_from_slice(&old[..self.end]); self.end += old_capacity; } else { // Fill the new end with as much data as we can. // We also update the end pointer to the future right location. 
// We still have [growth..old_end] to move into [..new_end] new[..].copy_from_slice(&old[..growth]); self.end -= growth; if self.end < growth { // Less than half the data is yet to be moved, we can split + copy. let (start, data) = self.memory.split_at_mut(growth); start[..].copy_from_slice(&data[..self.end]) } else { // Not enough room to split + copy, we copy each byte one at a time. for i in 0..=self.end { self.memory[i] = self.memory[i + growth]; } } } } true } pub(crate) fn available_data(&self) -> usize { self.available_data } pub(crate) fn available_space(&self) -> usize { self.capacity - self.available_data } pub(crate) fn consume(&mut self, count: usize) -> usize { let cnt = cmp::min(count, self.available_data()); self.position += cnt; self.position %= self.capacity; self.available_data -= cnt; cnt } pub(crate) fn fill(&mut self, count: usize) -> usize { let cnt = cmp::min(count, self.available_space()); self.end += cnt; self.end %= self.capacity; self.available_data += cnt; cnt } pub(crate) fn poll_write_to<T: AsyncWrite>( &self, cx: &mut Context<'_>, writer: Pin<&mut T>, ) -> Poll<io::Result<usize>> { if self.available_data() == 0 { Poll::Ready(Ok(0)) } else if self.end > self.position { writer.poll_write(cx, &self.memory[self.position..self.end]) } else { writer.poll_write_vectored( cx, &[ IoSlice::new(&self.memory[self.position..]), IoSlice::new(&self.memory[..self.end]), ], ) } } pub(crate) fn poll_read_from<T: AsyncRead>( &mut self, cx: &mut Context<'_>, reader: Pin<&mut T>, ) -> Poll<io::Result<usize>> { if self.available_space() == 0 { Poll::Ready(Ok(0)) } else if self.end >= self.position { let (start, end) = self.memory.split_at_mut(self.end); reader.poll_read_vectored( cx, &mut [ IoSliceMut::new(&mut end[..]), IoSliceMut::new(&mut start[..self.position]), ][..], ) } else { reader.poll_read(cx, &mut self.memory[self.end..self.position]) } } pub(crate) fn offset(&self, buf: ParsingContext<'_>) -> usize { let data = 
&self.memory[self.position..self.position]; let dataptr = data.as_ptr() as usize; let bufptr = buf.as_ptr() as usize; if dataptr < bufptr { bufptr - dataptr } else { let start = &self.memory[..0]; let startptr = start.as_ptr() as usize; bufptr + self.capacity - self.position - startptr } } pub(crate) fn parsing_context(&self) -> ParsingContext<'_> { if self.available_data() == 0 { self.memory[self.end..self.end].into() } else if self.end > self.position { self.memory[self.position..self.end].into() } else { [&self.memory[self.position..], &self.memory[..self.end]].into() } } } impl io::Write for &mut Buffer { fn write(&mut self, data: &[u8]) -> io::Result<usize> { let amt = if self.available_space() == 0 { 0 } else if self.end >= self.position { let mut space = &mut self.memory[self.end..]; let mut amt = space.write(data)?; if amt == self.capacity - self.end { let mut space = &mut self.memory[..self.position]; amt += space.write(&data[amt..])?; } amt } else { let mut space = &mut self.memory[self.end..self.position]; space.write(data)? }; self.fill(amt); Ok(amt) } fn flush(&mut self) -> io::Result<()> { Ok(())
fn reserve_write_use< Tmp, Gen: Fn(WriteContext<Self>) -> Result<(WriteContext<Self>, Tmp), GenError>, Before: Fn(WriteContext<Self>, Tmp) -> GenResult<Self>, >( s: WriteContext<Self>, reserved: usize, gen: &Gen, before: &Before, ) -> Result<WriteContext<Self>, GenError> { if s.write.available_space() < reserved { return Err(GenError::BufferTooSmall( reserved - s.write.available_space(), )); } let start = s.write.checkpoint(); s.write.fill(reserved); gen(s).and_then(|(s, tmp)| { let mut end = s.write.checkpoint(); end.backwards = false; s.write.rollback(start); before(s, tmp).map(|s| { s.write.rollback(end); s }) }) } }
} } impl BackToTheBuffer for &mut Buffer {
random_line_split
buffer.rs
use crate::parsing::ParsingContext; use amq_protocol::frame::{BackToTheBuffer, GenError, GenResult, WriteContext}; use futures_lite::io::{AsyncRead, AsyncWrite}; use std::{ cmp, io::{self, IoSlice, IoSliceMut}, pin::Pin, task::{Context, Poll}, }; #[derive(Debug, PartialEq, Clone)] pub(crate) struct Buffer { memory: Vec<u8>, capacity: usize, position: usize, end: usize, available_data: usize, } pub(crate) struct Checkpoint { end: usize, backwards: bool, } impl Buffer { pub(crate) fn with_capacity(capacity: usize) -> Buffer { Buffer { memory: vec![0; capacity], capacity, position: 0, end: 0, available_data: 0, } } pub(crate) fn checkpoint(&self) -> Checkpoint { Checkpoint { end: self.end, backwards: true, } } pub(crate) fn rollback(&mut self, checkpoint: Checkpoint) { if checkpoint.end == self.end { return; } if checkpoint.backwards { if self.end > checkpoint.end { self.available_data -= self.end - checkpoint.end; } else { self.available_data -= self.end + (self.capacity - checkpoint.end); } } else if self.end > checkpoint.end { self.available_data += (self.capacity - self.end) + checkpoint.end; } else { self.available_data += checkpoint.end - self.end; } self.end = checkpoint.end; } pub(crate) fn
(&mut self, new_size: usize) -> bool { if self.capacity >= new_size { return false; } let old_capacity = self.capacity; let growth = new_size - old_capacity; self.memory.resize(new_size, 0); self.capacity = new_size; if self.end <= self.position && self.available_data > 0 { // We have data and the "tail" was at the beginning of the buffer. // We need to move it in the new end. let (old, new) = self.memory.split_at_mut(old_capacity); if self.end < growth { // There is enough room in the new end for this whole "tail". new[..].copy_from_slice(&old[..self.end]); self.end += old_capacity; } else { // Fill the new end with as much data as we can. // We also update the end pointer to the future right location. // We still have [growth..old_end] to move into [..new_end] new[..].copy_from_slice(&old[..growth]); self.end -= growth; if self.end < growth { // Less than half the data is yet to be moved, we can split + copy. let (start, data) = self.memory.split_at_mut(growth); start[..].copy_from_slice(&data[..self.end]) } else { // Not enough room to split + copy, we copy each byte one at a time. 
for i in 0..=self.end { self.memory[i] = self.memory[i + growth]; } } } } true } pub(crate) fn available_data(&self) -> usize { self.available_data } pub(crate) fn available_space(&self) -> usize { self.capacity - self.available_data } pub(crate) fn consume(&mut self, count: usize) -> usize { let cnt = cmp::min(count, self.available_data()); self.position += cnt; self.position %= self.capacity; self.available_data -= cnt; cnt } pub(crate) fn fill(&mut self, count: usize) -> usize { let cnt = cmp::min(count, self.available_space()); self.end += cnt; self.end %= self.capacity; self.available_data += cnt; cnt } pub(crate) fn poll_write_to<T: AsyncWrite>( &self, cx: &mut Context<'_>, writer: Pin<&mut T>, ) -> Poll<io::Result<usize>> { if self.available_data() == 0 { Poll::Ready(Ok(0)) } else if self.end > self.position { writer.poll_write(cx, &self.memory[self.position..self.end]) } else { writer.poll_write_vectored( cx, &[ IoSlice::new(&self.memory[self.position..]), IoSlice::new(&self.memory[..self.end]), ], ) } } pub(crate) fn poll_read_from<T: AsyncRead>( &mut self, cx: &mut Context<'_>, reader: Pin<&mut T>, ) -> Poll<io::Result<usize>> { if self.available_space() == 0 { Poll::Ready(Ok(0)) } else if self.end >= self.position { let (start, end) = self.memory.split_at_mut(self.end); reader.poll_read_vectored( cx, &mut [ IoSliceMut::new(&mut end[..]), IoSliceMut::new(&mut start[..self.position]), ][..], ) } else { reader.poll_read(cx, &mut self.memory[self.end..self.position]) } } pub(crate) fn offset(&self, buf: ParsingContext<'_>) -> usize { let data = &self.memory[self.position..self.position]; let dataptr = data.as_ptr() as usize; let bufptr = buf.as_ptr() as usize; if dataptr < bufptr { bufptr - dataptr } else { let start = &self.memory[..0]; let startptr = start.as_ptr() as usize; bufptr + self.capacity - self.position - startptr } } pub(crate) fn parsing_context(&self) -> ParsingContext<'_> { if self.available_data() == 0 { 
self.memory[self.end..self.end].into() } else if self.end > self.position { self.memory[self.position..self.end].into() } else { [&self.memory[self.position..], &self.memory[..self.end]].into() } } } impl io::Write for &mut Buffer { fn write(&mut self, data: &[u8]) -> io::Result<usize> { let amt = if self.available_space() == 0 { 0 } else if self.end >= self.position { let mut space = &mut self.memory[self.end..]; let mut amt = space.write(data)?; if amt == self.capacity - self.end { let mut space = &mut self.memory[..self.position]; amt += space.write(&data[amt..])?; } amt } else { let mut space = &mut self.memory[self.end..self.position]; space.write(data)? }; self.fill(amt); Ok(amt) } fn flush(&mut self) -> io::Result<()> { Ok(()) } } impl BackToTheBuffer for &mut Buffer { fn reserve_write_use< Tmp, Gen: Fn(WriteContext<Self>) -> Result<(WriteContext<Self>, Tmp), GenError>, Before: Fn(WriteContext<Self>, Tmp) -> GenResult<Self>, >( s: WriteContext<Self>, reserved: usize, gen: &Gen, before: &Before, ) -> Result<WriteContext<Self>, GenError> { if s.write.available_space() < reserved { return Err(GenError::BufferTooSmall( reserved - s.write.available_space(), )); } let start = s.write.checkpoint(); s.write.fill(reserved); gen(s).and_then(|(s, tmp)| { let mut end = s.write.checkpoint(); end.backwards = false; s.write.rollback(start); before(s, tmp).map(|s| { s.write.rollback(end); s }) }) } }
grow
identifier_name
buffer.rs
use crate::parsing::ParsingContext; use amq_protocol::frame::{BackToTheBuffer, GenError, GenResult, WriteContext}; use futures_lite::io::{AsyncRead, AsyncWrite}; use std::{ cmp, io::{self, IoSlice, IoSliceMut}, pin::Pin, task::{Context, Poll}, }; #[derive(Debug, PartialEq, Clone)] pub(crate) struct Buffer { memory: Vec<u8>, capacity: usize, position: usize, end: usize, available_data: usize, } pub(crate) struct Checkpoint { end: usize, backwards: bool, } impl Buffer { pub(crate) fn with_capacity(capacity: usize) -> Buffer { Buffer { memory: vec![0; capacity], capacity, position: 0, end: 0, available_data: 0, } } pub(crate) fn checkpoint(&self) -> Checkpoint { Checkpoint { end: self.end, backwards: true, } } pub(crate) fn rollback(&mut self, checkpoint: Checkpoint) { if checkpoint.end == self.end { return; } if checkpoint.backwards { if self.end > checkpoint.end { self.available_data -= self.end - checkpoint.end; } else { self.available_data -= self.end + (self.capacity - checkpoint.end); } } else if self.end > checkpoint.end { self.available_data += (self.capacity - self.end) + checkpoint.end; } else { self.available_data += checkpoint.end - self.end; } self.end = checkpoint.end; } pub(crate) fn grow(&mut self, new_size: usize) -> bool { if self.capacity >= new_size { return false; } let old_capacity = self.capacity; let growth = new_size - old_capacity; self.memory.resize(new_size, 0); self.capacity = new_size; if self.end <= self.position && self.available_data > 0 { // We have data and the "tail" was at the beginning of the buffer. // We need to move it in the new end. let (old, new) = self.memory.split_at_mut(old_capacity); if self.end < growth { // There is enough room in the new end for this whole "tail". new[..].copy_from_slice(&old[..self.end]); self.end += old_capacity; } else { // Fill the new end with as much data as we can. // We also update the end pointer to the future right location. 
// We still have [growth..old_end] to move into [..new_end] new[..].copy_from_slice(&old[..growth]); self.end -= growth; if self.end < growth { // Less than half the data is yet to be moved, we can split + copy. let (start, data) = self.memory.split_at_mut(growth); start[..].copy_from_slice(&data[..self.end]) } else { // Not enough room to split + copy, we copy each byte one at a time. for i in 0..=self.end { self.memory[i] = self.memory[i + growth]; } } } } true } pub(crate) fn available_data(&self) -> usize { self.available_data } pub(crate) fn available_space(&self) -> usize { self.capacity - self.available_data } pub(crate) fn consume(&mut self, count: usize) -> usize { let cnt = cmp::min(count, self.available_data()); self.position += cnt; self.position %= self.capacity; self.available_data -= cnt; cnt } pub(crate) fn fill(&mut self, count: usize) -> usize
pub(crate) fn poll_write_to<T: AsyncWrite>( &self, cx: &mut Context<'_>, writer: Pin<&mut T>, ) -> Poll<io::Result<usize>> { if self.available_data() == 0 { Poll::Ready(Ok(0)) } else if self.end > self.position { writer.poll_write(cx, &self.memory[self.position..self.end]) } else { writer.poll_write_vectored( cx, &[ IoSlice::new(&self.memory[self.position..]), IoSlice::new(&self.memory[..self.end]), ], ) } } pub(crate) fn poll_read_from<T: AsyncRead>( &mut self, cx: &mut Context<'_>, reader: Pin<&mut T>, ) -> Poll<io::Result<usize>> { if self.available_space() == 0 { Poll::Ready(Ok(0)) } else if self.end >= self.position { let (start, end) = self.memory.split_at_mut(self.end); reader.poll_read_vectored( cx, &mut [ IoSliceMut::new(&mut end[..]), IoSliceMut::new(&mut start[..self.position]), ][..], ) } else { reader.poll_read(cx, &mut self.memory[self.end..self.position]) } } pub(crate) fn offset(&self, buf: ParsingContext<'_>) -> usize { let data = &self.memory[self.position..self.position]; let dataptr = data.as_ptr() as usize; let bufptr = buf.as_ptr() as usize; if dataptr < bufptr { bufptr - dataptr } else { let start = &self.memory[..0]; let startptr = start.as_ptr() as usize; bufptr + self.capacity - self.position - startptr } } pub(crate) fn parsing_context(&self) -> ParsingContext<'_> { if self.available_data() == 0 { self.memory[self.end..self.end].into() } else if self.end > self.position { self.memory[self.position..self.end].into() } else { [&self.memory[self.position..], &self.memory[..self.end]].into() } } } impl io::Write for &mut Buffer { fn write(&mut self, data: &[u8]) -> io::Result<usize> { let amt = if self.available_space() == 0 { 0 } else if self.end >= self.position { let mut space = &mut self.memory[self.end..]; let mut amt = space.write(data)?; if amt == self.capacity - self.end { let mut space = &mut self.memory[..self.position]; amt += space.write(&data[amt..])?; } amt } else { let mut space = &mut self.memory[self.end..self.position]; 
space.write(data)? }; self.fill(amt); Ok(amt) } fn flush(&mut self) -> io::Result<()> { Ok(()) } } impl BackToTheBuffer for &mut Buffer { fn reserve_write_use< Tmp, Gen: Fn(WriteContext<Self>) -> Result<(WriteContext<Self>, Tmp), GenError>, Before: Fn(WriteContext<Self>, Tmp) -> GenResult<Self>, >( s: WriteContext<Self>, reserved: usize, gen: &Gen, before: &Before, ) -> Result<WriteContext<Self>, GenError> { if s.write.available_space() < reserved { return Err(GenError::BufferTooSmall( reserved - s.write.available_space(), )); } let start = s.write.checkpoint(); s.write.fill(reserved); gen(s).and_then(|(s, tmp)| { let mut end = s.write.checkpoint(); end.backwards = false; s.write.rollback(start); before(s, tmp).map(|s| { s.write.rollback(end); s }) }) } }
{ let cnt = cmp::min(count, self.available_space()); self.end += cnt; self.end %= self.capacity; self.available_data += cnt; cnt }
identifier_body
scalar.rs
use std::cmp; use std::fmt; use std::ops; use num; use math::common::LinearInterpolate; pub type IntScalar = i32; #[cfg(not(feature = "float64"))] pub type FloatScalar = f32; #[cfg(feature = "float64")] pub type FloatScalar = f64; pub trait BaseNum where Self: Copy + Clone + fmt::Debug + cmp::PartialOrd, Self: num::Num + num::NumCast + num::ToPrimitive, Self: ops::AddAssign + ops::SubAssign + ops::MulAssign + ops::DivAssign {} pub trait BaseFloat: BaseNum + num::Float + num::Signed {} pub trait BaseInt: BaseNum {} impl BaseNum for i8 {} impl BaseNum for i16 {} impl BaseNum for i32 {} impl BaseNum for i64 {} impl BaseNum for isize {} impl BaseNum for u8 {} impl BaseNum for u16 {} impl BaseNum for u32 {} impl BaseNum for u64 {} impl BaseNum for usize {} impl BaseNum for f32 {} impl BaseNum for f64 {} impl BaseInt for i8 {} impl BaseInt for i16 {} impl BaseInt for i32 {} impl BaseInt for i64 {} impl BaseInt for isize {} impl BaseInt for u8 {} impl BaseInt for u16 {} impl BaseInt for u32 {} impl BaseInt for u64 {} impl BaseInt for usize {} impl BaseFloat for f32 {} impl BaseFloat for f64 {} pub fn partial_min<T: cmp::PartialOrd>(a: T, b: T) -> T
pub fn partial_max<T: cmp::PartialOrd>(a: T, b: T) -> T { if a > b { a } else { b } } impl LinearInterpolate for f32 { type Scalar = f32; } impl LinearInterpolate for f64 { type Scalar = f64; }
{ if a < b { a } else { b } }
identifier_body
scalar.rs
use std::cmp; use std::fmt; use std::ops; use num; use math::common::LinearInterpolate; pub type IntScalar = i32; #[cfg(not(feature = "float64"))] pub type FloatScalar = f32; #[cfg(feature = "float64")] pub type FloatScalar = f64; pub trait BaseNum where Self: Copy + Clone + fmt::Debug + cmp::PartialOrd, Self: num::Num + num::NumCast + num::ToPrimitive, Self: ops::AddAssign + ops::SubAssign + ops::MulAssign + ops::DivAssign {} pub trait BaseFloat: BaseNum + num::Float + num::Signed {} pub trait BaseInt: BaseNum {} impl BaseNum for i8 {} impl BaseNum for i16 {} impl BaseNum for i32 {} impl BaseNum for i64 {} impl BaseNum for isize {} impl BaseNum for u8 {} impl BaseNum for u16 {} impl BaseNum for u32 {} impl BaseNum for u64 {} impl BaseNum for usize {} impl BaseNum for f32 {} impl BaseNum for f64 {} impl BaseInt for i8 {} impl BaseInt for i16 {} impl BaseInt for i32 {} impl BaseInt for i64 {} impl BaseInt for isize {} impl BaseInt for u8 {} impl BaseInt for u16 {} impl BaseInt for u32 {} impl BaseInt for u64 {} impl BaseInt for usize {} impl BaseFloat for f32 {} impl BaseFloat for f64 {} pub fn partial_min<T: cmp::PartialOrd>(a: T, b: T) -> T { if a < b { a } else { b } }
if a > b { a } else { b } } impl LinearInterpolate for f32 { type Scalar = f32; } impl LinearInterpolate for f64 { type Scalar = f64; }
pub fn partial_max<T: cmp::PartialOrd>(a: T, b: T) -> T {
random_line_split
scalar.rs
use std::cmp; use std::fmt; use std::ops; use num; use math::common::LinearInterpolate; pub type IntScalar = i32; #[cfg(not(feature = "float64"))] pub type FloatScalar = f32; #[cfg(feature = "float64")] pub type FloatScalar = f64; pub trait BaseNum where Self: Copy + Clone + fmt::Debug + cmp::PartialOrd, Self: num::Num + num::NumCast + num::ToPrimitive, Self: ops::AddAssign + ops::SubAssign + ops::MulAssign + ops::DivAssign {} pub trait BaseFloat: BaseNum + num::Float + num::Signed {} pub trait BaseInt: BaseNum {} impl BaseNum for i8 {} impl BaseNum for i16 {} impl BaseNum for i32 {} impl BaseNum for i64 {} impl BaseNum for isize {} impl BaseNum for u8 {} impl BaseNum for u16 {} impl BaseNum for u32 {} impl BaseNum for u64 {} impl BaseNum for usize {} impl BaseNum for f32 {} impl BaseNum for f64 {} impl BaseInt for i8 {} impl BaseInt for i16 {} impl BaseInt for i32 {} impl BaseInt for i64 {} impl BaseInt for isize {} impl BaseInt for u8 {} impl BaseInt for u16 {} impl BaseInt for u32 {} impl BaseInt for u64 {} impl BaseInt for usize {} impl BaseFloat for f32 {} impl BaseFloat for f64 {} pub fn partial_min<T: cmp::PartialOrd>(a: T, b: T) -> T { if a < b { a } else
} pub fn partial_max<T: cmp::PartialOrd>(a: T, b: T) -> T { if a > b { a } else { b } } impl LinearInterpolate for f32 { type Scalar = f32; } impl LinearInterpolate for f64 { type Scalar = f64; }
{ b }
conditional_block
scalar.rs
use std::cmp; use std::fmt; use std::ops; use num; use math::common::LinearInterpolate; pub type IntScalar = i32; #[cfg(not(feature = "float64"))] pub type FloatScalar = f32; #[cfg(feature = "float64")] pub type FloatScalar = f64; pub trait BaseNum where Self: Copy + Clone + fmt::Debug + cmp::PartialOrd, Self: num::Num + num::NumCast + num::ToPrimitive, Self: ops::AddAssign + ops::SubAssign + ops::MulAssign + ops::DivAssign {} pub trait BaseFloat: BaseNum + num::Float + num::Signed {} pub trait BaseInt: BaseNum {} impl BaseNum for i8 {} impl BaseNum for i16 {} impl BaseNum for i32 {} impl BaseNum for i64 {} impl BaseNum for isize {} impl BaseNum for u8 {} impl BaseNum for u16 {} impl BaseNum for u32 {} impl BaseNum for u64 {} impl BaseNum for usize {} impl BaseNum for f32 {} impl BaseNum for f64 {} impl BaseInt for i8 {} impl BaseInt for i16 {} impl BaseInt for i32 {} impl BaseInt for i64 {} impl BaseInt for isize {} impl BaseInt for u8 {} impl BaseInt for u16 {} impl BaseInt for u32 {} impl BaseInt for u64 {} impl BaseInt for usize {} impl BaseFloat for f32 {} impl BaseFloat for f64 {} pub fn
<T: cmp::PartialOrd>(a: T, b: T) -> T { if a < b { a } else { b } } pub fn partial_max<T: cmp::PartialOrd>(a: T, b: T) -> T { if a > b { a } else { b } } impl LinearInterpolate for f32 { type Scalar = f32; } impl LinearInterpolate for f64 { type Scalar = f64; }
partial_min
identifier_name
nagesenBox.worker.ts
namespace NaaS { class WorkerTimer { private timerID: number | null = null; public onMessage(e: MessageEvent): void { switch (e.data.cmd) { case 'Start': this.start(e.data.fps); break; case 'Stop': this.stop(); break; case 'Enqueue': this.enqueue(e.data.args); break; } } private start(fps: number): void { if (this.timerID === null) { this.timerID = self.setInterval(() => { (self as any).postMessage({ cmd: 'Interval' }); }, 1000 / fps); } } private st
: void { if (this.timerID !== null) { self.clearInterval(this.timerID); this.timerID = null; } } private enqueue(args: ThrowCoinEventArgs): void { (self as any).postMessage({ cmd: 'Enqueue', args }); } } var workerTimer = new WorkerTimer(); self.addEventListener('message', e => workerTimer.onMessage(e)); }
op()
identifier_name
nagesenBox.worker.ts
namespace NaaS { class WorkerTimer { private timerID: number | null = null; public onMessage(e: MessageEvent): void { switch (e.data.cmd) { case 'Start': this.start(e.data.fps); break; case 'Stop': this.stop(); break; case 'Enqueue': this.enqueue(e.data.args); break; } } private start(fps: number): void { if (this.timerID === null) { this.timerID = self.setInterval(() => { (self as any).postMessage({ cmd: 'Interval' }); }, 1000 / fps); } } private stop(): void { if (this.timerID !== null) {
} private enqueue(args: ThrowCoinEventArgs): void { (self as any).postMessage({ cmd: 'Enqueue', args }); } } var workerTimer = new WorkerTimer(); self.addEventListener('message', e => workerTimer.onMessage(e)); }
self.clearInterval(this.timerID); this.timerID = null; }
conditional_block
nagesenBox.worker.ts
namespace NaaS { class WorkerTimer { private timerID: number | null = null; public onMessage(e: MessageEvent): void { switch (e.data.cmd) { case 'Start': this.start(e.data.fps); break; case 'Stop': this.stop(); break; case 'Enqueue': this.enqueue(e.data.args); break; }
this.timerID = self.setInterval(() => { (self as any).postMessage({ cmd: 'Interval' }); }, 1000 / fps); } } private stop(): void { if (this.timerID !== null) { self.clearInterval(this.timerID); this.timerID = null; } } private enqueue(args: ThrowCoinEventArgs): void { (self as any).postMessage({ cmd: 'Enqueue', args }); } } var workerTimer = new WorkerTimer(); self.addEventListener('message', e => workerTimer.onMessage(e)); }
} private start(fps: number): void { if (this.timerID === null) {
random_line_split
seedBreedLookup.js
const mongoose = require("mongoose"); const db = require("../../models"); mongoose.Promise = global.Promise; //This file seeds the database mongoose.connect( process.env.MONGODB_URI || "mongodb://localhost/petrescuers", { useMongoClient: true } ); //Seeding the breed collection with the breed recommendation (breed) for each search combination (breedId) const breedLookupSeed = [ { breedId: 'smallhomelightcalm', breed: 'Chihuahua' }, { breedId: 'smallhomeanycalm', breed: 'Pug' }, { breedId: 'smallhomelighthighenergy', breed: 'Beagle' }, { breedId: 'smallhomeanyhighenergy', breed: 'Jack Russell Terrier' }, { breedId: 'smallapartmentlightcalm', breed: 'Shih Tzu' }, { breedId: 'smallapartmentanycalm', breed: 'Dachshund' }, { breedId: 'smallapartmentlighthighenergy', breed: 'Poodle' }, { breedId: 'smallapartmentanyhighenergy', breed: 'Corgi' }, { breedId: 'mediumhomelightcalm', breed: 'Chow Chow' }, { breedId: 'mediumhomeanycalm', breed: 'Hound' }, { breedId: 'mediumhomelighthighenergy', breed: 'Pit Bull' }, { breedId: 'mediumhomeanyhighenergy', breed: 'Labrador Retriever' }, { breedId: 'mediumapartmentlightcalm', breed: 'Bulldog' }, { breedId: 'mediumapartmentanycalm', breed: 'Shar Pei' }, { breedId: 'mediumapartmentlighthighenergy', breed: 'Border Collie' }, { breedId: 'mediumapartmentanyhighenergy', breed: 'Boxer' }, { breedId: 'largehomelightcalm', breed: 'Mastiff' }, { breedId: 'largehomeanycalm', breed: 'Great Dane' }, { breedId: 'largehomelighthighenergy', breed: 'Pit Bull' }, { breedId: 'largehomeanyhighenergy', breed: 'German Shepherd' }, { breedId: 'largeapartmentlightcalm', breed: 'American Bulldog' }, { breedId: 'largeapartmentanycalm', breed: 'Shar Pei' }, { breedId: 'largeapartmentlighthighenergy', breed: 'Border Collie' }, { breedId: 'largeapartmentanyhighenergy', breed: 'Greyhound' }, { breedId: 'smallyesyes', breed: 'Bombay' }, { breedId: 'smallyesno', breed: 'Russian Blue' }, { breedId: 'smallnono', breed: 'Persian' }, { breedId: 'smallnoyes', breed: 
'Oriental' }, { breedId: 'mediumyesyes', breed: 'Siamese' }, { breedId: 'mediumyesno', breed: 'Russian Blue' }, { breedId: 'mediumnono', breed: 'Persian' }, { breedId: 'mediumnoyes', breed: 'Turkish Van' }, { breedId: 'largeyesyes', breed: 'Maine Coon' }, { breedId: 'largeyesno', breed: 'Russian Blue' }, { breedId: 'largenono', breed: 'American Shorthair' }, { breedId: 'largenoyes', breed: 'Manx' }, ]; db.BreedLookup .remove({}) .then(() => db.BreedLookup.collection.insertMany(breedLookupSeed)) .then(data => {
process.exit(0); }) .catch(err => { console.error(err); process.exit(1); });
console.log(data.insertedIds.length + " records inserted!");
random_line_split
djangojs.js
(function(globals) { var django = globals.django || (globals.django = {}); django.pluralidx = function(n) { var v=(n != 1); if (typeof(v) == 'boolean') { return v ? 1 : 0; } else { return v; } }; /* gettext library */ django.catalog = django.catalog || {}; var newcatalog = { "6 a.m.": "\u09ec \u09aa\u09c2\u09b0\u09cd\u09ac\u09be\u09b9\u09cd\u09a8", "Available %s": "%s \u09ac\u09bf\u09a6\u09cd\u09af\u09ae\u09be\u09a8", "Cancel": "\u09ac\u09be\u09a4\u09bf\u09b2", "Choose": "\u09ac\u09be\u099b\u09be\u0987 \u0995\u09b0\u09c1\u09a8", "Choose a time": "\u09b8\u09ae\u09df \u09a8\u09bf\u09b0\u09cd\u09ac\u09be\u099a\u09a8 \u0995\u09b0\u09c1\u09a8", "Choose all": "\u09b8\u09ac \u09ac\u09be\u099b\u09be\u0987 \u0995\u09b0\u09c1\u09a8", "Chosen %s": "%s \u09ac\u09be\u099b\u09be\u0987 \u0995\u09b0\u09be \u09b9\u09df\u09c7\u099b\u09c7", "Click to choose all %s at once.": "\u09b8\u09ac %s \u098f\u0995\u09ac\u09be\u09b0\u09c7 \u09ac\u09be\u099b\u09be\u0987 \u0995\u09b0\u09be\u09b0 \u099c\u09a8\u09cd\u09af \u0995\u09cd\u09b2\u09bf\u0995 \u0995\u09b0\u09c1\u09a8\u0964", "Filter": "\u09ab\u09bf\u09b2\u09cd\u099f\u09be\u09b0", "Hide": "\u09b2\u09c1\u0995\u09be\u09a8", "Midnight": "\u09ae\u09a7\u09cd\u09af\u09b0\u09be\u09a4", "Noon": "\u09a6\u09c1\u09aa\u09c1\u09b0", "Note: You are %s hour ahead of server time.": [ "\u09a8\u09cb\u099f: \u0986\u09aa\u09a8\u09bf \u09b8\u09be\u09b0\u09cd\u09ad\u09be\u09b0 \u09b8\u09ae\u09df\u09c7\u09b0 \u099a\u09c7\u09df\u09c7 %s \u0998\u09a8\u09cd\u099f\u09be \u09b8\u09be\u09ae\u09a8\u09c7 \u0986\u099b\u09c7\u09a8\u0964", "\u09a8\u09cb\u099f: \u0986\u09aa\u09a8\u09bf \u09b8\u09be\u09b0\u09cd\u09ad\u09be\u09b0 \u09b8\u09ae\u09df\u09c7\u09b0 \u099a\u09c7\u09df\u09c7 %s \u0998\u09a8\u09cd\u099f\u09be \u09b8\u09be\u09ae\u09a8\u09c7 \u0986\u099b\u09c7\u09a8\u0964" ], "Note: You are %s hour behind server time.": [ "\u09a8\u09cb\u099f: \u0986\u09aa\u09a8\u09bf \u09b8\u09be\u09b0\u09cd\u09ad\u09be\u09b0 \u09b8\u09ae\u09df\u09c7\u09b0 \u099a\u09c7\u09df\u09c7 %s 
\u0998\u09a8\u09cd\u099f\u09be \u09aa\u09c7\u099b\u09a8\u09c7 \u0986\u099b\u09c7\u09a8\u0964", "\u09a8\u09cb\u099f: \u0986\u09aa\u09a8\u09bf \u09b8\u09be\u09b0\u09cd\u09ad\u09be\u09b0 \u09b8\u09ae\u09df\u09c7\u09b0 \u099a\u09c7\u09df\u09c7 %s \u0998\u09a8\u09cd\u099f\u09be \u09aa\u09c7\u099b\u09a8\u09c7 \u0986\u099b\u09c7\u09a8\u0964" ], "Now": "\u098f\u0996\u09a8", "Remove": "\u09ae\u09c1\u099b\u09c7 \u09ab\u09c7\u09b2\u09c1\u09a8", "Remove all": "\u09b8\u09ac \u09ae\u09c1\u099b\u09c7 \u09ab\u09c7\u09b2\u09c1\u09a8", "Show": "\u09a6\u09c7\u0996\u09be\u09a8", "Today": "\u0986\u099c", "Tomorrow": "\u0986\u0997\u09be\u09ae\u09c0\u0995\u09be\u09b2", "Yesterday": "\u0997\u09a4\u0995\u09be\u09b2" }; for (var key in newcatalog) { django.catalog[key] = newcatalog[key]; } if (!django.jsi18n_initialized) { django.gettext = function(msgid) { var value = django.catalog[msgid]; if (typeof(value) == 'undefined') { return msgid; } else { return (typeof(value) == 'string') ? value : value[0]; } }; django.ngettext = function(singular, plural, count) { var value = django.catalog[singular]; if (typeof(value) == 'undefined') { return (count == 1) ? singular : plural; } else { return value[django.pluralidx(count)]; } }; django.gettext_noop = function(msgid) { return msgid; }; django.pgettext = function(context, msgid) { var value = django.gettext(context + '\x04' + msgid); if (value.indexOf('\x04') != -1) { value = msgid; } return value; }; django.npgettext = function(context, singular, plural, count) { var value = django.ngettext(context + '\x04' + singular, context + '\x04' + plural, count); if (value.indexOf('\x04') != -1) { value = django.ngettext(singular, plural, count); } return value;
}; django.interpolate = function(fmt, obj, named) { if (named) { return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])}); } else { return fmt.replace(/%s/g, function(match){return String(obj.shift())}); } }; /* formatting library */ django.formats = { "DATETIME_FORMAT": "N j, Y, P", "DATETIME_INPUT_FORMATS": [ "%d/%m/%Y %H:%M:%S", "%d/%m/%Y %H:%M", "%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H:%M:%S.%f", "%Y-%m-%d %H:%M", "%Y-%m-%d" ], "DATE_FORMAT": "j F, Y", "DATE_INPUT_FORMATS": [ "%d/%m/%Y", "%d/%m/%y", "%d-%m-%Y", "%d-%m-%y", "%Y-%m-%d" ], "DECIMAL_SEPARATOR": ".", "FIRST_DAY_OF_WEEK": "6", "MONTH_DAY_FORMAT": "j F", "NUMBER_GROUPING": "0", "SHORT_DATETIME_FORMAT": "m/d/Y P", "SHORT_DATE_FORMAT": "j M, Y", "THOUSAND_SEPARATOR": ",", "TIME_FORMAT": "g:i A", "TIME_INPUT_FORMATS": [ "%H:%M:%S", "%H:%M", "%H:%M:%S.%f" ], "YEAR_MONTH_FORMAT": "F Y" }; django.get_format = function(format_type) { var value = django.formats[format_type]; if (typeof(value) == 'undefined') { return format_type; } else { return value; } }; /* add to global namespace */ globals.pluralidx = django.pluralidx; globals.gettext = django.gettext; globals.ngettext = django.ngettext; globals.gettext_noop = django.gettext_noop; globals.pgettext = django.pgettext; globals.npgettext = django.npgettext; globals.interpolate = django.interpolate; globals.get_format = django.get_format; django.jsi18n_initialized = true; } }(this));
random_line_split
djangojs.js
(function(globals) { var django = globals.django || (globals.django = {}); django.pluralidx = function(n) { var v=(n != 1); if (typeof(v) == 'boolean') { return v ? 1 : 0; } else { return v; } }; /* gettext library */ django.catalog = django.catalog || {}; var newcatalog = { "6 a.m.": "\u09ec \u09aa\u09c2\u09b0\u09cd\u09ac\u09be\u09b9\u09cd\u09a8", "Available %s": "%s \u09ac\u09bf\u09a6\u09cd\u09af\u09ae\u09be\u09a8", "Cancel": "\u09ac\u09be\u09a4\u09bf\u09b2", "Choose": "\u09ac\u09be\u099b\u09be\u0987 \u0995\u09b0\u09c1\u09a8", "Choose a time": "\u09b8\u09ae\u09df \u09a8\u09bf\u09b0\u09cd\u09ac\u09be\u099a\u09a8 \u0995\u09b0\u09c1\u09a8", "Choose all": "\u09b8\u09ac \u09ac\u09be\u099b\u09be\u0987 \u0995\u09b0\u09c1\u09a8", "Chosen %s": "%s \u09ac\u09be\u099b\u09be\u0987 \u0995\u09b0\u09be \u09b9\u09df\u09c7\u099b\u09c7", "Click to choose all %s at once.": "\u09b8\u09ac %s \u098f\u0995\u09ac\u09be\u09b0\u09c7 \u09ac\u09be\u099b\u09be\u0987 \u0995\u09b0\u09be\u09b0 \u099c\u09a8\u09cd\u09af \u0995\u09cd\u09b2\u09bf\u0995 \u0995\u09b0\u09c1\u09a8\u0964", "Filter": "\u09ab\u09bf\u09b2\u09cd\u099f\u09be\u09b0", "Hide": "\u09b2\u09c1\u0995\u09be\u09a8", "Midnight": "\u09ae\u09a7\u09cd\u09af\u09b0\u09be\u09a4", "Noon": "\u09a6\u09c1\u09aa\u09c1\u09b0", "Note: You are %s hour ahead of server time.": [ "\u09a8\u09cb\u099f: \u0986\u09aa\u09a8\u09bf \u09b8\u09be\u09b0\u09cd\u09ad\u09be\u09b0 \u09b8\u09ae\u09df\u09c7\u09b0 \u099a\u09c7\u09df\u09c7 %s \u0998\u09a8\u09cd\u099f\u09be \u09b8\u09be\u09ae\u09a8\u09c7 \u0986\u099b\u09c7\u09a8\u0964", "\u09a8\u09cb\u099f: \u0986\u09aa\u09a8\u09bf \u09b8\u09be\u09b0\u09cd\u09ad\u09be\u09b0 \u09b8\u09ae\u09df\u09c7\u09b0 \u099a\u09c7\u09df\u09c7 %s \u0998\u09a8\u09cd\u099f\u09be \u09b8\u09be\u09ae\u09a8\u09c7 \u0986\u099b\u09c7\u09a8\u0964" ], "Note: You are %s hour behind server time.": [ "\u09a8\u09cb\u099f: \u0986\u09aa\u09a8\u09bf \u09b8\u09be\u09b0\u09cd\u09ad\u09be\u09b0 \u09b8\u09ae\u09df\u09c7\u09b0 \u099a\u09c7\u09df\u09c7 %s 
\u0998\u09a8\u09cd\u099f\u09be \u09aa\u09c7\u099b\u09a8\u09c7 \u0986\u099b\u09c7\u09a8\u0964", "\u09a8\u09cb\u099f: \u0986\u09aa\u09a8\u09bf \u09b8\u09be\u09b0\u09cd\u09ad\u09be\u09b0 \u09b8\u09ae\u09df\u09c7\u09b0 \u099a\u09c7\u09df\u09c7 %s \u0998\u09a8\u09cd\u099f\u09be \u09aa\u09c7\u099b\u09a8\u09c7 \u0986\u099b\u09c7\u09a8\u0964" ], "Now": "\u098f\u0996\u09a8", "Remove": "\u09ae\u09c1\u099b\u09c7 \u09ab\u09c7\u09b2\u09c1\u09a8", "Remove all": "\u09b8\u09ac \u09ae\u09c1\u099b\u09c7 \u09ab\u09c7\u09b2\u09c1\u09a8", "Show": "\u09a6\u09c7\u0996\u09be\u09a8", "Today": "\u0986\u099c", "Tomorrow": "\u0986\u0997\u09be\u09ae\u09c0\u0995\u09be\u09b2", "Yesterday": "\u0997\u09a4\u0995\u09be\u09b2" }; for (var key in newcatalog) { django.catalog[key] = newcatalog[key]; } if (!django.jsi18n_initialized) { django.gettext = function(msgid) { var value = django.catalog[msgid]; if (typeof(value) == 'undefined') { return msgid; } else { return (typeof(value) == 'string') ? value : value[0]; } }; django.ngettext = function(singular, plural, count) { var value = django.catalog[singular]; if (typeof(value) == 'undefined')
else { return value[django.pluralidx(count)]; } }; django.gettext_noop = function(msgid) { return msgid; }; django.pgettext = function(context, msgid) { var value = django.gettext(context + '\x04' + msgid); if (value.indexOf('\x04') != -1) { value = msgid; } return value; }; django.npgettext = function(context, singular, plural, count) { var value = django.ngettext(context + '\x04' + singular, context + '\x04' + plural, count); if (value.indexOf('\x04') != -1) { value = django.ngettext(singular, plural, count); } return value; }; django.interpolate = function(fmt, obj, named) { if (named) { return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])}); } else { return fmt.replace(/%s/g, function(match){return String(obj.shift())}); } }; /* formatting library */ django.formats = { "DATETIME_FORMAT": "N j, Y, P", "DATETIME_INPUT_FORMATS": [ "%d/%m/%Y %H:%M:%S", "%d/%m/%Y %H:%M", "%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H:%M:%S.%f", "%Y-%m-%d %H:%M", "%Y-%m-%d" ], "DATE_FORMAT": "j F, Y", "DATE_INPUT_FORMATS": [ "%d/%m/%Y", "%d/%m/%y", "%d-%m-%Y", "%d-%m-%y", "%Y-%m-%d" ], "DECIMAL_SEPARATOR": ".", "FIRST_DAY_OF_WEEK": "6", "MONTH_DAY_FORMAT": "j F", "NUMBER_GROUPING": "0", "SHORT_DATETIME_FORMAT": "m/d/Y P", "SHORT_DATE_FORMAT": "j M, Y", "THOUSAND_SEPARATOR": ",", "TIME_FORMAT": "g:i A", "TIME_INPUT_FORMATS": [ "%H:%M:%S", "%H:%M", "%H:%M:%S.%f" ], "YEAR_MONTH_FORMAT": "F Y" }; django.get_format = function(format_type) { var value = django.formats[format_type]; if (typeof(value) == 'undefined') { return format_type; } else { return value; } }; /* add to global namespace */ globals.pluralidx = django.pluralidx; globals.gettext = django.gettext; globals.ngettext = django.ngettext; globals.gettext_noop = django.gettext_noop; globals.pgettext = django.pgettext; globals.npgettext = django.npgettext; globals.interpolate = django.interpolate; globals.get_format = django.get_format; django.jsi18n_initialized = true; } }(this));
{ return (count == 1) ? singular : plural; }
conditional_block
Globals.js
/* * * * (c) 2010-2019 Torstein Honsi * * License: www.highcharts.com/license * * !!!!!!! SOURCE GETS TRANSPILED BY TYPESCRIPT. EDIT TS FILE ONLY. !!!!!!! * * */ 'use strict'; /* globals Image, window */ /** * Reference to the global SVGElement class as a workaround for a name conflict * in the Highcharts namespace. * * @global * @typedef {global.SVGElement} GlobalSVGElement * * @see https://developer.mozilla.org/en-US/docs/Web/API/SVGElement */ // glob is a temporary fix to allow our es-modules to work. var glob = ( // @todo UMD variable named `window`, and glob named `win` typeof win !== 'undefined' ? win : typeof window !== 'undefined' ? window : {}), doc = glob.document, SVG_NS = 'http://www.w3.org/2000/svg', userAgent = (glob.navigator && glob.navigator.userAgent) || '', svg = (doc && doc.createElementNS && !!doc.createElementNS(SVG_NS, 'svg').createSVGRect), isMS = /(edge|msie|trident)/i.test(userAgent) && !glob.opera, isFirefox = userAgent.indexOf('Firefox') !== -1, isChrome = userAgent.indexOf('Chrome') !== -1, hasBidiBug = (isFirefox && parseInt(userAgent.split('Firefox/')[1], 10) < 4 // issue #38
); var H = { product: 'Highcharts', version: '8.0.0', deg2rad: Math.PI * 2 / 360, doc: doc, hasBidiBug: hasBidiBug, hasTouch: !!glob.TouchEvent, isMS: isMS, isWebKit: userAgent.indexOf('AppleWebKit') !== -1, isFirefox: isFirefox, isChrome: isChrome, isSafari: !isChrome && userAgent.indexOf('Safari') !== -1, isTouchDevice: /(Mobile|Android|Windows Phone)/.test(userAgent), SVG_NS: SVG_NS, chartCount: 0, seriesTypes: {}, symbolSizes: {}, svg: svg, win: glob, marginNames: ['plotTop', 'marginRight', 'marginBottom', 'plotLeft'], noop: function () { }, /** * An array containing the current chart objects in the page. A chart's * position in the array is preserved throughout the page's lifetime. When * a chart is destroyed, the array item becomes `undefined`. * * @name Highcharts.charts * @type {Array<Highcharts.Chart|undefined>} */ charts: [], /** * A hook for defining additional date format specifiers. New * specifiers are defined as key-value pairs by using the * specifier as key, and a function which takes the timestamp as * value. This function returns the formatted portion of the * date. * * @sample highcharts/global/dateformats/ * Adding support for week number * * @name Highcharts.dateFormats * @type {Highcharts.Dictionary<Highcharts.TimeFormatCallbackFunction>} */ dateFormats: {} }; export default H;
random_line_split
__init__.py
# Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Provides fakes for several of Telemetry's internal objects. These allow code like story_runner and Benchmark to be run and tested without compiling or starting a browser. Class names prepended with an underscore are intended to be implementation details, and should not be subclassed; however, some, like _FakeBrowser, have public APIs that may need to be called in tests. """ from telemetry.internal.backends.chrome_inspector import websocket from telemetry.internal.browser import browser_options from telemetry.internal.platform import system_info from telemetry.page import shared_page_state from telemetry.util import image_util from telemetry.testing.internal import fake_gpu_info from types import ModuleType # Classes and functions which are intended to be part of the public # fakes API. class FakePlatform(object): def __init__(self): self._network_controller = None self._tracing_controller = None self._has_battor = False self._os_name = 'FakeOS' self._device_type_name = 'abc' self._is_svelte = False self._is_aosp = True @property def is_host_platform(self): raise NotImplementedError @property def network_controller(self): if self._network_controller is None: self._network_controller = _FakeNetworkController() return self._network_controller @property def tracing_controller(self): if self._tracing_controller is None: self._tracing_controller = _FakeTracingController() return self._tracing_controller def Initialize(self): pass def CanMonitorThermalThrottling(self): return False def IsThermallyThrottled(self): return False def HasBeenThermallyThrottled(self): return False def GetArchName(self): raise NotImplementedError def SetOSName(self, name): self._os_name = name def GetOSName(self): return self._os_name def GetOSVersionName(self): raise NotImplementedError def GetOSVersionDetailString(self): raise 
NotImplementedError def StopAllLocalServers(self): pass def WaitForBatteryTemperature(self, _): pass def HasBattOrConnected(self): return self._has_battor def SetBattOrDetected(self, b): assert isinstance(b, bool) self._has_battor = b # TODO(rnephew): Investigate moving from setters to @property. def SetDeviceTypeName(self, name): self._device_type_name = name def GetDeviceTypeName(self):
def SetIsSvelte(self, b): assert isinstance(b, bool) self._is_svelte = b def IsSvelte(self): if self._os_name != 'android': raise NotImplementedError return self._is_svelte def SetIsAosp(self, b): assert isinstance(b, bool) self._is_aosp = b def IsAosp(self): return self._is_aosp and self._os_name == 'android' class FakeLinuxPlatform(FakePlatform): def __init__(self): super(FakeLinuxPlatform, self).__init__() self.screenshot_png_data = None self.http_server_directories = [] self.http_server = FakeHTTPServer() @property def is_host_platform(self): return True def GetDeviceTypeName(self): return 'Desktop' def GetArchName(self): return 'x86_64' def GetOSName(self): return 'linux' def GetOSVersionName(self): return 'trusty' def GetOSVersionDetailString(self): return '' def CanTakeScreenshot(self): return bool(self.screenshot_png_data) def TakeScreenshot(self, file_path): if not self.CanTakeScreenshot(): raise NotImplementedError img = image_util.FromBase64Png(self.screenshot_png_data) image_util.WritePngFile(img, file_path) return True def SetHTTPServerDirectories(self, paths): self.http_server_directories.append(paths) class FakeHTTPServer(object): def UrlOf(self, url): del url # unused return 'file:///foo' class FakePossibleBrowser(object): def __init__(self, execute_on_startup=None, execute_after_browser_creation=None): self._returned_browser = _FakeBrowser(FakeLinuxPlatform()) self.browser_type = 'linux' self.supports_tab_control = False self.is_remote = False self.execute_on_startup = execute_on_startup self.execute_after_browser_creation = execute_after_browser_creation @property def returned_browser(self): """The browser object that will be returned through later API calls.""" return self._returned_browser def Create(self, finder_options): if self.execute_on_startup is not None: self.execute_on_startup() del finder_options # unused if self.execute_after_browser_creation is not None: self.execute_after_browser_creation(self._returned_browser) return 
self.returned_browser @property def platform(self): """The platform object from the returned browser. To change this or set it up, change the returned browser's platform. """ return self.returned_browser.platform def IsRemote(self): return self.is_remote def SetCredentialsPath(self, _): pass class FakeSharedPageState(shared_page_state.SharedPageState): def __init__(self, test, finder_options, story_set): super(FakeSharedPageState, self).__init__(test, finder_options, story_set) def _GetPossibleBrowser(self, test, finder_options): p = FakePossibleBrowser() self.ConfigurePossibleBrowser(p) return p def ConfigurePossibleBrowser(self, possible_browser): """Override this to configure the PossibleBrowser. Can make changes to the browser's configuration here via e.g.: possible_browser.returned_browser.returned_system_info = ... """ pass def DidRunStory(self, results): # TODO(kbr): add a test which throws an exception from DidRunStory # to verify the fix from https://crrev.com/86984d5fc56ce00e7b37ebe . 
super(FakeSharedPageState, self).DidRunStory(results) class FakeSystemInfo(system_info.SystemInfo): def __init__(self, model_name='', gpu_dict=None, command_line=''): if gpu_dict == None: gpu_dict = fake_gpu_info.FAKE_GPU_INFO super(FakeSystemInfo, self).__init__(model_name, gpu_dict, command_line) class _FakeBrowserFinderOptions(browser_options.BrowserFinderOptions): def __init__(self, execute_on_startup=None, execute_after_browser_creation=None, *args, **kwargs): browser_options.BrowserFinderOptions.__init__(self, *args, **kwargs) self.fake_possible_browser = \ FakePossibleBrowser( execute_on_startup=execute_on_startup, execute_after_browser_creation=execute_after_browser_creation) def CreateBrowserFinderOptions(browser_type=None, execute_on_startup=None, execute_after_browser_creation=None): """Creates fake browser finder options for discovering a browser.""" return _FakeBrowserFinderOptions( browser_type=browser_type, execute_on_startup=execute_on_startup, execute_after_browser_creation=execute_after_browser_creation) # Internal classes. Note that end users may still need to both call # and mock out methods of these classes, but they should not be # subclassed. class _FakeBrowser(object): def __init__(self, platform): self._tabs = _FakeTabList(self) # Fake the creation of the first tab. self._tabs.New() self._returned_system_info = FakeSystemInfo() self._platform = platform self._browser_type = 'release' self._is_crashed = False @property def platform(self): return self._platform @platform.setter def platform(self, incoming): """Allows overriding of the fake browser's platform object.""" assert isinstance(incoming, FakePlatform) self._platform = incoming @property def returned_system_info(self): """The object which will be returned from calls to GetSystemInfo.""" return self._returned_system_info @returned_system_info.setter def returned_system_info(self, incoming): """Allows overriding of the returned SystemInfo object. 
Incoming argument must be an instance of FakeSystemInfo.""" assert isinstance(incoming, FakeSystemInfo) self._returned_system_info = incoming @property def browser_type(self): """The browser_type this browser claims to be ('debug', 'release', etc.)""" return self._browser_type @browser_type.setter def browser_type(self, incoming): """Allows setting of the browser_type.""" self._browser_type = incoming @property def credentials(self): return _FakeCredentials() def Close(self): self._is_crashed = False @property def supports_system_info(self): return True def GetSystemInfo(self): return self.returned_system_info @property def supports_tab_control(self): return True @property def tabs(self): return self._tabs def DumpStateUponFailure(self): pass class _FakeCredentials(object): def WarnIfMissingCredentials(self, _): pass class _FakeTracingController(object): def __init__(self): self._is_tracing = False def StartTracing(self, tracing_config, timeout=10): self._is_tracing = True del tracing_config del timeout def StopTracing(self): self._is_tracing = False @property def is_tracing_running(self): return self._is_tracing def ClearStateIfNeeded(self): pass def IsChromeTracingSupported(self): return True class _FakeNetworkController(object): def __init__(self): self.wpr_mode = None self.extra_wpr_args = None self.is_initialized = False self.is_open = False self.use_live_traffic = None def InitializeIfNeeded(self, use_live_traffic=False): self.use_live_traffic = use_live_traffic def UpdateTrafficSettings(self, round_trip_latency_ms=None, download_bandwidth_kbps=None, upload_bandwidth_kbps=None): pass def Open(self, wpr_mode, extra_wpr_args, use_wpr_go=False): del use_wpr_go # Unused. 
self.wpr_mode = wpr_mode self.extra_wpr_args = extra_wpr_args self.is_open = True def Close(self): self.wpr_mode = None self.extra_wpr_args = None self.is_initialized = False self.is_open = False def StartReplay(self, archive_path, make_javascript_deterministic=False): del make_javascript_deterministic # Unused. assert self.is_open self.is_initialized = archive_path is not None def StopReplay(self): self.is_initialized = False class _FakeTab(object): def __init__(self, browser, tab_id): self._browser = browser self._tab_id = str(tab_id) self._collect_garbage_count = 0 self.test_png = None @property def collect_garbage_count(self): return self._collect_garbage_count @property def id(self): return self._tab_id @property def browser(self): return self._browser def WaitForDocumentReadyStateToBeComplete(self, timeout=0): pass def Navigate(self, url, script_to_evaluate_on_commit=None, timeout=0): del script_to_evaluate_on_commit, timeout # unused if url == 'chrome://crash': self.browser._is_crashed = True raise Exception def WaitForDocumentReadyStateToBeInteractiveOrBetter(self, timeout=0): pass def WaitForFrameToBeDisplayed(self, timeout=0): pass def IsAlive(self): return True def CloseConnections(self): pass def CollectGarbage(self): self._collect_garbage_count += 1 def Close(self): pass @property def screenshot_supported(self): return self.test_png is not None def Screenshot(self): assert self.screenshot_supported, 'Screenshot is not supported' return image_util.FromBase64Png(self.test_png) class _FakeTabList(object): _current_tab_id = 0 def __init__(self, browser): self._tabs = [] self._browser = browser def New(self, timeout=300): del timeout # unused type(self)._current_tab_id += 1 t = _FakeTab(self._browser, type(self)._current_tab_id) self._tabs.append(t) return t def __iter__(self): return self._tabs.__iter__() def __len__(self): return len(self._tabs) def __getitem__(self, index): if self._tabs[index].browser._is_crashed: raise Exception else: return 
self._tabs[index] def GetTabById(self, identifier): """The identifier of a tab can be accessed with tab.id.""" for tab in self._tabs: if tab.id == identifier: return tab return None class FakeInspectorWebsocket(object): _NOTIFICATION_EVENT = 1 _NOTIFICATION_CALLBACK = 2 """A fake InspectorWebsocket. A fake that allows tests to send pregenerated data. Normal InspectorWebsockets allow for any number of domain handlers. This fake only allows up to 1 domain handler, and assumes that the domain of the response always matches that of the handler. """ def __init__(self, mock_timer): self._mock_timer = mock_timer self._notifications = [] self._response_handlers = {} self._pending_callbacks = {} self._handler = None def RegisterDomain(self, _, handler): self._handler = handler def AddEvent(self, method, params, time): if self._notifications: assert self._notifications[-1][1] < time, ( 'Current response is scheduled earlier than previous response.') response = {'method': method, 'params': params} self._notifications.append((response, time, self._NOTIFICATION_EVENT)) def AddAsyncResponse(self, method, result, time): if self._notifications: assert self._notifications[-1][1] < time, ( 'Current response is scheduled earlier than previous response.') response = {'method': method, 'result': result} self._notifications.append((response, time, self._NOTIFICATION_CALLBACK)) def AddResponseHandler(self, method, handler): self._response_handlers[method] = handler def SyncRequest(self, request, *args, **kwargs): del args, kwargs # unused handler = self._response_handlers[request['method']] return handler(request) if handler else None def AsyncRequest(self, request, callback): self._pending_callbacks.setdefault(request['method'], []).append(callback) def SendAndIgnoreResponse(self, request): pass def Connect(self, _): pass def DispatchNotifications(self, timeout): current_time = self._mock_timer.time() if not self._notifications: self._mock_timer.SetTime(current_time + timeout + 1) raise 
websocket.WebSocketTimeoutException() response, time, kind = self._notifications[0] if time - current_time > timeout: self._mock_timer.SetTime(current_time + timeout + 1) raise websocket.WebSocketTimeoutException() self._notifications.pop(0) self._mock_timer.SetTime(time + 1) if kind == self._NOTIFICATION_EVENT: self._handler(response) elif kind == self._NOTIFICATION_CALLBACK: callback = self._pending_callbacks.get(response['method']).pop(0) callback(response) else: raise Exception('Unexpected response type') class FakeTimer(object): """ A fake timer to fake out the timing for a module. Args: module: module to fake out the time """ def __init__(self, module=None): self._elapsed_time = 0 self._module = module self._actual_time = None if module: assert isinstance(module, ModuleType) self._actual_time = module.time self._module.time = self def sleep(self, time): self._elapsed_time += time def time(self): return self._elapsed_time def SetTime(self, time): self._elapsed_time = time def __del__(self): self.Restore() def Restore(self): if self._module: self._module.time = self._actual_time self._module = None self._actual_time = None
return self._device_type_name
identifier_body
__init__.py
# Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Provides fakes for several of Telemetry's internal objects. These allow code like story_runner and Benchmark to be run and tested without compiling or starting a browser. Class names prepended with an underscore are intended to be implementation details, and should not be subclassed; however, some, like _FakeBrowser, have public APIs that may need to be called in tests. """ from telemetry.internal.backends.chrome_inspector import websocket from telemetry.internal.browser import browser_options from telemetry.internal.platform import system_info from telemetry.page import shared_page_state from telemetry.util import image_util from telemetry.testing.internal import fake_gpu_info from types import ModuleType # Classes and functions which are intended to be part of the public # fakes API. class FakePlatform(object): def __init__(self): self._network_controller = None self._tracing_controller = None self._has_battor = False self._os_name = 'FakeOS' self._device_type_name = 'abc' self._is_svelte = False self._is_aosp = True @property def is_host_platform(self): raise NotImplementedError @property def network_controller(self): if self._network_controller is None: self._network_controller = _FakeNetworkController() return self._network_controller @property def tracing_controller(self): if self._tracing_controller is None: self._tracing_controller = _FakeTracingController() return self._tracing_controller def Initialize(self): pass def CanMonitorThermalThrottling(self): return False def IsThermallyThrottled(self): return False def HasBeenThermallyThrottled(self): return False def GetArchName(self): raise NotImplementedError def SetOSName(self, name): self._os_name = name def GetOSName(self): return self._os_name def GetOSVersionName(self): raise NotImplementedError def GetOSVersionDetailString(self): raise 
NotImplementedError def StopAllLocalServers(self): pass def WaitForBatteryTemperature(self, _): pass def HasBattOrConnected(self): return self._has_battor def SetBattOrDetected(self, b): assert isinstance(b, bool) self._has_battor = b # TODO(rnephew): Investigate moving from setters to @property. def SetDeviceTypeName(self, name): self._device_type_name = name def GetDeviceTypeName(self): return self._device_type_name def SetIsSvelte(self, b): assert isinstance(b, bool) self._is_svelte = b def IsSvelte(self): if self._os_name != 'android': raise NotImplementedError return self._is_svelte def SetIsAosp(self, b): assert isinstance(b, bool) self._is_aosp = b def IsAosp(self): return self._is_aosp and self._os_name == 'android' class FakeLinuxPlatform(FakePlatform): def __init__(self): super(FakeLinuxPlatform, self).__init__() self.screenshot_png_data = None self.http_server_directories = [] self.http_server = FakeHTTPServer() @property def is_host_platform(self): return True def GetDeviceTypeName(self): return 'Desktop' def GetArchName(self): return 'x86_64' def GetOSName(self): return 'linux' def GetOSVersionName(self): return 'trusty' def GetOSVersionDetailString(self): return '' def CanTakeScreenshot(self): return bool(self.screenshot_png_data) def TakeScreenshot(self, file_path): if not self.CanTakeScreenshot(): raise NotImplementedError img = image_util.FromBase64Png(self.screenshot_png_data) image_util.WritePngFile(img, file_path) return True def SetHTTPServerDirectories(self, paths): self.http_server_directories.append(paths) class FakeHTTPServer(object): def UrlOf(self, url): del url # unused return 'file:///foo' class FakePossibleBrowser(object): def __init__(self, execute_on_startup=None, execute_after_browser_creation=None): self._returned_browser = _FakeBrowser(FakeLinuxPlatform()) self.browser_type = 'linux' self.supports_tab_control = False self.is_remote = False self.execute_on_startup = execute_on_startup self.execute_after_browser_creation = 
execute_after_browser_creation @property def returned_browser(self): """The browser object that will be returned through later API calls.""" return self._returned_browser def Create(self, finder_options): if self.execute_on_startup is not None: self.execute_on_startup() del finder_options # unused if self.execute_after_browser_creation is not None: self.execute_after_browser_creation(self._returned_browser) return self.returned_browser @property def platform(self): """The platform object from the returned browser. To change this or set it up, change the returned browser's platform. """ return self.returned_browser.platform def IsRemote(self): return self.is_remote def SetCredentialsPath(self, _): pass class FakeSharedPageState(shared_page_state.SharedPageState): def __init__(self, test, finder_options, story_set): super(FakeSharedPageState, self).__init__(test, finder_options, story_set) def _GetPossibleBrowser(self, test, finder_options): p = FakePossibleBrowser() self.ConfigurePossibleBrowser(p) return p def
(self, possible_browser): """Override this to configure the PossibleBrowser. Can make changes to the browser's configuration here via e.g.: possible_browser.returned_browser.returned_system_info = ... """ pass def DidRunStory(self, results): # TODO(kbr): add a test which throws an exception from DidRunStory # to verify the fix from https://crrev.com/86984d5fc56ce00e7b37ebe . super(FakeSharedPageState, self).DidRunStory(results) class FakeSystemInfo(system_info.SystemInfo): def __init__(self, model_name='', gpu_dict=None, command_line=''): if gpu_dict == None: gpu_dict = fake_gpu_info.FAKE_GPU_INFO super(FakeSystemInfo, self).__init__(model_name, gpu_dict, command_line) class _FakeBrowserFinderOptions(browser_options.BrowserFinderOptions): def __init__(self, execute_on_startup=None, execute_after_browser_creation=None, *args, **kwargs): browser_options.BrowserFinderOptions.__init__(self, *args, **kwargs) self.fake_possible_browser = \ FakePossibleBrowser( execute_on_startup=execute_on_startup, execute_after_browser_creation=execute_after_browser_creation) def CreateBrowserFinderOptions(browser_type=None, execute_on_startup=None, execute_after_browser_creation=None): """Creates fake browser finder options for discovering a browser.""" return _FakeBrowserFinderOptions( browser_type=browser_type, execute_on_startup=execute_on_startup, execute_after_browser_creation=execute_after_browser_creation) # Internal classes. Note that end users may still need to both call # and mock out methods of these classes, but they should not be # subclassed. class _FakeBrowser(object): def __init__(self, platform): self._tabs = _FakeTabList(self) # Fake the creation of the first tab. 
self._tabs.New() self._returned_system_info = FakeSystemInfo() self._platform = platform self._browser_type = 'release' self._is_crashed = False @property def platform(self): return self._platform @platform.setter def platform(self, incoming): """Allows overriding of the fake browser's platform object.""" assert isinstance(incoming, FakePlatform) self._platform = incoming @property def returned_system_info(self): """The object which will be returned from calls to GetSystemInfo.""" return self._returned_system_info @returned_system_info.setter def returned_system_info(self, incoming): """Allows overriding of the returned SystemInfo object. Incoming argument must be an instance of FakeSystemInfo.""" assert isinstance(incoming, FakeSystemInfo) self._returned_system_info = incoming @property def browser_type(self): """The browser_type this browser claims to be ('debug', 'release', etc.)""" return self._browser_type @browser_type.setter def browser_type(self, incoming): """Allows setting of the browser_type.""" self._browser_type = incoming @property def credentials(self): return _FakeCredentials() def Close(self): self._is_crashed = False @property def supports_system_info(self): return True def GetSystemInfo(self): return self.returned_system_info @property def supports_tab_control(self): return True @property def tabs(self): return self._tabs def DumpStateUponFailure(self): pass class _FakeCredentials(object): def WarnIfMissingCredentials(self, _): pass class _FakeTracingController(object): def __init__(self): self._is_tracing = False def StartTracing(self, tracing_config, timeout=10): self._is_tracing = True del tracing_config del timeout def StopTracing(self): self._is_tracing = False @property def is_tracing_running(self): return self._is_tracing def ClearStateIfNeeded(self): pass def IsChromeTracingSupported(self): return True class _FakeNetworkController(object): def __init__(self): self.wpr_mode = None self.extra_wpr_args = None self.is_initialized = False 
self.is_open = False self.use_live_traffic = None def InitializeIfNeeded(self, use_live_traffic=False): self.use_live_traffic = use_live_traffic def UpdateTrafficSettings(self, round_trip_latency_ms=None, download_bandwidth_kbps=None, upload_bandwidth_kbps=None): pass def Open(self, wpr_mode, extra_wpr_args, use_wpr_go=False): del use_wpr_go # Unused. self.wpr_mode = wpr_mode self.extra_wpr_args = extra_wpr_args self.is_open = True def Close(self): self.wpr_mode = None self.extra_wpr_args = None self.is_initialized = False self.is_open = False def StartReplay(self, archive_path, make_javascript_deterministic=False): del make_javascript_deterministic # Unused. assert self.is_open self.is_initialized = archive_path is not None def StopReplay(self): self.is_initialized = False class _FakeTab(object): def __init__(self, browser, tab_id): self._browser = browser self._tab_id = str(tab_id) self._collect_garbage_count = 0 self.test_png = None @property def collect_garbage_count(self): return self._collect_garbage_count @property def id(self): return self._tab_id @property def browser(self): return self._browser def WaitForDocumentReadyStateToBeComplete(self, timeout=0): pass def Navigate(self, url, script_to_evaluate_on_commit=None, timeout=0): del script_to_evaluate_on_commit, timeout # unused if url == 'chrome://crash': self.browser._is_crashed = True raise Exception def WaitForDocumentReadyStateToBeInteractiveOrBetter(self, timeout=0): pass def WaitForFrameToBeDisplayed(self, timeout=0): pass def IsAlive(self): return True def CloseConnections(self): pass def CollectGarbage(self): self._collect_garbage_count += 1 def Close(self): pass @property def screenshot_supported(self): return self.test_png is not None def Screenshot(self): assert self.screenshot_supported, 'Screenshot is not supported' return image_util.FromBase64Png(self.test_png) class _FakeTabList(object): _current_tab_id = 0 def __init__(self, browser): self._tabs = [] self._browser = browser def New(self, 
timeout=300): del timeout # unused type(self)._current_tab_id += 1 t = _FakeTab(self._browser, type(self)._current_tab_id) self._tabs.append(t) return t def __iter__(self): return self._tabs.__iter__() def __len__(self): return len(self._tabs) def __getitem__(self, index): if self._tabs[index].browser._is_crashed: raise Exception else: return self._tabs[index] def GetTabById(self, identifier): """The identifier of a tab can be accessed with tab.id.""" for tab in self._tabs: if tab.id == identifier: return tab return None class FakeInspectorWebsocket(object): _NOTIFICATION_EVENT = 1 _NOTIFICATION_CALLBACK = 2 """A fake InspectorWebsocket. A fake that allows tests to send pregenerated data. Normal InspectorWebsockets allow for any number of domain handlers. This fake only allows up to 1 domain handler, and assumes that the domain of the response always matches that of the handler. """ def __init__(self, mock_timer): self._mock_timer = mock_timer self._notifications = [] self._response_handlers = {} self._pending_callbacks = {} self._handler = None def RegisterDomain(self, _, handler): self._handler = handler def AddEvent(self, method, params, time): if self._notifications: assert self._notifications[-1][1] < time, ( 'Current response is scheduled earlier than previous response.') response = {'method': method, 'params': params} self._notifications.append((response, time, self._NOTIFICATION_EVENT)) def AddAsyncResponse(self, method, result, time): if self._notifications: assert self._notifications[-1][1] < time, ( 'Current response is scheduled earlier than previous response.') response = {'method': method, 'result': result} self._notifications.append((response, time, self._NOTIFICATION_CALLBACK)) def AddResponseHandler(self, method, handler): self._response_handlers[method] = handler def SyncRequest(self, request, *args, **kwargs): del args, kwargs # unused handler = self._response_handlers[request['method']] return handler(request) if handler else None def 
AsyncRequest(self, request, callback): self._pending_callbacks.setdefault(request['method'], []).append(callback) def SendAndIgnoreResponse(self, request): pass def Connect(self, _): pass def DispatchNotifications(self, timeout): current_time = self._mock_timer.time() if not self._notifications: self._mock_timer.SetTime(current_time + timeout + 1) raise websocket.WebSocketTimeoutException() response, time, kind = self._notifications[0] if time - current_time > timeout: self._mock_timer.SetTime(current_time + timeout + 1) raise websocket.WebSocketTimeoutException() self._notifications.pop(0) self._mock_timer.SetTime(time + 1) if kind == self._NOTIFICATION_EVENT: self._handler(response) elif kind == self._NOTIFICATION_CALLBACK: callback = self._pending_callbacks.get(response['method']).pop(0) callback(response) else: raise Exception('Unexpected response type') class FakeTimer(object): """ A fake timer to fake out the timing for a module. Args: module: module to fake out the time """ def __init__(self, module=None): self._elapsed_time = 0 self._module = module self._actual_time = None if module: assert isinstance(module, ModuleType) self._actual_time = module.time self._module.time = self def sleep(self, time): self._elapsed_time += time def time(self): return self._elapsed_time def SetTime(self, time): self._elapsed_time = time def __del__(self): self.Restore() def Restore(self): if self._module: self._module.time = self._actual_time self._module = None self._actual_time = None
ConfigurePossibleBrowser
identifier_name
__init__.py
# Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Provides fakes for several of Telemetry's internal objects. These allow code like story_runner and Benchmark to be run and tested without compiling or starting a browser. Class names prepended with an underscore are intended to be implementation details, and should not be subclassed; however, some, like _FakeBrowser, have public APIs that may need to be called in tests. """ from telemetry.internal.backends.chrome_inspector import websocket from telemetry.internal.browser import browser_options from telemetry.internal.platform import system_info from telemetry.page import shared_page_state from telemetry.util import image_util from telemetry.testing.internal import fake_gpu_info from types import ModuleType # Classes and functions which are intended to be part of the public # fakes API. class FakePlatform(object): def __init__(self): self._network_controller = None self._tracing_controller = None self._has_battor = False self._os_name = 'FakeOS' self._device_type_name = 'abc' self._is_svelte = False self._is_aosp = True @property def is_host_platform(self): raise NotImplementedError @property def network_controller(self): if self._network_controller is None: self._network_controller = _FakeNetworkController() return self._network_controller @property def tracing_controller(self): if self._tracing_controller is None: self._tracing_controller = _FakeTracingController() return self._tracing_controller def Initialize(self): pass def CanMonitorThermalThrottling(self): return False def IsThermallyThrottled(self): return False def HasBeenThermallyThrottled(self): return False def GetArchName(self): raise NotImplementedError def SetOSName(self, name): self._os_name = name def GetOSName(self): return self._os_name def GetOSVersionName(self): raise NotImplementedError def GetOSVersionDetailString(self): raise 
NotImplementedError def StopAllLocalServers(self): pass def WaitForBatteryTemperature(self, _): pass def HasBattOrConnected(self): return self._has_battor def SetBattOrDetected(self, b): assert isinstance(b, bool) self._has_battor = b # TODO(rnephew): Investigate moving from setters to @property. def SetDeviceTypeName(self, name): self._device_type_name = name def GetDeviceTypeName(self): return self._device_type_name def SetIsSvelte(self, b): assert isinstance(b, bool) self._is_svelte = b def IsSvelte(self): if self._os_name != 'android': raise NotImplementedError return self._is_svelte
return self._is_aosp and self._os_name == 'android' class FakeLinuxPlatform(FakePlatform): def __init__(self): super(FakeLinuxPlatform, self).__init__() self.screenshot_png_data = None self.http_server_directories = [] self.http_server = FakeHTTPServer() @property def is_host_platform(self): return True def GetDeviceTypeName(self): return 'Desktop' def GetArchName(self): return 'x86_64' def GetOSName(self): return 'linux' def GetOSVersionName(self): return 'trusty' def GetOSVersionDetailString(self): return '' def CanTakeScreenshot(self): return bool(self.screenshot_png_data) def TakeScreenshot(self, file_path): if not self.CanTakeScreenshot(): raise NotImplementedError img = image_util.FromBase64Png(self.screenshot_png_data) image_util.WritePngFile(img, file_path) return True def SetHTTPServerDirectories(self, paths): self.http_server_directories.append(paths) class FakeHTTPServer(object): def UrlOf(self, url): del url # unused return 'file:///foo' class FakePossibleBrowser(object): def __init__(self, execute_on_startup=None, execute_after_browser_creation=None): self._returned_browser = _FakeBrowser(FakeLinuxPlatform()) self.browser_type = 'linux' self.supports_tab_control = False self.is_remote = False self.execute_on_startup = execute_on_startup self.execute_after_browser_creation = execute_after_browser_creation @property def returned_browser(self): """The browser object that will be returned through later API calls.""" return self._returned_browser def Create(self, finder_options): if self.execute_on_startup is not None: self.execute_on_startup() del finder_options # unused if self.execute_after_browser_creation is not None: self.execute_after_browser_creation(self._returned_browser) return self.returned_browser @property def platform(self): """The platform object from the returned browser. To change this or set it up, change the returned browser's platform. 
""" return self.returned_browser.platform def IsRemote(self): return self.is_remote def SetCredentialsPath(self, _): pass class FakeSharedPageState(shared_page_state.SharedPageState): def __init__(self, test, finder_options, story_set): super(FakeSharedPageState, self).__init__(test, finder_options, story_set) def _GetPossibleBrowser(self, test, finder_options): p = FakePossibleBrowser() self.ConfigurePossibleBrowser(p) return p def ConfigurePossibleBrowser(self, possible_browser): """Override this to configure the PossibleBrowser. Can make changes to the browser's configuration here via e.g.: possible_browser.returned_browser.returned_system_info = ... """ pass def DidRunStory(self, results): # TODO(kbr): add a test which throws an exception from DidRunStory # to verify the fix from https://crrev.com/86984d5fc56ce00e7b37ebe . super(FakeSharedPageState, self).DidRunStory(results) class FakeSystemInfo(system_info.SystemInfo): def __init__(self, model_name='', gpu_dict=None, command_line=''): if gpu_dict == None: gpu_dict = fake_gpu_info.FAKE_GPU_INFO super(FakeSystemInfo, self).__init__(model_name, gpu_dict, command_line) class _FakeBrowserFinderOptions(browser_options.BrowserFinderOptions): def __init__(self, execute_on_startup=None, execute_after_browser_creation=None, *args, **kwargs): browser_options.BrowserFinderOptions.__init__(self, *args, **kwargs) self.fake_possible_browser = \ FakePossibleBrowser( execute_on_startup=execute_on_startup, execute_after_browser_creation=execute_after_browser_creation) def CreateBrowserFinderOptions(browser_type=None, execute_on_startup=None, execute_after_browser_creation=None): """Creates fake browser finder options for discovering a browser.""" return _FakeBrowserFinderOptions( browser_type=browser_type, execute_on_startup=execute_on_startup, execute_after_browser_creation=execute_after_browser_creation) # Internal classes. 
Note that end users may still need to both call # and mock out methods of these classes, but they should not be # subclassed. class _FakeBrowser(object): def __init__(self, platform): self._tabs = _FakeTabList(self) # Fake the creation of the first tab. self._tabs.New() self._returned_system_info = FakeSystemInfo() self._platform = platform self._browser_type = 'release' self._is_crashed = False @property def platform(self): return self._platform @platform.setter def platform(self, incoming): """Allows overriding of the fake browser's platform object.""" assert isinstance(incoming, FakePlatform) self._platform = incoming @property def returned_system_info(self): """The object which will be returned from calls to GetSystemInfo.""" return self._returned_system_info @returned_system_info.setter def returned_system_info(self, incoming): """Allows overriding of the returned SystemInfo object. Incoming argument must be an instance of FakeSystemInfo.""" assert isinstance(incoming, FakeSystemInfo) self._returned_system_info = incoming @property def browser_type(self): """The browser_type this browser claims to be ('debug', 'release', etc.)""" return self._browser_type @browser_type.setter def browser_type(self, incoming): """Allows setting of the browser_type.""" self._browser_type = incoming @property def credentials(self): return _FakeCredentials() def Close(self): self._is_crashed = False @property def supports_system_info(self): return True def GetSystemInfo(self): return self.returned_system_info @property def supports_tab_control(self): return True @property def tabs(self): return self._tabs def DumpStateUponFailure(self): pass class _FakeCredentials(object): def WarnIfMissingCredentials(self, _): pass class _FakeTracingController(object): def __init__(self): self._is_tracing = False def StartTracing(self, tracing_config, timeout=10): self._is_tracing = True del tracing_config del timeout def StopTracing(self): self._is_tracing = False @property def 
is_tracing_running(self): return self._is_tracing def ClearStateIfNeeded(self): pass def IsChromeTracingSupported(self): return True class _FakeNetworkController(object): def __init__(self): self.wpr_mode = None self.extra_wpr_args = None self.is_initialized = False self.is_open = False self.use_live_traffic = None def InitializeIfNeeded(self, use_live_traffic=False): self.use_live_traffic = use_live_traffic def UpdateTrafficSettings(self, round_trip_latency_ms=None, download_bandwidth_kbps=None, upload_bandwidth_kbps=None): pass def Open(self, wpr_mode, extra_wpr_args, use_wpr_go=False): del use_wpr_go # Unused. self.wpr_mode = wpr_mode self.extra_wpr_args = extra_wpr_args self.is_open = True def Close(self): self.wpr_mode = None self.extra_wpr_args = None self.is_initialized = False self.is_open = False def StartReplay(self, archive_path, make_javascript_deterministic=False): del make_javascript_deterministic # Unused. assert self.is_open self.is_initialized = archive_path is not None def StopReplay(self): self.is_initialized = False class _FakeTab(object): def __init__(self, browser, tab_id): self._browser = browser self._tab_id = str(tab_id) self._collect_garbage_count = 0 self.test_png = None @property def collect_garbage_count(self): return self._collect_garbage_count @property def id(self): return self._tab_id @property def browser(self): return self._browser def WaitForDocumentReadyStateToBeComplete(self, timeout=0): pass def Navigate(self, url, script_to_evaluate_on_commit=None, timeout=0): del script_to_evaluate_on_commit, timeout # unused if url == 'chrome://crash': self.browser._is_crashed = True raise Exception def WaitForDocumentReadyStateToBeInteractiveOrBetter(self, timeout=0): pass def WaitForFrameToBeDisplayed(self, timeout=0): pass def IsAlive(self): return True def CloseConnections(self): pass def CollectGarbage(self): self._collect_garbage_count += 1 def Close(self): pass @property def screenshot_supported(self): return self.test_png is not 
None def Screenshot(self): assert self.screenshot_supported, 'Screenshot is not supported' return image_util.FromBase64Png(self.test_png) class _FakeTabList(object): _current_tab_id = 0 def __init__(self, browser): self._tabs = [] self._browser = browser def New(self, timeout=300): del timeout # unused type(self)._current_tab_id += 1 t = _FakeTab(self._browser, type(self)._current_tab_id) self._tabs.append(t) return t def __iter__(self): return self._tabs.__iter__() def __len__(self): return len(self._tabs) def __getitem__(self, index): if self._tabs[index].browser._is_crashed: raise Exception else: return self._tabs[index] def GetTabById(self, identifier): """The identifier of a tab can be accessed with tab.id.""" for tab in self._tabs: if tab.id == identifier: return tab return None class FakeInspectorWebsocket(object): _NOTIFICATION_EVENT = 1 _NOTIFICATION_CALLBACK = 2 """A fake InspectorWebsocket. A fake that allows tests to send pregenerated data. Normal InspectorWebsockets allow for any number of domain handlers. This fake only allows up to 1 domain handler, and assumes that the domain of the response always matches that of the handler. 
""" def __init__(self, mock_timer): self._mock_timer = mock_timer self._notifications = [] self._response_handlers = {} self._pending_callbacks = {} self._handler = None def RegisterDomain(self, _, handler): self._handler = handler def AddEvent(self, method, params, time): if self._notifications: assert self._notifications[-1][1] < time, ( 'Current response is scheduled earlier than previous response.') response = {'method': method, 'params': params} self._notifications.append((response, time, self._NOTIFICATION_EVENT)) def AddAsyncResponse(self, method, result, time): if self._notifications: assert self._notifications[-1][1] < time, ( 'Current response is scheduled earlier than previous response.') response = {'method': method, 'result': result} self._notifications.append((response, time, self._NOTIFICATION_CALLBACK)) def AddResponseHandler(self, method, handler): self._response_handlers[method] = handler def SyncRequest(self, request, *args, **kwargs): del args, kwargs # unused handler = self._response_handlers[request['method']] return handler(request) if handler else None def AsyncRequest(self, request, callback): self._pending_callbacks.setdefault(request['method'], []).append(callback) def SendAndIgnoreResponse(self, request): pass def Connect(self, _): pass def DispatchNotifications(self, timeout): current_time = self._mock_timer.time() if not self._notifications: self._mock_timer.SetTime(current_time + timeout + 1) raise websocket.WebSocketTimeoutException() response, time, kind = self._notifications[0] if time - current_time > timeout: self._mock_timer.SetTime(current_time + timeout + 1) raise websocket.WebSocketTimeoutException() self._notifications.pop(0) self._mock_timer.SetTime(time + 1) if kind == self._NOTIFICATION_EVENT: self._handler(response) elif kind == self._NOTIFICATION_CALLBACK: callback = self._pending_callbacks.get(response['method']).pop(0) callback(response) else: raise Exception('Unexpected response type') class FakeTimer(object): """ A 
fake timer to fake out the timing for a module. Args: module: module to fake out the time """ def __init__(self, module=None): self._elapsed_time = 0 self._module = module self._actual_time = None if module: assert isinstance(module, ModuleType) self._actual_time = module.time self._module.time = self def sleep(self, time): self._elapsed_time += time def time(self): return self._elapsed_time def SetTime(self, time): self._elapsed_time = time def __del__(self): self.Restore() def Restore(self): if self._module: self._module.time = self._actual_time self._module = None self._actual_time = None
def SetIsAosp(self, b): assert isinstance(b, bool) self._is_aosp = b def IsAosp(self):
random_line_split
__init__.py
# Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Provides fakes for several of Telemetry's internal objects. These allow code like story_runner and Benchmark to be run and tested without compiling or starting a browser. Class names prepended with an underscore are intended to be implementation details, and should not be subclassed; however, some, like _FakeBrowser, have public APIs that may need to be called in tests. """ from telemetry.internal.backends.chrome_inspector import websocket from telemetry.internal.browser import browser_options from telemetry.internal.platform import system_info from telemetry.page import shared_page_state from telemetry.util import image_util from telemetry.testing.internal import fake_gpu_info from types import ModuleType # Classes and functions which are intended to be part of the public # fakes API. class FakePlatform(object): def __init__(self): self._network_controller = None self._tracing_controller = None self._has_battor = False self._os_name = 'FakeOS' self._device_type_name = 'abc' self._is_svelte = False self._is_aosp = True @property def is_host_platform(self): raise NotImplementedError @property def network_controller(self): if self._network_controller is None: self._network_controller = _FakeNetworkController() return self._network_controller @property def tracing_controller(self): if self._tracing_controller is None: self._tracing_controller = _FakeTracingController() return self._tracing_controller def Initialize(self): pass def CanMonitorThermalThrottling(self): return False def IsThermallyThrottled(self): return False def HasBeenThermallyThrottled(self): return False def GetArchName(self): raise NotImplementedError def SetOSName(self, name): self._os_name = name def GetOSName(self): return self._os_name def GetOSVersionName(self): raise NotImplementedError def GetOSVersionDetailString(self): raise 
NotImplementedError def StopAllLocalServers(self): pass def WaitForBatteryTemperature(self, _): pass def HasBattOrConnected(self): return self._has_battor def SetBattOrDetected(self, b): assert isinstance(b, bool) self._has_battor = b # TODO(rnephew): Investigate moving from setters to @property. def SetDeviceTypeName(self, name): self._device_type_name = name def GetDeviceTypeName(self): return self._device_type_name def SetIsSvelte(self, b): assert isinstance(b, bool) self._is_svelte = b def IsSvelte(self): if self._os_name != 'android': raise NotImplementedError return self._is_svelte def SetIsAosp(self, b): assert isinstance(b, bool) self._is_aosp = b def IsAosp(self): return self._is_aosp and self._os_name == 'android' class FakeLinuxPlatform(FakePlatform): def __init__(self): super(FakeLinuxPlatform, self).__init__() self.screenshot_png_data = None self.http_server_directories = [] self.http_server = FakeHTTPServer() @property def is_host_platform(self): return True def GetDeviceTypeName(self): return 'Desktop' def GetArchName(self): return 'x86_64' def GetOSName(self): return 'linux' def GetOSVersionName(self): return 'trusty' def GetOSVersionDetailString(self): return '' def CanTakeScreenshot(self): return bool(self.screenshot_png_data) def TakeScreenshot(self, file_path): if not self.CanTakeScreenshot(): raise NotImplementedError img = image_util.FromBase64Png(self.screenshot_png_data) image_util.WritePngFile(img, file_path) return True def SetHTTPServerDirectories(self, paths): self.http_server_directories.append(paths) class FakeHTTPServer(object): def UrlOf(self, url): del url # unused return 'file:///foo' class FakePossibleBrowser(object): def __init__(self, execute_on_startup=None, execute_after_browser_creation=None): self._returned_browser = _FakeBrowser(FakeLinuxPlatform()) self.browser_type = 'linux' self.supports_tab_control = False self.is_remote = False self.execute_on_startup = execute_on_startup self.execute_after_browser_creation = 
execute_after_browser_creation @property def returned_browser(self): """The browser object that will be returned through later API calls.""" return self._returned_browser def Create(self, finder_options): if self.execute_on_startup is not None: self.execute_on_startup() del finder_options # unused if self.execute_after_browser_creation is not None: self.execute_after_browser_creation(self._returned_browser) return self.returned_browser @property def platform(self): """The platform object from the returned browser. To change this or set it up, change the returned browser's platform. """ return self.returned_browser.platform def IsRemote(self): return self.is_remote def SetCredentialsPath(self, _): pass class FakeSharedPageState(shared_page_state.SharedPageState): def __init__(self, test, finder_options, story_set): super(FakeSharedPageState, self).__init__(test, finder_options, story_set) def _GetPossibleBrowser(self, test, finder_options): p = FakePossibleBrowser() self.ConfigurePossibleBrowser(p) return p def ConfigurePossibleBrowser(self, possible_browser): """Override this to configure the PossibleBrowser. Can make changes to the browser's configuration here via e.g.: possible_browser.returned_browser.returned_system_info = ... """ pass def DidRunStory(self, results): # TODO(kbr): add a test which throws an exception from DidRunStory # to verify the fix from https://crrev.com/86984d5fc56ce00e7b37ebe . 
super(FakeSharedPageState, self).DidRunStory(results) class FakeSystemInfo(system_info.SystemInfo): def __init__(self, model_name='', gpu_dict=None, command_line=''): if gpu_dict == None: gpu_dict = fake_gpu_info.FAKE_GPU_INFO super(FakeSystemInfo, self).__init__(model_name, gpu_dict, command_line) class _FakeBrowserFinderOptions(browser_options.BrowserFinderOptions): def __init__(self, execute_on_startup=None, execute_after_browser_creation=None, *args, **kwargs): browser_options.BrowserFinderOptions.__init__(self, *args, **kwargs) self.fake_possible_browser = \ FakePossibleBrowser( execute_on_startup=execute_on_startup, execute_after_browser_creation=execute_after_browser_creation) def CreateBrowserFinderOptions(browser_type=None, execute_on_startup=None, execute_after_browser_creation=None): """Creates fake browser finder options for discovering a browser.""" return _FakeBrowserFinderOptions( browser_type=browser_type, execute_on_startup=execute_on_startup, execute_after_browser_creation=execute_after_browser_creation) # Internal classes. Note that end users may still need to both call # and mock out methods of these classes, but they should not be # subclassed. class _FakeBrowser(object): def __init__(self, platform): self._tabs = _FakeTabList(self) # Fake the creation of the first tab. self._tabs.New() self._returned_system_info = FakeSystemInfo() self._platform = platform self._browser_type = 'release' self._is_crashed = False @property def platform(self): return self._platform @platform.setter def platform(self, incoming): """Allows overriding of the fake browser's platform object.""" assert isinstance(incoming, FakePlatform) self._platform = incoming @property def returned_system_info(self): """The object which will be returned from calls to GetSystemInfo.""" return self._returned_system_info @returned_system_info.setter def returned_system_info(self, incoming): """Allows overriding of the returned SystemInfo object. 
Incoming argument must be an instance of FakeSystemInfo.""" assert isinstance(incoming, FakeSystemInfo) self._returned_system_info = incoming @property def browser_type(self): """The browser_type this browser claims to be ('debug', 'release', etc.)""" return self._browser_type @browser_type.setter def browser_type(self, incoming): """Allows setting of the browser_type.""" self._browser_type = incoming @property def credentials(self): return _FakeCredentials() def Close(self): self._is_crashed = False @property def supports_system_info(self): return True def GetSystemInfo(self): return self.returned_system_info @property def supports_tab_control(self): return True @property def tabs(self): return self._tabs def DumpStateUponFailure(self): pass class _FakeCredentials(object): def WarnIfMissingCredentials(self, _): pass class _FakeTracingController(object): def __init__(self): self._is_tracing = False def StartTracing(self, tracing_config, timeout=10): self._is_tracing = True del tracing_config del timeout def StopTracing(self): self._is_tracing = False @property def is_tracing_running(self): return self._is_tracing def ClearStateIfNeeded(self): pass def IsChromeTracingSupported(self): return True class _FakeNetworkController(object): def __init__(self): self.wpr_mode = None self.extra_wpr_args = None self.is_initialized = False self.is_open = False self.use_live_traffic = None def InitializeIfNeeded(self, use_live_traffic=False): self.use_live_traffic = use_live_traffic def UpdateTrafficSettings(self, round_trip_latency_ms=None, download_bandwidth_kbps=None, upload_bandwidth_kbps=None): pass def Open(self, wpr_mode, extra_wpr_args, use_wpr_go=False): del use_wpr_go # Unused. 
self.wpr_mode = wpr_mode self.extra_wpr_args = extra_wpr_args self.is_open = True def Close(self): self.wpr_mode = None self.extra_wpr_args = None self.is_initialized = False self.is_open = False def StartReplay(self, archive_path, make_javascript_deterministic=False): del make_javascript_deterministic # Unused. assert self.is_open self.is_initialized = archive_path is not None def StopReplay(self): self.is_initialized = False class _FakeTab(object): def __init__(self, browser, tab_id): self._browser = browser self._tab_id = str(tab_id) self._collect_garbage_count = 0 self.test_png = None @property def collect_garbage_count(self): return self._collect_garbage_count @property def id(self): return self._tab_id @property def browser(self): return self._browser def WaitForDocumentReadyStateToBeComplete(self, timeout=0): pass def Navigate(self, url, script_to_evaluate_on_commit=None, timeout=0): del script_to_evaluate_on_commit, timeout # unused if url == 'chrome://crash': self.browser._is_crashed = True raise Exception def WaitForDocumentReadyStateToBeInteractiveOrBetter(self, timeout=0): pass def WaitForFrameToBeDisplayed(self, timeout=0): pass def IsAlive(self): return True def CloseConnections(self): pass def CollectGarbage(self): self._collect_garbage_count += 1 def Close(self): pass @property def screenshot_supported(self): return self.test_png is not None def Screenshot(self): assert self.screenshot_supported, 'Screenshot is not supported' return image_util.FromBase64Png(self.test_png) class _FakeTabList(object): _current_tab_id = 0 def __init__(self, browser): self._tabs = [] self._browser = browser def New(self, timeout=300): del timeout # unused type(self)._current_tab_id += 1 t = _FakeTab(self._browser, type(self)._current_tab_id) self._tabs.append(t) return t def __iter__(self): return self._tabs.__iter__() def __len__(self): return len(self._tabs) def __getitem__(self, index): if self._tabs[index].browser._is_crashed: raise Exception else: return 
self._tabs[index] def GetTabById(self, identifier): """The identifier of a tab can be accessed with tab.id.""" for tab in self._tabs: if tab.id == identifier: return tab return None class FakeInspectorWebsocket(object): _NOTIFICATION_EVENT = 1 _NOTIFICATION_CALLBACK = 2 """A fake InspectorWebsocket. A fake that allows tests to send pregenerated data. Normal InspectorWebsockets allow for any number of domain handlers. This fake only allows up to 1 domain handler, and assumes that the domain of the response always matches that of the handler. """ def __init__(self, mock_timer): self._mock_timer = mock_timer self._notifications = [] self._response_handlers = {} self._pending_callbacks = {} self._handler = None def RegisterDomain(self, _, handler): self._handler = handler def AddEvent(self, method, params, time): if self._notifications: assert self._notifications[-1][1] < time, ( 'Current response is scheduled earlier than previous response.') response = {'method': method, 'params': params} self._notifications.append((response, time, self._NOTIFICATION_EVENT)) def AddAsyncResponse(self, method, result, time): if self._notifications:
response = {'method': method, 'result': result} self._notifications.append((response, time, self._NOTIFICATION_CALLBACK)) def AddResponseHandler(self, method, handler): self._response_handlers[method] = handler def SyncRequest(self, request, *args, **kwargs): del args, kwargs # unused handler = self._response_handlers[request['method']] return handler(request) if handler else None def AsyncRequest(self, request, callback): self._pending_callbacks.setdefault(request['method'], []).append(callback) def SendAndIgnoreResponse(self, request): pass def Connect(self, _): pass def DispatchNotifications(self, timeout): current_time = self._mock_timer.time() if not self._notifications: self._mock_timer.SetTime(current_time + timeout + 1) raise websocket.WebSocketTimeoutException() response, time, kind = self._notifications[0] if time - current_time > timeout: self._mock_timer.SetTime(current_time + timeout + 1) raise websocket.WebSocketTimeoutException() self._notifications.pop(0) self._mock_timer.SetTime(time + 1) if kind == self._NOTIFICATION_EVENT: self._handler(response) elif kind == self._NOTIFICATION_CALLBACK: callback = self._pending_callbacks.get(response['method']).pop(0) callback(response) else: raise Exception('Unexpected response type') class FakeTimer(object): """ A fake timer to fake out the timing for a module. Args: module: module to fake out the time """ def __init__(self, module=None): self._elapsed_time = 0 self._module = module self._actual_time = None if module: assert isinstance(module, ModuleType) self._actual_time = module.time self._module.time = self def sleep(self, time): self._elapsed_time += time def time(self): return self._elapsed_time def SetTime(self, time): self._elapsed_time = time def __del__(self): self.Restore() def Restore(self): if self._module: self._module.time = self._actual_time self._module = None self._actual_time = None
assert self._notifications[-1][1] < time, ( 'Current response is scheduled earlier than previous response.')
conditional_block
get_thumbnails.py
#! /usr/bin/env python """ Author: Gary Foreman Created: August 6, 2016 This script scrapes thumbnail images from thread links in the For Sale: Bass Guitars forum at talkbass.com """ from __future__ import print_function from glob import glob import os import sys import urllib from PIL import Image, ImageOps import pymongo sys.path.append('..') from utilities.utilities import pause_scrape, report_progress MIN_PAUSE_SECONDS = 0.15 MAX_PAUSE_SECONDS = 0.5 REPORT_MESSAGE = 'Scraped image' REPORT_FREQUENCY = 300 DATA_PATH = os.path.join('..', 'data', 'images') def make_data_dir(): """ Checks to see whether DATA_PATH exists. If not, creates it. """ if not os.path.isdir(DATA_PATH): os.makedirs(DATA_PATH) def filename_from_url(thumbnail_url): """ thumbnail_url : a string with a url to a bass image Strips filename from the end of thumbnail_url and prepends DATA_PATH. Also ensures the file extension is jpg """ filename = thumbnail_url.strip('/').split('/')[-1] basename, ext = os.path.splitext(filename) return os.path.join(DATA_PATH, basename + '.jpg') def download_thumb(thumbnail_url): """ thumbnail_url : a string with a url to a bass image Pulls dowm image from thumbnail_url and stores in DATA_DIR """ filename = filename_from_url(thumbnail_url) try: urllib.urlretrieve(thumbnail_url, filename) except IOError: # URL is not an image file pass except UnicodeError: # URL contains non-ASCII characters pass def crop_image(filename): """ filename: a string with the name to a locally stored image file Crops image at filename to 128 x 128 pixels and overwrites original """ try: img = Image.open(filename) img = ImageOps.fit(img, (128, 128), Image.ANTIALIAS) img.save(filename) except NameError: # File does not exist pass except IOError: # Image is corrupted try: os.remove(filename) except OSError: # Filename is too long pass def main(): make_data_dir() # Establish connection to MongoDB open on port 27017 client = pymongo.MongoClient() # Access threads database db = 
client.for_sale_bass_guitars # Get database documents cursor = db.threads.find() # Get list of images that have already been scraped scraped_image_list = glob(os.path.join(DATA_PATH, '*.jpg')) thumbnail_url_list = [] for document in cursor:
client.close() thumbnail_count = 1 for thumbnail_url in thumbnail_url_list: download_thumb(thumbnail_url) filename = filename_from_url(thumbnail_url) crop_image(filename) pause_scrape(MIN_PAUSE_SECONDS, MAX_PAUSE_SECONDS) report_progress(thumbnail_count, REPORT_MESSAGE, REPORT_FREQUENCY) thumbnail_count += 1 if __name__ == "__main__": main()
thumbnail_url = document[u'image_url'] try: filename = filename_from_url(thumbnail_url) if filename not in scraped_image_list: thumbnail_url_list.append(thumbnail_url) except AttributeError: # thread has no associated thumbnail pass
conditional_block
get_thumbnails.py
#! /usr/bin/env python """ Author: Gary Foreman Created: August 6, 2016 This script scrapes thumbnail images from thread links in the For Sale: Bass Guitars forum at talkbass.com """ from __future__ import print_function from glob import glob import os import sys import urllib from PIL import Image, ImageOps import pymongo sys.path.append('..') from utilities.utilities import pause_scrape, report_progress MIN_PAUSE_SECONDS = 0.15 MAX_PAUSE_SECONDS = 0.5 REPORT_MESSAGE = 'Scraped image' REPORT_FREQUENCY = 300 DATA_PATH = os.path.join('..', 'data', 'images') def make_data_dir(): """ Checks to see whether DATA_PATH exists. If not, creates it. """ if not os.path.isdir(DATA_PATH): os.makedirs(DATA_PATH) def filename_from_url(thumbnail_url): """ thumbnail_url : a string with a url to a bass image Strips filename from the end of thumbnail_url and prepends DATA_PATH. Also ensures the file extension is jpg """ filename = thumbnail_url.strip('/').split('/')[-1] basename, ext = os.path.splitext(filename) return os.path.join(DATA_PATH, basename + '.jpg')
Pulls dowm image from thumbnail_url and stores in DATA_DIR """ filename = filename_from_url(thumbnail_url) try: urllib.urlretrieve(thumbnail_url, filename) except IOError: # URL is not an image file pass except UnicodeError: # URL contains non-ASCII characters pass def crop_image(filename): """ filename: a string with the name to a locally stored image file Crops image at filename to 128 x 128 pixels and overwrites original """ try: img = Image.open(filename) img = ImageOps.fit(img, (128, 128), Image.ANTIALIAS) img.save(filename) except NameError: # File does not exist pass except IOError: # Image is corrupted try: os.remove(filename) except OSError: # Filename is too long pass def main(): make_data_dir() # Establish connection to MongoDB open on port 27017 client = pymongo.MongoClient() # Access threads database db = client.for_sale_bass_guitars # Get database documents cursor = db.threads.find() # Get list of images that have already been scraped scraped_image_list = glob(os.path.join(DATA_PATH, '*.jpg')) thumbnail_url_list = [] for document in cursor: thumbnail_url = document[u'image_url'] try: filename = filename_from_url(thumbnail_url) if filename not in scraped_image_list: thumbnail_url_list.append(thumbnail_url) except AttributeError: # thread has no associated thumbnail pass client.close() thumbnail_count = 1 for thumbnail_url in thumbnail_url_list: download_thumb(thumbnail_url) filename = filename_from_url(thumbnail_url) crop_image(filename) pause_scrape(MIN_PAUSE_SECONDS, MAX_PAUSE_SECONDS) report_progress(thumbnail_count, REPORT_MESSAGE, REPORT_FREQUENCY) thumbnail_count += 1 if __name__ == "__main__": main()
def download_thumb(thumbnail_url): """ thumbnail_url : a string with a url to a bass image
random_line_split
get_thumbnails.py
#! /usr/bin/env python """ Author: Gary Foreman Created: August 6, 2016 This script scrapes thumbnail images from thread links in the For Sale: Bass Guitars forum at talkbass.com """ from __future__ import print_function from glob import glob import os import sys import urllib from PIL import Image, ImageOps import pymongo sys.path.append('..') from utilities.utilities import pause_scrape, report_progress MIN_PAUSE_SECONDS = 0.15 MAX_PAUSE_SECONDS = 0.5 REPORT_MESSAGE = 'Scraped image' REPORT_FREQUENCY = 300 DATA_PATH = os.path.join('..', 'data', 'images') def make_data_dir(): """ Checks to see whether DATA_PATH exists. If not, creates it. """ if not os.path.isdir(DATA_PATH): os.makedirs(DATA_PATH) def filename_from_url(thumbnail_url):
def download_thumb(thumbnail_url): """ thumbnail_url : a string with a url to a bass image Pulls dowm image from thumbnail_url and stores in DATA_DIR """ filename = filename_from_url(thumbnail_url) try: urllib.urlretrieve(thumbnail_url, filename) except IOError: # URL is not an image file pass except UnicodeError: # URL contains non-ASCII characters pass def crop_image(filename): """ filename: a string with the name to a locally stored image file Crops image at filename to 128 x 128 pixels and overwrites original """ try: img = Image.open(filename) img = ImageOps.fit(img, (128, 128), Image.ANTIALIAS) img.save(filename) except NameError: # File does not exist pass except IOError: # Image is corrupted try: os.remove(filename) except OSError: # Filename is too long pass def main(): make_data_dir() # Establish connection to MongoDB open on port 27017 client = pymongo.MongoClient() # Access threads database db = client.for_sale_bass_guitars # Get database documents cursor = db.threads.find() # Get list of images that have already been scraped scraped_image_list = glob(os.path.join(DATA_PATH, '*.jpg')) thumbnail_url_list = [] for document in cursor: thumbnail_url = document[u'image_url'] try: filename = filename_from_url(thumbnail_url) if filename not in scraped_image_list: thumbnail_url_list.append(thumbnail_url) except AttributeError: # thread has no associated thumbnail pass client.close() thumbnail_count = 1 for thumbnail_url in thumbnail_url_list: download_thumb(thumbnail_url) filename = filename_from_url(thumbnail_url) crop_image(filename) pause_scrape(MIN_PAUSE_SECONDS, MAX_PAUSE_SECONDS) report_progress(thumbnail_count, REPORT_MESSAGE, REPORT_FREQUENCY) thumbnail_count += 1 if __name__ == "__main__": main()
""" thumbnail_url : a string with a url to a bass image Strips filename from the end of thumbnail_url and prepends DATA_PATH. Also ensures the file extension is jpg """ filename = thumbnail_url.strip('/').split('/')[-1] basename, ext = os.path.splitext(filename) return os.path.join(DATA_PATH, basename + '.jpg')
identifier_body
get_thumbnails.py
#! /usr/bin/env python """ Author: Gary Foreman Created: August 6, 2016 This script scrapes thumbnail images from thread links in the For Sale: Bass Guitars forum at talkbass.com """ from __future__ import print_function from glob import glob import os import sys import urllib from PIL import Image, ImageOps import pymongo sys.path.append('..') from utilities.utilities import pause_scrape, report_progress MIN_PAUSE_SECONDS = 0.15 MAX_PAUSE_SECONDS = 0.5 REPORT_MESSAGE = 'Scraped image' REPORT_FREQUENCY = 300 DATA_PATH = os.path.join('..', 'data', 'images') def
(): """ Checks to see whether DATA_PATH exists. If not, creates it. """ if not os.path.isdir(DATA_PATH): os.makedirs(DATA_PATH) def filename_from_url(thumbnail_url): """ thumbnail_url : a string with a url to a bass image Strips filename from the end of thumbnail_url and prepends DATA_PATH. Also ensures the file extension is jpg """ filename = thumbnail_url.strip('/').split('/')[-1] basename, ext = os.path.splitext(filename) return os.path.join(DATA_PATH, basename + '.jpg') def download_thumb(thumbnail_url): """ thumbnail_url : a string with a url to a bass image Pulls dowm image from thumbnail_url and stores in DATA_DIR """ filename = filename_from_url(thumbnail_url) try: urllib.urlretrieve(thumbnail_url, filename) except IOError: # URL is not an image file pass except UnicodeError: # URL contains non-ASCII characters pass def crop_image(filename): """ filename: a string with the name to a locally stored image file Crops image at filename to 128 x 128 pixels and overwrites original """ try: img = Image.open(filename) img = ImageOps.fit(img, (128, 128), Image.ANTIALIAS) img.save(filename) except NameError: # File does not exist pass except IOError: # Image is corrupted try: os.remove(filename) except OSError: # Filename is too long pass def main(): make_data_dir() # Establish connection to MongoDB open on port 27017 client = pymongo.MongoClient() # Access threads database db = client.for_sale_bass_guitars # Get database documents cursor = db.threads.find() # Get list of images that have already been scraped scraped_image_list = glob(os.path.join(DATA_PATH, '*.jpg')) thumbnail_url_list = [] for document in cursor: thumbnail_url = document[u'image_url'] try: filename = filename_from_url(thumbnail_url) if filename not in scraped_image_list: thumbnail_url_list.append(thumbnail_url) except AttributeError: # thread has no associated thumbnail pass client.close() thumbnail_count = 1 for thumbnail_url in thumbnail_url_list: download_thumb(thumbnail_url) filename = 
filename_from_url(thumbnail_url) crop_image(filename) pause_scrape(MIN_PAUSE_SECONDS, MAX_PAUSE_SECONDS) report_progress(thumbnail_count, REPORT_MESSAGE, REPORT_FREQUENCY) thumbnail_count += 1 if __name__ == "__main__": main()
make_data_dir
identifier_name
generate_provider_logos_collage_image.py
#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # # Script which generates a collage of provider logos from multiple provider # logo files. # # It works in two steps: # # 1. Resize all the provider logo files (reduce the dimensions) # 2. Assemble a final image from the resized images import os import sys import argparse import subprocess import random from os.path import join as pjoin DIMENSIONS = '150x150' # Dimensions of the resized image (<width>x<height>) GEOMETRY = '+4+4' # How to arrange images (+<rows>+<columns>) TO_CREATE_DIRS = ['resized/', 'final/'] def setup(output_path): """ Create missing directories. """ for directory in TO_CREATE_DIRS: final_path = pjoin(output_path, directory) if not os.path.exists(final_path): os.makedirs(final_path) def get_logo_files(input_path): logo_files = os.listdir(input_path) logo_files = [name for name in logo_files if 'resized' not in name and name.endswith('png')] logo_files = [pjoin(input_path, name) for name in logo_files] return logo_files def resize_images(logo_files, output_path): resized_images = [] for logo_file in logo_files:
return resized_images def assemble_final_image(resized_images, output_path): final_name = pjoin(output_path, 'final/logos.png') random.shuffle(resized_images) values = {'images': ' '.join(resized_images), 'geometry': GEOMETRY, 'out_name': final_name} cmd = 'montage %(images)s -geometry %(geometry)s %(out_name)s' cmd = cmd % values print('Generating final image: %(name)s' % {'name': final_name}) subprocess.call(cmd, shell=True) def main(input_path, output_path): if not os.path.exists(input_path): print('Path doesn\'t exist: %s' % (input_path)) sys.exit(2) if not os.path.exists(output_path): print('Path doesn\'t exist: %s' % (output_path)) sys.exit(2) logo_files = get_logo_files(input_path=input_path) setup(output_path=output_path) resized_images = resize_images(logo_files=logo_files, output_path=output_path) assemble_final_image(resized_images=resized_images, output_path=output_path) if __name__ == '__main__': parser = argparse.ArgumentParser(description='Assemble provider logos ' ' in a single image') parser.add_argument('--input-path', action='store', help='Path to directory which contains provider ' 'logo files') parser.add_argument('--output-path', action='store', help='Path where the new files will be written') args = parser.parse_args() input_path = os.path.abspath(args.input_path) output_path = os.path.abspath(args.output_path) main(input_path=input_path, output_path=output_path)
name, ext = os.path.splitext(os.path.basename(logo_file)) new_name = '%s%s' % (name, ext) out_name = pjoin(output_path, 'resized/', new_name) print('Resizing image: %(name)s' % {'name': logo_file}) values = {'name': logo_file, 'out_name': out_name, 'dimensions': DIMENSIONS} cmd = 'convert %(name)s -resize %(dimensions)s %(out_name)s' cmd = cmd % values subprocess.call(cmd, shell=True) resized_images.append(out_name)
conditional_block
generate_provider_logos_collage_image.py
#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # # Script which generates a collage of provider logos from multiple provider # logo files. # # It works in two steps: # # 1. Resize all the provider logo files (reduce the dimensions) # 2. Assemble a final image from the resized images import os import sys import argparse import subprocess import random from os.path import join as pjoin DIMENSIONS = '150x150' # Dimensions of the resized image (<width>x<height>) GEOMETRY = '+4+4' # How to arrange images (+<rows>+<columns>) TO_CREATE_DIRS = ['resized/', 'final/'] def setup(output_path): """ Create missing directories. """ for directory in TO_CREATE_DIRS: final_path = pjoin(output_path, directory) if not os.path.exists(final_path): os.makedirs(final_path) def get_logo_files(input_path): logo_files = os.listdir(input_path) logo_files = [name for name in logo_files if 'resized' not in name and name.endswith('png')] logo_files = [pjoin(input_path, name) for name in logo_files] return logo_files def resize_images(logo_files, output_path): resized_images = [] for logo_file in logo_files: name, ext = os.path.splitext(os.path.basename(logo_file)) new_name = '%s%s' % (name, ext) out_name = pjoin(output_path, 'resized/', new_name)
print('Resizing image: %(name)s' % {'name': logo_file}) values = {'name': logo_file, 'out_name': out_name, 'dimensions': DIMENSIONS} cmd = 'convert %(name)s -resize %(dimensions)s %(out_name)s' cmd = cmd % values subprocess.call(cmd, shell=True) resized_images.append(out_name) return resized_images def assemble_final_image(resized_images, output_path): final_name = pjoin(output_path, 'final/logos.png') random.shuffle(resized_images) values = {'images': ' '.join(resized_images), 'geometry': GEOMETRY, 'out_name': final_name} cmd = 'montage %(images)s -geometry %(geometry)s %(out_name)s' cmd = cmd % values print('Generating final image: %(name)s' % {'name': final_name}) subprocess.call(cmd, shell=True) def main(input_path, output_path): if not os.path.exists(input_path): print('Path doesn\'t exist: %s' % (input_path)) sys.exit(2) if not os.path.exists(output_path): print('Path doesn\'t exist: %s' % (output_path)) sys.exit(2) logo_files = get_logo_files(input_path=input_path) setup(output_path=output_path) resized_images = resize_images(logo_files=logo_files, output_path=output_path) assemble_final_image(resized_images=resized_images, output_path=output_path) if __name__ == '__main__': parser = argparse.ArgumentParser(description='Assemble provider logos ' ' in a single image') parser.add_argument('--input-path', action='store', help='Path to directory which contains provider ' 'logo files') parser.add_argument('--output-path', action='store', help='Path where the new files will be written') args = parser.parse_args() input_path = os.path.abspath(args.input_path) output_path = os.path.abspath(args.output_path) main(input_path=input_path, output_path=output_path)
random_line_split
generate_provider_logos_collage_image.py
#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # # Script which generates a collage of provider logos from multiple provider # logo files. # # It works in two steps: # # 1. Resize all the provider logo files (reduce the dimensions) # 2. Assemble a final image from the resized images import os import sys import argparse import subprocess import random from os.path import join as pjoin DIMENSIONS = '150x150' # Dimensions of the resized image (<width>x<height>) GEOMETRY = '+4+4' # How to arrange images (+<rows>+<columns>) TO_CREATE_DIRS = ['resized/', 'final/'] def setup(output_path): """ Create missing directories. """ for directory in TO_CREATE_DIRS: final_path = pjoin(output_path, directory) if not os.path.exists(final_path): os.makedirs(final_path) def
(input_path): logo_files = os.listdir(input_path) logo_files = [name for name in logo_files if 'resized' not in name and name.endswith('png')] logo_files = [pjoin(input_path, name) for name in logo_files] return logo_files def resize_images(logo_files, output_path): resized_images = [] for logo_file in logo_files: name, ext = os.path.splitext(os.path.basename(logo_file)) new_name = '%s%s' % (name, ext) out_name = pjoin(output_path, 'resized/', new_name) print('Resizing image: %(name)s' % {'name': logo_file}) values = {'name': logo_file, 'out_name': out_name, 'dimensions': DIMENSIONS} cmd = 'convert %(name)s -resize %(dimensions)s %(out_name)s' cmd = cmd % values subprocess.call(cmd, shell=True) resized_images.append(out_name) return resized_images def assemble_final_image(resized_images, output_path): final_name = pjoin(output_path, 'final/logos.png') random.shuffle(resized_images) values = {'images': ' '.join(resized_images), 'geometry': GEOMETRY, 'out_name': final_name} cmd = 'montage %(images)s -geometry %(geometry)s %(out_name)s' cmd = cmd % values print('Generating final image: %(name)s' % {'name': final_name}) subprocess.call(cmd, shell=True) def main(input_path, output_path): if not os.path.exists(input_path): print('Path doesn\'t exist: %s' % (input_path)) sys.exit(2) if not os.path.exists(output_path): print('Path doesn\'t exist: %s' % (output_path)) sys.exit(2) logo_files = get_logo_files(input_path=input_path) setup(output_path=output_path) resized_images = resize_images(logo_files=logo_files, output_path=output_path) assemble_final_image(resized_images=resized_images, output_path=output_path) if __name__ == '__main__': parser = argparse.ArgumentParser(description='Assemble provider logos ' ' in a single image') parser.add_argument('--input-path', action='store', help='Path to directory which contains provider ' 'logo files') parser.add_argument('--output-path', action='store', help='Path where the new files will be written') args = parser.parse_args() 
input_path = os.path.abspath(args.input_path) output_path = os.path.abspath(args.output_path) main(input_path=input_path, output_path=output_path)
get_logo_files
identifier_name
generate_provider_logos_collage_image.py
#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # # Script which generates a collage of provider logos from multiple provider # logo files. # # It works in two steps: # # 1. Resize all the provider logo files (reduce the dimensions) # 2. Assemble a final image from the resized images import os import sys import argparse import subprocess import random from os.path import join as pjoin DIMENSIONS = '150x150' # Dimensions of the resized image (<width>x<height>) GEOMETRY = '+4+4' # How to arrange images (+<rows>+<columns>) TO_CREATE_DIRS = ['resized/', 'final/'] def setup(output_path): """ Create missing directories. 
""" for directory in TO_CREATE_DIRS: final_path = pjoin(output_path, directory) if not os.path.exists(final_path): os.makedirs(final_path) def get_logo_files(input_path): logo_files = os.listdir(input_path) logo_files = [name for name in logo_files if 'resized' not in name and name.endswith('png')] logo_files = [pjoin(input_path, name) for name in logo_files] return logo_files def resize_images(logo_files, output_path): resized_images = [] for logo_file in logo_files: name, ext = os.path.splitext(os.path.basename(logo_file)) new_name = '%s%s' % (name, ext) out_name = pjoin(output_path, 'resized/', new_name) print('Resizing image: %(name)s' % {'name': logo_file}) values = {'name': logo_file, 'out_name': out_name, 'dimensions': DIMENSIONS} cmd = 'convert %(name)s -resize %(dimensions)s %(out_name)s' cmd = cmd % values subprocess.call(cmd, shell=True) resized_images.append(out_name) return resized_images def assemble_final_image(resized_images, output_path): final_name = pjoin(output_path, 'final/logos.png') random.shuffle(resized_images) values = {'images': ' '.join(resized_images), 'geometry': GEOMETRY, 'out_name': final_name} cmd = 'montage %(images)s -geometry %(geometry)s %(out_name)s' cmd = cmd % values print('Generating final image: %(name)s' % {'name': final_name}) subprocess.call(cmd, shell=True) def main(input_path, output_path):
if __name__ == '__main__': parser = argparse.ArgumentParser(description='Assemble provider logos ' ' in a single image') parser.add_argument('--input-path', action='store', help='Path to directory which contains provider ' 'logo files') parser.add_argument('--output-path', action='store', help='Path where the new files will be written') args = parser.parse_args() input_path = os.path.abspath(args.input_path) output_path = os.path.abspath(args.output_path) main(input_path=input_path, output_path=output_path)
if not os.path.exists(input_path): print('Path doesn\'t exist: %s' % (input_path)) sys.exit(2) if not os.path.exists(output_path): print('Path doesn\'t exist: %s' % (output_path)) sys.exit(2) logo_files = get_logo_files(input_path=input_path) setup(output_path=output_path) resized_images = resize_images(logo_files=logo_files, output_path=output_path) assemble_final_image(resized_images=resized_images, output_path=output_path)
identifier_body
main.rs
extern crate rand; use std::io; use std::cmp::Ordering; use rand::Rng; fn
() { println!("Guess the number!"); let secret_number = rand::thread_rng().gen_range(1, 101); //println!("The secret number is: {}", secret_number); loop { println!("Please input your guess."); let mut guess = String::new(); io::stdin().read_line(&mut guess) .expect("failed to read line"); let guess: u32 = match guess.trim().parse() { Ok(num) => num, Err(_) => continue, }; println!("You guessed: {}", guess); match guess.cmp(&secret_number) { Ordering::Less => println!("Too small!"), Ordering::Greater => println!("Too big!"), Ordering::Equal => { println!("You win!"); break; } } } }
main
identifier_name
main.rs
extern crate rand; use std::io; use std::cmp::Ordering; use rand::Rng; fn main() { println!("Guess the number!"); let secret_number = rand::thread_rng().gen_range(1, 101); //println!("The secret number is: {}", secret_number); loop { println!("Please input your guess."); let mut guess = String::new(); io::stdin().read_line(&mut guess) .expect("failed to read line"); let guess: u32 = match guess.trim().parse() { Ok(num) => num, Err(_) => continue,
println!("You guessed: {}", guess); match guess.cmp(&secret_number) { Ordering::Less => println!("Too small!"), Ordering::Greater => println!("Too big!"), Ordering::Equal => { println!("You win!"); break; } } } }
};
random_line_split
main.rs
extern crate rand; use std::io; use std::cmp::Ordering; use rand::Rng; fn main()
{ println!("Guess the number!"); let secret_number = rand::thread_rng().gen_range(1, 101); //println!("The secret number is: {}", secret_number); loop { println!("Please input your guess."); let mut guess = String::new(); io::stdin().read_line(&mut guess) .expect("failed to read line"); let guess: u32 = match guess.trim().parse() { Ok(num) => num, Err(_) => continue, }; println!("You guessed: {}", guess); match guess.cmp(&secret_number) { Ordering::Less => println!("Too small!"), Ordering::Greater => println!("Too big!"), Ordering::Equal => { println!("You win!"); break; } } } }
identifier_body
problem3.rs
/* Run tests with; * * rustc --test problem3.rs ; ./problem3 * */ fn prime_factors(mut n: i64) -> Vec<i64> { let mut divisor = 2; let mut factors: Vec<i64> = Vec::new(); while divisor <= (n as f64).sqrt() as i64 { if n%divisor == 0
else { divisor += 1; } } factors.push(n); return factors; } pub fn main() { let factors = prime_factors(600851475143); let largest_prime_factor = factors.last().unwrap(); println!("largest prime factor == {}", largest_prime_factor); } #[cfg(test)] mod test { use super::prime_factors; #[test] fn correct_answer() { let factors = prime_factors(600851475143); let expected_answer = 6857; let computed_answer = *factors.last().unwrap(); assert_eq!(computed_answer, expected_answer); } }
{ factors.push(divisor); n = n / divisor; divisor = 2; }
conditional_block
problem3.rs
/* Run tests with; * * rustc --test problem3.rs ; ./problem3 * */ fn prime_factors(mut n: i64) -> Vec<i64> { let mut divisor = 2;
n = n / divisor; divisor = 2; } else { divisor += 1; } } factors.push(n); return factors; } pub fn main() { let factors = prime_factors(600851475143); let largest_prime_factor = factors.last().unwrap(); println!("largest prime factor == {}", largest_prime_factor); } #[cfg(test)] mod test { use super::prime_factors; #[test] fn correct_answer() { let factors = prime_factors(600851475143); let expected_answer = 6857; let computed_answer = *factors.last().unwrap(); assert_eq!(computed_answer, expected_answer); } }
let mut factors: Vec<i64> = Vec::new(); while divisor <= (n as f64).sqrt() as i64 { if n%divisor == 0 { factors.push(divisor);
random_line_split
problem3.rs
/* Run tests with; * * rustc --test problem3.rs ; ./problem3 * */ fn prime_factors(mut n: i64) -> Vec<i64> { let mut divisor = 2; let mut factors: Vec<i64> = Vec::new(); while divisor <= (n as f64).sqrt() as i64 { if n%divisor == 0 { factors.push(divisor); n = n / divisor; divisor = 2; } else { divisor += 1; } } factors.push(n); return factors; } pub fn main()
#[cfg(test)] mod test { use super::prime_factors; #[test] fn correct_answer() { let factors = prime_factors(600851475143); let expected_answer = 6857; let computed_answer = *factors.last().unwrap(); assert_eq!(computed_answer, expected_answer); } }
{ let factors = prime_factors(600851475143); let largest_prime_factor = factors.last().unwrap(); println!("largest prime factor == {}", largest_prime_factor); }
identifier_body
problem3.rs
/* Run tests with; * * rustc --test problem3.rs ; ./problem3 * */ fn prime_factors(mut n: i64) -> Vec<i64> { let mut divisor = 2; let mut factors: Vec<i64> = Vec::new(); while divisor <= (n as f64).sqrt() as i64 { if n%divisor == 0 { factors.push(divisor); n = n / divisor; divisor = 2; } else { divisor += 1; } } factors.push(n); return factors; } pub fn
() { let factors = prime_factors(600851475143); let largest_prime_factor = factors.last().unwrap(); println!("largest prime factor == {}", largest_prime_factor); } #[cfg(test)] mod test { use super::prime_factors; #[test] fn correct_answer() { let factors = prime_factors(600851475143); let expected_answer = 6857; let computed_answer = *factors.last().unwrap(); assert_eq!(computed_answer, expected_answer); } }
main
identifier_name
main.js
(function() { $(function() { $('.tooltip-examples a, .tooltip-paragraph-examples a').tooltip({
$('.top-sign-in').on("click", function(e) { $('.login-box').fadeIn("fast"); return false; }); $('.login-box-close').on("click", function(e) { $(this).closest(".login-box").fadeOut("fast"); return false; }); prettyPrint(); $(".slider-browser-center").animate({ bottom: $(".slider-browser-center").data('position-bottom') }, "fast", function() { return $(".slider-browser-left").animate({ bottom: $(".slider-browser-left").data('position-bottom') }, "fast", function() { return $(".slider-browser-right").animate({ bottom: $(".slider-browser-right").data('position-bottom') }, "fast"); }); }); $('.carousel').carousel({ interval: false }); return $('a[data-toggle="testimonial"]').on("click", function(e) { $(this).closest('.testimonials-users').find('a[data-toggle="testimonial"]').removeClass("active"); $(this).addClass("active"); $('.testimonials-speech').removeClass('active'); $('.testimonials-speech' + $(this).attr('href')).addClass('active'); return false; }); }); $("body").on("touchstart.dropdown", ".dropdown-menu", function(e) { return e.stopPropagation(); }); return $(document).on("click", ".dropdown-menu a", function() { return document.location = $(this).attr("href"); }); }).call(this);
animation: false });
random_line_split
change-dev-machine-dialog.controller.ts
/* * Copyright (c) 2015-2017 Red Hat, Inc. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 * which accompanies this distribution, and is available at * http://www.eclipse.org/legal/epl-v10.html * * Contributors: * Red Hat, Inc. - initial API and implementation */ 'use strict'; import {IEnvironmentManagerMachine} from '../../../../../components/api/environment/environment-manager-machine'; /** * This class is handling the controller for the change machines dialog. * * @author Oleksii Orel */ export class ChangeDevMachineDialogController { /** * Material design Dialog service. */ private $mdDialog: ng.material.IDialogService; /** * Current devMachine name. * Passed from parent controller. */ private currentDevMachineName: string; /** * List of machines. * Passed from parent controller. */ private machinesList: Array<IEnvironmentManagerMachine>; /** * Popup title. * Passed from parent controller. */ private popupTitle: string; /** * Change button title. * Passed from parent controller. */ private okButtonTitle: string; /** * Callback which is called when change DEV machine. * Passed from parent controller. */ private changeDevMachine: (machineName: string) => void; /** * Popup's message. */ private message: string; /** * Machine name which will be configured as dev-machine */ private newDevMachine: string; /** * Default constructor that is using resource injection * @ngInject for Dependency injection */ constructor($mdDialog: ng.material.IDialogService) { this.$mdDialog = $mdDialog; if (!angular.isArray(this.machinesList)) { this.machinesList = []; } if (!this.popupTitle) { this.popupTitle = 'Change DEV machine'; } if (!this.okButtonTitle)
this.message = this.machinesList.length > 1 ? 'Select the machine to get ws-agent activated:' : 'You can\'t change it without having other machines configured.'; } /** * Returns list of machines not including current dev machine. * * @return {Array<IEnvironmentManagerMachine>} */ getMachinesList(): Array<IEnvironmentManagerMachine> { return this.machinesList.filter((machine: IEnvironmentManagerMachine) => { return machine.name !== this.currentDevMachineName; }); } /** * Cancels this dialog. */ cancel(): void { this.$mdDialog.cancel(); } /** * Changes DEV machine. */ onDevChange(): void { if (angular.isFunction(this.changeDevMachine)) { this.changeDevMachine(this.newDevMachine); } this.$mdDialog.hide(); } }
{ this.okButtonTitle = 'OK'; }
conditional_block
change-dev-machine-dialog.controller.ts
/* * Copyright (c) 2015-2017 Red Hat, Inc. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 * which accompanies this distribution, and is available at * http://www.eclipse.org/legal/epl-v10.html * * Contributors: * Red Hat, Inc. - initial API and implementation */ 'use strict'; import {IEnvironmentManagerMachine} from '../../../../../components/api/environment/environment-manager-machine'; /** * This class is handling the controller for the change machines dialog. * * @author Oleksii Orel */ export class
{ /** * Material design Dialog service. */ private $mdDialog: ng.material.IDialogService; /** * Current devMachine name. * Passed from parent controller. */ private currentDevMachineName: string; /** * List of machines. * Passed from parent controller. */ private machinesList: Array<IEnvironmentManagerMachine>; /** * Popup title. * Passed from parent controller. */ private popupTitle: string; /** * Change button title. * Passed from parent controller. */ private okButtonTitle: string; /** * Callback which is called when change DEV machine. * Passed from parent controller. */ private changeDevMachine: (machineName: string) => void; /** * Popup's message. */ private message: string; /** * Machine name which will be configured as dev-machine */ private newDevMachine: string; /** * Default constructor that is using resource injection * @ngInject for Dependency injection */ constructor($mdDialog: ng.material.IDialogService) { this.$mdDialog = $mdDialog; if (!angular.isArray(this.machinesList)) { this.machinesList = []; } if (!this.popupTitle) { this.popupTitle = 'Change DEV machine'; } if (!this.okButtonTitle) { this.okButtonTitle = 'OK'; } this.message = this.machinesList.length > 1 ? 'Select the machine to get ws-agent activated:' : 'You can\'t change it without having other machines configured.'; } /** * Returns list of machines not including current dev machine. * * @return {Array<IEnvironmentManagerMachine>} */ getMachinesList(): Array<IEnvironmentManagerMachine> { return this.machinesList.filter((machine: IEnvironmentManagerMachine) => { return machine.name !== this.currentDevMachineName; }); } /** * Cancels this dialog. */ cancel(): void { this.$mdDialog.cancel(); } /** * Changes DEV machine. */ onDevChange(): void { if (angular.isFunction(this.changeDevMachine)) { this.changeDevMachine(this.newDevMachine); } this.$mdDialog.hide(); } }
ChangeDevMachineDialogController
identifier_name
change-dev-machine-dialog.controller.ts
/* * Copyright (c) 2015-2017 Red Hat, Inc. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 * which accompanies this distribution, and is available at * http://www.eclipse.org/legal/epl-v10.html * * Contributors: * Red Hat, Inc. - initial API and implementation */ 'use strict'; import {IEnvironmentManagerMachine} from '../../../../../components/api/environment/environment-manager-machine'; /** * This class is handling the controller for the change machines dialog. * * @author Oleksii Orel */ export class ChangeDevMachineDialogController {
/** * Material design Dialog service. */ private $mdDialog: ng.material.IDialogService; /** * Current devMachine name. * Passed from parent controller. */ private currentDevMachineName: string; /** * List of machines. * Passed from parent controller. */ private machinesList: Array<IEnvironmentManagerMachine>; /** * Popup title. * Passed from parent controller. */ private popupTitle: string; /** * Change button title. * Passed from parent controller. */ private okButtonTitle: string; /** * Callback which is called when change DEV machine. * Passed from parent controller. */ private changeDevMachine: (machineName: string) => void; /** * Popup's message. */ private message: string; /** * Machine name which will be configured as dev-machine */ private newDevMachine: string; /** * Default constructor that is using resource injection * @ngInject for Dependency injection */ constructor($mdDialog: ng.material.IDialogService) { this.$mdDialog = $mdDialog; if (!angular.isArray(this.machinesList)) { this.machinesList = []; } if (!this.popupTitle) { this.popupTitle = 'Change DEV machine'; } if (!this.okButtonTitle) { this.okButtonTitle = 'OK'; } this.message = this.machinesList.length > 1 ? 'Select the machine to get ws-agent activated:' : 'You can\'t change it without having other machines configured.'; } /** * Returns list of machines not including current dev machine. * * @return {Array<IEnvironmentManagerMachine>} */ getMachinesList(): Array<IEnvironmentManagerMachine> { return this.machinesList.filter((machine: IEnvironmentManagerMachine) => { return machine.name !== this.currentDevMachineName; }); } /** * Cancels this dialog. */ cancel(): void { this.$mdDialog.cancel(); } /** * Changes DEV machine. */ onDevChange(): void { if (angular.isFunction(this.changeDevMachine)) { this.changeDevMachine(this.newDevMachine); } this.$mdDialog.hide(); } }
random_line_split
change-dev-machine-dialog.controller.ts
/* * Copyright (c) 2015-2017 Red Hat, Inc. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 * which accompanies this distribution, and is available at * http://www.eclipse.org/legal/epl-v10.html * * Contributors: * Red Hat, Inc. - initial API and implementation */ 'use strict'; import {IEnvironmentManagerMachine} from '../../../../../components/api/environment/environment-manager-machine'; /** * This class is handling the controller for the change machines dialog. * * @author Oleksii Orel */ export class ChangeDevMachineDialogController { /** * Material design Dialog service. */ private $mdDialog: ng.material.IDialogService; /** * Current devMachine name. * Passed from parent controller. */ private currentDevMachineName: string; /** * List of machines. * Passed from parent controller. */ private machinesList: Array<IEnvironmentManagerMachine>; /** * Popup title. * Passed from parent controller. */ private popupTitle: string; /** * Change button title. * Passed from parent controller. */ private okButtonTitle: string; /** * Callback which is called when change DEV machine. * Passed from parent controller. */ private changeDevMachine: (machineName: string) => void; /** * Popup's message. */ private message: string; /** * Machine name which will be configured as dev-machine */ private newDevMachine: string; /** * Default constructor that is using resource injection * @ngInject for Dependency injection */ constructor($mdDialog: ng.material.IDialogService) { this.$mdDialog = $mdDialog; if (!angular.isArray(this.machinesList)) { this.machinesList = []; } if (!this.popupTitle) { this.popupTitle = 'Change DEV machine'; } if (!this.okButtonTitle) { this.okButtonTitle = 'OK'; } this.message = this.machinesList.length > 1 ? 
'Select the machine to get ws-agent activated:' : 'You can\'t change it without having other machines configured.'; } /** * Returns list of machines not including current dev machine. * * @return {Array<IEnvironmentManagerMachine>} */ getMachinesList(): Array<IEnvironmentManagerMachine> { return this.machinesList.filter((machine: IEnvironmentManagerMachine) => { return machine.name !== this.currentDevMachineName; }); } /** * Cancels this dialog. */ cancel(): void
/** * Changes DEV machine. */ onDevChange(): void { if (angular.isFunction(this.changeDevMachine)) { this.changeDevMachine(this.newDevMachine); } this.$mdDialog.hide(); } }
{ this.$mdDialog.cancel(); }
identifier_body
centos5.py
#!/usr/bin/python # # centos5.py - A webKickstart module to handle changes needed from # RHEL 5 to CentOS 5 Kickstart generation. # # Copyright 2007 NC State University # Written by Jack Neely <jjneely@ncsu.edu> # # SDG # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. from baseRealmLinuxKickstart import baseRealmLinuxKickstart class Kickstart(baseRealmLinuxKickstart): def
(self, url, cfg, sc=None): baseRealmLinuxKickstart.__init__(self, url, cfg, sc) self.buildOrder.remove(self.installationNumber) self.buildOrder.remove(self.RHN)
__init__
identifier_name
centos5.py
#!/usr/bin/python # # centos5.py - A webKickstart module to handle changes needed from # RHEL 5 to CentOS 5 Kickstart generation. # # Copyright 2007 NC State University # Written by Jack Neely <jjneely@ncsu.edu> # # SDG # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. from baseRealmLinuxKickstart import baseRealmLinuxKickstart class Kickstart(baseRealmLinuxKickstart):
def __init__(self, url, cfg, sc=None): baseRealmLinuxKickstart.__init__(self, url, cfg, sc) self.buildOrder.remove(self.installationNumber) self.buildOrder.remove(self.RHN)
identifier_body
centos5.py
#!/usr/bin/python # # centos5.py - A webKickstart module to handle changes needed from # RHEL 5 to CentOS 5 Kickstart generation. # # Copyright 2007 NC State University # Written by Jack Neely <jjneely@ncsu.edu> # # SDG # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
def __init__(self, url, cfg, sc=None): baseRealmLinuxKickstart.__init__(self, url, cfg, sc) self.buildOrder.remove(self.installationNumber) self.buildOrder.remove(self.RHN)
from baseRealmLinuxKickstart import baseRealmLinuxKickstart class Kickstart(baseRealmLinuxKickstart):
random_line_split
css-subpixelfont.js
//= require modernizr /*
*/ Modernizr.addTest('subpixelfont', function() { var bool, styles = "#modernizr{position: absolute; top: -10em; visibility:hidden; font: normal 10px arial;}#subpixel{float: left; font-size: 33.3333%;}"; // see https://github.com/Modernizr/Modernizr/blob/master/modernizr.js#L97 Modernizr.testStyles(styles, function(elem) { var subpixel = elem.firstChild; subpixel.innerHTML = 'This is a text written in Arial'; bool = window.getComputedStyle ? window.getComputedStyle(subpixel, null).getPropertyValue("width") !== '44px' : false; }, 1, ['subpixel']); return bool; });
* Test for SubPixel Font Rendering * (to infer if GDI or DirectWrite is used on Windows) * Authors: @derSchepp, @gerritvanaaken, @rodneyrehm, @yatil, @ryanseddon * Web: https://github.com/gerritvanaaken/subpixeldetect
random_line_split
ckeditor.files.js
/** * Nooku Framework - http://www.nooku.org * * @copyright Copyright (C) 2011 - 2017 Johan Janssens and Timble CVBA. (http://www.timble.net) * @license GNU AGPLv3 <https://www.gnu.org/licenses/agpl.html> * @link https://github.com/timble/openpolice-platform */ if(!Ckeditor) var Ckeditor = {}; Ckeditor.Files = new Class({ Extends: Files.App, Implements: [Events, Options], options: { types: ['file', 'image'], editor: null, preview: 'files-preview', grid: { cookie: false, layout: 'compact', batch_delete: false }, history: { enabled: false } }, initialize: function(options) { this.parent(options); this.editor = this.options.editor; this.preview = document.id(this.options.preview); }, setPaginator: function() { }, setPathway: function() { }, setState: function() { // TODO: Implement pagination into the view this.fireEvent('beforeSetState'); var opts = this.options.state; this.state = new Files.State(opts); this.fireEvent('afterSetState'); }, setGrid: function() { var opts = this.options.grid; var that = this; $extend(opts, { 'onClickImage': function(e) { that.setPreview(document.id(e.target), 'image'); }, 'onClickFile': function(e) { that.setPreview(document.id(e.target), 'file'); } }); this.grid = new Files.Grid(this.options.grid.element, opts); }, setPreview: function(target, type) { var node = target.getParent('.files-node-shadow') || target.getParent('.files-node'); var row = node.retrieve('row'); var copy = $extend({}, row); var path = row.baseurl+"/"+row.filepath; var url = path.replace(Files.sitebase+'/', '').replace(/files\/[^\/]+\//, ''); // Update active row node.getParent().getChildren().removeClass('active'); node.addClass('active'); // Load preview template copy.template = 'details_'+type; this.preview.empty(); copy.render('compact').inject(this.preview); // Inject preview image if (type == 'image') { this.preview.getElement('img').set('src', copy.image); } // When no text is selected use the file name if (type == 'file')
document.id('image-url').set('value', url); document.id('image-type').set('value',row.metadata.mimetype); } });
{ if(document.id('image-text').get('value') == ""){ document.id('image-text').set('value', row.name); } }
conditional_block
ckeditor.files.js
/** * Nooku Framework - http://www.nooku.org * * @copyright Copyright (C) 2011 - 2017 Johan Janssens and Timble CVBA. (http://www.timble.net) * @license GNU AGPLv3 <https://www.gnu.org/licenses/agpl.html> * @link https://github.com/timble/openpolice-platform */ if(!Ckeditor) var Ckeditor = {}; Ckeditor.Files = new Class({ Extends: Files.App, Implements: [Events, Options], options: { types: ['file', 'image'], editor: null, preview: 'files-preview', grid: { cookie: false, layout: 'compact', batch_delete: false }, history: { enabled: false } }, initialize: function(options) { this.parent(options); this.editor = this.options.editor; this.preview = document.id(this.options.preview); }, setPaginator: function() { }, setPathway: function() { }, setState: function() { // TODO: Implement pagination into the view this.fireEvent('beforeSetState'); var opts = this.options.state; this.state = new Files.State(opts); this.fireEvent('afterSetState'); }, setGrid: function() { var opts = this.options.grid; var that = this; $extend(opts, { 'onClickImage': function(e) { that.setPreview(document.id(e.target), 'image'); }, 'onClickFile': function(e) { that.setPreview(document.id(e.target), 'file'); } }); this.grid = new Files.Grid(this.options.grid.element, opts); }, setPreview: function(target, type) { var node = target.getParent('.files-node-shadow') || target.getParent('.files-node'); var row = node.retrieve('row'); var copy = $extend({}, row); var path = row.baseurl+"/"+row.filepath; var url = path.replace(Files.sitebase+'/', '').replace(/files\/[^\/]+\//, ''); // Update active row node.getParent().getChildren().removeClass('active'); node.addClass('active'); // Load preview template copy.template = 'details_'+type;
this.preview.empty(); copy.render('compact').inject(this.preview); // Inject preview image if (type == 'image') { this.preview.getElement('img').set('src', copy.image); } // When no text is selected use the file name if (type == 'file') { if(document.id('image-text').get('value') == ""){ document.id('image-text').set('value', row.name); } } document.id('image-url').set('value', url); document.id('image-type').set('value',row.metadata.mimetype); } });
random_line_split
ndvi_difference.py
#!/usr/bin/env python2 """Example of server-side computations used in global forest change analysis. In this example we will focus on server side computation using NDVI and EVI data. This both metrics are computed bands created by third party companies or directly taken by the satellites. NDVI and EVI are two metrics used in global forest change analysis. They represent the forest concentration in a specific area. We will use the MOD13A1 vegetation indice provided by the NASA [1]. The goal is to generate an RGB image, where reds stands for deforestation, gree for reforestation and blue for masked data (e.g. rivers, oceans...). [1] https://code.earthengine.google.com/dataset/MODIS/MOD13A1 """ import ee # Initialize the Earth Engine ee.Initialize() # Small rectangle used to generate the image, over the Amazonian forest. # The location is above the Rondonia (West of Bresil). rectangle = ee.Geometry.Rectangle(-68, -7, -65, -8) # Get the MODIS dataset. collection = ee.ImageCollection('MODIS/MOD13A1') # Select the EVI, since it is more accurate on this dataset. You can also # use the NDVI band here. collection = collection.select(['EVI']) # Get two dataset, one over the year 2000 and the other one over 2015 ndvi2000 = collection.filterDate('2000-01-01', '2000-12-31').median() ndvi2015 = collection.filterDate('2015-01-01', '2015-12-31').median() # Substract the two datasets to see the evolution between both of them. difference = ndvi2015.subtract(ndvi2000) # Use a mask to avoid showing data on rivers. # TODO(funkysayu) move this mask to blue color. classifiedImage = ee.Image('MODIS/051/MCD12Q1/2001_01_01') mask = classifiedImage.select(['Land_Cover_Type_1']) maskedDifference = difference.updateMask(mask) # Convert it to RGB image.
visualized = maskedDifference.visualize( min=-2000, max=2000, palette='FF0000, 000000, 00FF00', ) # Finally generate the PNG. print visualized.getDownloadUrl({ 'region': rectangle.toGeoJSONString(), 'scale': 500, 'format': 'png', })
random_line_split
utils.ts
import { Observable, of, Subscriber } from 'rxjs'; import { delay, take, tap } from 'rxjs/operators'; import { ShareButtonFuncArgs } from './share.models';
*/ function isObject(item): boolean { return (item && typeof item === 'object' && !Array.isArray(item)); } /** * Deep merge two objects. */ export function mergeDeep(target, ...sources) { if (!sources.length) { return target; } const source = sources.shift(); if (isObject(target) && isObject(source)) { for (const key in source) { if (isObject(source[key])) { if (!target[key]) { Object.assign(target, {[key]: {}}); } mergeDeep(target[key], source[key]); } else { Object.assign(target, {[key]: source[key]}); } } } return mergeDeep(target, ...sources); } /** Returns a valid URL or falls back to current URL */ export function getValidUrl(url: string, fallbackUrl: string): string { if (url) { const r = /(http|https):\/\/(\w+:{0,1}\w*@)?(\S+)(:[0-9]+)?(\/|\/([\w#!:.?+=&%@!\-\/]))?/; if (r.test(url)) { return url; } console.warn(`[ShareButtons]: Sharing link '${ url }' is invalid!`); } return fallbackUrl; } export function printPage(): Observable<void> { return new Observable((sub: Subscriber<any>) => document.defaultView.print()); } export function copyToClipboard({params, data, clipboard, updater}: ShareButtonFuncArgs<CopyToClipboardDataArgs>): Observable<void> { return of(null).pipe( tap(() => { clipboard.copy(params.url); // Disable copy button updater.next({icon: data.successIcon, text: data.successText, disabled: true}); }), delay(data.delay), tap(() => updater.next({icon: data.icon, text: data.text, disabled: false})), take(1) ); } interface CopyToClipboardDataArgs { delay: number; text: string; icon: string[]; successText: string; successIcon: string[]; }
/** * Simple object check.
random_line_split
utils.ts
import { Observable, of, Subscriber } from 'rxjs'; import { delay, take, tap } from 'rxjs/operators'; import { ShareButtonFuncArgs } from './share.models'; /** * Simple object check. */ function isObject(item): boolean { return (item && typeof item === 'object' && !Array.isArray(item)); } /** * Deep merge two objects. */ export function mergeDeep(target, ...sources) { if (!sources.length) { return target; } const source = sources.shift(); if (isObject(target) && isObject(source)) { for (const key in source) { if (isObject(source[key])) { if (!target[key]) { Object.assign(target, {[key]: {}}); } mergeDeep(target[key], source[key]); } else { Object.assign(target, {[key]: source[key]}); } } } return mergeDeep(target, ...sources); } /** Returns a valid URL or falls back to current URL */ export function
(url: string, fallbackUrl: string): string { if (url) { const r = /(http|https):\/\/(\w+:{0,1}\w*@)?(\S+)(:[0-9]+)?(\/|\/([\w#!:.?+=&%@!\-\/]))?/; if (r.test(url)) { return url; } console.warn(`[ShareButtons]: Sharing link '${ url }' is invalid!`); } return fallbackUrl; } export function printPage(): Observable<void> { return new Observable((sub: Subscriber<any>) => document.defaultView.print()); } export function copyToClipboard({params, data, clipboard, updater}: ShareButtonFuncArgs<CopyToClipboardDataArgs>): Observable<void> { return of(null).pipe( tap(() => { clipboard.copy(params.url); // Disable copy button updater.next({icon: data.successIcon, text: data.successText, disabled: true}); }), delay(data.delay), tap(() => updater.next({icon: data.icon, text: data.text, disabled: false})), take(1) ); } interface CopyToClipboardDataArgs { delay: number; text: string; icon: string[]; successText: string; successIcon: string[]; }
getValidUrl
identifier_name
utils.ts
import { Observable, of, Subscriber } from 'rxjs'; import { delay, take, tap } from 'rxjs/operators'; import { ShareButtonFuncArgs } from './share.models'; /** * Simple object check. */ function isObject(item): boolean { return (item && typeof item === 'object' && !Array.isArray(item)); } /** * Deep merge two objects. */ export function mergeDeep(target, ...sources) { if (!sources.length) { return target; } const source = sources.shift(); if (isObject(target) && isObject(source)) { for (const key in source) { if (isObject(source[key])) { if (!target[key]) { Object.assign(target, {[key]: {}}); } mergeDeep(target[key], source[key]); } else { Object.assign(target, {[key]: source[key]}); } } } return mergeDeep(target, ...sources); } /** Returns a valid URL or falls back to current URL */ export function getValidUrl(url: string, fallbackUrl: string): string { if (url) { const r = /(http|https):\/\/(\w+:{0,1}\w*@)?(\S+)(:[0-9]+)?(\/|\/([\w#!:.?+=&%@!\-\/]))?/; if (r.test(url))
console.warn(`[ShareButtons]: Sharing link '${ url }' is invalid!`); } return fallbackUrl; } export function printPage(): Observable<void> { return new Observable((sub: Subscriber<any>) => document.defaultView.print()); } export function copyToClipboard({params, data, clipboard, updater}: ShareButtonFuncArgs<CopyToClipboardDataArgs>): Observable<void> { return of(null).pipe( tap(() => { clipboard.copy(params.url); // Disable copy button updater.next({icon: data.successIcon, text: data.successText, disabled: true}); }), delay(data.delay), tap(() => updater.next({icon: data.icon, text: data.text, disabled: false})), take(1) ); } interface CopyToClipboardDataArgs { delay: number; text: string; icon: string[]; successText: string; successIcon: string[]; }
{ return url; }
conditional_block
utils.ts
import { Observable, of, Subscriber } from 'rxjs'; import { delay, take, tap } from 'rxjs/operators'; import { ShareButtonFuncArgs } from './share.models'; /** * Simple object check. */ function isObject(item): boolean { return (item && typeof item === 'object' && !Array.isArray(item)); } /** * Deep merge two objects. */ export function mergeDeep(target, ...sources)
/** Returns a valid URL or falls back to current URL */ export function getValidUrl(url: string, fallbackUrl: string): string { if (url) { const r = /(http|https):\/\/(\w+:{0,1}\w*@)?(\S+)(:[0-9]+)?(\/|\/([\w#!:.?+=&%@!\-\/]))?/; if (r.test(url)) { return url; } console.warn(`[ShareButtons]: Sharing link '${ url }' is invalid!`); } return fallbackUrl; } export function printPage(): Observable<void> { return new Observable((sub: Subscriber<any>) => document.defaultView.print()); } export function copyToClipboard({params, data, clipboard, updater}: ShareButtonFuncArgs<CopyToClipboardDataArgs>): Observable<void> { return of(null).pipe( tap(() => { clipboard.copy(params.url); // Disable copy button updater.next({icon: data.successIcon, text: data.successText, disabled: true}); }), delay(data.delay), tap(() => updater.next({icon: data.icon, text: data.text, disabled: false})), take(1) ); } interface CopyToClipboardDataArgs { delay: number; text: string; icon: string[]; successText: string; successIcon: string[]; }
{ if (!sources.length) { return target; } const source = sources.shift(); if (isObject(target) && isObject(source)) { for (const key in source) { if (isObject(source[key])) { if (!target[key]) { Object.assign(target, {[key]: {}}); } mergeDeep(target[key], source[key]); } else { Object.assign(target, {[key]: source[key]}); } } } return mergeDeep(target, ...sources); }
identifier_body
model.py
"""Machine learning model""" from copy import deepcopy import logging from operator import itemgetter from pathlib import Path import shutil from tempfile import TemporaryDirectory from typing import List, Tuple, Dict, Any, Callable import tensorflow as tf from tensorflow.estimator import ModeKeys, Estimator from tensorflow.python.training.tracking.tracking import AutoTrackable LOGGER = logging.getLogger(__name__) DATASET = { ModeKeys.TRAIN: 'train', ModeKeys.EVAL: 'valid', ModeKeys.PREDICT: 'test', } class HyperParameter: """Model hyper parameters""" BATCH_SIZE = 100 NB_TOKENS = 10000 VOCABULARY_SIZE = 5000 EMBEDDING_SIZE = max(10, int(VOCABULARY_SIZE**0.5)) DNN_HIDDEN_UNITS = [512, 32] DNN_DROPOUT = 0.5 N_GRAM = 2 class Training: """Model training parameters""" SHUFFLE_BUFFER = HyperParameter.BATCH_SIZE * 10 CHECKPOINT_STEPS = 1000 LONG_TRAINING_STEPS = 10 * CHECKPOINT_STEPS SHORT_DELAY = 60 LONG_DELAY = 5 * SHORT_DELAY def load(saved_model_dir: str) -> AutoTrackable: """Load a Tensorflow saved model""" return tf.saved_model.load(saved_model_dir) def build(model_dir: str, labels: List[str]) -> Estimator: """Build a Tensorflow text classifier """ config = tf.estimator.RunConfig( model_dir=model_dir, save_checkpoints_steps=Training.CHECKPOINT_STEPS, ) categorical_column = tf.feature_column.categorical_column_with_hash_bucket( key='content', hash_bucket_size=HyperParameter.VOCABULARY_SIZE, ) dense_column = tf.feature_column.embedding_column( categorical_column=categorical_column, dimension=HyperParameter.EMBEDDING_SIZE, ) return tf.estimator.DNNLinearCombinedClassifier( linear_feature_columns=[categorical_column],
dnn_feature_columns=[dense_column], dnn_hidden_units=HyperParameter.DNN_HIDDEN_UNITS, dnn_dropout=HyperParameter.DNN_DROPOUT, label_vocabulary=labels, n_classes=len(labels), config=config, ) def train(estimator: Estimator, data_root_dir: str, max_steps: int) -> Any: """Train a Tensorflow estimator""" train_spec = tf.estimator.TrainSpec( input_fn=_build_input_fn(data_root_dir, ModeKeys.TRAIN), max_steps=max_steps, ) if max_steps > Training.LONG_TRAINING_STEPS: throttle_secs = Training.LONG_DELAY else: throttle_secs = Training.SHORT_DELAY eval_spec = tf.estimator.EvalSpec( input_fn=_build_input_fn(data_root_dir, ModeKeys.EVAL), start_delay_secs=Training.SHORT_DELAY, throttle_secs=throttle_secs, ) LOGGER.debug('Train the model') results = tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec) training_metrics = results[0] return training_metrics def save(estimator: Estimator, saved_model_dir: str) -> None: """Save a Tensorflow estimator""" with TemporaryDirectory() as temporary_model_base_dir: export_dir = estimator.export_saved_model( temporary_model_base_dir, _serving_input_receiver_fn ) Path(saved_model_dir).mkdir(exist_ok=True) export_path = Path(export_dir.decode()).absolute() for path in export_path.glob('*'): shutil.move(str(path), saved_model_dir) def test( saved_model: AutoTrackable, data_root_dir: str, mapping: Dict[str, str], ) -> Dict[str, Dict[str, int]]: """Test a Tensorflow saved model""" values = {language: 0 for language in mapping.values()} matches = {language: deepcopy(values) for language in values} LOGGER.debug('Test the model') input_function = _build_input_fn(data_root_dir, ModeKeys.PREDICT) for test_item in input_function(): content = test_item[0] label = test_item[1].numpy()[0].decode() result = saved_model.signatures['predict'](content) predicted = result['classes'].numpy()[0][0].decode() label_language = mapping[label] predicted_language = mapping[predicted] matches[label_language][predicted_language] += 1 return matches def 
predict( saved_model: AutoTrackable, mapping: Dict[str, str], text: str ) -> List[Tuple[str, float]]: """Infer a Tensorflow saved model""" content_tensor = tf.constant([text]) predicted = saved_model.signatures['serving_default'](content_tensor) numpy_floats = predicted['scores'][0].numpy() extensions = predicted['classes'][0].numpy() probability_values = (float(value) for value in numpy_floats) languages = (mapping[ext.decode()] for ext in extensions) unsorted_scores = zip(languages, probability_values) scores = sorted(unsorted_scores, key=itemgetter(1), reverse=True) return scores def _build_input_fn( data_root_dir: str, mode: ModeKeys, ) -> Callable[[], tf.data.Dataset]: """Generate an input fonction for a Tensorflow model""" pattern = str(Path(data_root_dir).joinpath(DATASET[mode], '*')) def input_function() -> tf.data.Dataset: dataset = tf.data.Dataset dataset = dataset.list_files(pattern, shuffle=True).map(_read_file) if mode == ModeKeys.PREDICT: return dataset.batch(1) if mode == ModeKeys.TRAIN: dataset = dataset.shuffle(Training.SHUFFLE_BUFFER).repeat() return dataset.map(_preprocess).batch(HyperParameter.BATCH_SIZE) return input_function def _serving_input_receiver_fn() -> tf.estimator.export.ServingInputReceiver: """Function to serve model for predictions.""" content = tf.compat.v1.placeholder(tf.string, [None]) receiver_tensors = {'content': content} features = {'content': tf.map_fn(_preprocess_text, content)} return tf.estimator.export.ServingInputReceiver( receiver_tensors=receiver_tensors, features=features, ) def _read_file(filename: str) -> Tuple[tf.Tensor, tf.Tensor]: """Read a source file, return the content and the extension""" data = tf.io.read_file(filename) label = tf.strings.split([filename], '.').values[-1] return data, label def _preprocess( data: tf.Tensor, label: tf.Tensor, ) -> Tuple[Dict[str, tf.Tensor], tf.Tensor]: """Process input data as part of a workflow""" data = _preprocess_text(data) return {'content': data}, label def 
_preprocess_text(data: tf.Tensor) -> tf.Tensor: """Feature engineering""" padding = tf.constant(['']*HyperParameter.NB_TOKENS) data = tf.strings.bytes_split(data) data = tf.strings.ngrams(data, HyperParameter.N_GRAM) data = tf.concat((data, padding), axis=0) data = data[:HyperParameter.NB_TOKENS] return data
random_line_split
model.py
"""Machine learning model""" from copy import deepcopy import logging from operator import itemgetter from pathlib import Path import shutil from tempfile import TemporaryDirectory from typing import List, Tuple, Dict, Any, Callable import tensorflow as tf from tensorflow.estimator import ModeKeys, Estimator from tensorflow.python.training.tracking.tracking import AutoTrackable LOGGER = logging.getLogger(__name__) DATASET = { ModeKeys.TRAIN: 'train', ModeKeys.EVAL: 'valid', ModeKeys.PREDICT: 'test', } class
: """Model hyper parameters""" BATCH_SIZE = 100 NB_TOKENS = 10000 VOCABULARY_SIZE = 5000 EMBEDDING_SIZE = max(10, int(VOCABULARY_SIZE**0.5)) DNN_HIDDEN_UNITS = [512, 32] DNN_DROPOUT = 0.5 N_GRAM = 2 class Training: """Model training parameters""" SHUFFLE_BUFFER = HyperParameter.BATCH_SIZE * 10 CHECKPOINT_STEPS = 1000 LONG_TRAINING_STEPS = 10 * CHECKPOINT_STEPS SHORT_DELAY = 60 LONG_DELAY = 5 * SHORT_DELAY def load(saved_model_dir: str) -> AutoTrackable: """Load a Tensorflow saved model""" return tf.saved_model.load(saved_model_dir) def build(model_dir: str, labels: List[str]) -> Estimator: """Build a Tensorflow text classifier """ config = tf.estimator.RunConfig( model_dir=model_dir, save_checkpoints_steps=Training.CHECKPOINT_STEPS, ) categorical_column = tf.feature_column.categorical_column_with_hash_bucket( key='content', hash_bucket_size=HyperParameter.VOCABULARY_SIZE, ) dense_column = tf.feature_column.embedding_column( categorical_column=categorical_column, dimension=HyperParameter.EMBEDDING_SIZE, ) return tf.estimator.DNNLinearCombinedClassifier( linear_feature_columns=[categorical_column], dnn_feature_columns=[dense_column], dnn_hidden_units=HyperParameter.DNN_HIDDEN_UNITS, dnn_dropout=HyperParameter.DNN_DROPOUT, label_vocabulary=labels, n_classes=len(labels), config=config, ) def train(estimator: Estimator, data_root_dir: str, max_steps: int) -> Any: """Train a Tensorflow estimator""" train_spec = tf.estimator.TrainSpec( input_fn=_build_input_fn(data_root_dir, ModeKeys.TRAIN), max_steps=max_steps, ) if max_steps > Training.LONG_TRAINING_STEPS: throttle_secs = Training.LONG_DELAY else: throttle_secs = Training.SHORT_DELAY eval_spec = tf.estimator.EvalSpec( input_fn=_build_input_fn(data_root_dir, ModeKeys.EVAL), start_delay_secs=Training.SHORT_DELAY, throttle_secs=throttle_secs, ) LOGGER.debug('Train the model') results = tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec) training_metrics = results[0] return training_metrics def save(estimator: 
Estimator, saved_model_dir: str) -> None: """Save a Tensorflow estimator""" with TemporaryDirectory() as temporary_model_base_dir: export_dir = estimator.export_saved_model( temporary_model_base_dir, _serving_input_receiver_fn ) Path(saved_model_dir).mkdir(exist_ok=True) export_path = Path(export_dir.decode()).absolute() for path in export_path.glob('*'): shutil.move(str(path), saved_model_dir) def test( saved_model: AutoTrackable, data_root_dir: str, mapping: Dict[str, str], ) -> Dict[str, Dict[str, int]]: """Test a Tensorflow saved model""" values = {language: 0 for language in mapping.values()} matches = {language: deepcopy(values) for language in values} LOGGER.debug('Test the model') input_function = _build_input_fn(data_root_dir, ModeKeys.PREDICT) for test_item in input_function(): content = test_item[0] label = test_item[1].numpy()[0].decode() result = saved_model.signatures['predict'](content) predicted = result['classes'].numpy()[0][0].decode() label_language = mapping[label] predicted_language = mapping[predicted] matches[label_language][predicted_language] += 1 return matches def predict( saved_model: AutoTrackable, mapping: Dict[str, str], text: str ) -> List[Tuple[str, float]]: """Infer a Tensorflow saved model""" content_tensor = tf.constant([text]) predicted = saved_model.signatures['serving_default'](content_tensor) numpy_floats = predicted['scores'][0].numpy() extensions = predicted['classes'][0].numpy() probability_values = (float(value) for value in numpy_floats) languages = (mapping[ext.decode()] for ext in extensions) unsorted_scores = zip(languages, probability_values) scores = sorted(unsorted_scores, key=itemgetter(1), reverse=True) return scores def _build_input_fn( data_root_dir: str, mode: ModeKeys, ) -> Callable[[], tf.data.Dataset]: """Generate an input fonction for a Tensorflow model""" pattern = str(Path(data_root_dir).joinpath(DATASET[mode], '*')) def input_function() -> tf.data.Dataset: dataset = tf.data.Dataset dataset = 
dataset.list_files(pattern, shuffle=True).map(_read_file) if mode == ModeKeys.PREDICT: return dataset.batch(1) if mode == ModeKeys.TRAIN: dataset = dataset.shuffle(Training.SHUFFLE_BUFFER).repeat() return dataset.map(_preprocess).batch(HyperParameter.BATCH_SIZE) return input_function def _serving_input_receiver_fn() -> tf.estimator.export.ServingInputReceiver: """Function to serve model for predictions.""" content = tf.compat.v1.placeholder(tf.string, [None]) receiver_tensors = {'content': content} features = {'content': tf.map_fn(_preprocess_text, content)} return tf.estimator.export.ServingInputReceiver( receiver_tensors=receiver_tensors, features=features, ) def _read_file(filename: str) -> Tuple[tf.Tensor, tf.Tensor]: """Read a source file, return the content and the extension""" data = tf.io.read_file(filename) label = tf.strings.split([filename], '.').values[-1] return data, label def _preprocess( data: tf.Tensor, label: tf.Tensor, ) -> Tuple[Dict[str, tf.Tensor], tf.Tensor]: """Process input data as part of a workflow""" data = _preprocess_text(data) return {'content': data}, label def _preprocess_text(data: tf.Tensor) -> tf.Tensor: """Feature engineering""" padding = tf.constant(['']*HyperParameter.NB_TOKENS) data = tf.strings.bytes_split(data) data = tf.strings.ngrams(data, HyperParameter.N_GRAM) data = tf.concat((data, padding), axis=0) data = data[:HyperParameter.NB_TOKENS] return data
HyperParameter
identifier_name
model.py
"""Machine learning model""" from copy import deepcopy import logging from operator import itemgetter from pathlib import Path import shutil from tempfile import TemporaryDirectory from typing import List, Tuple, Dict, Any, Callable import tensorflow as tf from tensorflow.estimator import ModeKeys, Estimator from tensorflow.python.training.tracking.tracking import AutoTrackable LOGGER = logging.getLogger(__name__) DATASET = { ModeKeys.TRAIN: 'train', ModeKeys.EVAL: 'valid', ModeKeys.PREDICT: 'test', } class HyperParameter: """Model hyper parameters""" BATCH_SIZE = 100 NB_TOKENS = 10000 VOCABULARY_SIZE = 5000 EMBEDDING_SIZE = max(10, int(VOCABULARY_SIZE**0.5)) DNN_HIDDEN_UNITS = [512, 32] DNN_DROPOUT = 0.5 N_GRAM = 2 class Training: """Model training parameters""" SHUFFLE_BUFFER = HyperParameter.BATCH_SIZE * 10 CHECKPOINT_STEPS = 1000 LONG_TRAINING_STEPS = 10 * CHECKPOINT_STEPS SHORT_DELAY = 60 LONG_DELAY = 5 * SHORT_DELAY def load(saved_model_dir: str) -> AutoTrackable: """Load a Tensorflow saved model""" return tf.saved_model.load(saved_model_dir) def build(model_dir: str, labels: List[str]) -> Estimator: """Build a Tensorflow text classifier """ config = tf.estimator.RunConfig( model_dir=model_dir, save_checkpoints_steps=Training.CHECKPOINT_STEPS, ) categorical_column = tf.feature_column.categorical_column_with_hash_bucket( key='content', hash_bucket_size=HyperParameter.VOCABULARY_SIZE, ) dense_column = tf.feature_column.embedding_column( categorical_column=categorical_column, dimension=HyperParameter.EMBEDDING_SIZE, ) return tf.estimator.DNNLinearCombinedClassifier( linear_feature_columns=[categorical_column], dnn_feature_columns=[dense_column], dnn_hidden_units=HyperParameter.DNN_HIDDEN_UNITS, dnn_dropout=HyperParameter.DNN_DROPOUT, label_vocabulary=labels, n_classes=len(labels), config=config, ) def train(estimator: Estimator, data_root_dir: str, max_steps: int) -> Any: """Train a Tensorflow estimator""" train_spec = tf.estimator.TrainSpec( 
input_fn=_build_input_fn(data_root_dir, ModeKeys.TRAIN), max_steps=max_steps, ) if max_steps > Training.LONG_TRAINING_STEPS: throttle_secs = Training.LONG_DELAY else: throttle_secs = Training.SHORT_DELAY eval_spec = tf.estimator.EvalSpec( input_fn=_build_input_fn(data_root_dir, ModeKeys.EVAL), start_delay_secs=Training.SHORT_DELAY, throttle_secs=throttle_secs, ) LOGGER.debug('Train the model') results = tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec) training_metrics = results[0] return training_metrics def save(estimator: Estimator, saved_model_dir: str) -> None: """Save a Tensorflow estimator""" with TemporaryDirectory() as temporary_model_base_dir: export_dir = estimator.export_saved_model( temporary_model_base_dir, _serving_input_receiver_fn ) Path(saved_model_dir).mkdir(exist_ok=True) export_path = Path(export_dir.decode()).absolute() for path in export_path.glob('*'): shutil.move(str(path), saved_model_dir) def test( saved_model: AutoTrackable, data_root_dir: str, mapping: Dict[str, str], ) -> Dict[str, Dict[str, int]]: """Test a Tensorflow saved model""" values = {language: 0 for language in mapping.values()} matches = {language: deepcopy(values) for language in values} LOGGER.debug('Test the model') input_function = _build_input_fn(data_root_dir, ModeKeys.PREDICT) for test_item in input_function(): content = test_item[0] label = test_item[1].numpy()[0].decode() result = saved_model.signatures['predict'](content) predicted = result['classes'].numpy()[0][0].decode() label_language = mapping[label] predicted_language = mapping[predicted] matches[label_language][predicted_language] += 1 return matches def predict( saved_model: AutoTrackable, mapping: Dict[str, str], text: str ) -> List[Tuple[str, float]]: """Infer a Tensorflow saved model""" content_tensor = tf.constant([text]) predicted = saved_model.signatures['serving_default'](content_tensor) numpy_floats = predicted['scores'][0].numpy() extensions = predicted['classes'][0].numpy() 
probability_values = (float(value) for value in numpy_floats) languages = (mapping[ext.decode()] for ext in extensions) unsorted_scores = zip(languages, probability_values) scores = sorted(unsorted_scores, key=itemgetter(1), reverse=True) return scores def _build_input_fn( data_root_dir: str, mode: ModeKeys, ) -> Callable[[], tf.data.Dataset]: """Generate an input fonction for a Tensorflow model""" pattern = str(Path(data_root_dir).joinpath(DATASET[mode], '*')) def input_function() -> tf.data.Dataset: dataset = tf.data.Dataset dataset = dataset.list_files(pattern, shuffle=True).map(_read_file) if mode == ModeKeys.PREDICT: return dataset.batch(1) if mode == ModeKeys.TRAIN: dataset = dataset.shuffle(Training.SHUFFLE_BUFFER).repeat() return dataset.map(_preprocess).batch(HyperParameter.BATCH_SIZE) return input_function def _serving_input_receiver_fn() -> tf.estimator.export.ServingInputReceiver: """Function to serve model for predictions.""" content = tf.compat.v1.placeholder(tf.string, [None]) receiver_tensors = {'content': content} features = {'content': tf.map_fn(_preprocess_text, content)} return tf.estimator.export.ServingInputReceiver( receiver_tensors=receiver_tensors, features=features, ) def _read_file(filename: str) -> Tuple[tf.Tensor, tf.Tensor]: """Read a source file, return the content and the extension""" data = tf.io.read_file(filename) label = tf.strings.split([filename], '.').values[-1] return data, label def _preprocess( data: tf.Tensor, label: tf.Tensor, ) -> Tuple[Dict[str, tf.Tensor], tf.Tensor]:
def _preprocess_text(data: tf.Tensor) -> tf.Tensor: """Feature engineering""" padding = tf.constant(['']*HyperParameter.NB_TOKENS) data = tf.strings.bytes_split(data) data = tf.strings.ngrams(data, HyperParameter.N_GRAM) data = tf.concat((data, padding), axis=0) data = data[:HyperParameter.NB_TOKENS] return data
"""Process input data as part of a workflow""" data = _preprocess_text(data) return {'content': data}, label
identifier_body
model.py
"""Machine learning model""" from copy import deepcopy import logging from operator import itemgetter from pathlib import Path import shutil from tempfile import TemporaryDirectory from typing import List, Tuple, Dict, Any, Callable import tensorflow as tf from tensorflow.estimator import ModeKeys, Estimator from tensorflow.python.training.tracking.tracking import AutoTrackable LOGGER = logging.getLogger(__name__) DATASET = { ModeKeys.TRAIN: 'train', ModeKeys.EVAL: 'valid', ModeKeys.PREDICT: 'test', } class HyperParameter: """Model hyper parameters""" BATCH_SIZE = 100 NB_TOKENS = 10000 VOCABULARY_SIZE = 5000 EMBEDDING_SIZE = max(10, int(VOCABULARY_SIZE**0.5)) DNN_HIDDEN_UNITS = [512, 32] DNN_DROPOUT = 0.5 N_GRAM = 2 class Training: """Model training parameters""" SHUFFLE_BUFFER = HyperParameter.BATCH_SIZE * 10 CHECKPOINT_STEPS = 1000 LONG_TRAINING_STEPS = 10 * CHECKPOINT_STEPS SHORT_DELAY = 60 LONG_DELAY = 5 * SHORT_DELAY def load(saved_model_dir: str) -> AutoTrackable: """Load a Tensorflow saved model""" return tf.saved_model.load(saved_model_dir) def build(model_dir: str, labels: List[str]) -> Estimator: """Build a Tensorflow text classifier """ config = tf.estimator.RunConfig( model_dir=model_dir, save_checkpoints_steps=Training.CHECKPOINT_STEPS, ) categorical_column = tf.feature_column.categorical_column_with_hash_bucket( key='content', hash_bucket_size=HyperParameter.VOCABULARY_SIZE, ) dense_column = tf.feature_column.embedding_column( categorical_column=categorical_column, dimension=HyperParameter.EMBEDDING_SIZE, ) return tf.estimator.DNNLinearCombinedClassifier( linear_feature_columns=[categorical_column], dnn_feature_columns=[dense_column], dnn_hidden_units=HyperParameter.DNN_HIDDEN_UNITS, dnn_dropout=HyperParameter.DNN_DROPOUT, label_vocabulary=labels, n_classes=len(labels), config=config, ) def train(estimator: Estimator, data_root_dir: str, max_steps: int) -> Any: """Train a Tensorflow estimator""" train_spec = tf.estimator.TrainSpec( 
input_fn=_build_input_fn(data_root_dir, ModeKeys.TRAIN), max_steps=max_steps, ) if max_steps > Training.LONG_TRAINING_STEPS: throttle_secs = Training.LONG_DELAY else:
eval_spec = tf.estimator.EvalSpec( input_fn=_build_input_fn(data_root_dir, ModeKeys.EVAL), start_delay_secs=Training.SHORT_DELAY, throttle_secs=throttle_secs, ) LOGGER.debug('Train the model') results = tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec) training_metrics = results[0] return training_metrics def save(estimator: Estimator, saved_model_dir: str) -> None: """Save a Tensorflow estimator""" with TemporaryDirectory() as temporary_model_base_dir: export_dir = estimator.export_saved_model( temporary_model_base_dir, _serving_input_receiver_fn ) Path(saved_model_dir).mkdir(exist_ok=True) export_path = Path(export_dir.decode()).absolute() for path in export_path.glob('*'): shutil.move(str(path), saved_model_dir) def test( saved_model: AutoTrackable, data_root_dir: str, mapping: Dict[str, str], ) -> Dict[str, Dict[str, int]]: """Test a Tensorflow saved model""" values = {language: 0 for language in mapping.values()} matches = {language: deepcopy(values) for language in values} LOGGER.debug('Test the model') input_function = _build_input_fn(data_root_dir, ModeKeys.PREDICT) for test_item in input_function(): content = test_item[0] label = test_item[1].numpy()[0].decode() result = saved_model.signatures['predict'](content) predicted = result['classes'].numpy()[0][0].decode() label_language = mapping[label] predicted_language = mapping[predicted] matches[label_language][predicted_language] += 1 return matches def predict( saved_model: AutoTrackable, mapping: Dict[str, str], text: str ) -> List[Tuple[str, float]]: """Infer a Tensorflow saved model""" content_tensor = tf.constant([text]) predicted = saved_model.signatures['serving_default'](content_tensor) numpy_floats = predicted['scores'][0].numpy() extensions = predicted['classes'][0].numpy() probability_values = (float(value) for value in numpy_floats) languages = (mapping[ext.decode()] for ext in extensions) unsorted_scores = zip(languages, probability_values) scores = sorted(unsorted_scores, 
key=itemgetter(1), reverse=True) return scores def _build_input_fn( data_root_dir: str, mode: ModeKeys, ) -> Callable[[], tf.data.Dataset]: """Generate an input fonction for a Tensorflow model""" pattern = str(Path(data_root_dir).joinpath(DATASET[mode], '*')) def input_function() -> tf.data.Dataset: dataset = tf.data.Dataset dataset = dataset.list_files(pattern, shuffle=True).map(_read_file) if mode == ModeKeys.PREDICT: return dataset.batch(1) if mode == ModeKeys.TRAIN: dataset = dataset.shuffle(Training.SHUFFLE_BUFFER).repeat() return dataset.map(_preprocess).batch(HyperParameter.BATCH_SIZE) return input_function def _serving_input_receiver_fn() -> tf.estimator.export.ServingInputReceiver: """Function to serve model for predictions.""" content = tf.compat.v1.placeholder(tf.string, [None]) receiver_tensors = {'content': content} features = {'content': tf.map_fn(_preprocess_text, content)} return tf.estimator.export.ServingInputReceiver( receiver_tensors=receiver_tensors, features=features, ) def _read_file(filename: str) -> Tuple[tf.Tensor, tf.Tensor]: """Read a source file, return the content and the extension""" data = tf.io.read_file(filename) label = tf.strings.split([filename], '.').values[-1] return data, label def _preprocess( data: tf.Tensor, label: tf.Tensor, ) -> Tuple[Dict[str, tf.Tensor], tf.Tensor]: """Process input data as part of a workflow""" data = _preprocess_text(data) return {'content': data}, label def _preprocess_text(data: tf.Tensor) -> tf.Tensor: """Feature engineering""" padding = tf.constant(['']*HyperParameter.NB_TOKENS) data = tf.strings.bytes_split(data) data = tf.strings.ngrams(data, HyperParameter.N_GRAM) data = tf.concat((data, padding), axis=0) data = data[:HyperParameter.NB_TOKENS] return data
throttle_secs = Training.SHORT_DELAY
conditional_block
guiTest.py
import os import sys import shutil import errno import time import hashlib from selenium import webdriver from selenium.webdriver.common.by import By from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions if "TRAVIS_BUILD_NUMBER" in os.environ: if "SAUCE_USERNAME" not in os.environ: print "No sauce labs login credentials found. Stopping tests..." sys.exit(0) capabilities = {'browserName': "firefox"} capabilities['platform'] = "Windows 7" capabilities['version'] = "48.0" capabilities['screenResolution'] = "1280x1024" capabilities["build"] = os.environ["TRAVIS_BUILD_NUMBER"] capabilities["tunnel-identifier"] = os.environ["TRAVIS_JOB_NUMBER"] # connect to sauce labs username = os.environ["SAUCE_USERNAME"] access_key = os.environ["SAUCE_ACCESS_KEY"] hub_url = "%s:%s@localhost:4445" % (username, access_key) driver = webdriver.Remote(command_executor="http://%s/wd/hub" % hub_url, desired_capabilities=capabilities) else: # local print "Using LOCAL webdriver" profile = webdriver.FirefoxProfile() profile.set_preference("intl.accept_languages", "en") driver = webdriver.Firefox(profile) driver.maximize_window() def write_random_file(size, filename): if not os.path.exists(os.path.dirname(filename)): try: os.makedirs(os.path.dirname(filename)) except OSError as exc: # Guard against race condition if exc.errno != errno.EEXIST: raise with open(filename, 'wb') as fout: fout.write(os.urandom(size)) def sha1_file(filename): BLOCKSIZE = 65536 hasher = hashlib.sha1() with open(filename, 'rb') as afile: buf = afile.read(BLOCKSIZE) while len(buf) > 0: hasher.update(buf) buf = afile.read(BLOCKSIZE) return hasher.hexdigest() def sha1_folder(folder): sha1_dict = {} for root, dirs, files in os.walk(folder): for filename in files: file_path = os.path.join(root, filename) sha1 = sha1_file(file_path) relative_file_path = os.path.relpath(file_path, folder) sha1_dict.update({relative_file_path: sha1}) return sha1_dict
def wait_for_text(time, xpath, text): WebDriverWait(driver, time).until(expected_conditions.text_to_be_present_in_element((By.XPATH, xpath), text)) BACKUP_NAME = "BackupName" PASSWORD = "the_backup_password_is_really_long_and_safe" SOURCE_FOLDER = os.path.abspath("duplicati_gui_test_source") DESTINATION_FOLDER = os.path.abspath("duplicati_gui_test_destination") DESTINATION_FOLDER_DIRECT_RESTORE = os.path.abspath("duplicati_gui_test_destination_direct_restore") RESTORE_FOLDER = os.path.abspath("duplicati_gui_test_restore") DIRECT_RESTORE_FOLDER = os.path.abspath("duplicati_gui_test_direct_restore") # wait 5 seconds for duplicati server to start time.sleep(5) driver.implicitly_wait(10) driver.get("http://localhost:8200/ngax/index.html") if "Duplicati" not in driver.title: raise Exception("Unable to load duplicati GUI!") # Create and hash random files in the source folder write_random_file(1024 * 1024, SOURCE_FOLDER + os.sep + "1MB.test") write_random_file(100 * 1024, SOURCE_FOLDER + os.sep + "subfolder" + os.sep + "100KB.test") sha1_source = sha1_folder(SOURCE_FOLDER) # Dismiss the password request driver.find_element_by_link_text("No, my machine has only a single account").click() # Add new backup driver.find_element_by_link_text("Add backup").click() # Choose the "add new" option driver.find_element_by_id("blank").click() driver.find_element_by_xpath("//input[@class='submit next']").click() # Add new backup - General page time.sleep(1) driver.find_element_by_id("name").send_keys(BACKUP_NAME) driver.find_element_by_id("passphrase").send_keys(PASSWORD) driver.find_element_by_id("repeat-passphrase").send_keys(PASSWORD) driver.find_element_by_id("nextStep1").click() # Add new backup - Destination page driver.find_element_by_link_text("Manually type path").click() driver.find_element_by_id("file_path").send_keys(DESTINATION_FOLDER) driver.find_element_by_id("nextStep2").click() # Add new backup - Source Data page 
driver.find_element_by_id("sourcePath").send_keys(os.path.abspath(SOURCE_FOLDER) + os.sep) driver.find_element_by_id("sourceFolderPathAdd").click() driver.find_element_by_id("nextStep3").click() # Add new backup - Schedule page useScheduleRun = driver.find_element_by_id("useScheduleRun") if useScheduleRun.is_selected(): useScheduleRun.click() driver.find_element_by_id("nextStep4").click() # Add new backup - Options page driver.find_element_by_id("save").click() # Run the backup job and wait for finish driver.find_element_by_link_text(BACKUP_NAME).click() [n for n in driver.find_elements_by_xpath("//dl[@class='taskmenu']/dd/p/span[contains(text(),'Run now')]") if n.is_displayed()][0].click() wait_for_text(60, "//div[@class='task ng-scope']/dl[2]/dd[1]", "(took ") # Restore if len([n for n in driver.find_elements_by_xpath("//span[contains(text(),'Restore files ...')]") if n.is_displayed()]) == 0: driver.find_element_by_link_text(BACKUP_NAME).click() [n for n in driver.find_elements_by_xpath("//span[contains(text(),'Restore files ...')]") if n.is_displayed()][0].click() driver.find_element_by_xpath("//span[contains(text(),'" + SOURCE_FOLDER + "')]") # wait for filelist time.sleep(1) driver.find_element_by_xpath("//restore-file-picker/ul/li/div/a[2]").click() # select root folder checkbox driver.find_element_by_xpath("//form[@id='restore']/div[1]/div[@class='buttons']/a/span[contains(text(), 'Continue')]").click() driver.find_element_by_id("restoretonewpath").click() driver.find_element_by_id("restore_path").send_keys(RESTORE_FOLDER) driver.find_element_by_xpath("//form[@id='restore']/div/div[@class='buttons']/a/span[contains(text(),'Restore')]").click() # wait for restore to finish wait_for_text(60, "//form[@id='restore']/div[3]/h3/div[1]", "Your files and folders have been restored successfully.") # hash restored files sha1_restore = sha1_folder(RESTORE_FOLDER) # cleanup: delete source and restore folder and rename destination folder for direct restore 
shutil.rmtree(SOURCE_FOLDER) shutil.rmtree(RESTORE_FOLDER) os.rename(DESTINATION_FOLDER, DESTINATION_FOLDER_DIRECT_RESTORE) # direct restore driver.find_element_by_link_text("Restore").click() # Choose the "restore direct" option driver.find_element_by_id("direct").click() driver.find_element_by_xpath("//input[@class='submit next']").click() time.sleep(1) driver.find_element_by_link_text("Manually type path").click() driver.find_element_by_id("file_path").send_keys(DESTINATION_FOLDER_DIRECT_RESTORE) driver.find_element_by_id("nextStep1").click() driver.find_element_by_id("password").send_keys(PASSWORD) driver.find_element_by_id("connect").click() driver.find_element_by_xpath("//span[contains(text(),'" + SOURCE_FOLDER + "')]") # wait for filelist time.sleep(1) driver.find_element_by_xpath("//restore-file-picker/ul/li/div/a[2]").click() # select root folder checkbox time.sleep(1) driver.find_element_by_xpath("//form[@id='restore']/div[1]/div[@class='buttons']/a/span[contains(text(), 'Continue')]").click() driver.find_element_by_id("restoretonewpath").click() driver.find_element_by_id("restore_path").send_keys(DIRECT_RESTORE_FOLDER) driver.find_element_by_xpath("//form[@id='restore']/div/div[@class='buttons']/a/span[contains(text(),'Restore')]").click() # wait for restore to finish wait_for_text(60, "//form[@id='restore']/div[3]/h3/div[1]", "Your files and folders have been restored successfully.") # hash direct restore files sha1_direct_restore = sha1_folder(DIRECT_RESTORE_FOLDER) print "Source hashes: " + str(sha1_source) print "Restore hashes: " + str(sha1_restore) print "Direct Restore hashes: " + str(sha1_direct_restore) # Tell Sauce Labs to stop the test driver.quit() if not (sha1_source == sha1_restore and sha1_source == sha1_direct_restore): sys.exit(1) # return with error
random_line_split
guiTest.py
import os import sys import shutil import errno import time import hashlib from selenium import webdriver from selenium.webdriver.common.by import By from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions if "TRAVIS_BUILD_NUMBER" in os.environ: if "SAUCE_USERNAME" not in os.environ: print "No sauce labs login credentials found. Stopping tests..." sys.exit(0) capabilities = {'browserName': "firefox"} capabilities['platform'] = "Windows 7" capabilities['version'] = "48.0" capabilities['screenResolution'] = "1280x1024" capabilities["build"] = os.environ["TRAVIS_BUILD_NUMBER"] capabilities["tunnel-identifier"] = os.environ["TRAVIS_JOB_NUMBER"] # connect to sauce labs username = os.environ["SAUCE_USERNAME"] access_key = os.environ["SAUCE_ACCESS_KEY"] hub_url = "%s:%s@localhost:4445" % (username, access_key) driver = webdriver.Remote(command_executor="http://%s/wd/hub" % hub_url, desired_capabilities=capabilities) else: # local print "Using LOCAL webdriver" profile = webdriver.FirefoxProfile() profile.set_preference("intl.accept_languages", "en") driver = webdriver.Firefox(profile) driver.maximize_window() def write_random_file(size, filename): if not os.path.exists(os.path.dirname(filename)): try: os.makedirs(os.path.dirname(filename)) except OSError as exc: # Guard against race condition if exc.errno != errno.EEXIST: raise with open(filename, 'wb') as fout: fout.write(os.urandom(size)) def sha1_file(filename): BLOCKSIZE = 65536 hasher = hashlib.sha1() with open(filename, 'rb') as afile: buf = afile.read(BLOCKSIZE) while len(buf) > 0: hasher.update(buf) buf = afile.read(BLOCKSIZE) return hasher.hexdigest() def sha1_folder(folder): sha1_dict = {} for root, dirs, files in os.walk(folder): for filename in files: file_path = os.path.join(root, filename) sha1 = sha1_file(file_path) relative_file_path = os.path.relpath(file_path, folder) sha1_dict.update({relative_file_path: sha1}) return sha1_dict def
(time, xpath, text): WebDriverWait(driver, time).until(expected_conditions.text_to_be_present_in_element((By.XPATH, xpath), text)) BACKUP_NAME = "BackupName" PASSWORD = "the_backup_password_is_really_long_and_safe" SOURCE_FOLDER = os.path.abspath("duplicati_gui_test_source") DESTINATION_FOLDER = os.path.abspath("duplicati_gui_test_destination") DESTINATION_FOLDER_DIRECT_RESTORE = os.path.abspath("duplicati_gui_test_destination_direct_restore") RESTORE_FOLDER = os.path.abspath("duplicati_gui_test_restore") DIRECT_RESTORE_FOLDER = os.path.abspath("duplicati_gui_test_direct_restore") # wait 5 seconds for duplicati server to start time.sleep(5) driver.implicitly_wait(10) driver.get("http://localhost:8200/ngax/index.html") if "Duplicati" not in driver.title: raise Exception("Unable to load duplicati GUI!") # Create and hash random files in the source folder write_random_file(1024 * 1024, SOURCE_FOLDER + os.sep + "1MB.test") write_random_file(100 * 1024, SOURCE_FOLDER + os.sep + "subfolder" + os.sep + "100KB.test") sha1_source = sha1_folder(SOURCE_FOLDER) # Dismiss the password request driver.find_element_by_link_text("No, my machine has only a single account").click() # Add new backup driver.find_element_by_link_text("Add backup").click() # Choose the "add new" option driver.find_element_by_id("blank").click() driver.find_element_by_xpath("//input[@class='submit next']").click() # Add new backup - General page time.sleep(1) driver.find_element_by_id("name").send_keys(BACKUP_NAME) driver.find_element_by_id("passphrase").send_keys(PASSWORD) driver.find_element_by_id("repeat-passphrase").send_keys(PASSWORD) driver.find_element_by_id("nextStep1").click() # Add new backup - Destination page driver.find_element_by_link_text("Manually type path").click() driver.find_element_by_id("file_path").send_keys(DESTINATION_FOLDER) driver.find_element_by_id("nextStep2").click() # Add new backup - Source Data page 
driver.find_element_by_id("sourcePath").send_keys(os.path.abspath(SOURCE_FOLDER) + os.sep) driver.find_element_by_id("sourceFolderPathAdd").click() driver.find_element_by_id("nextStep3").click() # Add new backup - Schedule page useScheduleRun = driver.find_element_by_id("useScheduleRun") if useScheduleRun.is_selected(): useScheduleRun.click() driver.find_element_by_id("nextStep4").click() # Add new backup - Options page driver.find_element_by_id("save").click() # Run the backup job and wait for finish driver.find_element_by_link_text(BACKUP_NAME).click() [n for n in driver.find_elements_by_xpath("//dl[@class='taskmenu']/dd/p/span[contains(text(),'Run now')]") if n.is_displayed()][0].click() wait_for_text(60, "//div[@class='task ng-scope']/dl[2]/dd[1]", "(took ") # Restore if len([n for n in driver.find_elements_by_xpath("//span[contains(text(),'Restore files ...')]") if n.is_displayed()]) == 0: driver.find_element_by_link_text(BACKUP_NAME).click() [n for n in driver.find_elements_by_xpath("//span[contains(text(),'Restore files ...')]") if n.is_displayed()][0].click() driver.find_element_by_xpath("//span[contains(text(),'" + SOURCE_FOLDER + "')]") # wait for filelist time.sleep(1) driver.find_element_by_xpath("//restore-file-picker/ul/li/div/a[2]").click() # select root folder checkbox driver.find_element_by_xpath("//form[@id='restore']/div[1]/div[@class='buttons']/a/span[contains(text(), 'Continue')]").click() driver.find_element_by_id("restoretonewpath").click() driver.find_element_by_id("restore_path").send_keys(RESTORE_FOLDER) driver.find_element_by_xpath("//form[@id='restore']/div/div[@class='buttons']/a/span[contains(text(),'Restore')]").click() # wait for restore to finish wait_for_text(60, "//form[@id='restore']/div[3]/h3/div[1]", "Your files and folders have been restored successfully.") # hash restored files sha1_restore = sha1_folder(RESTORE_FOLDER) # cleanup: delete source and restore folder and rename destination folder for direct restore 
shutil.rmtree(SOURCE_FOLDER) shutil.rmtree(RESTORE_FOLDER) os.rename(DESTINATION_FOLDER, DESTINATION_FOLDER_DIRECT_RESTORE) # direct restore driver.find_element_by_link_text("Restore").click() # Choose the "restore direct" option driver.find_element_by_id("direct").click() driver.find_element_by_xpath("//input[@class='submit next']").click() time.sleep(1) driver.find_element_by_link_text("Manually type path").click() driver.find_element_by_id("file_path").send_keys(DESTINATION_FOLDER_DIRECT_RESTORE) driver.find_element_by_id("nextStep1").click() driver.find_element_by_id("password").send_keys(PASSWORD) driver.find_element_by_id("connect").click() driver.find_element_by_xpath("//span[contains(text(),'" + SOURCE_FOLDER + "')]") # wait for filelist time.sleep(1) driver.find_element_by_xpath("//restore-file-picker/ul/li/div/a[2]").click() # select root folder checkbox time.sleep(1) driver.find_element_by_xpath("//form[@id='restore']/div[1]/div[@class='buttons']/a/span[contains(text(), 'Continue')]").click() driver.find_element_by_id("restoretonewpath").click() driver.find_element_by_id("restore_path").send_keys(DIRECT_RESTORE_FOLDER) driver.find_element_by_xpath("//form[@id='restore']/div/div[@class='buttons']/a/span[contains(text(),'Restore')]").click() # wait for restore to finish wait_for_text(60, "//form[@id='restore']/div[3]/h3/div[1]", "Your files and folders have been restored successfully.") # hash direct restore files sha1_direct_restore = sha1_folder(DIRECT_RESTORE_FOLDER) print "Source hashes: " + str(sha1_source) print "Restore hashes: " + str(sha1_restore) print "Direct Restore hashes: " + str(sha1_direct_restore) # Tell Sauce Labs to stop the test driver.quit() if not (sha1_source == sha1_restore and sha1_source == sha1_direct_restore): sys.exit(1) # return with error
wait_for_text
identifier_name
guiTest.py
import os import sys import shutil import errno import time import hashlib from selenium import webdriver from selenium.webdriver.common.by import By from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions if "TRAVIS_BUILD_NUMBER" in os.environ: if "SAUCE_USERNAME" not in os.environ: print "No sauce labs login credentials found. Stopping tests..." sys.exit(0) capabilities = {'browserName': "firefox"} capabilities['platform'] = "Windows 7" capabilities['version'] = "48.0" capabilities['screenResolution'] = "1280x1024" capabilities["build"] = os.environ["TRAVIS_BUILD_NUMBER"] capabilities["tunnel-identifier"] = os.environ["TRAVIS_JOB_NUMBER"] # connect to sauce labs username = os.environ["SAUCE_USERNAME"] access_key = os.environ["SAUCE_ACCESS_KEY"] hub_url = "%s:%s@localhost:4445" % (username, access_key) driver = webdriver.Remote(command_executor="http://%s/wd/hub" % hub_url, desired_capabilities=capabilities) else: # local print "Using LOCAL webdriver" profile = webdriver.FirefoxProfile() profile.set_preference("intl.accept_languages", "en") driver = webdriver.Firefox(profile) driver.maximize_window() def write_random_file(size, filename): if not os.path.exists(os.path.dirname(filename)): try: os.makedirs(os.path.dirname(filename)) except OSError as exc: # Guard against race condition if exc.errno != errno.EEXIST: raise with open(filename, 'wb') as fout: fout.write(os.urandom(size)) def sha1_file(filename): BLOCKSIZE = 65536 hasher = hashlib.sha1() with open(filename, 'rb') as afile: buf = afile.read(BLOCKSIZE) while len(buf) > 0: hasher.update(buf) buf = afile.read(BLOCKSIZE) return hasher.hexdigest() def sha1_folder(folder):
def wait_for_text(time, xpath, text): WebDriverWait(driver, time).until(expected_conditions.text_to_be_present_in_element((By.XPATH, xpath), text)) BACKUP_NAME = "BackupName" PASSWORD = "the_backup_password_is_really_long_and_safe" SOURCE_FOLDER = os.path.abspath("duplicati_gui_test_source") DESTINATION_FOLDER = os.path.abspath("duplicati_gui_test_destination") DESTINATION_FOLDER_DIRECT_RESTORE = os.path.abspath("duplicati_gui_test_destination_direct_restore") RESTORE_FOLDER = os.path.abspath("duplicati_gui_test_restore") DIRECT_RESTORE_FOLDER = os.path.abspath("duplicati_gui_test_direct_restore") # wait 5 seconds for duplicati server to start time.sleep(5) driver.implicitly_wait(10) driver.get("http://localhost:8200/ngax/index.html") if "Duplicati" not in driver.title: raise Exception("Unable to load duplicati GUI!") # Create and hash random files in the source folder write_random_file(1024 * 1024, SOURCE_FOLDER + os.sep + "1MB.test") write_random_file(100 * 1024, SOURCE_FOLDER + os.sep + "subfolder" + os.sep + "100KB.test") sha1_source = sha1_folder(SOURCE_FOLDER) # Dismiss the password request driver.find_element_by_link_text("No, my machine has only a single account").click() # Add new backup driver.find_element_by_link_text("Add backup").click() # Choose the "add new" option driver.find_element_by_id("blank").click() driver.find_element_by_xpath("//input[@class='submit next']").click() # Add new backup - General page time.sleep(1) driver.find_element_by_id("name").send_keys(BACKUP_NAME) driver.find_element_by_id("passphrase").send_keys(PASSWORD) driver.find_element_by_id("repeat-passphrase").send_keys(PASSWORD) driver.find_element_by_id("nextStep1").click() # Add new backup - Destination page driver.find_element_by_link_text("Manually type path").click() driver.find_element_by_id("file_path").send_keys(DESTINATION_FOLDER) driver.find_element_by_id("nextStep2").click() # Add new backup - Source Data page 
driver.find_element_by_id("sourcePath").send_keys(os.path.abspath(SOURCE_FOLDER) + os.sep) driver.find_element_by_id("sourceFolderPathAdd").click() driver.find_element_by_id("nextStep3").click() # Add new backup - Schedule page useScheduleRun = driver.find_element_by_id("useScheduleRun") if useScheduleRun.is_selected(): useScheduleRun.click() driver.find_element_by_id("nextStep4").click() # Add new backup - Options page driver.find_element_by_id("save").click() # Run the backup job and wait for finish driver.find_element_by_link_text(BACKUP_NAME).click() [n for n in driver.find_elements_by_xpath("//dl[@class='taskmenu']/dd/p/span[contains(text(),'Run now')]") if n.is_displayed()][0].click() wait_for_text(60, "//div[@class='task ng-scope']/dl[2]/dd[1]", "(took ") # Restore if len([n for n in driver.find_elements_by_xpath("//span[contains(text(),'Restore files ...')]") if n.is_displayed()]) == 0: driver.find_element_by_link_text(BACKUP_NAME).click() [n for n in driver.find_elements_by_xpath("//span[contains(text(),'Restore files ...')]") if n.is_displayed()][0].click() driver.find_element_by_xpath("//span[contains(text(),'" + SOURCE_FOLDER + "')]") # wait for filelist time.sleep(1) driver.find_element_by_xpath("//restore-file-picker/ul/li/div/a[2]").click() # select root folder checkbox driver.find_element_by_xpath("//form[@id='restore']/div[1]/div[@class='buttons']/a/span[contains(text(), 'Continue')]").click() driver.find_element_by_id("restoretonewpath").click() driver.find_element_by_id("restore_path").send_keys(RESTORE_FOLDER) driver.find_element_by_xpath("//form[@id='restore']/div/div[@class='buttons']/a/span[contains(text(),'Restore')]").click() # wait for restore to finish wait_for_text(60, "//form[@id='restore']/div[3]/h3/div[1]", "Your files and folders have been restored successfully.") # hash restored files sha1_restore = sha1_folder(RESTORE_FOLDER) # cleanup: delete source and restore folder and rename destination folder for direct restore 
shutil.rmtree(SOURCE_FOLDER) shutil.rmtree(RESTORE_FOLDER) os.rename(DESTINATION_FOLDER, DESTINATION_FOLDER_DIRECT_RESTORE) # direct restore driver.find_element_by_link_text("Restore").click() # Choose the "restore direct" option driver.find_element_by_id("direct").click() driver.find_element_by_xpath("//input[@class='submit next']").click() time.sleep(1) driver.find_element_by_link_text("Manually type path").click() driver.find_element_by_id("file_path").send_keys(DESTINATION_FOLDER_DIRECT_RESTORE) driver.find_element_by_id("nextStep1").click() driver.find_element_by_id("password").send_keys(PASSWORD) driver.find_element_by_id("connect").click() driver.find_element_by_xpath("//span[contains(text(),'" + SOURCE_FOLDER + "')]") # wait for filelist time.sleep(1) driver.find_element_by_xpath("//restore-file-picker/ul/li/div/a[2]").click() # select root folder checkbox time.sleep(1) driver.find_element_by_xpath("//form[@id='restore']/div[1]/div[@class='buttons']/a/span[contains(text(), 'Continue')]").click() driver.find_element_by_id("restoretonewpath").click() driver.find_element_by_id("restore_path").send_keys(DIRECT_RESTORE_FOLDER) driver.find_element_by_xpath("//form[@id='restore']/div/div[@class='buttons']/a/span[contains(text(),'Restore')]").click() # wait for restore to finish wait_for_text(60, "//form[@id='restore']/div[3]/h3/div[1]", "Your files and folders have been restored successfully.") # hash direct restore files sha1_direct_restore = sha1_folder(DIRECT_RESTORE_FOLDER) print "Source hashes: " + str(sha1_source) print "Restore hashes: " + str(sha1_restore) print "Direct Restore hashes: " + str(sha1_direct_restore) # Tell Sauce Labs to stop the test driver.quit() if not (sha1_source == sha1_restore and sha1_source == sha1_direct_restore): sys.exit(1) # return with error
sha1_dict = {} for root, dirs, files in os.walk(folder): for filename in files: file_path = os.path.join(root, filename) sha1 = sha1_file(file_path) relative_file_path = os.path.relpath(file_path, folder) sha1_dict.update({relative_file_path: sha1}) return sha1_dict
identifier_body
guiTest.py
import os import sys import shutil import errno import time import hashlib from selenium import webdriver from selenium.webdriver.common.by import By from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions if "TRAVIS_BUILD_NUMBER" in os.environ: if "SAUCE_USERNAME" not in os.environ: print "No sauce labs login credentials found. Stopping tests..." sys.exit(0) capabilities = {'browserName': "firefox"} capabilities['platform'] = "Windows 7" capabilities['version'] = "48.0" capabilities['screenResolution'] = "1280x1024" capabilities["build"] = os.environ["TRAVIS_BUILD_NUMBER"] capabilities["tunnel-identifier"] = os.environ["TRAVIS_JOB_NUMBER"] # connect to sauce labs username = os.environ["SAUCE_USERNAME"] access_key = os.environ["SAUCE_ACCESS_KEY"] hub_url = "%s:%s@localhost:4445" % (username, access_key) driver = webdriver.Remote(command_executor="http://%s/wd/hub" % hub_url, desired_capabilities=capabilities) else: # local print "Using LOCAL webdriver" profile = webdriver.FirefoxProfile() profile.set_preference("intl.accept_languages", "en") driver = webdriver.Firefox(profile) driver.maximize_window() def write_random_file(size, filename): if not os.path.exists(os.path.dirname(filename)): try: os.makedirs(os.path.dirname(filename)) except OSError as exc: # Guard against race condition if exc.errno != errno.EEXIST: raise with open(filename, 'wb') as fout: fout.write(os.urandom(size)) def sha1_file(filename): BLOCKSIZE = 65536 hasher = hashlib.sha1() with open(filename, 'rb') as afile: buf = afile.read(BLOCKSIZE) while len(buf) > 0: hasher.update(buf) buf = afile.read(BLOCKSIZE) return hasher.hexdigest() def sha1_folder(folder): sha1_dict = {} for root, dirs, files in os.walk(folder): for filename in files:
return sha1_dict def wait_for_text(time, xpath, text): WebDriverWait(driver, time).until(expected_conditions.text_to_be_present_in_element((By.XPATH, xpath), text)) BACKUP_NAME = "BackupName" PASSWORD = "the_backup_password_is_really_long_and_safe" SOURCE_FOLDER = os.path.abspath("duplicati_gui_test_source") DESTINATION_FOLDER = os.path.abspath("duplicati_gui_test_destination") DESTINATION_FOLDER_DIRECT_RESTORE = os.path.abspath("duplicati_gui_test_destination_direct_restore") RESTORE_FOLDER = os.path.abspath("duplicati_gui_test_restore") DIRECT_RESTORE_FOLDER = os.path.abspath("duplicati_gui_test_direct_restore") # wait 5 seconds for duplicati server to start time.sleep(5) driver.implicitly_wait(10) driver.get("http://localhost:8200/ngax/index.html") if "Duplicati" not in driver.title: raise Exception("Unable to load duplicati GUI!") # Create and hash random files in the source folder write_random_file(1024 * 1024, SOURCE_FOLDER + os.sep + "1MB.test") write_random_file(100 * 1024, SOURCE_FOLDER + os.sep + "subfolder" + os.sep + "100KB.test") sha1_source = sha1_folder(SOURCE_FOLDER) # Dismiss the password request driver.find_element_by_link_text("No, my machine has only a single account").click() # Add new backup driver.find_element_by_link_text("Add backup").click() # Choose the "add new" option driver.find_element_by_id("blank").click() driver.find_element_by_xpath("//input[@class='submit next']").click() # Add new backup - General page time.sleep(1) driver.find_element_by_id("name").send_keys(BACKUP_NAME) driver.find_element_by_id("passphrase").send_keys(PASSWORD) driver.find_element_by_id("repeat-passphrase").send_keys(PASSWORD) driver.find_element_by_id("nextStep1").click() # Add new backup - Destination page driver.find_element_by_link_text("Manually type path").click() driver.find_element_by_id("file_path").send_keys(DESTINATION_FOLDER) driver.find_element_by_id("nextStep2").click() # Add new backup - Source Data page 
driver.find_element_by_id("sourcePath").send_keys(os.path.abspath(SOURCE_FOLDER) + os.sep) driver.find_element_by_id("sourceFolderPathAdd").click() driver.find_element_by_id("nextStep3").click() # Add new backup - Schedule page useScheduleRun = driver.find_element_by_id("useScheduleRun") if useScheduleRun.is_selected(): useScheduleRun.click() driver.find_element_by_id("nextStep4").click() # Add new backup - Options page driver.find_element_by_id("save").click() # Run the backup job and wait for finish driver.find_element_by_link_text(BACKUP_NAME).click() [n for n in driver.find_elements_by_xpath("//dl[@class='taskmenu']/dd/p/span[contains(text(),'Run now')]") if n.is_displayed()][0].click() wait_for_text(60, "//div[@class='task ng-scope']/dl[2]/dd[1]", "(took ") # Restore if len([n for n in driver.find_elements_by_xpath("//span[contains(text(),'Restore files ...')]") if n.is_displayed()]) == 0: driver.find_element_by_link_text(BACKUP_NAME).click() [n for n in driver.find_elements_by_xpath("//span[contains(text(),'Restore files ...')]") if n.is_displayed()][0].click() driver.find_element_by_xpath("//span[contains(text(),'" + SOURCE_FOLDER + "')]") # wait for filelist time.sleep(1) driver.find_element_by_xpath("//restore-file-picker/ul/li/div/a[2]").click() # select root folder checkbox driver.find_element_by_xpath("//form[@id='restore']/div[1]/div[@class='buttons']/a/span[contains(text(), 'Continue')]").click() driver.find_element_by_id("restoretonewpath").click() driver.find_element_by_id("restore_path").send_keys(RESTORE_FOLDER) driver.find_element_by_xpath("//form[@id='restore']/div/div[@class='buttons']/a/span[contains(text(),'Restore')]").click() # wait for restore to finish wait_for_text(60, "//form[@id='restore']/div[3]/h3/div[1]", "Your files and folders have been restored successfully.") # hash restored files sha1_restore = sha1_folder(RESTORE_FOLDER) # cleanup: delete source and restore folder and rename destination folder for direct restore 
shutil.rmtree(SOURCE_FOLDER) shutil.rmtree(RESTORE_FOLDER) os.rename(DESTINATION_FOLDER, DESTINATION_FOLDER_DIRECT_RESTORE) # direct restore driver.find_element_by_link_text("Restore").click() # Choose the "restore direct" option driver.find_element_by_id("direct").click() driver.find_element_by_xpath("//input[@class='submit next']").click() time.sleep(1) driver.find_element_by_link_text("Manually type path").click() driver.find_element_by_id("file_path").send_keys(DESTINATION_FOLDER_DIRECT_RESTORE) driver.find_element_by_id("nextStep1").click() driver.find_element_by_id("password").send_keys(PASSWORD) driver.find_element_by_id("connect").click() driver.find_element_by_xpath("//span[contains(text(),'" + SOURCE_FOLDER + "')]") # wait for filelist time.sleep(1) driver.find_element_by_xpath("//restore-file-picker/ul/li/div/a[2]").click() # select root folder checkbox time.sleep(1) driver.find_element_by_xpath("//form[@id='restore']/div[1]/div[@class='buttons']/a/span[contains(text(), 'Continue')]").click() driver.find_element_by_id("restoretonewpath").click() driver.find_element_by_id("restore_path").send_keys(DIRECT_RESTORE_FOLDER) driver.find_element_by_xpath("//form[@id='restore']/div/div[@class='buttons']/a/span[contains(text(),'Restore')]").click() # wait for restore to finish wait_for_text(60, "//form[@id='restore']/div[3]/h3/div[1]", "Your files and folders have been restored successfully.") # hash direct restore files sha1_direct_restore = sha1_folder(DIRECT_RESTORE_FOLDER) print "Source hashes: " + str(sha1_source) print "Restore hashes: " + str(sha1_restore) print "Direct Restore hashes: " + str(sha1_direct_restore) # Tell Sauce Labs to stop the test driver.quit() if not (sha1_source == sha1_restore and sha1_source == sha1_direct_restore): sys.exit(1) # return with error
file_path = os.path.join(root, filename) sha1 = sha1_file(file_path) relative_file_path = os.path.relpath(file_path, folder) sha1_dict.update({relative_file_path: sha1})
conditional_block
mm.lang.js
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. /** * @fileoverview Moodle mobile lang lib. * @author <a href="mailto:jleyva@cvaconsulting.com">Juan Leyva</a> * @version 1.2 */ /** * @namespace Holds all the MoodleMobile language functionality. */ MM.lang = { strings: [], current: '', locale: '', /** * Determine the language for the app. * We check first if the user has selected by configuration a language.. * then the Mobile device locale (but we have to check that we have a language file for that locale) * then the remote site lang (if the remote site is loaded, we can be at the login screen) * finally, the language in the config.json file (default language) * * @return {string} Language id (en, es, etc...) */ determine: function() { // User preferences. var lang = MM.getConfig('lang'); if (typeof(lang) != 'undefined') { return lang; } // Locale // MM.lang.locale is loaded by Phonegap. if (MM.lang.locale) { lang = MM.lang.locale.toLowerCase().replace("-", "_"); if (typeof(MM.config.languages[lang]) != "undefined") { return lang; } else if(lang.length > 2) { // Try without the region/country. lang = lang.substr(0, 2); if (typeof(MM.config.languages[lang]) != "undefined") { return lang; } } } // Browser language. RFC 4646. 
var browserLang = window.navigator.userLanguage || window.navigator.language; // Normalize i.e: pt-BR to pt_br. browserLang = browserLang.toLowerCase().replace("-", "_"); if (typeof(MM.config.languages[browserLang]) != "undefined") { return browserLang; } else if(browserLang.length > 2) { // Try without the region/country. browserLang = browserLang.substr(0, 2); if (typeof(MM.config.languages[browserLang]) != "undefined") { return browserLang; } } // Default site lang. if (typeof(MM.config.current_site) != 'undefined' && MM.config.current_site && typeof(MM.config.current_site.lang) != 'undefined' && typeof(MM.config.languages[MM.config.current_site.lang]) != "undefined") { return MM.config.current_site.lang; } // Default language. return MM.config.default_lang; }, setup: function(component) { MM.log('Strings: Lang setup for ' + component); var cacheEl = ""; if (typeof(component) == 'undefined') { component = 'core'; cacheEl = 'core'; } if (component != 'core') { cacheEl = MM.plugins[component].settings.lang.component; } var lang = MM.lang.determine(); // Try to find in cache the language strings. // Languages are automatically sync and stored in cache, forcing to not expire. // Check if we are inside a site first, because languages can be set up in the login screen. 
if (typeof(MM.config.current_site) != "undefined" && MM.config.current_site) { var langStrings = MM.cache.getElement('lang-' + cacheEl + '-' + lang, true); if (langStrings) { MM.lang.loadLang(component, lang, langStrings); MM.log('Strings loaded from cache (remote syte)', 'Strings'); } } }, loadLang: function(component, lang, strings) { MM.log('Strings: Loading lang ' + lang + ' for component ' + component); MM.lang.current = lang; if (typeof(MM.lang.strings[lang]) == 'undefined') { MM.lang.strings[lang] = []; } if (strings && Object.keys(strings).length > 0) { MM.lang.strings[lang][component] = strings; } }, loadPluginLang: function(component, strings) { MM.log('Strings: Loading plugin lang ' + component); if (!MM.lang.current) { MM.lang.current = 'en'; MM.lang.strings['en'] = []; } // Try to find in cache the language strings. // Languages are automatically sync and stored in cache, forcing to not expire. var cacheStrings = MM.cache.getElement('lang-' + component + '-' + MM.lang.current, true); if (cacheStrings) { strings = cacheStrings; MM.log('Strings: Plugin '+component+' Strings loaded from cache (remote syte)'); } MM.lang.strings[MM.lang.current][component] = strings; if (MM.lang.current != 'en') { MM.lang.strings['en'][component] = strings; } }, pluginName: function(plugin) { if (MM.plugins[plugin].settings.lang.component != 'core') { return MM.lang.s('plugin' + plugin + 'name', plugin); } return MM.lang.s(plugin); }, /** * Main function for translating strings * * @this {MM.lang} * @param {string} id The unique id of the string to be translated. * @param {string} component Core for regular strings or pluginname for plugins. */ s: function(id, component) { if (typeof(component) == 'undefined') { component = 'core'; } var translated = ''; // First we check if we find the string in the current language. 
if (typeof(MM.lang.strings[MM.lang.current][component]) != 'undefined' && typeof(MM.lang.strings[MM.lang.current][component][id]) !== 'undefined' ) { translated = MM.lang.strings[MM.lang.current][component][id]; } // If not, we look for the string in the default language "english" else if (typeof(MM.lang.strings['en']) != 'undefined' && typeof(MM.lang.strings['en'][component]) !== 'undefined' && typeof(MM.lang.strings['en'][component][id]) !== 'undefined') { translated = MM.lang.strings['en'][component][id]; } // If not found yet, we look for the string in the base language file (lang/en.json) if (!translated && component == 'core' && MM.lang.base[id]) { translated = MM.lang.base[id]; } // If not found yet (for plugins only) we look for the string in the base lang also (plugin/lang/en.json). if (!translated && component != "core" && MM.plugins[component].settings.lang.strings && MM.plugins[component].settings.lang.strings[id] !== 'undefined') { translated = MM.plugins[component].settings.lang.strings[id]; } // For missing strings, we use the [string] notation. if (!translated) { translated = '[[' + id + ']]'; } return translated; }, sync: function(forced) { MM.log('Executing lang sync function', 'Sync'); if (forced) { MM.Router.navigate(""); } var lang = MM.lang.determine(); if (MM.deviceConnected() && MM.getConfig('sync_lang_on')) { var data = { 'component': 'mobile', 'lang': lang
MM.moodleWSCall('core_get_component_strings', data, function(strings) { var stringsFormatted = {}; if (strings.length > 0) { $.each(strings, function(index, string) { stringsFormatted[string.stringid] = string.string; }); } MM.cache.addElement('lang-core-' + lang, stringsFormatted, 'lang'); if (forced) { MM.popMessage(MM.lang.s("langsynced") + " (" + strings.length + ") " + MM.lang.s("strings")); } }, {silently: true, getFromCache: false}); for (var el in MM.plugins) { var plugin = MM.plugins[el]; var component = plugin.settings.lang.component; if (component != 'core') { var data = { 'component': component, 'lang': lang }; MM.log('Sync: Loading lang from remtote site for component: ' + component); MM.moodleWSCall('core_get_component_strings', data, function(strings) { var stringsFormatted = {}; if (strings.length > 0) { $.each(strings, function(index, string) { stringsFormatted[string.stringid] = string.string; }); } MM.cache.addElement( 'lang-' + data.component + '-' + lang, stringsFormatted, 'lang' ); }, {silently: true} ); } } } } };
}; MM.log('Loading lang file from remote site for core', 'Sync');
random_line_split
issue-52742.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![feature(nll)] #![feature(in_band_lifetimes)] struct Foo<'a, 'b> { x: &'a u32, y: &'b u32, } struct Bar<'b> {
impl Foo<'_, '_> { fn take_bar(&mut self, b: Bar<'_>) { self.y = b.z //~^ ERROR unsatisfied lifetime constraints } } fn main() { }
z: &'b u32 }
random_line_split
issue-52742.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![feature(nll)] #![feature(in_band_lifetimes)] struct
<'a, 'b> { x: &'a u32, y: &'b u32, } struct Bar<'b> { z: &'b u32 } impl Foo<'_, '_> { fn take_bar(&mut self, b: Bar<'_>) { self.y = b.z //~^ ERROR unsatisfied lifetime constraints } } fn main() { }
Foo
identifier_name
issue-52742.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![feature(nll)] #![feature(in_band_lifetimes)] struct Foo<'a, 'b> { x: &'a u32, y: &'b u32, } struct Bar<'b> { z: &'b u32 } impl Foo<'_, '_> { fn take_bar(&mut self, b: Bar<'_>)
} fn main() { }
{ self.y = b.z //~^ ERROR unsatisfied lifetime constraints }
identifier_body
hiv-program-snapshot.component.ts
import { OnInit, Component, Input, Output, EventEmitter } from '@angular/core'; import { Subscription } from 'rxjs'; import { take, finalize } from 'rxjs/operators'; import * as _ from 'lodash'; import * as moment from 'moment'; import { Patient } from '../../../models/patient.model'; import { HivSummaryResourceService } from '../../../etl-api/hiv-summary-resource.service'; import { LocationResourceService } from '../../../openmrs-api/location-resource.service'; import { EncounterResourceService } from 'src/app/openmrs-api/encounter-resource.service'; import { UserDefaultPropertiesService } from '../../../user-default-properties/user-default-properties.service'; import { CervicalCancerScreeningSummaResourceService } from './../../../etl-api/cervical-cancer-screening-summary-resource.service'; const mdtProgramUuid = 'c4246ff0-b081-460c-bcc5-b0678012659e'; const stdProgramUuid = '781d85b0-1359-11df-a1f1-0026b9348838'; const HivNegativesProgram = [ 'c19aec66-1a40-4588-9b03-b6be55a8dd1d', '96047aaf-7ab3-45e9-be6a-b61810fe617d' ]; @Component({ selector: 'hiv-snapshot', styleUrls: ['./hiv-program-snapshot.component.css'], templateUrl: './hiv-program-snapshot.component.html' }) export class HivProgramSnapshotComponent implements OnInit { @Input() public set enrolledProgrames(enrolledProgrames) { this.patientPrograms = enrolledProgrames; } @Input() public set program(program) { this.showViremiaAlert = program.uuid === mdtProgramUuid ? true : false; this.hasMoriskyScore = program.uuid === stdProgramUuid ? 
true : false; this.curProgram = program; _.each(HivNegativesProgram, (p) => { if (p === program.uuid) { this.displayProgram = false; } }); } @Input() public patient: Patient; @Output() public addBackground = new EventEmitter(); public hasError = false; public hasData = false; public hasMoriskyScore = false; public clinicalEncounters: any[] = []; public patientData: any = {}; public loadingData = false; public hasLoadedData = false; public prev_encounter_date: any = ''; public isVirallyUnsuppressed = false; public patientCareStatus: any; public hasTransferEncounter = false; public latestEncounterLocation: any = {}; public hasSubsequentClinicalEncounter = false; public resolvedCareStatus: any; public showCareStatus = true; public backgroundColor: any = { pink: '#FFC0CB', yellow: '#FFFF00' }; public viremiaAlert: string; public showViremiaAlert: boolean; public lowViremia: boolean; public highViremia: boolean; public currentPatientSub: Subscription; public _patient: Patient = new Patient({}); public moriskyScore: any = ''; public moriskyScore4: any = ''; public moriskyScore8: any = ''; public ismoriskyScore8 = false; public ismoriskyScore4 = false; public moriskyDenominator: any = ''; public moriskyRating: any = ''; public isMoriskyScorePoorOrInadequate = false; public hivDisclosureStatus: any; public latestCervicalScreeningSummary = []; public cervicalScreeningSummary = []; private obs: any[] = []; private gbvScreeningResult: any; private curProgram: any; private patientPrograms: any; public displayProgram = true; public gbvScreeningLabel: String; constructor( private hivSummaryResourceService: HivSummaryResourceService, private encounterResourceService: EncounterResourceService, private locationResource: LocationResourceService, private userDefaultPropertiesService: UserDefaultPropertiesService, private cervicalCancerScreeningSummaryService: CervicalCancerScreeningSummaResourceService ) {} public ngOnInit() { _.delay( (patientUuid) => { if (_.isNil(this.patient)) { 
this.hasError = true; } else { this.hasData = false; this.getHivSummary(patientUuid); this.getPatientCervicalScreeningSummary(patientUuid); this.patient.person.age > 19 ? (this.gbvScreeningLabel = 'GBV Screening') : (this.gbvScreeningLabel = 'VAC Screening'); } }, 0, this.patient.uuid ); this.getMoriskyScore(); } public getHivSummary(patientUuid) { this.loadingData = true; this.hivSummaryResourceService .getHivSummary(patientUuid, 0, 10) .pipe(take(1)) .subscribe((results) => { let latestVlResult: any; let latestVlDate = ''; let latestVl = null; this.loadingData = false; this.hasLoadedData = true; if (results[0]) { latestVlResult = this.getlatestVlResult(results); latestVlDate = latestVlResult.vl_1_date; latestVl = latestVlResult.vl_1; latestVl = latestVlResult.vl_1; this.patientCareStatus = results[0].patient_care_status; this.hivDisclosureStatus = results[0].hiv_status_disclosed === 1 ? 'Yes' : 'No'; if (this.showViremiaAlert) { this.checkViremia(latestVl); } this.gbvScreeningResult = this.checkGbvScreening( results[0].gbv_screening_result ); } this.clinicalEncounters = this.getClinicalEncounters(results); const latestClinicalEncounter = _.first(this.clinicalEncounters); this.hasTransferEncounter = this.checkIfHasTransferEncounter(results); const transferEncounterIndex = this.getIndexOfTransferEncounter( results ); // Did the patient have a clinical encounter following their transfer encounter i.e. did they return to care? this.hasSubsequentClinicalEncounter = results.indexOf(latestClinicalEncounter) < transferEncounterIndex ? 
true : false; this.patientData = _.first(this.clinicalEncounters); const patientDataCopy = this.patientData; if (!_.isNil(this.patientData)) { // assign latest vl and vl_1_date this.patientData = Object.assign(patientDataCopy, { vl_1_date: latestVlDate, vl_1: latestVl }); // flag red if VL > 1000 && (vl_1_date > (arv_start_date + 6 months)) if ( (this.patientData.vl_1 > 1000 && moment(this.patientData.vl_1_date) > moment(this.patientData.arv_start_date).add(6, 'months')) || this.patientData.prev_arv_line !== this.patientData.cur_arv_line ) { this.isVirallyUnsuppressed = true; } this.hasData = true; this.latestEncounterLocation = null; if (this.patientData.location_uuid) { this.resolveLastEncounterLocation(this.patientData.location_uuid); } } }); } public resolveLastEncounterLocation(location_uuid) { this.locationResource .getLocationByUuid(location_uuid, true) .pipe( finalize(() => { this.resolvedCareStatus = this.getPatientCareStatus( this.patientCareStatus ); }) ) .subscribe( (location) => { this.latestEncounterLocation = location; }, (error) => { console.error('Error resolving locations', error); } ); } public getPatientCareStatus(care_status_id: any) { const translateMap = { '159': 'DECEASED', '9079': 'UNTRACEABLE', '9080': 'PROCESS OF BEING TRACED', '9036': 'HIV NEGATIVE, NO LONGER AT RISK', '9083': 'SELF DISENGAGED FROM CARE', '6101': 'CONTINUE WITH CARE', '1285': 'TRANSFER CARE TO OTHER CENTER', '1286': 'TRANSFER TO AMPATH FACILITY', '1287': 'TRANSFER TO NON-AMPATH FACILITY', '9068': 'TRANSFER TO AMPATH FACILITY, NON-AMRS', '9504': 'TRANSFER TO MATERNAL CHILD HEALTH', '1594': 'PATIENT TRANSFERRED OUT', '9578': 'ENROLL IN AMPATH FACILITY', '9164': 'ENROLL CARE IN ANOTHER HEALTH FACILITY', '1732': 'AMPATH CLINIC TRANSFER', '9579': 'CONTINUE CARE IN OTHER FACILITY', '9580': 'FOLLOW-UP CARE PLAN, NOT SURE', '5622': 'OTHER', '10502': 'NON AMPATH CLINIC TRANSFER' }; /* if the patient transferred out and their care status is 'Continue with Care' despite them not 
returning to care, apply a yellow background on their summary snapshot to mark them out as a Transfer Out. */ /* if the patient is active in care with a care status of 'Continue with Care' and they are past their RTC date by over 1 week, apply a pink background to their snapshot summary and hide their care status. */ if (care_status_id === 6101) { if (this.hasTransferEncounter && !this.patientReturnedToCare()) { this.showCareStatus = false; this.showYellowBackground(); } else if ( moment(this.patientData.rtc_date).add(1, 'week') < moment(new Date()) ) { this.showPinkBackground(); } } // if patient is a Transfer Out, apply a yellow background to their snapshot summary if ( (this.hasTransferEncounter && this.isNonAmpathTransferOut(care_status_id)) || this.isIntraAmpathTransferFromCurrentLocation(care_status_id) ) { this.showYellowBackground(); } return this._toProperCase(translateMap[care_status_id]); } private checkIfHasTransferEncounter(summaries: any[]): boolean { if (summaries) { return _.some(summaries, (summary: any) => { return ( summary.encounter_type === 116 && summary.encounter_type_name === 'TRANSFERENCOUNTER' ); }); } } private getIndexOfTransferEncounter(summaries: any[]): number { if (summaries) { return _.findIndex(summaries, (summary: any) => { return ( summary.encounter_type === 116 && summary.encounter_type_name === 'TRANSFERENCOUNTER' ); }); } } private getClinicalEncounters(summaries: any[]): any[] { if (summaries) { return _.filter(summaries, (summary: any) => { return summary.is_clinical_encounter === 1; }); } } private showPinkBackground(): void { const color = this.backgroundColor.pink; this.addBackground.emit(color); } private showYellowBackground(): void { const color = this.backgroundColor.yellow; this.addBackground.emit(color); } private patientReturnedToCare(): boolean { return this.hasSubsequentClinicalEncounter ? true : false; } private isNonAmpathTransferOut(care_status_id)
private isIntraAmpathTransferFromCurrentLocation(care_status_id) { const intraAmpathTransferOutConceptIds = [1285, 1286, 9068, 9504]; if ( intraAmpathTransferOutConceptIds.includes(care_status_id) && this.hasMatchingLocation() ) { return true; } if ( care_status_id === 9080 && this.hasTransferEncounter && this.hasMatchingLocation() ) { return true; } if (care_status_id === 1594 && this.hasMatchingLocation()) { return true; } return false; } private hasMatchingLocation() { const currentlyLoggedInLocation = this.userDefaultPropertiesService.getCurrentUserDefaultLocation(); if (this.latestEncounterLocation) { return this.latestEncounterLocation.display === currentlyLoggedInLocation; } } private getlatestVlResult(hivSummaryData) { const orderByVlDate = _.orderBy( hivSummaryData, (hivSummary) => { return moment(hivSummary.vl_1_date); }, ['desc'] ); return orderByVlDate[0]; } private _toProperCase(text: string) { text = text || ''; return text.replace(/\w\S*/g, (txt) => { return txt.charAt(0).toUpperCase() + txt.substr(1).toLowerCase(); }); } private checkViremia(latestVl) { if (latestVl >= 1 && latestVl <= 999) { this.lowViremia = true; this.viremiaAlert = 'Low'; } if (latestVl >= 1000) { this.highViremia = true; this.viremiaAlert = 'High'; } } private checkGbvScreening(screeningResult) { if ( screeningResult === 1 && this.curProgram.uuid === this.patientPrograms[0].programUuid ? 
true : false ) { return 'POSITIVE'; } return false; } public getMoriskyScore() { const previousEncounters = this.getPreviousEncounters( this.patient.encounters ); this.getPreviousEncounterDetails(previousEncounters).then((data) => { this.obs = data[0].obs; this.obs.forEach((obs) => { const morisky4_concept_uuid = '315472dc-2b5e-4add-b3b7-bbcf21a8959b'; const morisky8_concept_uuid = '857caa4e-b566-4a43-ab78-f911c1a8a727'; if (obs.concept.uuid === morisky4_concept_uuid) { this.moriskyScore4 = obs.value; this.ismoriskyScore4 = true; } else if (obs.concept.uuid === morisky8_concept_uuid) { this.ismoriskyScore8 = true; this.moriskyScore8 = obs.value; } }); if (this.ismoriskyScore8) { this.getMorisky8(); } else if (!this.ismoriskyScore8 && this.ismoriskyScore4) { this.getMorisky4(); } else if (!this.ismoriskyScore8 && !this.ismoriskyScore4) { this.setNullMorisky(); } if (this.moriskyScore >= 0 && this.moriskyScore <= 0.25) { this.isMoriskyScorePoorOrInadequate = false; } else if (this.moriskyScore >= 0.5) { this.isMoriskyScorePoorOrInadequate = true; } }); } public getAllEncounters(encounters) { const allEncounters = []; encounters = this.patient.encounters; _.each(encounters, (encounter: any) => { allEncounters.push(encounter); }); return allEncounters; } public getPreviousEncounters(allEncounters) { const previousEncounters = []; _.each(allEncounters, (encounter: any) => { const encounterType = encounter.encounterType.uuid; const encounterDate = moment(encounter.encounterDatetime).format( 'YYYY-MM-DD-HH' ); if (encounterType === '8d5b2be0-c2cc-11de-8d13-0010c6dffd0f') { // Adult Return encounter if ( encounterDate === this.getLastAdultReturnEncounterDate(allEncounters) ) { previousEncounters.push(encounter); } } }); return previousEncounters; } public getLastAdultReturnEncounterDate(allEncounters) { const max_date: any[] = []; _.each(allEncounters, (encounter: any) => { const encounterDate = moment(encounter.encounterDatetime).format( 'YYYY-MM-DD-HH' ); const today = 
moment().format('YYYY-MM-DD-HH'); if (encounterDate !== today) { max_date.push(encounterDate); } }); return this.getMaximumDate(max_date); } public getPreviousEncounterDetails(previousEncounters) { return new Promise((resolve, reject) => { const encounterWithDetails = []; let encounterCount = 0; let resultCount = 0; const checkCount = () => { if (resultCount === encounterCount) { resolve(encounterWithDetails); } }; _.each(previousEncounters, (encounterDetail: any) => { const encounterUuid = encounterDetail.uuid; encounterCount++; this.encounterResourceService .getEncounterByUuid(encounterUuid) .pipe( /* tslint:disable-next-line: no-shadowed-variable */ take(1) ) .subscribe((encDetail) => { encounterWithDetails.push(encDetail); resultCount++; checkCount(); }); }); }); } public getMorisky4() { this.moriskyScore = this.moriskyScore4; this.moriskyDenominator = '/4'; if (this.moriskyScore === 0) { this.moriskyRating = 'Good'; } else if (this.moriskyScore > 0 && this.moriskyScore < 3) { this.moriskyRating = 'Inadequate'; } } public getMorisky8() { this.moriskyScore = this.moriskyScore8; this.moriskyDenominator = '/8'; this.moriskyRating = 'Poor'; this.isMoriskyScorePoorOrInadequate = true; } public setNullMorisky() { this.moriskyScore = ''; this.moriskyDenominator = ''; this.moriskyRating = 'No value'; } public getMaximumDate(all_dates) { let max_dt = all_dates[0], max_dtObj = new Date(all_dates[0]); all_dates.forEach(function (dt, index) { if (new Date(dt) > max_dtObj) { max_dt = dt; max_dtObj = new Date(dt); } }); return max_dt; } public getPatientCervicalScreeningSummary(patientUuid: string): void { this.cervicalCancerScreeningSummaryService .getCervicalCancerScreeningSummary(patientUuid) .subscribe( (result) => { if (result) { this.cervicalScreeningSummary = result; if (result.length > 0) { this.latestCervicalScreeningSummary = result[0]; } } }, (error) => { console.log('Error', error); } ); } }
{ return ( care_status_id === 1287 || care_status_id === 5622 || care_status_id === 10502 ); }
identifier_body
hiv-program-snapshot.component.ts
import { OnInit, Component, Input, Output, EventEmitter } from '@angular/core'; import { Subscription } from 'rxjs'; import { take, finalize } from 'rxjs/operators'; import * as _ from 'lodash'; import * as moment from 'moment'; import { Patient } from '../../../models/patient.model'; import { HivSummaryResourceService } from '../../../etl-api/hiv-summary-resource.service'; import { LocationResourceService } from '../../../openmrs-api/location-resource.service'; import { EncounterResourceService } from 'src/app/openmrs-api/encounter-resource.service'; import { UserDefaultPropertiesService } from '../../../user-default-properties/user-default-properties.service'; import { CervicalCancerScreeningSummaResourceService } from './../../../etl-api/cervical-cancer-screening-summary-resource.service'; const mdtProgramUuid = 'c4246ff0-b081-460c-bcc5-b0678012659e'; const stdProgramUuid = '781d85b0-1359-11df-a1f1-0026b9348838'; const HivNegativesProgram = [ 'c19aec66-1a40-4588-9b03-b6be55a8dd1d', '96047aaf-7ab3-45e9-be6a-b61810fe617d' ]; @Component({ selector: 'hiv-snapshot', styleUrls: ['./hiv-program-snapshot.component.css'], templateUrl: './hiv-program-snapshot.component.html' }) export class HivProgramSnapshotComponent implements OnInit { @Input() public set enrolledProgrames(enrolledProgrames) { this.patientPrograms = enrolledProgrames; } @Input() public set program(program) { this.showViremiaAlert = program.uuid === mdtProgramUuid ? true : false; this.hasMoriskyScore = program.uuid === stdProgramUuid ? 
true : false; this.curProgram = program; _.each(HivNegativesProgram, (p) => { if (p === program.uuid) { this.displayProgram = false; } }); } @Input() public patient: Patient; @Output() public addBackground = new EventEmitter(); public hasError = false; public hasData = false; public hasMoriskyScore = false; public clinicalEncounters: any[] = []; public patientData: any = {}; public loadingData = false; public hasLoadedData = false; public prev_encounter_date: any = ''; public isVirallyUnsuppressed = false; public patientCareStatus: any; public hasTransferEncounter = false; public latestEncounterLocation: any = {}; public hasSubsequentClinicalEncounter = false; public resolvedCareStatus: any; public showCareStatus = true; public backgroundColor: any = { pink: '#FFC0CB', yellow: '#FFFF00' }; public viremiaAlert: string; public showViremiaAlert: boolean; public lowViremia: boolean; public highViremia: boolean; public currentPatientSub: Subscription; public _patient: Patient = new Patient({}); public moriskyScore: any = ''; public moriskyScore4: any = ''; public moriskyScore8: any = ''; public ismoriskyScore8 = false; public ismoriskyScore4 = false; public moriskyDenominator: any = ''; public moriskyRating: any = ''; public isMoriskyScorePoorOrInadequate = false; public hivDisclosureStatus: any; public latestCervicalScreeningSummary = []; public cervicalScreeningSummary = []; private obs: any[] = []; private gbvScreeningResult: any; private curProgram: any; private patientPrograms: any; public displayProgram = true; public gbvScreeningLabel: String; constructor( private hivSummaryResourceService: HivSummaryResourceService, private encounterResourceService: EncounterResourceService, private locationResource: LocationResourceService, private userDefaultPropertiesService: UserDefaultPropertiesService, private cervicalCancerScreeningSummaryService: CervicalCancerScreeningSummaResourceService ) {} public ngOnInit() { _.delay( (patientUuid) => { if (_.isNil(this.patient)) { 
this.hasError = true; } else { this.hasData = false; this.getHivSummary(patientUuid); this.getPatientCervicalScreeningSummary(patientUuid); this.patient.person.age > 19 ? (this.gbvScreeningLabel = 'GBV Screening') : (this.gbvScreeningLabel = 'VAC Screening'); } }, 0, this.patient.uuid ); this.getMoriskyScore(); } public getHivSummary(patientUuid) { this.loadingData = true; this.hivSummaryResourceService .getHivSummary(patientUuid, 0, 10) .pipe(take(1)) .subscribe((results) => { let latestVlResult: any; let latestVlDate = ''; let latestVl = null; this.loadingData = false; this.hasLoadedData = true; if (results[0]) { latestVlResult = this.getlatestVlResult(results); latestVlDate = latestVlResult.vl_1_date; latestVl = latestVlResult.vl_1; latestVl = latestVlResult.vl_1; this.patientCareStatus = results[0].patient_care_status; this.hivDisclosureStatus = results[0].hiv_status_disclosed === 1 ? 'Yes' : 'No'; if (this.showViremiaAlert) { this.checkViremia(latestVl); } this.gbvScreeningResult = this.checkGbvScreening( results[0].gbv_screening_result ); } this.clinicalEncounters = this.getClinicalEncounters(results); const latestClinicalEncounter = _.first(this.clinicalEncounters); this.hasTransferEncounter = this.checkIfHasTransferEncounter(results); const transferEncounterIndex = this.getIndexOfTransferEncounter( results ); // Did the patient have a clinical encounter following their transfer encounter i.e. did they return to care? this.hasSubsequentClinicalEncounter = results.indexOf(latestClinicalEncounter) < transferEncounterIndex ? 
true : false; this.patientData = _.first(this.clinicalEncounters); const patientDataCopy = this.patientData; if (!_.isNil(this.patientData)) { // assign latest vl and vl_1_date this.patientData = Object.assign(patientDataCopy, { vl_1_date: latestVlDate, vl_1: latestVl }); // flag red if VL > 1000 && (vl_1_date > (arv_start_date + 6 months)) if ( (this.patientData.vl_1 > 1000 && moment(this.patientData.vl_1_date) > moment(this.patientData.arv_start_date).add(6, 'months')) || this.patientData.prev_arv_line !== this.patientData.cur_arv_line ) { this.isVirallyUnsuppressed = true; } this.hasData = true; this.latestEncounterLocation = null; if (this.patientData.location_uuid) { this.resolveLastEncounterLocation(this.patientData.location_uuid); } } }); } public resolveLastEncounterLocation(location_uuid) { this.locationResource .getLocationByUuid(location_uuid, true) .pipe( finalize(() => { this.resolvedCareStatus = this.getPatientCareStatus( this.patientCareStatus ); }) ) .subscribe( (location) => { this.latestEncounterLocation = location; }, (error) => { console.error('Error resolving locations', error); } ); } public getPatientCareStatus(care_status_id: any) { const translateMap = { '159': 'DECEASED', '9079': 'UNTRACEABLE', '9080': 'PROCESS OF BEING TRACED', '9036': 'HIV NEGATIVE, NO LONGER AT RISK', '9083': 'SELF DISENGAGED FROM CARE', '6101': 'CONTINUE WITH CARE', '1285': 'TRANSFER CARE TO OTHER CENTER', '1286': 'TRANSFER TO AMPATH FACILITY', '1287': 'TRANSFER TO NON-AMPATH FACILITY', '9068': 'TRANSFER TO AMPATH FACILITY, NON-AMRS', '9504': 'TRANSFER TO MATERNAL CHILD HEALTH', '1594': 'PATIENT TRANSFERRED OUT', '9578': 'ENROLL IN AMPATH FACILITY', '9164': 'ENROLL CARE IN ANOTHER HEALTH FACILITY', '1732': 'AMPATH CLINIC TRANSFER', '9579': 'CONTINUE CARE IN OTHER FACILITY', '9580': 'FOLLOW-UP CARE PLAN, NOT SURE', '5622': 'OTHER', '10502': 'NON AMPATH CLINIC TRANSFER' }; /* if the patient transferred out and their care status is 'Continue with Care' despite them not 
returning to care, apply a yellow background on their summary snapshot to mark them out as a Transfer Out. */ /* if the patient is active in care with a care status of 'Continue with Care' and they are past their RTC date by over 1 week, apply a pink background to their snapshot summary and hide their care status. */ if (care_status_id === 6101) { if (this.hasTransferEncounter && !this.patientReturnedToCare()) { this.showCareStatus = false; this.showYellowBackground(); } else if ( moment(this.patientData.rtc_date).add(1, 'week') < moment(new Date()) ) { this.showPinkBackground(); } } // if patient is a Transfer Out, apply a yellow background to their snapshot summary if ( (this.hasTransferEncounter && this.isNonAmpathTransferOut(care_status_id)) || this.isIntraAmpathTransferFromCurrentLocation(care_status_id) ) { this.showYellowBackground(); } return this._toProperCase(translateMap[care_status_id]); } private checkIfHasTransferEncounter(summaries: any[]): boolean { if (summaries) { return _.some(summaries, (summary: any) => { return ( summary.encounter_type === 116 && summary.encounter_type_name === 'TRANSFERENCOUNTER' ); }); } } private getIndexOfTransferEncounter(summaries: any[]): number { if (summaries) { return _.findIndex(summaries, (summary: any) => { return ( summary.encounter_type === 116 && summary.encounter_type_name === 'TRANSFERENCOUNTER' ); }); } } private getClinicalEncounters(summaries: any[]): any[] { if (summaries) { return _.filter(summaries, (summary: any) => { return summary.is_clinical_encounter === 1; }); } } private showPinkBackground(): void { const color = this.backgroundColor.pink; this.addBackground.emit(color); } private showYellowBackground(): void { const color = this.backgroundColor.yellow; this.addBackground.emit(color); }
return ( care_status_id === 1287 || care_status_id === 5622 || care_status_id === 10502 ); } private isIntraAmpathTransferFromCurrentLocation(care_status_id) { const intraAmpathTransferOutConceptIds = [1285, 1286, 9068, 9504]; if ( intraAmpathTransferOutConceptIds.includes(care_status_id) && this.hasMatchingLocation() ) { return true; } if ( care_status_id === 9080 && this.hasTransferEncounter && this.hasMatchingLocation() ) { return true; } if (care_status_id === 1594 && this.hasMatchingLocation()) { return true; } return false; } private hasMatchingLocation() { const currentlyLoggedInLocation = this.userDefaultPropertiesService.getCurrentUserDefaultLocation(); if (this.latestEncounterLocation) { return this.latestEncounterLocation.display === currentlyLoggedInLocation; } } private getlatestVlResult(hivSummaryData) { const orderByVlDate = _.orderBy( hivSummaryData, (hivSummary) => { return moment(hivSummary.vl_1_date); }, ['desc'] ); return orderByVlDate[0]; } private _toProperCase(text: string) { text = text || ''; return text.replace(/\w\S*/g, (txt) => { return txt.charAt(0).toUpperCase() + txt.substr(1).toLowerCase(); }); } private checkViremia(latestVl) { if (latestVl >= 1 && latestVl <= 999) { this.lowViremia = true; this.viremiaAlert = 'Low'; } if (latestVl >= 1000) { this.highViremia = true; this.viremiaAlert = 'High'; } } private checkGbvScreening(screeningResult) { if ( screeningResult === 1 && this.curProgram.uuid === this.patientPrograms[0].programUuid ? 
true : false ) { return 'POSITIVE'; } return false; } public getMoriskyScore() { const previousEncounters = this.getPreviousEncounters( this.patient.encounters ); this.getPreviousEncounterDetails(previousEncounters).then((data) => { this.obs = data[0].obs; this.obs.forEach((obs) => { const morisky4_concept_uuid = '315472dc-2b5e-4add-b3b7-bbcf21a8959b'; const morisky8_concept_uuid = '857caa4e-b566-4a43-ab78-f911c1a8a727'; if (obs.concept.uuid === morisky4_concept_uuid) { this.moriskyScore4 = obs.value; this.ismoriskyScore4 = true; } else if (obs.concept.uuid === morisky8_concept_uuid) { this.ismoriskyScore8 = true; this.moriskyScore8 = obs.value; } }); if (this.ismoriskyScore8) { this.getMorisky8(); } else if (!this.ismoriskyScore8 && this.ismoriskyScore4) { this.getMorisky4(); } else if (!this.ismoriskyScore8 && !this.ismoriskyScore4) { this.setNullMorisky(); } if (this.moriskyScore >= 0 && this.moriskyScore <= 0.25) { this.isMoriskyScorePoorOrInadequate = false; } else if (this.moriskyScore >= 0.5) { this.isMoriskyScorePoorOrInadequate = true; } }); } public getAllEncounters(encounters) { const allEncounters = []; encounters = this.patient.encounters; _.each(encounters, (encounter: any) => { allEncounters.push(encounter); }); return allEncounters; } public getPreviousEncounters(allEncounters) { const previousEncounters = []; _.each(allEncounters, (encounter: any) => { const encounterType = encounter.encounterType.uuid; const encounterDate = moment(encounter.encounterDatetime).format( 'YYYY-MM-DD-HH' ); if (encounterType === '8d5b2be0-c2cc-11de-8d13-0010c6dffd0f') { // Adult Return encounter if ( encounterDate === this.getLastAdultReturnEncounterDate(allEncounters) ) { previousEncounters.push(encounter); } } }); return previousEncounters; } public getLastAdultReturnEncounterDate(allEncounters) { const max_date: any[] = []; _.each(allEncounters, (encounter: any) => { const encounterDate = moment(encounter.encounterDatetime).format( 'YYYY-MM-DD-HH' ); const today = 
moment().format('YYYY-MM-DD-HH'); if (encounterDate !== today) { max_date.push(encounterDate); } }); return this.getMaximumDate(max_date); } public getPreviousEncounterDetails(previousEncounters) { return new Promise((resolve, reject) => { const encounterWithDetails = []; let encounterCount = 0; let resultCount = 0; const checkCount = () => { if (resultCount === encounterCount) { resolve(encounterWithDetails); } }; _.each(previousEncounters, (encounterDetail: any) => { const encounterUuid = encounterDetail.uuid; encounterCount++; this.encounterResourceService .getEncounterByUuid(encounterUuid) .pipe( /* tslint:disable-next-line: no-shadowed-variable */ take(1) ) .subscribe((encDetail) => { encounterWithDetails.push(encDetail); resultCount++; checkCount(); }); }); }); } public getMorisky4() { this.moriskyScore = this.moriskyScore4; this.moriskyDenominator = '/4'; if (this.moriskyScore === 0) { this.moriskyRating = 'Good'; } else if (this.moriskyScore > 0 && this.moriskyScore < 3) { this.moriskyRating = 'Inadequate'; } } public getMorisky8() { this.moriskyScore = this.moriskyScore8; this.moriskyDenominator = '/8'; this.moriskyRating = 'Poor'; this.isMoriskyScorePoorOrInadequate = true; } public setNullMorisky() { this.moriskyScore = ''; this.moriskyDenominator = ''; this.moriskyRating = 'No value'; } public getMaximumDate(all_dates) { let max_dt = all_dates[0], max_dtObj = new Date(all_dates[0]); all_dates.forEach(function (dt, index) { if (new Date(dt) > max_dtObj) { max_dt = dt; max_dtObj = new Date(dt); } }); return max_dt; } public getPatientCervicalScreeningSummary(patientUuid: string): void { this.cervicalCancerScreeningSummaryService .getCervicalCancerScreeningSummary(patientUuid) .subscribe( (result) => { if (result) { this.cervicalScreeningSummary = result; if (result.length > 0) { this.latestCervicalScreeningSummary = result[0]; } } }, (error) => { console.log('Error', error); } ); } }
private patientReturnedToCare(): boolean { return this.hasSubsequentClinicalEncounter ? true : false; } private isNonAmpathTransferOut(care_status_id) {
random_line_split
hiv-program-snapshot.component.ts
import { OnInit, Component, Input, Output, EventEmitter } from '@angular/core'; import { Subscription } from 'rxjs'; import { take, finalize } from 'rxjs/operators'; import * as _ from 'lodash'; import * as moment from 'moment'; import { Patient } from '../../../models/patient.model'; import { HivSummaryResourceService } from '../../../etl-api/hiv-summary-resource.service'; import { LocationResourceService } from '../../../openmrs-api/location-resource.service'; import { EncounterResourceService } from 'src/app/openmrs-api/encounter-resource.service'; import { UserDefaultPropertiesService } from '../../../user-default-properties/user-default-properties.service'; import { CervicalCancerScreeningSummaResourceService } from './../../../etl-api/cervical-cancer-screening-summary-resource.service'; const mdtProgramUuid = 'c4246ff0-b081-460c-bcc5-b0678012659e'; const stdProgramUuid = '781d85b0-1359-11df-a1f1-0026b9348838'; const HivNegativesProgram = [ 'c19aec66-1a40-4588-9b03-b6be55a8dd1d', '96047aaf-7ab3-45e9-be6a-b61810fe617d' ]; @Component({ selector: 'hiv-snapshot', styleUrls: ['./hiv-program-snapshot.component.css'], templateUrl: './hiv-program-snapshot.component.html' }) export class HivProgramSnapshotComponent implements OnInit { @Input() public set enrolledProgrames(enrolledProgrames) { this.patientPrograms = enrolledProgrames; } @Input() public set program(program) { this.showViremiaAlert = program.uuid === mdtProgramUuid ? true : false; this.hasMoriskyScore = program.uuid === stdProgramUuid ? 
true : false; this.curProgram = program; _.each(HivNegativesProgram, (p) => { if (p === program.uuid) { this.displayProgram = false; } }); } @Input() public patient: Patient; @Output() public addBackground = new EventEmitter(); public hasError = false; public hasData = false; public hasMoriskyScore = false; public clinicalEncounters: any[] = []; public patientData: any = {}; public loadingData = false; public hasLoadedData = false; public prev_encounter_date: any = ''; public isVirallyUnsuppressed = false; public patientCareStatus: any; public hasTransferEncounter = false; public latestEncounterLocation: any = {}; public hasSubsequentClinicalEncounter = false; public resolvedCareStatus: any; public showCareStatus = true; public backgroundColor: any = { pink: '#FFC0CB', yellow: '#FFFF00' }; public viremiaAlert: string; public showViremiaAlert: boolean; public lowViremia: boolean; public highViremia: boolean; public currentPatientSub: Subscription; public _patient: Patient = new Patient({}); public moriskyScore: any = ''; public moriskyScore4: any = ''; public moriskyScore8: any = ''; public ismoriskyScore8 = false; public ismoriskyScore4 = false; public moriskyDenominator: any = ''; public moriskyRating: any = ''; public isMoriskyScorePoorOrInadequate = false; public hivDisclosureStatus: any; public latestCervicalScreeningSummary = []; public cervicalScreeningSummary = []; private obs: any[] = []; private gbvScreeningResult: any; private curProgram: any; private patientPrograms: any; public displayProgram = true; public gbvScreeningLabel: String; constructor( private hivSummaryResourceService: HivSummaryResourceService, private encounterResourceService: EncounterResourceService, private locationResource: LocationResourceService, private userDefaultPropertiesService: UserDefaultPropertiesService, private cervicalCancerScreeningSummaryService: CervicalCancerScreeningSummaResourceService ) {} public ngOnInit() { _.delay( (patientUuid) => { if (_.isNil(this.patient)) { 
this.hasError = true; } else { this.hasData = false; this.getHivSummary(patientUuid); this.getPatientCervicalScreeningSummary(patientUuid); this.patient.person.age > 19 ? (this.gbvScreeningLabel = 'GBV Screening') : (this.gbvScreeningLabel = 'VAC Screening'); } }, 0, this.patient.uuid ); this.getMoriskyScore(); } public getHivSummary(patientUuid) { this.loadingData = true; this.hivSummaryResourceService .getHivSummary(patientUuid, 0, 10) .pipe(take(1)) .subscribe((results) => { let latestVlResult: any; let latestVlDate = ''; let latestVl = null; this.loadingData = false; this.hasLoadedData = true; if (results[0]) { latestVlResult = this.getlatestVlResult(results); latestVlDate = latestVlResult.vl_1_date; latestVl = latestVlResult.vl_1; latestVl = latestVlResult.vl_1; this.patientCareStatus = results[0].patient_care_status; this.hivDisclosureStatus = results[0].hiv_status_disclosed === 1 ? 'Yes' : 'No'; if (this.showViremiaAlert) { this.checkViremia(latestVl); } this.gbvScreeningResult = this.checkGbvScreening( results[0].gbv_screening_result ); } this.clinicalEncounters = this.getClinicalEncounters(results); const latestClinicalEncounter = _.first(this.clinicalEncounters); this.hasTransferEncounter = this.checkIfHasTransferEncounter(results); const transferEncounterIndex = this.getIndexOfTransferEncounter( results ); // Did the patient have a clinical encounter following their transfer encounter i.e. did they return to care? this.hasSubsequentClinicalEncounter = results.indexOf(latestClinicalEncounter) < transferEncounterIndex ? 
true : false; this.patientData = _.first(this.clinicalEncounters); const patientDataCopy = this.patientData; if (!_.isNil(this.patientData)) { // assign latest vl and vl_1_date this.patientData = Object.assign(patientDataCopy, { vl_1_date: latestVlDate, vl_1: latestVl }); // flag red if VL > 1000 && (vl_1_date > (arv_start_date + 6 months)) if ( (this.patientData.vl_1 > 1000 && moment(this.patientData.vl_1_date) > moment(this.patientData.arv_start_date).add(6, 'months')) || this.patientData.prev_arv_line !== this.patientData.cur_arv_line ) { this.isVirallyUnsuppressed = true; } this.hasData = true; this.latestEncounterLocation = null; if (this.patientData.location_uuid) { this.resolveLastEncounterLocation(this.patientData.location_uuid); } } }); } public resolveLastEncounterLocation(location_uuid) { this.locationResource .getLocationByUuid(location_uuid, true) .pipe( finalize(() => { this.resolvedCareStatus = this.getPatientCareStatus( this.patientCareStatus ); }) ) .subscribe( (location) => { this.latestEncounterLocation = location; }, (error) => { console.error('Error resolving locations', error); } ); } public getPatientCareStatus(care_status_id: any) { const translateMap = { '159': 'DECEASED', '9079': 'UNTRACEABLE', '9080': 'PROCESS OF BEING TRACED', '9036': 'HIV NEGATIVE, NO LONGER AT RISK', '9083': 'SELF DISENGAGED FROM CARE', '6101': 'CONTINUE WITH CARE', '1285': 'TRANSFER CARE TO OTHER CENTER', '1286': 'TRANSFER TO AMPATH FACILITY', '1287': 'TRANSFER TO NON-AMPATH FACILITY', '9068': 'TRANSFER TO AMPATH FACILITY, NON-AMRS', '9504': 'TRANSFER TO MATERNAL CHILD HEALTH', '1594': 'PATIENT TRANSFERRED OUT', '9578': 'ENROLL IN AMPATH FACILITY', '9164': 'ENROLL CARE IN ANOTHER HEALTH FACILITY', '1732': 'AMPATH CLINIC TRANSFER', '9579': 'CONTINUE CARE IN OTHER FACILITY', '9580': 'FOLLOW-UP CARE PLAN, NOT SURE', '5622': 'OTHER', '10502': 'NON AMPATH CLINIC TRANSFER' }; /* if the patient transferred out and their care status is 'Continue with Care' despite them not 
returning to care, apply a yellow background on their summary snapshot to mark them out as a Transfer Out. */ /* if the patient is active in care with a care status of 'Continue with Care' and they are past their RTC date by over 1 week, apply a pink background to their snapshot summary and hide their care status. */ if (care_status_id === 6101) { if (this.hasTransferEncounter && !this.patientReturnedToCare()) { this.showCareStatus = false; this.showYellowBackground(); } else if ( moment(this.patientData.rtc_date).add(1, 'week') < moment(new Date()) ) { this.showPinkBackground(); } } // if patient is a Transfer Out, apply a yellow background to their snapshot summary if ( (this.hasTransferEncounter && this.isNonAmpathTransferOut(care_status_id)) || this.isIntraAmpathTransferFromCurrentLocation(care_status_id) ) { this.showYellowBackground(); } return this._toProperCase(translateMap[care_status_id]); } private checkIfHasTransferEncounter(summaries: any[]): boolean { if (summaries) { return _.some(summaries, (summary: any) => { return ( summary.encounter_type === 116 && summary.encounter_type_name === 'TRANSFERENCOUNTER' ); }); } } private getIndexOfTransferEncounter(summaries: any[]): number { if (summaries) { return _.findIndex(summaries, (summary: any) => { return ( summary.encounter_type === 116 && summary.encounter_type_name === 'TRANSFERENCOUNTER' ); }); } } private getClinicalEncounters(summaries: any[]): any[] { if (summaries) { return _.filter(summaries, (summary: any) => { return summary.is_clinical_encounter === 1; }); } } private showPinkBackground(): void { const color = this.backgroundColor.pink; this.addBackground.emit(color); } private showYellowBackground(): void { const color = this.backgroundColor.yellow; this.addBackground.emit(color); } private patientReturnedToCare(): boolean { return this.hasSubsequentClinicalEncounter ? true : false; } private
(care_status_id) { return ( care_status_id === 1287 || care_status_id === 5622 || care_status_id === 10502 ); } private isIntraAmpathTransferFromCurrentLocation(care_status_id) { const intraAmpathTransferOutConceptIds = [1285, 1286, 9068, 9504]; if ( intraAmpathTransferOutConceptIds.includes(care_status_id) && this.hasMatchingLocation() ) { return true; } if ( care_status_id === 9080 && this.hasTransferEncounter && this.hasMatchingLocation() ) { return true; } if (care_status_id === 1594 && this.hasMatchingLocation()) { return true; } return false; } private hasMatchingLocation() { const currentlyLoggedInLocation = this.userDefaultPropertiesService.getCurrentUserDefaultLocation(); if (this.latestEncounterLocation) { return this.latestEncounterLocation.display === currentlyLoggedInLocation; } } private getlatestVlResult(hivSummaryData) { const orderByVlDate = _.orderBy( hivSummaryData, (hivSummary) => { return moment(hivSummary.vl_1_date); }, ['desc'] ); return orderByVlDate[0]; } private _toProperCase(text: string) { text = text || ''; return text.replace(/\w\S*/g, (txt) => { return txt.charAt(0).toUpperCase() + txt.substr(1).toLowerCase(); }); } private checkViremia(latestVl) { if (latestVl >= 1 && latestVl <= 999) { this.lowViremia = true; this.viremiaAlert = 'Low'; } if (latestVl >= 1000) { this.highViremia = true; this.viremiaAlert = 'High'; } } private checkGbvScreening(screeningResult) { if ( screeningResult === 1 && this.curProgram.uuid === this.patientPrograms[0].programUuid ? 
true : false ) { return 'POSITIVE'; } return false; } public getMoriskyScore() { const previousEncounters = this.getPreviousEncounters( this.patient.encounters ); this.getPreviousEncounterDetails(previousEncounters).then((data) => { this.obs = data[0].obs; this.obs.forEach((obs) => { const morisky4_concept_uuid = '315472dc-2b5e-4add-b3b7-bbcf21a8959b'; const morisky8_concept_uuid = '857caa4e-b566-4a43-ab78-f911c1a8a727'; if (obs.concept.uuid === morisky4_concept_uuid) { this.moriskyScore4 = obs.value; this.ismoriskyScore4 = true; } else if (obs.concept.uuid === morisky8_concept_uuid) { this.ismoriskyScore8 = true; this.moriskyScore8 = obs.value; } }); if (this.ismoriskyScore8) { this.getMorisky8(); } else if (!this.ismoriskyScore8 && this.ismoriskyScore4) { this.getMorisky4(); } else if (!this.ismoriskyScore8 && !this.ismoriskyScore4) { this.setNullMorisky(); } if (this.moriskyScore >= 0 && this.moriskyScore <= 0.25) { this.isMoriskyScorePoorOrInadequate = false; } else if (this.moriskyScore >= 0.5) { this.isMoriskyScorePoorOrInadequate = true; } }); } public getAllEncounters(encounters) { const allEncounters = []; encounters = this.patient.encounters; _.each(encounters, (encounter: any) => { allEncounters.push(encounter); }); return allEncounters; } public getPreviousEncounters(allEncounters) { const previousEncounters = []; _.each(allEncounters, (encounter: any) => { const encounterType = encounter.encounterType.uuid; const encounterDate = moment(encounter.encounterDatetime).format( 'YYYY-MM-DD-HH' ); if (encounterType === '8d5b2be0-c2cc-11de-8d13-0010c6dffd0f') { // Adult Return encounter if ( encounterDate === this.getLastAdultReturnEncounterDate(allEncounters) ) { previousEncounters.push(encounter); } } }); return previousEncounters; } public getLastAdultReturnEncounterDate(allEncounters) { const max_date: any[] = []; _.each(allEncounters, (encounter: any) => { const encounterDate = moment(encounter.encounterDatetime).format( 'YYYY-MM-DD-HH' ); const today = 
moment().format('YYYY-MM-DD-HH'); if (encounterDate !== today) { max_date.push(encounterDate); } }); return this.getMaximumDate(max_date); } public getPreviousEncounterDetails(previousEncounters) { return new Promise((resolve, reject) => { const encounterWithDetails = []; let encounterCount = 0; let resultCount = 0; const checkCount = () => { if (resultCount === encounterCount) { resolve(encounterWithDetails); } }; _.each(previousEncounters, (encounterDetail: any) => { const encounterUuid = encounterDetail.uuid; encounterCount++; this.encounterResourceService .getEncounterByUuid(encounterUuid) .pipe( /* tslint:disable-next-line: no-shadowed-variable */ take(1) ) .subscribe((encDetail) => { encounterWithDetails.push(encDetail); resultCount++; checkCount(); }); }); }); } public getMorisky4() { this.moriskyScore = this.moriskyScore4; this.moriskyDenominator = '/4'; if (this.moriskyScore === 0) { this.moriskyRating = 'Good'; } else if (this.moriskyScore > 0 && this.moriskyScore < 3) { this.moriskyRating = 'Inadequate'; } } public getMorisky8() { this.moriskyScore = this.moriskyScore8; this.moriskyDenominator = '/8'; this.moriskyRating = 'Poor'; this.isMoriskyScorePoorOrInadequate = true; } public setNullMorisky() { this.moriskyScore = ''; this.moriskyDenominator = ''; this.moriskyRating = 'No value'; } public getMaximumDate(all_dates) { let max_dt = all_dates[0], max_dtObj = new Date(all_dates[0]); all_dates.forEach(function (dt, index) { if (new Date(dt) > max_dtObj) { max_dt = dt; max_dtObj = new Date(dt); } }); return max_dt; } public getPatientCervicalScreeningSummary(patientUuid: string): void { this.cervicalCancerScreeningSummaryService .getCervicalCancerScreeningSummary(patientUuid) .subscribe( (result) => { if (result) { this.cervicalScreeningSummary = result; if (result.length > 0) { this.latestCervicalScreeningSummary = result[0]; } } }, (error) => { console.log('Error', error); } ); } }
isNonAmpathTransferOut
identifier_name
hiv-program-snapshot.component.ts
import { OnInit, Component, Input, Output, EventEmitter } from '@angular/core'; import { Subscription } from 'rxjs'; import { take, finalize } from 'rxjs/operators'; import * as _ from 'lodash'; import * as moment from 'moment'; import { Patient } from '../../../models/patient.model'; import { HivSummaryResourceService } from '../../../etl-api/hiv-summary-resource.service'; import { LocationResourceService } from '../../../openmrs-api/location-resource.service'; import { EncounterResourceService } from 'src/app/openmrs-api/encounter-resource.service'; import { UserDefaultPropertiesService } from '../../../user-default-properties/user-default-properties.service'; import { CervicalCancerScreeningSummaResourceService } from './../../../etl-api/cervical-cancer-screening-summary-resource.service'; const mdtProgramUuid = 'c4246ff0-b081-460c-bcc5-b0678012659e'; const stdProgramUuid = '781d85b0-1359-11df-a1f1-0026b9348838'; const HivNegativesProgram = [ 'c19aec66-1a40-4588-9b03-b6be55a8dd1d', '96047aaf-7ab3-45e9-be6a-b61810fe617d' ]; @Component({ selector: 'hiv-snapshot', styleUrls: ['./hiv-program-snapshot.component.css'], templateUrl: './hiv-program-snapshot.component.html' }) export class HivProgramSnapshotComponent implements OnInit { @Input() public set enrolledProgrames(enrolledProgrames) { this.patientPrograms = enrolledProgrames; } @Input() public set program(program) { this.showViremiaAlert = program.uuid === mdtProgramUuid ? true : false; this.hasMoriskyScore = program.uuid === stdProgramUuid ? 
true : false; this.curProgram = program; _.each(HivNegativesProgram, (p) => { if (p === program.uuid) { this.displayProgram = false; } }); } @Input() public patient: Patient; @Output() public addBackground = new EventEmitter(); public hasError = false; public hasData = false; public hasMoriskyScore = false; public clinicalEncounters: any[] = []; public patientData: any = {}; public loadingData = false; public hasLoadedData = false; public prev_encounter_date: any = ''; public isVirallyUnsuppressed = false; public patientCareStatus: any; public hasTransferEncounter = false; public latestEncounterLocation: any = {}; public hasSubsequentClinicalEncounter = false; public resolvedCareStatus: any; public showCareStatus = true; public backgroundColor: any = { pink: '#FFC0CB', yellow: '#FFFF00' }; public viremiaAlert: string; public showViremiaAlert: boolean; public lowViremia: boolean; public highViremia: boolean; public currentPatientSub: Subscription; public _patient: Patient = new Patient({}); public moriskyScore: any = ''; public moriskyScore4: any = ''; public moriskyScore8: any = ''; public ismoriskyScore8 = false; public ismoriskyScore4 = false; public moriskyDenominator: any = ''; public moriskyRating: any = ''; public isMoriskyScorePoorOrInadequate = false; public hivDisclosureStatus: any; public latestCervicalScreeningSummary = []; public cervicalScreeningSummary = []; private obs: any[] = []; private gbvScreeningResult: any; private curProgram: any; private patientPrograms: any; public displayProgram = true; public gbvScreeningLabel: String; constructor( private hivSummaryResourceService: HivSummaryResourceService, private encounterResourceService: EncounterResourceService, private locationResource: LocationResourceService, private userDefaultPropertiesService: UserDefaultPropertiesService, private cervicalCancerScreeningSummaryService: CervicalCancerScreeningSummaResourceService ) {} public ngOnInit() { _.delay( (patientUuid) => { if (_.isNil(this.patient)) { 
this.hasError = true; } else { this.hasData = false; this.getHivSummary(patientUuid); this.getPatientCervicalScreeningSummary(patientUuid); this.patient.person.age > 19 ? (this.gbvScreeningLabel = 'GBV Screening') : (this.gbvScreeningLabel = 'VAC Screening'); } }, 0, this.patient.uuid ); this.getMoriskyScore(); } public getHivSummary(patientUuid) { this.loadingData = true; this.hivSummaryResourceService .getHivSummary(patientUuid, 0, 10) .pipe(take(1)) .subscribe((results) => { let latestVlResult: any; let latestVlDate = ''; let latestVl = null; this.loadingData = false; this.hasLoadedData = true; if (results[0]) { latestVlResult = this.getlatestVlResult(results); latestVlDate = latestVlResult.vl_1_date; latestVl = latestVlResult.vl_1; latestVl = latestVlResult.vl_1; this.patientCareStatus = results[0].patient_care_status; this.hivDisclosureStatus = results[0].hiv_status_disclosed === 1 ? 'Yes' : 'No'; if (this.showViremiaAlert) { this.checkViremia(latestVl); } this.gbvScreeningResult = this.checkGbvScreening( results[0].gbv_screening_result ); } this.clinicalEncounters = this.getClinicalEncounters(results); const latestClinicalEncounter = _.first(this.clinicalEncounters); this.hasTransferEncounter = this.checkIfHasTransferEncounter(results); const transferEncounterIndex = this.getIndexOfTransferEncounter( results ); // Did the patient have a clinical encounter following their transfer encounter i.e. did they return to care? this.hasSubsequentClinicalEncounter = results.indexOf(latestClinicalEncounter) < transferEncounterIndex ? 
true : false; this.patientData = _.first(this.clinicalEncounters); const patientDataCopy = this.patientData; if (!_.isNil(this.patientData)) { // assign latest vl and vl_1_date this.patientData = Object.assign(patientDataCopy, { vl_1_date: latestVlDate, vl_1: latestVl }); // flag red if VL > 1000 && (vl_1_date > (arv_start_date + 6 months)) if ( (this.patientData.vl_1 > 1000 && moment(this.patientData.vl_1_date) > moment(this.patientData.arv_start_date).add(6, 'months')) || this.patientData.prev_arv_line !== this.patientData.cur_arv_line ) { this.isVirallyUnsuppressed = true; } this.hasData = true; this.latestEncounterLocation = null; if (this.patientData.location_uuid) { this.resolveLastEncounterLocation(this.patientData.location_uuid); } } }); } public resolveLastEncounterLocation(location_uuid) { this.locationResource .getLocationByUuid(location_uuid, true) .pipe( finalize(() => { this.resolvedCareStatus = this.getPatientCareStatus( this.patientCareStatus ); }) ) .subscribe( (location) => { this.latestEncounterLocation = location; }, (error) => { console.error('Error resolving locations', error); } ); } public getPatientCareStatus(care_status_id: any) { const translateMap = { '159': 'DECEASED', '9079': 'UNTRACEABLE', '9080': 'PROCESS OF BEING TRACED', '9036': 'HIV NEGATIVE, NO LONGER AT RISK', '9083': 'SELF DISENGAGED FROM CARE', '6101': 'CONTINUE WITH CARE', '1285': 'TRANSFER CARE TO OTHER CENTER', '1286': 'TRANSFER TO AMPATH FACILITY', '1287': 'TRANSFER TO NON-AMPATH FACILITY', '9068': 'TRANSFER TO AMPATH FACILITY, NON-AMRS', '9504': 'TRANSFER TO MATERNAL CHILD HEALTH', '1594': 'PATIENT TRANSFERRED OUT', '9578': 'ENROLL IN AMPATH FACILITY', '9164': 'ENROLL CARE IN ANOTHER HEALTH FACILITY', '1732': 'AMPATH CLINIC TRANSFER', '9579': 'CONTINUE CARE IN OTHER FACILITY', '9580': 'FOLLOW-UP CARE PLAN, NOT SURE', '5622': 'OTHER', '10502': 'NON AMPATH CLINIC TRANSFER' }; /* if the patient transferred out and their care status is 'Continue with Care' despite them not 
returning to care, apply a yellow background on their summary snapshot to mark them out as a Transfer Out. */ /* if the patient is active in care with a care status of 'Continue with Care' and they are past their RTC date by over 1 week, apply a pink background to their snapshot summary and hide their care status. */ if (care_status_id === 6101) { if (this.hasTransferEncounter && !this.patientReturnedToCare()) { this.showCareStatus = false; this.showYellowBackground(); } else if ( moment(this.patientData.rtc_date).add(1, 'week') < moment(new Date()) ) { this.showPinkBackground(); } } // if patient is a Transfer Out, apply a yellow background to their snapshot summary if ( (this.hasTransferEncounter && this.isNonAmpathTransferOut(care_status_id)) || this.isIntraAmpathTransferFromCurrentLocation(care_status_id) ) { this.showYellowBackground(); } return this._toProperCase(translateMap[care_status_id]); } private checkIfHasTransferEncounter(summaries: any[]): boolean { if (summaries) { return _.some(summaries, (summary: any) => { return ( summary.encounter_type === 116 && summary.encounter_type_name === 'TRANSFERENCOUNTER' ); }); } } private getIndexOfTransferEncounter(summaries: any[]): number { if (summaries) { return _.findIndex(summaries, (summary: any) => { return ( summary.encounter_type === 116 && summary.encounter_type_name === 'TRANSFERENCOUNTER' ); }); } } private getClinicalEncounters(summaries: any[]): any[] { if (summaries) { return _.filter(summaries, (summary: any) => { return summary.is_clinical_encounter === 1; }); } } private showPinkBackground(): void { const color = this.backgroundColor.pink; this.addBackground.emit(color); } private showYellowBackground(): void { const color = this.backgroundColor.yellow; this.addBackground.emit(color); } private patientReturnedToCare(): boolean { return this.hasSubsequentClinicalEncounter ? 
true : false; } private isNonAmpathTransferOut(care_status_id) { return ( care_status_id === 1287 || care_status_id === 5622 || care_status_id === 10502 ); } private isIntraAmpathTransferFromCurrentLocation(care_status_id) { const intraAmpathTransferOutConceptIds = [1285, 1286, 9068, 9504]; if ( intraAmpathTransferOutConceptIds.includes(care_status_id) && this.hasMatchingLocation() ) { return true; } if ( care_status_id === 9080 && this.hasTransferEncounter && this.hasMatchingLocation() ) { return true; } if (care_status_id === 1594 && this.hasMatchingLocation()) { return true; } return false; } private hasMatchingLocation() { const currentlyLoggedInLocation = this.userDefaultPropertiesService.getCurrentUserDefaultLocation(); if (this.latestEncounterLocation) { return this.latestEncounterLocation.display === currentlyLoggedInLocation; } } private getlatestVlResult(hivSummaryData) { const orderByVlDate = _.orderBy( hivSummaryData, (hivSummary) => { return moment(hivSummary.vl_1_date); }, ['desc'] ); return orderByVlDate[0]; } private _toProperCase(text: string) { text = text || ''; return text.replace(/\w\S*/g, (txt) => { return txt.charAt(0).toUpperCase() + txt.substr(1).toLowerCase(); }); } private checkViremia(latestVl) { if (latestVl >= 1 && latestVl <= 999) { this.lowViremia = true; this.viremiaAlert = 'Low'; } if (latestVl >= 1000) { this.highViremia = true; this.viremiaAlert = 'High'; } } private checkGbvScreening(screeningResult) { if ( screeningResult === 1 && this.curProgram.uuid === this.patientPrograms[0].programUuid ? 
true : false ) { return 'POSITIVE'; } return false; } public getMoriskyScore() { const previousEncounters = this.getPreviousEncounters( this.patient.encounters ); this.getPreviousEncounterDetails(previousEncounters).then((data) => { this.obs = data[0].obs; this.obs.forEach((obs) => { const morisky4_concept_uuid = '315472dc-2b5e-4add-b3b7-bbcf21a8959b'; const morisky8_concept_uuid = '857caa4e-b566-4a43-ab78-f911c1a8a727'; if (obs.concept.uuid === morisky4_concept_uuid) { this.moriskyScore4 = obs.value; this.ismoriskyScore4 = true; } else if (obs.concept.uuid === morisky8_concept_uuid) { this.ismoriskyScore8 = true; this.moriskyScore8 = obs.value; } }); if (this.ismoriskyScore8) { this.getMorisky8(); } else if (!this.ismoriskyScore8 && this.ismoriskyScore4) { this.getMorisky4(); } else if (!this.ismoriskyScore8 && !this.ismoriskyScore4) { this.setNullMorisky(); } if (this.moriskyScore >= 0 && this.moriskyScore <= 0.25) { this.isMoriskyScorePoorOrInadequate = false; } else if (this.moriskyScore >= 0.5) { this.isMoriskyScorePoorOrInadequate = true; } }); } public getAllEncounters(encounters) { const allEncounters = []; encounters = this.patient.encounters; _.each(encounters, (encounter: any) => { allEncounters.push(encounter); }); return allEncounters; } public getPreviousEncounters(allEncounters) { const previousEncounters = []; _.each(allEncounters, (encounter: any) => { const encounterType = encounter.encounterType.uuid; const encounterDate = moment(encounter.encounterDatetime).format( 'YYYY-MM-DD-HH' ); if (encounterType === '8d5b2be0-c2cc-11de-8d13-0010c6dffd0f') { // Adult Return encounter if ( encounterDate === this.getLastAdultReturnEncounterDate(allEncounters) ) { previousEncounters.push(encounter); } } }); return previousEncounters; } public getLastAdultReturnEncounterDate(allEncounters) { const max_date: any[] = []; _.each(allEncounters, (encounter: any) => { const encounterDate = moment(encounter.encounterDatetime).format( 'YYYY-MM-DD-HH' ); const today = 
moment().format('YYYY-MM-DD-HH'); if (encounterDate !== today) { max_date.push(encounterDate); } }); return this.getMaximumDate(max_date); } public getPreviousEncounterDetails(previousEncounters) { return new Promise((resolve, reject) => { const encounterWithDetails = []; let encounterCount = 0; let resultCount = 0; const checkCount = () => { if (resultCount === encounterCount) { resolve(encounterWithDetails); } }; _.each(previousEncounters, (encounterDetail: any) => { const encounterUuid = encounterDetail.uuid; encounterCount++; this.encounterResourceService .getEncounterByUuid(encounterUuid) .pipe( /* tslint:disable-next-line: no-shadowed-variable */ take(1) ) .subscribe((encDetail) => { encounterWithDetails.push(encDetail); resultCount++; checkCount(); }); }); }); } public getMorisky4() { this.moriskyScore = this.moriskyScore4; this.moriskyDenominator = '/4'; if (this.moriskyScore === 0) { this.moriskyRating = 'Good'; } else if (this.moriskyScore > 0 && this.moriskyScore < 3)
} public getMorisky8() { this.moriskyScore = this.moriskyScore8; this.moriskyDenominator = '/8'; this.moriskyRating = 'Poor'; this.isMoriskyScorePoorOrInadequate = true; } public setNullMorisky() { this.moriskyScore = ''; this.moriskyDenominator = ''; this.moriskyRating = 'No value'; } public getMaximumDate(all_dates) { let max_dt = all_dates[0], max_dtObj = new Date(all_dates[0]); all_dates.forEach(function (dt, index) { if (new Date(dt) > max_dtObj) { max_dt = dt; max_dtObj = new Date(dt); } }); return max_dt; } public getPatientCervicalScreeningSummary(patientUuid: string): void { this.cervicalCancerScreeningSummaryService .getCervicalCancerScreeningSummary(patientUuid) .subscribe( (result) => { if (result) { this.cervicalScreeningSummary = result; if (result.length > 0) { this.latestCervicalScreeningSummary = result[0]; } } }, (error) => { console.log('Error', error); } ); } }
{ this.moriskyRating = 'Inadequate'; }
conditional_block
loader.rs
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Finds crate binaries and loads their metadata //! //! Might I be the first to welcome you to a world of platform differences, //! version requirements, dependency graphs, conflicting desires, and fun! This //! is the major guts (along with metadata::creader) of the compiler for loading //! crates and resolving dependencies. Let's take a tour! //! //! # The problem //! //! Each invocation of the compiler is immediately concerned with one primary //! problem, to connect a set of crates to resolved crates on the filesystem. //! Concretely speaking, the compiler follows roughly these steps to get here: //! //! 1. Discover a set of `extern crate` statements. //! 2. Transform these directives into crate names. If the directive does not //! have an explicit name, then the identifier is the name. //! 3. For each of these crate names, find a corresponding crate on the //! filesystem. //! //! Sounds easy, right? Let's walk into some of the nuances. //! //! ## Transitive Dependencies //! //! Let's say we've got three crates: A, B, and C. A depends on B, and B depends //! on C. When we're compiling A, we primarily need to find and locate B, but we //! also end up needing to find and locate C as well. //! //! The reason for this is that any of B's types could be composed of C's types, //! any function in B could return a type from C, etc. To be able to guarantee //! that we can always typecheck/translate any function, we have to have //! complete knowledge of the whole ecosystem, not just our immediate //! 
dependencies. //! //! So now as part of the "find a corresponding crate on the filesystem" step //! above, this involves also finding all crates for *all upstream //! dependencies*. This includes all dependencies transitively. //! //! ## Rlibs and Dylibs //! //! The compiler has two forms of intermediate dependencies. These are dubbed //! rlibs and dylibs for the static and dynamic variants, respectively. An rlib //! is a rustc-defined file format (currently just an ar archive) while a dylib //! is a platform-defined dynamic library. Each library has a metadata somewhere //! inside of it. //! //! When translating a crate name to a crate on the filesystem, we all of a //! sudden need to take into account both rlibs and dylibs! Linkage later on may //! use either one of these files, as each has their pros/cons. The job of crate //! loading is to discover what's possible by finding all candidates. //! //! Most parts of this loading systems keep the dylib/rlib as just separate //! variables. //! //! ## Where to look? //! //! We can't exactly scan your whole hard drive when looking for dependencies, //! so we need to places to look. Currently the compiler will implicitly add the //! target lib search path ($prefix/lib/rustlib/$target/lib) to any compilation, //! and otherwise all -L flags are added to the search paths. //! //! ## What criterion to select on? //! //! This a pretty tricky area of loading crates. Given a file, how do we know //! whether it's the right crate? Currently, the rules look along these lines: //! //! 1. Does the filename match an rlib/dylib pattern? That is to say, does the //! filename have the right prefix/suffix? //! 2. Does the filename have the right prefix for the crate name being queried? //! This is filtering for files like `libfoo*.rlib` and such. //! 3. Is the file an actual rust library? This is done by loading the metadata //! from the library and making sure it's actually there. //! 4. 
Does the name in the metadata agree with the name of the library? //! 5. Does the target in the metadata agree with the current target? //! 6. Does the SVH match? (more on this later) //! //! If the file answers `yes` to all these questions, then the file is //! considered as being *candidate* for being accepted. It is illegal to have //! more than two candidates as the compiler has no method by which to resolve //! this conflict. Additionally, rlib/dylib candidates are considered //! separately. //! //! After all this has happened, we have 1 or two files as candidates. These //! represent the rlib/dylib file found for a library, and they're returned as //! being found. //! //! ### What about versions? //! //! A lot of effort has been put forth to remove versioning from the compiler. //! There have been forays in the past to have versioning baked in, but it was //! largely always deemed insufficient to the point that it was recognized that //! it's probably something the compiler shouldn't do anyway due to its //! complicated nature and the state of the half-baked solutions. //! //! With a departure from versioning, the primary criterion for loading crates //! is just the name of a crate. If we stopped here, it would imply that you //! could never link two crates of the same name from different sources //! together, which is clearly a bad state to be in. //! //! To resolve this problem, we come to the next section! //! //! # Expert Mode //! //! A number of flags have been added to the compiler to solve the "version //! problem" in the previous section, as well as generally enabling more //! powerful usage of the crate loading system of the compiler. The goal of //! these flags and options are to enable third-party tools to drive the //! compiler with prior knowledge about how the world should look. //! //! ## The `--extern` flag //! //! The compiler accepts a flag of this form a number of times: //! //! ```notrust //! --extern crate-name=path/to/the/crate.rlib //! 
``` //! //! This flag is basically the following letter to the compiler: //! //! > Dear rustc, //! > //! > When you are attempting to load the immediate dependency `crate-name`, I //! > would like you too assume that the library is located at //! > `path/to/the/crate.rlib`, and look nowhere else. Also, please do not //! > assume that the path I specified has the name `crate-name`. //! //! This flag basically overrides most matching logic except for validating that //! the file is indeed a rust library. The same `crate-name` can be specified //! twice to specify the rlib/dylib pair. //! //! ## Enabling "multiple versions" //! //! This basically boils down to the ability to specify arbitrary packages to //! the compiler. For example, if crate A wanted to use Bv1 and Bv2, then it //! would look something like: //! //! ```ignore //! extern crate b1; //! extern crate b2; //! //! fn main() {} //! ``` //! //! and the compiler would be invoked as: //! //! ```notrust //! rustc a.rs --extern b1=path/to/libb1.rlib --extern b2=path/to/libb2.rlib //! ``` //! //! In this scenario there are two crates named `b` and the compiler must be //! manually driven to be informed where each crate is. //! //! ## Frobbing symbols //! //! One of the immediate problems with linking the same library together twice //! in the same problem is dealing with duplicate symbols. The primary way to //! deal with this in rustc is to add hashes to the end of each symbol. //! //! In order to force hashes to change between versions of a library, if //! desired, the compiler exposes an option `-C metadata=foo`, which is used to //! initially seed each symbol hash. The string `foo` is prepended to each //! string-to-hash to ensure that symbols change over time. //! //! ## Loading transitive dependencies //! //! Dealing with same-named-but-distinct crates is not just a local problem, but //! one that also needs to be dealt with for transitive dependencies. Note that //! 
in the letter above `--extern` flags only apply to the *local* set of //! dependencies, not the upstream transitive dependencies. Consider this //! dependency graph: //! //! ```notrust //! A.1 A.2 //! | | //! | | //! B C //! \ / //! \ / //! D //! ``` //! //! In this scenario, when we compile `D`, we need to be able to distinctly //! resolve `A.1` and `A.2`, but an `--extern` flag cannot apply to these //! transitive dependencies. //! //! Note that the key idea here is that `B` and `C` are both *already compiled*. //! That is, they have already resolved their dependencies. Due to unrelated //! technical reasons, when a library is compiled, it is only compatible with //! the *exact same* version of the upstream libraries it was compiled against. //! We use the "Strict Version Hash" to identify the exact copy of an upstream //! library. //! //! With this knowledge, we know that `B` and `C` will depend on `A` with //! different SVH values, so we crawl the normal `-L` paths looking for //! `liba*.rlib` and filter based on the contained SVH. //! //! In the end, this ends up not needing `--extern` to specify upstream //! transitive dependencies. //! //! # Wrapping up //! //! That's the general overview of loading crates in the compiler, but it's by //! no means all of the necessary details. Take a look at the rest of //! metadata::loader or metadata::creader for all the juicy details! 
use back::archive::{METADATA_FILENAME}; use back::svh::Svh; use driver::session::Session; use llvm; use llvm::{False, ObjectFile, mk_section_iter}; use llvm::archive_ro::ArchiveRO; use metadata::cstore::{MetadataBlob, MetadataVec, MetadataArchive}; use metadata::decoder; use metadata::encoder; use metadata::filesearch::{FileSearch, FileMatches, FileDoesntMatch}; use syntax::codemap::Span; use syntax::diagnostic::SpanHandler; use util::fs; use std::c_str::ToCStr; use std::cmp; use std::collections::hash_map::{Occupied, Vacant}; use std::collections::{HashMap, HashSet}; use std::io::fs::PathExtensions; use std::io; use std::ptr; use std::slice; use std::string; use std::time::Duration; use flate; pub struct CrateMismatch { path: Path, got: String, } pub struct Context<'a> { pub sess: &'a Session, pub span: Span, pub ident: &'a str, pub crate_name: &'a str, pub hash: Option<&'a Svh>, pub triple: &'a str, pub filesearch: FileSearch<'a>, pub root: &'a Option<CratePaths>, pub rejected_via_hash: Vec<CrateMismatch>, pub rejected_via_triple: Vec<CrateMismatch>, pub should_match_name: bool, } pub struct Library { pub dylib: Option<Path>, pub rlib: Option<Path>, pub metadata: MetadataBlob, } pub struct ArchiveMetadata { _archive: ArchiveRO, // points into self._archive data: *const [u8], } pub struct CratePaths { pub ident: String, pub dylib: Option<Path>, pub rlib: Option<Path> } impl CratePaths { fn paths(&self) -> Vec<Path> { match (&self.dylib, &self.rlib) { (&None, &None) => vec!(), (&Some(ref p), &None) | (&None, &Some(ref p)) => vec!(p.clone()), (&Some(ref p1), &Some(ref p2)) => vec!(p1.clone(), p2.clone()), } } } impl<'a> Context<'a> { pub fn maybe_load_library_crate(&mut self) -> Option<Library> { self.find_library_crate() } pub fn load_library_crate(&mut self) -> Library { match self.find_library_crate() { Some(t) => t, None => { self.report_load_errs(); unreachable!() } } } pub fn report_load_errs(&mut self) { let message = if self.rejected_via_hash.len() > 0 { 
format!("found possibly newer version of crate `{}`", self.ident) } else if self.rejected_via_triple.len() > 0 { format!("found incorrect triple for crate `{}`", self.ident) } else { format!("can't find crate for `{}`", self.ident) }; let message = match self.root { &None => message, &Some(ref r) => format!("{} which `{}` depends on", message, r.ident) }; self.sess.span_err(self.span, message.as_slice()); let mismatches = self.rejected_via_triple.iter(); if self.rejected_via_triple.len() > 0 { self.sess.span_note(self.span, format!("expected triple of {}", self.triple).as_slice()); for (i, &CrateMismatch{ ref path, ref got }) in mismatches.enumerate() { self.sess.fileline_note(self.span, format!("crate `{}` path {}{}, triple {}: {}", self.ident, "#", i+1, got, path.display()).as_slice()); } } if self.rejected_via_hash.len() > 0 { self.sess.span_note(self.span, "perhaps this crate needs \ to be recompiled?"); let mismatches = self.rejected_via_hash.iter(); for (i, &CrateMismatch{ ref path, .. }) in mismatches.enumerate() { self.sess.fileline_note(self.span, format!("crate `{}` path {}{}: {}", self.ident, "#", i+1, path.display()).as_slice()); } match self.root { &None => {} &Some(ref r) => { for (i, path) in r.paths().iter().enumerate() { self.sess.fileline_note(self.span, format!("crate `{}` path #{}: {}", r.ident, i+1, path.display()).as_slice()); } } } } self.sess.abort_if_errors(); } fn find_library_crate(&mut self) -> Option<Library> { // If an SVH is specified, then this is a transitive dependency that // must be loaded via -L plus some filtering. 
if self.hash.is_none() { self.should_match_name = false; match self.find_commandline_library() { Some(l) => return Some(l), None => {} } self.should_match_name = true; } let dypair = self.dylibname(); // want: crate_name.dir_part() + prefix + crate_name.file_part + "-" let dylib_prefix = format!("{}{}", dypair.ref0(), self.crate_name); let rlib_prefix = format!("lib{}", self.crate_name); let mut candidates = HashMap::new(); // First, find all possible candidate rlibs and dylibs purely based on // the name of the files themselves. We're trying to match against an // exact crate name and a possibly an exact hash. // // During this step, we can filter all found libraries based on the // name and id found in the crate id (we ignore the path portion for // filename matching), as well as the exact hash (if specified). If we // end up having many candidates, we must look at the metadata to // perform exact matches against hashes/crate ids. Note that opening up // the metadata is where we do an exact match against the full contents // of the crate id (path/name/id). // // The goal of this step is to look at as little metadata as possible. 
self.filesearch.search(|path| { let file = match path.filename_str() { None => return FileDoesntMatch, Some(file) => file, }; let (hash, rlib) = if file.starts_with(rlib_prefix.as_slice()) && file.ends_with(".rlib") { (file.slice(rlib_prefix.len(), file.len() - ".rlib".len()), true) } else if file.starts_with(dylib_prefix.as_slice()) && file.ends_with(dypair.ref1().as_slice()) { (file.slice(dylib_prefix.len(), file.len() - dypair.ref1().len()), false) } else { return FileDoesntMatch }; info!("lib candidate: {}", path.display()); let slot = match candidates.entry(hash.to_string()) { Occupied(entry) => entry.into_mut(), Vacant(entry) => entry.set((HashSet::new(), HashSet::new())), }; let (ref mut rlibs, ref mut dylibs) = *slot; if rlib { rlibs.insert(fs::realpath(path).unwrap()); } else { dylibs.insert(fs::realpath(path).unwrap()); } FileMatches }); // We have now collected all known libraries into a set of candidates // keyed of the filename hash listed. For each filename, we also have a // list of rlibs/dylibs that apply. Here, we map each of these lists // (per hash), to a Library candidate for returning. // // A Library candidate is created if the metadata for the set of // libraries corresponds to the crate id and hash criteria that this // search is being performed for. let mut libraries = Vec::new(); for (_hash, (rlibs, dylibs)) in candidates.into_iter() { let mut metadata = None; let rlib = self.extract_one(rlibs, "rlib", &mut metadata); let dylib = self.extract_one(dylibs, "dylib", &mut metadata); match metadata { Some(metadata) => { libraries.push(Library { dylib: dylib, rlib: rlib, metadata: metadata, }) } None => {} } } // Having now translated all relevant found hashes into libraries, see // what we've got and figure out if we found multiple candidates for // libraries or not. 
match libraries.len() { 0 => None, 1 => Some(libraries.into_iter().next().unwrap()), _ => { self.sess.span_err(self.span, format!("multiple matching crates for `{}`", self.crate_name).as_slice()); self.sess.note("candidates:"); for lib in libraries.iter() { match lib.dylib { Some(ref p) => { self.sess.note(format!("path: {}", p.display()).as_slice()); } None => {} } match lib.rlib { Some(ref p) => { self.sess.note(format!("path: {}", p.display()).as_slice()); } None => {} } let data = lib.metadata.as_slice(); let name = decoder::get_crate_name(data); note_crate_name(self.sess.diagnostic(), name.as_slice()); } None } } } // Attempts to extract *one* library from the set `m`. If the set has no // elements, `None` is returned. If the set has more than one element, then // the errors and notes are emitted about the set of libraries. // // With only one library in the set, this function will extract it, and then // read the metadata from it if `*slot` is `None`. If the metadata couldn't // be read, it is assumed that the file isn't a valid rust library (no // errors are emitted). fn extract_one(&mut self, m: HashSet<Path>, flavor: &str, slot: &mut Option<MetadataBlob>) -> Option<Path> { let mut ret = None::<Path>; let mut error = 0u; if slot.is_some() { // FIXME(#10786): for an optimization, we only read one of the // library's metadata sections. In theory we should // read both, but reading dylib metadata is quite // slow. 
if m.len() == 0 { return None } else if m.len() == 1 { return Some(m.into_iter().next().unwrap()) } } for lib in m.into_iter() { info!("{} reading metadata from: {}", flavor, lib.display()); let metadata = match get_metadata_section(self.sess.target.target.options.is_like_osx, &lib) { Ok(blob) => { if self.crate_matches(blob.as_slice(), &lib) { blob } else { info!("metadata mismatch"); continue } } Err(_) => { info!("no metadata found"); continue } }; if ret.is_some() { self.sess.span_err(self.span, format!("multiple {} candidates for `{}` \ found", flavor, self.crate_name).as_slice()); self.sess.span_note(self.span, format!(r"candidate #1: {}", ret.as_ref().unwrap() .display()).as_slice()); error = 1; ret = None; } if error > 0 { error += 1; self.sess.span_note(self.span, format!(r"candidate #{}: {}", error, lib.display()).as_slice()); continue } *slot = Some(metadata); ret = Some(lib); } return if error > 0 {None} else {ret} } fn crate_matches(&mut self, crate_data: &[u8], libpath: &Path) -> bool { if self.should_match_name { match decoder::maybe_get_crate_name(crate_data) { Some(ref name) if self.crate_name == name.as_slice() => {} _ => { info!("Rejecting via crate name"); return false } } } let hash = match decoder::maybe_get_crate_hash(crate_data) { Some(hash) => hash, None => { info!("Rejecting via lack of crate hash"); return false; } }; let triple = match decoder::get_crate_triple(crate_data) { None =>
Some(t) => t, }; if triple.as_slice() != self.triple { info!("Rejecting via crate triple: expected {} got {}", self.triple, triple); self.rejected_via_triple.push(CrateMismatch { path: libpath.clone(), got: triple.to_string() }); return false; } match self.hash { None => true, Some(myhash) => { if *myhash != hash { info!("Rejecting via hash: expected {} got {}", *myhash, hash); self.rejected_via_hash.push(CrateMismatch { path: libpath.clone(), got: myhash.as_str().to_string() }); false } else { true } } } } // Returns the corresponding (prefix, suffix) that files need to have for // dynamic libraries fn dylibname(&self) -> (String, String) { let t = &self.sess.target.target; (t.options.dll_prefix.clone(), t.options.dll_suffix.clone()) } fn find_commandline_library(&mut self) -> Option<Library> { let locs = match self.sess.opts.externs.find_equiv(self.crate_name) { Some(s) => s, None => return None, }; // First, filter out all libraries that look suspicious. We only accept // files which actually exist that have the correct naming scheme for // rlibs/dylibs. 
let sess = self.sess; let dylibname = self.dylibname(); let mut rlibs = HashSet::new(); let mut dylibs = HashSet::new(); { let mut locs = locs.iter().map(|l| Path::new(l.as_slice())).filter(|loc| { if !loc.exists() { sess.err(format!("extern location for {} does not exist: {}", self.crate_name, loc.display()).as_slice()); return false; } let file = match loc.filename_str() { Some(file) => file, None => { sess.err(format!("extern location for {} is not a file: {}", self.crate_name, loc.display()).as_slice()); return false; } }; if file.starts_with("lib") && file.ends_with(".rlib") { return true } else { let (ref prefix, ref suffix) = dylibname; if file.starts_with(prefix.as_slice()) && file.ends_with(suffix.as_slice()) { return true } } sess.err(format!("extern location for {} is of an unknown type: {}", self.crate_name, loc.display()).as_slice()); false }); // Now that we have an iterator of good candidates, make sure there's at // most one rlib and at most one dylib. for loc in locs { if loc.filename_str().unwrap().ends_with(".rlib") { rlibs.insert(fs::realpath(&loc).unwrap()); } else { dylibs.insert(fs::realpath(&loc).unwrap()); } } }; // Extract the rlib/dylib pair. 
let mut metadata = None; let rlib = self.extract_one(rlibs, "rlib", &mut metadata); let dylib = self.extract_one(dylibs, "dylib", &mut metadata); if rlib.is_none() && dylib.is_none() { return None } match metadata { Some(metadata) => Some(Library { dylib: dylib, rlib: rlib, metadata: metadata, }), None => None, } } } pub fn note_crate_name(diag: &SpanHandler, name: &str) { diag.handler().note(format!("crate name: {}", name).as_slice()); } impl ArchiveMetadata { fn new(ar: ArchiveRO) -> Option<ArchiveMetadata> { let data = match ar.read(METADATA_FILENAME) { Some(data) => data as *const [u8], None => { debug!("didn't find '{}' in the archive", METADATA_FILENAME); return None; } }; Some(ArchiveMetadata { _archive: ar, data: data, }) } pub fn as_slice<'a>(&'a self) -> &'a [u8] { unsafe { &*self.data } } } // Just a small wrapper to time how long reading metadata takes. fn get_metadata_section(is_osx: bool, filename: &Path) -> Result<MetadataBlob, String> { let mut ret = None; let dur = Duration::span(|| { ret = Some(get_metadata_section_imp(is_osx, filename)); }); info!("reading {} => {}ms", filename.filename_display(), dur.num_milliseconds()); return ret.unwrap();; } fn get_metadata_section_imp(is_osx: bool, filename: &Path) -> Result<MetadataBlob, String> { if !filename.exists() { return Err(format!("no such file: '{}'", filename.display())); } if filename.filename_str().unwrap().ends_with(".rlib") { // Use ArchiveRO for speed here, it's backed by LLVM and uses mmap // internally to read the file. We also avoid even using a memcpy by // just keeping the archive along while the metadata is in use. 
let archive = match ArchiveRO::open(filename) { Some(ar) => ar, None => { debug!("llvm didn't like `{}`", filename.display()); return Err(format!("failed to read rlib metadata: '{}'", filename.display())); } }; return match ArchiveMetadata::new(archive).map(|ar| MetadataArchive(ar)) { None => { return Err((format!("failed to read rlib metadata: '{}'", filename.display()))) } Some(blob) => return Ok(blob) } } unsafe { let mb = filename.with_c_str(|buf| { llvm::LLVMRustCreateMemoryBufferWithContentsOfFile(buf) }); if mb as int == 0 { return Err(format!("error reading library: '{}'", filename.display())) } let of = match ObjectFile::new(mb) { Some(of) => of, _ => { return Err((format!("provided path not an object file: '{}'", filename.display()))) } }; let si = mk_section_iter(of.llof); while llvm::LLVMIsSectionIteratorAtEnd(of.llof, si.llsi) == False { let mut name_buf = ptr::null(); let name_len = llvm::LLVMRustGetSectionName(si.llsi, &mut name_buf); let name = string::raw::from_buf_len(name_buf as *const u8, name_len as uint); debug!("get_metadata_section: name {}", name); if read_meta_section_name(is_osx).as_slice() == name.as_slice() { let cbuf = llvm::LLVMGetSectionContents(si.llsi); let csz = llvm::LLVMGetSectionSize(si.llsi) as uint; let mut found = Err(format!("metadata not found: '{}'", filename.display())); let cvbuf: *const u8 = cbuf as *const u8; let vlen = encoder::metadata_encoding_version.len(); debug!("checking {} bytes of metadata-version stamp", vlen); let minsz = cmp::min(vlen, csz); let version_ok = slice::raw::buf_as_slice(cvbuf, minsz, |buf0| buf0 == encoder::metadata_encoding_version); if !version_ok { return Err((format!("incompatible metadata version found: '{}'", filename.display()))); } let cvbuf1 = cvbuf.offset(vlen as int); debug!("inflating {} bytes of compressed metadata", csz - vlen); slice::raw::buf_as_slice(cvbuf1, csz-vlen, |bytes| { match flate::inflate_bytes(bytes) { Some(inflated) => found = Ok(MetadataVec(inflated)), None => { 
found = Err(format!("failed to decompress \ metadata for: '{}'", filename.display())) } } }); if found.is_ok() { return found; } } llvm::LLVMMoveToNextSection(si.llsi); } return Err(format!("metadata not found: '{}'", filename.display())); } } pub fn meta_section_name(is_osx: bool) -> &'static str { if is_osx { "__DATA,__note.rustc" } else { ".note.rustc" } } pub fn read_meta_section_name(is_osx: bool) -> &'static str { if is_osx { "__note.rustc" } else { ".note.rustc" } } // A diagnostic function for dumping crate metadata to an output stream pub fn list_file_metadata(is_osx: bool, path: &Path, out: &mut io::Writer) -> io::IoResult<()> { match get_metadata_section(is_osx, path) { Ok(bytes) => decoder::list_crate_metadata(bytes.as_slice(), out), Err(msg) => { write!(out, "{}\n", msg) } } }
{ debug!("triple not present"); return false }
conditional_block
loader.rs
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Finds crate binaries and loads their metadata //! //! Might I be the first to welcome you to a world of platform differences, //! version requirements, dependency graphs, conflicting desires, and fun! This //! is the major guts (along with metadata::creader) of the compiler for loading //! crates and resolving dependencies. Let's take a tour! //! //! # The problem //! //! Each invocation of the compiler is immediately concerned with one primary //! problem, to connect a set of crates to resolved crates on the filesystem. //! Concretely speaking, the compiler follows roughly these steps to get here: //! //! 1. Discover a set of `extern crate` statements. //! 2. Transform these directives into crate names. If the directive does not //! have an explicit name, then the identifier is the name. //! 3. For each of these crate names, find a corresponding crate on the //! filesystem. //! //! Sounds easy, right? Let's walk into some of the nuances. //! //! ## Transitive Dependencies //! //! Let's say we've got three crates: A, B, and C. A depends on B, and B depends //! on C. When we're compiling A, we primarily need to find and locate B, but we //! also end up needing to find and locate C as well. //! //! The reason for this is that any of B's types could be composed of C's types, //! any function in B could return a type from C, etc. To be able to guarantee //! that we can always typecheck/translate any function, we have to have //! complete knowledge of the whole ecosystem, not just our immediate //! 
dependencies. //! //! So now as part of the "find a corresponding crate on the filesystem" step //! above, this involves also finding all crates for *all upstream //! dependencies*. This includes all dependencies transitively. //! //! ## Rlibs and Dylibs //! //! The compiler has two forms of intermediate dependencies. These are dubbed //! rlibs and dylibs for the static and dynamic variants, respectively. An rlib //! is a rustc-defined file format (currently just an ar archive) while a dylib //! is a platform-defined dynamic library. Each library has a metadata somewhere //! inside of it. //! //! When translating a crate name to a crate on the filesystem, we all of a //! sudden need to take into account both rlibs and dylibs! Linkage later on may //! use either one of these files, as each has their pros/cons. The job of crate //! loading is to discover what's possible by finding all candidates. //! //! Most parts of this loading systems keep the dylib/rlib as just separate //! variables. //! //! ## Where to look? //! //! We can't exactly scan your whole hard drive when looking for dependencies, //! so we need to places to look. Currently the compiler will implicitly add the //! target lib search path ($prefix/lib/rustlib/$target/lib) to any compilation, //! and otherwise all -L flags are added to the search paths. //! //! ## What criterion to select on? //! //! This a pretty tricky area of loading crates. Given a file, how do we know //! whether it's the right crate? Currently, the rules look along these lines: //! //! 1. Does the filename match an rlib/dylib pattern? That is to say, does the //! filename have the right prefix/suffix? //! 2. Does the filename have the right prefix for the crate name being queried? //! This is filtering for files like `libfoo*.rlib` and such. //! 3. Is the file an actual rust library? This is done by loading the metadata //! from the library and making sure it's actually there. //! 4. 
Does the name in the metadata agree with the name of the library? //! 5. Does the target in the metadata agree with the current target? //! 6. Does the SVH match? (more on this later) //! //! If the file answers `yes` to all these questions, then the file is //! considered as being *candidate* for being accepted. It is illegal to have //! more than two candidates as the compiler has no method by which to resolve //! this conflict. Additionally, rlib/dylib candidates are considered //! separately. //! //! After all this has happened, we have 1 or two files as candidates. These //! represent the rlib/dylib file found for a library, and they're returned as //! being found. //! //! ### What about versions? //! //! A lot of effort has been put forth to remove versioning from the compiler. //! There have been forays in the past to have versioning baked in, but it was //! largely always deemed insufficient to the point that it was recognized that //! it's probably something the compiler shouldn't do anyway due to its //! complicated nature and the state of the half-baked solutions. //! //! With a departure from versioning, the primary criterion for loading crates //! is just the name of a crate. If we stopped here, it would imply that you //! could never link two crates of the same name from different sources //! together, which is clearly a bad state to be in. //! //! To resolve this problem, we come to the next section! //! //! # Expert Mode //! //! A number of flags have been added to the compiler to solve the "version //! problem" in the previous section, as well as generally enabling more //! powerful usage of the crate loading system of the compiler. The goal of //! these flags and options are to enable third-party tools to drive the //! compiler with prior knowledge about how the world should look. //! //! ## The `--extern` flag //! //! The compiler accepts a flag of this form a number of times: //! //! ```notrust //! --extern crate-name=path/to/the/crate.rlib //! 
``` //! //! This flag is basically the following letter to the compiler: //! //! > Dear rustc, //! > //! > When you are attempting to load the immediate dependency `crate-name`, I //! > would like you too assume that the library is located at //! > `path/to/the/crate.rlib`, and look nowhere else. Also, please do not //! > assume that the path I specified has the name `crate-name`. //! //! This flag basically overrides most matching logic except for validating that //! the file is indeed a rust library. The same `crate-name` can be specified //! twice to specify the rlib/dylib pair. //! //! ## Enabling "multiple versions" //! //! This basically boils down to the ability to specify arbitrary packages to //! the compiler. For example, if crate A wanted to use Bv1 and Bv2, then it //! would look something like: //! //! ```ignore //! extern crate b1; //! extern crate b2; //! //! fn main() {} //! ``` //! //! and the compiler would be invoked as: //! //! ```notrust //! rustc a.rs --extern b1=path/to/libb1.rlib --extern b2=path/to/libb2.rlib //! ``` //! //! In this scenario there are two crates named `b` and the compiler must be //! manually driven to be informed where each crate is. //! //! ## Frobbing symbols //! //! One of the immediate problems with linking the same library together twice //! in the same problem is dealing with duplicate symbols. The primary way to //! deal with this in rustc is to add hashes to the end of each symbol. //! //! In order to force hashes to change between versions of a library, if //! desired, the compiler exposes an option `-C metadata=foo`, which is used to //! initially seed each symbol hash. The string `foo` is prepended to each //! string-to-hash to ensure that symbols change over time. //! //! ## Loading transitive dependencies //! //! Dealing with same-named-but-distinct crates is not just a local problem, but //! one that also needs to be dealt with for transitive dependencies. Note that //! 
in the letter above `--extern` flags only apply to the *local* set of //! dependencies, not the upstream transitive dependencies. Consider this //! dependency graph: //! //! ```notrust //! A.1 A.2 //! | | //! | | //! B C //! \ / //! \ / //! D //! ``` //! //! In this scenario, when we compile `D`, we need to be able to distinctly //! resolve `A.1` and `A.2`, but an `--extern` flag cannot apply to these //! transitive dependencies. //! //! Note that the key idea here is that `B` and `C` are both *already compiled*. //! That is, they have already resolved their dependencies. Due to unrelated //! technical reasons, when a library is compiled, it is only compatible with //! the *exact same* version of the upstream libraries it was compiled against. //! We use the "Strict Version Hash" to identify the exact copy of an upstream //! library. //! //! With this knowledge, we know that `B` and `C` will depend on `A` with //! different SVH values, so we crawl the normal `-L` paths looking for //! `liba*.rlib` and filter based on the contained SVH. //! //! In the end, this ends up not needing `--extern` to specify upstream //! transitive dependencies. //! //! # Wrapping up //! //! That's the general overview of loading crates in the compiler, but it's by //! no means all of the necessary details. Take a look at the rest of //! metadata::loader or metadata::creader for all the juicy details! 
use back::archive::{METADATA_FILENAME}; use back::svh::Svh; use driver::session::Session; use llvm; use llvm::{False, ObjectFile, mk_section_iter}; use llvm::archive_ro::ArchiveRO; use metadata::cstore::{MetadataBlob, MetadataVec, MetadataArchive}; use metadata::decoder; use metadata::encoder; use metadata::filesearch::{FileSearch, FileMatches, FileDoesntMatch}; use syntax::codemap::Span; use syntax::diagnostic::SpanHandler; use util::fs; use std::c_str::ToCStr; use std::cmp; use std::collections::hash_map::{Occupied, Vacant}; use std::collections::{HashMap, HashSet}; use std::io::fs::PathExtensions; use std::io; use std::ptr; use std::slice; use std::string; use std::time::Duration; use flate; pub struct CrateMismatch { path: Path, got: String, } pub struct Context<'a> { pub sess: &'a Session, pub span: Span, pub ident: &'a str, pub crate_name: &'a str, pub hash: Option<&'a Svh>, pub triple: &'a str, pub filesearch: FileSearch<'a>, pub root: &'a Option<CratePaths>, pub rejected_via_hash: Vec<CrateMismatch>, pub rejected_via_triple: Vec<CrateMismatch>, pub should_match_name: bool, } pub struct Library { pub dylib: Option<Path>, pub rlib: Option<Path>, pub metadata: MetadataBlob, } pub struct ArchiveMetadata { _archive: ArchiveRO, // points into self._archive data: *const [u8], } pub struct CratePaths { pub ident: String, pub dylib: Option<Path>, pub rlib: Option<Path> } impl CratePaths { fn paths(&self) -> Vec<Path> { match (&self.dylib, &self.rlib) { (&None, &None) => vec!(), (&Some(ref p), &None) | (&None, &Some(ref p)) => vec!(p.clone()), (&Some(ref p1), &Some(ref p2)) => vec!(p1.clone(), p2.clone()), } } } impl<'a> Context<'a> { pub fn maybe_load_library_crate(&mut self) -> Option<Library> { self.find_library_crate() } pub fn load_library_crate(&mut self) -> Library { match self.find_library_crate() { Some(t) => t, None => { self.report_load_errs(); unreachable!() } } } pub fn report_load_errs(&mut self) { let message = if self.rejected_via_hash.len() > 0 { 
format!("found possibly newer version of crate `{}`", self.ident) } else if self.rejected_via_triple.len() > 0 { format!("found incorrect triple for crate `{}`", self.ident) } else { format!("can't find crate for `{}`", self.ident) }; let message = match self.root { &None => message, &Some(ref r) => format!("{} which `{}` depends on", message, r.ident) }; self.sess.span_err(self.span, message.as_slice()); let mismatches = self.rejected_via_triple.iter(); if self.rejected_via_triple.len() > 0 { self.sess.span_note(self.span, format!("expected triple of {}", self.triple).as_slice()); for (i, &CrateMismatch{ ref path, ref got }) in mismatches.enumerate() { self.sess.fileline_note(self.span, format!("crate `{}` path {}{}, triple {}: {}", self.ident, "#", i+1, got, path.display()).as_slice()); } } if self.rejected_via_hash.len() > 0 { self.sess.span_note(self.span, "perhaps this crate needs \ to be recompiled?"); let mismatches = self.rejected_via_hash.iter(); for (i, &CrateMismatch{ ref path, .. }) in mismatches.enumerate() { self.sess.fileline_note(self.span, format!("crate `{}` path {}{}: {}", self.ident, "#", i+1, path.display()).as_slice()); } match self.root { &None => {} &Some(ref r) => { for (i, path) in r.paths().iter().enumerate() { self.sess.fileline_note(self.span, format!("crate `{}` path #{}: {}", r.ident, i+1, path.display()).as_slice()); } } } } self.sess.abort_if_errors(); } fn
(&mut self) -> Option<Library> { // If an SVH is specified, then this is a transitive dependency that // must be loaded via -L plus some filtering. if self.hash.is_none() { self.should_match_name = false; match self.find_commandline_library() { Some(l) => return Some(l), None => {} } self.should_match_name = true; } let dypair = self.dylibname(); // want: crate_name.dir_part() + prefix + crate_name.file_part + "-" let dylib_prefix = format!("{}{}", dypair.ref0(), self.crate_name); let rlib_prefix = format!("lib{}", self.crate_name); let mut candidates = HashMap::new(); // First, find all possible candidate rlibs and dylibs purely based on // the name of the files themselves. We're trying to match against an // exact crate name and a possibly an exact hash. // // During this step, we can filter all found libraries based on the // name and id found in the crate id (we ignore the path portion for // filename matching), as well as the exact hash (if specified). If we // end up having many candidates, we must look at the metadata to // perform exact matches against hashes/crate ids. Note that opening up // the metadata is where we do an exact match against the full contents // of the crate id (path/name/id). // // The goal of this step is to look at as little metadata as possible. 
self.filesearch.search(|path| { let file = match path.filename_str() { None => return FileDoesntMatch, Some(file) => file, }; let (hash, rlib) = if file.starts_with(rlib_prefix.as_slice()) && file.ends_with(".rlib") { (file.slice(rlib_prefix.len(), file.len() - ".rlib".len()), true) } else if file.starts_with(dylib_prefix.as_slice()) && file.ends_with(dypair.ref1().as_slice()) { (file.slice(dylib_prefix.len(), file.len() - dypair.ref1().len()), false) } else { return FileDoesntMatch }; info!("lib candidate: {}", path.display()); let slot = match candidates.entry(hash.to_string()) { Occupied(entry) => entry.into_mut(), Vacant(entry) => entry.set((HashSet::new(), HashSet::new())), }; let (ref mut rlibs, ref mut dylibs) = *slot; if rlib { rlibs.insert(fs::realpath(path).unwrap()); } else { dylibs.insert(fs::realpath(path).unwrap()); } FileMatches }); // We have now collected all known libraries into a set of candidates // keyed of the filename hash listed. For each filename, we also have a // list of rlibs/dylibs that apply. Here, we map each of these lists // (per hash), to a Library candidate for returning. // // A Library candidate is created if the metadata for the set of // libraries corresponds to the crate id and hash criteria that this // search is being performed for. let mut libraries = Vec::new(); for (_hash, (rlibs, dylibs)) in candidates.into_iter() { let mut metadata = None; let rlib = self.extract_one(rlibs, "rlib", &mut metadata); let dylib = self.extract_one(dylibs, "dylib", &mut metadata); match metadata { Some(metadata) => { libraries.push(Library { dylib: dylib, rlib: rlib, metadata: metadata, }) } None => {} } } // Having now translated all relevant found hashes into libraries, see // what we've got and figure out if we found multiple candidates for // libraries or not. 
match libraries.len() { 0 => None, 1 => Some(libraries.into_iter().next().unwrap()), _ => { self.sess.span_err(self.span, format!("multiple matching crates for `{}`", self.crate_name).as_slice()); self.sess.note("candidates:"); for lib in libraries.iter() { match lib.dylib { Some(ref p) => { self.sess.note(format!("path: {}", p.display()).as_slice()); } None => {} } match lib.rlib { Some(ref p) => { self.sess.note(format!("path: {}", p.display()).as_slice()); } None => {} } let data = lib.metadata.as_slice(); let name = decoder::get_crate_name(data); note_crate_name(self.sess.diagnostic(), name.as_slice()); } None } } } // Attempts to extract *one* library from the set `m`. If the set has no // elements, `None` is returned. If the set has more than one element, then // the errors and notes are emitted about the set of libraries. // // With only one library in the set, this function will extract it, and then // read the metadata from it if `*slot` is `None`. If the metadata couldn't // be read, it is assumed that the file isn't a valid rust library (no // errors are emitted). fn extract_one(&mut self, m: HashSet<Path>, flavor: &str, slot: &mut Option<MetadataBlob>) -> Option<Path> { let mut ret = None::<Path>; let mut error = 0u; if slot.is_some() { // FIXME(#10786): for an optimization, we only read one of the // library's metadata sections. In theory we should // read both, but reading dylib metadata is quite // slow. 
if m.len() == 0 { return None } else if m.len() == 1 { return Some(m.into_iter().next().unwrap()) } } for lib in m.into_iter() { info!("{} reading metadata from: {}", flavor, lib.display()); let metadata = match get_metadata_section(self.sess.target.target.options.is_like_osx, &lib) { Ok(blob) => { if self.crate_matches(blob.as_slice(), &lib) { blob } else { info!("metadata mismatch"); continue } } Err(_) => { info!("no metadata found"); continue } }; if ret.is_some() { self.sess.span_err(self.span, format!("multiple {} candidates for `{}` \ found", flavor, self.crate_name).as_slice()); self.sess.span_note(self.span, format!(r"candidate #1: {}", ret.as_ref().unwrap() .display()).as_slice()); error = 1; ret = None; } if error > 0 { error += 1; self.sess.span_note(self.span, format!(r"candidate #{}: {}", error, lib.display()).as_slice()); continue } *slot = Some(metadata); ret = Some(lib); } return if error > 0 {None} else {ret} } fn crate_matches(&mut self, crate_data: &[u8], libpath: &Path) -> bool { if self.should_match_name { match decoder::maybe_get_crate_name(crate_data) { Some(ref name) if self.crate_name == name.as_slice() => {} _ => { info!("Rejecting via crate name"); return false } } } let hash = match decoder::maybe_get_crate_hash(crate_data) { Some(hash) => hash, None => { info!("Rejecting via lack of crate hash"); return false; } }; let triple = match decoder::get_crate_triple(crate_data) { None => { debug!("triple not present"); return false } Some(t) => t, }; if triple.as_slice() != self.triple { info!("Rejecting via crate triple: expected {} got {}", self.triple, triple); self.rejected_via_triple.push(CrateMismatch { path: libpath.clone(), got: triple.to_string() }); return false; } match self.hash { None => true, Some(myhash) => { if *myhash != hash { info!("Rejecting via hash: expected {} got {}", *myhash, hash); self.rejected_via_hash.push(CrateMismatch { path: libpath.clone(), got: myhash.as_str().to_string() }); false } else { true } } } } // 
Returns the corresponding (prefix, suffix) that files need to have for // dynamic libraries fn dylibname(&self) -> (String, String) { let t = &self.sess.target.target; (t.options.dll_prefix.clone(), t.options.dll_suffix.clone()) } fn find_commandline_library(&mut self) -> Option<Library> { let locs = match self.sess.opts.externs.find_equiv(self.crate_name) { Some(s) => s, None => return None, }; // First, filter out all libraries that look suspicious. We only accept // files which actually exist that have the correct naming scheme for // rlibs/dylibs. let sess = self.sess; let dylibname = self.dylibname(); let mut rlibs = HashSet::new(); let mut dylibs = HashSet::new(); { let mut locs = locs.iter().map(|l| Path::new(l.as_slice())).filter(|loc| { if !loc.exists() { sess.err(format!("extern location for {} does not exist: {}", self.crate_name, loc.display()).as_slice()); return false; } let file = match loc.filename_str() { Some(file) => file, None => { sess.err(format!("extern location for {} is not a file: {}", self.crate_name, loc.display()).as_slice()); return false; } }; if file.starts_with("lib") && file.ends_with(".rlib") { return true } else { let (ref prefix, ref suffix) = dylibname; if file.starts_with(prefix.as_slice()) && file.ends_with(suffix.as_slice()) { return true } } sess.err(format!("extern location for {} is of an unknown type: {}", self.crate_name, loc.display()).as_slice()); false }); // Now that we have an iterator of good candidates, make sure there's at // most one rlib and at most one dylib. for loc in locs { if loc.filename_str().unwrap().ends_with(".rlib") { rlibs.insert(fs::realpath(&loc).unwrap()); } else { dylibs.insert(fs::realpath(&loc).unwrap()); } } }; // Extract the rlib/dylib pair. 
let mut metadata = None; let rlib = self.extract_one(rlibs, "rlib", &mut metadata); let dylib = self.extract_one(dylibs, "dylib", &mut metadata); if rlib.is_none() && dylib.is_none() { return None } match metadata { Some(metadata) => Some(Library { dylib: dylib, rlib: rlib, metadata: metadata, }), None => None, } } } pub fn note_crate_name(diag: &SpanHandler, name: &str) { diag.handler().note(format!("crate name: {}", name).as_slice()); } impl ArchiveMetadata { fn new(ar: ArchiveRO) -> Option<ArchiveMetadata> { let data = match ar.read(METADATA_FILENAME) { Some(data) => data as *const [u8], None => { debug!("didn't find '{}' in the archive", METADATA_FILENAME); return None; } }; Some(ArchiveMetadata { _archive: ar, data: data, }) } pub fn as_slice<'a>(&'a self) -> &'a [u8] { unsafe { &*self.data } } } // Just a small wrapper to time how long reading metadata takes. fn get_metadata_section(is_osx: bool, filename: &Path) -> Result<MetadataBlob, String> { let mut ret = None; let dur = Duration::span(|| { ret = Some(get_metadata_section_imp(is_osx, filename)); }); info!("reading {} => {}ms", filename.filename_display(), dur.num_milliseconds()); return ret.unwrap();; } fn get_metadata_section_imp(is_osx: bool, filename: &Path) -> Result<MetadataBlob, String> { if !filename.exists() { return Err(format!("no such file: '{}'", filename.display())); } if filename.filename_str().unwrap().ends_with(".rlib") { // Use ArchiveRO for speed here, it's backed by LLVM and uses mmap // internally to read the file. We also avoid even using a memcpy by // just keeping the archive along while the metadata is in use. 
let archive = match ArchiveRO::open(filename) { Some(ar) => ar, None => { debug!("llvm didn't like `{}`", filename.display()); return Err(format!("failed to read rlib metadata: '{}'", filename.display())); } }; return match ArchiveMetadata::new(archive).map(|ar| MetadataArchive(ar)) { None => { return Err((format!("failed to read rlib metadata: '{}'", filename.display()))) } Some(blob) => return Ok(blob) } } unsafe { let mb = filename.with_c_str(|buf| { llvm::LLVMRustCreateMemoryBufferWithContentsOfFile(buf) }); if mb as int == 0 { return Err(format!("error reading library: '{}'", filename.display())) } let of = match ObjectFile::new(mb) { Some(of) => of, _ => { return Err((format!("provided path not an object file: '{}'", filename.display()))) } }; let si = mk_section_iter(of.llof); while llvm::LLVMIsSectionIteratorAtEnd(of.llof, si.llsi) == False { let mut name_buf = ptr::null(); let name_len = llvm::LLVMRustGetSectionName(si.llsi, &mut name_buf); let name = string::raw::from_buf_len(name_buf as *const u8, name_len as uint); debug!("get_metadata_section: name {}", name); if read_meta_section_name(is_osx).as_slice() == name.as_slice() { let cbuf = llvm::LLVMGetSectionContents(si.llsi); let csz = llvm::LLVMGetSectionSize(si.llsi) as uint; let mut found = Err(format!("metadata not found: '{}'", filename.display())); let cvbuf: *const u8 = cbuf as *const u8; let vlen = encoder::metadata_encoding_version.len(); debug!("checking {} bytes of metadata-version stamp", vlen); let minsz = cmp::min(vlen, csz); let version_ok = slice::raw::buf_as_slice(cvbuf, minsz, |buf0| buf0 == encoder::metadata_encoding_version); if !version_ok { return Err((format!("incompatible metadata version found: '{}'", filename.display()))); } let cvbuf1 = cvbuf.offset(vlen as int); debug!("inflating {} bytes of compressed metadata", csz - vlen); slice::raw::buf_as_slice(cvbuf1, csz-vlen, |bytes| { match flate::inflate_bytes(bytes) { Some(inflated) => found = Ok(MetadataVec(inflated)), None => { 
found = Err(format!("failed to decompress \ metadata for: '{}'", filename.display())) } } }); if found.is_ok() { return found; } } llvm::LLVMMoveToNextSection(si.llsi); } return Err(format!("metadata not found: '{}'", filename.display())); } } pub fn meta_section_name(is_osx: bool) -> &'static str { if is_osx { "__DATA,__note.rustc" } else { ".note.rustc" } } pub fn read_meta_section_name(is_osx: bool) -> &'static str { if is_osx { "__note.rustc" } else { ".note.rustc" } } // A diagnostic function for dumping crate metadata to an output stream pub fn list_file_metadata(is_osx: bool, path: &Path, out: &mut io::Writer) -> io::IoResult<()> { match get_metadata_section(is_osx, path) { Ok(bytes) => decoder::list_crate_metadata(bytes.as_slice(), out), Err(msg) => { write!(out, "{}\n", msg) } } }
find_library_crate
identifier_name
loader.rs
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Finds crate binaries and loads their metadata //! //! Might I be the first to welcome you to a world of platform differences, //! version requirements, dependency graphs, conflicting desires, and fun! This //! is the major guts (along with metadata::creader) of the compiler for loading //! crates and resolving dependencies. Let's take a tour! //! //! # The problem //! //! Each invocation of the compiler is immediately concerned with one primary //! problem, to connect a set of crates to resolved crates on the filesystem. //! Concretely speaking, the compiler follows roughly these steps to get here: //! //! 1. Discover a set of `extern crate` statements. //! 2. Transform these directives into crate names. If the directive does not //! have an explicit name, then the identifier is the name. //! 3. For each of these crate names, find a corresponding crate on the //! filesystem. //! //! Sounds easy, right? Let's walk into some of the nuances. //! //! ## Transitive Dependencies //! //! Let's say we've got three crates: A, B, and C. A depends on B, and B depends //! on C. When we're compiling A, we primarily need to find and locate B, but we //! also end up needing to find and locate C as well. //! //! The reason for this is that any of B's types could be composed of C's types, //! any function in B could return a type from C, etc. To be able to guarantee //! that we can always typecheck/translate any function, we have to have //! complete knowledge of the whole ecosystem, not just our immediate //! 
dependencies. //! //! So now as part of the "find a corresponding crate on the filesystem" step //! above, this involves also finding all crates for *all upstream //! dependencies*. This includes all dependencies transitively. //! //! ## Rlibs and Dylibs //! //! The compiler has two forms of intermediate dependencies. These are dubbed //! rlibs and dylibs for the static and dynamic variants, respectively. An rlib //! is a rustc-defined file format (currently just an ar archive) while a dylib //! is a platform-defined dynamic library. Each library has a metadata somewhere //! inside of it. //! //! When translating a crate name to a crate on the filesystem, we all of a //! sudden need to take into account both rlibs and dylibs! Linkage later on may //! use either one of these files, as each has their pros/cons. The job of crate //! loading is to discover what's possible by finding all candidates. //! //! Most parts of this loading systems keep the dylib/rlib as just separate //! variables. //! //! ## Where to look? //! //! We can't exactly scan your whole hard drive when looking for dependencies, //! so we need to places to look. Currently the compiler will implicitly add the //! target lib search path ($prefix/lib/rustlib/$target/lib) to any compilation, //! and otherwise all -L flags are added to the search paths. //! //! ## What criterion to select on? //! //! This a pretty tricky area of loading crates. Given a file, how do we know //! whether it's the right crate? Currently, the rules look along these lines: //! //! 1. Does the filename match an rlib/dylib pattern? That is to say, does the //! filename have the right prefix/suffix? //! 2. Does the filename have the right prefix for the crate name being queried? //! This is filtering for files like `libfoo*.rlib` and such. //! 3. Is the file an actual rust library? This is done by loading the metadata //! from the library and making sure it's actually there. //! 4. 
Does the name in the metadata agree with the name of the library? //! 5. Does the target in the metadata agree with the current target? //! 6. Does the SVH match? (more on this later) //! //! If the file answers `yes` to all these questions, then the file is //! considered as being *candidate* for being accepted. It is illegal to have //! more than two candidates as the compiler has no method by which to resolve //! this conflict. Additionally, rlib/dylib candidates are considered //! separately. //! //! After all this has happened, we have 1 or two files as candidates. These //! represent the rlib/dylib file found for a library, and they're returned as //! being found. //! //! ### What about versions? //! //! A lot of effort has been put forth to remove versioning from the compiler. //! There have been forays in the past to have versioning baked in, but it was //! largely always deemed insufficient to the point that it was recognized that //! it's probably something the compiler shouldn't do anyway due to its //! complicated nature and the state of the half-baked solutions. //! //! With a departure from versioning, the primary criterion for loading crates //! is just the name of a crate. If we stopped here, it would imply that you //! could never link two crates of the same name from different sources //! together, which is clearly a bad state to be in. //! //! To resolve this problem, we come to the next section! //! //! # Expert Mode //! //! A number of flags have been added to the compiler to solve the "version //! problem" in the previous section, as well as generally enabling more //! powerful usage of the crate loading system of the compiler. The goal of //! these flags and options are to enable third-party tools to drive the //! compiler with prior knowledge about how the world should look. //! //! ## The `--extern` flag //! //! The compiler accepts a flag of this form a number of times: //! //! ```notrust //! --extern crate-name=path/to/the/crate.rlib //! 
``` //! //! This flag is basically the following letter to the compiler: //! //! > Dear rustc, //! > //! > When you are attempting to load the immediate dependency `crate-name`, I //! > would like you too assume that the library is located at //! > `path/to/the/crate.rlib`, and look nowhere else. Also, please do not //! > assume that the path I specified has the name `crate-name`. //! //! This flag basically overrides most matching logic except for validating that //! the file is indeed a rust library. The same `crate-name` can be specified //! twice to specify the rlib/dylib pair. //! //! ## Enabling "multiple versions" //! //! This basically boils down to the ability to specify arbitrary packages to //! the compiler. For example, if crate A wanted to use Bv1 and Bv2, then it //! would look something like: //! //! ```ignore //! extern crate b1; //! extern crate b2; //! //! fn main() {} //! ``` //! //! and the compiler would be invoked as: //! //! ```notrust //! rustc a.rs --extern b1=path/to/libb1.rlib --extern b2=path/to/libb2.rlib //! ``` //! //! In this scenario there are two crates named `b` and the compiler must be //! manually driven to be informed where each crate is. //! //! ## Frobbing symbols //! //! One of the immediate problems with linking the same library together twice //! in the same problem is dealing with duplicate symbols. The primary way to //! deal with this in rustc is to add hashes to the end of each symbol. //! //! In order to force hashes to change between versions of a library, if //! desired, the compiler exposes an option `-C metadata=foo`, which is used to //! initially seed each symbol hash. The string `foo` is prepended to each //! string-to-hash to ensure that symbols change over time. //! //! ## Loading transitive dependencies //! //! Dealing with same-named-but-distinct crates is not just a local problem, but //! one that also needs to be dealt with for transitive dependencies. Note that //! 
in the letter above `--extern` flags only apply to the *local* set of //! dependencies, not the upstream transitive dependencies. Consider this //! dependency graph: //! //! ```notrust //! A.1 A.2 //! | | //! | | //! B C //! \ / //! \ / //! D //! ``` //! //! In this scenario, when we compile `D`, we need to be able to distinctly //! resolve `A.1` and `A.2`, but an `--extern` flag cannot apply to these //! transitive dependencies. //! //! Note that the key idea here is that `B` and `C` are both *already compiled*. //! That is, they have already resolved their dependencies. Due to unrelated //! technical reasons, when a library is compiled, it is only compatible with //! the *exact same* version of the upstream libraries it was compiled against. //! We use the "Strict Version Hash" to identify the exact copy of an upstream //! library. //! //! With this knowledge, we know that `B` and `C` will depend on `A` with //! different SVH values, so we crawl the normal `-L` paths looking for //! `liba*.rlib` and filter based on the contained SVH. //! //! In the end, this ends up not needing `--extern` to specify upstream //! transitive dependencies. //! //! # Wrapping up //! //! That's the general overview of loading crates in the compiler, but it's by //! no means all of the necessary details. Take a look at the rest of //! metadata::loader or metadata::creader for all the juicy details! 
use back::archive::{METADATA_FILENAME}; use back::svh::Svh; use driver::session::Session; use llvm; use llvm::{False, ObjectFile, mk_section_iter}; use llvm::archive_ro::ArchiveRO; use metadata::cstore::{MetadataBlob, MetadataVec, MetadataArchive}; use metadata::decoder; use metadata::encoder; use metadata::filesearch::{FileSearch, FileMatches, FileDoesntMatch}; use syntax::codemap::Span; use syntax::diagnostic::SpanHandler; use util::fs; use std::c_str::ToCStr; use std::cmp; use std::collections::hash_map::{Occupied, Vacant}; use std::collections::{HashMap, HashSet}; use std::io::fs::PathExtensions; use std::io; use std::ptr; use std::slice; use std::string; use std::time::Duration; use flate; pub struct CrateMismatch { path: Path, got: String, } pub struct Context<'a> { pub sess: &'a Session, pub span: Span, pub ident: &'a str, pub crate_name: &'a str, pub hash: Option<&'a Svh>, pub triple: &'a str, pub filesearch: FileSearch<'a>, pub root: &'a Option<CratePaths>, pub rejected_via_hash: Vec<CrateMismatch>, pub rejected_via_triple: Vec<CrateMismatch>, pub should_match_name: bool, } pub struct Library { pub dylib: Option<Path>, pub rlib: Option<Path>, pub metadata: MetadataBlob, } pub struct ArchiveMetadata { _archive: ArchiveRO, // points into self._archive data: *const [u8], } pub struct CratePaths { pub ident: String, pub dylib: Option<Path>, pub rlib: Option<Path> } impl CratePaths { fn paths(&self) -> Vec<Path> { match (&self.dylib, &self.rlib) { (&None, &None) => vec!(), (&Some(ref p), &None) | (&None, &Some(ref p)) => vec!(p.clone()), (&Some(ref p1), &Some(ref p2)) => vec!(p1.clone(), p2.clone()), } } } impl<'a> Context<'a> { pub fn maybe_load_library_crate(&mut self) -> Option<Library> { self.find_library_crate() } pub fn load_library_crate(&mut self) -> Library { match self.find_library_crate() { Some(t) => t, None => { self.report_load_errs(); unreachable!() } } } pub fn report_load_errs(&mut self) { let message = if self.rejected_via_hash.len() > 0 { 
format!("found possibly newer version of crate `{}`", self.ident) } else if self.rejected_via_triple.len() > 0 { format!("found incorrect triple for crate `{}`", self.ident) } else { format!("can't find crate for `{}`", self.ident) }; let message = match self.root { &None => message, &Some(ref r) => format!("{} which `{}` depends on", message, r.ident) }; self.sess.span_err(self.span, message.as_slice()); let mismatches = self.rejected_via_triple.iter(); if self.rejected_via_triple.len() > 0 { self.sess.span_note(self.span, format!("expected triple of {}", self.triple).as_slice()); for (i, &CrateMismatch{ ref path, ref got }) in mismatches.enumerate() { self.sess.fileline_note(self.span, format!("crate `{}` path {}{}, triple {}: {}", self.ident, "#", i+1, got, path.display()).as_slice()); } } if self.rejected_via_hash.len() > 0 { self.sess.span_note(self.span, "perhaps this crate needs \ to be recompiled?"); let mismatches = self.rejected_via_hash.iter(); for (i, &CrateMismatch{ ref path, .. }) in mismatches.enumerate() { self.sess.fileline_note(self.span, format!("crate `{}` path {}{}: {}", self.ident, "#", i+1, path.display()).as_slice()); } match self.root { &None => {} &Some(ref r) => { for (i, path) in r.paths().iter().enumerate() { self.sess.fileline_note(self.span, format!("crate `{}` path #{}: {}", r.ident, i+1, path.display()).as_slice()); } } } } self.sess.abort_if_errors(); } fn find_library_crate(&mut self) -> Option<Library> { // If an SVH is specified, then this is a transitive dependency that // must be loaded via -L plus some filtering. 
if self.hash.is_none() { self.should_match_name = false; match self.find_commandline_library() { Some(l) => return Some(l), None => {} } self.should_match_name = true; } let dypair = self.dylibname(); // want: crate_name.dir_part() + prefix + crate_name.file_part + "-" let dylib_prefix = format!("{}{}", dypair.ref0(), self.crate_name); let rlib_prefix = format!("lib{}", self.crate_name); let mut candidates = HashMap::new(); // First, find all possible candidate rlibs and dylibs purely based on // the name of the files themselves. We're trying to match against an // exact crate name and a possibly an exact hash. // // During this step, we can filter all found libraries based on the // name and id found in the crate id (we ignore the path portion for // filename matching), as well as the exact hash (if specified). If we // end up having many candidates, we must look at the metadata to // perform exact matches against hashes/crate ids. Note that opening up // the metadata is where we do an exact match against the full contents // of the crate id (path/name/id). // // The goal of this step is to look at as little metadata as possible. 
self.filesearch.search(|path| { let file = match path.filename_str() { None => return FileDoesntMatch, Some(file) => file, }; let (hash, rlib) = if file.starts_with(rlib_prefix.as_slice()) && file.ends_with(".rlib") { (file.slice(rlib_prefix.len(), file.len() - ".rlib".len()), true) } else if file.starts_with(dylib_prefix.as_slice()) && file.ends_with(dypair.ref1().as_slice()) { (file.slice(dylib_prefix.len(), file.len() - dypair.ref1().len()), false) } else { return FileDoesntMatch }; info!("lib candidate: {}", path.display()); let slot = match candidates.entry(hash.to_string()) { Occupied(entry) => entry.into_mut(), Vacant(entry) => entry.set((HashSet::new(), HashSet::new())), }; let (ref mut rlibs, ref mut dylibs) = *slot; if rlib { rlibs.insert(fs::realpath(path).unwrap()); } else { dylibs.insert(fs::realpath(path).unwrap()); } FileMatches }); // We have now collected all known libraries into a set of candidates // keyed of the filename hash listed. For each filename, we also have a // list of rlibs/dylibs that apply. Here, we map each of these lists // (per hash), to a Library candidate for returning. // // A Library candidate is created if the metadata for the set of // libraries corresponds to the crate id and hash criteria that this // search is being performed for. let mut libraries = Vec::new(); for (_hash, (rlibs, dylibs)) in candidates.into_iter() { let mut metadata = None; let rlib = self.extract_one(rlibs, "rlib", &mut metadata); let dylib = self.extract_one(dylibs, "dylib", &mut metadata); match metadata { Some(metadata) => { libraries.push(Library { dylib: dylib, rlib: rlib, metadata: metadata, }) } None => {} } } // Having now translated all relevant found hashes into libraries, see // what we've got and figure out if we found multiple candidates for // libraries or not. 
match libraries.len() { 0 => None, 1 => Some(libraries.into_iter().next().unwrap()), _ => { self.sess.span_err(self.span, format!("multiple matching crates for `{}`", self.crate_name).as_slice()); self.sess.note("candidates:"); for lib in libraries.iter() { match lib.dylib { Some(ref p) => { self.sess.note(format!("path: {}", p.display()).as_slice()); } None => {} } match lib.rlib { Some(ref p) => { self.sess.note(format!("path: {}", p.display()).as_slice()); } None => {} } let data = lib.metadata.as_slice(); let name = decoder::get_crate_name(data); note_crate_name(self.sess.diagnostic(), name.as_slice()); } None } } } // Attempts to extract *one* library from the set `m`. If the set has no // elements, `None` is returned. If the set has more than one element, then // the errors and notes are emitted about the set of libraries. // // With only one library in the set, this function will extract it, and then // read the metadata from it if `*slot` is `None`. If the metadata couldn't // be read, it is assumed that the file isn't a valid rust library (no // errors are emitted). fn extract_one(&mut self, m: HashSet<Path>, flavor: &str, slot: &mut Option<MetadataBlob>) -> Option<Path> { let mut ret = None::<Path>; let mut error = 0u; if slot.is_some() { // FIXME(#10786): for an optimization, we only read one of the // library's metadata sections. In theory we should // read both, but reading dylib metadata is quite // slow. 
if m.len() == 0 { return None } else if m.len() == 1 { return Some(m.into_iter().next().unwrap()) } } for lib in m.into_iter() { info!("{} reading metadata from: {}", flavor, lib.display()); let metadata = match get_metadata_section(self.sess.target.target.options.is_like_osx, &lib) { Ok(blob) => { if self.crate_matches(blob.as_slice(), &lib) { blob } else { info!("metadata mismatch"); continue } } Err(_) => { info!("no metadata found"); continue } }; if ret.is_some() { self.sess.span_err(self.span, format!("multiple {} candidates for `{}` \ found", flavor, self.crate_name).as_slice()); self.sess.span_note(self.span, format!(r"candidate #1: {}", ret.as_ref().unwrap() .display()).as_slice()); error = 1; ret = None; } if error > 0 { error += 1; self.sess.span_note(self.span, format!(r"candidate #{}: {}", error, lib.display()).as_slice()); continue } *slot = Some(metadata); ret = Some(lib); } return if error > 0 {None} else {ret} } fn crate_matches(&mut self, crate_data: &[u8], libpath: &Path) -> bool { if self.should_match_name { match decoder::maybe_get_crate_name(crate_data) { Some(ref name) if self.crate_name == name.as_slice() => {} _ => { info!("Rejecting via crate name"); return false } } } let hash = match decoder::maybe_get_crate_hash(crate_data) { Some(hash) => hash, None => { info!("Rejecting via lack of crate hash"); return false; } }; let triple = match decoder::get_crate_triple(crate_data) { None => { debug!("triple not present"); return false } Some(t) => t, }; if triple.as_slice() != self.triple { info!("Rejecting via crate triple: expected {} got {}", self.triple, triple); self.rejected_via_triple.push(CrateMismatch { path: libpath.clone(), got: triple.to_string() }); return false; } match self.hash { None => true, Some(myhash) => { if *myhash != hash { info!("Rejecting via hash: expected {} got {}", *myhash, hash); self.rejected_via_hash.push(CrateMismatch { path: libpath.clone(), got: myhash.as_str().to_string() }); false } else { true } } } } // 
Returns the corresponding (prefix, suffix) that files need to have for // dynamic libraries fn dylibname(&self) -> (String, String) { let t = &self.sess.target.target; (t.options.dll_prefix.clone(), t.options.dll_suffix.clone()) } fn find_commandline_library(&mut self) -> Option<Library> { let locs = match self.sess.opts.externs.find_equiv(self.crate_name) { Some(s) => s, None => return None, }; // First, filter out all libraries that look suspicious. We only accept // files which actually exist that have the correct naming scheme for // rlibs/dylibs. let sess = self.sess; let dylibname = self.dylibname(); let mut rlibs = HashSet::new(); let mut dylibs = HashSet::new(); { let mut locs = locs.iter().map(|l| Path::new(l.as_slice())).filter(|loc| { if !loc.exists() { sess.err(format!("extern location for {} does not exist: {}", self.crate_name, loc.display()).as_slice()); return false; } let file = match loc.filename_str() { Some(file) => file, None => { sess.err(format!("extern location for {} is not a file: {}", self.crate_name, loc.display()).as_slice()); return false; } }; if file.starts_with("lib") && file.ends_with(".rlib") { return true } else { let (ref prefix, ref suffix) = dylibname; if file.starts_with(prefix.as_slice()) && file.ends_with(suffix.as_slice()) { return true } } sess.err(format!("extern location for {} is of an unknown type: {}", self.crate_name, loc.display()).as_slice()); false }); // Now that we have an iterator of good candidates, make sure there's at // most one rlib and at most one dylib. for loc in locs { if loc.filename_str().unwrap().ends_with(".rlib") { rlibs.insert(fs::realpath(&loc).unwrap()); } else { dylibs.insert(fs::realpath(&loc).unwrap()); } } }; // Extract the rlib/dylib pair. 
let mut metadata = None; let rlib = self.extract_one(rlibs, "rlib", &mut metadata); let dylib = self.extract_one(dylibs, "dylib", &mut metadata); if rlib.is_none() && dylib.is_none() { return None } match metadata { Some(metadata) => Some(Library { dylib: dylib, rlib: rlib, metadata: metadata, }), None => None, } } } pub fn note_crate_name(diag: &SpanHandler, name: &str) { diag.handler().note(format!("crate name: {}", name).as_slice()); } impl ArchiveMetadata { fn new(ar: ArchiveRO) -> Option<ArchiveMetadata> { let data = match ar.read(METADATA_FILENAME) { Some(data) => data as *const [u8], None => { debug!("didn't find '{}' in the archive", METADATA_FILENAME); return None; } }; Some(ArchiveMetadata { _archive: ar, data: data, }) } pub fn as_slice<'a>(&'a self) -> &'a [u8]
} // Just a small wrapper to time how long reading metadata takes. fn get_metadata_section(is_osx: bool, filename: &Path) -> Result<MetadataBlob, String> { let mut ret = None; let dur = Duration::span(|| { ret = Some(get_metadata_section_imp(is_osx, filename)); }); info!("reading {} => {}ms", filename.filename_display(), dur.num_milliseconds()); return ret.unwrap();; } fn get_metadata_section_imp(is_osx: bool, filename: &Path) -> Result<MetadataBlob, String> { if !filename.exists() { return Err(format!("no such file: '{}'", filename.display())); } if filename.filename_str().unwrap().ends_with(".rlib") { // Use ArchiveRO for speed here, it's backed by LLVM and uses mmap // internally to read the file. We also avoid even using a memcpy by // just keeping the archive along while the metadata is in use. let archive = match ArchiveRO::open(filename) { Some(ar) => ar, None => { debug!("llvm didn't like `{}`", filename.display()); return Err(format!("failed to read rlib metadata: '{}'", filename.display())); } }; return match ArchiveMetadata::new(archive).map(|ar| MetadataArchive(ar)) { None => { return Err((format!("failed to read rlib metadata: '{}'", filename.display()))) } Some(blob) => return Ok(blob) } } unsafe { let mb = filename.with_c_str(|buf| { llvm::LLVMRustCreateMemoryBufferWithContentsOfFile(buf) }); if mb as int == 0 { return Err(format!("error reading library: '{}'", filename.display())) } let of = match ObjectFile::new(mb) { Some(of) => of, _ => { return Err((format!("provided path not an object file: '{}'", filename.display()))) } }; let si = mk_section_iter(of.llof); while llvm::LLVMIsSectionIteratorAtEnd(of.llof, si.llsi) == False { let mut name_buf = ptr::null(); let name_len = llvm::LLVMRustGetSectionName(si.llsi, &mut name_buf); let name = string::raw::from_buf_len(name_buf as *const u8, name_len as uint); debug!("get_metadata_section: name {}", name); if read_meta_section_name(is_osx).as_slice() == name.as_slice() { let cbuf = 
llvm::LLVMGetSectionContents(si.llsi); let csz = llvm::LLVMGetSectionSize(si.llsi) as uint; let mut found = Err(format!("metadata not found: '{}'", filename.display())); let cvbuf: *const u8 = cbuf as *const u8; let vlen = encoder::metadata_encoding_version.len(); debug!("checking {} bytes of metadata-version stamp", vlen); let minsz = cmp::min(vlen, csz); let version_ok = slice::raw::buf_as_slice(cvbuf, minsz, |buf0| buf0 == encoder::metadata_encoding_version); if !version_ok { return Err((format!("incompatible metadata version found: '{}'", filename.display()))); } let cvbuf1 = cvbuf.offset(vlen as int); debug!("inflating {} bytes of compressed metadata", csz - vlen); slice::raw::buf_as_slice(cvbuf1, csz-vlen, |bytes| { match flate::inflate_bytes(bytes) { Some(inflated) => found = Ok(MetadataVec(inflated)), None => { found = Err(format!("failed to decompress \ metadata for: '{}'", filename.display())) } } }); if found.is_ok() { return found; } } llvm::LLVMMoveToNextSection(si.llsi); } return Err(format!("metadata not found: '{}'", filename.display())); } } pub fn meta_section_name(is_osx: bool) -> &'static str { if is_osx { "__DATA,__note.rustc" } else { ".note.rustc" } } pub fn read_meta_section_name(is_osx: bool) -> &'static str { if is_osx { "__note.rustc" } else { ".note.rustc" } } // A diagnostic function for dumping crate metadata to an output stream pub fn list_file_metadata(is_osx: bool, path: &Path, out: &mut io::Writer) -> io::IoResult<()> { match get_metadata_section(is_osx, path) { Ok(bytes) => decoder::list_crate_metadata(bytes.as_slice(), out), Err(msg) => { write!(out, "{}\n", msg) } } }
{ unsafe { &*self.data } }
identifier_body
loader.rs
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Finds crate binaries and loads their metadata //! //! Might I be the first to welcome you to a world of platform differences, //! version requirements, dependency graphs, conflicting desires, and fun! This //! is the major guts (along with metadata::creader) of the compiler for loading //! crates and resolving dependencies. Let's take a tour! //! //! # The problem //! //! Each invocation of the compiler is immediately concerned with one primary //! problem, to connect a set of crates to resolved crates on the filesystem. //! Concretely speaking, the compiler follows roughly these steps to get here: //! //! 1. Discover a set of `extern crate` statements. //! 2. Transform these directives into crate names. If the directive does not //! have an explicit name, then the identifier is the name. //! 3. For each of these crate names, find a corresponding crate on the //! filesystem. //! //! Sounds easy, right? Let's walk into some of the nuances. //! //! ## Transitive Dependencies //! //! Let's say we've got three crates: A, B, and C. A depends on B, and B depends //! on C. When we're compiling A, we primarily need to find and locate B, but we //! also end up needing to find and locate C as well. //! //! The reason for this is that any of B's types could be composed of C's types, //! any function in B could return a type from C, etc. To be able to guarantee //! that we can always typecheck/translate any function, we have to have //! complete knowledge of the whole ecosystem, not just our immediate //! 
dependencies.
//!
//! So now as part of the "find a corresponding crate on the filesystem" step
//! above, this involves also finding all crates for *all upstream
//! dependencies*. This includes all dependencies transitively.
//!
//! ## Rlibs and Dylibs
//!
//! The compiler has two forms of intermediate dependencies. These are dubbed
//! rlibs and dylibs for the static and dynamic variants, respectively. An rlib
//! is a rustc-defined file format (currently just an ar archive) while a dylib
//! is a platform-defined dynamic library. Each library has a metadata somewhere
//! inside of it.
//!
//! When translating a crate name to a crate on the filesystem, we all of a
//! sudden need to take into account both rlibs and dylibs! Linkage later on may
//! use either one of these files, as each has their pros/cons. The job of crate
//! loading is to discover what's possible by finding all candidates.
//!
//! Most parts of this loading system keep the dylib/rlib as just separate
//! variables.
//!
//! ## Where to look?
//!
//! We can't exactly scan your whole hard drive when looking for dependencies,
//! so we need places to look. Currently the compiler will implicitly add the
//! target lib search path ($prefix/lib/rustlib/$target/lib) to any compilation,
//! and otherwise all -L flags are added to the search paths.
//!
//! ## What criterion to select on?
//!
//! This is a pretty tricky area of loading crates. Given a file, how do we know
//! whether it's the right crate? Currently, the rules look along these lines:
//!
//! 1. Does the filename match an rlib/dylib pattern? That is to say, does the
//!    filename have the right prefix/suffix?
//! 2. Does the filename have the right prefix for the crate name being queried?
//!    This is filtering for files like `libfoo*.rlib` and such.
//! 3. Is the file an actual rust library? This is done by loading the metadata
//!    from the library and making sure it's actually there.
//! 4.
Does the name in the metadata agree with the name of the library? //! 5. Does the target in the metadata agree with the current target? //! 6. Does the SVH match? (more on this later) //! //! If the file answers `yes` to all these questions, then the file is //! considered as being *candidate* for being accepted. It is illegal to have //! more than two candidates as the compiler has no method by which to resolve //! this conflict. Additionally, rlib/dylib candidates are considered //! separately. //! //! After all this has happened, we have 1 or two files as candidates. These //! represent the rlib/dylib file found for a library, and they're returned as //! being found. //! //! ### What about versions? //! //! A lot of effort has been put forth to remove versioning from the compiler. //! There have been forays in the past to have versioning baked in, but it was //! largely always deemed insufficient to the point that it was recognized that //! it's probably something the compiler shouldn't do anyway due to its //! complicated nature and the state of the half-baked solutions. //! //! With a departure from versioning, the primary criterion for loading crates //! is just the name of a crate. If we stopped here, it would imply that you //! could never link two crates of the same name from different sources //! together, which is clearly a bad state to be in. //! //! To resolve this problem, we come to the next section! //! //! # Expert Mode //! //! A number of flags have been added to the compiler to solve the "version //! problem" in the previous section, as well as generally enabling more //! powerful usage of the crate loading system of the compiler. The goal of //! these flags and options are to enable third-party tools to drive the //! compiler with prior knowledge about how the world should look. //! //! ## The `--extern` flag //! //! The compiler accepts a flag of this form a number of times: //! //! ```notrust //! --extern crate-name=path/to/the/crate.rlib //! 
```
//!
//! This flag is basically the following letter to the compiler:
//!
//! > Dear rustc,
//! >
//! > When you are attempting to load the immediate dependency `crate-name`, I
//! > would like you to assume that the library is located at
//! > `path/to/the/crate.rlib`, and look nowhere else. Also, please do not
//! > assume that the path I specified has the name `crate-name`.
//!
//! This flag basically overrides most matching logic except for validating that
//! the file is indeed a rust library. The same `crate-name` can be specified
//! twice to specify the rlib/dylib pair.
//!
//! ## Enabling "multiple versions"
//!
//! This basically boils down to the ability to specify arbitrary packages to
//! the compiler. For example, if crate A wanted to use Bv1 and Bv2, then it
//! would look something like:
//!
//! ```ignore
//! extern crate b1;
//! extern crate b2;
//!
//! fn main() {}
//! ```
//!
//! and the compiler would be invoked as:
//!
//! ```notrust
//! rustc a.rs --extern b1=path/to/libb1.rlib --extern b2=path/to/libb2.rlib
//! ```
//!
//! In this scenario there are two crates named `b` and the compiler must be
//! manually driven to be informed where each crate is.
//!
//! ## Frobbing symbols
//!
//! One of the immediate problems with linking the same library together twice
//! in the same program is dealing with duplicate symbols. The primary way to
//! deal with this in rustc is to add hashes to the end of each symbol.
//!
//! In order to force hashes to change between versions of a library, if
//! desired, the compiler exposes an option `-C metadata=foo`, which is used to
//! initially seed each symbol hash. The string `foo` is prepended to each
//! string-to-hash to ensure that symbols change over time.
//!
//! ## Loading transitive dependencies
//!
//! Dealing with same-named-but-distinct crates is not just a local problem, but
//! one that also needs to be dealt with for transitive dependencies. Note that
//!
in the letter above `--extern` flags only apply to the *local* set of //! dependencies, not the upstream transitive dependencies. Consider this //! dependency graph: //! //! ```notrust //! A.1 A.2 //! | | //! | | //! B C //! \ / //! \ / //! D //! ``` //! //! In this scenario, when we compile `D`, we need to be able to distinctly //! resolve `A.1` and `A.2`, but an `--extern` flag cannot apply to these //! transitive dependencies. //! //! Note that the key idea here is that `B` and `C` are both *already compiled*. //! That is, they have already resolved their dependencies. Due to unrelated //! technical reasons, when a library is compiled, it is only compatible with //! the *exact same* version of the upstream libraries it was compiled against. //! We use the "Strict Version Hash" to identify the exact copy of an upstream //! library. //! //! With this knowledge, we know that `B` and `C` will depend on `A` with //! different SVH values, so we crawl the normal `-L` paths looking for //! `liba*.rlib` and filter based on the contained SVH. //! //! In the end, this ends up not needing `--extern` to specify upstream //! transitive dependencies. //! //! # Wrapping up //! //! That's the general overview of loading crates in the compiler, but it's by //! no means all of the necessary details. Take a look at the rest of //! metadata::loader or metadata::creader for all the juicy details! 
use back::archive::{METADATA_FILENAME}; use back::svh::Svh; use driver::session::Session; use llvm; use llvm::{False, ObjectFile, mk_section_iter}; use llvm::archive_ro::ArchiveRO; use metadata::cstore::{MetadataBlob, MetadataVec, MetadataArchive}; use metadata::decoder; use metadata::encoder; use metadata::filesearch::{FileSearch, FileMatches, FileDoesntMatch}; use syntax::codemap::Span; use syntax::diagnostic::SpanHandler; use util::fs; use std::c_str::ToCStr; use std::cmp; use std::collections::hash_map::{Occupied, Vacant}; use std::collections::{HashMap, HashSet}; use std::io::fs::PathExtensions; use std::io; use std::ptr; use std::slice; use std::string; use std::time::Duration; use flate; pub struct CrateMismatch { path: Path, got: String, } pub struct Context<'a> { pub sess: &'a Session, pub span: Span, pub ident: &'a str, pub crate_name: &'a str, pub hash: Option<&'a Svh>, pub triple: &'a str, pub filesearch: FileSearch<'a>, pub root: &'a Option<CratePaths>, pub rejected_via_hash: Vec<CrateMismatch>, pub rejected_via_triple: Vec<CrateMismatch>, pub should_match_name: bool,
pub rlib: Option<Path>, pub metadata: MetadataBlob, } pub struct ArchiveMetadata { _archive: ArchiveRO, // points into self._archive data: *const [u8], } pub struct CratePaths { pub ident: String, pub dylib: Option<Path>, pub rlib: Option<Path> } impl CratePaths { fn paths(&self) -> Vec<Path> { match (&self.dylib, &self.rlib) { (&None, &None) => vec!(), (&Some(ref p), &None) | (&None, &Some(ref p)) => vec!(p.clone()), (&Some(ref p1), &Some(ref p2)) => vec!(p1.clone(), p2.clone()), } } } impl<'a> Context<'a> { pub fn maybe_load_library_crate(&mut self) -> Option<Library> { self.find_library_crate() } pub fn load_library_crate(&mut self) -> Library { match self.find_library_crate() { Some(t) => t, None => { self.report_load_errs(); unreachable!() } } } pub fn report_load_errs(&mut self) { let message = if self.rejected_via_hash.len() > 0 { format!("found possibly newer version of crate `{}`", self.ident) } else if self.rejected_via_triple.len() > 0 { format!("found incorrect triple for crate `{}`", self.ident) } else { format!("can't find crate for `{}`", self.ident) }; let message = match self.root { &None => message, &Some(ref r) => format!("{} which `{}` depends on", message, r.ident) }; self.sess.span_err(self.span, message.as_slice()); let mismatches = self.rejected_via_triple.iter(); if self.rejected_via_triple.len() > 0 { self.sess.span_note(self.span, format!("expected triple of {}", self.triple).as_slice()); for (i, &CrateMismatch{ ref path, ref got }) in mismatches.enumerate() { self.sess.fileline_note(self.span, format!("crate `{}` path {}{}, triple {}: {}", self.ident, "#", i+1, got, path.display()).as_slice()); } } if self.rejected_via_hash.len() > 0 { self.sess.span_note(self.span, "perhaps this crate needs \ to be recompiled?"); let mismatches = self.rejected_via_hash.iter(); for (i, &CrateMismatch{ ref path, .. 
}) in mismatches.enumerate() { self.sess.fileline_note(self.span, format!("crate `{}` path {}{}: {}", self.ident, "#", i+1, path.display()).as_slice()); } match self.root { &None => {} &Some(ref r) => { for (i, path) in r.paths().iter().enumerate() { self.sess.fileline_note(self.span, format!("crate `{}` path #{}: {}", r.ident, i+1, path.display()).as_slice()); } } } } self.sess.abort_if_errors(); } fn find_library_crate(&mut self) -> Option<Library> { // If an SVH is specified, then this is a transitive dependency that // must be loaded via -L plus some filtering. if self.hash.is_none() { self.should_match_name = false; match self.find_commandline_library() { Some(l) => return Some(l), None => {} } self.should_match_name = true; } let dypair = self.dylibname(); // want: crate_name.dir_part() + prefix + crate_name.file_part + "-" let dylib_prefix = format!("{}{}", dypair.ref0(), self.crate_name); let rlib_prefix = format!("lib{}", self.crate_name); let mut candidates = HashMap::new(); // First, find all possible candidate rlibs and dylibs purely based on // the name of the files themselves. We're trying to match against an // exact crate name and a possibly an exact hash. // // During this step, we can filter all found libraries based on the // name and id found in the crate id (we ignore the path portion for // filename matching), as well as the exact hash (if specified). If we // end up having many candidates, we must look at the metadata to // perform exact matches against hashes/crate ids. Note that opening up // the metadata is where we do an exact match against the full contents // of the crate id (path/name/id). // // The goal of this step is to look at as little metadata as possible. 
self.filesearch.search(|path| { let file = match path.filename_str() { None => return FileDoesntMatch, Some(file) => file, }; let (hash, rlib) = if file.starts_with(rlib_prefix.as_slice()) && file.ends_with(".rlib") { (file.slice(rlib_prefix.len(), file.len() - ".rlib".len()), true) } else if file.starts_with(dylib_prefix.as_slice()) && file.ends_with(dypair.ref1().as_slice()) { (file.slice(dylib_prefix.len(), file.len() - dypair.ref1().len()), false) } else { return FileDoesntMatch }; info!("lib candidate: {}", path.display()); let slot = match candidates.entry(hash.to_string()) { Occupied(entry) => entry.into_mut(), Vacant(entry) => entry.set((HashSet::new(), HashSet::new())), }; let (ref mut rlibs, ref mut dylibs) = *slot; if rlib { rlibs.insert(fs::realpath(path).unwrap()); } else { dylibs.insert(fs::realpath(path).unwrap()); } FileMatches }); // We have now collected all known libraries into a set of candidates // keyed of the filename hash listed. For each filename, we also have a // list of rlibs/dylibs that apply. Here, we map each of these lists // (per hash), to a Library candidate for returning. // // A Library candidate is created if the metadata for the set of // libraries corresponds to the crate id and hash criteria that this // search is being performed for. let mut libraries = Vec::new(); for (_hash, (rlibs, dylibs)) in candidates.into_iter() { let mut metadata = None; let rlib = self.extract_one(rlibs, "rlib", &mut metadata); let dylib = self.extract_one(dylibs, "dylib", &mut metadata); match metadata { Some(metadata) => { libraries.push(Library { dylib: dylib, rlib: rlib, metadata: metadata, }) } None => {} } } // Having now translated all relevant found hashes into libraries, see // what we've got and figure out if we found multiple candidates for // libraries or not. 
match libraries.len() { 0 => None, 1 => Some(libraries.into_iter().next().unwrap()), _ => { self.sess.span_err(self.span, format!("multiple matching crates for `{}`", self.crate_name).as_slice()); self.sess.note("candidates:"); for lib in libraries.iter() { match lib.dylib { Some(ref p) => { self.sess.note(format!("path: {}", p.display()).as_slice()); } None => {} } match lib.rlib { Some(ref p) => { self.sess.note(format!("path: {}", p.display()).as_slice()); } None => {} } let data = lib.metadata.as_slice(); let name = decoder::get_crate_name(data); note_crate_name(self.sess.diagnostic(), name.as_slice()); } None } } } // Attempts to extract *one* library from the set `m`. If the set has no // elements, `None` is returned. If the set has more than one element, then // the errors and notes are emitted about the set of libraries. // // With only one library in the set, this function will extract it, and then // read the metadata from it if `*slot` is `None`. If the metadata couldn't // be read, it is assumed that the file isn't a valid rust library (no // errors are emitted). fn extract_one(&mut self, m: HashSet<Path>, flavor: &str, slot: &mut Option<MetadataBlob>) -> Option<Path> { let mut ret = None::<Path>; let mut error = 0u; if slot.is_some() { // FIXME(#10786): for an optimization, we only read one of the // library's metadata sections. In theory we should // read both, but reading dylib metadata is quite // slow. 
if m.len() == 0 { return None } else if m.len() == 1 { return Some(m.into_iter().next().unwrap()) } } for lib in m.into_iter() { info!("{} reading metadata from: {}", flavor, lib.display()); let metadata = match get_metadata_section(self.sess.target.target.options.is_like_osx, &lib) { Ok(blob) => { if self.crate_matches(blob.as_slice(), &lib) { blob } else { info!("metadata mismatch"); continue } } Err(_) => { info!("no metadata found"); continue } }; if ret.is_some() { self.sess.span_err(self.span, format!("multiple {} candidates for `{}` \ found", flavor, self.crate_name).as_slice()); self.sess.span_note(self.span, format!(r"candidate #1: {}", ret.as_ref().unwrap() .display()).as_slice()); error = 1; ret = None; } if error > 0 { error += 1; self.sess.span_note(self.span, format!(r"candidate #{}: {}", error, lib.display()).as_slice()); continue } *slot = Some(metadata); ret = Some(lib); } return if error > 0 {None} else {ret} } fn crate_matches(&mut self, crate_data: &[u8], libpath: &Path) -> bool { if self.should_match_name { match decoder::maybe_get_crate_name(crate_data) { Some(ref name) if self.crate_name == name.as_slice() => {} _ => { info!("Rejecting via crate name"); return false } } } let hash = match decoder::maybe_get_crate_hash(crate_data) { Some(hash) => hash, None => { info!("Rejecting via lack of crate hash"); return false; } }; let triple = match decoder::get_crate_triple(crate_data) { None => { debug!("triple not present"); return false } Some(t) => t, }; if triple.as_slice() != self.triple { info!("Rejecting via crate triple: expected {} got {}", self.triple, triple); self.rejected_via_triple.push(CrateMismatch { path: libpath.clone(), got: triple.to_string() }); return false; } match self.hash { None => true, Some(myhash) => { if *myhash != hash { info!("Rejecting via hash: expected {} got {}", *myhash, hash); self.rejected_via_hash.push(CrateMismatch { path: libpath.clone(), got: myhash.as_str().to_string() }); false } else { true } } } } // 
Returns the corresponding (prefix, suffix) that files need to have for
    // dynamic libraries
    fn dylibname(&self) -> (String, String) {
        // Platform-specific dylib naming (e.g. "lib"/".so"), taken straight
        // from the target's options.
        let t = &self.sess.target.target;
        (t.options.dll_prefix.clone(), t.options.dll_suffix.clone())
    }

    // Handles `--extern crate-name=path` overrides from the command line.
    // Returns None when no override exists for `crate_name`; otherwise each
    // supplied path is validated (must exist and must look like an rlib or a
    // platform dylib) and at most one rlib plus one dylib are extracted.
    fn find_commandline_library(&mut self) -> Option<Library> {
        let locs = match self.sess.opts.externs.find_equiv(self.crate_name) {
            Some(s) => s,
            None => return None,
        };

        // First, filter out all libraries that look suspicious. We only accept
        // files which actually exist that have the correct naming scheme for
        // rlibs/dylibs.
        let sess = self.sess;
        let dylibname = self.dylibname();
        let mut rlibs = HashSet::new();
        let mut dylibs = HashSet::new();
        {
            let mut locs = locs.iter().map(|l| Path::new(l.as_slice())).filter(|loc| {
                if !loc.exists() {
                    sess.err(format!("extern location for {} does not exist: {}",
                                     self.crate_name, loc.display()).as_slice());
                    return false;
                }
                let file = match loc.filename_str() {
                    Some(file) => file,
                    None => {
                        sess.err(format!("extern location for {} is not a file: {}",
                                         self.crate_name, loc.display()).as_slice());
                        return false;
                    }
                };
                // Accept `lib*.rlib`, or any file matching the platform's
                // dylib prefix/suffix; everything else is reported and dropped.
                if file.starts_with("lib") && file.ends_with(".rlib") {
                    return true
                } else {
                    let (ref prefix, ref suffix) = dylibname;
                    if file.starts_with(prefix.as_slice()) &&
                       file.ends_with(suffix.as_slice()) {
                        return true
                    }
                }
                sess.err(format!("extern location for {} is of an unknown type: {}",
                                 self.crate_name, loc.display()).as_slice());
                false
            });

            // Now that we have an iterator of good candidates, make sure there's at
            // most one rlib and at most one dylib.
            // (realpath is used so that duplicate spellings of the same path
            // collapse into one HashSet entry.)
            for loc in locs {
                if loc.filename_str().unwrap().ends_with(".rlib") {
                    rlibs.insert(fs::realpath(&loc).unwrap());
                } else {
                    dylibs.insert(fs::realpath(&loc).unwrap());
                }
            }
        };

        // Extract the rlib/dylib pair.
let mut metadata = None; let rlib = self.extract_one(rlibs, "rlib", &mut metadata); let dylib = self.extract_one(dylibs, "dylib", &mut metadata); if rlib.is_none() && dylib.is_none() { return None } match metadata { Some(metadata) => Some(Library { dylib: dylib, rlib: rlib, metadata: metadata, }), None => None, } } } pub fn note_crate_name(diag: &SpanHandler, name: &str) { diag.handler().note(format!("crate name: {}", name).as_slice()); } impl ArchiveMetadata { fn new(ar: ArchiveRO) -> Option<ArchiveMetadata> { let data = match ar.read(METADATA_FILENAME) { Some(data) => data as *const [u8], None => { debug!("didn't find '{}' in the archive", METADATA_FILENAME); return None; } }; Some(ArchiveMetadata { _archive: ar, data: data, }) } pub fn as_slice<'a>(&'a self) -> &'a [u8] { unsafe { &*self.data } } } // Just a small wrapper to time how long reading metadata takes. fn get_metadata_section(is_osx: bool, filename: &Path) -> Result<MetadataBlob, String> { let mut ret = None; let dur = Duration::span(|| { ret = Some(get_metadata_section_imp(is_osx, filename)); }); info!("reading {} => {}ms", filename.filename_display(), dur.num_milliseconds()); return ret.unwrap();; } fn get_metadata_section_imp(is_osx: bool, filename: &Path) -> Result<MetadataBlob, String> { if !filename.exists() { return Err(format!("no such file: '{}'", filename.display())); } if filename.filename_str().unwrap().ends_with(".rlib") { // Use ArchiveRO for speed here, it's backed by LLVM and uses mmap // internally to read the file. We also avoid even using a memcpy by // just keeping the archive along while the metadata is in use. 
let archive = match ArchiveRO::open(filename) { Some(ar) => ar, None => { debug!("llvm didn't like `{}`", filename.display()); return Err(format!("failed to read rlib metadata: '{}'", filename.display())); } }; return match ArchiveMetadata::new(archive).map(|ar| MetadataArchive(ar)) { None => { return Err((format!("failed to read rlib metadata: '{}'", filename.display()))) } Some(blob) => return Ok(blob) } } unsafe { let mb = filename.with_c_str(|buf| { llvm::LLVMRustCreateMemoryBufferWithContentsOfFile(buf) }); if mb as int == 0 { return Err(format!("error reading library: '{}'", filename.display())) } let of = match ObjectFile::new(mb) { Some(of) => of, _ => { return Err((format!("provided path not an object file: '{}'", filename.display()))) } }; let si = mk_section_iter(of.llof); while llvm::LLVMIsSectionIteratorAtEnd(of.llof, si.llsi) == False { let mut name_buf = ptr::null(); let name_len = llvm::LLVMRustGetSectionName(si.llsi, &mut name_buf); let name = string::raw::from_buf_len(name_buf as *const u8, name_len as uint); debug!("get_metadata_section: name {}", name); if read_meta_section_name(is_osx).as_slice() == name.as_slice() { let cbuf = llvm::LLVMGetSectionContents(si.llsi); let csz = llvm::LLVMGetSectionSize(si.llsi) as uint; let mut found = Err(format!("metadata not found: '{}'", filename.display())); let cvbuf: *const u8 = cbuf as *const u8; let vlen = encoder::metadata_encoding_version.len(); debug!("checking {} bytes of metadata-version stamp", vlen); let minsz = cmp::min(vlen, csz); let version_ok = slice::raw::buf_as_slice(cvbuf, minsz, |buf0| buf0 == encoder::metadata_encoding_version); if !version_ok { return Err((format!("incompatible metadata version found: '{}'", filename.display()))); } let cvbuf1 = cvbuf.offset(vlen as int); debug!("inflating {} bytes of compressed metadata", csz - vlen); slice::raw::buf_as_slice(cvbuf1, csz-vlen, |bytes| { match flate::inflate_bytes(bytes) { Some(inflated) => found = Ok(MetadataVec(inflated)), None => { 
found = Err(format!("failed to decompress \ metadata for: '{}'", filename.display())) } } }); if found.is_ok() { return found; } } llvm::LLVMMoveToNextSection(si.llsi); } return Err(format!("metadata not found: '{}'", filename.display())); } } pub fn meta_section_name(is_osx: bool) -> &'static str { if is_osx { "__DATA,__note.rustc" } else { ".note.rustc" } } pub fn read_meta_section_name(is_osx: bool) -> &'static str { if is_osx { "__note.rustc" } else { ".note.rustc" } } // A diagnostic function for dumping crate metadata to an output stream pub fn list_file_metadata(is_osx: bool, path: &Path, out: &mut io::Writer) -> io::IoResult<()> { match get_metadata_section(is_osx, path) { Ok(bytes) => decoder::list_crate_metadata(bytes.as_slice(), out), Err(msg) => { write!(out, "{}\n", msg) } } }
} pub struct Library { pub dylib: Option<Path>,
random_line_split
test_import.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Test of the omero import control.
""" import pytest from path import path import omero.clients import uuid from omero.cli import CLI, NonZeroReturnCode # Workaround for a poorly named module plugin = __import__('omero.plugins.import', globals(), locals(), ['ImportControl'], -1) ImportControl = plugin.ImportControl help_arguments = ("-h", "--javahelp", "--java-help", "--advanced-help") class MockClient(omero.clients.BaseClient): def setSessionId(self, uuid): self._uuid = uuid def getSessionId(self): return self._uuid class TestImport(object): def setup_method(self, method): self.cli = CLI() self.cli.register("import", ImportControl, "TEST") self.args = ["import"] def add_client_dir(self): dist_dir = path(__file__) / ".." / ".." / ".." / ".." / ".." / ".." /\ ".." / "dist" # FIXME: should not be hard-coded dist_dir = dist_dir.abspath() client_dir = dist_dir / "lib" / "client" self.args += ["--clientdir", client_dir] def mkdir(self, parent, name, with_ds_store=False): child = parent / name child.mkdir() if with_ds_store: ds_store = child / ".DS_STORE" ds_store.write("") return child def mkfakescreen(self, screen_dir, nplates=2, nruns=2, nwells=2, nfields=4, with_ds_store=False): fieldfiles = [] for iplate in range(nplates): plate_dir = self.mkdir( screen_dir, "Plate00%s" % str(iplate), with_ds_store=with_ds_store) for irun in range(nruns): run_dir = self.mkdir( plate_dir, "Run00%s" % str(irun), with_ds_store=with_ds_store) for iwell in range(nwells): well_dir = self.mkdir( run_dir, "WellA00%s" % str(iwell), with_ds_store=with_ds_store) for ifield in range(nfields): fieldfile = (well_dir / ("Field00%s.fake" % str(ifield))) fieldfile.write('') fieldfiles.append(fieldfile) return fieldfiles def mkfakepattern(self, tmpdir, nangles=7, ntimepoints=10): spim_dir = tmpdir.join("SPIM") spim_dir.mkdir() tiffiles = [] for angle in range(1, nangles + 1): for timepoint in range(1, ntimepoints + 1): tiffile = (spim_dir / ("spim_TL%s_Angle%s.fake" % (str(timepoint), str(angle)))) tiffile.write('') print str(tiffile) 
tiffiles.append(tiffile) patternfile = spim_dir / "spim.pattern" patternfile.write("spim_TL<1-%s>_Angle<1-%s>.fake" % (str(ntimepoints), str(nangles))) assert len(tiffiles) == nangles * ntimepoints return patternfile, tiffiles def testDropBoxArgs(self): class MockImportControl(ImportControl): def importer(this, args): assert args.server == "localhost" assert args.port == "4064" assert args.key == "b0742975-03a1-4f6d-b0ac-639943f1a147" assert args.errs == "/tmp/dropbox.err" assert args.file == "/tmp/dropbox.out" self.cli.register("mock-import", MockImportControl, "HELP") self.args = ['-s', 'localhost', '-p', '4064', '-k', 'b0742975-03a1-4f6d-b0ac-639943f1a147'] self.args += ['mock-import', '---errs=/tmp/dropbox.err'] self.args += ['---file=/tmp/dropbox.out'] self.args += ['--', '/OMERO/DropBox/root/tinyTest.d3d.dv'] self.cli.invoke(self.args) @pytest.mark.parametrize('help_argument', help_arguments) def testHelp(self, help_argument): """Test help arguments""" self.args += [help_argument] self.cli.invoke(self.args) @pytest.mark.parametrize('clientdir_exists', [True, False]) def testImportNoClientDirFails(self, tmpdir, clientdir_exists): """Test fake screen import""" fakefile = tmpdir.join("test.fake") fakefile.write('') if clientdir_exists: self.args += ["--clientdir", str(tmpdir)] self.args += [str(fakefile)] with pytest.raises(NonZeroReturnCode): self.cli.invoke(self.args, strict=True) @pytest.mark.parametrize("data", (("1", False), ("3", True))) def testImportDepth(self, tmpdir, capfd, data): """Test import using depth argument""" dir1 = tmpdir.join("a") dir1.mkdir() dir2 = dir1 / "b" dir2.mkdir() fakefile = dir2 / "test.fake" fakefile.write('') self.add_client_dir() self.args += ["-f", "--debug=ERROR"] self.args += [str(dir1)] depth, result = data self.cli.invoke(self.args + ["--depth=%s" % depth], strict=True) o, e = capfd.readouterr() if result: assert str(fakefile) in str(o) else: assert str(fakefile) not in str(o) def testImportFakeImage(self, tmpdir, capfd): 
"""Test fake image import""" fakefile = tmpdir.join("test.fake") fakefile.write('') self.add_client_dir() self.args += ["-f", "--debug=ERROR"] self.args += [str(fakefile)] self.cli.invoke(self.args, strict=True) o, e = capfd.readouterr() outputlines = str(o).split('\n') reader = 'loci.formats.in.FakeReader' assert outputlines[-2] == str(fakefile) assert outputlines[-3] == \ "# Group: %s SPW: false Reader: %s" % (str(fakefile), reader) @pytest.mark.parametrize('with_ds_store', (True, False)) def testImportFakeScreen(self, tmpdir, capfd, with_ds_store): """Test fake screen import""" screen_dir = tmpdir.join("screen.fake") screen_dir.mkdir() fieldfiles = self.mkfakescreen( screen_dir, with_ds_store=with_ds_store) self.add_client_dir() self.args += ["-f", "--debug=ERROR"] self.args += [str(fieldfiles[0])] self.cli.invoke(self.args, strict=True) o, e = capfd.readouterr() outputlines = str(o).split('\n') reader = 'loci.formats.in.FakeReader' assert outputlines[-len(fieldfiles)-2] == \ "# Group: %s SPW: true Reader: %s" % (str(fieldfiles[0]), reader) for i in range(len(fieldfiles)): assert outputlines[-1-len(fieldfiles)+i] == str(fieldfiles[i]) def testImportPattern(self, tmpdir, capfd): """Test pattern import""" patternfile, tiffiles = self.mkfakepattern(tmpdir) self.add_client_dir() self.args += ["-f", "--debug=ERROR"] self.args += [str(patternfile)] self.cli.invoke(self.args, strict=True) o, e = capfd.readouterr() outputlines = str(o).split('\n') reader = 'loci.formats.in.FilePatternReader' print o assert outputlines[-len(tiffiles)-3] == \ "# Group: %s SPW: false Reader: %s" % (str(patternfile), reader) assert outputlines[-len(tiffiles)-2] == str(patternfile) for i in range(len(tiffiles)): assert outputlines[-1-len(tiffiles)+i] == str(tiffiles[i]) @pytest.mark.parametrize('hostname', ['localhost', 'servername']) @pytest.mark.parametrize('port', [None, 4064, 14064]) def testLoginArguments(self, monkeypatch, hostname, port, tmpdir): self.args += ['test.fake'] control = 
self.cli.controls['import'] control.command_args = [] sessionid = str(uuid.uuid4()) def new_client(x): if port: c = MockClient(hostname, port) else: c = MockClient(hostname) c.setSessionId(sessionid) return c monkeypatch.setattr(self.cli, 'conn', new_client) ice_config = tmpdir / 'ice.config' ice_config.write('omero.host=%s\nomero.port=%g' % ( hostname, (port or 4064))) monkeypatch.setenv("ICE_CONFIG", ice_config) control.set_login_arguments(self.cli.parser.parse_args(self.args)) expected_args = ['-s', '%s' % hostname] expected_args += ['-p', '%s' % (port or 4064)] expected_args += ['-k', '%s' % sessionid] assert control.command_args == expected_args
Copyright 2009 Glencoe Software, Inc. All rights reserved. Use is subject to license terms supplied in LICENSE.txt
random_line_split
test_import.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Test of the omero import control. Copyright 2009 Glencoe Software, Inc. All rights reserved. Use is subject to license terms supplied in LICENSE.txt """ import pytest from path import path import omero.clients import uuid from omero.cli import CLI, NonZeroReturnCode # Workaround for a poorly named module plugin = __import__('omero.plugins.import', globals(), locals(), ['ImportControl'], -1) ImportControl = plugin.ImportControl help_arguments = ("-h", "--javahelp", "--java-help", "--advanced-help") class MockClient(omero.clients.BaseClient): def setSessionId(self, uuid): self._uuid = uuid def getSessionId(self): return self._uuid class TestImport(object): def setup_method(self, method): self.cli = CLI() self.cli.register("import", ImportControl, "TEST") self.args = ["import"] def add_client_dir(self): dist_dir = path(__file__) / ".." / ".." / ".." / ".." / ".." / ".." /\ ".." / "dist" # FIXME: should not be hard-coded dist_dir = dist_dir.abspath() client_dir = dist_dir / "lib" / "client" self.args += ["--clientdir", client_dir] def mkdir(self, parent, name, with_ds_store=False): child = parent / name child.mkdir() if with_ds_store: ds_store = child / ".DS_STORE" ds_store.write("") return child def mkfakescreen(self, screen_dir, nplates=2, nruns=2, nwells=2, nfields=4, with_ds_store=False): fieldfiles = [] for iplate in range(nplates): plate_dir = self.mkdir( screen_dir, "Plate00%s" % str(iplate), with_ds_store=with_ds_store) for irun in range(nruns): run_dir = self.mkdir( plate_dir, "Run00%s" % str(irun), with_ds_store=with_ds_store) for iwell in range(nwells): well_dir = self.mkdir( run_dir, "WellA00%s" % str(iwell), with_ds_store=with_ds_store) for ifield in range(nfields): fieldfile = (well_dir / ("Field00%s.fake" % str(ifield))) fieldfile.write('') fieldfiles.append(fieldfile) return fieldfiles def mkfakepattern(self, tmpdir, nangles=7, ntimepoints=10): spim_dir = tmpdir.join("SPIM") spim_dir.mkdir() tiffiles = 
[] for angle in range(1, nangles + 1): for timepoint in range(1, ntimepoints + 1): tiffile = (spim_dir / ("spim_TL%s_Angle%s.fake" % (str(timepoint), str(angle)))) tiffile.write('') print str(tiffile) tiffiles.append(tiffile) patternfile = spim_dir / "spim.pattern" patternfile.write("spim_TL<1-%s>_Angle<1-%s>.fake" % (str(ntimepoints), str(nangles))) assert len(tiffiles) == nangles * ntimepoints return patternfile, tiffiles def testDropBoxArgs(self): class MockImportControl(ImportControl): def importer(this, args): assert args.server == "localhost" assert args.port == "4064" assert args.key == "b0742975-03a1-4f6d-b0ac-639943f1a147" assert args.errs == "/tmp/dropbox.err" assert args.file == "/tmp/dropbox.out" self.cli.register("mock-import", MockImportControl, "HELP") self.args = ['-s', 'localhost', '-p', '4064', '-k', 'b0742975-03a1-4f6d-b0ac-639943f1a147'] self.args += ['mock-import', '---errs=/tmp/dropbox.err'] self.args += ['---file=/tmp/dropbox.out'] self.args += ['--', '/OMERO/DropBox/root/tinyTest.d3d.dv'] self.cli.invoke(self.args) @pytest.mark.parametrize('help_argument', help_arguments) def testHelp(self, help_argument): """Test help arguments""" self.args += [help_argument] self.cli.invoke(self.args) @pytest.mark.parametrize('clientdir_exists', [True, False]) def testImportNoClientDirFails(self, tmpdir, clientdir_exists): """Test fake screen import""" fakefile = tmpdir.join("test.fake") fakefile.write('') if clientdir_exists: self.args += ["--clientdir", str(tmpdir)] self.args += [str(fakefile)] with pytest.raises(NonZeroReturnCode): self.cli.invoke(self.args, strict=True) @pytest.mark.parametrize("data", (("1", False), ("3", True))) def testImportDepth(self, tmpdir, capfd, data): """Test import using depth argument""" dir1 = tmpdir.join("a") dir1.mkdir() dir2 = dir1 / "b" dir2.mkdir() fakefile = dir2 / "test.fake" fakefile.write('') self.add_client_dir() self.args += ["-f", "--debug=ERROR"] self.args += [str(dir1)] depth, result = data 
self.cli.invoke(self.args + ["--depth=%s" % depth], strict=True) o, e = capfd.readouterr() if result: assert str(fakefile) in str(o) else: assert str(fakefile) not in str(o) def testImportFakeImage(self, tmpdir, capfd): """Test fake image import""" fakefile = tmpdir.join("test.fake") fakefile.write('') self.add_client_dir() self.args += ["-f", "--debug=ERROR"] self.args += [str(fakefile)] self.cli.invoke(self.args, strict=True) o, e = capfd.readouterr() outputlines = str(o).split('\n') reader = 'loci.formats.in.FakeReader' assert outputlines[-2] == str(fakefile) assert outputlines[-3] == \ "# Group: %s SPW: false Reader: %s" % (str(fakefile), reader) @pytest.mark.parametrize('with_ds_store', (True, False)) def
(self, tmpdir, capfd, with_ds_store): """Test fake screen import""" screen_dir = tmpdir.join("screen.fake") screen_dir.mkdir() fieldfiles = self.mkfakescreen( screen_dir, with_ds_store=with_ds_store) self.add_client_dir() self.args += ["-f", "--debug=ERROR"] self.args += [str(fieldfiles[0])] self.cli.invoke(self.args, strict=True) o, e = capfd.readouterr() outputlines = str(o).split('\n') reader = 'loci.formats.in.FakeReader' assert outputlines[-len(fieldfiles)-2] == \ "# Group: %s SPW: true Reader: %s" % (str(fieldfiles[0]), reader) for i in range(len(fieldfiles)): assert outputlines[-1-len(fieldfiles)+i] == str(fieldfiles[i]) def testImportPattern(self, tmpdir, capfd): """Test pattern import""" patternfile, tiffiles = self.mkfakepattern(tmpdir) self.add_client_dir() self.args += ["-f", "--debug=ERROR"] self.args += [str(patternfile)] self.cli.invoke(self.args, strict=True) o, e = capfd.readouterr() outputlines = str(o).split('\n') reader = 'loci.formats.in.FilePatternReader' print o assert outputlines[-len(tiffiles)-3] == \ "# Group: %s SPW: false Reader: %s" % (str(patternfile), reader) assert outputlines[-len(tiffiles)-2] == str(patternfile) for i in range(len(tiffiles)): assert outputlines[-1-len(tiffiles)+i] == str(tiffiles[i]) @pytest.mark.parametrize('hostname', ['localhost', 'servername']) @pytest.mark.parametrize('port', [None, 4064, 14064]) def testLoginArguments(self, monkeypatch, hostname, port, tmpdir): self.args += ['test.fake'] control = self.cli.controls['import'] control.command_args = [] sessionid = str(uuid.uuid4()) def new_client(x): if port: c = MockClient(hostname, port) else: c = MockClient(hostname) c.setSessionId(sessionid) return c monkeypatch.setattr(self.cli, 'conn', new_client) ice_config = tmpdir / 'ice.config' ice_config.write('omero.host=%s\nomero.port=%g' % ( hostname, (port or 4064))) monkeypatch.setenv("ICE_CONFIG", ice_config) control.set_login_arguments(self.cli.parser.parse_args(self.args)) expected_args = ['-s', '%s' % 
hostname] expected_args += ['-p', '%s' % (port or 4064)] expected_args += ['-k', '%s' % sessionid] assert control.command_args == expected_args
testImportFakeScreen
identifier_name
test_import.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Test of the omero import control. Copyright 2009 Glencoe Software, Inc. All rights reserved. Use is subject to license terms supplied in LICENSE.txt """ import pytest from path import path import omero.clients import uuid from omero.cli import CLI, NonZeroReturnCode # Workaround for a poorly named module plugin = __import__('omero.plugins.import', globals(), locals(), ['ImportControl'], -1) ImportControl = plugin.ImportControl help_arguments = ("-h", "--javahelp", "--java-help", "--advanced-help") class MockClient(omero.clients.BaseClient): def setSessionId(self, uuid): self._uuid = uuid def getSessionId(self): return self._uuid class TestImport(object): def setup_method(self, method): self.cli = CLI() self.cli.register("import", ImportControl, "TEST") self.args = ["import"] def add_client_dir(self): dist_dir = path(__file__) / ".." / ".." / ".." / ".." / ".." / ".." /\ ".." / "dist" # FIXME: should not be hard-coded dist_dir = dist_dir.abspath() client_dir = dist_dir / "lib" / "client" self.args += ["--clientdir", client_dir] def mkdir(self, parent, name, with_ds_store=False): child = parent / name child.mkdir() if with_ds_store: ds_store = child / ".DS_STORE" ds_store.write("") return child def mkfakescreen(self, screen_dir, nplates=2, nruns=2, nwells=2, nfields=4, with_ds_store=False): fieldfiles = [] for iplate in range(nplates): plate_dir = self.mkdir( screen_dir, "Plate00%s" % str(iplate), with_ds_store=with_ds_store) for irun in range(nruns): run_dir = self.mkdir( plate_dir, "Run00%s" % str(irun), with_ds_store=with_ds_store) for iwell in range(nwells):
return fieldfiles def mkfakepattern(self, tmpdir, nangles=7, ntimepoints=10): spim_dir = tmpdir.join("SPIM") spim_dir.mkdir() tiffiles = [] for angle in range(1, nangles + 1): for timepoint in range(1, ntimepoints + 1): tiffile = (spim_dir / ("spim_TL%s_Angle%s.fake" % (str(timepoint), str(angle)))) tiffile.write('') print str(tiffile) tiffiles.append(tiffile) patternfile = spim_dir / "spim.pattern" patternfile.write("spim_TL<1-%s>_Angle<1-%s>.fake" % (str(ntimepoints), str(nangles))) assert len(tiffiles) == nangles * ntimepoints return patternfile, tiffiles def testDropBoxArgs(self): class MockImportControl(ImportControl): def importer(this, args): assert args.server == "localhost" assert args.port == "4064" assert args.key == "b0742975-03a1-4f6d-b0ac-639943f1a147" assert args.errs == "/tmp/dropbox.err" assert args.file == "/tmp/dropbox.out" self.cli.register("mock-import", MockImportControl, "HELP") self.args = ['-s', 'localhost', '-p', '4064', '-k', 'b0742975-03a1-4f6d-b0ac-639943f1a147'] self.args += ['mock-import', '---errs=/tmp/dropbox.err'] self.args += ['---file=/tmp/dropbox.out'] self.args += ['--', '/OMERO/DropBox/root/tinyTest.d3d.dv'] self.cli.invoke(self.args) @pytest.mark.parametrize('help_argument', help_arguments) def testHelp(self, help_argument): """Test help arguments""" self.args += [help_argument] self.cli.invoke(self.args) @pytest.mark.parametrize('clientdir_exists', [True, False]) def testImportNoClientDirFails(self, tmpdir, clientdir_exists): """Test fake screen import""" fakefile = tmpdir.join("test.fake") fakefile.write('') if clientdir_exists: self.args += ["--clientdir", str(tmpdir)] self.args += [str(fakefile)] with pytest.raises(NonZeroReturnCode): self.cli.invoke(self.args, strict=True) @pytest.mark.parametrize("data", (("1", False), ("3", True))) def testImportDepth(self, tmpdir, capfd, data): """Test import using depth argument""" dir1 = tmpdir.join("a") dir1.mkdir() dir2 = dir1 / "b" dir2.mkdir() fakefile = dir2 / "test.fake" 
fakefile.write('') self.add_client_dir() self.args += ["-f", "--debug=ERROR"] self.args += [str(dir1)] depth, result = data self.cli.invoke(self.args + ["--depth=%s" % depth], strict=True) o, e = capfd.readouterr() if result: assert str(fakefile) in str(o) else: assert str(fakefile) not in str(o) def testImportFakeImage(self, tmpdir, capfd): """Test fake image import""" fakefile = tmpdir.join("test.fake") fakefile.write('') self.add_client_dir() self.args += ["-f", "--debug=ERROR"] self.args += [str(fakefile)] self.cli.invoke(self.args, strict=True) o, e = capfd.readouterr() outputlines = str(o).split('\n') reader = 'loci.formats.in.FakeReader' assert outputlines[-2] == str(fakefile) assert outputlines[-3] == \ "# Group: %s SPW: false Reader: %s" % (str(fakefile), reader) @pytest.mark.parametrize('with_ds_store', (True, False)) def testImportFakeScreen(self, tmpdir, capfd, with_ds_store): """Test fake screen import""" screen_dir = tmpdir.join("screen.fake") screen_dir.mkdir() fieldfiles = self.mkfakescreen( screen_dir, with_ds_store=with_ds_store) self.add_client_dir() self.args += ["-f", "--debug=ERROR"] self.args += [str(fieldfiles[0])] self.cli.invoke(self.args, strict=True) o, e = capfd.readouterr() outputlines = str(o).split('\n') reader = 'loci.formats.in.FakeReader' assert outputlines[-len(fieldfiles)-2] == \ "# Group: %s SPW: true Reader: %s" % (str(fieldfiles[0]), reader) for i in range(len(fieldfiles)): assert outputlines[-1-len(fieldfiles)+i] == str(fieldfiles[i]) def testImportPattern(self, tmpdir, capfd): """Test pattern import""" patternfile, tiffiles = self.mkfakepattern(tmpdir) self.add_client_dir() self.args += ["-f", "--debug=ERROR"] self.args += [str(patternfile)] self.cli.invoke(self.args, strict=True) o, e = capfd.readouterr() outputlines = str(o).split('\n') reader = 'loci.formats.in.FilePatternReader' print o assert outputlines[-len(tiffiles)-3] == \ "# Group: %s SPW: false Reader: %s" % (str(patternfile), reader) assert 
outputlines[-len(tiffiles)-2] == str(patternfile) for i in range(len(tiffiles)): assert outputlines[-1-len(tiffiles)+i] == str(tiffiles[i]) @pytest.mark.parametrize('hostname', ['localhost', 'servername']) @pytest.mark.parametrize('port', [None, 4064, 14064]) def testLoginArguments(self, monkeypatch, hostname, port, tmpdir): self.args += ['test.fake'] control = self.cli.controls['import'] control.command_args = [] sessionid = str(uuid.uuid4()) def new_client(x): if port: c = MockClient(hostname, port) else: c = MockClient(hostname) c.setSessionId(sessionid) return c monkeypatch.setattr(self.cli, 'conn', new_client) ice_config = tmpdir / 'ice.config' ice_config.write('omero.host=%s\nomero.port=%g' % ( hostname, (port or 4064))) monkeypatch.setenv("ICE_CONFIG", ice_config) control.set_login_arguments(self.cli.parser.parse_args(self.args)) expected_args = ['-s', '%s' % hostname] expected_args += ['-p', '%s' % (port or 4064)] expected_args += ['-k', '%s' % sessionid] assert control.command_args == expected_args
well_dir = self.mkdir( run_dir, "WellA00%s" % str(iwell), with_ds_store=with_ds_store) for ifield in range(nfields): fieldfile = (well_dir / ("Field00%s.fake" % str(ifield))) fieldfile.write('') fieldfiles.append(fieldfile)
conditional_block
test_import.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Test of the omero import control. Copyright 2009 Glencoe Software, Inc. All rights reserved. Use is subject to license terms supplied in LICENSE.txt """ import pytest from path import path import omero.clients import uuid from omero.cli import CLI, NonZeroReturnCode # Workaround for a poorly named module plugin = __import__('omero.plugins.import', globals(), locals(), ['ImportControl'], -1) ImportControl = plugin.ImportControl help_arguments = ("-h", "--javahelp", "--java-help", "--advanced-help") class MockClient(omero.clients.BaseClient): def setSessionId(self, uuid): self._uuid = uuid def getSessionId(self): return self._uuid class TestImport(object): def setup_method(self, method): self.cli = CLI() self.cli.register("import", ImportControl, "TEST") self.args = ["import"] def add_client_dir(self): dist_dir = path(__file__) / ".." / ".." / ".." / ".." / ".." / ".." /\ ".." / "dist" # FIXME: should not be hard-coded dist_dir = dist_dir.abspath() client_dir = dist_dir / "lib" / "client" self.args += ["--clientdir", client_dir] def mkdir(self, parent, name, with_ds_store=False): child = parent / name child.mkdir() if with_ds_store: ds_store = child / ".DS_STORE" ds_store.write("") return child def mkfakescreen(self, screen_dir, nplates=2, nruns=2, nwells=2, nfields=4, with_ds_store=False): fieldfiles = [] for iplate in range(nplates): plate_dir = self.mkdir( screen_dir, "Plate00%s" % str(iplate), with_ds_store=with_ds_store) for irun in range(nruns): run_dir = self.mkdir( plate_dir, "Run00%s" % str(irun), with_ds_store=with_ds_store) for iwell in range(nwells): well_dir = self.mkdir( run_dir, "WellA00%s" % str(iwell), with_ds_store=with_ds_store) for ifield in range(nfields): fieldfile = (well_dir / ("Field00%s.fake" % str(ifield))) fieldfile.write('') fieldfiles.append(fieldfile) return fieldfiles def mkfakepattern(self, tmpdir, nangles=7, ntimepoints=10): spim_dir = tmpdir.join("SPIM") spim_dir.mkdir() tiffiles = 
[] for angle in range(1, nangles + 1): for timepoint in range(1, ntimepoints + 1): tiffile = (spim_dir / ("spim_TL%s_Angle%s.fake" % (str(timepoint), str(angle)))) tiffile.write('') print str(tiffile) tiffiles.append(tiffile) patternfile = spim_dir / "spim.pattern" patternfile.write("spim_TL<1-%s>_Angle<1-%s>.fake" % (str(ntimepoints), str(nangles))) assert len(tiffiles) == nangles * ntimepoints return patternfile, tiffiles def testDropBoxArgs(self): class MockImportControl(ImportControl): def importer(this, args):
self.cli.register("mock-import", MockImportControl, "HELP") self.args = ['-s', 'localhost', '-p', '4064', '-k', 'b0742975-03a1-4f6d-b0ac-639943f1a147'] self.args += ['mock-import', '---errs=/tmp/dropbox.err'] self.args += ['---file=/tmp/dropbox.out'] self.args += ['--', '/OMERO/DropBox/root/tinyTest.d3d.dv'] self.cli.invoke(self.args) @pytest.mark.parametrize('help_argument', help_arguments) def testHelp(self, help_argument): """Test help arguments""" self.args += [help_argument] self.cli.invoke(self.args) @pytest.mark.parametrize('clientdir_exists', [True, False]) def testImportNoClientDirFails(self, tmpdir, clientdir_exists): """Test fake screen import""" fakefile = tmpdir.join("test.fake") fakefile.write('') if clientdir_exists: self.args += ["--clientdir", str(tmpdir)] self.args += [str(fakefile)] with pytest.raises(NonZeroReturnCode): self.cli.invoke(self.args, strict=True) @pytest.mark.parametrize("data", (("1", False), ("3", True))) def testImportDepth(self, tmpdir, capfd, data): """Test import using depth argument""" dir1 = tmpdir.join("a") dir1.mkdir() dir2 = dir1 / "b" dir2.mkdir() fakefile = dir2 / "test.fake" fakefile.write('') self.add_client_dir() self.args += ["-f", "--debug=ERROR"] self.args += [str(dir1)] depth, result = data self.cli.invoke(self.args + ["--depth=%s" % depth], strict=True) o, e = capfd.readouterr() if result: assert str(fakefile) in str(o) else: assert str(fakefile) not in str(o) def testImportFakeImage(self, tmpdir, capfd): """Test fake image import""" fakefile = tmpdir.join("test.fake") fakefile.write('') self.add_client_dir() self.args += ["-f", "--debug=ERROR"] self.args += [str(fakefile)] self.cli.invoke(self.args, strict=True) o, e = capfd.readouterr() outputlines = str(o).split('\n') reader = 'loci.formats.in.FakeReader' assert outputlines[-2] == str(fakefile) assert outputlines[-3] == \ "# Group: %s SPW: false Reader: %s" % (str(fakefile), reader) @pytest.mark.parametrize('with_ds_store', (True, False)) def 
testImportFakeScreen(self, tmpdir, capfd, with_ds_store): """Test fake screen import""" screen_dir = tmpdir.join("screen.fake") screen_dir.mkdir() fieldfiles = self.mkfakescreen( screen_dir, with_ds_store=with_ds_store) self.add_client_dir() self.args += ["-f", "--debug=ERROR"] self.args += [str(fieldfiles[0])] self.cli.invoke(self.args, strict=True) o, e = capfd.readouterr() outputlines = str(o).split('\n') reader = 'loci.formats.in.FakeReader' assert outputlines[-len(fieldfiles)-2] == \ "# Group: %s SPW: true Reader: %s" % (str(fieldfiles[0]), reader) for i in range(len(fieldfiles)): assert outputlines[-1-len(fieldfiles)+i] == str(fieldfiles[i]) def testImportPattern(self, tmpdir, capfd): """Test pattern import""" patternfile, tiffiles = self.mkfakepattern(tmpdir) self.add_client_dir() self.args += ["-f", "--debug=ERROR"] self.args += [str(patternfile)] self.cli.invoke(self.args, strict=True) o, e = capfd.readouterr() outputlines = str(o).split('\n') reader = 'loci.formats.in.FilePatternReader' print o assert outputlines[-len(tiffiles)-3] == \ "# Group: %s SPW: false Reader: %s" % (str(patternfile), reader) assert outputlines[-len(tiffiles)-2] == str(patternfile) for i in range(len(tiffiles)): assert outputlines[-1-len(tiffiles)+i] == str(tiffiles[i]) @pytest.mark.parametrize('hostname', ['localhost', 'servername']) @pytest.mark.parametrize('port', [None, 4064, 14064]) def testLoginArguments(self, monkeypatch, hostname, port, tmpdir): self.args += ['test.fake'] control = self.cli.controls['import'] control.command_args = [] sessionid = str(uuid.uuid4()) def new_client(x): if port: c = MockClient(hostname, port) else: c = MockClient(hostname) c.setSessionId(sessionid) return c monkeypatch.setattr(self.cli, 'conn', new_client) ice_config = tmpdir / 'ice.config' ice_config.write('omero.host=%s\nomero.port=%g' % ( hostname, (port or 4064))) monkeypatch.setenv("ICE_CONFIG", ice_config) control.set_login_arguments(self.cli.parser.parse_args(self.args)) expected_args = 
['-s', '%s' % hostname] expected_args += ['-p', '%s' % (port or 4064)] expected_args += ['-k', '%s' % sessionid] assert control.command_args == expected_args
assert args.server == "localhost" assert args.port == "4064" assert args.key == "b0742975-03a1-4f6d-b0ac-639943f1a147" assert args.errs == "/tmp/dropbox.err" assert args.file == "/tmp/dropbox.out"
identifier_body
pingping.py
#!/usr/bin/env python # -*- coding: latin-1 -*- import sys import datetime from threading import Thread class ProcPing(Thread): def __init__(self, name, data, qtdMsg): Thread.__init__(self) self.name = name self.data = data self.qtdMsg = qtdMsg self.mailBox = [] def setPeer(self, Peer): self.Peer = Peer def send(self, dado): self.Peer.mailBox = dado def recv(self):
def run(self): for i in range (0, self.qtdMsg + 1): self.send(self.data) if i < self.qtdMsg: self.recv() class PingPing(Thread): def __init__(self, tamMsg, qtdMsg): Thread.__init__(self) self.tamMsg = tamMsg self.qtdMsg = qtdMsg def run(self): index = 0 array = [1] while index < self.tamMsg -1: array.append(1) index = index + 1 p1 = ProcPing("1", array, self.qtdMsg) p2 = ProcPing("2", array, self.qtdMsg) p2.setPeer(p1) p1.setPeer(p2) timeStart = datetime.datetime.now() p1.start() p2.start() p1.join() p2.join() timeEnd = datetime.datetime.now() timeExec = timeEnd - timeStart line = "%d\t%d\t%s\n" % (self.tamMsg, self.qtdMsg, timeExec) try: arq = open('saida.txt', 'r') textoSaida = arq.read() arq.close() except: arq = open('saida.txt', 'w') textoSaida = "" arq.close() arq = open('saida.txt', 'w') textoSaida = textoSaida + line arq.write(textoSaida) arq.close() def main(): param = sys.argv[1:] tamMsg = int(param[0]) qtdMsg = int(param[1]) pingPing = PingPing(tamMsg, qtdMsg) pingPing.start() if __name__=="__main__": main()
while True: if not len(self.mailBox) < len(self.data): print(self) self.mailBox = [] break
identifier_body
pingping.py
#!/usr/bin/env python # -*- coding: latin-1 -*- import sys import datetime from threading import Thread class ProcPing(Thread): def __init__(self, name, data, qtdMsg): Thread.__init__(self) self.name = name self.data = data self.qtdMsg = qtdMsg self.mailBox = [] def
(self, Peer): self.Peer = Peer def send(self, dado): self.Peer.mailBox = dado def recv(self): while True: if not len(self.mailBox) < len(self.data): print(self) self.mailBox = [] break def run(self): for i in range (0, self.qtdMsg + 1): self.send(self.data) if i < self.qtdMsg: self.recv() class PingPing(Thread): def __init__(self, tamMsg, qtdMsg): Thread.__init__(self) self.tamMsg = tamMsg self.qtdMsg = qtdMsg def run(self): index = 0 array = [1] while index < self.tamMsg -1: array.append(1) index = index + 1 p1 = ProcPing("1", array, self.qtdMsg) p2 = ProcPing("2", array, self.qtdMsg) p2.setPeer(p1) p1.setPeer(p2) timeStart = datetime.datetime.now() p1.start() p2.start() p1.join() p2.join() timeEnd = datetime.datetime.now() timeExec = timeEnd - timeStart line = "%d\t%d\t%s\n" % (self.tamMsg, self.qtdMsg, timeExec) try: arq = open('saida.txt', 'r') textoSaida = arq.read() arq.close() except: arq = open('saida.txt', 'w') textoSaida = "" arq.close() arq = open('saida.txt', 'w') textoSaida = textoSaida + line arq.write(textoSaida) arq.close() def main(): param = sys.argv[1:] tamMsg = int(param[0]) qtdMsg = int(param[1]) pingPing = PingPing(tamMsg, qtdMsg) pingPing.start() if __name__=="__main__": main()
setPeer
identifier_name
pingping.py
#!/usr/bin/env python # -*- coding: latin-1 -*- import sys import datetime from threading import Thread class ProcPing(Thread): def __init__(self, name, data, qtdMsg): Thread.__init__(self) self.name = name self.data = data self.qtdMsg = qtdMsg self.mailBox = [] def setPeer(self, Peer): self.Peer = Peer def send(self, dado): self.Peer.mailBox = dado def recv(self): while True: if not len(self.mailBox) < len(self.data):
def run(self): for i in range (0, self.qtdMsg + 1): self.send(self.data) if i < self.qtdMsg: self.recv() class PingPing(Thread): def __init__(self, tamMsg, qtdMsg): Thread.__init__(self) self.tamMsg = tamMsg self.qtdMsg = qtdMsg def run(self): index = 0 array = [1] while index < self.tamMsg -1: array.append(1) index = index + 1 p1 = ProcPing("1", array, self.qtdMsg) p2 = ProcPing("2", array, self.qtdMsg) p2.setPeer(p1) p1.setPeer(p2) timeStart = datetime.datetime.now() p1.start() p2.start() p1.join() p2.join() timeEnd = datetime.datetime.now() timeExec = timeEnd - timeStart line = "%d\t%d\t%s\n" % (self.tamMsg, self.qtdMsg, timeExec) try: arq = open('saida.txt', 'r') textoSaida = arq.read() arq.close() except: arq = open('saida.txt', 'w') textoSaida = "" arq.close() arq = open('saida.txt', 'w') textoSaida = textoSaida + line arq.write(textoSaida) arq.close() def main(): param = sys.argv[1:] tamMsg = int(param[0]) qtdMsg = int(param[1]) pingPing = PingPing(tamMsg, qtdMsg) pingPing.start() if __name__=="__main__": main()
print(self) self.mailBox = [] break
conditional_block
pingping.py
#!/usr/bin/env python # -*- coding: latin-1 -*- import sys import datetime from threading import Thread class ProcPing(Thread): def __init__(self, name, data, qtdMsg): Thread.__init__(self) self.name = name self.data = data self.qtdMsg = qtdMsg self.mailBox = [] def setPeer(self, Peer): self.Peer = Peer def send(self, dado): self.Peer.mailBox = dado def recv(self): while True: if not len(self.mailBox) < len(self.data): print(self) self.mailBox = [] break def run(self): for i in range (0, self.qtdMsg + 1): self.send(self.data) if i < self.qtdMsg: self.recv() class PingPing(Thread): def __init__(self, tamMsg, qtdMsg): Thread.__init__(self) self.tamMsg = tamMsg self.qtdMsg = qtdMsg def run(self): index = 0 array = [1] while index < self.tamMsg -1: array.append(1) index = index + 1 p1 = ProcPing("1", array, self.qtdMsg) p2 = ProcPing("2", array, self.qtdMsg) p2.setPeer(p1) p1.setPeer(p2) timeStart = datetime.datetime.now() p1.start() p2.start() p1.join() p2.join() timeEnd = datetime.datetime.now() timeExec = timeEnd - timeStart line = "%d\t%d\t%s\n" % (self.tamMsg, self.qtdMsg, timeExec) try: arq = open('saida.txt', 'r') textoSaida = arq.read() arq.close() except: arq = open('saida.txt', 'w') textoSaida = "" arq.close() arq = open('saida.txt', 'w') textoSaida = textoSaida + line arq.write(textoSaida) arq.close() def main(): param = sys.argv[1:] tamMsg = int(param[0]) qtdMsg = int(param[1]) pingPing = PingPing(tamMsg, qtdMsg)
pingPing.start() if __name__=="__main__": main()
random_line_split
persengine.py
#!/usr/bin/env python2 """ This is the main module, used to launch the persistency engine """ #from persio import iohandler import persui.persinterface as ui def main(): """ Launches the user interface, and keeps it on.""" interface = ui.Persinterface() while True:
if __name__ == '__main__': main() """ def main_old(): keynames = ["A", "B"] graph_data1 = [(0, 0, 0, 1), (0, 1, 2, 3)] graph_data2 = [(2, 3, 0, 1), (0, 6, 2, 8)] graph_data = [graph_data1, graph_data2] name = "tree.xml" root = iohandler.xh.createindex(keynames) for i in xrange(2): iohandler.xh.creategraph(root, graph_data[i], keynames[i], 2) iohandler.xh.writexml(root, name) """
interface.run()
conditional_block
persengine.py
#!/usr/bin/env python2 """ This is the main module, used to launch the persistency engine """ #from persio import iohandler import persui.persinterface as ui
""" Launches the user interface, and keeps it on.""" interface = ui.Persinterface() while True: interface.run() if __name__ == '__main__': main() """ def main_old(): keynames = ["A", "B"] graph_data1 = [(0, 0, 0, 1), (0, 1, 2, 3)] graph_data2 = [(2, 3, 0, 1), (0, 6, 2, 8)] graph_data = [graph_data1, graph_data2] name = "tree.xml" root = iohandler.xh.createindex(keynames) for i in xrange(2): iohandler.xh.creategraph(root, graph_data[i], keynames[i], 2) iohandler.xh.writexml(root, name) """
def main():
random_line_split
persengine.py
#!/usr/bin/env python2 """ This is the main module, used to launch the persistency engine """ #from persio import iohandler import persui.persinterface as ui def main():
if __name__ == '__main__': main() """ def main_old(): keynames = ["A", "B"] graph_data1 = [(0, 0, 0, 1), (0, 1, 2, 3)] graph_data2 = [(2, 3, 0, 1), (0, 6, 2, 8)] graph_data = [graph_data1, graph_data2] name = "tree.xml" root = iohandler.xh.createindex(keynames) for i in xrange(2): iohandler.xh.creategraph(root, graph_data[i], keynames[i], 2) iohandler.xh.writexml(root, name) """
""" Launches the user interface, and keeps it on.""" interface = ui.Persinterface() while True: interface.run()
identifier_body
persengine.py
#!/usr/bin/env python2 """ This is the main module, used to launch the persistency engine """ #from persio import iohandler import persui.persinterface as ui def
(): """ Launches the user interface, and keeps it on.""" interface = ui.Persinterface() while True: interface.run() if __name__ == '__main__': main() """ def main_old(): keynames = ["A", "B"] graph_data1 = [(0, 0, 0, 1), (0, 1, 2, 3)] graph_data2 = [(2, 3, 0, 1), (0, 6, 2, 8)] graph_data = [graph_data1, graph_data2] name = "tree.xml" root = iohandler.xh.createindex(keynames) for i in xrange(2): iohandler.xh.creategraph(root, graph_data[i], keynames[i], 2) iohandler.xh.writexml(root, name) """
main
identifier_name
faq.js
$.FAQ = function(){ $self = this; this.url = "/faq" this.send = function(inputs){ var params = new FormData(); // var csrf = $("#csrf").val(); // params.append("csrf_ID", csrf); $.each(inputs, function(key, val){ params.append(key,val); }); $.ajax({ url : $self.url, type: 'POST', async: true, processData: false, data: params, success: function(response){ response = JSON.parse(response); $self.fillQAs(response.qadata); $self.createPagination(response.metadata) }, }); }; this.fillQAs = function(data){ var qaBox = $("#faq-container .faq-item").clone(); $("#faq-container .faq-item").remove(); $.each(data, function(obj){ var $div = qaBox.clone(); $div.find(".faq-item-question h2").html(obj.question); $div.find(".faq-item-answer p").html(obj.answer); }); }; this.createPagination = function(metadata){ }; this.load = function(data){ var limit = (data.limit > 0) ? data.limit : 5; var offset = (data.page_num - 1)*limit; var inputs = { action : 'loadFaq', limit : limit, offset : offset }; $self.send(inputs); }; this.init = function(){ var inputs = { limit : 5, page_num : 1
};
}; $self.load(inputs); };
random_line_split
lib.rs
use std::env; use std::error::Error; use std::fs; pub struct Config { pub query: String, pub filename: String, pub case_sensitive: bool, } // ANCHOR: here impl Config { pub fn new(mut args: env::Args) -> Result<Config, &'static str> { // --snip-- // ANCHOR_END: here if args.len() < 3 { return Err("not enough arguments"); } let query = args[1].clone(); let filename = args[2].clone(); let case_sensitive = env::var("CASE_INSENSITIVE").is_err(); Ok(Config { query, filename, case_sensitive, }) } } pub fn run(config: Config) -> Result<(), Box<dyn Error>> { let contents = fs::read_to_string(config.filename)?; let results = if config.case_sensitive { search(&config.query, &contents) } else
; for line in results { println!("{}", line); } Ok(()) } pub fn search<'a>(query: &str, contents: &'a str) -> Vec<&'a str> { let mut results = Vec::new(); for line in contents.lines() { if line.contains(query) { results.push(line); } } results } pub fn search_case_insensitive<'a>( query: &str, contents: &'a str, ) -> Vec<&'a str> { let query = query.to_lowercase(); let mut results = Vec::new(); for line in contents.lines() { if line.to_lowercase().contains(&query) { results.push(line); } } results } #[cfg(test)] mod tests { use super::*; #[test] fn case_sensitive() { let query = "duct"; let contents = "\ Rust: safe, fast, productive. Pick three. Duct tape."; assert_eq!(vec!["safe, fast, productive."], search(query, contents)); } #[test] fn case_insensitive() { let query = "rUsT"; let contents = "\ Rust: safe, fast, productive. Pick three. Trust me."; assert_eq!( vec!["Rust:", "Trust me."], search_case_insensitive(query, contents) ); } }
{ search_case_insensitive(&config.query, &contents) }
conditional_block
lib.rs
use std::env; use std::error::Error; use std::fs; pub struct Config { pub query: String, pub filename: String, pub case_sensitive: bool, } // ANCHOR: here impl Config { pub fn new(mut args: env::Args) -> Result<Config, &'static str> { // --snip-- // ANCHOR_END: here if args.len() < 3 { return Err("not enough arguments"); } let query = args[1].clone(); let filename = args[2].clone(); let case_sensitive = env::var("CASE_INSENSITIVE").is_err(); Ok(Config { query, filename, case_sensitive, }) } } pub fn run(config: Config) -> Result<(), Box<dyn Error>> { let contents = fs::read_to_string(config.filename)?; let results = if config.case_sensitive { search(&config.query, &contents)
}; for line in results { println!("{}", line); } Ok(()) } pub fn search<'a>(query: &str, contents: &'a str) -> Vec<&'a str> { let mut results = Vec::new(); for line in contents.lines() { if line.contains(query) { results.push(line); } } results } pub fn search_case_insensitive<'a>( query: &str, contents: &'a str, ) -> Vec<&'a str> { let query = query.to_lowercase(); let mut results = Vec::new(); for line in contents.lines() { if line.to_lowercase().contains(&query) { results.push(line); } } results } #[cfg(test)] mod tests { use super::*; #[test] fn case_sensitive() { let query = "duct"; let contents = "\ Rust: safe, fast, productive. Pick three. Duct tape."; assert_eq!(vec!["safe, fast, productive."], search(query, contents)); } #[test] fn case_insensitive() { let query = "rUsT"; let contents = "\ Rust: safe, fast, productive. Pick three. Trust me."; assert_eq!( vec!["Rust:", "Trust me."], search_case_insensitive(query, contents) ); } }
} else { search_case_insensitive(&config.query, &contents)
random_line_split
lib.rs
use std::env; use std::error::Error; use std::fs; pub struct Config { pub query: String, pub filename: String, pub case_sensitive: bool, } // ANCHOR: here impl Config { pub fn new(mut args: env::Args) -> Result<Config, &'static str>
} pub fn run(config: Config) -> Result<(), Box<dyn Error>> { let contents = fs::read_to_string(config.filename)?; let results = if config.case_sensitive { search(&config.query, &contents) } else { search_case_insensitive(&config.query, &contents) }; for line in results { println!("{}", line); } Ok(()) } pub fn search<'a>(query: &str, contents: &'a str) -> Vec<&'a str> { let mut results = Vec::new(); for line in contents.lines() { if line.contains(query) { results.push(line); } } results } pub fn search_case_insensitive<'a>( query: &str, contents: &'a str, ) -> Vec<&'a str> { let query = query.to_lowercase(); let mut results = Vec::new(); for line in contents.lines() { if line.to_lowercase().contains(&query) { results.push(line); } } results } #[cfg(test)] mod tests { use super::*; #[test] fn case_sensitive() { let query = "duct"; let contents = "\ Rust: safe, fast, productive. Pick three. Duct tape."; assert_eq!(vec!["safe, fast, productive."], search(query, contents)); } #[test] fn case_insensitive() { let query = "rUsT"; let contents = "\ Rust: safe, fast, productive. Pick three. Trust me."; assert_eq!( vec!["Rust:", "Trust me."], search_case_insensitive(query, contents) ); } }
{ // --snip-- // ANCHOR_END: here if args.len() < 3 { return Err("not enough arguments"); } let query = args[1].clone(); let filename = args[2].clone(); let case_sensitive = env::var("CASE_INSENSITIVE").is_err(); Ok(Config { query, filename, case_sensitive, }) }
identifier_body
lib.rs
use std::env; use std::error::Error; use std::fs; pub struct Config { pub query: String, pub filename: String, pub case_sensitive: bool, } // ANCHOR: here impl Config { pub fn new(mut args: env::Args) -> Result<Config, &'static str> { // --snip-- // ANCHOR_END: here if args.len() < 3 { return Err("not enough arguments"); } let query = args[1].clone(); let filename = args[2].clone(); let case_sensitive = env::var("CASE_INSENSITIVE").is_err(); Ok(Config { query, filename, case_sensitive, }) } } pub fn run(config: Config) -> Result<(), Box<dyn Error>> { let contents = fs::read_to_string(config.filename)?; let results = if config.case_sensitive { search(&config.query, &contents) } else { search_case_insensitive(&config.query, &contents) }; for line in results { println!("{}", line); } Ok(()) } pub fn search<'a>(query: &str, contents: &'a str) -> Vec<&'a str> { let mut results = Vec::new(); for line in contents.lines() { if line.contains(query) { results.push(line); } } results } pub fn search_case_insensitive<'a>( query: &str, contents: &'a str, ) -> Vec<&'a str> { let query = query.to_lowercase(); let mut results = Vec::new(); for line in contents.lines() { if line.to_lowercase().contains(&query) { results.push(line); } } results } #[cfg(test)] mod tests { use super::*; #[test] fn case_sensitive() { let query = "duct"; let contents = "\ Rust: safe, fast, productive. Pick three. Duct tape."; assert_eq!(vec!["safe, fast, productive."], search(query, contents)); } #[test] fn
() { let query = "rUsT"; let contents = "\ Rust: safe, fast, productive. Pick three. Trust me."; assert_eq!( vec!["Rust:", "Trust me."], search_case_insensitive(query, contents) ); } }
case_insensitive
identifier_name