repo_name
stringlengths
5
100
path
stringlengths
4
375
copies
stringclasses
991 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
cbeighley/peregrine
peregrine/analysis/plotTrackingHigh.py
4
2031
#!/usr/bin/env python
# Copyright (C) 2012 Swift Navigation Inc.
#
# This source is subject to the license found in the file 'LICENSE' which must
# be be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.

import sys
sys.path.append("..")
import initSettings
import argparse
import numpy as np
import pylab
import pickle


def plotTrackingHigh(trackResults, settings):
    """Plot the prompt correlation magnitude of every tracked channel.

    Parameters
    ----------
    trackResults : sequence
        Per-channel tracking results; each element must expose ``I_P`` and
        ``Q_P`` (prompt correlator outputs, one sample per millisecond)
        and ``PRN``.
    settings : object
        Settings object; only ``plotTrackingNumPts`` (maximum number of
        samples to plot) is read here.

    Returns
    -------
    The created pylab figure.
    """
    fig = pylab.figure()
    fig.clf()

    # Plot at most plotTrackingNumPts samples; one sample per millisecond.
    n_pts = min(settings.plotTrackingNumPts, len(trackResults[0].I_P))
    x_pts = [i * 0.001 for i in range(n_pts)]

    colors = [(0, 0, 0),
              (0, 0, 1),
              (0, 1, 0),
              (0, 1, 1),
              (1, 0, 0),
              (1, 0, 1),
              (1, 1, 0),
              (0, 0, 0.5),
              (0, 0.5, 0),
              (0, 0.5, 0.5),
              (0.5, 0, 0),
              (0.5, 0, 0.5),
              (0.5, 0.5, 0),
              (0.5, 0.5, 0.5)]

    pylab.title("Prompt correlation magnitude of each channel")
    pylab.xlabel("Time")
    # NOTE: pylab.hold() was removed from matplotlib; overlaying successive
    # plot() calls is the default behaviour, so the old hold(True)/hold(False)
    # bracketing is dropped without changing the rendered output.
    for channelNr in range(len(trackResults)):
        # Prompt magnitude sqrt(I_P^2 + Q_P^2) for this channel.
        pylab.plot(x_pts,
                   np.sqrt(np.square(trackResults[channelNr].I_P[0:len(x_pts)])
                           + np.square(trackResults[channelNr].Q_P[0:len(x_pts)])),
                   # BUG FIX: wrap around the palette instead of raising
                   # IndexError when more than len(colors) channels are tracked.
                   color=colors[channelNr % len(colors)],
                   label=("PRN %2d" % (trackResults[channelNr].PRN)))
    pylab.legend()
    return fig


if __name__ == "__main__":
    settings = initSettings.initSettings()
    parser = argparse.ArgumentParser()
    parser.add_argument("file", help="the tracking results file to analyse")
    args = parser.parse_args()
    settings.fileName = args.file
    with open(settings.fileName, "rb") as f:
        trackResults, channel = pickle.load(f)
    fig = plotTrackingHigh(trackResults, settings)
    pylab.show()
gpl-3.0
wakiyamap/electrum-mona
electrum_mona/gui/kivy/uix/dialogs/dscancel_dialog.py
1
3468
from typing import TYPE_CHECKING, Optional from kivy.app import App from kivy.factory import Factory from kivy.properties import ObjectProperty from kivy.lang import Builder from electrum_mona.gui.kivy.i18n import _ if TYPE_CHECKING: from ...main_window import ElectrumWindow Builder.load_string(''' <DSCancelDialog@Popup> title: _('Cancel (double-spend) transaction') size_hint: 0.8, 0.8 pos_hint: {'top':0.9} BoxLayout: orientation: 'vertical' padding: '10dp' GridLayout: height: self.minimum_height size_hint_y: None cols: 1 spacing: '10dp' BoxLabel: id: old_fee text: _('Current Fee') value: '' BoxLabel: id: old_feerate text: _('Current Fee rate') value: '' Label: id: tooltip1 text: '' size_hint_y: None Label: id: tooltip2 text: '' size_hint_y: None Slider: id: slider range: 0, 4 step: 1 on_value: root.on_slider(self.value) Widget: size_hint: 1, 1 BoxLayout: orientation: 'horizontal' size_hint: 1, 0.5 Button: text: 'Cancel' size_hint: 0.5, None height: '48dp' on_release: root.dismiss() Button: text: 'OK' size_hint: 0.5, None height: '48dp' on_release: root.dismiss() root.on_ok() ''') class DSCancelDialog(Factory.Popup): def __init__(self, app: 'ElectrumWindow', fee, size, callback): Factory.Popup.__init__(self) self.app = app self.init_fee = fee self.tx_size = size self.callback = callback self.config = app.electrum_config self.mempool = self.config.use_mempool_fees() self.dynfees = self.config.is_dynfee() and bool(self.app.network) and self.config.has_dynamic_fees_ready() self.ids.old_fee.value = self.app.format_amount_and_units(self.init_fee) self.ids.old_feerate.value = self.app.format_fee_rate(fee / self.tx_size * 1000) self.update_slider() self.update_text() def update_text(self): pos = int(self.ids.slider.value) new_fee_rate = self.get_fee_rate() text, tooltip = self.config.get_fee_text(pos, self.dynfees, self.mempool, new_fee_rate) self.ids.tooltip1.text = text self.ids.tooltip2.text = tooltip def update_slider(self): slider = self.ids.slider maxp, pos, 
fee_rate = self.config.get_fee_slider(self.dynfees, self.mempool) slider.range = (0, maxp) slider.step = 1 slider.value = pos def get_fee_rate(self) -> Optional[int]: pos = int(self.ids.slider.value) if self.dynfees: fee_rate = self.config.depth_to_fee(pos) if self.mempool else self.config.eta_to_fee(pos) else: fee_rate = self.config.static_fee(pos) return fee_rate # sat/kbyte def on_ok(self): fee_per_kb = self.get_fee_rate() new_fee_rate = fee_per_kb / 1000 if fee_per_kb is not None else None self.callback(new_fee_rate) def on_slider(self, value): self.update_text()
mit
tylerbrazier/archive
datamining/assign3Kmeans/kmeans.py
1
3230
#!/usr/bin/python2 # Not very optimized import sys import math import random import matplotlib.pyplot as plot import numpy def dist(pointA, pointB): "pointA and pointB should be lists" total = 0 for i in range(0, len(pointA)): total += (pointA[i] - pointB[i])**2 return math.sqrt(total) def findClosest(point, meanPoints): ''' returns the index of the mean point in meanPoints that the point argument is closest to. ''' index = 0 shortestDist = dist(point, meanPoints[0]) for i in range(1, len(meanPoints)): currentDist = dist(point, meanPoints[i]) if currentDist < shortestDist: shortestDist = currentDist index = i return index def findCentroid(points): "argument is a list of lists; returns a list (point)" totals = [0 for attr in points[0]] # holds total for each point attribute for point in points: for i in range(0, len(point)): totals[i] += point[i] centroid = [] for i in range(0, len(totals)): centroid.append(totals[i] / len(points)) return centroid ''' old implementation totalX = 0 totalY = 0 for point in points: totalX += point.x totalY += point.y return Point( (totalX / len(points)), (totalY / len(points)) ) ''' filename = sys.argv[1] k = int(sys.argv[2]) data = numpy.loadtxt(filename) meanPoints = [] # find initial random means maxAttrs = [0 for i in data[1]] for point in data: for i in range(0, len(point)): if point[i] > maxAttrs[i]: maxAttrs[i] = point[i] for i in range(0, k): randPoint = [] for maxAttr in maxAttrs: randPoint.append(random.random() * maxAttr) meanPoints.append(randPoint) maxIterations = 20 epsilonThreshold = 0.00001 delta = 1 iterations = 0 while iterations < maxIterations and delta > epsilonThreshold: delta = 0 # assign points to means memberships = [ [] for i in range(0, k) ] # [ [], [] ] when k = 2 membersToPrint = [] # for the report of which points belong where for point in data: memberships[findClosest(point, meanPoints)].append(point) membersToPrint.append(findClosest(point, meanPoints)) # update mean points previousMeanPoints = 
meanPoints meanPoints = [] for group in memberships: if len(group) != 0: meanPoints.append(findCentroid(group)) # calculate delta for i in range(0, len(meanPoints)): delta += dist(meanPoints[i], previousMeanPoints[i]) iterations += 1 # report print "mean points :", meanPoints for i in range(0, len(memberships)): print "number of points in cluster", i, ":", len(memberships[i]) print "number of iterations :", iterations print "delta :", delta print "membership :", membersToPrint # plot 2d data if data.shape[1] == 2: xs = [] ys = [] for point in data: xs.append(point[0]) ys.append(point[1]) meanXs = [] meanYs = [] for point in meanPoints: meanXs.append(point[0]) meanYs.append(point[1]) plot.plot(xs, ys, 'ro', meanXs, meanYs, 'bs') plot.axis([0, round(max(xs)) + 1, 0, round(max(ys)) + 1]) plot.show()
mit
mericon/Xp_Kernel_LGH850
virt/tools/perf/scripts/python/failed-syscalls-by-pid.py
1996
2233
# failed system call counts, by pid # (c) 2010, Tom Zanussi <tzanussi@gmail.com> # Licensed under the terms of the GNU GPL License version 2 # # Displays system-wide failed system call totals, broken down by pid. # If a [comm] arg is specified, only syscalls called by [comm] are displayed. import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n"; for_comm = None for_pid = None if len(sys.argv) > 2: sys.exit(usage) if len(sys.argv) > 1: try: for_pid = int(sys.argv[1]) except: for_comm = sys.argv[1] syscalls = autodict() def trace_begin(): print "Press control+C to stop and show the summary" def trace_end(): print_error_totals() def raw_syscalls__sys_exit(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, id, ret): if (for_comm and common_comm != for_comm) or \ (for_pid and common_pid != for_pid ): return if ret < 0: try: syscalls[common_comm][common_pid][id][ret] += 1 except TypeError: syscalls[common_comm][common_pid][id][ret] = 1 def syscalls__sys_exit(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, id, ret): raw_syscalls__sys_exit(**locals()) def print_error_totals(): if for_comm is not None: print "\nsyscall errors for %s:\n\n" % (for_comm), else: print "\nsyscall errors:\n\n", print "%-30s %10s\n" % ("comm [pid]", "count"), print "%-30s %10s\n" % ("------------------------------", \ "----------"), comm_keys = syscalls.keys() for comm in comm_keys: pid_keys = syscalls[comm].keys() for pid in pid_keys: print "\n%s [%d]\n" % (comm, pid), id_keys = syscalls[comm][pid].keys() for id in id_keys: print " syscall: %-16s\n" % syscall_name(id), ret_keys = syscalls[comm][pid][id].keys() for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True): 
print " err = %-20s %10d\n" % (strerror(ret), val),
gpl-2.0
nitzmahone/ansible
lib/ansible/modules/network/avi/avi_authprofile.py
31
5020
#!/usr/bin/python # # @author: Gaurav Rastogi (grastogi@avinetworks.com) # Eric Anderson (eanderson@avinetworks.com) # module_check: supported # Avi Version: 17.1.1 # # Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: avi_authprofile author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com> short_description: Module for setup of AuthProfile Avi RESTful Object description: - This module is used to configure AuthProfile object - more examples at U(https://github.com/avinetworks/devops) requirements: [ avisdk ] version_added: "2.4" options: state: description: - The state that should be applied on the entity. default: present choices: ["absent", "present"] avi_api_update_method: description: - Default method for object update is HTTP PUT. - Setting to patch will override that behavior to use HTTP PATCH. version_added: "2.5" default: put choices: ["put", "patch"] avi_api_patch_op: description: - Patch operation to use when using avi_api_update_method as patch. version_added: "2.5" choices: ["add", "replace", "delete"] description: description: - User defined description for the object. http: description: - Http user authentication params. ldap: description: - Ldap server and directory settings. name: description: - Name of the auth profile. required: true saml: description: - Saml settings. - Field introduced in 17.2.3. version_added: "2.5" tacacs_plus: description: - Tacacs+ settings. tenant_ref: description: - It is a reference to an object of type tenant. type: description: - Type of the auth profile. - Enum options - AUTH_PROFILE_LDAP, AUTH_PROFILE_TACACS_PLUS, AUTH_PROFILE_SAML. required: true url: description: - Avi controller URL of the object. uuid: description: - Uuid of the auth profile. 
extends_documentation_fragment: - avi ''' EXAMPLES = """ - name: Create user authorization profile based on the LDAP avi_authprofile: controller: '{{ controller }}' password: '{{ password }}' username: '{{ username }}' http: cache_expiration_time: 5 group_member_is_full_dn: false ldap: base_dn: dc=avi,dc=local bind_as_administrator: true port: 389 security_mode: AUTH_LDAP_SECURE_NONE server: - 10.10.0.100 settings: admin_bind_dn: user@avi.local group_filter: (objectClass=*) group_member_attribute: member group_member_is_full_dn: true group_search_dn: dc=avi,dc=local group_search_scope: AUTH_LDAP_SCOPE_SUBTREE ignore_referrals: true password: password user_id_attribute: samAccountname user_search_dn: dc=avi,dc=local user_search_scope: AUTH_LDAP_SCOPE_ONE name: ProdAuth tenant_ref: admin type: AUTH_PROFILE_LDAP """ RETURN = ''' obj: description: AuthProfile (api/authprofile) object returned: success, changed type: dict ''' from ansible.module_utils.basic import AnsibleModule try: from ansible.module_utils.network.avi.avi import ( avi_common_argument_spec, HAS_AVI, avi_ansible_api) except ImportError: HAS_AVI = False def main(): argument_specs = dict( state=dict(default='present', choices=['absent', 'present']), avi_api_update_method=dict(default='put', choices=['put', 'patch']), avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), description=dict(type='str',), http=dict(type='dict',), ldap=dict(type='dict',), name=dict(type='str', required=True), saml=dict(type='dict',), tacacs_plus=dict(type='dict',), tenant_ref=dict(type='str',), type=dict(type='str', required=True), url=dict(type='str',), uuid=dict(type='str',), ) argument_specs.update(avi_common_argument_spec()) module = AnsibleModule( argument_spec=argument_specs, supports_check_mode=True) if not HAS_AVI: return module.fail_json(msg=( 'Avi python API SDK (avisdk>=17.1) is not installed. 
' 'For more details visit https://github.com/avinetworks/sdk.')) return avi_ansible_api(module, 'authprofile', set([])) if __name__ == '__main__': main()
gpl-3.0
ForkedReposBak/mxnet
python/mxnet/numpy/_register.py
9
1092
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

"""Registering ops in mxnet.numpy for imperative programming."""

from ..base import _init_np_op_module
from ..ndarray.register import _make_ndarray_function

# Populate the mxnet.numpy namespace with the numpy-compatible operators,
# reusing the ndarray front-end's function factory to build each one.
_init_np_op_module(
    root_module_name='mxnet',
    np_module_name='numpy',
    mx_module_name=None,
    make_op_func=_make_ndarray_function,
)
apache-2.0
ashray/VTK-EVM
IO/EnSight/Testing/Python/EnSightSelectArrays.py
20
1547
#!/usr/bin/env python
# Regression test: read an EnSight case file with only selected point/cell
# arrays enabled and render the resulting geometry.
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()

# Rendering window, renderer and interactor.
renderer = vtk.vtkRenderer()
render_window = vtk.vtkRenderWindow()
render_window.AddRenderer(renderer)
render_window.StereoCapableWindowOn()
interactor = vtk.vtkRenderWindowInteractor()
interactor.SetRenderWindow(render_window)

reader = vtk.vtkGenericEnSightReader()
# Make sure all algorithms use the composite data pipeline.
composite_pipeline = vtk.vtkCompositeDataPipeline()
reader.SetDefaultExecutivePrototype(composite_pipeline)
reader.SetCaseFileName("" + str(VTK_DATA_ROOT) + "/Data/EnSight/blow1_ascii.case")
reader.SetTimeValue(1)
# Read only the arrays explicitly enabled below.
reader.ReadAllVariablesOff()
reader.SetPointArrayStatus("displacement", 1)
reader.SetCellArrayStatus("thickness", 1)
reader.SetCellArrayStatus("displacement", 1)

geometry = vtk.vtkGeometryFilter()
geometry.SetInputConnection(reader.GetOutputPort())

mapper = vtk.vtkHierarchicalPolyDataMapper()
mapper.SetInputConnection(geometry.GetOutputPort())
mapper.SetScalarRange(0.5, 1.0)

actor = vtk.vtkActor()
actor.SetMapper(mapper)

# Assign our actor to the renderer, then aim the camera and render once.
renderer.AddActor(actor)
interactor.Initialize()
camera = renderer.GetActiveCamera()
camera.SetPosition(99.3932, 17.6571, -22.6071)
camera.SetFocalPoint(3.5, 12, 1.5)
camera.SetViewAngle(30)
camera.SetViewUp(0.239617, -0.01054, 0.97081)
renderer.ResetCameraClippingRange()
render_window.Render()

# Clear the executive prototype before exiting.
reader.SetDefaultExecutivePrototype(None)
# --- end of script --
bsd-3-clause
danieljaouen/ansible
test/units/modules/network/nxos/test_nxos_ospf_vrf.py
57
2771
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_ospf_vrf
from .nxos_module import TestNxosModule, set_module_args


class TestNxosOspfVrfModule(TestNxosModule):
    """Unit tests for the nxos_ospf_vrf module."""

    module = nxos_ospf_vrf

    def setUp(self):
        """Patch load_config/get_config so no real device is contacted."""
        super(TestNxosOspfVrfModule, self).setUp()
        self.mock_load_config = patch('ansible.modules.network.nxos.nxos_ospf_vrf.load_config')
        self.load_config = self.mock_load_config.start()
        self.mock_get_config = patch('ansible.modules.network.nxos.nxos_ospf_vrf.get_config')
        self.get_config = self.mock_get_config.start()

    def tearDown(self):
        """Undo the patches installed by setUp."""
        super(TestNxosOspfVrfModule, self).tearDown()
        self.mock_load_config.stop()
        self.mock_get_config.stop()

    def load_fixtures(self, commands=None, device=''):
        # Pretend every config push succeeds.
        self.load_config.return_value = None

    def test_nxos_ospf_vrf_present(self):
        """Setting throttle timers on a vrf emits the expected commands."""
        set_module_args(dict(ospf=1,
                             vrf='test',
                             timer_throttle_spf_start=50,
                             timer_throttle_spf_hold=1000,
                             timer_throttle_spf_max=2000,
                             timer_throttle_lsa_start=60,
                             timer_throttle_lsa_hold=1100,
                             timer_throttle_lsa_max=3000,
                             state='present'))
        result = self.execute_module(changed=True)
        expected = ['router ospf 1',
                    'vrf test',
                    'timers throttle lsa 60 1100 3000',
                    'timers throttle spf 50 1000 2000',
                    'vrf test']
        self.assertEqual(sorted(result['commands']), sorted(expected))

    def test_nxos_ospf_vrf_absent(self):
        """Removing a vrf that is not configured changes nothing."""
        set_module_args(dict(ospf=1, vrf='test', state='absent'))
        result = self.execute_module(changed=False)
        self.assertEqual(result['commands'], [])
gpl-3.0
dlyle65535/incubator-metron
metron-deployment/roles/opentaxii/templates/collection-status.py
25
1447
#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from cabby import create_client try: # create a connection client = create_client(host='{{ opentaxii_host }}', port='{{ opentaxii_port }}', discovery_path='/services/discovery') # iterate through each defined collection collections = client.get_collections(uri='{{ opentaxii_domain }}/services/collection') for collection in collections: # how many records in each collection? count = client.get_content_count(collection_name=collection.name, uri='{{ opentaxii_domain }}/services/poll') print "%-50s %-10d" % (collection.name, count.count) except: print "Services not defined"
apache-2.0
jmcarbo/openerp7
openerp/addons/report_webkit/ir_report.py
49
5990
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (c) 2010 Camptocamp SA (http://www.camptocamp.com) # All Right Reserved # # Author : Nicolas Bessi (Camptocamp) # # WARNING: This program as such is intended to be used by professional # programmers who take the whole responsability of assessing all potential # consequences resulting from its eventual inadequacies and bugs # End users who are looking for a ready-to-use solution with commercial # garantees and support are strongly adviced to contract a Free Software # Service Company # # This program is Free Software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # ############################################################################## from openerp.osv import fields, osv from openerp import netsvc from webkit_report import WebKitParser from openerp.report.report_sxw import rml_parse def register_report(name, model, tmpl_path, parser=rml_parse): """Register the report into the services""" name = 'report.%s' % name if netsvc.Service._services.get(name, False): service = netsvc.Service._services[name] if isinstance(service, WebKitParser): #already instantiated properly, skip it return if hasattr(service, 'parser'): parser = service.parser del netsvc.Service._services[name] WebKitParser(name, model, tmpl_path, parser=parser) class ReportXML(osv.osv): def __init__(self, pool, cr): super(ReportXML, self).__init__(pool, cr) def register_all(self,cursor): value = super(ReportXML, self).register_all(cursor) cursor.execute("SELECT * FROM ir_act_report_xml WHERE report_type = 'webkit'") records = cursor.dictfetchall() for record in records: register_report(record['report_name'], record['model'], record['report_rml']) return value def unlink(self, cursor, user, ids, context=None): """Delete report and unregister it""" trans_obj = self.pool.get('ir.translation') trans_ids = trans_obj.search( cursor, user, [('type', '=', 'report'), ('res_id', 'in', ids)] ) trans_obj.unlink(cursor, user, trans_ids) # Warning: we cannot unregister the services at the moment # because they are shared across databases. Calling a deleted # report will fail so it's ok. 
res = super(ReportXML, self).unlink( cursor, user, ids, context ) return res def create(self, cursor, user, vals, context=None): "Create report and register it" res = super(ReportXML, self).create(cursor, user, vals, context) if vals.get('report_type','') == 'webkit': # I really look forward to virtual functions :S register_report( vals['report_name'], vals['model'], vals.get('report_rml', False) ) return res def write(self, cr, uid, ids, vals, context=None): "Edit report and manage it registration" if isinstance(ids, (int, long)): ids = [ids,] for rep in self.browse(cr, uid, ids, context=context): if rep.report_type != 'webkit': continue if vals.get('report_name', False) and \ vals['report_name'] != rep.report_name: report_name = vals['report_name'] else: report_name = rep.report_name register_report( report_name, vals.get('model', rep.model), vals.get('report_rml', rep.report_rml) ) res = super(ReportXML, self).write(cr, uid, ids, vals, context) return res _name = 'ir.actions.report.xml' _inherit = 'ir.actions.report.xml' _columns = { 'webkit_header': fields.property( 'ir.header_webkit', type='many2one', relation='ir.header_webkit', string='Webkit Header', help="The header linked to the report", view_load=True, required=True ), 'webkit_debug' : fields.boolean('Webkit debug', help="Enable the webkit engine debugger"), 'report_webkit_data': fields.text('Webkit Template', help="This template will be used if the main report file is not found"), 'precise_mode':fields.boolean('Precise Mode', help='This mode allow more precise element \ position as each object is printed on a separate HTML.\ but memory and disk usage is wider') } ReportXML() # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
afriestad/WikiLinks
dataporten/oauth2/views.py
1
2458
import requests

from allauth.socialaccount.providers.base import ProviderException
from allauth.socialaccount.providers.oauth2.views import OAuth2Adapter, OAuth2CallbackView, OAuth2LoginView

from .provider import DataportenProvider


class DataportenAdapter(OAuth2Adapter):
    """django-allauth OAuth2 adapter for Dataporten (Feide) logins."""

    provider_id = DataportenProvider.id
    # Dataporten OAuth2 endpoints.
    access_token_url = 'https://auth.dataporten.no/oauth/token'
    authorize_url = 'https://auth.dataporten.no/oauth/authorization'
    profile_url = 'https://auth.dataporten.no/userinfo'
    groups_url = 'https://groups-api.dataporten.no/groups/'

    def complete_login(self, request, app, token, **kwargs):
        '''
        Arguments:
            request - The get request to the callback URL
                /accounts/dataporten/login/callback.
            app - The corresponding SocialApp model instance
            token - A token object with access token given in token.token
        Returns:
            Should return a dict with user information intended for parsing
            by the methods of the DataportenProvider view, i.e.
            extract_uid(), extract_extra_data(), and extract_common_fields()
        '''
        # The athentication header
        headers = {'Authorization': 'Bearer ' + token.token}

        # Userinfo endpoint, for documentation see:
        # https://docs.dataporten.no/docs/oauth-authentication/
        userinfo_response = requests.get(
            self.profile_url,
            headers=headers,
        )
        # Raise exception for 4xx and 5xx response codes
        userinfo_response.raise_for_status()

        # The endpoint returns json-data and it needs to be decoded
        extra_data = userinfo_response.json()['user']

        # Finally test that the audience property matches the client id
        # for validification reasons, as instructed by the Dataporten docs
        # if the userinfo-response is used for authentication
        if userinfo_response.json()['audience'] != app.client_id:
            raise ProviderException(
                'Dataporten returned a user with an audience field \
                which does not correspond to the client id of the \
                application.'
            )

        return self.get_provider().sociallogin_from_response(
            request,
            extra_data,
        )


# Module-level view callables wired into allauth's URL routing.
oauth2_login = OAuth2LoginView.adapter_view(DataportenAdapter)
oauth2_callback = OAuth2CallbackView.adapter_view(DataportenAdapter)
mit
hectormartinez/rougexstem
taln2016/icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk_contrib/readability/readabilitytests.py
9
8348
from textanalyzer import * import math class ReadabilityTool: analyzedVars = {} text = "" lang = "" tests_given_lang = {} def __init__(self, text = ''): self.tests_given_lang['all'] = {} self.tests_given_lang['all']["ARI"] = self.ARI self.tests_given_lang['all']['Flesch Reading Ease'] = self.FleschReadingEase self.tests_given_lang['all']["Flesch-Kincaid Grade Level"] = self.FleschKincaidGradeLevel self.tests_given_lang['all']["Gunning Fog Index"] = self.GunningFogIndex self.tests_given_lang['all']["SMOG Index"] = self.SMOGIndex self.tests_given_lang['all']['Coleman Liau Index'] = self.ColemanLiauIndex self.tests_given_lang['all']['LIX'] = self.LIX self.tests_given_lang['all']['RIX'] = self.RIX self.tests_given_lang['eng'] = {} self.tests_given_lang['eng']["ARI"] = self.ARI self.tests_given_lang['eng']['Flesch Reading Ease'] = self.FleschReadingEase self.tests_given_lang['eng']["Flesch-Kincaid Grade Level"] = self.FleschKincaidGradeLevel self.tests_given_lang['eng']["Gunning Fog Index"] = self.GunningFogIndex self.tests_given_lang['eng']["SMOG Index"] = self.SMOGIndex self.tests_given_lang['eng']['Coleman Liau Index'] = self.ColemanLiauIndex self.tests_given_lang['eng']['LIX'] = self.LIX self.tests_given_lang['eng']['RIX'] = self.RIX self.tests_given_lang['no'] = {} self.tests_given_lang['no']["ARI"] = self.ARI self.tests_given_lang['no']['Coleman Liau Index'] = self.ColemanLiauIndex self.tests_given_lang['no']['LIX'] = self.LIX self.tests_given_lang['no']['RIX'] = self.RIX if text != '': self.__analyzeText(text) def __analyzeText(self, text=''): if text != '': if text != self.text: self.text = text lang = NaiveBayes().classifyText(text) self.lang = lang t = textanalyzer(lang) t.analyzeText(text) words = t.getWords(text) charCount = t.getCharacterCount(words) wordCount = len(words) sentenceCount = len(t.getSentences(text)) syllableCount = t.countSyllables(words) complexwordsCount = t.countComplexWords(text) averageWordsPerSentence = wordCount/sentenceCount 
analyzedVars = {} analyzedVars['words'] = words analyzedVars['charCount'] = float(charCount) analyzedVars['wordCount'] = float(wordCount) analyzedVars['sentenceCount'] = float(sentenceCount) analyzedVars['syllableCount'] = float(syllableCount) analyzedVars['complexwordCount'] = float(complexwordsCount) analyzedVars['averageWordsPerSentence'] = float(averageWordsPerSentence) self.analyzedVars = analyzedVars def ARI(self, text = ''): self.__analyzeText(text) score = 0.0 analyzedVars = self.analyzedVars score = 4.71 * (analyzedVars['charCount'] / analyzedVars['wordCount']) + 0.5 * (analyzedVars['wordCount'] / analyzedVars['sentenceCount']) - 21.43 return score def FleschReadingEase(self, text = ''): self.__analyzeText(text) score = 0.0 analyzedVars = self.analyzedVars score = 206.835 - (1.015 * (analyzedVars['averageWordsPerSentence'])) - (84.6 * (analyzedVars['syllableCount']/ analyzedVars['wordCount'])) return score def FleschKincaidGradeLevel(self, text = ''): self.__analyzeText(text) score = 0.0 analyzedVars = self.analyzedVars score = 0.39 * (analyzedVars['averageWordsPerSentence']) + 11.8 * (analyzedVars['syllableCount']/ analyzedVars['wordCount']) - 15.59 return score def GunningFogIndex(self, text = ''): self.__analyzeText(text) score = 0.0 analyzedVars = self.analyzedVars score = 0.4 * ((analyzedVars['averageWordsPerSentence']) + (100 * (analyzedVars['complexwordCount']/analyzedVars['wordCount']))) return score def SMOGIndex(self, text = ''): self.__analyzeText(text) score = 0.0 analyzedVars = self.analyzedVars score = (math.sqrt(analyzedVars['complexwordCount']*(30/analyzedVars['sentenceCount'])) + 3) return score def ColemanLiauIndex(self, text = ''): self.__analyzeText(text) score = 0.0 analyzedVars = self.analyzedVars score = (5.89*(analyzedVars['charCount']/analyzedVars['wordCount']))-(30*(analyzedVars['sentenceCount']/analyzedVars['wordCount']))-15.8 return score def LIX(self, text = ''): self.__analyzeText(text) analyzedVars = self.analyzedVars score = 
0.0 longwords = 0.0 for word in analyzedVars['words']: if len(word) >= 7: longwords += 1.0 score = analyzedVars['wordCount'] / analyzedVars['sentenceCount'] + float(100 * longwords) / analyzedVars['wordCount'] return score def RIX(self, text = ''): self.__analyzeText(text) analyzedVars = self.analyzedVars score = 0.0 longwords = 0.0 for word in analyzedVars['words']: if len(word) >= 7: longwords += 1.0 score = longwords / analyzedVars['sentenceCount'] return score def getReportAll(self, text = ''): self.__analyzeText(text) # ari = 0.0 # fleschEase = 0.0 # fleschGrade = 0.0 # gunningFog = 0.0 # smog = 0.0 # coleman = 0.0 # # ari = self.ARI() # fleschEase = self.FleschReadingEase() # fleschGrade = self.FleschKincaidGradeLevel() # gunningFog = self.GunningFogIndex() # smog = self.SMOGIndex() # coleman = self.ColemanLiauIndex() # lix = self.LIX() # rix = self.RIX() # # print '*' * 70 # print ' ARI: %.1f' % ari # print ' Flesch Reading Ease: %.1f' % fleschEase # print ' FleschKincaid Grade Level: %.1f' % fleschGrade # print ' Gunning Fog: %.1f' % gunningFog # print ' SMOG Index: %.1f' % smog # print ' Coleman-Liau Index: %.1f' % coleman # print ' LIX : %.1f' % lix # print ' RIX : %.1f' % rix # print '*' * 70 print "=" * 100 print "Recommended tests for lang: %s" % self.lang print "=" * 100 for testname in self.tests_given_lang[self.lang].keys(): print testname + " : %.2f" % self.tests_given_lang[self.lang][testname](text) print "=" * 100 print "Other tests: (Warning! Use with care)" print "=" * 100 for testname in self.tests_given_lang["all"].keys(): if not self.tests_given_lang[self.lang].has_key(testname): print testname + " : %.2f" % self.tests_given_lang["all"][testname](text) def demo(self): self = ReadabilityTool() text = """ It is for us the living, rather, to be dedicated here to the unfinished work which they who fought here have thus far so nobly advanced. 
It is rather for us to be here dedicated to the great task remaining before us, that from these honored dead we take increased devotion to that cause for which they gave the last full measure of devotion, that we here highly resolve that these dead shall not have died in vain, that this nation, under God, shall have a new birth of freedom, and that government of the people, by the people, for the people, shall not perish from this earth. """ self.__analyzeText(text) self.getReportAll(text) demo = classmethod(demo) def demo(): ReadabilityTool.demo() if __name__ == "__main__": ReadabilityTool.demo()
apache-2.0
ahmadio/edx-platform
common/djangoapps/util/views.py
114
9959
import json
import logging
import sys
from functools import wraps

from django.conf import settings
from django.core.validators import ValidationError, validate_email
from django.views.decorators.csrf import requires_csrf_token
from django.views.defaults import server_error
from django.http import (Http404, HttpResponse, HttpResponseNotAllowed,
                         HttpResponseServerError)
import dogstats_wrapper as dog_stats_api
from edxmako.shortcuts import render_to_response
import zendesk
from microsite_configuration import microsite

import calc
import track.views

from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey

log = logging.getLogger(__name__)


def ensure_valid_course_key(view_func):
    """
    This decorator should only be used with views which have argument course_key_string (studio) or course_id (lms).
    If course_key_string (studio) or course_id (lms) is not valid raise 404.
    """
    @wraps(view_func)
    def inner(request, *args, **kwargs):
        # Studio passes course_key_string, LMS passes course_id; accept either.
        course_key = kwargs.get('course_key_string') or kwargs.get('course_id')
        if course_key is not None:
            try:
                CourseKey.from_string(course_key)
            except InvalidKeyError:
                raise Http404

        response = view_func(request, *args, **kwargs)

        return response

    return inner


@requires_csrf_token
def jsonable_server_error(request, template_name='500.html'):
    """
    500 error handler that serves JSON on an AJAX request, and proxies
    to the Django default `server_error` view otherwise.
    """
    if request.is_ajax():
        msg = {"error": "The edX servers encountered an error"}
        return HttpResponseServerError(json.dumps(msg))
    else:
        return server_error(request, template_name=template_name)


def calculate(request):
    '''
    Calculator in footer of every page.
    '''
    equation = request.GET['equation']
    try:
        result = calc.evaluator({}, {}, equation)
    except:
        # NOTE(review): bare except also swallows SystemExit and
        # KeyboardInterrupt; consider narrowing to Exception.
        event = {'error': map(str, sys.exc_info()), 'equation': equation}
        track.views.server_track(request, 'error:calc', event, page='calc')
        return HttpResponse(json.dumps({'result': 'Invalid syntax'}))
    return HttpResponse(json.dumps({'result': str(result)}))


class _ZendeskApi(object):
    # Thin wrapper around the zendesk client so views only deal with
    # create/update of tickets.
    def __init__(self):
        """
        Instantiate the Zendesk API.

        All of `ZENDESK_URL`, `ZENDESK_USER`, and `ZENDESK_API_KEY` must be
        set in `django.conf.settings`.
        """
        self._zendesk_instance = zendesk.Zendesk(
            settings.ZENDESK_URL,
            settings.ZENDESK_USER,
            settings.ZENDESK_API_KEY,
            use_api_token=True,
            api_version=2,
            # As of 2012-05-08, Zendesk is using a CA that is not
            # installed on our servers
            client_args={"disable_ssl_certificate_validation": True}
        )

    def create_ticket(self, ticket):
        """
        Create the given `ticket` in Zendesk.

        The ticket should have the format specified by the zendesk package.
        """
        ticket_url = self._zendesk_instance.create_ticket(data=ticket)
        return zendesk.get_id_from_url(ticket_url)

    def update_ticket(self, ticket_id, update):
        """
        Update the Zendesk ticket with id `ticket_id` using the given `update`.

        The update should have the format specified by the zendesk package.
        """
        self._zendesk_instance.update_ticket(ticket_id=ticket_id, data=update)


def _record_feedback_in_zendesk(realname, email, subject, details, tags, additional_info):
    """
    Create a new user-requested Zendesk ticket.

    Once created, the ticket will be updated with a private comment containing
    additional information from the browser and server, such as HTTP headers
    and user state. Returns a boolean value indicating whether ticket creation
    was successful, regardless of whether the private comment update succeeded.
    """
    zendesk_api = _ZendeskApi()

    additional_info_string = (
        "Additional information:\n\n" +
        "\n".join("%s: %s" % (key, value) for (key, value) in additional_info.items() if value is not None)
    )

    # Tag all issues with LMS to distinguish channel in Zendesk; requested by student support team
    zendesk_tags = list(tags.values()) + ["LMS"]

    # Per edX support, we would like to be able to route white label feedback items
    # via tagging
    white_label_org = microsite.get_value('course_org_filter')
    if white_label_org:
        zendesk_tags = zendesk_tags + ["whitelabel_{org}".format(org=white_label_org)]

    new_ticket = {
        "ticket": {
            "requester": {"name": realname, "email": email},
            "subject": subject,
            "comment": {"body": details},
            "tags": zendesk_tags
        }
    }
    try:
        ticket_id = zendesk_api.create_ticket(new_ticket)
    except zendesk.ZendeskError as err:
        log.error("Error creating Zendesk ticket: %s", str(err))
        return False

    # Additional information is provided as a private update so the information
    # is not visible to the user.
    ticket_update = {"ticket": {"comment": {"public": False, "body": additional_info_string}}}
    try:
        zendesk_api.update_ticket(ticket_id, ticket_update)
    except zendesk.ZendeskError as err:
        log.error("Error updating Zendesk ticket: %s", str(err))
        # The update is not strictly necessary, so do not indicate failure to the user
        pass

    return True


DATADOG_FEEDBACK_METRIC = "lms_feedback_submissions"


def _record_feedback_in_datadog(tags):
    # Emit one datadog counter increment per feedback submission, tagged
    # with issue_type/course_id when present.
    datadog_tags = [u"{k}:{v}".format(k=k, v=v) for k, v in tags.items()]
    dog_stats_api.increment(DATADOG_FEEDBACK_METRIC, tags=datadog_tags)


def submit_feedback(request):
    """
    Create a new user-requested ticket, currently implemented with Zendesk.

    If feedback submission is not enabled, any request will raise `Http404`.
    If any configuration parameter (`ZENDESK_URL`, `ZENDESK_USER`, or
    `ZENDESK_API_KEY`) is missing, any request will raise an `Exception`.
    The request must be a POST request specifying `subject` and `details`.
    If the user is not authenticated, the request must also specify `name` and
    `email`. If the user is authenticated, the `name` and `email` will be
    populated from the user's information. If any required parameter is
    missing, a 400 error will be returned indicating which field is missing and
    providing an error message. If Zendesk ticket creation fails, 500 error
    will be returned with no body; if ticket creation succeeds, an empty
    successful response (200) will be returned.
    """
    if not settings.FEATURES.get('ENABLE_FEEDBACK_SUBMISSION', False):
        raise Http404()
    if request.method != "POST":
        return HttpResponseNotAllowed(["POST"])
    if (
        not settings.ZENDESK_URL or
        not settings.ZENDESK_USER or
        not settings.ZENDESK_API_KEY
    ):
        raise Exception("Zendesk enabled but not configured")

    def build_error_response(status_code, field, err_msg):
        # Helper for 400 responses naming the offending field.
        return HttpResponse(json.dumps({"field": field, "error": err_msg}), status=status_code)

    additional_info = {}

    required_fields = ["subject", "details"]
    # NOTE(review): is_authenticated() is the Py2-era Django callable form;
    # newer Django makes it a property.
    if not request.user.is_authenticated():
        required_fields += ["name", "email"]
    required_field_errs = {
        "subject": "Please provide a subject.",
        "details": "Please provide details.",
        "name": "Please provide your name.",
        "email": "Please provide a valid e-mail.",
    }

    for field in required_fields:
        if field not in request.POST or not request.POST[field]:
            return build_error_response(400, field, required_field_errs[field])

    subject = request.POST["subject"]
    details = request.POST["details"]
    tags = dict(
        [(tag, request.POST[tag]) for tag in ["issue_type", "course_id"] if tag in request.POST]
    )

    if request.user.is_authenticated():
        realname = request.user.profile.name
        email = request.user.email
        additional_info["username"] = request.user.username
    else:
        realname = request.POST["name"]
        email = request.POST["email"]
        try:
            validate_email(email)
        except ValidationError:
            return build_error_response(400, "email", required_field_errs["email"])

    # Record request context (referer, UA, client IP, host) in the private
    # Zendesk comment for the support team.
    for header, pretty in [
        ("HTTP_REFERER", "Page"),
        ("HTTP_USER_AGENT", "Browser"),
        ("REMOTE_ADDR", "Client IP"),
        ("SERVER_NAME", "Host")
    ]:
        additional_info[pretty] = request.META.get(header)

    success = _record_feedback_in_zendesk(realname, email, subject, details, tags, additional_info)
    _record_feedback_in_datadog(tags)

    return HttpResponse(status=(200 if success else 500))


def info(request):
    ''' Info page (link from main header) '''
    return render_to_response("info.html", {})


# From http://djangosnippets.org/snippets/1042/
def parse_accept_header(accept):
    """Parse the Accept header *accept*, returning a list with pairs of
    (media_type, q_value), ordered by q values.
    """
    result = []
    for media_range in accept.split(","):
        parts = media_range.split(";")
        media_type = parts.pop(0)
        media_params = []
        q = 1.0
        for part in parts:
            (key, value) = part.lstrip().split("=", 1)
            if key == "q":
                q = float(value)
            else:
                media_params.append((key, value))
        result.append((media_type, tuple(media_params), q))
    # Sort by descending q value.  NOTE(review): cmp-style sort comparator
    # is Python-2 only; Py3 would need key=itemgetter(2), reverse=True.
    result.sort(lambda x, y: -cmp(x[2], y[2]))
    return result


def accepts(request, media_type):
    """Return whether this request has an Accept header that matches type"""
    accept = parse_accept_header(request.META.get("HTTP_ACCEPT", ""))
    return media_type in [t for (t, p, q) in accept]
agpl-3.0
MIPS/prebuilts-gcc-darwin-x86-x86-x86_64-linux-android-4.8
share/gdb/python/gdb/prompt.py
137
4210
# Extended prompt utilities.
# Copyright (C) 2011-2013 Free Software Foundation, Inc.

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

""" Extended prompt library functions."""

import gdb
import os

def _prompt_pwd(ignore):
    "The current working directory."
    # Py2 API: getcwdu() returns unicode; in Py3 this would be os.getcwd().
    return os.getcwdu()

def _prompt_object_attr(func, what, attr, nattr):
    """Internal worker for fetching GDB attributes."""
    if attr is None:
        # No explicit attribute requested: fall back to the default one.
        attr = nattr
    try:
        obj = func()
    except gdb.error:
        return '<no %s>' % what
    if hasattr(obj, attr):
        result = getattr(obj, attr)
        if callable(result):
            result = result()
        return result
    else:
        return '<no attribute %s on current %s>' % (attr, what)

def _prompt_frame(attr):
    "The selected frame; an argument names a frame parameter."
    return _prompt_object_attr(gdb.selected_frame, 'frame', attr, 'name')

def _prompt_thread(attr):
    "The selected thread; an argument names a thread parameter."
    return _prompt_object_attr(gdb.selected_thread, 'thread', attr, 'num')

def _prompt_version(attr):
    "The version of GDB."
    return gdb.VERSION

def _prompt_esc(attr):
    "The ESC character."
    return '\033'

def _prompt_bs(attr):
    "A backslash."
    return '\\'

def _prompt_n(attr):
    "A newline."
    return '\n'

def _prompt_r(attr):
    "A carriage return."
    return '\r'

def _prompt_param(attr):
    "A parameter's value; the argument names the parameter."
    return gdb.parameter(attr)

def _prompt_noprint_begin(attr):
    "Begins a sequence of non-printing characters."
    return '\001'

def _prompt_noprint_end(attr):
    "Ends a sequence of non-printing characters."
    return '\002'

# Escape character -> substitution function.  Each function takes the
# optional "{...}" argument (or None) and returns the replacement text.
prompt_substitutions = {
    'e': _prompt_esc,
    '\\': _prompt_bs,
    'n': _prompt_n,
    'r': _prompt_r,
    'v': _prompt_version,
    'w': _prompt_pwd,
    'f': _prompt_frame,
    't': _prompt_thread,
    'p': _prompt_param,
    '[': _prompt_noprint_begin,
    ']': _prompt_noprint_end
}

def prompt_help():
    """Generate help dynamically from the __doc__ strings of attribute
    functions."""

    result = ''
    keys = sorted (prompt_substitutions.keys())
    for key in keys:
        result += '  \\%s\t%s\n' % (key, prompt_substitutions[key].__doc__)
    result += """
A substitution can be used in a simple form, like "\\f".
An argument can also be passed to it, like "\\f{name}".
The meaning of the argument depends on the particular substitution."""
    return result

def substitute_prompt(prompt):
    "Perform substitutions on PROMPT."

    result = ''
    plen = len(prompt)
    i = 0
    while i < plen:
        if prompt[i] == '\\':
            i = i + 1
            if i >= plen:
                # Trailing lone backslash: drop it.
                break
            cmdch = prompt[i]

            if cmdch in prompt_substitutions:
                cmd = prompt_substitutions[cmdch]

                # An optional "{arg}" may follow the escape character.
                if i + 1 < plen and prompt[i + 1] == '{':
                    j = i + 1
                    while j < plen and prompt[j] != '}':
                        j = j + 1
                    # Just ignore formatting errors.
                    if j >= plen or prompt[j] != '}':
                        arg = None
                    else:
                        arg = prompt[i + 2 : j]
                        i = j
                else:
                    arg = None
                result += str(cmd(arg))
            else:
                # Unrecognized escapes are turned into the escaped
                # character itself.
                result += prompt[i]
        else:
            result += prompt[i]

        i = i + 1

    return result
gpl-2.0
stefanhahmann/pybossa
test/test_uploader/test_rackspace_uploader.py
1
14235
# -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2014 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa.  If not, see <http://www.gnu.org/licenses/>.
"""This module tests the Uploader class."""

from default import Test, with_context
from pybossa.uploader.rackspace import RackspaceUploader
from mock import patch, PropertyMock, call, MagicMock
from werkzeug.datastructures import FileStorage
from pyrax.fakes import FakeContainer
from pyrax.exceptions import NoSuchObject, NoSuchContainer
from test_uploader import cloudfiles_mock, fake_container


class TestRackspaceUploader(Test):

    """Test PyBossa Rackspace Uploader module."""
    # All tests patch pyrax so no real Rackspace/CloudFiles calls happen;
    # assertions are made against the recorded mock call sequences.

    @patch('pybossa.uploader.rackspace.pyrax.set_credentials',
           return_value=True)
    def test_rackspace_uploader_init(self, Mock):
        """Test RACKSPACE UPLOADER init works."""
        new_extensions = ['pdf', 'doe']
        with patch('pybossa.uploader.rackspace.pyrax.cloudfiles',
                   return_value=cloudfiles_mock):
            with patch.dict(self.flask_app.config,
                            {'ALLOWED_EXTENSIONS': new_extensions}):
                with patch('pybossa.uploader.rackspace.pyrax.cloudfiles') as mycf:
                    mycf.get_container.return_value = True
                    u = RackspaceUploader()
                    res = u.init_app(self.flask_app, cont_name='mycontainer')
                    err_msg = "It should return the container."
                    assert res is True, err_msg
                    err_msg = "The container name should be updated."
                    assert u.cont_name == 'mycontainer', err_msg
                    for ext in new_extensions:
                        err_msg = "The .%s extension should be allowed" % ext
                        assert ext in u.allowed_extensions, err_msg

    @patch('pybossa.uploader.rackspace.pyrax.set_credentials',
           return_value=True)
    @patch('pybossa.uploader.rackspace.pyrax.utils.get_checksum',
           return_value="1234abcd")
    def test_rackspace_uploader_creates_container(self, mock, mock2):
        """Test RACKSPACE UPLOADER creates container works."""
        with patch('pybossa.uploader.rackspace.pyrax.cloudfiles') as mycf:
            # Missing container must trigger creation + public exposure.
            mycf.get_container.side_effect = NoSuchContainer
            mycf.create_container.return_value = True
            mycf.make_container_public.return_value = True
            u = RackspaceUploader()
            res = u.init_app(self.flask_app)
            err_msg = "Init app should return the container."
            assert res is True, err_msg

    @patch('pybossa.uploader.rackspace.pyrax.set_credentials',
           return_value=True)
    @patch('pybossa.uploader.rackspace.pyrax.utils.get_checksum',
           return_value="1234abcd")
    def test_rackspace_uploader_upload_correct_file(self, mock, mock2):
        """Test RACKSPACE UPLOADER upload file works."""
        with patch('pybossa.uploader.rackspace.pyrax.cloudfiles') as mycf:
            mycf.upload_file.return_value=True
            # NoSuchObject means no pre-existing object has to be purged.
            mycf.get_object.side_effect = NoSuchObject
            u = RackspaceUploader()
            u.init_app(self.flask_app)
            file = FileStorage(filename='test.jpg')
            err_msg = "Upload file should return True"
            assert u.upload_file(file, container='user_3') is True, err_msg
            calls = [call.get_container('user_3'),
                     call.get_container().get_object('test.jpg')]
            mycf.assert_has_calls(calls, any_order=True)

    @patch('pybossa.uploader.rackspace.pyrax.set_credentials',
           return_value=True)
    @patch('pybossa.uploader.rackspace.pyrax.utils.get_checksum',
           return_value="1234abcd")
    def test_rackspace_uploader_upload_correct_purgin_first_file(self, mock, mock2):
        """Test RACKSPACE UPLOADER upload file purging first file works."""
        with patch('pybossa.uploader.rackspace.pyrax.cloudfiles') as mycf:
            mycf.upload_file.return_value=True
            # NOTE(review): side_effect = True is unusual for a mock
            # (side_effect normally takes a callable/iterable/exception);
            # verify this still behaves as intended with the mock version
            # in use.
            mycf.get_object.side_effect = True
            u = RackspaceUploader()
            u.init_app(self.flask_app)
            file = FileStorage(filename='test.jpg')
            err_msg = "Upload file should return True"
            assert u.upload_file(file, container='user_3') is True, err_msg
            calls = [call.get_container('user_3'),
                     call.get_container().get_object().delete(),
                     call.get_container().get_object('test.jpg')]
            # Py2 print statement; kept for debugging the recorded calls.
            print mycf.mock_calls
            mycf.assert_has_calls(calls, any_order=True)

    @patch('pybossa.uploader.rackspace.pyrax.set_credentials',
           return_value=True)
    @patch('pybossa.uploader.rackspace.pyrax.utils.get_checksum',
           return_value="1234abcd")
    def test_rackspace_uploader_upload_file_fails(self, mock, mock2):
        """Test RACKSPACE UPLOADER upload file fail works."""
        with patch('pybossa.uploader.rackspace.pyrax.cloudfiles') as mycf:
            from pyrax.exceptions import UploadFailed
            mycf.upload_file.side_effect = UploadFailed
            u = RackspaceUploader()
            u.init_app(self.flask_app)
            file = FileStorage(filename='test.jpg')
            err_msg = "Upload file should return False"
            assert u.upload_file(file, container='user_3') is False, err_msg

    @patch('pybossa.uploader.rackspace.pyrax.set_credentials',
           return_value=True)
    @patch('pybossa.uploader.rackspace.pyrax.utils.get_checksum',
           return_value="1234abcd")
    def test_rackspace_uploader_upload_file_object_fails(self, mock, mock2):
        """Test RACKSPACE UPLOADER upload file object fail works."""
        with patch('pybossa.uploader.rackspace.pyrax.cloudfiles') as mycf:
            from pyrax.exceptions import NoSuchObject
            container = MagicMock()
            container.get_object.side_effect = NoSuchObject
            mycf.get_container.return_value = container
            u = RackspaceUploader()
            u.init_app(self.flask_app)
            file = FileStorage(filename='test.jpg')
            err_msg = "Upload file should return True"
            assert u.upload_file(file, container='user_3') is True, err_msg

    @patch('pybossa.uploader.rackspace.pyrax.set_credentials',
           return_value=True)
    @patch('pybossa.uploader.rackspace.pyrax.utils.get_checksum',
           return_value="1234abcd")
    def test_rackspace_uploader_upload_wrong_file(self, mock, mock2):
        """Test RACKSPACE UPLOADER upload wrong file extension works."""
        with patch('pybossa.uploader.rackspace.pyrax.cloudfiles') as mycf:
            mycf.upload_file.return_value = True
            u = RackspaceUploader()
            u.init_app(self.flask_app)
            # .docs is not in the allowed extensions, so upload is refused.
            file = FileStorage(filename='test.docs')
            err_msg = "Upload file should return False"
            res = u.upload_file(file, container='user_3')
            assert res is False, err_msg

    @with_context
    @patch('pybossa.uploader.rackspace.pyrax.set_credentials',
           return_value=True)
    @patch('pybossa.uploader.rackspace.url_for',
           return_value='/static/img/placeholder.user.png')
    def test_rackspace_uploader_lookup_url(self, mock1, mock2):
        """Test RACKSPACE UPLOADER lookup returns a valid link."""
        uri = 'http://rackspace.com'
        filename = 'test.jpg'
        with patch('pybossa.uploader.rackspace.pyrax.cloudfiles') as mycf:
            cdn_enabled_mock = PropertyMock(return_value=True)
            type(fake_container).cdn_enabled = cdn_enabled_mock
            mycf.get_container.return_value = fake_container
            u = RackspaceUploader()
            u.init_app(self.flask_app)
            res = u._lookup_url('rackspace', {'filename': filename,
                                             'container': 'user_3'})
            expected_url = "%s/%s" % (uri, filename)
            err_msg = "We should get the following URL: %s" % expected_url
            assert res == expected_url, err_msg

    @with_context
    @patch('pybossa.uploader.rackspace.pyrax.set_credentials',
           return_value=True)
    @patch('pybossa.uploader.rackspace.url_for',
           return_value='/static/img/placeholder.user.png')
    def test_rackspace_uploader_lookup_url_enable_cdn(self, mock1, mock2):
        """Test RACKSPACE UPLOADER lookup enables CDN for non enabled CDN."""
        filename = 'test.jpg'
        with patch('pybossa.uploader.rackspace.pyrax.cloudfiles') as mycf:
            cdn_enabled_mock = PropertyMock(return_value=False)
            type(fake_container).cdn_enabled = cdn_enabled_mock
            mycf.get_container.return_value = fake_container
            u = RackspaceUploader()
            u.init_app(self.flask_app)
            res = u._lookup_url('rackspace', {'filename': filename,
                                             'container': 'user_3'})
            url = 'http://rackspace.com/test.jpg'
            err_msg = "We should get the %s but we got %s " % (url, res)
            assert res == url, err_msg
            # The uploader must have made the container public to enable CDN.
            calls = [call.make_public()]
            fake_container.assert_has_calls(calls, any_order=True)

    @with_context
    @patch('pybossa.uploader.rackspace.pyrax.set_credentials',
           return_value=True)
    def test_rackspace_uploader_lookup_url_returns_failover_url(self, mock):
        """Test RACKSPACE UPLOADER lookup returns failover_url for user avatar."""
        filename = 'test.jpg'
        with patch('pybossa.uploader.rackspace.pyrax.cloudfiles') as mycf:
            cdn_enabled_mock = PropertyMock(return_value=False)
            type(fake_container).cdn_enabled = cdn_enabled_mock
            mycf.get_container.return_value = fake_container
            fake_container.make_public.side_effect = NoSuchObject
            u = RackspaceUploader()
            u.init_app(self.flask_app)
            res = u._lookup_url('rackspace', {'filename': filename,
                                             'container': 'user_3'})
            failover_url = 'http://localhost/static/img/placeholder.user.png'
            err_msg = "We should get the %s but we got %s " % (failover_url, res)
            assert res == failover_url, err_msg

    @with_context
    @patch('pybossa.uploader.rackspace.pyrax.set_credentials',
           return_value=True)
    def test_rackspace_uploader_lookup_url_returns_failover_url_project(self, mock):
        """Test RACKSPACE UPLOADER lookup returns failover_url for project avatar."""
        filename = 'app_32.jpg'
        with patch('pybossa.uploader.rackspace.pyrax.cloudfiles') as mycf:
            cdn_enabled_mock = PropertyMock(return_value=False)
            type(fake_container).cdn_enabled = cdn_enabled_mock
            mycf.get_container.return_value = fake_container
            fake_container.make_public.side_effect = NoSuchObject
            u = RackspaceUploader()
            u.init_app(self.flask_app)
            res = u._lookup_url('rackspace', {'filename': filename,
                                             'container': 'user_3'})
            failover_url = 'http://localhost/static/img/placeholder.project.png'
            err_msg = "We should get the %s but we got %s " % (failover_url, res)
            assert res == failover_url, err_msg

    @patch('pybossa.uploader.rackspace.pyrax.set_credentials',
           return_value=True)
    def test_rackspace_uploader_get_container(self, mock1):
        """Test RACKSPACE UPLOADER get_container method works."""
        with patch('pybossa.uploader.rackspace.pyrax.cloudfiles') as mycf:
            cdn_enabled_mock = PropertyMock(return_value=False)
            type(fake_container).cdn_enabled = cdn_enabled_mock
            mycf.get_container.side_effect = NoSuchContainer
            calls = [call.get_container('user_3'),
                     call.create_container('user_3'),
                     call.make_container_public('user_3')
                     ]
            u = RackspaceUploader()
            u.init_app(self.flask_app)
            assert u.get_container('user_3')
            mycf.assert_has_calls(calls, any_order=True)

    @patch('pybossa.uploader.rackspace.pyrax.set_credentials',
           return_value=True)
    def test_rackspace_uploader_delete(self, mock1):
        """Test RACKSPACE UPLOADER delete method works."""
        with patch('pybossa.uploader.rackspace.pyrax.cloudfiles') as mycf:
            calls = [call.get_container('container'),
                     call.get_container().get_object('file'),
                     call.get_container().get_object().delete()
                     ]
            u = RackspaceUploader()
            u.init_app(self.flask_app)
            err_msg = "It should return True"
            assert u.delete_file('file', 'container') is True, err_msg
            mycf.assert_has_calls(calls, any_order=True)

    @patch('pybossa.uploader.rackspace.pyrax.set_credentials',
           return_value=True)
    def test_rackspace_uploader_delete_fails(self, mock1):
        """Test RACKSPACE UPLOADER delete fails method works."""
        with patch('pybossa.uploader.rackspace.pyrax.cloudfiles') as mycf:
            container = MagicMock()
            container.get_object.side_effect = NoSuchObject
            mycf.get_container.return_value = container
            calls = [call.get_container('container'),
                     ]
            u = RackspaceUploader()
            u.init_app(self.flask_app)
            err_msg = "It should return False"
            assert u.delete_file('file', 'container') is False, err_msg
            mycf.assert_has_calls(calls, any_order=True)
agpl-3.0
carandraug/microscope
microscope/testsuite/devices.py
2
7283
#!/usr/bin/env python3

## Copyright (C) 2020 David Miguel Susano Pinto <carandraug@gmail.com>
## Copyright (C) 2020 Mick Phillips <mick.phillips@gmail.com>
##
## This file is part of Microscope.
##
## Microscope is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## Microscope is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Microscope.  If not, see <http://www.gnu.org/licenses/>.

import logging
import time
from enum import IntEnum

import microscope.abc

# These classes were originally in testsuite but have been moved to
# their own subpackage, these imports are for backwards compatibility.
from microscope.simulators import (
    SimulatedCamera,
    SimulatedController as TestController,
    SimulatedDeformableMirror as TestDeformableMirror,
    SimulatedFilterWheel as TestFilterWheel,
    SimulatedLightSource,
    SimulatedStage as TestStage,
)


_logger = logging.getLogger(__name__)


class CamEnum(IntEnum):
    # Sample enum used by TestCamera's "intEnum" setting.
    A = 1
    B = 2
    C = 3
    D = 4


class TestCamera(SimulatedCamera):
    """Simulated camera with extra enum-typed settings.

    This adds a series of weird settings to the base simulated camera
    which are only useful to test settings handling in cockpit: the same
    enum setting expressed as an IntEnum, a dict, a list, and a tuple.
    """

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        # Enum-setting tests
        self._intEnum = CamEnum.A
        self.add_setting(
            "intEnum",
            "enum",
            lambda: self._intEnum,
            lambda val: setattr(self, "_intEnum", val),
            CamEnum,
        )
        self._dictEnum = 0
        self.add_setting(
            "dictEnum",
            "enum",
            lambda: self._dictEnum,
            lambda val: setattr(self, "_dictEnum", val),
            {0: "A", 8: "B", 13: "C", 22: "D"},
        )
        self._listEnum = 0
        self.add_setting(
            "listEnum",
            "enum",
            lambda: self._listEnum,
            lambda val: setattr(self, "_listEnum", val),
            ["A", "B", "C", "D"],
        )
        self._tupleEnum = 0
        self.add_setting(
            "tupleEnum",
            "enum",
            lambda: self._tupleEnum,
            lambda val: setattr(self, "_tupleEnum", val),
            ("A", "B", "C", "D"),
        )


class TestLaser(SimulatedLightSource):
    # Deprecated, kept for backwards compatibility.
    pass


class DummySLM(microscope.abc.Device):
    """Fake spatial light modulator.

    This only exists to test cockpit.  There is no corresponding
    device type in microscope yet.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.sim_diffraction_angle = 0.0
        self.sequence_params = []
        self.sequence_index = 0

    def _do_shutdown(self) -> None:
        pass

    def set_sim_diffraction_angle(self, theta):
        _logger.info("set_sim_diffraction_angle %f", theta)
        self.sim_diffraction_angle = theta

    def get_sim_diffraction_angle(self):
        return self.sim_diffraction_angle

    def run(self):
        self.enabled = True
        _logger.info("run")
        return

    def stop(self):
        self.enabled = False
        _logger.info("stop")
        return

    def get_sim_sequence(self):
        return self.sequence_params

    def set_sim_sequence(self, seq):
        _logger.info("set_sim_sequence")
        self.sequence_params = seq
        return

    def get_sequence_index(self):
        return self.sequence_index


class DummyDSP(microscope.abc.Device):
    """Fake digital signal processor.

    This only exists to test cockpit.  There is no corresponding
    device type in microscope yet.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._digi = 0                 # last value written via WriteDigital
        self._ana = [0, 0, 0, 0]       # analogue line positions
        self._client = None            # notified when RunActions finishes
        self._actions = []             # set by PrepareActions

    def _do_shutdown(self) -> None:
        pass

    def Abort(self):
        _logger.info("Abort")

    def WriteDigital(self, value):
        _logger.info("WriteDigital: %s", bin(value))
        self._digi = value

    def MoveAbsolute(self, aline, pos):
        _logger.info("MoveAbsoluteADU: line %d, value %d", aline, pos)
        self._ana[aline] = pos

    def arcl(self, mask, pairs):
        _logger.info("arcl: %s, %s", mask, pairs)

    def profileSet(self, pstr, digitals, *analogs):
        _logger.info("profileSet ...")
        # BUG FIX: the original passed the values as extra arguments to a
        # format string without placeholders (_logger.info("... ", pstr)),
        # which makes the logging module fail with "not all arguments
        # converted during string formatting" at emit time and drop the
        # values from the log.
        _logger.info("... %s", pstr)
        _logger.info("... %s", digitals)
        _logger.info("... %s", analogs)

    def DownloadProfile(self):
        _logger.info("DownloadProfile")

    def InitProfile(self, numReps):
        _logger.info("InitProfile")

    def trigCollect(self, *args, **kwargs):
        _logger.info("trigCollect: ... ")
        _logger.info(args)
        _logger.info(kwargs)

    def ReadPosition(self, aline):
        _logger.info(
            "ReadPosition : line %d, value %d", aline, self._ana[aline]
        )
        return self._ana[aline]

    def ReadDigital(self):
        _logger.info("ReadDigital: %s", bin(self._digi))
        return self._digi

    def PrepareActions(self, actions, numReps=1):
        """Store the action table; must be called before RunActions."""
        _logger.info("PrepareActions")
        self._actions = actions
        self._repeats = numReps

    def RunActions(self):
        # Replays the prepared actions, sleeping a[0] milliseconds each,
        # then notifies the client (if any).  Requires PrepareActions to
        # have been called first (it sets self._repeats).
        _logger.info("RunActions ...")
        for i in range(self._repeats):
            for a in self._actions:
                _logger.info(a)
                time.sleep(a[0] / 1000.0)
        if self._client:
            self._client.receiveData("DSP done")
        _logger.info("... RunActions done.")

    def receiveClient(self, *args, **kwargs):
        # XXX: maybe this should be on its own mixin instead of on DataDevice
        return microscope.abc.DataDevice.receiveClient(self, *args, **kwargs)

    def set_client(self, *args, **kwargs):
        # XXX: maybe this should be on its own mixin instead of on DataDevice
        return microscope.abc.DataDevice.set_client(self, *args, **kwargs)


class TestFloatingDevice(
    microscope.abc.FloatingDeviceMixin, microscope.abc.Device
):
    """Simple device with a UID after having been initialized.

    Floating devices are devices where we can't specify which one to
    get, we can only construct it and after initialisation check its
    UID.  In this class for test units we can check which UID to get.
    """

    def __init__(self, uid: str, **kwargs) -> None:
        super().__init__(**kwargs)
        self._initialized = False
        self._uid = uid
        self.initialize()

    def initialize(self) -> None:
        super().initialize()
        self._initialized = True

    def get_index(self) -> int:
        """Expose private _index for testing purposes."""
        return self._index

    def get_id(self) -> str:
        # The UID is only meaningful once initialize() has run.
        if self._initialized:
            return self._uid
        else:
            raise microscope.IncompatibleStateError(
                "uid is not available until after initialisation"
            )

    def _do_shutdown(self) -> None:
        pass
gpl-3.0
gmt/portage
pym/portage/output.py
2
24509
# Copyright 1998-2015 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 from __future__ import division __docformat__ = "epytext" import errno import io import re import subprocess import sys import portage portage.proxy.lazyimport.lazyimport(globals(), 'portage.util:writemsg', ) import portage.util.formatter as formatter from portage import os from portage import _encodings from portage import _unicode_encode from portage import _unicode_decode from portage.const import COLOR_MAP_FILE from portage.exception import CommandNotFound, FileNotFound, \ ParseError, PermissionDenied, PortageException from portage.localization import _ havecolor = 1 dotitles = 1 _styles = {} """Maps style class to tuple of attribute names.""" codes = {} """Maps attribute name to ansi code.""" esc_seq = "\x1b[" codes["normal"] = esc_seq + "0m" codes["reset"] = esc_seq + "39;49;00m" codes["bold"] = esc_seq + "01m" codes["faint"] = esc_seq + "02m" codes["standout"] = esc_seq + "03m" codes["underline"] = esc_seq + "04m" codes["blink"] = esc_seq + "05m" codes["overline"] = esc_seq + "06m" codes["reverse"] = esc_seq + "07m" codes["invisible"] = esc_seq + "08m" codes["no-attr"] = esc_seq + "22m" codes["no-standout"] = esc_seq + "23m" codes["no-underline"] = esc_seq + "24m" codes["no-blink"] = esc_seq + "25m" codes["no-overline"] = esc_seq + "26m" codes["no-reverse"] = esc_seq + "27m" codes["bg_black"] = esc_seq + "40m" codes["bg_darkred"] = esc_seq + "41m" codes["bg_darkgreen"] = esc_seq + "42m" codes["bg_brown"] = esc_seq + "43m" codes["bg_darkblue"] = esc_seq + "44m" codes["bg_purple"] = esc_seq + "45m" codes["bg_teal"] = esc_seq + "46m" codes["bg_lightgray"] = esc_seq + "47m" codes["bg_default"] = esc_seq + "49m" codes["bg_darkyellow"] = codes["bg_brown"] def color(fg, bg="default", attr=["normal"]): mystr = codes[fg] for x in [bg]+attr: mystr += codes[x] return mystr ansi_codes = [] for x in range(30, 38): ansi_codes.append("%im" % x) 
ansi_codes.append("%i;01m" % x) rgb_ansi_colors = ['0x000000', '0x555555', '0xAA0000', '0xFF5555', '0x00AA00', '0x55FF55', '0xAA5500', '0xFFFF55', '0x0000AA', '0x5555FF', '0xAA00AA', '0xFF55FF', '0x00AAAA', '0x55FFFF', '0xAAAAAA', '0xFFFFFF'] for x in range(len(rgb_ansi_colors)): codes[rgb_ansi_colors[x]] = esc_seq + ansi_codes[x] del x codes["black"] = codes["0x000000"] codes["darkgray"] = codes["0x555555"] codes["red"] = codes["0xFF5555"] codes["darkred"] = codes["0xAA0000"] codes["green"] = codes["0x55FF55"] codes["darkgreen"] = codes["0x00AA00"] codes["yellow"] = codes["0xFFFF55"] codes["brown"] = codes["0xAA5500"] codes["blue"] = codes["0x5555FF"] codes["darkblue"] = codes["0x0000AA"] codes["fuchsia"] = codes["0xFF55FF"] codes["purple"] = codes["0xAA00AA"] codes["turquoise"] = codes["0x55FFFF"] codes["teal"] = codes["0x00AAAA"] codes["white"] = codes["0xFFFFFF"] codes["lightgray"] = codes["0xAAAAAA"] codes["darkteal"] = codes["turquoise"] # Some terminals have darkyellow instead of brown. 
codes["0xAAAA00"] = codes["brown"] codes["darkyellow"] = codes["0xAAAA00"] # Colors from /etc/init.d/functions.sh _styles["NORMAL"] = ( "normal", ) _styles["GOOD"] = ( "green", ) _styles["WARN"] = ( "yellow", ) _styles["BAD"] = ( "red", ) _styles["HILITE"] = ( "teal", ) _styles["BRACKET"] = ( "blue", ) # Portage functions _styles["INFORM"] = ( "darkgreen", ) _styles["UNMERGE_WARN"] = ( "red", ) _styles["SECURITY_WARN"] = ( "red", ) _styles["MERGE_LIST_PROGRESS"] = ( "yellow", ) _styles["PKG_BLOCKER"] = ( "red", ) _styles["PKG_BLOCKER_SATISFIED"] = ( "darkblue", ) _styles["PKG_MERGE"] = ( "darkgreen", ) _styles["PKG_MERGE_SYSTEM"] = ( "darkgreen", ) _styles["PKG_MERGE_WORLD"] = ( "green", ) _styles["PKG_BINARY_MERGE"] = ( "purple", ) _styles["PKG_BINARY_MERGE_SYSTEM"] = ( "purple", ) _styles["PKG_BINARY_MERGE_WORLD"] = ( "fuchsia", ) _styles["PKG_UNINSTALL"] = ( "red", ) _styles["PKG_NOMERGE"] = ( "darkblue", ) _styles["PKG_NOMERGE_SYSTEM"] = ( "darkblue", ) _styles["PKG_NOMERGE_WORLD"] = ( "blue", ) _styles["PROMPT_CHOICE_DEFAULT"] = ( "green", ) _styles["PROMPT_CHOICE_OTHER"] = ( "red", ) def _parse_color_map(config_root='/', onerror=None): """ Parse /etc/portage/color.map and return a dict of error codes. 
@param onerror: an optional callback to handle any ParseError that would otherwise be raised @type onerror: callable @rtype: dict @return: a dictionary mapping color classes to color codes """ global codes, _styles myfile = os.path.join(config_root, COLOR_MAP_FILE) ansi_code_pattern = re.compile("^[0-9;]*m$") quotes = '\'"' def strip_quotes(token): if token[0] in quotes and token[0] == token[-1]: token = token[1:-1] return token try: with io.open(_unicode_encode(myfile, encoding=_encodings['fs'], errors='strict'), mode='r', encoding=_encodings['content'], errors='replace') as f: lines = f.readlines() for lineno, line in enumerate(lines): commenter_pos = line.find("#") line = line[:commenter_pos].strip() if len(line) == 0: continue split_line = line.split("=") if len(split_line) != 2: e = ParseError(_("'%s', line %s: expected exactly one occurrence of '=' operator") % \ (myfile, lineno)) raise e if onerror: onerror(e) else: raise e continue k = strip_quotes(split_line[0].strip()) v = strip_quotes(split_line[1].strip()) if not k in _styles and not k in codes: e = ParseError(_("'%s', line %s: Unknown variable: '%s'") % \ (myfile, lineno, k)) if onerror: onerror(e) else: raise e continue if ansi_code_pattern.match(v): if k in _styles: _styles[k] = ( esc_seq + v, ) elif k in codes: codes[k] = esc_seq + v else: code_list = [] for x in v.split(): if x in codes: if k in _styles: code_list.append(x) elif k in codes: code_list.append(codes[x]) else: e = ParseError(_("'%s', line %s: Undefined: '%s'") % \ (myfile, lineno, x)) if onerror: onerror(e) else: raise e if k in _styles: _styles[k] = tuple(code_list) elif k in codes: codes[k] = "".join(code_list) except (IOError, OSError) as e: if e.errno == errno.ENOENT: raise FileNotFound(myfile) elif e.errno == errno.EACCES: raise PermissionDenied(myfile) raise def nc_len(mystr): tmp = re.sub(esc_seq + "^m]+m", "", mystr); return len(tmp) _legal_terms_re = 
re.compile(r'^(xterm|xterm-color|Eterm|aterm|rxvt|screen|kterm|rxvt-unicode|gnome|interix)') _disable_xtermTitle = None _max_xtermTitle_len = 253 def xtermTitle(mystr, raw=False): global _disable_xtermTitle if _disable_xtermTitle is None: _disable_xtermTitle = not (sys.__stderr__.isatty() and \ 'TERM' in os.environ and \ _legal_terms_re.match(os.environ['TERM']) is not None) if dotitles and not _disable_xtermTitle: # If the title string is too big then the terminal can # misbehave. Therefore, truncate it if it's too big. if len(mystr) > _max_xtermTitle_len: mystr = mystr[:_max_xtermTitle_len] if not raw: mystr = '\x1b]0;%s\x07' % mystr # avoid potential UnicodeEncodeError mystr = _unicode_encode(mystr, encoding=_encodings['stdio'], errors='backslashreplace') f = sys.stderr if sys.hexversion >= 0x3000000: f = f.buffer f.write(mystr) f.flush() default_xterm_title = None def xtermTitleReset(): global default_xterm_title if default_xterm_title is None: prompt_command = os.environ.get('PROMPT_COMMAND') if prompt_command == "": default_xterm_title = "" elif prompt_command is not None: if dotitles and \ 'TERM' in os.environ and \ _legal_terms_re.match(os.environ['TERM']) is not None and \ sys.__stderr__.isatty(): from portage.process import find_binary, spawn shell = os.environ.get("SHELL") if not shell or not os.access(shell, os.EX_OK): shell = find_binary("sh") if shell: spawn([shell, "-c", prompt_command], env=os.environ, fd_pipes={ 0: portage._get_stdin().fileno(), 1: sys.__stderr__.fileno(), 2: sys.__stderr__.fileno() }) else: os.system(prompt_command) return else: pwd = os.environ.get('PWD','') home = os.environ.get('HOME', '') if home != '' and pwd.startswith(home): pwd = '~' + pwd[len(home):] default_xterm_title = '\x1b]0;%s@%s:%s\x07' % ( os.environ.get('LOGNAME', ''), os.environ.get('HOSTNAME', '').split('.', 1)[0], pwd) xtermTitle(default_xterm_title, raw=True) def notitles(): "turn off title setting" dotitles = 0 def nocolor(): "turn off colorization" global 
havecolor havecolor = 0 def resetColor(): return codes["reset"] def style_to_ansi_code(style): """ @param style: A style name @type style: String @rtype: String @return: A string containing one or more ansi escape codes that are used to render the given style. """ ret = "" for attr_name in _styles[style]: # allow stuff that has found it's way through ansi_code_pattern ret += codes.get(attr_name, attr_name) return ret def colormap(): mycolors = [] for c in ("GOOD", "WARN", "BAD", "HILITE", "BRACKET", "NORMAL"): mycolors.append("%s=$'%s'" % (c, style_to_ansi_code(c))) return "\n".join(mycolors) def colorize(color_key, text): global havecolor if havecolor: if color_key in codes: return codes[color_key] + text + codes["reset"] elif color_key in _styles: return style_to_ansi_code(color_key) + text + codes["reset"] else: return text else: return text compat_functions_colors = [ "bold", "white", "teal", "turquoise", "darkteal", "fuchsia", "purple", "blue", "darkblue", "green", "darkgreen", "yellow", "brown", "darkyellow", "red", "darkred", ] class create_color_func(object): __slots__ = ("_color_key",) def __init__(self, color_key): self._color_key = color_key def __call__(self, text): return colorize(self._color_key, text) for c in compat_functions_colors: globals()[c] = create_color_func(c) class ConsoleStyleFile(object): """ A file-like object that behaves something like the colorize() function. Style identifiers passed in via the new_styles() method will be used to apply console codes to output. """ def __init__(self, f): self._file = f self._styles = None self.write_listener = None def new_styles(self, styles): self._styles = styles def write(self, s): # In python-2.6, DumbWriter.send_line_break() can write # non-unicode '\n' which fails with TypeError if self._file # is a text stream such as io.StringIO. Therefore, make sure # input is converted to unicode when necessary. 
s = _unicode_decode(s) global havecolor if havecolor and self._styles: styled_s = [] for style in self._styles: styled_s.append(style_to_ansi_code(style)) styled_s.append(s) styled_s.append(codes["reset"]) self._write(self._file, "".join(styled_s)) else: self._write(self._file, s) if self.write_listener: self._write(self.write_listener, s) def _write(self, f, s): # avoid potential UnicodeEncodeError if f in (sys.stdout, sys.stderr): s = _unicode_encode(s, encoding=_encodings['stdio'], errors='backslashreplace') if sys.hexversion >= 0x3000000: f = f.buffer f.write(s) def writelines(self, lines): for s in lines: self.write(s) def flush(self): self._file.flush() def close(self): self._file.close() class StyleWriter(formatter.DumbWriter): """ This is just a DumbWriter with a hook in the new_styles() method that passes a styles tuple as a single argument to a callable style_listener attribute. """ def __init__(self, **kwargs): formatter.DumbWriter.__init__(self, **kwargs) self.style_listener = None def new_styles(self, styles): formatter.DumbWriter.new_styles(self, styles) if self.style_listener: self.style_listener(styles) def get_term_size(fd=None): """ Get the number of lines and columns of the tty that is connected to fd. Returns a tuple of (lines, columns) or (0, 0) if an error occurs. The curses module is used if available, otherwise the output of `stty size` is parsed. The lines and columns values are guaranteed to be greater than or equal to zero, since a negative COLUMNS variable is known to prevent some commands from working (see bug #394091). 
""" if fd is None: fd = sys.stdout if not hasattr(fd, 'isatty') or not fd.isatty(): return (0, 0) try: import curses try: curses.setupterm(term=os.environ.get("TERM", "unknown"), fd=fd.fileno()) return curses.tigetnum('lines'), curses.tigetnum('cols') except curses.error: pass except ImportError: pass try: proc = subprocess.Popen(["stty", "size"], stdout=subprocess.PIPE, stderr=fd) except EnvironmentError as e: if e.errno != errno.ENOENT: raise # stty command not found return (0, 0) out = _unicode_decode(proc.communicate()[0]) if proc.wait() == os.EX_OK: out = out.split() if len(out) == 2: try: val = (int(out[0]), int(out[1])) except ValueError: pass else: if val[0] >= 0 and val[1] >= 0: return val return (0, 0) def set_term_size(lines, columns, fd): """ Set the number of lines and columns for the tty that is connected to fd. For portability, this simply calls `stty rows $lines columns $columns`. """ from portage.process import spawn cmd = ["stty", "rows", str(lines), "columns", str(columns)] try: spawn(cmd, env=os.environ, fd_pipes={0:fd}) except CommandNotFound: writemsg(_("portage: stty: command not found\n"), noiselevel=-1) class EOutput(object): """ Performs fancy terminal formatting for status and informational messages. The provided methods produce identical terminal output to the eponymous functions in the shell script C{/sbin/functions.sh} and also accept identical parameters. This is not currently a drop-in replacement however, as the output-related functions in C{/sbin/functions.sh} are oriented for use mainly by system init scripts and ebuilds and their output can be customized via certain C{RC_*} environment variables (see C{/etc/conf.d/rc}). B{EOutput} is not customizable in this manner since it's intended for more general uses. Likewise, no logging is provided. @ivar quiet: Specifies if output should be silenced. @type quiet: BooleanType @ivar term_columns: Width of terminal in characters. 
Defaults to the value specified by the shell's C{COLUMNS} variable, else to the queried tty size, else to C{80}. @type term_columns: IntType """ def __init__(self, quiet=False): self.__last_e_cmd = "" self.__last_e_len = 0 self.quiet = quiet lines, columns = get_term_size() if columns <= 0: columns = 80 self.term_columns = columns sys.stdout.flush() sys.stderr.flush() def _write(self, f, s): # avoid potential UnicodeEncodeError writemsg(s, noiselevel=-1, fd=f) def __eend(self, caller, errno, msg): if errno == 0: status_brackets = colorize("BRACKET", "[ ") + colorize("GOOD", "ok") + colorize("BRACKET", " ]") else: status_brackets = colorize("BRACKET", "[ ") + colorize("BAD", "!!") + colorize("BRACKET", " ]") if msg: if caller == "eend": self.eerror(msg[0]) elif caller == "ewend": self.ewarn(msg[0]) if self.__last_e_cmd != "ebegin": self.__last_e_len = 0 if not self.quiet: out = sys.stdout self._write(out, "%*s%s\n" % ((self.term_columns - self.__last_e_len - 7), "", status_brackets)) def ebegin(self, msg): """ Shows a message indicating the start of a process. @param msg: A very brief (shorter than one line) description of the starting process. @type msg: StringType """ msg += " ..." if not self.quiet: self.einfon(msg) self.__last_e_len = len(msg) + 3 self.__last_e_cmd = "ebegin" def eend(self, errno, *msg): """ Indicates the completion of a process, optionally displaying a message via L{eerror} if the process's exit status isn't C{0}. @param errno: A standard UNIX C{errno} code returned by processes upon exit. @type errno: IntType @param msg: I{(optional)} An error message, typically a standard UNIX error string corresponding to C{errno}. @type msg: StringType """ if not self.quiet: self.__eend("eend", errno, msg) self.__last_e_cmd = "eend" def eerror(self, msg): """ Shows an error message. @param msg: A very brief (shorter than one line) error message. 
@type msg: StringType """ out = sys.stderr if not self.quiet: if self.__last_e_cmd == "ebegin": self._write(out, "\n") self._write(out, colorize("BAD", " * ") + msg + "\n") self.__last_e_cmd = "eerror" def einfo(self, msg): """ Shows an informative message terminated with a newline. @param msg: A very brief (shorter than one line) informative message. @type msg: StringType """ out = sys.stdout if not self.quiet: if self.__last_e_cmd == "ebegin": self._write(out, "\n") self._write(out, colorize("GOOD", " * ") + msg + "\n") self.__last_e_cmd = "einfo" def einfon(self, msg): """ Shows an informative message terminated without a newline. @param msg: A very brief (shorter than one line) informative message. @type msg: StringType """ out = sys.stdout if not self.quiet: if self.__last_e_cmd == "ebegin": self._write(out, "\n") self._write(out, colorize("GOOD", " * ") + msg) self.__last_e_cmd = "einfon" def ewarn(self, msg): """ Shows a warning message. @param msg: A very brief (shorter than one line) warning message. @type msg: StringType """ out = sys.stderr if not self.quiet: if self.__last_e_cmd == "ebegin": self._write(out, "\n") self._write(out, colorize("WARN", " * ") + msg + "\n") self.__last_e_cmd = "ewarn" def ewend(self, errno, *msg): """ Indicates the completion of a process, optionally displaying a message via L{ewarn} if the process's exit status isn't C{0}. @param errno: A standard UNIX C{errno} code returned by processes upon exit. @type errno: IntType @param msg: I{(optional)} A warning message, typically a standard UNIX error string corresponding to C{errno}. 
@type msg: StringType """ if not self.quiet: self.__eend("ewend", errno, msg) self.__last_e_cmd = "ewend" class ProgressBar(object): """The interface is copied from the ProgressBar class from the EasyDialogs module (which is Mac only).""" def __init__(self, title=None, maxval=0, label=None, max_desc_length=25): self._title = title or "" self._maxval = maxval self._label = label or "" self._curval = 0 self._desc = "" self._desc_max_length = max_desc_length self._set_desc() @property def curval(self): """ The current value (of type integer or long integer) of the progress bar. The normal access methods coerce curval between 0 and maxval. This attribute should not be altered directly. """ return self._curval @property def maxval(self): """ The maximum value (of type integer or long integer) of the progress bar; the progress bar (thermometer style) is full when curval equals maxval. If maxval is 0, the bar will be indeterminate (barber-pole). This attribute should not be altered directly. """ return self._maxval def title(self, newstr): """Sets the text in the title bar of the progress dialog to newstr.""" self._title = newstr self._set_desc() def label(self, newstr): """Sets the text in the progress box of the progress dialog to newstr.""" self._label = newstr self._set_desc() def _set_desc(self): self._desc = "%s%s" % ( "%s: " % self._title if self._title else "", "%s" % self._label if self._label else "" ) if len(self._desc) > self._desc_max_length: # truncate if too long self._desc = "%s..." % self._desc[:self._desc_max_length - 3] if len(self._desc): self._desc = self._desc.ljust(self._desc_max_length) def set(self, value, maxval=None): """ Sets the progress bar's curval to value, and also maxval to max if the latter is provided. value is first coerced between 0 and maxval. The thermometer bar is updated to reflect the changes, including a change from indeterminate to determinate or vice versa. 
""" if maxval is not None: self._maxval = maxval if value < 0: value = 0 elif value > self._maxval: value = self._maxval self._curval = value def inc(self, n=1): """Increments the progress bar's curval by n, or by 1 if n is not provided. (Note that n may be negative, in which case the effect is a decrement.) The progress bar is updated to reflect the change. If the bar is indeterminate, this causes one ``spin'' of the barber pole. The resulting curval is coerced between 0 and maxval if incrementing causes it to fall outside this range. """ self.set(self._curval+n) class TermProgressBar(ProgressBar): """A tty progress bar similar to wget's.""" def __init__(self, fd=sys.stdout, **kwargs): ProgressBar.__init__(self, **kwargs) lines, self.term_columns = get_term_size(fd) self.file = fd self._min_columns = 11 self._max_columns = 80 # for indeterminate mode, ranges from 0.0 to 1.0 self._position = 0.0 def set(self, value, maxval=None): ProgressBar.set(self, value, maxval=maxval) self._display_image(self._create_image()) def _display_image(self, image): self.file.write('\r') self.file.write(image) self.file.flush() def _create_image(self): cols = self.term_columns if cols > self._max_columns: cols = self._max_columns min_columns = self._min_columns curval = self._curval maxval = self._maxval position = self._position percentage_str_width = 5 square_brackets_width = 2 if cols < percentage_str_width: return "" bar_space = cols - percentage_str_width - square_brackets_width - 1 if self._desc: bar_space -= self._desc_max_length if maxval == 0: max_bar_width = bar_space-3 _percent = "".ljust(percentage_str_width) if cols < min_columns: return "" if position <= 0.5: offset = 2 * position else: offset = 2 * (1 - position) delta = 0.5 / max_bar_width position += delta if position >= 1.0: position = 0.0 # make sure it touches the ends if 1.0 - position < delta: position = 1.0 if position < 0.5 and 0.5 - position < delta: position = 0.5 self._position = position bar_width = 
int(offset * max_bar_width) image = "%s%s%s" % (self._desc, _percent, "[" + (bar_width * " ") + \ "<=>" + ((max_bar_width - bar_width) * " ") + "]") return image else: percentage = 100 * curval // maxval max_bar_width = bar_space - 1 _percent = ("%d%% " % percentage).rjust(percentage_str_width) image = "%s%s" % (self._desc, _percent) if cols < min_columns: return image offset = curval / maxval bar_width = int(offset * max_bar_width) image = image + "[" + (bar_width * "=") + \ ">" + ((max_bar_width - bar_width) * " ") + "]" return image _color_map_loaded = False def _init(config_root='/'): """ Load color.map from the given config_root. This is called automatically on first access of the codes or _styles attributes (unless it has already been called for some other reason). """ global _color_map_loaded, codes, _styles if _color_map_loaded: return _color_map_loaded = True codes = object.__getattribute__(codes, '_attr') _styles = object.__getattribute__(_styles, '_attr') for k, v in codes.items(): codes[k] = _unicode_decode(v) for k, v in _styles.items(): _styles[k] = _unicode_decode(v) try: _parse_color_map(config_root=config_root, onerror=lambda e: writemsg("%s\n" % str(e), noiselevel=-1)) except FileNotFound: pass except PermissionDenied as e: writemsg(_("Permission denied: '%s'\n") % str(e), noiselevel=-1) del e except PortageException as e: writemsg("%s\n" % str(e), noiselevel=-1) del e class _LazyInitColorMap(portage.proxy.objectproxy.ObjectProxy): __slots__ = ('_attr',) def __init__(self, attr): portage.proxy.objectproxy.ObjectProxy.__init__(self) object.__setattr__(self, '_attr', attr) def _get_target(self): _init() return object.__getattribute__(self, '_attr') codes = _LazyInitColorMap(codes) _styles = _LazyInitColorMap(_styles)
gpl-2.0
mehrdada/grpc
third_party/nanopb/tests/site_scons/site_tools/nanopb.py
79
4144
''' Scons Builder for nanopb .proto definitions.

This tool will locate the nanopb generator and use it to generate .pb.c and
.pb.h files from the .proto files.

Basic example
-------------
# Build myproto.pb.c and myproto.pb.h from myproto.proto
myproto = env.NanopbProto("myproto")

# Link nanopb core to the program
env.Append(CPPPATH = "$NANOB")
myprog = env.Program(["myprog.c", myproto, "$NANOPB/pb_encode.c", "$NANOPB/pb_decode.c"])

Configuration options
---------------------
Normally, this script is used in the test environment of nanopb and it locates
the nanopb generator by a relative path. If this script is used in another
application, the path to nanopb root directory has to be defined:

env.SetDefault(NANOPB = "path/to/nanopb")

Additionally, the path to protoc and the options to give to protoc can be
defined manually:

env.SetDefault(PROTOC = "path/to/protoc")
env.SetDefault(PROTOCFLAGS = "--plugin=protoc-gen-nanopb=path/to/protoc-gen-nanopb")
'''

import SCons.Action
import SCons.Builder
# Imported explicitly: the original relied on these submodules being
# pulled in as a side effect of the imports above, which would raise
# AttributeError on "SCons.Errors"/"SCons.Warnings" if that ever changed.
import SCons.Errors
import SCons.Util
import SCons.Warnings
import os.path

class NanopbWarning(SCons.Warnings.Warning):
    pass
SCons.Warnings.enableWarningClass(NanopbWarning)

def _detect_nanopb(env):
    '''Find the path to nanopb root directory.'''
    if 'NANOPB' in env:
        # Use nanopb dir given by user
        return env['NANOPB']

    p = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
    if os.path.isdir(p) and os.path.isfile(os.path.join(p, 'pb.h')):
        # Assume we are running under tests/site_scons/site_tools
        return p

    raise SCons.Errors.StopError(NanopbWarning,
        "Could not find the nanopb root directory")

def _detect_protoc(env):
    '''Find the path to the protoc compiler.'''
    if 'PROTOC' in env:
        # Use protoc defined by user
        return env['PROTOC']

    n = _detect_nanopb(env)
    p1 = os.path.join(n, 'generator-bin', 'protoc' + env['PROGSUFFIX'])
    if os.path.exists(p1):
        # Use protoc bundled with binary package
        return env['ESCAPE'](p1)

    p = env.WhereIs('protoc')
    if p:
        # Use protoc from path
        return env['ESCAPE'](p)

    raise SCons.Errors.StopError(NanopbWarning,
        "Could not find the protoc compiler")

def _detect_protocflags(env):
    '''Find the options to use for protoc.'''
    if 'PROTOCFLAGS' in env:
        return env['PROTOCFLAGS']

    p = _detect_protoc(env)
    n = _detect_nanopb(env)
    p1 = os.path.join(n, 'generator-bin', 'protoc' + env['PROGSUFFIX'])
    if p == env['ESCAPE'](p1):
        # Using the bundled protoc, no options needed
        return ''

    e = env['ESCAPE']
    if env['PLATFORM'] == 'win32':
        return e('--plugin=protoc-gen-nanopb=' + os.path.join(n, 'generator', 'protoc-gen-nanopb.bat'))
    else:
        return e('--plugin=protoc-gen-nanopb=' + os.path.join(n, 'generator', 'protoc-gen-nanopb'))

def _nanopb_proto_actions(source, target, env, for_signature):
    '''Build the protoc command line for one .proto source.'''
    esc = env['ESCAPE']
    dirs = ' '.join(['-I' + esc(env.GetBuildPath(d)) for d in env['PROTOCPATH']])
    return '$PROTOC $PROTOCFLAGS %s --nanopb_out=. %s' % (dirs, esc(str(source[0])))

def _nanopb_proto_emitter(target, source, env):
    '''Declare the generated .pb.h and the optional .options side file.'''
    basename = os.path.splitext(str(source[0]))[0]
    target.append(basename + '.pb.h')

    # If a matching .options file exists, it influences generation and
    # must be part of the dependency graph.
    if os.path.exists(basename + '.options'):
        source.append(basename + '.options')

    return target, source

_nanopb_proto_builder = SCons.Builder.Builder(
    generator = _nanopb_proto_actions,
    suffix = '.pb.c',
    src_suffix = '.proto',
    emitter = _nanopb_proto_emitter)

def generate(env):
    '''Add Builder for nanopb protos.'''
    env['NANOPB'] = _detect_nanopb(env)
    env['PROTOC'] = _detect_protoc(env)
    env['PROTOCFLAGS'] = _detect_protocflags(env)

    env.SetDefault(PROTOCPATH = ['.', os.path.join(env['NANOPB'], 'generator', 'proto')])

    env.SetDefault(NANOPB_PROTO_CMD = '$PROTOC $PROTOCFLAGS --nanopb_out=. $SOURCES')
    env['BUILDERS']['NanopbProto'] = _nanopb_proto_builder

def exists(env):
    '''Report whether the tool prerequisites are available.'''
    # Bug fix: this used to call the undefined name _detect_protoc_opts,
    # which raised NameError; the flags detector is _detect_protocflags.
    return _detect_protoc(env) and _detect_protocflags(env)
apache-2.0
andreebrazeau/vindicia-python
setup.py
1
1270
from setuptools import setup import os.path import re with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as README: DESCRIPTION = README.read() VERSION_RE = re.compile("^__version__ = '(.+)'$", flags=re.MULTILINE) with open(os.path.join(os.path.dirname(__file__), 'vindicia', '__init__.py')) as PACKAGE: VERSION = VERSION_RE.search(PACKAGE.read()).group(1) requires = [ 'suds-jurko', ] tests_require = [ 'nose', 'mock', ] setup( name='vindicia', version=VERSION, description="A python client wrapper to the Vindicia api.", long_description=DESCRIPTION, author='Andree Brazeau', author_email='andreebrazeau@gmail.com', license='MIT', classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Web Environment', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python', 'Programming Language :: Python :: 2.7', 'Topic :: Internet :: WWW/HTTP', ], packages=['vindicia'], install_requires=requires, tests_require=tests_require, test_suite='unittest2.collector', zip_safe=True, include_package_data=True, )
mit
saturday-shi/spark
python/pyspark/mllib/linalg/__init__.py
54
45829
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ MLlib utilities for linear algebra. For dense vectors, MLlib uses the NumPy C{array} type, so you can simply pass NumPy arrays around. For sparse vectors, users can construct a L{SparseVector} object from MLlib or pass SciPy C{scipy.sparse} column vectors if SciPy is available in their environment. """ import sys import array import struct if sys.version >= '3': basestring = str xrange = range import copyreg as copy_reg long = int else: from itertools import izip as zip import copy_reg import numpy as np from pyspark import since from pyspark.ml import linalg as newlinalg from pyspark.sql.types import UserDefinedType, StructField, StructType, ArrayType, DoubleType, \ IntegerType, ByteType, BooleanType __all__ = ['Vector', 'DenseVector', 'SparseVector', 'Vectors', 'Matrix', 'DenseMatrix', 'SparseMatrix', 'Matrices', 'QRDecomposition'] if sys.version_info[:2] == (2, 7): # speed up pickling array in Python 2.7 def fast_pickle_array(ar): return array.array, (ar.typecode, ar.tostring()) copy_reg.pickle(array.array, fast_pickle_array) # Check whether we have SciPy. 
MLlib works without it too, but if we have it, some methods, # such as _dot and _serialize_double_vector, start to support scipy.sparse matrices. try: import scipy.sparse _have_scipy = True except: # No SciPy in environment, but that's okay _have_scipy = False def _convert_to_vector(l): if isinstance(l, Vector): return l elif type(l) in (array.array, np.array, np.ndarray, list, tuple, xrange): return DenseVector(l) elif _have_scipy and scipy.sparse.issparse(l): assert l.shape[1] == 1, "Expected column vector" # Make sure the converted csc_matrix has sorted indices. csc = l.tocsc() if not csc.has_sorted_indices: csc.sort_indices() return SparseVector(l.shape[0], csc.indices, csc.data) else: raise TypeError("Cannot convert type %s into Vector" % type(l)) def _vector_size(v): """ Returns the size of the vector. >>> _vector_size([1., 2., 3.]) 3 >>> _vector_size((1., 2., 3.)) 3 >>> _vector_size(array.array('d', [1., 2., 3.])) 3 >>> _vector_size(np.zeros(3)) 3 >>> _vector_size(np.zeros((3, 1))) 3 >>> _vector_size(np.zeros((1, 3))) Traceback (most recent call last): ... ValueError: Cannot treat an ndarray of shape (1, 3) as a vector """ if isinstance(v, Vector): return len(v) elif type(v) in (array.array, list, tuple, xrange): return len(v) elif type(v) == np.ndarray: if v.ndim == 1 or (v.ndim == 2 and v.shape[1] == 1): return len(v) else: raise ValueError("Cannot treat an ndarray of shape %s as a vector" % str(v.shape)) elif _have_scipy and scipy.sparse.issparse(v): assert v.shape[1] == 1, "Expected column vector" return v.shape[0] else: raise TypeError("Cannot treat type %s as a vector" % type(v)) def _format_float(f, digits=4): s = str(round(f, digits)) if '.' 
in s: s = s[:s.index('.') + 1 + digits] return s def _format_float_list(l): return [_format_float(x) for x in l] def _double_to_long_bits(value): if np.isnan(value): value = float('nan') # pack double into 64 bits, then unpack as long int return struct.unpack('Q', struct.pack('d', value))[0] class VectorUDT(UserDefinedType): """ SQL user-defined type (UDT) for Vector. """ @classmethod def sqlType(cls): return StructType([ StructField("type", ByteType(), False), StructField("size", IntegerType(), True), StructField("indices", ArrayType(IntegerType(), False), True), StructField("values", ArrayType(DoubleType(), False), True)]) @classmethod def module(cls): return "pyspark.mllib.linalg" @classmethod def scalaUDT(cls): return "org.apache.spark.mllib.linalg.VectorUDT" def serialize(self, obj): if isinstance(obj, SparseVector): indices = [int(i) for i in obj.indices] values = [float(v) for v in obj.values] return (0, obj.size, indices, values) elif isinstance(obj, DenseVector): values = [float(v) for v in obj] return (1, None, None, values) else: raise TypeError("cannot serialize %r of type %r" % (obj, type(obj))) def deserialize(self, datum): assert len(datum) == 4, \ "VectorUDT.deserialize given row with length %d but requires 4" % len(datum) tpe = datum[0] if tpe == 0: return SparseVector(datum[1], datum[2], datum[3]) elif tpe == 1: return DenseVector(datum[3]) else: raise ValueError("do not recognize type %r" % tpe) def simpleString(self): return "vector" class MatrixUDT(UserDefinedType): """ SQL user-defined type (UDT) for Matrix. 
""" @classmethod def sqlType(cls): return StructType([ StructField("type", ByteType(), False), StructField("numRows", IntegerType(), False), StructField("numCols", IntegerType(), False), StructField("colPtrs", ArrayType(IntegerType(), False), True), StructField("rowIndices", ArrayType(IntegerType(), False), True), StructField("values", ArrayType(DoubleType(), False), True), StructField("isTransposed", BooleanType(), False)]) @classmethod def module(cls): return "pyspark.mllib.linalg" @classmethod def scalaUDT(cls): return "org.apache.spark.mllib.linalg.MatrixUDT" def serialize(self, obj): if isinstance(obj, SparseMatrix): colPtrs = [int(i) for i in obj.colPtrs] rowIndices = [int(i) for i in obj.rowIndices] values = [float(v) for v in obj.values] return (0, obj.numRows, obj.numCols, colPtrs, rowIndices, values, bool(obj.isTransposed)) elif isinstance(obj, DenseMatrix): values = [float(v) for v in obj.values] return (1, obj.numRows, obj.numCols, None, None, values, bool(obj.isTransposed)) else: raise TypeError("cannot serialize type %r" % (type(obj))) def deserialize(self, datum): assert len(datum) == 7, \ "MatrixUDT.deserialize given row with length %d but requires 7" % len(datum) tpe = datum[0] if tpe == 0: return SparseMatrix(*datum[1:]) elif tpe == 1: return DenseMatrix(datum[1], datum[2], datum[5], datum[6]) else: raise ValueError("do not recognize type %r" % tpe) def simpleString(self): return "matrix" class Vector(object): __UDT__ = VectorUDT() """ Abstract class for DenseVector and SparseVector """ def toArray(self): """ Convert the vector into an numpy.ndarray :return: numpy.ndarray """ raise NotImplementedError def asML(self): """ Convert this vector to the new mllib-local representation. This does NOT copy the data; it copies references. :return: :py:class:`pyspark.ml.linalg.Vector` """ raise NotImplementedError class DenseVector(Vector): """ A dense vector represented by a value array. 
We use numpy array for storage and arithmetics will be delegated to the underlying numpy array. >>> v = Vectors.dense([1.0, 2.0]) >>> u = Vectors.dense([3.0, 4.0]) >>> v + u DenseVector([4.0, 6.0]) >>> 2 - v DenseVector([1.0, 0.0]) >>> v / 2 DenseVector([0.5, 1.0]) >>> v * u DenseVector([3.0, 8.0]) >>> u / v DenseVector([3.0, 2.0]) >>> u % 2 DenseVector([1.0, 0.0]) """ def __init__(self, ar): if isinstance(ar, bytes): ar = np.frombuffer(ar, dtype=np.float64) elif not isinstance(ar, np.ndarray): ar = np.array(ar, dtype=np.float64) if ar.dtype != np.float64: ar = ar.astype(np.float64) self.array = ar @staticmethod def parse(s): """ Parse string representation back into the DenseVector. >>> DenseVector.parse(' [ 0.0,1.0,2.0, 3.0]') DenseVector([0.0, 1.0, 2.0, 3.0]) """ start = s.find('[') if start == -1: raise ValueError("Array should start with '['.") end = s.find(']') if end == -1: raise ValueError("Array should end with ']'.") s = s[start + 1: end] try: values = [float(val) for val in s.split(',') if val] except ValueError: raise ValueError("Unable to parse values from %s" % s) return DenseVector(values) def __reduce__(self): return DenseVector, (self.array.tostring(),) def numNonzeros(self): """ Number of nonzero elements. This scans all active values and count non zeros """ return np.count_nonzero(self.array) def norm(self, p): """ Calculates the norm of a DenseVector. >>> a = DenseVector([0, -1, 2, -3]) >>> a.norm(2) 3.7... >>> a.norm(1) 6.0 """ return np.linalg.norm(self.array, p) def dot(self, other): """ Compute the dot product of two Vectors. We support (Numpy array, list, SparseVector, or SciPy sparse) and a target NumPy array that is either 1- or 2-dimensional. Equivalent to calling numpy.dot of the two vectors. 
>>> dense = DenseVector(array.array('d', [1., 2.])) >>> dense.dot(dense) 5.0 >>> dense.dot(SparseVector(2, [0, 1], [2., 1.])) 4.0 >>> dense.dot(range(1, 3)) 5.0 >>> dense.dot(np.array(range(1, 3))) 5.0 >>> dense.dot([1.,]) Traceback (most recent call last): ... AssertionError: dimension mismatch >>> dense.dot(np.reshape([1., 2., 3., 4.], (2, 2), order='F')) array([ 5., 11.]) >>> dense.dot(np.reshape([1., 2., 3.], (3, 1), order='F')) Traceback (most recent call last): ... AssertionError: dimension mismatch """ if type(other) == np.ndarray: if other.ndim > 1: assert len(self) == other.shape[0], "dimension mismatch" return np.dot(self.array, other) elif _have_scipy and scipy.sparse.issparse(other): assert len(self) == other.shape[0], "dimension mismatch" return other.transpose().dot(self.toArray()) else: assert len(self) == _vector_size(other), "dimension mismatch" if isinstance(other, SparseVector): return other.dot(self) elif isinstance(other, Vector): return np.dot(self.toArray(), other.toArray()) else: return np.dot(self.toArray(), other) def squared_distance(self, other): """ Squared distance of two Vectors. >>> dense1 = DenseVector(array.array('d', [1., 2.])) >>> dense1.squared_distance(dense1) 0.0 >>> dense2 = np.array([2., 1.]) >>> dense1.squared_distance(dense2) 2.0 >>> dense3 = [2., 1.] >>> dense1.squared_distance(dense3) 2.0 >>> sparse1 = SparseVector(2, [0, 1], [2., 1.]) >>> dense1.squared_distance(sparse1) 2.0 >>> dense1.squared_distance([1.,]) Traceback (most recent call last): ... AssertionError: dimension mismatch >>> dense1.squared_distance(SparseVector(1, [0,], [1.,])) Traceback (most recent call last): ... 
AssertionError: dimension mismatch """ assert len(self) == _vector_size(other), "dimension mismatch" if isinstance(other, SparseVector): return other.squared_distance(self) elif _have_scipy and scipy.sparse.issparse(other): return _convert_to_vector(other).squared_distance(self) if isinstance(other, Vector): other = other.toArray() elif not isinstance(other, np.ndarray): other = np.array(other) diff = self.toArray() - other return np.dot(diff, diff) def toArray(self): """ Returns an numpy.ndarray """ return self.array def asML(self): """ Convert this vector to the new mllib-local representation. This does NOT copy the data; it copies references. :return: :py:class:`pyspark.ml.linalg.DenseVector` .. versionadded:: 2.0.0 """ return newlinalg.DenseVector(self.array) @property def values(self): """ Returns a list of values """ return self.array def __getitem__(self, item): return self.array[item] def __len__(self): return len(self.array) def __str__(self): return "[" + ",".join([str(v) for v in self.array]) + "]" def __repr__(self): return "DenseVector([%s])" % (', '.join(_format_float(i) for i in self.array)) def __eq__(self, other): if isinstance(other, DenseVector): return np.array_equal(self.array, other.array) elif isinstance(other, SparseVector): if len(self) != other.size: return False return Vectors._equals(list(xrange(len(self))), self.array, other.indices, other.values) return False def __ne__(self, other): return not self == other def __hash__(self): size = len(self) result = 31 + size nnz = 0 i = 0 while i < size and nnz < 128: if self.array[i] != 0: result = 31 * result + i bits = _double_to_long_bits(self.array[i]) result = 31 * result + (bits ^ (bits >> 32)) nnz += 1 i += 1 return result def __getattr__(self, item): return getattr(self.array, item) def _delegate(op): def func(self, other): if isinstance(other, DenseVector): other = other.array return DenseVector(getattr(self.array, op)(other)) return func __neg__ = _delegate("__neg__") __add__ = 
_delegate("__add__") __sub__ = _delegate("__sub__") __mul__ = _delegate("__mul__") __div__ = _delegate("__div__") __truediv__ = _delegate("__truediv__") __mod__ = _delegate("__mod__") __radd__ = _delegate("__radd__") __rsub__ = _delegate("__rsub__") __rmul__ = _delegate("__rmul__") __rdiv__ = _delegate("__rdiv__") __rtruediv__ = _delegate("__rtruediv__") __rmod__ = _delegate("__rmod__") class SparseVector(Vector): """ A simple sparse vector class for passing data to MLlib. Users may alternatively pass SciPy's {scipy.sparse} data types. """ def __init__(self, size, *args): """ Create a sparse vector, using either a dictionary, a list of (index, value) pairs, or two separate arrays of indices and values (sorted by index). :param size: Size of the vector. :param args: Active entries, as a dictionary {index: value, ...}, a list of tuples [(index, value), ...], or a list of strictly increasing indices and a list of corresponding values [index, ...], [value, ...]. Inactive entries are treated as zeros. >>> SparseVector(4, {1: 1.0, 3: 5.5}) SparseVector(4, {1: 1.0, 3: 5.5}) >>> SparseVector(4, [(1, 1.0), (3, 5.5)]) SparseVector(4, {1: 1.0, 3: 5.5}) >>> SparseVector(4, [1, 3], [1.0, 5.5]) SparseVector(4, {1: 1.0, 3: 5.5}) """ self.size = int(size) """ Size of the vector. """ assert 1 <= len(args) <= 2, "must pass either 2 or 3 arguments" if len(args) == 1: pairs = args[0] if type(pairs) == dict: pairs = pairs.items() pairs = sorted(pairs) self.indices = np.array([p[0] for p in pairs], dtype=np.int32) """ A list of indices corresponding to active entries. """ self.values = np.array([p[1] for p in pairs], dtype=np.float64) """ A list of values corresponding to active entries. 
""" else: if isinstance(args[0], bytes): assert isinstance(args[1], bytes), "values should be string too" if args[0]: self.indices = np.frombuffer(args[0], np.int32) self.values = np.frombuffer(args[1], np.float64) else: # np.frombuffer() doesn't work well with empty string in older version self.indices = np.array([], dtype=np.int32) self.values = np.array([], dtype=np.float64) else: self.indices = np.array(args[0], dtype=np.int32) self.values = np.array(args[1], dtype=np.float64) assert len(self.indices) == len(self.values), "index and value arrays not same length" for i in xrange(len(self.indices) - 1): if self.indices[i] >= self.indices[i + 1]: raise TypeError( "Indices %s and %s are not strictly increasing" % (self.indices[i], self.indices[i + 1])) def numNonzeros(self): """ Number of nonzero elements. This scans all active values and count non zeros. """ return np.count_nonzero(self.values) def norm(self, p): """ Calculates the norm of a SparseVector. >>> a = SparseVector(4, [0, 1], [3., -4.]) >>> a.norm(1) 7.0 >>> a.norm(2) 5.0 """ return np.linalg.norm(self.values, p) def __reduce__(self): return ( SparseVector, (self.size, self.indices.tostring(), self.values.tostring())) @staticmethod def parse(s): """ Parse string representation back into the SparseVector. >>> SparseVector.parse(' (4, [0,1 ],[ 4.0,5.0] )') SparseVector(4, {0: 4.0, 1: 5.0}) """ start = s.find('(') if start == -1: raise ValueError("Tuple should start with '('") end = s.find(')') if end == -1: raise ValueError("Tuple should end with ')'") s = s[start + 1: end].strip() size = s[: s.find(',')] try: size = int(size) except ValueError: raise ValueError("Cannot parse size %s." 
% size) ind_start = s.find('[') if ind_start == -1: raise ValueError("Indices array should start with '['.") ind_end = s.find(']') if ind_end == -1: raise ValueError("Indices array should end with ']'") new_s = s[ind_start + 1: ind_end] ind_list = new_s.split(',') try: indices = [int(ind) for ind in ind_list if ind] except ValueError: raise ValueError("Unable to parse indices from %s." % new_s) s = s[ind_end + 1:].strip() val_start = s.find('[') if val_start == -1: raise ValueError("Values array should start with '['.") val_end = s.find(']') if val_end == -1: raise ValueError("Values array should end with ']'.") val_list = s[val_start + 1: val_end].split(',') try: values = [float(val) for val in val_list if val] except ValueError: raise ValueError("Unable to parse values from %s." % s) return SparseVector(size, indices, values) def dot(self, other): """ Dot product with a SparseVector or 1- or 2-dimensional Numpy array. >>> a = SparseVector(4, [1, 3], [3.0, 4.0]) >>> a.dot(a) 25.0 >>> a.dot(array.array('d', [1., 2., 3., 4.])) 22.0 >>> b = SparseVector(4, [2], [1.0]) >>> a.dot(b) 0.0 >>> a.dot(np.array([[1, 1], [2, 2], [3, 3], [4, 4]])) array([ 22., 22.]) >>> a.dot([1., 2., 3.]) Traceback (most recent call last): ... AssertionError: dimension mismatch >>> a.dot(np.array([1., 2.])) Traceback (most recent call last): ... AssertionError: dimension mismatch >>> a.dot(DenseVector([1., 2.])) Traceback (most recent call last): ... AssertionError: dimension mismatch >>> a.dot(np.zeros((3, 2))) Traceback (most recent call last): ... 
AssertionError: dimension mismatch """ if isinstance(other, np.ndarray): if other.ndim not in [2, 1]: raise ValueError("Cannot call dot with %d-dimensional array" % other.ndim) assert len(self) == other.shape[0], "dimension mismatch" return np.dot(self.values, other[self.indices]) assert len(self) == _vector_size(other), "dimension mismatch" if isinstance(other, DenseVector): return np.dot(other.array[self.indices], self.values) elif isinstance(other, SparseVector): # Find out common indices. self_cmind = np.in1d(self.indices, other.indices, assume_unique=True) self_values = self.values[self_cmind] if self_values.size == 0: return 0.0 else: other_cmind = np.in1d(other.indices, self.indices, assume_unique=True) return np.dot(self_values, other.values[other_cmind]) else: return self.dot(_convert_to_vector(other)) def squared_distance(self, other): """ Squared distance from a SparseVector or 1-dimensional NumPy array. >>> a = SparseVector(4, [1, 3], [3.0, 4.0]) >>> a.squared_distance(a) 0.0 >>> a.squared_distance(array.array('d', [1., 2., 3., 4.])) 11.0 >>> a.squared_distance(np.array([1., 2., 3., 4.])) 11.0 >>> b = SparseVector(4, [2], [1.0]) >>> a.squared_distance(b) 26.0 >>> b.squared_distance(a) 26.0 >>> b.squared_distance([1., 2.]) Traceback (most recent call last): ... AssertionError: dimension mismatch >>> b.squared_distance(SparseVector(3, [1,], [1.0,])) Traceback (most recent call last): ... 
AssertionError: dimension mismatch """ assert len(self) == _vector_size(other), "dimension mismatch" if isinstance(other, np.ndarray) or isinstance(other, DenseVector): if isinstance(other, np.ndarray) and other.ndim != 1: raise Exception("Cannot call squared_distance with %d-dimensional array" % other.ndim) if isinstance(other, DenseVector): other = other.array sparse_ind = np.zeros(other.size, dtype=bool) sparse_ind[self.indices] = True dist = other[sparse_ind] - self.values result = np.dot(dist, dist) other_ind = other[~sparse_ind] result += np.dot(other_ind, other_ind) return result elif isinstance(other, SparseVector): result = 0.0 i, j = 0, 0 while i < len(self.indices) and j < len(other.indices): if self.indices[i] == other.indices[j]: diff = self.values[i] - other.values[j] result += diff * diff i += 1 j += 1 elif self.indices[i] < other.indices[j]: result += self.values[i] * self.values[i] i += 1 else: result += other.values[j] * other.values[j] j += 1 while i < len(self.indices): result += self.values[i] * self.values[i] i += 1 while j < len(other.indices): result += other.values[j] * other.values[j] j += 1 return result else: return self.squared_distance(_convert_to_vector(other)) def toArray(self): """ Returns a copy of this SparseVector as a 1-dimensional NumPy array. """ arr = np.zeros((self.size,), dtype=np.float64) arr[self.indices] = self.values return arr def asML(self): """ Convert this vector to the new mllib-local representation. This does NOT copy the data; it copies references. :return: :py:class:`pyspark.ml.linalg.SparseVector` .. 
versionadded:: 2.0.0 """ return newlinalg.SparseVector(self.size, self.indices, self.values) def __len__(self): return self.size def __str__(self): inds = "[" + ",".join([str(i) for i in self.indices]) + "]" vals = "[" + ",".join([str(v) for v in self.values]) + "]" return "(" + ",".join((str(self.size), inds, vals)) + ")" def __repr__(self): inds = self.indices vals = self.values entries = ", ".join(["{0}: {1}".format(inds[i], _format_float(vals[i])) for i in xrange(len(inds))]) return "SparseVector({0}, {{{1}}})".format(self.size, entries) def __eq__(self, other): if isinstance(other, SparseVector): return other.size == self.size and np.array_equal(other.indices, self.indices) \ and np.array_equal(other.values, self.values) elif isinstance(other, DenseVector): if self.size != len(other): return False return Vectors._equals(self.indices, self.values, list(xrange(len(other))), other.array) return False def __getitem__(self, index): inds = self.indices vals = self.values if not isinstance(index, int): raise TypeError( "Indices must be of type integer, got type %s" % type(index)) if index >= self.size or index < -self.size: raise IndexError("Index %d out of bounds." % index) if index < 0: index += self.size if (inds.size == 0) or (index > inds.item(-1)): return 0. insert_index = np.searchsorted(inds, index) row_ind = inds[insert_index] if row_ind == index: return vals[insert_index] return 0. def __ne__(self, other): return not self.__eq__(other) def __hash__(self): result = 31 + self.size nnz = 0 i = 0 while i < len(self.values) and nnz < 128: if self.values[i] != 0: result = 31 * result + int(self.indices[i]) bits = _double_to_long_bits(self.values[i]) result = 31 * result + (bits ^ (bits >> 32)) nnz += 1 i += 1 return result class Vectors(object): """ Factory methods for working with vectors. .. note:: Dense vectors are simply represented as NumPy array objects, so there is no need to covert them for use in MLlib. 
For sparse vectors, the factory methods in this class create an MLlib-compatible type, or users can pass in SciPy's C{scipy.sparse} column vectors. """ @staticmethod def sparse(size, *args): """ Create a sparse vector, using either a dictionary, a list of (index, value) pairs, or two separate arrays of indices and values (sorted by index). :param size: Size of the vector. :param args: Non-zero entries, as a dictionary, list of tuples, or two sorted lists containing indices and values. >>> Vectors.sparse(4, {1: 1.0, 3: 5.5}) SparseVector(4, {1: 1.0, 3: 5.5}) >>> Vectors.sparse(4, [(1, 1.0), (3, 5.5)]) SparseVector(4, {1: 1.0, 3: 5.5}) >>> Vectors.sparse(4, [1, 3], [1.0, 5.5]) SparseVector(4, {1: 1.0, 3: 5.5}) """ return SparseVector(size, *args) @staticmethod def dense(*elements): """ Create a dense vector of 64-bit floats from a Python list or numbers. >>> Vectors.dense([1, 2, 3]) DenseVector([1.0, 2.0, 3.0]) >>> Vectors.dense(1.0, 2.0) DenseVector([1.0, 2.0]) """ if len(elements) == 1 and not isinstance(elements[0], (float, int, long)): # it's list, numpy.array or other iterable object. elements = elements[0] return DenseVector(elements) @staticmethod def fromML(vec): """ Convert a vector from the new mllib-local representation. This does NOT copy the data; it copies references. :param vec: a :py:class:`pyspark.ml.linalg.Vector` :return: a :py:class:`pyspark.mllib.linalg.Vector` .. versionadded:: 2.0.0 """ if isinstance(vec, newlinalg.DenseVector): return DenseVector(vec.array) elif isinstance(vec, newlinalg.SparseVector): return SparseVector(vec.size, vec.indices, vec.values) else: raise TypeError("Unsupported vector type %s" % type(vec)) @staticmethod def stringify(vector): """ Converts a vector into a string, which can be recognized by Vectors.parse(). 
>>> Vectors.stringify(Vectors.sparse(2, [1], [1.0])) '(2,[1],[1.0])' >>> Vectors.stringify(Vectors.dense([0.0, 1.0])) '[0.0,1.0]' """ return str(vector) @staticmethod def squared_distance(v1, v2): """ Squared distance between two vectors. a and b can be of type SparseVector, DenseVector, np.ndarray or array.array. >>> a = Vectors.sparse(4, [(0, 1), (3, 4)]) >>> b = Vectors.dense([2, 5, 4, 1]) >>> a.squared_distance(b) 51.0 """ v1, v2 = _convert_to_vector(v1), _convert_to_vector(v2) return v1.squared_distance(v2) @staticmethod def norm(vector, p): """ Find norm of the given vector. """ return _convert_to_vector(vector).norm(p) @staticmethod def parse(s): """Parse a string representation back into the Vector. >>> Vectors.parse('[2,1,2 ]') DenseVector([2.0, 1.0, 2.0]) >>> Vectors.parse(' ( 100, [0], [2])') SparseVector(100, {0: 2.0}) """ if s.find('(') == -1 and s.find('[') != -1: return DenseVector.parse(s) elif s.find('(') != -1: return SparseVector.parse(s) else: raise ValueError( "Cannot find tokens '[' or '(' from the input string.") @staticmethod def zeros(size): return DenseVector(np.zeros(size)) @staticmethod def _equals(v1_indices, v1_values, v2_indices, v2_values): """ Check equality between sparse/dense vectors, v1_indices and v2_indices assume to be strictly increasing. """ v1_size = len(v1_values) v2_size = len(v2_values) k1 = 0 k2 = 0 all_equal = True while all_equal: while k1 < v1_size and v1_values[k1] == 0: k1 += 1 while k2 < v2_size and v2_values[k2] == 0: k2 += 1 if k1 >= v1_size or k2 >= v2_size: return k1 >= v1_size and k2 >= v2_size all_equal = v1_indices[k1] == v2_indices[k2] and v1_values[k1] == v2_values[k2] k1 += 1 k2 += 1 return all_equal class Matrix(object): __UDT__ = MatrixUDT() """ Represents a local matrix. """ def __init__(self, numRows, numCols, isTransposed=False): self.numRows = numRows self.numCols = numCols self.isTransposed = isTransposed def toArray(self): """ Returns its elements in a NumPy ndarray. 
""" raise NotImplementedError def asML(self): """ Convert this matrix to the new mllib-local representation. This does NOT copy the data; it copies references. """ raise NotImplementedError @staticmethod def _convert_to_array(array_like, dtype): """ Convert Matrix attributes which are array-like or buffer to array. """ if isinstance(array_like, bytes): return np.frombuffer(array_like, dtype=dtype) return np.asarray(array_like, dtype=dtype) class DenseMatrix(Matrix): """ Column-major dense matrix. """ def __init__(self, numRows, numCols, values, isTransposed=False): Matrix.__init__(self, numRows, numCols, isTransposed) values = self._convert_to_array(values, np.float64) assert len(values) == numRows * numCols self.values = values def __reduce__(self): return DenseMatrix, ( self.numRows, self.numCols, self.values.tostring(), int(self.isTransposed)) def __str__(self): """ Pretty printing of a DenseMatrix >>> dm = DenseMatrix(2, 2, range(4)) >>> print(dm) DenseMatrix([[ 0., 2.], [ 1., 3.]]) >>> dm = DenseMatrix(2, 2, range(4), isTransposed=True) >>> print(dm) DenseMatrix([[ 0., 1.], [ 2., 3.]]) """ # Inspired by __repr__ in scipy matrices. array_lines = repr(self.toArray()).splitlines() # We need to adjust six spaces which is the difference in number # of letters between "DenseMatrix" and "array" x = '\n'.join([(" " * 6 + line) for line in array_lines[1:]]) return array_lines[0].replace("array", "DenseMatrix") + "\n" + x def __repr__(self): """ Representation of a DenseMatrix >>> dm = DenseMatrix(2, 2, range(4)) >>> dm DenseMatrix(2, 2, [0.0, 1.0, 2.0, 3.0], False) """ # If the number of values are less than seventeen then return as it is. # Else return first eight values and last eight values. 
if len(self.values) < 17: entries = _format_float_list(self.values) else: entries = ( _format_float_list(self.values[:8]) + ["..."] + _format_float_list(self.values[-8:]) ) entries = ", ".join(entries) return "DenseMatrix({0}, {1}, [{2}], {3})".format( self.numRows, self.numCols, entries, self.isTransposed) def toArray(self): """ Return an numpy.ndarray >>> m = DenseMatrix(2, 2, range(4)) >>> m.toArray() array([[ 0., 2.], [ 1., 3.]]) """ if self.isTransposed: return np.asfortranarray( self.values.reshape((self.numRows, self.numCols))) else: return self.values.reshape((self.numRows, self.numCols), order='F') def toSparse(self): """Convert to SparseMatrix""" if self.isTransposed: values = np.ravel(self.toArray(), order='F') else: values = self.values indices = np.nonzero(values)[0] colCounts = np.bincount(indices // self.numRows) colPtrs = np.cumsum(np.hstack( (0, colCounts, np.zeros(self.numCols - colCounts.size)))) values = values[indices] rowIndices = indices % self.numRows return SparseMatrix(self.numRows, self.numCols, colPtrs, rowIndices, values) def asML(self): """ Convert this matrix to the new mllib-local representation. This does NOT copy the data; it copies references. :return: :py:class:`pyspark.ml.linalg.DenseMatrix` .. 
versionadded:: 2.0.0 """ return newlinalg.DenseMatrix(self.numRows, self.numCols, self.values, self.isTransposed) def __getitem__(self, indices): i, j = indices if i < 0 or i >= self.numRows: raise IndexError("Row index %d is out of range [0, %d)" % (i, self.numRows)) if j >= self.numCols or j < 0: raise IndexError("Column index %d is out of range [0, %d)" % (j, self.numCols)) if self.isTransposed: return self.values[i * self.numCols + j] else: return self.values[i + j * self.numRows] def __eq__(self, other): if (not isinstance(other, DenseMatrix) or self.numRows != other.numRows or self.numCols != other.numCols): return False self_values = np.ravel(self.toArray(), order='F') other_values = np.ravel(other.toArray(), order='F') return all(self_values == other_values) class SparseMatrix(Matrix): """Sparse Matrix stored in CSC format.""" def __init__(self, numRows, numCols, colPtrs, rowIndices, values, isTransposed=False): Matrix.__init__(self, numRows, numCols, isTransposed) self.colPtrs = self._convert_to_array(colPtrs, np.int32) self.rowIndices = self._convert_to_array(rowIndices, np.int32) self.values = self._convert_to_array(values, np.float64) if self.isTransposed: if self.colPtrs.size != numRows + 1: raise ValueError("Expected colPtrs of size %d, got %d." % (numRows + 1, self.colPtrs.size)) else: if self.colPtrs.size != numCols + 1: raise ValueError("Expected colPtrs of size %d, got %d." % (numCols + 1, self.colPtrs.size)) if self.rowIndices.size != self.values.size: raise ValueError("Expected rowIndices of length %d, got %d." 
% (self.rowIndices.size, self.values.size)) def __str__(self): """ Pretty printing of a SparseMatrix >>> sm1 = SparseMatrix(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4]) >>> print(sm1) 2 X 2 CSCMatrix (0,0) 2.0 (1,0) 3.0 (1,1) 4.0 >>> sm1 = SparseMatrix(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4], True) >>> print(sm1) 2 X 2 CSRMatrix (0,0) 2.0 (0,1) 3.0 (1,1) 4.0 """ spstr = "{0} X {1} ".format(self.numRows, self.numCols) if self.isTransposed: spstr += "CSRMatrix\n" else: spstr += "CSCMatrix\n" cur_col = 0 smlist = [] # Display first 16 values. if len(self.values) <= 16: zipindval = zip(self.rowIndices, self.values) else: zipindval = zip(self.rowIndices[:16], self.values[:16]) for i, (rowInd, value) in enumerate(zipindval): if self.colPtrs[cur_col + 1] <= i: cur_col += 1 if self.isTransposed: smlist.append('({0},{1}) {2}'.format( cur_col, rowInd, _format_float(value))) else: smlist.append('({0},{1}) {2}'.format( rowInd, cur_col, _format_float(value))) spstr += "\n".join(smlist) if len(self.values) > 16: spstr += "\n.." 
* 2 return spstr def __repr__(self): """ Representation of a SparseMatrix >>> sm1 = SparseMatrix(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4]) >>> sm1 SparseMatrix(2, 2, [0, 2, 3], [0, 1, 1], [2.0, 3.0, 4.0], False) """ rowIndices = list(self.rowIndices) colPtrs = list(self.colPtrs) if len(self.values) <= 16: values = _format_float_list(self.values) else: values = ( _format_float_list(self.values[:8]) + ["..."] + _format_float_list(self.values[-8:]) ) rowIndices = rowIndices[:8] + ["..."] + rowIndices[-8:] if len(self.colPtrs) > 16: colPtrs = colPtrs[:8] + ["..."] + colPtrs[-8:] values = ", ".join(values) rowIndices = ", ".join([str(ind) for ind in rowIndices]) colPtrs = ", ".join([str(ptr) for ptr in colPtrs]) return "SparseMatrix({0}, {1}, [{2}], [{3}], [{4}], {5})".format( self.numRows, self.numCols, colPtrs, rowIndices, values, self.isTransposed) def __reduce__(self): return SparseMatrix, ( self.numRows, self.numCols, self.colPtrs.tostring(), self.rowIndices.tostring(), self.values.tostring(), int(self.isTransposed)) def __getitem__(self, indices): i, j = indices if i < 0 or i >= self.numRows: raise IndexError("Row index %d is out of range [0, %d)" % (i, self.numRows)) if j < 0 or j >= self.numCols: raise IndexError("Column index %d is out of range [0, %d)" % (j, self.numCols)) # If a CSR matrix is given, then the row index should be searched # for in ColPtrs, and the column index should be searched for in the # corresponding slice obtained from rowIndices. 
if self.isTransposed: j, i = i, j colStart = self.colPtrs[j] colEnd = self.colPtrs[j + 1] nz = self.rowIndices[colStart: colEnd] ind = np.searchsorted(nz, i) + colStart if ind < colEnd and self.rowIndices[ind] == i: return self.values[ind] else: return 0.0 def toArray(self): """ Return an numpy.ndarray """ A = np.zeros((self.numRows, self.numCols), dtype=np.float64, order='F') for k in xrange(self.colPtrs.size - 1): startptr = self.colPtrs[k] endptr = self.colPtrs[k + 1] if self.isTransposed: A[k, self.rowIndices[startptr:endptr]] = self.values[startptr:endptr] else: A[self.rowIndices[startptr:endptr], k] = self.values[startptr:endptr] return A def toDense(self): densevals = np.ravel(self.toArray(), order='F') return DenseMatrix(self.numRows, self.numCols, densevals) def asML(self): """ Convert this matrix to the new mllib-local representation. This does NOT copy the data; it copies references. :return: :py:class:`pyspark.ml.linalg.SparseMatrix` .. versionadded:: 2.0.0 """ return newlinalg.SparseMatrix(self.numRows, self.numCols, self.colPtrs, self.rowIndices, self.values, self.isTransposed) # TODO: More efficient implementation: def __eq__(self, other): return np.all(self.toArray() == other.toArray()) class Matrices(object): @staticmethod def dense(numRows, numCols, values): """ Create a DenseMatrix """ return DenseMatrix(numRows, numCols, values) @staticmethod def sparse(numRows, numCols, colPtrs, rowIndices, values): """ Create a SparseMatrix """ return SparseMatrix(numRows, numCols, colPtrs, rowIndices, values) @staticmethod def fromML(mat): """ Convert a matrix from the new mllib-local representation. This does NOT copy the data; it copies references. :param mat: a :py:class:`pyspark.ml.linalg.Matrix` :return: a :py:class:`pyspark.mllib.linalg.Matrix` .. 
versionadded:: 2.0.0 """ if isinstance(mat, newlinalg.DenseMatrix): return DenseMatrix(mat.numRows, mat.numCols, mat.values, mat.isTransposed) elif isinstance(mat, newlinalg.SparseMatrix): return SparseMatrix(mat.numRows, mat.numCols, mat.colPtrs, mat.rowIndices, mat.values, mat.isTransposed) else: raise TypeError("Unsupported matrix type %s" % type(mat)) class QRDecomposition(object): """ Represents QR factors. """ def __init__(self, Q, R): self._Q = Q self._R = R @property @since('2.0.0') def Q(self): """ An orthogonal matrix Q in a QR decomposition. May be null if not computed. """ return self._Q @property @since('2.0.0') def R(self): """ An upper triangular matrix R in a QR decomposition. """ return self._R def _test(): import doctest (failure_count, test_count) = doctest.testmod(optionflags=doctest.ELLIPSIS) if failure_count: exit(-1) if __name__ == "__main__": _test()
apache-2.0
tkaitchuck/nupic
external/linux64/lib/python2.6/site-packages/PIL/ArgImagePlugin.py
14
12160
# # THIS IS WORK IN PROGRESS # # The Python Imaging Library. # $Id: ArgImagePlugin.py 2309 2005-03-02 15:06:34Z fredrik $ # # ARG animation support code # # history: # 1996-12-30 fl Created # 1996-01-06 fl Added safe scripting environment # 1996-01-10 fl Added JHDR, UHDR and sYNC support # 2005-03-02 fl Removed AAPP and ARUN support # # Copyright (c) Secret Labs AB 1997. # Copyright (c) Fredrik Lundh 1996-97. # # See the README file for information on usage and redistribution. # __version__ = "0.4" import marshal, string import Image, ImageFile, ImagePalette from PngImagePlugin import i16, i32, ChunkStream, _MODES MAGIC = "\212ARG\r\n\032\n" # -------------------------------------------------------------------- # ARG parser class ArgStream(ChunkStream): "Parser callbacks for ARG data" def __init__(self, fp): ChunkStream.__init__(self, fp) self.eof = 0 self.im = None self.palette = None self.__reset() def __reset(self): # reset decoder state (called on init and sync) self.count = 0 self.id = None self.action = ("NONE",) self.images = {} self.names = {} def chunk_AHDR(self, offset, bytes): "AHDR -- animation header" # assertions if self.count != 0: raise SyntaxError, "misplaced AHDR chunk" s = self.fp.read(bytes) self.size = i32(s), i32(s[4:]) try: self.mode, self.rawmode = _MODES[(ord(s[8]), ord(s[9]))] except: raise SyntaxError, "unknown ARG mode" if Image.DEBUG: print "AHDR size", self.size print "AHDR mode", self.mode, self.rawmode return s def chunk_AFRM(self, offset, bytes): "AFRM -- next frame follows" # assertions if self.count != 0: raise SyntaxError, "misplaced AFRM chunk" self.show = 1 self.id = 0 self.count = 1 self.repair = None s = self.fp.read(bytes) if len(s) >= 2: self.id = i16(s) if len(s) >= 4: self.count = i16(s[2:4]) if len(s) >= 6: self.repair = i16(s[4:6]) else: self.repair = None if Image.DEBUG: print "AFRM", self.id, self.count return s def chunk_ADEF(self, offset, bytes): "ADEF -- store image" # assertions if self.count != 0: raise 
SyntaxError, "misplaced ADEF chunk" self.show = 0 self.id = 0 self.count = 1 self.repair = None s = self.fp.read(bytes) if len(s) >= 2: self.id = i16(s) if len(s) >= 4: self.count = i16(s[2:4]) if Image.DEBUG: print "ADEF", self.id, self.count return s def chunk_NAME(self, offset, bytes): "NAME -- name the current image" # assertions if self.count == 0: raise SyntaxError, "misplaced NAME chunk" name = self.fp.read(bytes) self.names[self.id] = name return name def chunk_AEND(self, offset, bytes): "AEND -- end of animation" if Image.DEBUG: print "AEND" self.eof = 1 raise EOFError, "end of ARG file" def __getmodesize(self, s, full=1): size = i32(s), i32(s[4:]) try: mode, rawmode = _MODES[(ord(s[8]), ord(s[9]))] except: raise SyntaxError, "unknown image mode" if full: if ord(s[12]): pass # interlace not yet supported if ord(s[11]): raise SyntaxError, "unknown filter category" return size, mode, rawmode def chunk_PAST(self, offset, bytes): "PAST -- paste one image into another" # assertions if self.count == 0: raise SyntaxError, "misplaced PAST chunk" if self.repair is not None: # we must repair the target image before we # start pasting # brute force; a better solution would be to # update only the dirty rectangles in images[id]. 
# note that if images[id] doesn't exist, it must # be created self.images[self.id] = self.images[self.repair].copy() self.repair = None s = self.fp.read(bytes) im = self.images[i16(s)] x, y = i32(s[2:6]), i32(s[6:10]) bbox = x, y, im.size[0]+x, im.size[1]+y if im.mode in ["RGBA"]: # paste with transparency # FIXME: should handle P+transparency as well self.images[self.id].paste(im, bbox, im) else: # paste without transparency self.images[self.id].paste(im, bbox) self.action = ("PAST",) self.__store() return s def chunk_BLNK(self, offset, bytes): "BLNK -- create blank image" # assertions if self.count == 0: raise SyntaxError, "misplaced BLNK chunk" s = self.fp.read(bytes) size, mode, rawmode = self.__getmodesize(s, 0) # store image (FIXME: handle colour) self.action = ("BLNK",) self.im = Image.core.fill(mode, size, 0) self.__store() return s def chunk_IHDR(self, offset, bytes): "IHDR -- full image follows" # assertions if self.count == 0: raise SyntaxError, "misplaced IHDR chunk" # image header s = self.fp.read(bytes) size, mode, rawmode = self.__getmodesize(s) # decode and store image self.action = ("IHDR",) self.im = Image.core.new(mode, size) self.decoder = Image.core.zip_decoder(rawmode) self.decoder.setimage(self.im, (0,0) + size) self.data = "" return s def chunk_DHDR(self, offset, bytes): "DHDR -- delta image follows" # assertions if self.count == 0: raise SyntaxError, "misplaced DHDR chunk" s = self.fp.read(bytes) size, mode, rawmode = self.__getmodesize(s) # delta header diff = ord(s[13]) offs = i32(s[14:18]), i32(s[18:22]) bbox = offs + (offs[0]+size[0], offs[1]+size[1]) if Image.DEBUG: print "DHDR", diff, bbox # FIXME: decode and apply image self.action = ("DHDR", diff, bbox) # setup decoder self.im = Image.core.new(mode, size) self.decoder = Image.core.zip_decoder(rawmode) self.decoder.setimage(self.im, (0,0) + size) self.data = "" return s def chunk_JHDR(self, offset, bytes): "JHDR -- JPEG image follows" # assertions if self.count == 0: raise 
SyntaxError, "misplaced JHDR chunk" # image header s = self.fp.read(bytes) size, mode, rawmode = self.__getmodesize(s, 0) # decode and store image self.action = ("JHDR",) self.im = Image.core.new(mode, size) self.decoder = Image.core.jpeg_decoder(rawmode) self.decoder.setimage(self.im, (0,0) + size) self.data = "" return s def chunk_UHDR(self, offset, bytes): "UHDR -- uncompressed image data follows (EXPERIMENTAL)" # assertions if self.count == 0: raise SyntaxError, "misplaced UHDR chunk" # image header s = self.fp.read(bytes) size, mode, rawmode = self.__getmodesize(s, 0) # decode and store image self.action = ("UHDR",) self.im = Image.core.new(mode, size) self.decoder = Image.core.raw_decoder(rawmode) self.decoder.setimage(self.im, (0,0) + size) self.data = "" return s def chunk_IDAT(self, offset, bytes): "IDAT -- image data block" # pass compressed chunks through the decoder s = self.fp.read(bytes) self.data = self.data + s n, e = self.decoder.decode(self.data) if n < 0: # end of image if e < 0: raise IOError, "decoder error %d" % e else: self.data = self.data[n:] return s def chunk_DEND(self, offset, bytes): return self.chunk_IEND(offset, bytes) def chunk_JEND(self, offset, bytes): return self.chunk_IEND(offset, bytes) def chunk_UEND(self, offset, bytes): return self.chunk_IEND(offset, bytes) def chunk_IEND(self, offset, bytes): "IEND -- end of image" # we now have a new image. carry out the operation # defined by the image header. 
# won't need these anymore del self.decoder del self.data self.__store() return self.fp.read(bytes) def __store(self): # apply operation cid = self.action[0] if cid in ["BLNK", "IHDR", "JHDR", "UHDR"]: # store self.images[self.id] = self.im elif cid == "DHDR": # paste cid, mode, bbox = self.action im0 = self.images[self.id] im1 = self.im if mode == 0: im1 = im1.chop_add_modulo(im0.crop(bbox)) im0.paste(im1, bbox) self.count = self.count - 1 if self.count == 0 and self.show: self.im = self.images[self.id] raise EOFError # end of this frame def chunk_PLTE(self, offset, bytes): "PLTE -- palette data" s = self.fp.read(bytes) if self.mode == "P": self.palette = ImagePalette.raw("RGB", s) return s def chunk_sYNC(self, offset, bytes): "SYNC -- reset decoder" if self.count != 0: raise SyntaxError, "misplaced sYNC chunk" s = self.fp.read(bytes) self.__reset() return s # -------------------------------------------------------------------- # ARG reader def _accept(prefix): return prefix[:8] == MAGIC ## # Image plugin for the experimental Animated Raster Graphics format. 
class ArgImageFile(ImageFile.ImageFile): format = "ARG" format_description = "Animated raster graphics" def _open(self): if self.fp.read(8) != MAGIC: raise SyntaxError, "not an ARG file" self.arg = ArgStream(self.fp) # read and process the first chunk (AHDR) cid, offset, bytes = self.arg.read() if cid != "AHDR": raise SyntaxError, "expected an AHDR chunk" s = self.arg.call(cid, offset, bytes) self.arg.crc(cid, s) # image characteristics self.mode = self.arg.mode self.size = self.arg.size def load(self): if self.arg.im is None: self.seek(0) # image data self.im = self.arg.im self.palette = self.arg.palette # set things up for further processing Image.Image.load(self) def seek(self, frame): if self.arg.eof: raise EOFError, "end of animation" self.fp = self.arg.fp while 1: # # process chunks cid, offset, bytes = self.arg.read() if self.arg.eof: raise EOFError, "end of animation" try: s = self.arg.call(cid, offset, bytes) except EOFError: break except "glurk": # AttributeError if Image.DEBUG: print cid, bytes, "(unknown)" s = self.fp.read(bytes) self.arg.crc(cid, s) self.fp.read(4) # ship extra CRC def tell(self): return 0 def verify(self): "Verify ARG file" # back up to first chunk self.fp.seek(8) self.arg.verify(self) self.arg.close() self.fp = None # # -------------------------------------------------------------------- Image.register_open("ARG", ArgImageFile, _accept) Image.register_extension("ARG", ".arg") Image.register_mime("ARG", "video/x-arg")
gpl-3.0
edx/lettuce
tests/integration/lib/Django-1.2.5/django/db/backends/sqlite3/introspection.py
65
5857
import re from django.db.backends import BaseDatabaseIntrospection # This light wrapper "fakes" a dictionary interface, because some SQLite data # types include variables in them -- e.g. "varchar(30)" -- and can't be matched # as a simple dictionary lookup. class FlexibleFieldLookupDict: # Maps SQL types to Django Field types. Some of the SQL types have multiple # entries here because SQLite allows for anything and doesn't normalize the # field type; it uses whatever was given. base_data_types_reverse = { 'bool': 'BooleanField', 'boolean': 'BooleanField', 'smallint': 'SmallIntegerField', 'smallint unsigned': 'PositiveSmallIntegerField', 'smallinteger': 'SmallIntegerField', 'int': 'IntegerField', 'integer': 'IntegerField', 'bigint': 'BigIntegerField', 'integer unsigned': 'PositiveIntegerField', 'decimal': 'DecimalField', 'real': 'FloatField', 'text': 'TextField', 'char': 'CharField', 'date': 'DateField', 'datetime': 'DateTimeField', 'time': 'TimeField', } def __getitem__(self, key): key = key.lower() try: return self.base_data_types_reverse[key] except KeyError: import re m = re.search(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$', key) if m: return ('CharField', {'max_length': int(m.group(1))}) raise KeyError class DatabaseIntrospection(BaseDatabaseIntrospection): data_types_reverse = FlexibleFieldLookupDict() def get_table_list(self, cursor): "Returns a list of table names in the current database." # Skip the sqlite_sequence system table used for autoincrement key # generation. cursor.execute(""" SELECT name FROM sqlite_master WHERE type='table' AND NOT name='sqlite_sequence' ORDER BY name""") return [row[0] for row in cursor.fetchall()] def get_table_description(self, cursor, table_name): "Returns a description of the table, with the DB-API cursor.description interface." 
return [(info['name'], info['type'], None, None, None, None, info['null_ok']) for info in self._table_info(cursor, table_name)] def get_relations(self, cursor, table_name): """ Returns a dictionary of {field_index: (field_index_other_table, other_table)} representing all relationships to the given table. Indexes are 0-based. """ # Dictionary of relations to return relations = {} # Schema for this table cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"]) results = cursor.fetchone()[0].strip() results = results[results.index('(')+1:results.rindex(')')] # Walk through and look for references to other tables. SQLite doesn't # really have enforced references, but since it echoes out the SQL used # to create the table we can look for REFERENCES statements used there. for field_index, field_desc in enumerate(results.split(',')): field_desc = field_desc.strip() if field_desc.startswith("UNIQUE"): continue m = re.search('references (.*) \(["|](.*)["|]\)', field_desc, re.I) if not m: continue table, column = [s.strip('"') for s in m.groups()] cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table]) result = cursor.fetchone() if not result: continue other_table_results = result[0].strip() li, ri = other_table_results.index('('), other_table_results.rindex(')') other_table_results = other_table_results[li+1:ri] for other_index, other_desc in enumerate(other_table_results.split(',')): other_desc = other_desc.strip() if other_desc.startswith('UNIQUE'): continue name = other_desc.split(' ', 1)[0].strip('"') if name == column: relations[field_index] = (other_index, table) break return relations def get_indexes(self, cursor, table_name): """ Returns a dictionary of fieldname -> infodict for the given table, where each infodict is in the format: {'primary_key': boolean representing whether it's the primary key, 'unique': boolean representing whether it's a unique index} """ indexes = {} for info in 
self._table_info(cursor, table_name): indexes[info['name']] = {'primary_key': info['pk'] != 0, 'unique': False} cursor.execute('PRAGMA index_list(%s)' % self.connection.ops.quote_name(table_name)) # seq, name, unique for index, unique in [(field[1], field[2]) for field in cursor.fetchall()]: if not unique: continue cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index)) info = cursor.fetchall() # Skip indexes across multiple fields if len(info) != 1: continue name = info[0][2] # seqno, cid, name indexes[name]['unique'] = True return indexes def _table_info(self, cursor, name): cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(name)) # cid, name, type, notnull, dflt_value, pk return [{'name': field[1], 'type': field[2], 'null_ok': not field[3], 'pk': field[5] # undocumented } for field in cursor.fetchall()]
gpl-3.0
jbuchbinder/youtube-dl
youtube_dl/extractor/heise.py
176
2864
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( determine_ext, int_or_none, parse_iso8601, ) class HeiseIE(InfoExtractor): _VALID_URL = r'''(?x) https?://(?:www\.)?heise\.de/video/artikel/ .+?(?P<id>[0-9]+)\.html(?:$|[?#]) ''' _TEST = { 'url': ( 'http://www.heise.de/video/artikel/Podcast-c-t-uplink-3-3-Owncloud-Tastaturen-Peilsender-Smartphone-2404147.html' ), 'md5': 'ffed432483e922e88545ad9f2f15d30e', 'info_dict': { 'id': '2404147', 'ext': 'mp4', 'title': ( "Podcast: c't uplink 3.3 – Owncloud / Tastaturen / Peilsender Smartphone" ), 'format_id': 'mp4_720p', 'timestamp': 1411812600, 'upload_date': '20140927', 'description': 'In uplink-Episode 3.3 geht es darum, wie man sich von Cloud-Anbietern emanzipieren kann, worauf man beim Kauf einer Tastatur achten sollte und was Smartphones über uns verraten.', 'thumbnail': 're:^https?://.*\.jpe?g$', } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) container_id = self._search_regex( r'<div class="videoplayerjw".*?data-container="([0-9]+)"', webpage, 'container ID') sequenz_id = self._search_regex( r'<div class="videoplayerjw".*?data-sequenz="([0-9]+)"', webpage, 'sequenz ID') data_url = 'http://www.heise.de/videout/feed?container=%s&sequenz=%s' % (container_id, sequenz_id) doc = self._download_xml(data_url, video_id) info = { 'id': video_id, 'thumbnail': self._og_search_thumbnail(webpage), 'timestamp': parse_iso8601( self._html_search_meta('date', webpage)), 'description': self._og_search_description(webpage), } title = self._html_search_meta('fulltitle', webpage) if title: info['title'] = title else: info['title'] = self._og_search_title(webpage) formats = [] for source_node in doc.findall('.//{http://rss.jwpcdn.com/}source'): label = source_node.attrib['label'] height = int_or_none(self._search_regex( r'^(.*?_)?([0-9]+)p$', label, 'height', default=None)) video_url = source_node.attrib['file'] ext 
= determine_ext(video_url, '') formats.append({ 'url': video_url, 'format_note': label, 'format_id': '%s_%s' % (ext, label), 'height': height, }) self._sort_formats(formats) info['formats'] = formats return info
unlicense
JioEducation/edx-platform
lms/envs/content.py
168
1088
""" These are debug machines used for content creators, so they're kind of a cross between dev machines and AWS machines. """ # We intentionally define lots of variables that aren't used, and # want to import all variables from base settings files # pylint: disable=wildcard-import, unused-wildcard-import from .aws import * DEBUG = True TEMPLATE_DEBUG = True EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' ################################ DEBUG TOOLBAR ################################# INSTALLED_APPS += ('debug_toolbar',) MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',) DEBUG_TOOLBAR_PANELS = ( 'debug_toolbar.panels.versions.VersionsPanel', 'debug_toolbar.panels.timer.TimerPanel', 'debug_toolbar.panels.settings.SettingsPanel', 'debug_toolbar.panels.headers.HeadersPanel', 'debug_toolbar.panels.request.RequestPanel', 'debug_toolbar.panels.sql.SQLPanel', 'debug_toolbar.panels.signals.SignalsPanel', 'debug_toolbar.panels.logging.LoggingPanel', 'debug_toolbar.panels.profiling.ProfilingPanel', )
agpl-3.0
dhalleine/tensorflow
tensorflow/python/ops/parsing_ops.py
5
40273
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Parsing Ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import re from tensorflow.python.framework import common_shapes from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_parsing_ops from tensorflow.python.ops import logging_ops from tensorflow.python.ops import math_ops # go/tf-wildcard-import # pylint: disable=wildcard-import,undefined-variable from tensorflow.python.ops.gen_parsing_ops import * # pylint: enable=wildcard-import,undefined-variable ops.NoGradient("DecodeRaw") ops.NoGradient("StringToNumber") class VarLenFeature(collections.namedtuple("VarLenFeature", ["dtype"])): """Configuration for parsing a variable-length input feature. Fields: dtype: Data type of input. """ pass class FixedLenFeature(collections.namedtuple( "FixedLenFeature", ["shape", "dtype", "default_value"])): """Configuration for parsing a fixed-length input feature. 
To treat sparse input as dense, provide a `default_value`; otherwise, the parse functions will fail on any examples missing this feature. Fields: shape: Shape of input data. dtype: Data type of input. default_value: Value to be used if an example is missing this feature. It must be compatible with `dtype`. """ pass FixedLenFeature.__new__.__defaults__ = (None,) # NOTE: If we ever support a default_value for sequence dense features, we can # remove this class and use FixedLenFeature in its place. class FixedLenSequenceFeature(collections.namedtuple( "FixedLenSequenceFeature", ["shape", "dtype", "allow_missing"])): """Configuration for a dense input feature in a sequence item. To treat a sparse input as dense, provide `allow_missing=True`; otherwise, the parse functions will fail on any examples missing this feature. Fields: shape: Shape of input data. dtype: Data type of input. allow_missing: Whether to allow this feature to be missing from a feature list item. """ pass FixedLenSequenceFeature.__new__.__defaults__ = (False,) def _features_to_raw_params(features, types): """Split feature tuples into raw params used by `gen_parsing_ops`. Args: features: A `dict` mapping feature keys to objects of a type in `types`. types: Type of features to allow, among `FixedLenFeature`, `VarLenFeature`, and `FixedLenSequenceFeature`. Returns: Tuple of `sparse_keys`, `sparse_types`, `dense_keys`, `dense_types`, `dense_defaults`, `dense_shapes`. Raises: ValueError: if `features` contains an item not in `types`, or an invalid feature. """ sparse_keys = [] sparse_types = [] dense_keys = [] dense_types = [] dense_defaults = {} dense_shapes = [] if features: # NOTE: We iterate over sorted keys to keep things deterministic. for key in sorted(features.keys()): feature = features[key] if isinstance(feature, VarLenFeature): if VarLenFeature not in types: raise ValueError("Unsupported VarLenFeature %s.", feature) if not feature.dtype: raise ValueError("Missing type for feature %s." 
% key) sparse_keys.append(key) sparse_types.append(feature.dtype) elif isinstance(feature, FixedLenFeature): if FixedLenFeature not in types: raise ValueError("Unsupported FixedLenFeature %s.", feature) if not feature.dtype: raise ValueError("Missing type for feature %s." % key) if feature.shape is None: raise ValueError("Missing shape for feature %s." % key) dense_keys.append(key) dense_shapes.append(feature.shape) dense_types.append(feature.dtype) if feature.default_value is not None: dense_defaults[key] = feature.default_value elif isinstance(feature, FixedLenSequenceFeature): if FixedLenSequenceFeature not in types: raise ValueError("Unsupported FixedLenSequenceFeature %s.", feature) if not feature.dtype: raise ValueError("Missing type for feature %s." % key) if feature.shape is None: raise ValueError("Missing shape for feature %s." % key) dense_keys.append(key) dense_shapes.append(feature.shape) dense_types.append(feature.dtype) if feature.allow_missing: dense_defaults[key] = None else: raise ValueError("Invalid feature %s:%s." % (key, feature)) return ( sparse_keys, sparse_types, dense_keys, dense_types, dense_defaults, dense_shapes) def parse_example(serialized, features, name=None, example_names=None): # pylint: disable=line-too-long """Parses `Example` protos into a `dict` of tensors. Parses a number of serialized [`Example`] (https://www.tensorflow.org/code/tensorflow/core/example/example.proto) protos given in `serialized`. `example_names` may contain descriptive names for the corresponding serialized protos. These may be useful for debugging purposes, but they have no effect on the output. If not `None`, `example_names` must be the same length as `serialized`. This op parses serialized examples into a dictionary mapping keys to `Tensor` and `SparseTensor` objects. `features` is a dict from keys to `VarLenFeature` and `FixedLenFeature` objects. Each `VarLenFeature` is mapped to a `SparseTensor`, and each `FixedLenFeature` is mapped to a `Tensor`. 
Each `VarLenFeature` maps to a `SparseTensor` of the specified type representing a ragged matrix. Its indices are `[batch, index]` where `batch` is the batch entry the value is from in `serialized`, and `index` is the value's index in the list of values associated with that feature and example. Each `FixedLenFeature` `df` maps to a `Tensor` of the specified type (or `tf.float32` if not specified) and shape `(serialized.size(),) + df.shape`. `FixedLenFeature` entries with a `default_value` are optional. With no default value, we will fail if that `Feature` is missing from any example in `serialized`. Examples: For example, if one expects a `tf.float32` sparse feature `ft` and three serialized `Example`s are provided: ``` serialized = [ features { feature { key: "ft" value { float_list { value: [1.0, 2.0] } } } }, features { feature []}, features { feature { key: "ft" value { float_list { value: [3.0] } } } ] ``` then the output will look like: ``` {"ft": SparseTensor(indices=[[0, 0], [0, 1], [2, 0]], values=[1.0, 2.0, 3.0], shape=(3, 2)) } ``` Given two `Example` input protos in `serialized`: ``` [ features { feature { key: "kw" value { bytes_list { value: [ "knit", "big" ] } } } feature { key: "gps" value { float_list { value: [] } } } }, features { feature { key: "kw" value { bytes_list { value: [ "emmy" ] } } } feature { key: "dank" value { int64_list { value: [ 42 ] } } } feature { key: "gps" value { } } } ] ``` And arguments ``` example_names: ["input0", "input1"], features: { "kw": VarLenFeature(tf.string), "dank": VarLenFeature(tf.int64), "gps": VarLenFeature(tf.float32), } ``` Then the output is a dictionary: ```python { "kw": SparseTensor( indices=[[0, 0], [0, 1], [1, 0]], values=["knit", "big", "emmy"] shape=[2, 2]), "dank": SparseTensor( indices=[[1, 0]], values=[42], shape=[2, 1]), "gps": SparseTensor( indices=[], values=[], shape=[2, 0]), } ``` For dense results in two serialized `Example`s: ``` [ features { feature { key: "age" value { int64_list { 
              value: [ 0 ] } } }
    feature { key: "gender"
              value { bytes_list { value: [ "f" ] } } }
  },
  features {
    feature { key: "age"
              value { int64_list { value: [] } } }
    feature { key: "gender"
              value { bytes_list { value: [ "f" ] } } }
  }
  ]
  ```

  We can use arguments:

  ```
  example_names: ["input0", "input1"],
  features: {
      "age": FixedLenFeature([], dtype=tf.int64, default_value=-1),
      "gender": FixedLenFeature([], dtype=tf.string),
  }
  ```

  And the expected output is:

  ```python
  {
    "age": [[0], [-1]],
    "gender": [["f"], ["f"]],
  }
  ```

  Args:
    serialized: A vector (1-D Tensor) of strings, a batch of binary
      serialized `Example` protos.
    features: A `dict` mapping feature keys to `FixedLenFeature` or
      `VarLenFeature` values.
    name: A name for this operation (optional).
    example_names: A vector (1-D Tensor) of strings (optional), the names of
      the serialized protos in the batch.

  Returns:
    A `dict` mapping feature keys to `Tensor` and `SparseTensor` values.

  Raises:
    ValueError: if any feature is invalid.
  """
  if not features:
    raise ValueError("Missing: features was %s." % features)
  # Flatten the user-facing feature specs into the parallel key/type/default
  # lists expected by the raw parsing helper.
  (sparse_keys, sparse_types, dense_keys, dense_types, dense_defaults,
   dense_shapes) = _features_to_raw_params(
       features, [VarLenFeature, FixedLenFeature])
  return _parse_example_raw(
      serialized, example_names, sparse_keys, sparse_types, dense_keys,
      dense_types, dense_defaults, dense_shapes, name)


def _parse_example_raw(serialized,
                       names=None,
                       sparse_keys=None,
                       sparse_types=None,
                       dense_keys=None,
                       dense_types=None,
                       dense_defaults=None,
                       dense_shapes=None,
                       name=None):
  """Parses `Example` protos.

  Args:
    serialized: A vector (1-D Tensor) of strings, a batch of binary
      serialized `Example` protos.
    names: A vector (1-D Tensor) of strings (optional), the names of
      the serialized protos.
    sparse_keys: A list of string keys in the examples' features.
      The results for these keys will be returned as `SparseTensor` objects.
    sparse_types: A list of `DTypes` of the same length as `sparse_keys`.
      Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
      and `tf.string` (`BytesList`) are supported.
    dense_keys: A list of string keys in the examples' features.
      The results for these keys will be returned as `Tensor`s
    dense_types: A list of DTypes of the same length as `dense_keys`.
      Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
      and `tf.string` (`BytesList`) are supported.
    dense_defaults: A dict mapping string keys to `Tensor`s.
      The keys of the dict must match the dense_keys of the feature.
    dense_shapes: A list of tuples with the same length as `dense_keys`.
      The shape of the data for each dense feature referenced by `dense_keys`.
      Required for any input tensors identified by `dense_keys` whose shapes
      are anything other than `[]` or `[1]`.
    name: A name for this operation (optional).

  Returns:
    A `dict` mapping keys to `Tensor`s and `SparseTensor`s.

  Raises:
    ValueError: If sparse and dense key sets intersect, or input lengths do
      not match up.
  """
  with ops.op_scope([serialized, names], name, "ParseExample"):
    # Canonicalize every optional argument to a concrete empty container so
    # the validation below can treat all inputs uniformly.
    names = [] if names is None else names
    dense_defaults = {} if dense_defaults is None else dense_defaults
    sparse_keys = [] if sparse_keys is None else sparse_keys
    sparse_types = [] if sparse_types is None else sparse_types
    dense_keys = [] if dense_keys is None else dense_keys
    dense_types = [] if dense_types is None else dense_types
    dense_shapes = (
        [[]] * len(dense_keys) if dense_shapes is None else dense_shapes)

    num_dense = len(dense_keys)
    num_sparse = len(sparse_keys)

    # Sanity-check that the parallel lists line up and the key sets are
    # disjoint before constructing any graph nodes.
    if len(dense_shapes) != num_dense:
      raise ValueError("len(dense_shapes) != len(dense_keys): %d vs. %d"
                       % (len(dense_shapes), num_dense))
    if len(dense_types) != num_dense:
      raise ValueError("len(dense_types) != len(num_dense): %d vs. %d"
                       % (len(dense_types), num_dense))
    if len(sparse_types) != num_sparse:
      raise ValueError("len(sparse_types) != len(sparse_keys): %d vs. %d"
                       % (len(sparse_types), num_sparse))
    if num_dense + num_sparse == 0:
      raise ValueError("Must provide at least one sparse key or dense key")
    if not set(dense_keys).isdisjoint(set(sparse_keys)):
      raise ValueError(
          "Dense and sparse keys must not intersect; intersection: %s" %
          set(dense_keys).intersection(set(sparse_keys)))

    # Build one default-value tensor per dense key.  Keys with no supplied
    # default get an empty constant; non-Tensor defaults are converted and
    # reshaped to the declared dense shape.
    dense_defaults_vec = []
    for i, key in enumerate(dense_keys):
      default_value = dense_defaults.get(key)
      if default_value is None:
        default_value = constant_op.constant([], dtype=dense_types[i])
      elif not isinstance(default_value, ops.Tensor):
        # Sanitize the key so it is usable as a TF op name component.
        key_name = "key_" + re.sub("[^A-Za-z0-9_.\\-/]", "_", key)
        default_value = ops.convert_to_tensor(
            default_value, dtype=dense_types[i], name=key_name)
        default_value = array_ops.reshape(default_value, dense_shapes[i])

      dense_defaults_vec.append(default_value)

    # The generated op expects shapes as serialized TensorShape protos.
    dense_shapes = [tensor_shape.as_shape(shape).as_proto()
                    for shape in dense_shapes]

    # pylint: disable=protected-access
    outputs = gen_parsing_ops._parse_example(
        serialized=serialized,
        names=names,
        dense_defaults=dense_defaults_vec,
        sparse_keys=sparse_keys,
        sparse_types=sparse_types,
        dense_keys=dense_keys,
        dense_shapes=dense_shapes,
        name=name)
    # pylint: enable=protected-access

    (sparse_indices, sparse_values, sparse_shapes, dense_values) = outputs

    # Re-bundle the three parallel sparse outputs into SparseTensor objects.
    sparse_tensors = [ops.SparseTensor(ix, val, shape) for (ix, val, shape)
                      in zip(sparse_indices, sparse_values, sparse_shapes)]

    return dict(
        zip(sparse_keys + dense_keys, sparse_tensors + dense_values))


def parse_single_example(serialized, features, name=None, example_names=None):
  """Parses a single `Example` proto.

  Similar to `parse_example`, except:

  For dense tensors, the returned `Tensor` is identical to the output of
  `parse_example`, except there is no batch dimension, the output shape is the
  same as the shape given in `dense_shape`.

  For `SparseTensor`s, the first (batch) column of the indices matrix is
  removed (the indices matrix is a column vector), the values vector is
  unchanged, and the first (`batch_size`) entry of the shape vector is removed
  (it is now a single element vector).

  Args:
    serialized: A scalar string Tensor, a single serialized Example.
      See `_parse_single_example_raw` documentation for more details.
    features: A `dict` mapping feature keys to `FixedLenFeature` or
      `VarLenFeature` values.
    name: A name for this operation (optional).
    example_names: (Optional) A scalar string Tensor, the associated name.
      See `_parse_single_example_raw` documentation for more details.

  Returns:
    A `dict` mapping feature keys to `Tensor` and `SparseTensor` values.

  Raises:
    ValueError: if any feature is invalid.
  """
  if not features:
    raise ValueError("Missing features.")
  (sparse_keys, sparse_types, dense_keys, dense_types, dense_defaults,
   dense_shapes) = _features_to_raw_params(
       features, [VarLenFeature, FixedLenFeature])
  return _parse_single_example_raw(
      serialized, example_names, sparse_keys, sparse_types, dense_keys,
      dense_types, dense_defaults, dense_shapes, name)


def _parse_single_example_raw(serialized,
                              names=None,
                              sparse_keys=None,
                              sparse_types=None,
                              dense_keys=None,
                              dense_types=None,
                              dense_defaults=None,
                              dense_shapes=None,
                              name=None):
  """Parses a single `Example` proto.

  Implemented by wrapping the scalar input into a batch of one, delegating to
  `_parse_example_raw`, then stripping the batch dimension off every output.

  Args:
    serialized: A scalar string Tensor, a single serialized Example.
      See `_parse_example_raw` documentation for more details.
    names: (Optional) A scalar string Tensor, the associated name.
      See `_parse_example_raw` documentation for more details.
    sparse_keys: See `_parse_example_raw` documentation for more details.
    sparse_types: See `_parse_example_raw` documentation for more details.
    dense_keys: See `_parse_example_raw` documentation for more details.
    dense_types: See `_parse_example_raw` documentation for more details.
    dense_defaults: See `_parse_example_raw` documentation for more details.
    dense_shapes: See `_parse_example_raw` documentation for more details.
    name: A name for this operation (optional).

  Returns:
    A `dict` mapping feature keys to `Tensor` and `SparseTensor` values.

  Raises:
    ValueError: if any feature is invalid.
  """
  with ops.op_scope([serialized, names], name, "ParseSingleExample"):
    serialized = ops.convert_to_tensor(serialized)
    serialized_shape = serialized.get_shape()
    # Enforce scalar-ness statically when the rank is known, otherwise fall
    # back to a runtime Assert wired in as a control dependency.
    if serialized_shape.ndims is not None:
      if serialized_shape.ndims != 0:
        raise ValueError("Input serialized must be a scalar")
    else:
      serialized = control_flow_ops.with_dependencies(
          [logging_ops.Assert(
              math_ops.equal(array_ops.rank(serialized), 0),
              ["Input serialized must be a scalar"],
              name="SerializedIsScalar")],
          serialized,
          name="SerializedDependencies")
    # Promote the scalar to a length-1 batch for the batched parser.
    serialized = array_ops.expand_dims(serialized, 0)
    if names is not None:
      names = ops.convert_to_tensor(names)
      names_shape = names.get_shape()
      # Same static-or-runtime scalar check for the optional name.
      if names_shape.ndims is not None:
        if names_shape.ndims != 0:
          raise ValueError("Input names must be a scalar")
      else:
        names = control_flow_ops.with_dependencies(
            [logging_ops.Assert(
                math_ops.equal(array_ops.rank(names), 0),
                ["Input names must be a scalar"],
                name="NamesIsScalar")],
            names,
            name="NamesDependencies")
      names = array_ops.expand_dims(names, 0)

    outputs = _parse_example_raw(
        serialized,
        names=names,
        sparse_keys=sparse_keys,
        sparse_types=sparse_types,
        dense_keys=dense_keys,
        dense_types=dense_types,
        dense_defaults=dense_defaults,
        dense_shapes=dense_shapes,
        name=name)
    # Squeeze the synthetic batch dimension back off the dense outputs, and
    # drop the batch column/entry from each SparseTensor's indices and shape.
    if dense_keys is not None:
      for d in dense_keys:
        d_name = re.sub("[^A-Za-z0-9_.\\-/]", "_", d)
        outputs[d] = array_ops.squeeze(
            outputs[d], [0], name="Squeeze_%s" % d_name)
    if sparse_keys is not None:
      for s in sparse_keys:
        s_name = re.sub("[^A-Za-z0-9_.\\-/]", "_", s)
        outputs[s] = ops.SparseTensor(
            array_ops.slice(outputs[s].indices,
                            [0, 1], [-1, -1],
                            name="Slice_Indices_%s" % s_name),
            outputs[s].values,
            array_ops.slice(outputs[s].shape,
                            [1], [-1], name="Squeeze_Shape_%s" % s_name))
    return outputs
@ops.RegisterShape("ParseExample")
def _ParseExampleShape(op):  # pylint: disable=invalid-name
  """Shape function for the ParseExample op."""
  input_shape = op.inputs[0].get_shape().with_rank(1)
  op.inputs[1].get_shape().with_rank(1)  # names
  num_sparse = op.get_attr("Nsparse")
  num_dense = op.get_attr("Ndense")
  dense_shapes = op.get_attr("dense_shapes")
  # Sparse outputs: indices are (?, 2) matrices, values are vectors of
  # unknown length, and each dense_shape output is a length-2 vector.
  sparse_index_shapes = [
      tensor_shape.matrix(None, 2) for _ in range(num_sparse)]
  sparse_value_shapes = [tensor_shape.vector(None) for _ in range(num_sparse)]
  sparse_shape_shapes = [tensor_shape.vector(2) for _ in range(num_sparse)]
  assert num_dense == len(dense_shapes)
  # Dense outputs keep the batch dimension prepended to the declared shape.
  dense_shapes = [
      input_shape.concatenate(dense_shape)
      for dense_shape in dense_shapes]
  return (sparse_index_shapes + sparse_value_shapes +
          sparse_shape_shapes + dense_shapes)


def parse_single_sequence_example(
    serialized, context_features=None, sequence_features=None,
    example_name=None, name=None):
  # pylint: disable=line-too-long
  """Parses a single `SequenceExample` proto.

  Parses a single serialized [`SequenceExample`]
  (https://www.tensorflow.org/code/tensorflow/core/example/example.proto)
  proto given in `serialized`.

  This op parses a serialized sequence example into a tuple of dictionaries
  mapping keys to `Tensor` and `SparseTensor` objects respectively.
  The first dictionary contains mappings for keys appearing in
  `context_features`, and the second dictionary contains mappings for keys
  appearing in `sequence_features`.

  At least one of `context_features` and `sequence_features` must be provided
  and non-empty.

  The `context_features` keys are associated with a `SequenceExample` as a
  whole, independent of time / frame.  In contrast, the `sequence_features`
  keys provide a way to access variable-length data within the `FeatureList`
  section of the `SequenceExample` proto.  While the shapes of
  `context_features` values are fixed with respect to frame, the frame
  dimension (the first dimension) of `sequence_features` values may vary
  between `SequenceExample` protos, and even between `feature_list` keys
  within the same `SequenceExample`.

  `context_features` contains `VarLenFeature` and `FixedLenFeature` objects.
  Each `VarLenFeature` is mapped to a `SparseTensor`, and each
  `FixedLenFeature` is mapped to a `Tensor`, of the specified type, shape,
  and default value.

  `sequence_features` contains `VarLenFeature` and `FixedLenSequenceFeature`
  objects. Each `VarLenFeature` is mapped to a `SparseTensor`, and each
  `FixedLenSequenceFeature` is mapped to a `Tensor`, each of the specified
  type. The shape will be `(T,) + df.shape` for `FixedLenSequenceFeature`
  `df`, where `T` is the length of the associated `FeatureList` in the
  `SequenceExample`.

  For instance, `FixedLenSequenceFeature([])` yields a scalar 1-D `Tensor` of
  static shape `[None]` and dynamic shape `[T]`, while
  `FixedLenSequenceFeature([k])` (for `int k >= 1`) yields a 2-D matrix
  `Tensor` of static shape `[None, k]` and dynamic shape `[T, k]`.

  Each `SparseTensor` corresponding to `sequence_features` represents a
  ragged vector.  Its indices are `[time, index]`, where `time` is the
  `FeatureList` entry and `index` is the value's index in the list of values
  associated with that time.

  `FixedLenFeature` entries with a `default_value` and
  `FixedLenSequenceFeature` entries with `allow_missing=True` are optional;
  otherwise, we will fail if that `Feature` or `FeatureList` is missing from
  any example in `serialized`.

  `example_name` may contain a descriptive name for the corresponding
  serialized proto. This may be useful for debugging purposes, but it has no
  effect on the output. If not `None`, `example_name` must be a scalar.

  Args:
    serialized: A scalar (0-D Tensor) of type string, a single binary
      serialized `SequenceExample` proto.
    context_features: A `dict` mapping feature keys to `FixedLenFeature` or
      `VarLenFeature` values. These features are associated with a
      `SequenceExample` as a whole.
    sequence_features: A `dict` mapping feature keys to
      `FixedLenSequenceFeature` or `VarLenFeature` values. These features are
      associated with data within the `FeatureList` section of the
      `SequenceExample` proto.
    example_name: A scalar (0-D Tensor) of strings (optional), the name of
      the serialized proto.
    name: A name for this operation (optional).

  Returns:
    A tuple of two `dict`s, each mapping keys to `Tensor`s and
    `SparseTensor`s. The first dict contains the context key/values.
    The second dict contains the feature_list key/values.

  Raises:
    ValueError: if any feature is invalid.
  """
  # pylint: enable=line-too-long
  if not (context_features or sequence_features):
    raise ValueError("Missing features.")
  # Flatten both feature-spec dicts into the parallel lists used by the raw
  # helper; note the sequence side accepts FixedLenSequenceFeature instead of
  # FixedLenFeature.
  (context_sparse_keys, context_sparse_types, context_dense_keys,
   context_dense_types, context_dense_defaults,
   context_dense_shapes) = _features_to_raw_params(
       context_features, [VarLenFeature, FixedLenFeature])
  (feature_list_sparse_keys, feature_list_sparse_types,
   feature_list_dense_keys, feature_list_dense_types,
   feature_list_dense_defaults,
   feature_list_dense_shapes) = _features_to_raw_params(
       sequence_features, [VarLenFeature, FixedLenSequenceFeature])
  return _parse_single_sequence_example_raw(
      serialized, context_sparse_keys, context_sparse_types,
      context_dense_keys, context_dense_types, context_dense_defaults,
      context_dense_shapes, feature_list_sparse_keys,
      feature_list_sparse_types, feature_list_dense_keys,
      feature_list_dense_types, feature_list_dense_shapes,
      feature_list_dense_defaults, example_name, name)


def _parse_single_sequence_example_raw(serialized,
                                       context_sparse_keys=None,
                                       context_sparse_types=None,
                                       context_dense_keys=None,
                                       context_dense_types=None,
                                       context_dense_defaults=None,
                                       context_dense_shapes=None,
                                       feature_list_sparse_keys=None,
                                       feature_list_sparse_types=None,
                                       feature_list_dense_keys=None,
                                       feature_list_dense_types=None,
                                       feature_list_dense_shapes=None,
                                       feature_list_dense_defaults=None,
                                       debug_name=None,
                                       name=None):
  """Parses a single `SequenceExample` proto.

  Args:
    serialized: A scalar (0-D Tensor) of type string, a single binary
      serialized `SequenceExample` proto.
    context_sparse_keys: A list of string keys in the `SequenceExample`'s
      features.  The results for these keys will be returned as
      `SparseTensor` objects.
    context_sparse_types: A list of `DTypes`, the same length as
      `sparse_keys`.  Only `tf.float32` (`FloatList`), `tf.int64`
      (`Int64List`), and `tf.string` (`BytesList`) are supported.
    context_dense_keys: A list of string keys in the examples' features.
      The results for these keys will be returned as `Tensor`s
    context_dense_types: A list of DTypes, same length as
      `context_dense_keys`.  Only `tf.float32` (`FloatList`), `tf.int64`
      (`Int64List`), and `tf.string` (`BytesList`) are supported.
    context_dense_defaults: A dict mapping string keys to `Tensor`s.
      The keys of the dict must match the context_dense_keys of the feature.
    context_dense_shapes: A list of tuples, same length as
      `context_dense_keys`.  The shape of the data for each context_dense
      feature referenced by `context_dense_keys`.  Required for any input
      tensors identified by `context_dense_keys` whose shapes are anything
      other than `[]` or `[1]`.
    feature_list_sparse_keys: A list of string keys in the `SequenceExample`'s
      feature_lists.  The results for these keys will be returned as
      `SparseTensor` objects.
    feature_list_sparse_types: A list of `DTypes`, same length as
      `sparse_keys`.  Only `tf.float32` (`FloatList`), `tf.int64`
      (`Int64List`), and `tf.string` (`BytesList`) are supported.
    feature_list_dense_keys: A list of string keys in the `SequenceExample`'s
      features_lists.  The results for these keys will be returned as
      `Tensor`s.
    feature_list_dense_types: A list of `DTypes`, same length as
      `feature_list_dense_keys`.  Only `tf.float32` (`FloatList`),
      `tf.int64` (`Int64List`), and `tf.string` (`BytesList`) are supported.
    feature_list_dense_shapes: A list of tuples, same length as
      `feature_list_dense_keys`.  The shape of the data for each
      `FeatureList` feature referenced by `feature_list_dense_keys`.
    feature_list_dense_defaults: A dict mapping key strings to values.
      The only currently allowed value is `None`.  Any key appearing in this
      dict with value `None` is allowed to be missing from the
      `SequenceExample`.  If missing, the key is treated as zero-length.
    debug_name: A scalar (0-D Tensor) of strings (optional), the name of
      the serialized proto.
    name: A name for this operation (optional).

  Returns:
    A tuple of two `dict`s, each mapping keys to `Tensor`s and
    `SparseTensor`s. The first dict contains the context key/values.
    The second dict contains the feature_list key/values.

  Raises:
    ValueError: If context_sparse and context_dense key sets intersect,
      if input lengths do not match up, or if a value in
      feature_list_dense_defaults is not None.
    TypeError: if feature_list_dense_defaults is not either None or a dict.
  """
  with ops.op_scope([serialized], name, "ParseSingleSequenceExample"):
    # Canonicalize every optional argument to a concrete empty container so
    # the validation below is uniform.
    context_dense_defaults = (
        {} if context_dense_defaults is None else context_dense_defaults)
    context_sparse_keys = (
        [] if context_sparse_keys is None else context_sparse_keys)
    context_sparse_types = (
        [] if context_sparse_types is None else context_sparse_types)
    context_dense_keys = (
        [] if context_dense_keys is None else context_dense_keys)
    context_dense_types = (
        [] if context_dense_types is None else context_dense_types)
    context_dense_shapes = (
        [[]] * len(context_dense_keys)
        if context_dense_shapes is None else context_dense_shapes)
    feature_list_sparse_keys = (
        [] if feature_list_sparse_keys is None else feature_list_sparse_keys)
    feature_list_sparse_types = (
        [] if feature_list_sparse_types is None else feature_list_sparse_types)
    feature_list_dense_keys = (
        [] if feature_list_dense_keys is None else feature_list_dense_keys)
    feature_list_dense_types = (
        [] if feature_list_dense_types is None else feature_list_dense_types)
    feature_list_dense_shapes = (
        [[]] * len(feature_list_dense_keys)
        if feature_list_dense_shapes is None else feature_list_dense_shapes)
    feature_list_dense_defaults = (
        dict() if feature_list_dense_defaults is None
        else feature_list_dense_defaults)
    debug_name = "" if debug_name is None else debug_name

    # Internal
    feature_list_dense_missing_assumed_empty = []

    num_context_dense = len(context_dense_keys)
    num_feature_list_dense = len(feature_list_dense_keys)
    num_context_sparse = len(context_sparse_keys)
    num_feature_list_sparse = len(feature_list_sparse_keys)

    # Validate all parallel list lengths and key-set disjointness before any
    # graph construction.
    if len(context_dense_shapes) != num_context_dense:
      raise ValueError(
          "len(context_dense_shapes) != len(context_dense_keys): %d vs. %d"
          % (len(context_dense_shapes), num_context_dense))
    if len(context_dense_types) != num_context_dense:
      raise ValueError(
          "len(context_dense_types) != len(num_context_dense): %d vs. %d"
          % (len(context_dense_types), num_context_dense))
    if len(feature_list_dense_shapes) != num_feature_list_dense:
      raise ValueError(
          "len(feature_list_dense_shapes) != len(feature_list_dense_keys): "
          "%d vs. %d" % (len(feature_list_dense_shapes),
                         num_feature_list_dense))
    if len(feature_list_dense_types) != num_feature_list_dense:
      raise ValueError(
          "len(feature_list_dense_types) != len(num_feature_list_dense):"
          "%d vs. %d" % (len(feature_list_dense_types),
                         num_feature_list_dense))
    if len(context_sparse_types) != num_context_sparse:
      raise ValueError(
          "len(context_sparse_types) != len(context_sparse_keys): %d vs. %d"
          % (len(context_sparse_types), num_context_sparse))
    if len(feature_list_sparse_types) != num_feature_list_sparse:
      raise ValueError(
          "len(feature_list_sparse_types) != len(feature_list_sparse_keys): "
          "%d vs. %d"
          % (len(feature_list_sparse_types), num_feature_list_sparse))
    if (num_context_dense + num_context_sparse
        + num_feature_list_dense + num_feature_list_sparse) == 0:
      raise ValueError(
          "Must provide at least one context_sparse key, context_dense key, "
          ", feature_list_sparse key, or feature_list_dense key")
    if not set(context_dense_keys).isdisjoint(set(context_sparse_keys)):
      raise ValueError(
          "context_dense and context_sparse keys must not intersect; "
          "intersection: %s" %
          set(context_dense_keys).intersection(set(context_sparse_keys)))
    if not set(feature_list_dense_keys).isdisjoint(
        set(feature_list_sparse_keys)):
      raise ValueError(
          "feature_list_dense and feature_list_sparse keys must not intersect; "
          "intersection: %s" %
          set(feature_list_dense_keys).intersection(
              set(feature_list_sparse_keys)))
    if not isinstance(feature_list_dense_defaults, dict):
      raise TypeError("feature_list_dense_defaults must be a dict")
    # A key in feature_list_dense_defaults (value must be None) marks that
    # FeatureList as allowed-to-be-missing.
    for k, v in feature_list_dense_defaults.items():
      if v is not None:
        raise ValueError("Value feature_list_dense_defaults[%s] must be None"
                         % k)
      feature_list_dense_missing_assumed_empty.append(k)

    # Build one default-value tensor per context-dense key, converting and
    # reshaping non-Tensor defaults to the declared shape.
    context_dense_defaults_vec = []
    for i, key in enumerate(context_dense_keys):
      default_value = context_dense_defaults.get(key)
      if default_value is None:
        default_value = constant_op.constant([], dtype=context_dense_types[i])
      elif not isinstance(default_value, ops.Tensor):
        key_name = "key_" + re.sub("[^A-Za-z0-9_.\\-/]", "_", key)
        default_value = ops.convert_to_tensor(
            default_value, dtype=context_dense_types[i], name=key_name)
        default_value = array_ops.reshape(
            default_value, context_dense_shapes[i])

      context_dense_defaults_vec.append(default_value)

    # The generated op expects shapes as serialized TensorShape protos.
    context_dense_shapes = [tensor_shape.as_shape(shape).as_proto()
                            for shape in context_dense_shapes]
    feature_list_dense_shapes = [tensor_shape.as_shape(shape).as_proto()
                                 for shape in feature_list_dense_shapes]

    # pylint: disable=protected-access
    outputs = gen_parsing_ops._parse_single_sequence_example(
        serialized=serialized,
        debug_name=debug_name,
        context_dense_defaults=context_dense_defaults_vec,
        context_sparse_keys=context_sparse_keys,
        context_sparse_types=context_sparse_types,
        context_dense_keys=context_dense_keys,
        context_dense_shapes=context_dense_shapes,
        feature_list_sparse_keys=feature_list_sparse_keys,
        feature_list_sparse_types=feature_list_sparse_types,
        feature_list_dense_keys=feature_list_dense_keys,
        feature_list_dense_types=feature_list_dense_types,
        feature_list_dense_shapes=feature_list_dense_shapes,
        feature_list_dense_missing_assumed_empty=(
            feature_list_dense_missing_assumed_empty),
        name=name)
    # pylint: enable=protected-access

    (context_sparse_indices, context_sparse_values,
     context_sparse_shapes, context_dense_values,
     feature_list_sparse_indices, feature_list_sparse_values,
     feature_list_sparse_shapes, feature_list_dense_values) = outputs

    # Re-bundle parallel sparse outputs into SparseTensor objects.
    context_sparse_tensors = [
        ops.SparseTensor(ix, val, shape) for (ix, val, shape)
        in zip(context_sparse_indices,
               context_sparse_values,
               context_sparse_shapes)]

    feature_list_sparse_tensors = [
        ops.SparseTensor(ix, val, shape) for (ix, val, shape)
        in zip(feature_list_sparse_indices,
               feature_list_sparse_values,
               feature_list_sparse_shapes)]

    context_output = dict(
        zip(context_sparse_keys + context_dense_keys,
            context_sparse_tensors + context_dense_values))
    feature_list_output = dict(
        zip(feature_list_sparse_keys + feature_list_dense_keys,
            feature_list_sparse_tensors + feature_list_dense_values))

    return (context_output, feature_list_output)


@ops.RegisterShape("ParseSingleSequenceExample")
def _ParseSingleSequenceExampleShape(op):  # pylint: disable=invalid-name
  """Shape function for the ParseSingleSequenceExample op."""
  op.inputs[0].get_shape().with_rank(0)  # input
  # feature_list_dense_missing_assumed_empty
  op.inputs[1].get_shape().with_rank(1)
  num_context_sparse = op.get_attr("Ncontext_sparse")
  num_context_dense = op.get_attr("Ncontext_dense")
  num_feature_list_dense = op.get_attr("Nfeature_list_dense")
  context_dense_shapes = op.get_attr("context_dense_shapes")
  num_feature_list_sparse = op.get_attr("Nfeature_list_sparse")
  feature_list_dense_shapes = op.get_attr("feature_list_dense_shapes")
  # Context sparse outputs are rank-1 (single example, no batch column).
  context_sparse_index_shapes = [
      tensor_shape.matrix(None, 1) for _ in range(num_context_sparse)]
  context_sparse_value_shapes = [
      tensor_shape.vector(None) for _ in range(num_context_sparse)]
  context_sparse_shape_shapes = [
      tensor_shape.vector(1) for _ in range(num_context_sparse)]
  context_dense_shapes = [
      tensor_shape.TensorShape(dense_shape)
      for dense_shape in context_dense_shapes]
  # Feature-list sparse outputs are rank-2: [time, index].
  feature_list_sparse_index_shapes = [
      tensor_shape.matrix(None, 2) for _ in range(num_feature_list_sparse)]
  feature_list_sparse_value_shapes = [
      tensor_shape.vector(None) for _ in range(num_feature_list_sparse)]
  feature_list_sparse_shape_shapes = [
      tensor_shape.vector(2) for _ in range(num_feature_list_sparse)]
  # Feature-list dense outputs get an unknown leading time dimension T.
  feature_list_dense_shapes = [
      tensor_shape.vector(None).concatenate(dense_shape)
      for dense_shape in feature_list_dense_shapes]
  assert num_context_dense == len(context_dense_shapes)
  assert num_feature_list_dense == len(feature_list_dense_shapes)
  return (context_sparse_index_shapes + context_sparse_value_shapes +
          context_sparse_shape_shapes + context_dense_shapes +
          feature_list_sparse_index_shapes +
          feature_list_sparse_value_shapes +
          feature_list_sparse_shape_shapes +
          feature_list_dense_shapes)


# These ops pass their input shape through unchanged.
ops.RegisterShape("DecodeJSONExample")(common_shapes.unchanged_shape)
ops.RegisterShape("StringToNumber")(common_shapes.unchanged_shape)


@ops.RegisterShape("DecodeRaw")
def _DecodeRawShape(op):  # pylint: disable=invalid-name
  """Shape function for the DecodeRaw op."""
  # NOTE(mrry): Last dimension is data-dependent.
  return [op.inputs[0].get_shape().concatenate([None])]


@ops.RegisterShape("DecodeCSV")
def _DecodeCSVShape(op):  # pylint: disable=invalid-name
  """Shape function for the DecodeCSV op."""
  input_shape = op.inputs[0].get_shape()
  # Optionally check that all of other inputs are scalar or empty.
  for default_input in op.inputs[1:]:
    default_input_shape = default_input.get_shape().with_rank(1)
    if default_input_shape[0] > 1:
      raise ValueError(
          "Shape of a default must be a length-0 or length-1 vector.")
  # One output per record column, each shaped like the input records.
  return [input_shape] * len(op.outputs)
apache-2.0
DavidPowell/OpenModes
openmodes/operator/operator.py
1
8460
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
#  OpenModes - An eigenmode solver for open electromagnetic resonantors
#  Copyright (C) 2013 David Powell
#
#  This program is free software: you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation, either version 3 of the License, or
#  (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program.  If not, see <http://www.gnu.org/licenses/>.
#-----------------------------------------------------------------------------

import numpy as np
import logging

from openmodes.eig import (eig_linearised, eig_newton, poles_cauchy,
                           ConvergenceError)
from openmodes.array import LookupArray


class Operator(object):
    """A base class for operator equations.

    Subclasses are expected to provide (at least) the attributes read here:
    `reciprocal`, `impedance_class`, `basis_container`, `sources`,
    `unknowns`, `frequency_derivatives`, `integration_rule`, and the method
    `impedance_single_parts`.
    """

    def impedance(self, s, parent_o, parent_s, metadata=None):
        """Evaluate the self and mutual impedances of all parts in the
        simulation. Return an `ImpedancePart` object which can calculate
        several derived impedance quantities

        Parameters
        ----------
        s : number
            Complex frequency at which to calculate impedance (in rad/s)
        parent : Part
            Only this part and its sub-parts will be calculated
        metadata : dict, optional
            Extra entries to merge into the impedance matrix metadata

        Returns
        -------
        impedance_matrices : ImpedanceParts
            The impedance matrix object which can represent the impedance of
            the object in several ways.
        """
        metadata = metadata or dict()
        # Symmetry (reciprocity) only applies when observer and source
        # hierarchies are the same object.
        symmetric = self.reciprocal and (parent_o == parent_s)

        Z = self.impedance_class(parent_o, parent_s, self.basis_container,
                                 self.sources, self.unknowns)

        # set the common metadata
        Z.md['s'] = s
        Z.md['symmetric'] = symmetric
        Z.md['operator'] = self
        Z.md.update(metadata)

        for count_o, part_o in enumerate(parent_o.iter_single()):
            for count_s, part_s in enumerate(parent_s.iter_single()):
                if symmetric and count_s < count_o:
                    # Reuse the transpose of the already-computed block
                    # instead of recomputing the lower triangle.
                    Z[part_o, part_s] = Z[part_s, part_o].T
                else:
                    self.impedance_single_parts(Z, s, part_o, part_s)

        return Z

    def gram_matrix(self, part):
        """Create a Gram matrix as a LookupArray.

        The basis Gram matrix is copied onto each (unknown, source) diagonal
        pairing; all other blocks are zero.
        """
        G = self.basis_container[part].gram_matrix
        Gp = LookupArray((self.unknowns, (part, self.basis_container),
                          self.sources, (part, self.basis_container)),
                         dtype=G.dtype)
        Gp[:] = 0.0

        for unknown, source in zip(self.unknowns, self.sources):
            Gp[unknown, :, source, :] = G
        return Gp

    def estimate_poles(self, contour, part, threshold=1e-11,
                       previous_result=None, cauchy_integral=True,
                       modes=None, **kwargs):
        """Estimate pole location for an operator by Cauchy integration or
        the simpler quasi-static method"""
        if not cauchy_integral:
            # Use the simpler quasi-static method (contour will actually
            # just be a starting frequency)
            Z = self.impedance(contour, part, part)
            estimate_s, estimate_vr = eig_linearised(Z, modes)
            # The quasi-static path has no separate left eigenvectors, so the
            # right eigenvectors are reused for 'vl'.
            result = {'s': estimate_s, 'vr': estimate_vr, 'vl': estimate_vr}
        else:
            def Z_func(s):
                # Impedance matrix as a plain ndarray view for the contour
                # integration routine.
                Z = self.impedance(s, part, part)
                return Z.val().simple_view()

            result = poles_cauchy(Z_func, contour, threshold,
                                  previous_result=previous_result, **kwargs)

        return result

    def refine_poles(self, estimates, part, rel_tol, max_iter,
                     iter_wrap=lambda x: x):
        """Find the poles of the operator applied to a specified part

        Parameters
        ----------
        estimates : dictionary
            The data for the estimated poles
        rel_tol : float
            The relative tolerance on the search for singularities
        max_iter : integer
            The maximum number of iterations to use when searching
            for singularities
        iter_wrap : callable, optional
            Wrapper applied to the mode iterator (e.g. a progress bar);
            defaults to identity.

        Returns
        -------
        refined : dictionary
            The refined poles
        """
        #part = estimates['part']
        logging.info("Finding poles for part %s" % str(part.id))

        num_modes = len(estimates['s'])

        refined = {'s': []}
        refined['vr'] = []
        refined['vl'] = []

        # Adaptively check if the operator provides frequency derivatives, and
        # if so use them in the Newton iteration to find the poles.
        if self.frequency_derivatives:
            logging.info("Using exact impedance derivatives")

            def Z_func(s):
                Z = self.impedance(s, part, part)
                return Z.val().simple_view(), \
                    Z.frequency_derivative().simple_view()
        else:
            logging.info("Using approximate impedance derivatives")

            def Z_func(s):
                Z = self.impedance(s, part, part)
                return Z.val().simple_view()

        symmetric = self.reciprocal

        # weight_type = 'max element'
        if symmetric:
            weight_type = 'rayleigh symmetric'
        else:
            weight_type = 'rayleigh asymmetric'

        # Note that mode refers to the position in the array modes, which
        # at this point need not correspond to the original mode numbering
        for mode in iter_wrap(range(num_modes)):
            logging.info("Searching for mode %d" % mode)
            try:
                res = eig_newton(Z_func, estimates['s'][mode],
                                 estimates['vr'][:, mode],
                                 weight=weight_type, lambda_tol=rel_tol,
                                 max_iter=max_iter,
                                 func_gives_der=self.frequency_derivatives,
                                 y_0=estimates['vl'][mode, :])
            except (ConvergenceError, ValueError):
                # NOTE(review): logging.warn is deprecated in favour of
                # logging.warning; kept as-is to avoid a behaviour change.
                logging.warn(
                    "Mode {} convergence failed, mode discarded".format(mode))
                continue

            logging.info("Converged after %d iterations\n"
                         "%+.4e %+.4ej (linearised solution)\n"
                         "%+.4e %+.4ej (nonlinear solution)"
                         % (res['iter_count'],
                            estimates['s'][mode].real,
                            estimates['s'][mode].imag,
                            res['eigval'].real, res['eigval'].imag))
            refined['s'].append(res['eigval'])
            refined['vr'].append(res['eigvec'])
            refined['vl'].append(res['eigvec_left'])

        # convert lists to arrays
        refined['s'] = np.array(refined['s'])
        refined['vr'] = np.array(refined['vr']).T
        refined['vl'] = np.array(refined['vl'])

        return refined

    def source_vector(self, source_field, s, parent, extinction_field=False):
        "Calculate the relevant source vector for this operator"

        if extinction_field:
            fields = self.extinction_fields
        else:
            fields = self.sources

        V = LookupArray((fields, (parent, self.basis_container)),
                        dtype=np.complex128)

        # define the functions to interpolate over the mesh
        def elec_func(r):
            return source_field.electric_field(s, r)

        def mag_func(r):
            return source_field.magnetic_field(s, r)

        for field in fields:
            # Map each field name to the incident-field function; the "nx"
            # prefix selects the cross-product (tangential) weighting.
            if field in ("E", "nxE"):
                field_func = elec_func
                source_cross = field == "nxE"
            elif field in ("H", "nxH"):
                field_func = mag_func
                source_cross = field == "nxH"
            else:
                raise ValueError(field)

            for part in parent.iter_single():
                basis = self.basis_container[part]
                V[field, part] = basis.weight_function(field_func,
                                                       self.integration_rule,
                                                       part.nodes,
                                                       source_cross)

        return V
gpl-3.0
stanchan/jenkins-job-builder
jenkins_jobs/cmd.py
2
13766
#!/usr/bin/env python
# Copyright (C) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Command-line entry point for Jenkins Job Builder.

Parses the ``update``/``test``/``delete``/``delete-all`` sub-commands,
loads configuration (built-in defaults overlaid by an ini file) and
drives :class:`jenkins_jobs.builder.Builder`.
"""

import argparse
import io
from six.moves import configparser, StringIO
import fnmatch
import logging
import os
import platform
import sys
import yaml

import jenkins_jobs.version
from jenkins_jobs.builder import Builder
from jenkins_jobs.errors import JenkinsJobsException

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()

# Built-in defaults; always loaded first, then overridden by the ini file.
DEFAULT_CONF = """
[job_builder]
keep_descriptions=False
ignore_cache=False
recursive=False
exclude=.*
allow_duplicates=False
allow_empty_variables=False

[jenkins]
url=http://localhost:8080/
query_plugins_info=True

[hipchat]
authtoken=dummy
send-as=Jenkins
"""


def confirm(question):
    """Prompt the user with *question*; abort the process unless 'Y'.

    NOTE(review): uses ``raw_input`` — this module is Python 2 only as
    written; verify before running under Python 3.
    """
    answer = raw_input('%s (Y/N): ' % question).upper().strip()
    if not answer == 'Y':
        sys.exit('Aborted')


def recurse_path(root, excludes=None):
    """Return *root* plus every subdirectory under it, honouring *excludes*.

    :param root: directory to walk (resolved to its real path first)
    :param excludes: glob patterns; bare names match any directory name,
        absolute patterns match the absolute path, relative patterns
        (containing a separator) match the path relative to the CWD.
    :returns: list of directory paths, starting with the real path of root
    """
    if excludes is None:
        excludes = []
    basepath = os.path.realpath(root)
    pathlist = [basepath]

    # Split the exclude patterns into the three matching modes described
    # above; a pattern can appear in more than one bucket.
    patterns = [e for e in excludes if os.path.sep not in e]
    absolute = [e for e in excludes if os.path.isabs(e)]
    relative = [e for e in excludes if os.path.sep in e and
                not os.path.isabs(e)]
    for root, dirs, files in os.walk(basepath, topdown=True):
        # Prune excluded directories in place so os.walk skips them.
        dirs[:] = [
            d for d in dirs
            if not any([fnmatch.fnmatch(d, pattern)
                        for pattern in patterns])
            if not any([fnmatch.fnmatch(os.path.abspath(os.path.join(root, d)),
                                        path)
                        for path in absolute])
            if not any([fnmatch.fnmatch(os.path.relpath(os.path.join(root, d)),
                                        path)
                        for path in relative])
        ]
        pathlist.extend([os.path.join(root, path) for path in dirs])

    return pathlist


def create_parser():
    """Build and return the argparse parser with all sub-commands."""
    parser = argparse.ArgumentParser()
    # Shared options for sub-commands that can search directories.
    recursive_parser = argparse.ArgumentParser(add_help=False)
    recursive_parser.add_argument('-r', '--recursive', action='store_true',
                                  dest='recursive', default=False,
                                  help='look for yaml files recursively')
    recursive_parser.add_argument('-x', '--exclude', dest='exclude',
                                  action='append', default=[],
                                  help='paths to exclude when using recursive'
                                       ' search, uses standard globbing.')
    subparser = parser.add_subparsers(help='update, test or delete job',
                                      dest='command')

    # subparser: update
    parser_update = subparser.add_parser('update', parents=[recursive_parser])
    parser_update.add_argument('path', help='colon-separated list of paths to'
                                            ' YAML files or directories')
    parser_update.add_argument('names', help='name(s) of job(s)', nargs='*')
    parser_update.add_argument('--delete-old', help='delete obsolete jobs',
                               action='store_true',
                               dest='delete_old', default=False,)

    # subparser: test
    parser_test = subparser.add_parser('test', parents=[recursive_parser])
    parser_test.add_argument('path', help='colon-separated list of paths to'
                                          ' YAML files or directories',
                             nargs='?', default=sys.stdin)
    parser_test.add_argument('-p', dest='plugins_info_path', default=None,
                             help='path to plugin info YAML file')
    parser_test.add_argument('-o', dest='output_dir', default=sys.stdout,
                             help='path to output XML')
    parser_test.add_argument('name', help='name(s) of job(s)', nargs='*')

    # subparser: delete
    parser_delete = subparser.add_parser('delete', parents=[recursive_parser])
    parser_delete.add_argument('name', help='name of job', nargs='+')
    parser_delete.add_argument('-p', '--path', default=None,
                               help='colon-separated list of paths to'
                                    ' YAML files or directories')

    # subparser: delete-all
    subparser.add_parser('delete-all',
                         help='delete *ALL* jobs from Jenkins server, '
                              'including those not managed by Jenkins Job '
                              'Builder.')

    parser.add_argument('--conf', dest='conf', help='configuration file')
    parser.add_argument('-l', '--log_level', dest='log_level', default='info',
                        help="log level (default: %(default)s)")
    parser.add_argument(
        '--ignore-cache', action='store_true',
        dest='ignore_cache', default=False,
        help='ignore the cache and update the jobs anyhow (that will only '
             'flush the specified jobs cache)')
    parser.add_argument(
        '--flush-cache', action='store_true', dest='flush_cache',
        default=False, help='flush all the cache entries before updating')
    parser.add_argument('--version', dest='version', action='version',
                        version=version(),
                        help='show version')
    parser.add_argument(
        '--allow-empty-variables', action='store_true',
        dest='allow_empty_variables', default=None,
        help='Don\'t fail if any of the variables inside any string are not '
             'defined, replace with empty string instead')

    return parser


def main(argv=None):
    """Program entry point: parse arguments, load config and execute.

    :param argv: argument list; defaults to ``sys.argv[1:]``.
    """
    # We default argv to None and assign to sys.argv[1:] below because having
    # an argument default value be a mutable type in Python is a gotcha. See
    # http://bit.ly/1o18Vff
    if argv is None:
        argv = sys.argv[1:]

    parser = create_parser()
    options = parser.parse_args(argv)
    if not options.command:
        parser.error("Must specify a 'command' to be performed")
    if (options.log_level is not None):
        # Map the textual level (e.g. 'info') to the logging constant;
        # unknown names silently keep the current effective level.
        options.log_level = getattr(logging,
                                    options.log_level.upper(),
                                    logger.getEffectiveLevel())
        logger.setLevel(options.log_level)

    config = setup_config_settings(options)
    execute(options, config)


def setup_config_settings(options):
    """Locate and parse the ini configuration file.

    Search order: ``--conf`` argument, then ``jenkins_jobs.ini`` next to
    this script, then ``/etc/jenkins_jobs/jenkins_jobs.ini``. Built-in
    defaults are loaded first and then overlaid by the chosen file.

    :raises JenkinsJobsException: if no valid file exists and the command
        is not 'test' (test mode can run without a config).
    """
    conf = '/etc/jenkins_jobs/jenkins_jobs.ini'
    if options.conf:
        conf = options.conf
    else:
        # Fallback to script directory
        localconf = os.path.join(os.path.dirname(__file__),
                                 'jenkins_jobs.ini')
        if os.path.isfile(localconf):
            conf = localconf
    config = configparser.ConfigParser()
    # Load default config always
    config.readfp(StringIO(DEFAULT_CONF))
    if os.path.isfile(conf):
        logger.debug("Reading config from {0}".format(conf))
        conffp = io.open(conf, 'r', encoding='utf-8')
        config.readfp(conffp)
    elif options.command == 'test':
        logger.debug("Not requiring config for test output generation")
    else:
        raise JenkinsJobsException(
            "A valid configuration file is required when not run as a test"
            "\n{0} is not a valid .ini file".format(conf))
    return config


def execute(options, config):
    """Run the selected sub-command against Jenkins.

    :param options: parsed argparse namespace
    :param config: ConfigParser produced by :func:`setup_config_settings`
    """
    logger.debug("Config: {0}".format(config))

    # check the ignore_cache setting: first from command line,
    # if not present check from ini file
    ignore_cache = False
    if options.ignore_cache:
        ignore_cache = options.ignore_cache
    elif config.has_option('jenkins', 'ignore_cache'):
        logging.warn('ignore_cache option should be moved to the [job_builder]'
                     ' section in the config file, the one specified in the '
                     '[jenkins] section will be ignored in the future')
        ignore_cache = config.getboolean('jenkins', 'ignore_cache')
    elif config.has_option('job_builder', 'ignore_cache'):
        ignore_cache = config.getboolean('job_builder', 'ignore_cache')

    # Jenkins supports access as an anonymous user, which can be used to
    # ensure read-only behaviour when querying the version of plugins
    # installed for test mode to generate XML output matching what will be
    # uploaded. To enable must pass 'None' as the value for user and password
    # to python-jenkins
    #
    # catching 'TypeError' is a workaround for python 2.6 interpolation error
    # https://bugs.launchpad.net/openstack-ci/+bug/1259631
    try:
        user = config.get('jenkins', 'user')
    except (TypeError, configparser.NoOptionError):
        user = None

    try:
        password = config.get('jenkins', 'password')
    except (TypeError, configparser.NoOptionError):
        password = None

    # Inform the user as to what is likely to happen, as they may specify
    # a real jenkins instance in test mode to get the plugin info to check
    # the XML generated.
    if user is None and password is None:
        logger.info("Will use anonymous access to Jenkins if needed.")
    elif (user is not None and password is None) or (
            user is None and password is not None):
        raise JenkinsJobsException(
            "Cannot authenticate to Jenkins with only one of User and "
            "Password provided, please check your configuration."
        )

    # None -- no timeout, blocking mode; same as setblocking(True)
    # 0.0 -- non-blocking mode; same as setblocking(False) <--- default
    # > 0 -- timeout mode; operations time out after timeout seconds
    # < 0 -- illegal; raises an exception
    # to retain the default must use
    # "timeout=jenkins_jobs.builder._DEFAULT_TIMEOUT" or not set timeout at
    # all.
    timeout = jenkins_jobs.builder._DEFAULT_TIMEOUT
    try:
        timeout = config.getfloat('jenkins', 'timeout')
    except (ValueError):
        raise JenkinsJobsException("Jenkins timeout config is invalid")
    except (TypeError, configparser.NoOptionError):
        pass

    plugins_info = None

    if getattr(options, 'plugins_info_path', None) is not None:
        with io.open(options.plugins_info_path, 'r',
                     encoding='utf-8') as yaml_file:
            plugins_info = yaml.load(yaml_file)
        if not isinstance(plugins_info, list):
            raise JenkinsJobsException("{0} must contain a Yaml list!"
                                       .format(options.plugins_info_path))
    elif (not options.conf or not
            config.getboolean("jenkins", "query_plugins_info")):
        # Without a real config (or with querying disabled) we skip asking
        # Jenkins for its plugin list.
        logger.debug("Skipping plugin info retrieval")
        plugins_info = {}

    if options.allow_empty_variables is not None:
        # Command line overrides the ini setting.
        config.set('job_builder',
                   'allow_empty_variables',
                   str(options.allow_empty_variables))

    builder = Builder(config.get('jenkins', 'url'),
                      user,
                      password,
                      config,
                      jenkins_timeout=timeout,
                      ignore_cache=ignore_cache,
                      flush_cache=options.flush_cache,
                      plugins_list=plugins_info)

    if getattr(options, 'path', None):
        if options.path == sys.stdin:
            logger.debug("Input file is stdin")
            if options.path.isatty():
                key = 'CTRL+Z' if platform.system() == 'Windows' else 'CTRL+D'
                logger.warn(
                    "Reading configuration from STDIN. Press %s to end input.",
                    key)

        # take list of paths
        options.path = options.path.split(os.pathsep)

        do_recurse = (getattr(options, 'recursive', False) or
                      config.getboolean('job_builder', 'recursive'))

        # -x on the command line wins; otherwise fall back to the ini value.
        excludes = [e for elist in options.exclude
                    for e in elist.split(os.pathsep)] or \
            config.get('job_builder', 'exclude').split(os.pathsep)
        paths = []
        for path in options.path:
            if do_recurse and os.path.isdir(path):
                paths.extend(recurse_path(path, excludes))
            else:
                paths.append(path)
        options.path = paths

    if options.command == 'delete':
        for job in options.name:
            builder.delete_job(job, options.path)
    elif options.command == 'delete-all':
        confirm('Sure you want to delete *ALL* jobs from Jenkins server?\n'
                '(including those not managed by Jenkins Job Builder)')
        logger.info("Deleting all jobs")
        builder.delete_all_jobs()
    elif options.command == 'update':
        logger.info("Updating jobs in {0} ({1})".format(
            options.path, options.names))
        jobs, num_updated_jobs = builder.update_job(options.path,
                                                    options.names)
        logger.info("Number of jobs updated: %d", num_updated_jobs)
        if options.delete_old:
            num_deleted_jobs = builder.delete_old_managed()
            logger.info("Number of jobs deleted: %d", num_deleted_jobs)
    elif options.command == 'test':
        builder.update_job(options.path, options.name,
                           output=options.output_dir)


def version():
    """Return the human-readable version banner for --version."""
    return "Jenkins Job Builder version: %s" % \
        jenkins_jobs.version.version_info.version_string()


if __name__ == '__main__':
    sys.path.insert(0, '.')
    main()
apache-2.0
tokyo-jesus/university
src/python/koans/python3/koans/about_sets.py
82
1947
#!/usr/bin/env python # -*- coding: utf-8 -*- from runner.koan import * class AboutSets(Koan): def test_sets_make_keep_lists_unique(self): highlanders = ['MacLeod', 'Ramirez', 'MacLeod', 'Matunas', 'MacLeod', 'Malcolm', 'MacLeod'] there_can_only_be_only_one = set(highlanders) self.assertEqual(__, there_can_only_be_only_one) def test_empty_sets_have_different_syntax_to_populated_sets(self): self.assertEqual(__, {1, 2, 3}) self.assertEqual(__, set()) def test_dictionaries_and_sets_use_same_curly_braces(self): # Note: Sets have only started using braces since Python 3 self.assertEqual(__, {1, 2, 3}.__class__) self.assertEqual(__, {'one': 1, 'two': 2}.__class__) self.assertEqual(__, {}.__class__) def test_creating_sets_using_strings(self): self.assertEqual(__, {'12345'}) self.assertEqual(__, set('12345')) def test_convert_the_set_into_a_list_to_sort_it(self): self.assertEqual(__, sorted(set('12345'))) # ------------------------------------------------------------------ def test_set_have_arithmetic_operators(self): scotsmen = {'MacLeod', 'Wallace', 'Willie'} warriors = {'MacLeod', 'Wallace', 'Leonidas'} self.assertEqual(__, scotsmen - warriors) self.assertEqual(__, scotsmen | warriors) self.assertEqual(__, scotsmen & warriors) self.assertEqual(__, scotsmen ^ warriors) # ------------------------------------------------------------------ def test_we_can_query_set_membership(self): self.assertEqual(__, 127 in {127, 0, 0, 1} ) self.assertEqual(__, 'cow' not in set('apocalypse now') ) def test_we_can_compare_subsets(self): self.assertEqual(__, set('cake') <= set('cherry cake')) self.assertEqual(__, set('cake').issubset(set('cherry cake')) ) self.assertEqual(__, set('cake') > set('pie'))
unlicense
yrobla/nova
nova/openstack/common/rpc/proxy.py
18
6604
# vim: tabstop=4 shiftwidth=4 softtabstop=4

#    Copyright 2012 Red Hat, Inc.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""
A helper class for proxy objects to remote APIs.

For more information about rpc API version numbers, see:
    rpc/dispatcher.py
"""


from nova.openstack.common import rpc


class RpcProxy(object):
    """Thin convenience wrapper over the rpc client API.

    A subclass fixes the topic and default message version once in its
    constructor, so individual call sites only have to build a message
    with make_msg() and pick the delivery primitive (call, cast, ...).
    """

    def __init__(self, topic, default_version):
        """Remember the message routing defaults for this proxy.

        :param topic: topic used for every outgoing message unless a
                      call overrides it.
        :param default_version: API version stamped on outgoing messages
                                when no per-call version is given.
        """
        self.topic = topic
        self.default_version = default_version
        super(RpcProxy, self).__init__()

    def _set_version(self, msg, vers):
        """Stamp *msg* with *vers*, or the proxy default when falsy."""
        if vers:
            msg['version'] = vers
        else:
            msg['version'] = self.default_version

    def _get_topic(self, topic):
        """Return the effective topic: the override, else the default."""
        return topic or self.topic

    @staticmethod
    def make_msg(method, **kwargs):
        """Build the wire-format message dict for a remote *method*."""
        return {'method': method, 'args': kwargs}

    def call(self, context, msg, topic=None, version=None, timeout=None):
        """Invoke a remote method via rpc.call() and return its result.

        :param context: The request context
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param timeout: (Optional) A timeout to use when waiting for the
               response.  If no timeout is specified, a default timeout
               will be used that is usually sufficient.
        :param version: (Optional) Override the requested API version in
               this message.

        :returns: The return value from the remote method.
        """
        self._set_version(msg, version)
        destination = self._get_topic(topic)
        return rpc.call(context, destination, msg, timeout)

    def multicall(self, context, msg, topic=None, version=None, timeout=None):
        """Invoke a remote method via rpc.multicall().

        :param context: The request context
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param timeout: (Optional) A timeout to use when waiting for the
               response.  If no timeout is specified, a default timeout
               will be used that is usually sufficient.
        :param version: (Optional) Override the requested API version in
               this message.

        :returns: An iterator that lets you process each of the returned
                  values from the remote method as they arrive.
        """
        self._set_version(msg, version)
        destination = self._get_topic(topic)
        return rpc.multicall(context, destination, msg, timeout)

    def cast(self, context, msg, topic=None, version=None):
        """Fire-and-forget a remote method via rpc.cast().

        :param context: The request context
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in
               this message.

        :returns: None.  rpc.cast() does not wait on any return value
                  from the remote method.
        """
        self._set_version(msg, version)
        rpc.cast(context, self._get_topic(topic), msg)

    def fanout_cast(self, context, msg, topic=None, version=None):
        """Fire-and-forget to all consumers via rpc.fanout_cast().

        :param context: The request context
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in
               this message.

        :returns: None.  rpc.fanout_cast() does not wait on any return
                  value from the remote method.
        """
        self._set_version(msg, version)
        rpc.fanout_cast(context, self._get_topic(topic), msg)

    def cast_to_server(self, context, server_params, msg, topic=None,
                       version=None):
        """Fire-and-forget to a specific server via rpc.cast_to_server().

        :param context: The request context
        :param server_params: Server parameters.  See rpc.cast_to_server()
               for details.
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in
               this message.

        :returns: None.  rpc.cast_to_server() does not wait on any
                  return values.
        """
        self._set_version(msg, version)
        rpc.cast_to_server(context, server_params, self._get_topic(topic),
                           msg)

    def fanout_cast_to_server(self, context, server_params, msg, topic=None,
                              version=None):
        """Fan out to a specific server via rpc.fanout_cast_to_server().

        :param context: The request context
        :param server_params: Server parameters.  See rpc.cast_to_server()
               for details.
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in
               this message.

        :returns: None.  rpc.fanout_cast_to_server() does not wait on any
                  return values.
        """
        self._set_version(msg, version)
        rpc.fanout_cast_to_server(context, server_params,
                                  self._get_topic(topic), msg)
apache-2.0
akuks/pretix
src/pretix/control/middleware.py
1
3279
from urllib.parse import urlparse

from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.urlresolvers import get_script_prefix, resolve
from django.http import Http404
from django.shortcuts import resolve_url
from django.utils.encoding import force_str
from django.utils.translation import ugettext as _

from pretix.base.models import Event, EventPermission, Organizer


class PermissionMiddleware:
    """
    This middleware enforces all requests to the control app to require login.
    Additionally, it enforces all requests to "control:event." URLs
    to be for an event the user has basic access to.
    """

    # URL names under /control that are reachable without authentication.
    EXCEPTIONS = (
        "auth.login",
        "auth.register"
    )

    def process_request(self, request):
        """Gate every /control request: require login, then resolve and
        attach request.event / request.eventperm / request.organizer when
        the URL names them; 404 if the user lacks permission."""
        url = resolve(request.path_info)
        url_name = url.url_name
        # Only guard the control app, and let the login/register views pass.
        if not request.path.startswith(get_script_prefix() + 'control') or url_name in self.EXCEPTIONS:
            return
        if not request.user.is_authenticated():
            # Taken from django/contrib/auth/decorators.py
            path = request.build_absolute_uri()
            # urlparse chokes on lazy objects in Python 3, force to str
            resolved_login_url = force_str(
                resolve_url(settings.LOGIN_URL_CONTROL))
            # If the login url is the same scheme and net location then just
            # use the path as the "next" url.
            login_scheme, login_netloc = urlparse(resolved_login_url)[:2]
            current_scheme, current_netloc = urlparse(path)[:2]
            if ((not login_scheme or login_scheme == current_scheme) and
                    (not login_netloc or login_netloc == current_netloc)):
                path = request.get_full_path()
            from django.contrib.auth.views import redirect_to_login
            return redirect_to_login(
                path, resolved_login_url, REDIRECT_FIELD_NAME)

        # Cache the user's events on the user object for later view/template
        # use (queryset is lazy; evaluated on first access).
        request.user.events_cache = request.user.events.current.order_by(
            "organizer", "date_from").prefetch_related("organizer")
        if 'event' in url.kwargs and 'organizer' in url.kwargs:
            try:
                # Filtering on permitted__id enforces that the user has at
                # least basic access to the event; [0] raises IndexError
                # when no such event is visible.
                request.event = Event.objects.current.filter(
                    slug=url.kwargs['event'],
                    permitted__id__exact=request.user.id,
                    organizer__slug=url.kwargs['organizer'],
                ).select_related('organizer')[0]
                request.eventperm = EventPermission.objects.current.get(
                    event=request.event,
                    user=request.user
                )
                request.organizer = request.event.organizer
            except IndexError:
                # Deliberately indistinguishable from a missing event so we
                # don't leak which events exist.
                raise Http404(_("The selected event was not found or you "
                                "have no permission to administrate it."))
        elif 'organizer' in url.kwargs:
            try:
                request.organizer = Organizer.objects.current.filter(
                    slug=url.kwargs['organizer'],
                    permitted__id__exact=request.user.id,
                )[0]
            except IndexError:
                raise Http404(_("The selected organizer was not found or you "
                                "have no permission to administrate it."))
apache-2.0
resmo/ansible
test/units/module_utils/common/test_network.py
103
1618
# -*- coding: utf-8 -*-
# (c) 2017 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

# Make coding more python3-ish
from __future__ import absolute_import, division, print_function
__metaclass__ = type

import pytest

from ansible.module_utils.common.network import (
    to_masklen,
    to_netmask,
    to_subnet,
    to_ipv6_network,
    is_masklen,
    is_netmask
)


def test_to_masklen():
    # A /24 dotted netmask maps back to prefix length 24.
    assert to_masklen('255.255.255.0') == 24


def test_to_masklen_invalid():
    # A bare octet is not a netmask.
    with pytest.raises(ValueError):
        to_masklen('255')


def test_to_netmask():
    # Both int and str prefix lengths are accepted.
    assert to_netmask(8) == '255.0.0.0'
    assert to_netmask('8') == '255.0.0.0'


def test_to_netmask_invalid():
    # IPv4 prefix lengths stop at 32.
    with pytest.raises(ValueError):
        to_netmask(128)


def test_to_subnet():
    # Default output is CIDR notation.
    assert to_subnet('192.168.1.1', 24) == '192.168.1.0/24'

    # dotted_notation spells the mask out instead.
    assert to_subnet('192.168.1.1', 24, dotted_notation=True) == \
        '192.168.1.0 255.255.255.0'


def test_to_subnet_invalid():
    with pytest.raises(ValueError):
        to_subnet('foo', 'bar')


def test_is_masklen():
    # Valid range is 0..32; non-numeric input is rejected.
    assert is_masklen(32)
    assert not is_masklen(33)
    assert not is_masklen('foo')


def test_is_netmask():
    # Only a dotted-quad string qualifies as a netmask.
    assert is_netmask('255.255.255.255')
    assert not is_netmask(24)
    assert not is_netmask('foo')


def test_to_ipv6_network():
    # The /64 network prefix is extracted; already-short input is kept.
    assert to_ipv6_network('2001:db8::') == '2001:db8::'
    assert to_ipv6_network('2001:0db8:85a3:0000:0000:8a2e:0370:7334') == '2001:0db8:85a3::'
    assert to_ipv6_network('2001:0db8:85a3:0:0:8a2e:0370:7334') == '2001:0db8:85a3::'
gpl-3.0
pytest-dev/execnet
doc/example/sysinfo.py
1
4880
# -*- coding: utf-8 -*-
"""
sysinfo.py [host1] [host2] [options]

obtain system info from remote machine.

(c) Holger Krekel, MIT license
"""
# NOTE: this script is Python 2 only (print >> statements, 'commands'
# module used in the remote snippets).
import optparse
import re
import sys

import execnet
import py

parser = optparse.OptionParser(usage=__doc__)
parser.add_option(
    "-f",
    "--sshconfig",
    action="store",
    dest="ssh_config",
    default=None,
    help="use given ssh config file,"
    " and add info all contained hosts for getting info",
)
parser.add_option(
    "-i",
    "--ignore",
    action="store",
    dest="ignores",
    default=None,
    help="ignore hosts "
    "(useful if the list of hostnames come from a file list)",
)


def parsehosts(path):
    """Return the host aliases listed as 'Host <name>' lines in the ssh
    config file at *path*."""
    path = py.path.local(path)
    l = []
    rex = re.compile(r"Host\s*(\S+)")
    for line in path.readlines():
        m = rex.match(line)
        if m is not None:
            (sshname,) = m.groups()
            l.append(sshname)
    return l


class RemoteInfo:
    """Queries a remote interpreter through an execnet gateway, caching
    each executed snippet's result."""

    def __init__(self, gateway):
        self.gw = gateway
        self._cache = {}

    def exreceive(self, execstring):
        """Execute *execstring* remotely and return the single value it
        sends back; repeated snippets are served from the cache."""
        if execstring not in self._cache:
            channel = self.gw.remote_exec(execstring)
            self._cache[execstring] = channel.receive()
        return self._cache[execstring]

    def getmodattr(self, modpath):
        """Return the value of dotted attribute *modpath* (e.g.
        'sys.platform') evaluated on the remote side."""
        module = modpath.split(".")[0]
        return self.exreceive(
            """
            import %s
            channel.send(%s)
        """
            % (module, modpath)
        )

    def islinux(self):
        """True if the remote platform string contains 'linux'."""
        return self.getmodattr("sys.platform").find("linux") != -1

    def getfqdn(self):
        """Return the remote host's fully-qualified domain name."""
        return self.exreceive(
            """
            import socket
            channel.send(socket.getfqdn())
        """
        )

    def getmemswap(self):
        """Return (mem, swap) totals parsed from remote 'free' output,
        or None on non-Linux hosts."""
        if self.islinux():
            return self.exreceive(
                r"""
                import commands, re
                out = commands.getoutput("free")
                mem = re.search(r"Mem:\s+(\S*)", out).group(1)
                swap = re.search(r"Swap:\s+(\S*)", out).group(1)
                channel.send((mem, swap))
            """
            )

    def getcpuinfo(self):
        """Return (numcpus, model) from remote /proc/cpuinfo, or None on
        non-Linux hosts."""
        if self.islinux():
            return self.exreceive(
                """
                # a hyperthreaded cpu core only counts as 1, although it
                # is present as 2 in /proc/cpuinfo.  Counting it as 2 is
                # misleading because it is *by far* not as efficient as
                # two independent cores.
                cpus = {}
                cpuinfo = {}
                f = open("/proc/cpuinfo")
                lines = f.readlines()
                f.close()
                for line in lines + ['']:
                    if line.strip():
                        key, value = line.split(":", 1)
                        cpuinfo[key.strip()] = value.strip()
                    else:
                        corekey = (cpuinfo.get("physical id"),
                                   cpuinfo.get("core id"))
                        cpus[corekey] = 1
                numcpus = len(cpus)
                model = cpuinfo.get("model name")
                channel.send((numcpus, model))
            """
            )


def debug(*args):
    print >>sys.stderr, " ".join(map(str, args))


def error(*args):
    debug("ERROR", args[0] + ":", *args[1:])


def getinfo(sshname, ssh_config=None, loginfo=sys.stdout):
    """Connect to *sshname*, print its system info to *loginfo* and
    return the RemoteInfo (or None if the connection failed)."""
    if ssh_config:
        spec = "ssh=-F {} {}".format(ssh_config, sshname)
    else:
        # BUG FIX: this branch previously did `spec += ...` on a local
        # that was never bound, raising UnboundLocalError whenever no
        # --sshconfig was given.  Assign instead of augment.
        spec = "ssh=%s" % sshname
    debug("connecting to", repr(spec))
    try:
        gw = execnet.makegateway(spec)
    except IOError:
        error("could not get sshgatway", sshname)
    else:
        ri = RemoteInfo(gw)
        # print "%s info:" % sshname
        prefix = sshname.upper() + " "
        print >>loginfo, prefix, "fqdn:", ri.getfqdn()
        for attr in ("sys.platform", "sys.version_info"):
            loginfo.write("{} {}: ".format(prefix, attr))
            loginfo.flush()
            value = ri.getmodattr(attr)
            loginfo.write(str(value))
            loginfo.write("\n")
            loginfo.flush()
        memswap = ri.getmemswap()
        if memswap:
            mem, swap = memswap
            print >>loginfo, prefix, "Memory:", mem, "Swap:", swap
        cpuinfo = ri.getcpuinfo()
        if cpuinfo:
            numcpu, model = cpuinfo
            print >>loginfo, prefix, "number of cpus:", numcpu
            print >>loginfo, prefix, "cpu model", model
        return ri


if __name__ == "__main__":
    options, args = parser.parse_args()
    hosts = list(args)
    ssh_config = options.ssh_config
    if ssh_config:
        hosts.extend(parsehosts(ssh_config))
    ignores = options.ignores or ()
    if ignores:
        ignores = ignores.split(",")
    for host in hosts:
        if host not in ignores:
            getinfo(host, ssh_config=ssh_config)
mit
rmboggs/boo
lib/antlr-2.7.5/examples/python/linkChecker/LinkChecker.py
21
7759
# Link checker for HTML documents: walks href/img references reported by an
# ANTLR-generated lexer and verifies the referenced files exist on disk with
# matching case.  Python 2 only (old-style `except E, e:`, `file()`,
# `apply()`, `string.lower`).
import sys
import os, os.path
import string
import traceback

import antlr

# Backfill the True/False builtins for pre-2.3 interpreters.
version = sys.version.split()[0]
if version < '2.2.1':
    False = 0
if version < '2.3':
    True = not False

import LinkExtractor


class LinkListener:
    """Callback interface the lexer invokes for each extracted link."""

    def hrefReference(self, target, line):
        raise NotImplementedError

    def imageReference(self, imageFileName, line):
        raise NotImplementedError


class LinkChecker(LinkListener):
    ### Record which files we have seen so that we don't get into an
    # infinite loop and for efficiency.  The absolute path is stored here
    # to uniquely identify the files.  That is, a file can be arrived
    # at from many different locations such as help.html from .
    # and ../help.html from a directory below.
    #
    # This table is shared by all instances of LinkChecker.
    #
    visited = {}

    ### A table of the images visited by any document; a cache of correctness
    imgVisited = {}

    recursionDepth = 0
    separator = "/"  # not OS sensitive in HTML
    localSeparator = None

    def __init__(self, document):
        # document: path of the HTML file to check; references are resolved
        # relative to self.directory (always ".").
        self.document = document
        self.directory = "."
        LinkChecker.localSeparator = os.sep

    def checkLinkRules(self, fName, line):
        """Return True if *fName* passes the link rules (correct case,
        not absolute); report errors via self.error otherwise."""
        # Check case of path (check for UNIX compatibility on a PC)!
        # NOTE(review): `separator` is referenced unqualified below; only
        # LinkChecker.separator exists, so this raises NameError at
        # runtime -- confirm intended spelling.
        offensive = LinkChecker.offensivePathMember(self.directory + separator + fName)
        if offensive:
            file_ = ''
            try:
                f = file(offensive)
                file_ = os.path.normcase(offensive)
                self.error("Case mismatch in reference " +
                           fName + ":" + os.sep +
                           "\treal name is " + os.path.basename(file_) + os.sep +
                           "\treal absolute path is " + file_, line);
                return False
            except Exception, e:
                self.error("internal error: cannot get canonical name for " +
                           offensive, line);
        if LinkChecker.pathIsAbsolute(fName):
            self.error("Reference to " + fName + " with absolute path", line);
            return False;
        return True

    def doCheck(self):
        """Lex self.document, firing href/image callbacks for every link."""
        if self.document[-5:] != ".html":
            return
        # prevent infinite recursion to this file
        if LinkChecker.isVisited(self.document):
            return
        LinkChecker.visit(self.document)
        LinkChecker.recursionDepth += 1
        f = file(self.document)
        lexer = LinkExtractor.Lexer(f)
        lexer.addLinkListener(self)
        # this will parse whole file since all tokens are skipped
        lexer.nextToken()
        LinkChecker.recursionDepth -= 1

    def error(self, err, line):
        """Report *err* at *line* of the current document on stderr."""
        d = "<internal error>"
        try:
            # f = file(self.document)
            d = os.path.normcase(self.document)
        except Exception, e:
            sys.stderr.write("internal error: cannot find file that has error\n")
            sys.exit(0)
        sys.stderr.write(d + ":" + str(line) + ": error: " + err + '\n')

    def pathIsAbsolute(path):
        # NOTE(review): indexing path[1] raises IndexError on paths shorter
        # than two characters -- verify callers never pass such paths.
        return path[0] == '/' or path[1] == ':'
    pathIsAbsolute = staticmethod(pathIsAbsolute)

    def fileProtocolURL(target):
        # True for plain file references (no scheme, not mailto:/news:).
        return target.find("://") == -1 and \
            not target[:7] == "mailto:" and \
            not target[:5] == "news:"
    fileProtocolURL = staticmethod(fileProtocolURL)

    def getParent(path):
        # NOTE(review): os.path.join is applied to a one-element tuple here,
        # which returns the tuple unchanged rather than a string;
        # os.path.dirname(path) was probably intended -- verify.
        return os.path.join(os.path.split(path)[:-1])
    getParent = staticmethod(getParent)

    def hrefReference(self, target, line):
        """Handle an <a href=...> reference: verify it exists locally and
        recursively check the target document."""
        sys.stdout.write(self.document + ":" + str(line) +
                         ": href to " + target + '\n')
        # recursively check the target document unless non-file ref
        if LinkChecker.fileProtocolURL(target):
            # prune off any #name reference on end of file
            pound = target.find('#')
            path = target
            if pound != -1:
                path = target[:pound]  # rip off #name on end, leave file
            if not len(path):
                return  # ref to name in this file
            # first check existence on disk
            f = self.directory + os.sep + path
            if not os.path.exists(f):
                self.error("Reference to missing file " + path, line)
                return
            # check the case
            self.checkLinkRules(path, line);
            try:
                # Link is ok, now follow the link
                # NOTE(review): LinkChecker has no `Lexer` attribute;
                # presumably this should construct LinkChecker(...) --
                # confirm against the original Java source.
                chk = LinkChecker.Lexer(self.directory + os.sep + path)
                chk.doCheck()
            except Exception, e:
                self.error("Document does not exist: " + target, line)

    def imageLinkIsOk(file_):
        # True if this image path has already been verified.
        # f = file(file_)
        f = os.path.normcase(file_)
        b = f in LinkChecker.imgVisited.keys()
        if b:
            return True
        return False
    imageLinkIsOk = staticmethod(imageLinkIsOk)

    def imageReference(self, imageFileName, line):
        """Handle an <img src=...> reference: verify existence and case,
        then cache it as checked."""
        # first check if we have seen this exact file
        try:
            if LinkChecker.imageLinkIsOk(self.directory + os.sep + imageFileName):
                return
            f = self.directory + os.sep + imageFileName
            if not os.path.exists(f):
                self.error("Reference to missing file " + imageFileName, line);
                return;
            if self.checkLinkRules(imageFileName, line):
                LinkChecker.visitImage(self.directory + os.sep + imageFileName)
        except Exception, e:
            sys.stderr.write("internal error: " + str(e) + '\n')

    ###
    # Given a path to a file or dir, is the case of the reference
    # the same as the actual path on the disk?  This is only
    # meaningful on a PC which is case-insensitive (not a real
    # file system).
    #
    # Returns null if there is nothing offensive and the file exists.
    # Returns offending file/dir if it does not exist or
    # it has there is a case mismatch for it.  The last file is checked
    # first followed by the parent directory, recursively, all the way
    # to the absolute or relative path root in that String; i.e., we parse
    # from right to left.
    #
    # Because the File object won't actually go get the real filename
    # from the disk so we can compare, we must get a directory listing
    # of the directory and then look for the referenced file or dir.
    # For example, for "./images/logo.gif" we would check "./images" dir
    # listing for "logo.gif" with the appropriate case and then check
    # directory "." for a dir called images with the right case.  When
    # no parent exists, we can stop looking for case problems.
    def offensivePathMember(fName):
        sys.stdout.write("caseMismatch(" + fName + ")\n");
        # have we reached the root? (stopping condition)
        if not fName or not LinkChecker.getParent(fName):
            return None
        parent = LinkChecker.getParent(fName)
        fName = os.path.basename(fName)
        # f = file(parent)
        parentFiles = os.path.split(parent)
        sys.stdout.write("checking dir " + parent + " for " + fName + '\n')
        # handle weird stuff like "c:/doc/../foo"; skip this parent dir
        if fName == "..":
            return LinkChecker.offensivePathMember(LinkChecker.getParent(parent))
        for i in range(len(parentFiles)):
            sys.stdout.write("is it " + parentFiles[i] + "?\n")
            if string.lower(parentFiles[i]) == fName:
                if not parentFiles[i] == fName:
                    sys.stdout.write("case mismatch " + fName +
                                     " in " + parent + '\n')
                    return parent + LinkChecker.separator + fName
                # found a match, verify parent is ok
                return LinkChecker.offensivePathMember(parent)
        sys.stdout.write("can't find " + fName + " in " + parent + '\n')
        return parent + LinkChecker.separator + fName
    offensivePathMember = staticmethod(offensivePathMember)

    def visit(file_):
        # Mark a document as processed (keyed by normalized case).
        # f = file(file_)
        f = os.path.normcase(file_)
        LinkChecker.visited[f] = True
    visit = staticmethod(visit)

    def isVisited(file_):
        # f = file(file_)
        f = os.path.normcase(file_)
        return f in LinkChecker.visited.keys()
    isVisited = staticmethod(isVisited)

    def visitImage(file_):
        # Cache an image path as already verified.
        # f = file(file_)
        f = os.path.normcase(file_)
        sys.stdout.write("caching image " + f + '\n')
        LinkChecker.imgVisited[f] = True
    visitImage = staticmethod(visitImage)


class Main:
    """Entry point: check the document named on the command line."""

    def __init__(self):
        chk = LinkChecker(sys.argv[1])
        try:
            chk.doCheck()
        except Exception, e:
            sys.stderr.write("Exception: " + str(e) + '\n');
            apply(traceback.print_exception, sys.exc_info())


if __name__ == "__main__":
    Main()
bsd-3-clause
franosincic/edx-platform
lms/djangoapps/ccx/tests/test_overrides.py
6
6980
# coding=UTF-8
"""
tests for overrides
"""
import datetime
import mock
import pytz

from nose.plugins.attrib import attr
from courseware.field_overrides import OverrideFieldData
from django.test.utils import override_settings
from lms.djangoapps.courseware.tests.test_field_overrides import inject_field_overrides
from request_cache.middleware import RequestCache
from student.tests.factories import AdminFactory
from xmodule.modulestore.tests.django_utils import (
    ModuleStoreTestCase, TEST_DATA_SPLIT_MODULESTORE)
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory

from lms.djangoapps.ccx.models import CustomCourseForEdX
from lms.djangoapps.ccx.overrides import override_field_for_ccx
from lms.djangoapps.ccx.tests.utils import flatten, iter_blocks


@attr('shard_1')
@override_settings(FIELD_OVERRIDE_PROVIDERS=(
    'ccx.overrides.CustomCoursesForEdxOverrideProvider',))
class TestFieldOverrides(ModuleStoreTestCase):
    """
    Make sure field overrides behave in the expected manner.
    """
    MODULESTORE = TEST_DATA_SPLIT_MODULESTORE

    def setUp(self):
        """
        Set up tests: build a small course tree (chapters > sequentials >
        verticals > blocks), create a CCX for it, and patch
        get_current_ccx so the override provider sees it.
        """
        super(TestFieldOverrides, self).setUp()
        self.course = course = CourseFactory.create()
        self.course.enable_ccx = True

        # Create a course outline
        self.mooc_start = start = datetime.datetime(
            2010, 5, 12, 2, 42, tzinfo=pytz.UTC)
        self.mooc_due = due = datetime.datetime(
            2010, 7, 7, 0, 0, tzinfo=pytz.UTC)
        chapters = [ItemFactory.create(start=start, parent=course)
                    for _ in xrange(2)]
        sequentials = flatten([
            [ItemFactory.create(parent=chapter) for _ in xrange(2)]
            for chapter in chapters])
        verticals = flatten([
            [ItemFactory.create(due=due, parent=sequential)
             for _ in xrange(2)]
            for sequential in sequentials])
        blocks = flatten([  # pylint: disable=unused-variable
            [ItemFactory.create(parent=vertical) for _ in xrange(2)]
            for vertical in verticals])

        self.ccx = ccx = CustomCourseForEdX(
            course_id=course.id,
            display_name='Test CCX',
            coach=AdminFactory.create())
        ccx.save()

        # Make the override provider resolve the "current" CCX to ours.
        patch = mock.patch('ccx.overrides.get_current_ccx')
        self.get_ccx = get_ccx = patch.start()
        get_ccx.return_value = ccx
        self.addCleanup(patch.stop)
        self.addCleanup(RequestCache.clear_request_cache)

        inject_field_overrides(iter_blocks(ccx.course), course,
                               AdminFactory.create())

        def cleanup_provider_classes():
            """
            After everything is done, clean up by un-doing the change to the
            OverrideFieldData object that is done during the wrap method.
            """
            OverrideFieldData.provider_classes = None
        self.addCleanup(cleanup_provider_classes)

    def test_override_start(self):
        """
        Test that overriding start date on a chapter works.
        """
        ccx_start = datetime.datetime(2014, 12, 25, 00, 00, tzinfo=pytz.UTC)
        chapter = self.ccx.course.get_children()[0]
        override_field_for_ccx(self.ccx, chapter, 'start', ccx_start)
        self.assertEquals(chapter.start, ccx_start)

    def test_override_num_queries_new_field(self):
        """
        Test that for creating new field executed only create query
        """
        ccx_start = datetime.datetime(2014, 12, 25, 00, 00, tzinfo=pytz.UTC)
        chapter = self.ccx.course.get_children()[0]
        # One outer SAVEPOINT/RELEASE SAVEPOINT pair around everything caused
        # by the transaction.atomic decorator wrapping override_field_for_ccx.
        # One SELECT and one INSERT.
        # One inner SAVEPOINT/RELEASE SAVEPOINT pair around the INSERT caused
        # by the transaction.atomic down in Django's
        # get_or_create()/_create_object_from_params().
        with self.assertNumQueries(6):
            override_field_for_ccx(self.ccx, chapter, 'start', ccx_start)

    def test_override_num_queries_update_existing_field(self):
        """
        Test that overriding existing field executed create, fetch and update
        queries.
        """
        ccx_start = datetime.datetime(2014, 12, 25, 00, 00, tzinfo=pytz.UTC)
        new_ccx_start = datetime.datetime(2015, 12, 25, 00, 00, tzinfo=pytz.UTC)
        chapter = self.ccx.course.get_children()[0]
        override_field_for_ccx(self.ccx, chapter, 'start', ccx_start)
        with self.assertNumQueries(3):
            override_field_for_ccx(self.ccx, chapter, 'start', new_ccx_start)

    def test_override_num_queries_field_value_not_changed(self):
        """
        Test that if value of field does not changed no query execute.
        """
        ccx_start = datetime.datetime(2014, 12, 25, 00, 00, tzinfo=pytz.UTC)
        chapter = self.ccx.course.get_children()[0]
        override_field_for_ccx(self.ccx, chapter, 'start', ccx_start)
        with self.assertNumQueries(2):    # 2 savepoints
            override_field_for_ccx(self.ccx, chapter, 'start', ccx_start)

    def test_overriden_field_access_produces_no_extra_queries(self):
        """
        Test no extra queries when accessing an overriden field more than once.
        """
        ccx_start = datetime.datetime(2014, 12, 25, 00, 00, tzinfo=pytz.UTC)
        chapter = self.ccx.course.get_children()[0]
        # One outer SAVEPOINT/RELEASE SAVEPOINT pair around everything caused
        # by the transaction.atomic decorator wrapping override_field_for_ccx.
        # One SELECT and one INSERT.
        # One inner SAVEPOINT/RELEASE SAVEPOINT pair around the INSERT caused
        # by the transaction.atomic down in Django's
        # get_or_create()/_create_object_from_params().
        with self.assertNumQueries(6):
            override_field_for_ccx(self.ccx, chapter, 'start', ccx_start)

    def test_override_is_inherited(self):
        """
        Test that sequentials inherit overridden start date from chapter.
        """
        ccx_start = datetime.datetime(2014, 12, 25, 00, 00, tzinfo=pytz.UTC)
        chapter = self.ccx.course.get_children()[0]
        override_field_for_ccx(self.ccx, chapter, 'start', ccx_start)
        self.assertEquals(chapter.get_children()[0].start, ccx_start)
        self.assertEquals(chapter.get_children()[1].start, ccx_start)

    def test_override_is_inherited_even_if_set_in_mooc(self):
        """
        Test that a due date set on a chapter is inherited by grandchildren
        (verticals) even if a due date is set explicitly on grandchildren in
        the mooc.
        """
        ccx_due = datetime.datetime(2015, 1, 1, 00, 00, tzinfo=pytz.UTC)
        chapter = self.ccx.course.get_children()[0]
        chapter.display_name = 'itsme!'
        override_field_for_ccx(self.ccx, chapter, 'due', ccx_due)
        vertical = chapter.get_children()[0].get_children()[0]
        self.assertEqual(vertical.due, ccx_due)
agpl-3.0
cvandeplas/plaso
plaso/parsers/asl.py
1
16248
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Apple System Log Parser."""

import construct
import logging
import os

from plaso.lib import errors
from plaso.lib import event
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers import interface
from plaso.parsers import manager


__author__ = 'Joaquin Moreno Garijo (Joaquin.MorenoGarijo.2013@live.rhul.ac.uk)'

# TODO: get the real name for the user of the group having the uid or gid.


class AslEvent(event.EventObject):
  """Convenience class for an ASL event."""

  DATA_TYPE = 'mac:asl:event'

  def __init__(
      self, timestamp, record_position, message_id, level, record_header,
      read_uid, read_gid, computer_name, sender, facility, message,
      extra_information):
    """Initializes the event object.

    Args:
      timestamp: timestamp of the entry.
      record_position: position where the record starts.
      message_id: identification value for an ASL message.
      level: level of criticality.
      record_header: header of the entry; its pid, uid and gid attributes
                     identify the process that logged the entry.
      read_uid: the user ID that can read this file; 'ALL' if everyone can.
      read_gid: the group ID that can read this file; 'ALL' if everyone can.
      computer_name: name of the host.
      sender: the process that inserted the event.
      facility: the part of the sender that created the event.
      message: message of the event.
      extra_information: extra fields associated to each entry.
    """
    super(AslEvent, self).__init__()
    self.pid = record_header.pid
    self.user_sid = unicode(record_header.uid)
    self.group_id = record_header.gid
    self.timestamp = timestamp
    self.timestamp_desc = eventdata.EventTimestamp.CREATION_TIME
    self.record_position = record_position
    self.message_id = message_id
    self.level = level
    self.read_uid = read_uid
    self.read_gid = read_gid
    self.computer_name = computer_name
    self.sender = sender
    self.facility = facility
    self.message = message
    self.extra_information = extra_information


class AslParser(interface.BaseParser):
  """Parser for ASL log files."""

  NAME = 'asl_log'
  DESCRIPTION = u'Parser for ASL log files.'

  ASL_MAGIC = 'ASL DB\x00\x00\x00\x00\x00\x00'

  # If no read restriction is assigned, the stored value is 0xffffffff (-1).
  ASL_NO_RIGHTS = 'ffffffff'

  # Priority level (criticality).
  ASL_MESSAGE_PRIORITY = {
      0 : 'EMERGENCY',
      1 : 'ALERT',
      2 : 'CRITICAL',
      3 : 'ERROR',
      4 : 'WARNING',
      5 : 'NOTICE',
      6 : 'INFO',
      7 : 'DEBUG'}

  # ASL file header.
  # magic: magic number that identifies ASL files.
  # version: version of the file.
  # offset: first record in the file.
  # timestamp: epoch time when the first entry was written.
  # last_offset: last record in the file.
  ASL_HEADER_STRUCT = construct.Struct(
      'asl_header_struct',
      construct.String('magic', 12),
      construct.UBInt32('version'),
      construct.UBInt64('offset'),
      construct.UBInt64('timestamp'),
      construct.UBInt32('cache_size'),
      construct.UBInt64('last_offset'),
      construct.Padding(36))

  # The record structure is:
  # [HEAP][STRUCTURE][4xExtraField][2xExtraField]*[PreviousEntry]
  # Record static structure.
  # tam_entry: it contains the number of bytes from this file position
  #            until the end of the record, without counting itself.
  # next_offset: next record. If it is equal to 0x00, it is the last record.
  # asl_message_id: integer that has the numeric identification of the event.
  # timestamp: epoch integer that has the time when the entry was created.
  # nanosec: nanoseconds to add to the timestamp.
  # level: level of priority.
  # pid: process identification that asked to save the record.
  # uid: user identification that has launched the process.
  # gid: group identification that has launched the process.
  # read_uid: identification id of a user. Only applied if it is not -1
  #           (all FF). Only root and this user can read the entry.
  # read_gid: the same as read_uid, but for the group.
  ASL_RECORD_STRUCT = construct.Struct(
      'asl_record_struct',
      construct.Padding(2),
      construct.UBInt32('tam_entry'),
      construct.UBInt64('next_offset'),
      construct.UBInt64('asl_message_id'),
      construct.UBInt64('timestamp'),
      construct.UBInt32('nanosec'),
      construct.UBInt16('level'),
      construct.UBInt16('flags'),
      construct.UBInt32('pid'),
      construct.UBInt32('uid'),
      construct.UBInt32('gid'),
      construct.UBInt32('read_uid'),
      construct.UBInt32('read_gid'),
      construct.UBInt64('ref_pid'))

  ASL_RECORD_STRUCT_SIZE = ASL_RECORD_STRUCT.sizeof()

  # 8-byte fields, they can be:
  # - String: [Nibble = 1000 (8)][Nibble = Length][7 Bytes = String].
  # - Integer: integer that has the byte position in the file that points
  #            to an ASL_RECORD_DYN_VALUE struct. If the value of the integer
  #            is equal to 0, it means that it has no data (skip).

  # If the field is a String, we use this structure to decode each
  # integer byte into the corresponding character (ASCII char).
  ASL_OCTET_STRING = construct.ExprAdapter(
      construct.Octet('string'),
      encoder=lambda obj, ctx: ord(obj),
      decoder=lambda obj, ctx: chr(obj))

  # Field string structure. If the first bit is 1, it means that it
  # is a String (1000) = 8, then the next nibble has the number of
  # characters. The last 7 bytes are the string bytes.
  ASL_STRING = construct.BitStruct(
      'string',
      construct.Flag('type'),
      construct.Bits('filler', 3),
      construct.If(
          lambda ctx: ctx.type,
          construct.Nibble('string_length')),
      construct.If(
          lambda ctx: ctx.type,
          construct.Array(7, ASL_OCTET_STRING)))

  # 8-byte pointer to a byte position in the file.
  ASL_POINTER = construct.UBInt64('pointer')

  # Dynamic data structure pointed to by a pointer that contains a String:
  # [2 bytes padding][4 bytes length of String][String].
  ASL_RECORD_DYN_VALUE = construct.Struct(
      'asl_record_dyn_value',
      construct.Padding(2),
      construct.PascalString(
          'value', length_field=construct.UBInt32('length')))

  def Parse(self, parser_context, file_entry):
    """Extract entries from an ASL file.

    Args:
      parser_context: A parser context object (instance of ParserContext).
      file_entry: A file entry object (instance of dfvfs.FileEntry).

    Raises:
      UnableToParseFile: when the header cannot be parsed or the file does
                         not start with the ASL magic value.
    """
    file_object = file_entry.GetFileObject()
    file_object.seek(0, os.SEEK_SET)

    try:
      header = self.ASL_HEADER_STRUCT.parse_stream(file_object)
    except (IOError, construct.FieldError) as exception:
      file_object.close()
      raise errors.UnableToParseFile(
          u'Unable to parse ASL Header with error: {0:s}.'.format(exception))

    if header.magic != self.ASL_MAGIC:
      file_object.close()
      raise errors.UnableToParseFile(u'Not an ASL Header, unable to parse.')

    # Get the first and the last entry.
    offset = header.offset
    old_offset = header.offset
    last_offset_header = header.last_offset

    # If the ASL file has entries.
    if offset:
      event_object, offset = self.ReadAslEvent(file_object, offset)
      while event_object:
        parser_context.ProduceEvent(
            event_object, parser_name=self.NAME, file_entry=file_entry)

        # TODO: an anomaly object must be emitted once that is implemented.
        # Sanity check: the last read element must be the same as
        # indicated by the header.
        if offset == 0 and old_offset != last_offset_header:
          logging.warning(u'Parsing ended before the header ends.')
        old_offset = offset
        event_object, offset = self.ReadAslEvent(file_object, offset)

    file_object.close()

  def ReadAslEvent(self, file_object, offset):
    """Returns an AslEvent from a single ASL entry.

    Args:
      file_object: a file-like object that points to an ASL file.
      offset: offset where the static part of the entry starts; 0 means
              there is no further entry.

    Returns:
      A tuple of (event object, next offset), or (None, None) when there is
      no entry to read or the entry cannot be parsed.
    """
    # BUGFIX: test the offset before reading; previously the guard came
    # after the read, so a zero offset caused a pointless read to EOF
    # (negative read size).
    if not offset:
      return None, None

    # The heap of the entry is saved to try to avoid seeks (performance).
    # dynamic_start is the real start position of the entry.
    dynamic_start = file_object.tell()
    dynamic_part = file_object.read(offset - file_object.tell())

    try:
      record_header = self.ASL_RECORD_STRUCT.parse_stream(file_object)
    except (IOError, construct.FieldError) as exception:
      logging.warning(
          u'Unable to parse ASL event with error: {0:s}'.format(exception))
      return None, None

    # Variable tam_fields is the real length of the dynamic fields.
    # We have this: [Record_Struct] + [Dynamic_Fields] + [Pointer_Entry_Before]
    # In Record_Struct we have a field called tam_entry, which holds the number
    # of bytes until the end of the entry from the position where the field is.
    # The tam_entry is between the 2nd and the 6th byte in the [Record_Struct].
    # tam_entry = ([Record_Struct]-6)+[Dynamic_Fields]+[Pointer_Entry_Before]
    # Also, we do not need [Pointer_Entry_Before] and then we subtract its
    # size, which is 8 bytes:
    # [Dynamic_Fields] = tam_entry - [Record_Struct] + 6 - 8
    # [Dynamic_Fields] = tam_entry - [Record_Struct] - 2
    tam_fields = record_header.tam_entry - self.ASL_RECORD_STRUCT_SIZE - 2

    # Dynamic part of the entry that contains a minimum of four fields of
    # 8 bytes plus 2x[8 bytes] fields for each extra ASL_Field.
    # The first four fields are always the Host, Sender, Facility and Message.
    # After the first four fields, the entry might have extra ASL_Fields.
    # For each extra ASL_Field, it has a pair of 8-byte fields where the first
    # 8 bytes contain the name of the extra ASL_Field and the second 8 bytes
    # contain the text of the extra field.
    # Each of these 8-byte fields can be stored using one of three different
    # types:
    # - Null value ('0000000000000000'): nothing to do.
    # - String: it is a string if first bit = 1 or first nibble = 8 (1000).
    #           The second nibble has the length of the string.
    #           The next 7 bytes have the text characters of the string
    #           padded at the end with null characters: '0x00'.
    #           Example: [8468 6964 6400 0000]
    #                    [8] String, [4] length, value: [68 69 64 64] = hidd.
    # - Pointer: static position in the file of a special struct
    #            implemented as an ASL_RECORD_DYN_VALUE.
    #            Example: [0000 0000 0000 0077]
    #            It points to the file position 0x077 that has an
    #            ASL_RECORD_DYN_VALUE structure.
    values = []
    while tam_fields > 0:
      try:
        raw_field = file_object.read(8)
      except (IOError, construct.FieldError) as exception:
        # BUGFIX: was '{0:d}', which raises TypeError when formatting an
        # exception instead of logging the warning.
        logging.warning(
            u'Unable to parse ASL event with error: {0:s}'.format(exception))
        return None, None
      try:
        # Try to read it as a String.
        field = self.ASL_STRING.parse(raw_field)
        values.append(''.join(field.string[0:field.string_length]))
        # Go to parse the next extra field.
        tam_fields -= 8
        continue
      except ValueError:
        pass
      # If it is not a string, it must be a pointer.
      try:
        field = self.ASL_POINTER.parse(raw_field)
      except ValueError as exception:
        logging.warning(
            u'Unable to parse ASL event with error: {0:s}'.format(exception))
        return None, None
      if field != 0:
        # The next IF/ELSE is only for performance, avoiding seeks.
        # If the pointer points to a lower position than where the actual
        # entry starts, it means that it points to a previous entry.
        pos = field - dynamic_start
        # Bigger or equal to 0 means that the data is in the actual entry.
        if pos >= 0:
          try:
            values.append((self.ASL_RECORD_DYN_VALUE.parse(
                dynamic_part[pos:])).value.partition('\x00')[0])
          except (IOError, construct.FieldError) as exception:
            logging.warning(
                u'Unable to parse ASL event with error: {0:s}'.format(
                    exception))
            return None, None
        else:
          # Only if it is a pointer that points to the
          # heap of another entry do we use the seek method.
          main_position = file_object.tell()
          # If the pointer is in a previous entry.
          if main_position > field:
            file_object.seek(field - main_position, os.SEEK_CUR)
            try:
              values.append((self.ASL_RECORD_DYN_VALUE.parse_stream(
                  file_object)).value.partition('\x00')[0])
            except (IOError, construct.FieldError):
              logging.warning((
                  u'The pointer at {0:d} (0x{0:x}) points to invalid '
                  u'information.').format(
                      main_position - self.ASL_POINTER.sizeof()))
            # Come back to the position in the entry.
            _ = file_object.read(main_position - file_object.tell())
          else:
            _ = file_object.read(field - main_position)
            values.append((self.ASL_RECORD_DYN_VALUE.parse_stream(
                file_object)).value.partition('\x00')[0])
            # Come back to the position in the entry.
            file_object.seek(main_position - file_object.tell(), os.SEEK_CUR)
      # Next extra field: 8 bytes more.
      tam_fields -= 8

    # Read the last 8 bytes of the record that point to the previous entry.
    _ = file_object.read(8)

    # Parsed section; we translate the read data into an appropriate format.
    microsecond = record_header.nanosec // 1000
    timestamp = timelib.Timestamp.FromPosixTimeWithMicrosecond(
        record_header.timestamp, microsecond)
    record_position = offset
    message_id = record_header.asl_message_id
    level = u'{0} ({1})'.format(
        self.ASL_MESSAGE_PRIORITY[record_header.level], record_header.level)
    # If the value is -1 (FFFFFFFF), it can be read by everyone.
    if record_header.read_uid != int(self.ASL_NO_RIGHTS, 16):
      read_uid = record_header.read_uid
    else:
      read_uid = 'ALL'
    if record_header.read_gid != int(self.ASL_NO_RIGHTS, 16):
      read_gid = record_header.read_gid
    else:
      read_gid = 'ALL'

    # Parsing the dynamic values (text or pointers to positions with text).
    # The first four are always the host, sender, facility, and message.
    computer_name = values[0]
    sender = values[1]
    facility = values[2]
    message = values[3]

    # If the entry has extra fields, they work as pairs:
    # the first is the name of the field and the second the value.
    extra_information = ''
    if len(values) > 4:
      values = values[4:]
      for index in xrange(0, len(values) // 2):
        extra_information += (u'[{0}: {1}]'.format(
            values[index * 2], values[(index * 2) + 1]))

    # Return the event and the offset for the next entry.
    return AslEvent(timestamp, record_position, message_id,
        level, record_header, read_uid, read_gid, computer_name,
        sender, facility, message, extra_information), record_header.next_offset


manager.ParsersManager.RegisterParser(AslParser)
apache-2.0
rackerlabs/horizon
horizon/decorators.py
3
3360
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 CRS4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
General-purpose decorators for use with Horizon.
"""
import functools

from django.utils.decorators import available_attrs
from django.utils.translation import ugettext_lazy as _


def _current_component(view_func, dashboard=None, panel=None):
    """ Sets the currently-active dashboard and/or panel on the request.

    Stores the given ``dashboard``/``panel`` objects on ``request.horizon``
    before delegating to the wrapped view.
    """
    @functools.wraps(view_func, assigned=available_attrs(view_func))
    def dec(request, *args, **kwargs):
        if dashboard:
            request.horizon['dashboard'] = dashboard
        if panel:
            request.horizon['panel'] = panel
        return view_func(request, *args, **kwargs)
    return dec


def require_auth(view_func):
    """ Performs user authentication check.

    Similar to Django's `login_required` decorator, except that this throws
    :exc:`~horizon.exceptions.NotAuthenticated` exception if the user is not
    signed-in.
    """
    from horizon.exceptions import NotAuthenticated

    @functools.wraps(view_func, assigned=available_attrs(view_func))
    def dec(request, *args, **kwargs):
        if request.user.is_authenticated():
            return view_func(request, *args, **kwargs)
        raise NotAuthenticated(_("Please log in to continue."))
    return dec


def require_perms(view_func, required):
    """ Enforces permission-based access controls.

    :param list required: A tuple of permission names, all of which the
                          request user must possess in order access the
                          decorated view.

    Example usage::

        from horizon.decorators import require_perms


        @require_perms(['foo.admin', 'foo.member'])
        def my_view(request):
            ...

    Raises a :exc:`~horizon.exceptions.NotAuthorized` exception if the
    requirements are not met.
    """
    from horizon.exceptions import NotAuthorized
    # We only need to check each permission once for a view, so we'll use a
    # set accumulated across repeated applications of this decorator.
    # Idiom fix: set() instead of set([]).
    current_perms = getattr(view_func, '_required_perms', set())
    view_func._required_perms = current_perms | set(required)

    @functools.wraps(view_func, assigned=available_attrs(view_func))
    def dec(request, *args, **kwargs):
        if request.user.is_authenticated():
            if request.user.has_perms(view_func._required_perms):
                return view_func(request, *args, **kwargs)
            raise NotAuthorized(_("You are not authorized to access %s")
                                % request.path)

    # If we don't have any permissions, just return the original view.
    if required:
        return dec
    else:
        return view_func
apache-2.0
obviel/obviel
doc/conf.py
1
7192
# -*- coding: utf-8 -*- # # Obviel documentation build configuration file, created by # sphinx-quickstart on Wed Nov 24 02:18:25 2010. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.txt' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Obviel' copyright = u'2011-2013, Obviel Developers' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '1.0' # The full version, including alpha/beta/rc tags. release = '1.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
#language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' highlight_language = 'javascript' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'obviel-theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { "nosidebar": True } # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ['.'] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. 
#html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static', '../src', # obviel itself & deps & demos ] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%Y-%m-%d' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. html_sidebars = {'**': ['localtoc.html']} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. 
htmlhelp_basename = 'obvieldoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'Obviel.tex', u'Obviel Documentation', u'Obviel Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'obviel', u'Obviel Documentation', [u'Obviel Developers'], 1) ] primary_domain = 'js'
mit
luckielordie/conan
conans/client/conf/__init__.py
1
17606
import os

from six.moves.configparser import ConfigParser, NoSectionError
from six.moves import urllib

from conans.errors import ConanException
from conans.model.env_info import unquote
from conans.paths import conan_expand_user, DEFAULT_PROFILE_NAME
from conans.util.env_reader import get_env
from conans.util.files import load

MIN_SERVER_COMPATIBLE_VERSION = '0.12.0'

default_settings_yml = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS]
arch_build: [x86, x86_64, ppc64le, ppc64, armv6, armv7, armv7hf, armv8, sparc, sparcv9, mips, mips64, avr, armv7s, armv7k]

# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, Arduino]
arch_target: [x86, x86_64, ppc64le, ppc64, armv6, armv7, armv7hf, armv8, sparc, sparcv9, mips, mips64, avr, armv7s, armv7k]

# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
    Windows:
        subsystem: [None, cygwin, msys, msys2, wsl]
    WindowsStore:
        version: ["8.1", "10.0"]
    Linux:
    Macos:
        version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14"]
    Android:
        api_level: ANY
    iOS:
        version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0"]
    watchOS:
        version: ["4.0"]
    tvOS:
        version: ["11.0"]
    FreeBSD:
    SunOS:
    Arduino:
        board: ANY
arch: [x86, x86_64, ppc64le, ppc64, armv6, armv7, armv7hf, armv8, sparc, sparcv9, mips, mips64, avr, armv7s, armv7k]
compiler:
    sun-cc:
        version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
        threads: [None, posix]
        libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
    gcc:
        version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
                  "5", "5.1", "5.2", "5.3", "5.4", "5.5",
                  "6", "6.1", "6.2", "6.3", "6.4",
                  "7", "7.1", "7.2", "7.3",
                  "8", "8.1"]
        libcxx: [libstdc++, libstdc++11]
        threads: [None, posix, win32]  # Windows MinGW
        exception: [None, dwarf2, sjlj, seh]  # Windows MinGW
    Visual Studio:
        runtime: [MD, MT, MTd, MDd]
        version: ["8", "9", "10", "11", "12", "14", "15"]
        toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
                  v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
                  LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
                  LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2]
    clang:
        version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
                  "5.0", "6.0", "7.0"]
        libcxx: [libstdc++, libstdc++11, libc++]
    apple-clang:
        version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0"]
        libcxx: [libstdc++, libc++]

build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
"""

default_client_conf = """
[log]
run_to_output = True        # environment CONAN_LOG_RUN_TO_OUTPUT
run_to_file = False         # environment CONAN_LOG_RUN_TO_FILE
level = 50                  # environment CONAN_LOGGING_LEVEL
# trace_file =              # environment CONAN_TRACE_FILE
print_run_commands = False  # environment CONAN_PRINT_RUN_COMMANDS

[general]
default_profile = %s
compression_level = 9                 # environment CONAN_COMPRESSION_LEVEL
sysrequires_sudo = True               # environment CONAN_SYSREQUIRES_SUDO
request_timeout = 60                  # environment CONAN_REQUEST_TIMEOUT (seconds)
# sysrequires_mode = enabled          # environment CONAN_SYSREQUIRES_MODE (allowed modes enabled/verify/disabled)
# vs_installation_preference = Enterprise, Professional, Community, BuildTools # environment CONAN_VS_INSTALLATION_PREFERENCE
# verbose_traceback = False           # environment CONAN_VERBOSE_TRACEBACK
# bash_path = ""                      # environment CONAN_BASH_PATH (only windows)
# recipe_linter = False               # environment CONAN_RECIPE_LINTER
# read_only_cache = True              # environment CONAN_READ_ONLY_CACHE
# pylintrc = path/to/pylintrc_file    # environment CONAN_PYLINTRC
# cache_no_locks = True               # Disable locking mechanism of local cache
# user_home_short = your_path         # environment CONAN_USER_HOME_SHORT
# skip_vs_projects_upgrade = False    # environment CONAN_SKIP_VS_PROJECTS_UPGRADE
# non_interactive = False             # environment CONAN_NON_INTERACTIVE

# conan_make_program = make           # environment CONAN_MAKE_PROGRAM (overrides the make program used in AutoToolsBuildEnvironment.make)

# cmake_generator                     # environment CONAN_CMAKE_GENERATOR
# http://www.vtk.org/Wiki/CMake_Cross_Compiling
# cmake_toolchain_file                # environment CONAN_CMAKE_TOOLCHAIN_FILE
# cmake_system_name                   # environment CONAN_CMAKE_SYSTEM_NAME
# cmake_system_version                # environment CONAN_CMAKE_SYSTEM_VERSION
# cmake_system_processor              # environment CONAN_CMAKE_SYSTEM_PROCESSOR
# cmake_find_root_path                # environment CONAN_CMAKE_FIND_ROOT_PATH
# cmake_find_root_path_mode_program   # environment CONAN_CMAKE_FIND_ROOT_PATH_MODE_PROGRAM
# cmake_find_root_path_mode_library   # environment CONAN_CMAKE_FIND_ROOT_PATH_MODE_LIBRARY
# cmake_find_root_path_mode_include   # environment CONAN_CMAKE_FIND_ROOT_PATH_MODE_INCLUDE

# cpu_count = 1             # environment CONAN_CPU_COUNT

# Change the default location for building test packages to a temporary folder
# which is deleted after the test.
# temp_test_folder = True             # environment CONAN_TEMP_TEST_FOLDER

[storage]
# This is the default path, but you can write your own. It must be an absolute path or a
# path beginning with "~" (if the environment var CONAN_USER_HOME is specified, this directory, even
# with "~/", will be relative to the conan user home, not to the system user home)
path = ~/.conan/data

[proxies]
# Empty section will try to use system proxies.
# If don't want proxy at all, remove section [proxies]
# As documented in http://docs.python-requests.org/en/latest/user/advanced/#proxies
# http = http://user:pass@10.10.1.10:3128/
# http = http://10.10.1.10:3128
# https = http://10.10.1.10:1080
# You can skip the proxy for the matching (fnmatch) urls (comma-separated)
# no_proxy_match = *bintray.com*, https://myserver.*

[plugins] # environment CONAN_PLUGINS
attribute_checker

# Default settings now declared in the default profile
""" % DEFAULT_PROFILE_NAME


class ConanClientConfigParser(ConfigParser, object):
    """Parser for the client ``conan.conf`` file.

    Exposes typed accessors for the known sections/keys, resolves the
    environment-variable overrides (``CONAN_*`` always win over the file),
    and writes the file back to disk whenever an item is set or removed.
    """

    def __init__(self, filename):
        ConfigParser.__init__(self, allow_no_value=True)
        self.read(filename)
        self.filename = filename

    # So keys are not converted to lowercase, we override the default optionxform
    optionxform = str

    @property
    def env_vars(self):
        """Return the dict of CONAN_* environment variables to inject.

        Each value is resolved with precedence: real environment variable >
        conan.conf entry > hardcoded default. Entries that resolve to None
        are filtered out.
        """
        ret = {"CONAN_LOG_RUN_TO_OUTPUT": self._env_c("log.run_to_output", "CONAN_LOG_RUN_TO_OUTPUT", "True"),
               "CONAN_LOG_RUN_TO_FILE": self._env_c("log.run_to_file", "CONAN_LOG_RUN_TO_FILE", "False"),
               "CONAN_LOGGING_LEVEL": self._env_c("log.level", "CONAN_LOGGING_LEVEL", "50"),
               "CONAN_TRACE_FILE": self._env_c("log.trace_file", "CONAN_TRACE_FILE", None),
               "CONAN_PRINT_RUN_COMMANDS": self._env_c("log.print_run_commands", "CONAN_PRINT_RUN_COMMANDS", "False"),
               "CONAN_COMPRESSION_LEVEL": self._env_c("general.compression_level", "CONAN_COMPRESSION_LEVEL", "9"),
               "CONAN_NON_INTERACTIVE": self._env_c("general.non_interactive", "CONAN_NON_INTERACTIVE", "False"),
               "CONAN_PYLINTRC": self._env_c("general.pylintrc", "CONAN_PYLINTRC", None),
               "CONAN_PYLINT_WERR": self._env_c("general.pylint_werr", "CONAN_PYLINT_WERR", None),
               "CONAN_SYSREQUIRES_SUDO": self._env_c("general.sysrequires_sudo", "CONAN_SYSREQUIRES_SUDO", "False"),
               "CONAN_SYSREQUIRES_MODE": self._env_c("general.sysrequires_mode", "CONAN_SYSREQUIRES_MODE", "enabled"),
               "CONAN_REQUEST_TIMEOUT": self._env_c("general.request_timeout", "CONAN_REQUEST_TIMEOUT", None),
               "CONAN_VS_INSTALLATION_PREFERENCE": self._env_c("general.vs_installation_preference", "CONAN_VS_INSTALLATION_PREFERENCE", None),
               "CONAN_RECIPE_LINTER": self._env_c("general.recipe_linter", "CONAN_RECIPE_LINTER", "True"),
               "CONAN_CPU_COUNT": self._env_c("general.cpu_count", "CONAN_CPU_COUNT", None),
               "CONAN_READ_ONLY_CACHE": self._env_c("general.read_only_cache", "CONAN_READ_ONLY_CACHE", None),
               "CONAN_USER_HOME_SHORT": self._env_c("general.user_home_short", "CONAN_USER_HOME_SHORT", None),
               "CONAN_VERBOSE_TRACEBACK": self._env_c("general.verbose_traceback", "CONAN_VERBOSE_TRACEBACK", None),
               # http://www.vtk.org/Wiki/CMake_Cross_Compiling
               "CONAN_CMAKE_GENERATOR": self._env_c("general.cmake_generator", "CONAN_CMAKE_GENERATOR", None),
               "CONAN_CMAKE_TOOLCHAIN_FILE": self._env_c("general.cmake_toolchain_file", "CONAN_CMAKE_TOOLCHAIN_FILE", None),
               "CONAN_CMAKE_SYSTEM_NAME": self._env_c("general.cmake_system_name", "CONAN_CMAKE_SYSTEM_NAME", None),
               "CONAN_CMAKE_SYSTEM_VERSION": self._env_c("general.cmake_system_version", "CONAN_CMAKE_SYSTEM_VERSION", None),
               "CONAN_CMAKE_SYSTEM_PROCESSOR": self._env_c("general.cmake_system_processor", "CONAN_CMAKE_SYSTEM_PROCESSOR", None),
               "CONAN_CMAKE_FIND_ROOT_PATH": self._env_c("general.cmake_find_root_path", "CONAN_CMAKE_FIND_ROOT_PATH", None),
               "CONAN_CMAKE_FIND_ROOT_PATH_MODE_PROGRAM": self._env_c("general.cmake_find_root_path_mode_program", "CONAN_CMAKE_FIND_ROOT_PATH_MODE_PROGRAM", None),
               "CONAN_CMAKE_FIND_ROOT_PATH_MODE_LIBRARY": self._env_c("general.cmake_find_root_path_mode_library", "CONAN_CMAKE_FIND_ROOT_PATH_MODE_LIBRARY", None),
               "CONAN_CMAKE_FIND_ROOT_PATH_MODE_INCLUDE": self._env_c("general.cmake_find_root_path_mode_include", "CONAN_CMAKE_FIND_ROOT_PATH_MODE_INCLUDE", None),
               "CONAN_BASH_PATH": self._env_c("general.bash_path", "CONAN_BASH_PATH", None),
               "CONAN_MAKE_PROGRAM": self._env_c("general.conan_make_program", "CONAN_MAKE_PROGRAM", None),
               "CONAN_TEMP_TEST_FOLDER": self._env_c("general.temp_test_folder", "CONAN_TEMP_TEST_FOLDER", "False"),
               "CONAN_SKIP_VS_PROJECTS_UPGRADE": self._env_c("general.skip_vs_projects_upgrade", "CONAN_SKIP_VS_PROJECTS_UPGRADE", "False"),
               "CONAN_PLUGINS": self._env_c("plugins", "CONAN_PLUGINS", None)
               }

        # Filter None values
        return {name: value for name, value in ret.items() if value is not None}

    def _env_c(self, var_name, env_var_name, default_value):
        """Resolve one setting: environment variable wins over the conan.conf
        entry ``var_name`` ("section.key"), which wins over ``default_value``."""
        env = os.environ.get(env_var_name, None)
        if env is not None:
            return env
        try:
            return unquote(self.get_item(var_name))
        except ConanException:
            return default_value

    def get_item(self, item):
        """Return a conan.conf value.

        ``item`` can be empty (whole file text), "section" (rendered section
        contents) or "section.key" (single value, inline comments stripped).
        Raises ConanException for a missing section or key.
        """
        if not item:
            return load(self.filename)

        tokens = item.split(".", 1)
        section_name = tokens[0]
        try:
            section = self.items(section_name)
        except NoSectionError:
            raise ConanException("'%s' is not a section of conan.conf" % section_name)
        if len(tokens) == 1:
            result = []
            if section_name == "plugins":
                # [plugins] entries are bare names (allow_no_value), so only
                # the keys are meaningful
                for key, _ in section:
                    result.append(key)
                return ",".join(result)
            else:
                for section_item in section:
                    result.append(" = ".join(section_item))
                return "\n".join(result)
        else:
            key = tokens[1]
            try:
                value = dict(section)[key]
                if " #" in value:  # Comments
                    value = value[:value.find(" #")].strip()
            except KeyError:
                raise ConanException("'%s' doesn't exist in [%s]" % (key, section_name))
            return value

    def set_item(self, key, value):
        """Set "section.key" to ``value`` and persist the file to disk.
        The section is created if needed; setting a bare section is an error."""
        tokens = key.split(".", 1)
        section_name = tokens[0]
        if not self.has_section(section_name):
            self.add_section(section_name)

        if len(tokens) == 1:  # defining full section
            raise ConanException("You can't set a full section, please specify a key=value")

        key = tokens[1]
        super(ConanClientConfigParser, self).set(section_name, key, value)

        with open(self.filename, "w") as f:
            self.write(f)

    def rm_item(self, item):
        """Remove a whole section ("section") or a single key ("section.key")
        and persist the file to disk. Raises ConanException when missing."""
        tokens = item.split(".", 1)
        section_name = tokens[0]
        if not self.has_section(section_name):
            raise ConanException("'%s' is not a section of conan.conf" % section_name)

        if len(tokens) == 1:
            self.remove_section(tokens[0])
        else:
            key = tokens[1]
            if not self.has_option(section_name, key):
                raise ConanException("'%s' doesn't exist in [%s]" % (key, section_name))
            self.remove_option(section_name, key)

        with open(self.filename, "w") as f:
            self.write(f)

    def get_conf(self, varname):
        """Gets the section from config file or raises an exception"""
        try:
            return self.items(varname)
        except NoSectionError:
            raise ConanException("Invalid configuration, missing %s" % varname)

    @property
    def default_profile(self):
        try:
            return self.get_item("general.default_profile")
        except ConanException:
            return DEFAULT_PROFILE_NAME

    @property
    def cache_no_locks(self):
        try:
            return self.get_item("general.cache_no_locks")
        except ConanException:
            return False

    @property
    def storage(self):
        return dict(self.get_conf("storage"))

    @property
    def request_timeout(self):
        try:
            return self.get_item("general.request_timeout")
        except ConanException:
            return None

    @property
    def storage_path(self):
        """Absolute path of the local cache storage.

        Resolution order: CONAN_STORAGE_PATH env var, then [storage] "path"
        from conan.conf. When CONAN_USER_HOME is set, a configured path is
        made relative to it (even "~/"-prefixed ones).
        """
        # Try with CONAN_STORAGE_PATH
        result = get_env('CONAN_STORAGE_PATH', None)

        # Try with conan.conf "path"
        if not result:
            try:
                env_conan_user_home = os.getenv("CONAN_USER_HOME")
                # if env var is declared, any specified path will be relative to
                # CONAN_USER_HOME, even with the ~/
                if env_conan_user_home:
                    storage = self.storage["path"]
                    if storage[:2] == "~/":
                        storage = storage[2:]
                    result = os.path.join(env_conan_user_home, storage)
                else:
                    result = self.storage["path"]
            except KeyError:
                pass

        # expand the result and check if absolute
        if result:
            result = conan_expand_user(result)
            if not os.path.isabs(result):
                raise ConanException("Conan storage path has to be an absolute path")
        return result

    @property
    def proxies(self):
        """ optional field, might not exist. Returns None when there is no
        [proxies] section at all. """
        try:
            proxies = self.get_conf("proxies")
        except ConanException:
            # FIX: was a bare "except:" that also swallowed KeyboardInterrupt/
            # SystemExit; get_conf only raises ConanException for a missing
            # section, which is the case we want to map to None.
            return None
        # If there is proxies section, but empty, it will try to use system proxy
        if not proxies:
            # We don't have evidences that this following line is necessary.
            # If the proxies has been configured at system level, conan will
            # use it, and shouldn't be necessary to return here the proxies
            # read from the system. Furthermore, the urls excluded for use
            # proxies at system level do not work in this case, then the only
            # way is to remove the [proxies] section with
            # conan config remote proxies, then this method will return None
            # and the proxies dict passed to requests will be empty.
            # We don't remove this line because we are afraid to break
            # something, but maybe until now is working because no one is
            # using system-wide proxies or those proxies rules don't contain
            # excluded urls.c #1777
            return urllib.request.getproxies()
        result = {k: (None if v == "None" else v) for k, v in proxies}
        return result
mit
jirikuncar/invenio
invenio/legacy/webstat/webinterface.py
13
54116
# This file is part of Invenio. # Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 CERN. # # Invenio is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. __revision__ = "$Id$" __lastupdated__ = "$Date$" import os, sys from urllib import unquote from time import localtime from invenio.utils import apache from invenio.config import \ CFG_TMPDIR, \ CFG_SITE_URL, \ CFG_SITE_LANG, \ CFG_CERN_SITE from invenio.modules.indexer.tokenizers.BibIndexJournalTokenizer import CFG_JOURNAL_TAG from invenio.ext.legacy.handler import wash_urlargd, WebInterfaceDirectory from invenio.legacy.webpage import page from invenio.modules.access.engine import acc_authorize_action from invenio.modules.access.local_config import VIEWRESTRCOLL from invenio.legacy.search_engine import collection_restricted_p from invenio.legacy.webuser import collect_user_info, page_not_authorized from invenio.utils.url import redirect_to_url from invenio.legacy.webstat.api import perform_request_index, \ perform_display_keyevent, \ perform_display_customevent, \ perform_display_customevent_help, \ perform_display_error_log_analyzer, \ register_customevent, \ perform_display_custom_summary, \ perform_display_stats_per_coll, \ perform_display_current_system_health, \ perform_display_yearly_report, \ perform_display_coll_list, \ perform_display_ingestion_status def 
detect_suitable_graph_format(): """ Return suitable graph format default argument. It is always flot (when there wasn't plot, gnuplot if it is present, otherwise asciiart). """ return "flot" # try: # import Gnuplot # suitable_graph_format = "gnuplot" # except ImportError: # suitable_graph_format = "asciiart" # return suitable_graph_format SUITABLE_GRAPH_FORMAT = detect_suitable_graph_format() class WebInterfaceStatsPages(WebInterfaceDirectory): """Defines the set of stats pages.""" _exports = ['', 'system_health', 'systemhealth', 'yearly_report', 'ingestion_health', 'collection_population', 'new_records', 'search_frequency', 'search_type_distribution', 'download_frequency', 'comments_frequency', 'number_of_loans', 'web_submissions', 'loans_stats', 'loans_lists', 'renewals_lists', 'returns_table', 'returns_graph', 'ill_requests_stats', 'ill_requests_lists', 'ill_requests_graph', 'items_stats', 'items_list', 'loans_requests', 'loans_request_lists', 'user_stats', 'user_lists', 'error_log', 'customevent', 'customevent_help', 'customevent_register', 'custom_summary', 'collections' , 'collection_stats', 'export'] navtrail = """<a class="navtrail" href="%s/stats/%%(ln_link)s">Statistics</a>""" % CFG_SITE_URL def __call__(self, req, form): """Index page.""" argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)}) ln = argd['ln'] user_info = collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin') if auth_code: return page_not_authorized(req, navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''}, text=auth_msg, navmenuid='index', ln=ln) return page(title="Statistics", body=perform_request_index(ln=ln), description="Invenio, Statistics", keywords="Invenio, statistics", req=req, lastupdated=__lastupdated__, navmenuid='stats', language=ln) # CURRENT SYSTEM HEALTH def system_health(self, req, form): argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)}) ln = argd['ln'] user_info = collect_user_info(req) 
(auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin') if auth_code: return page_not_authorized(req, navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''}, text=auth_msg, navmenuid='current system health', ln=ln) return page(title="Current system health", body=perform_display_current_system_health(ln=ln), navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \ (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''), description="Invenio, Statistics, Current system health", keywords="Invenio, statistics, current system health", req=req, lastupdated=__lastupdated__, navmenuid='current system health', language=ln) def systemhealth(self, req, form): """Redirect for the old URL. """ return redirect_to_url (req, "%s/stats/system_health" % (CFG_SITE_URL)) def yearly_report(self, req, form): argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)}) ln = argd['ln'] user_info = collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin') if auth_code: return page_not_authorized(req, navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''}, text=auth_msg, navmenuid='yearly report', ln=ln) return page(title="Yearly report", body=perform_display_yearly_report(ln=ln), navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \ (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''), description="Invenio, Statistics, Yearly report", keywords="Invenio, statistics, yearly report", req=req, lastupdated=__lastupdated__, navmenuid='yearly report', language=ln) def ingestion_health(self, req, form): argd = wash_urlargd(form, { 'pattern': (str, None), 'ln': (str, CFG_SITE_LANG)}) ln = argd['ln'] req_ingestion = argd['pattern'] user_info = collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin') if auth_code: return page_not_authorized(req, navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and 
'?ln=' + ln) or ''}, text=auth_msg, navmenuid='ingestion status', ln=ln) return page(title="Check ingestion health", body=perform_display_ingestion_status(req_ingestion, ln=ln), navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \ (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''), description="Invenio, Statistics, Ingestion health", keywords="Invenio, statistics, Ingestion health", req=req, lastupdated=__lastupdated__, navmenuid='ingestion health', language=ln) # KEY EVENT SECTION def collection_population(self, req, form): """Collection population statistics page.""" argd = wash_urlargd(form, {'collection': (str, "All"), 'timespan': (str, "today"), 's_date': (str, ""), 'f_date': (str, ""), 'format': (str, SUITABLE_GRAPH_FORMAT), 'sql': (int, 0), 'ln': (str, CFG_SITE_LANG)}) ln = argd['ln'] user_info = collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin') if auth_code: return page_not_authorized(req, navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''}, text=auth_msg, navmenuid='collection population', ln=ln) return page(title="Collection population", body=perform_display_keyevent('collection population', argd, req, ln=ln), navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \ (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''), description="Invenio, Statistics, Collection population", keywords="Invenio, statistics, collection population", req=req, lastupdated=__lastupdated__, navmenuid='collection population', language=ln) def new_records(self, req, form): """Collection population statistics page.""" argd = wash_urlargd(form, {'collection': (str, "All"), 'timespan': (str, "today"), 's_date': (str, ""), 'f_date': (str, ""), 'format': (str, SUITABLE_GRAPH_FORMAT), 'sql': (int, 0), 'ln': (str, CFG_SITE_LANG)}) ln = argd['ln'] user_info = collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin') if 
auth_code: return page_not_authorized(req, navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''}, text=auth_msg, navmenuid='new records', ln=ln) return page(title="New records", body=perform_display_keyevent('new records', argd, req, ln=ln), navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \ (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''), description="Invenio, Statistics, New records", keywords="Invenio, statistics, new records", req=req, lastupdated=__lastupdated__, navmenuid='new records', language=ln) def search_frequency(self, req, form): """Search frequency statistics page.""" argd = wash_urlargd(form, {'timespan': (str, "today"), 's_date': (str, ""), 'f_date': (str, ""), 'format': (str, SUITABLE_GRAPH_FORMAT), 'sql': (int, 0), 'ln': (str, CFG_SITE_LANG)}) ln = argd['ln'] user_info = collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin') if auth_code: return page_not_authorized(req, navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''}, text=auth_msg, navmenuid='search frequency', ln=ln) return page(title="Search frequency", body=perform_display_keyevent('search frequency', argd, req, ln=ln), navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \ (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''), description="Invenio, Statistics, Search frequency", keywords="Invenio, statistics, search frequency", req=req, lastupdated=__lastupdated__, navmenuid='search frequency', language=ln) def comments_frequency(self, req, form): """Comments frequency statistics page.""" argd = wash_urlargd(form, {'collection': (str, "All"), 'timespan': (str, "today"), 's_date': (str, ""), 'f_date': (str, ""), 'format': (str, SUITABLE_GRAPH_FORMAT), 'sql': (int, 0), 'ln': (str, CFG_SITE_LANG)}) ln = argd['ln'] user_info = collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin') if 
auth_code: return page_not_authorized(req, navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''}, text=auth_msg, navmenuid='comments frequency', ln=ln) return page(title="Comments frequency", body=perform_display_keyevent('comments frequency', argd, req, ln=ln), navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \ (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''), description="Invenio, Statistics, Comments frequency", keywords="Invenio, statistics, Comments frequency", req=req, lastupdated=__lastupdated__, navmenuid='comments frequency', language=ln) def search_type_distribution(self, req, form): """Search type distribution statistics page.""" user_info = collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin') argd = wash_urlargd(form, {'timespan': (str, "today"), 's_date': (str, ""), 'f_date': (str, ""), 'format': (str, SUITABLE_GRAPH_FORMAT), 'sql': (int, 0), 'ln': (str, CFG_SITE_LANG)}) ln = argd['ln'] if auth_code: return page_not_authorized(req, navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''}, text=auth_msg, navmenuid='search type distribution', ln=ln) return page(title="Search type distribution", body=perform_display_keyevent('search type distribution', argd, req, ln=ln), navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \ (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''), description="Invenio, Statistics, Search type distribution", keywords="Invenio, statistics, search type distribution", req=req, lastupdated=__lastupdated__, navmenuid='search type distribution', language=ln) def download_frequency(self, req, form): """Download frequency statistics page.""" argd = wash_urlargd(form, {'collection': (str, "All"), 'timespan': (str, "today"), 's_date': (str, ""), 'f_date': (str, ""), 'format': (str, SUITABLE_GRAPH_FORMAT), 'sql': (int, 0), 'ln': (str, CFG_SITE_LANG)}) ln = argd['ln'] user_info = 
collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin') if auth_code: return page_not_authorized(req, navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''}, text=auth_msg, navmenuid='download frequency', ln=ln) return page(title="Download frequency", body=perform_display_keyevent('download frequency', argd, req, ln=ln), navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \ (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''), description="Invenio, Statistics, Download frequency", keywords="Invenio, statistics, download frequency", req=req, lastupdated=__lastupdated__, navmenuid='download frequency', language=ln) def number_of_loans(self, req, form): """Number of loans statistics page.""" argd = wash_urlargd(form, {'timespan': (str, "today"), 's_date': (str, ""), 'f_date': (str, ""), 'format': (str, SUITABLE_GRAPH_FORMAT), 'sql': (int, 0), 'ln': (str, CFG_SITE_LANG)}) ln = argd['ln'] user_info = collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin') if auth_code: return page_not_authorized(req, navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''}, text=auth_msg, navmenuid='number of circulation loans', ln=ln) return page(title="Number of circulation loans", body=perform_display_keyevent('number of loans', argd, req, ln=ln), navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \ (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''), description="Invenio, Statistics, Number of circulation loans", keywords="Invenio, statistics, Number of circulation loans", req=req, lastupdated=__lastupdated__, navmenuid='number of circulation loans', language=ln) def web_submissions(self, req, form): """Web submissions statistics page.""" argd = wash_urlargd(form, {'doctype': (str, "all"), 'timespan': (str, "today"), 's_date': (str, ""), 'f_date': (str, ""), 'format': (str, 
SUITABLE_GRAPH_FORMAT), 'sql': (int, 0), 'ln': (str, CFG_SITE_LANG)}) ln = argd['ln'] user_info = collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin') if auth_code: return page_not_authorized(req, navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''}, text=auth_msg, navmenuid='web submissions', ln=ln) return page(title="Web submissions", body=perform_display_keyevent('web submissions', argd, req, ln=ln), navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \ (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''), description="Invenio, Statistics, Web submissions", keywords="Invenio, statistics, websubmissions", req=req, lastupdated=__lastupdated__, navmenuid='web submissions', language=ln) def loans_stats(self, req, form): """Number of loans statistics page.""" argd = wash_urlargd(form, {'udc': (str, ""), 'item_status': (str, ""), 'publication_date': (str, ""), 'creation_date': (str, ""), 'timespan': (str, "today"), 's_date': (str, ""), 'f_date': (str, ""), 'format': (str, SUITABLE_GRAPH_FORMAT), 'sql': (int, 0), 'ln': (str, CFG_SITE_LANG)}) ln = argd['ln'] user_info = collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin') if auth_code: return page_not_authorized(req, navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''}, text=auth_msg, navmenuid='circulation loans statistics', ln=ln) return page(title="Circulation loans statistics", body=perform_display_keyevent('loans statistics', argd, req, ln=ln), navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \ (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''), description="Invenio, Statistics, Circulation loans statistics", keywords="Invenio, statistics, Circulation loans statistics", req=req, lastupdated=__lastupdated__, navmenuid='circulation loans statistics', language=ln) def loans_lists(self, req, form): """Number of loans 
lists page.""" argd = wash_urlargd(form, {'udc': (str, ""), 'loan_period': (str, ""), 'min_loans': (int, 0), 'max_loans': (int, sys.maxint), 'publication_date': (str, ""), 'creation_date': (str, ""), 'timespan': (str, "today"), 's_date': (str, ""), 'f_date': (str, ""), 'format': (str, SUITABLE_GRAPH_FORMAT), 'sql': (int, 0), 'ln': (str, CFG_SITE_LANG)}) argd['min_loans'] = str(argd['min_loans']) argd['max_loans'] = str(argd['max_loans']) ln = argd['ln'] user_info = collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin') if auth_code: return page_not_authorized(req, navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''}, text=auth_msg, navmenuid='circulation loans lists', ln=ln) return page(title="Circulation loans lists", body=perform_display_keyevent('loans lists', argd, req, ln=ln), navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \ (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''), description="Invenio, Statistics, Circulation oans lists", keywords="Invenio, statistics, Circulation loans lists", req=req, lastupdated=__lastupdated__, navmenuid='circulation loans lists', language=ln) def renewals_lists(self, req, form): """Renewed items lists page.""" argd = wash_urlargd(form, {'udc': (str, ""), 'collection': (str, ""), 'timespan': (str, "today"), 's_date': (str, ""), 'f_date': (str, ""), 'format': (str, SUITABLE_GRAPH_FORMAT), 'sql': (int, 0), 'ln': (str, CFG_SITE_LANG)}) ln = argd['ln'] user_info = collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin') if auth_code: return page_not_authorized(req, navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''}, text=auth_msg, navmenuid='circulation renewals lists', ln=ln) return page(title="Circulation renewals lists", body=perform_display_keyevent('renewals', argd, req, ln=ln), navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \ 
(CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''), description="Invenio, Statistics, Circulation renewals lists", keywords="Invenio, statistics, Circulation renewals lists", req=req, lastupdated=__lastupdated__, navmenuid='circulation renewals lists', language=ln) def returns_table(self, req, form): """Number of returns table page.""" argd = wash_urlargd(form, {'timespan': (str, "today"), 's_date': (str, ""), 'f_date': (str, ""), 'format': (str, SUITABLE_GRAPH_FORMAT), 'sql': (int, 0), 'ln': (str, CFG_SITE_LANG)}) ln = argd['ln'] user_info = collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin') if auth_code: return page_not_authorized(req, navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''}, text=auth_msg, navmenuid='Circulation returns table', ln=ln) return page(title="Circulation returns table", body=perform_display_keyevent('number returns', argd, req, ln=ln), navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \ (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''), description="Invenio, Statistics, Circulation returns table", keywords="Invenio, statistics, Circulation returns table", req=req, lastupdated=__lastupdated__, navmenuid='circulation returns table', language=ln) def returns_graph(self, req, form): """Percentage of returns graph page.""" argd = wash_urlargd(form, {'timespan': (str, "today"), 's_date': (str, ""), 'f_date': (str, ""), 'format': (str, SUITABLE_GRAPH_FORMAT), 'sql': (int, 0), 'ln': (str, CFG_SITE_LANG)}) ln = argd['ln'] user_info = collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin') if auth_code: return page_not_authorized(req, navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''}, text=auth_msg, navmenuid='circulation returns graph', ln=ln) return page(title="Circulation returns graph", body=perform_display_keyevent('percentage returns', argd, req, ln=ln), 
navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \ (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''), description="Invenio, Statistics, Circulation returns graph", keywords="Invenio, statistics, Circulation returns graph", req=req, lastupdated=__lastupdated__, navmenuid='circulation returns graph', language=ln) def ill_requests_stats(self, req, form): """ILL Requests statistics page.""" argd = wash_urlargd(form, {'doctype': (str, ""), 'status': (str, ""), 'supplier': (str, ""), 'timespan': (str, "today"), 's_date': (str, ""), 'f_date': (str, ""), 'format': (str, SUITABLE_GRAPH_FORMAT), 'sql': (int, 0), 'ln': (str, CFG_SITE_LANG)}) ln = argd['ln'] user_info = collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin') if auth_code: return page_not_authorized(req, navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''}, text=auth_msg, navmenuid='circulation ill requests statistics', ln=ln) return page(title="Circulation ILL Requests statistics", body=perform_display_keyevent('ill requests statistics', argd, req, ln=ln), navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \ (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''), description="Invenio, Statistics, Circulation ILL Requests statistics", keywords="Invenio, statistics, Circulation ILL Requests statistics", req=req, lastupdated=__lastupdated__, navmenuid='circulation ill requests statistics', language=ln) def ill_requests_lists(self, req, form): """Number of loans lists page.""" argd = wash_urlargd(form, {'doctype': (str, ""), 'supplier': (str, ""), 'timespan': (str, "today"), 's_date': (str, ""), 'f_date': (str, ""), 'format': (str, SUITABLE_GRAPH_FORMAT), 'sql': (int, 0), 'ln': (str, CFG_SITE_LANG)}) ln = argd['ln'] user_info = collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin') if auth_code: return page_not_authorized(req, 
navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''}, text=auth_msg, navmenuid='circulation ill requests list', ln=ln) return page(title="Circulation ILL Requests list", body=perform_display_keyevent('ill requests list', argd, req, ln=ln), navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \ (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''), description="Invenio, Statistics, Circulation ILL Requests list", keywords="Invenio, statistics, Circulation ILL Requests list", req=req, lastupdated=__lastupdated__, navmenuid='circulation ill requests list', language=ln) def ill_requests_graph(self, req, form): """Percentage of satisfied ILL requests graph page.""" argd = wash_urlargd(form, {'doctype': (str, ""), 'status': (str, ""), 'supplier': (str, ""), 'timespan': (str, "today"), 's_date': (str, ""), 'f_date': (str, ""), 'format': (str, SUITABLE_GRAPH_FORMAT), 'sql': (int, 0), 'ln': (str, CFG_SITE_LANG)}) ln = argd['ln'] user_info = collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin') if auth_code: return page_not_authorized(req, navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''}, text=auth_msg, navmenuid='percentage circulation satisfied ill requests', ln=ln) return page(title="Percentage of circulation satisfied ILL requests", body=perform_display_keyevent('percentage satisfied ill requests', argd, req, ln=ln), navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \ (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''), description="Invenio, Statistics, Percentage of circulation satisfied ILL requests", keywords="Invenio, statistics, Percentage of circulation satisfied ILL requests", req=req, lastupdated=__lastupdated__, navmenuid='percentage circulation satisfied ill requests', language=ln) def items_stats(self, req, form): """ILL Requests statistics page.""" argd = wash_urlargd(form, {'udc': (str, ""), 
'collection': (str, ""), 'timespan': (str, "today"), 's_date': (str, ""), 'f_date': (str, ""), 'format': (str, SUITABLE_GRAPH_FORMAT), 'sql': (int, 0), 'ln': (str, CFG_SITE_LANG)}) ln = argd['ln'] user_info = collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin') if auth_code: return page_not_authorized(req, navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''}, text=auth_msg, navmenuid='circulation items stats', ln=ln) return page(title="Circulation items statistics", body=perform_display_keyevent('items stats', argd, req, ln=ln), navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \ (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''), description="Invenio, Statistics, Circulation items statistics", keywords="Invenio, statistics, Circulation items statistics", req=req, lastupdated=__lastupdated__, navmenuid='circulation items stats', language=ln) def items_list(self, req, form): """Number of loans lists page.""" argd = wash_urlargd(form, {'library': (str, ""), 'status': (str, ""), 'format': (str, ""), 'sql': (int, 0), 'ln': (str, CFG_SITE_LANG)}) ln = argd['ln'] user_info = collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin') if auth_code: return page_not_authorized(req, navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''}, text=auth_msg, navmenuid='circulation items list', ln=ln) return page(title="Circulation items list", body=perform_display_keyevent('items list', argd, req, ln=ln), navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \ (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''), description="Invenio, Statistics, Circulation items list", keywords="Invenio, statistics, Circulation items list", req=req, lastupdated=__lastupdated__, navmenuid='circulation items list', language=ln) def loans_requests(self, req, form): """Number of loans statistics page.""" argd 
= wash_urlargd(form, {'item_status': (str, ""), 'timespan': (str, "today"), 's_date': (str, ""), 'f_date': (str, ""), 'format': (str, SUITABLE_GRAPH_FORMAT), 'sql': (int, 0), 'ln': (str, CFG_SITE_LANG)}) ln = argd['ln'] user_info = collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin') if auth_code: return page_not_authorized(req, navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''}, text=auth_msg, navmenuid='circulation loan request statistics', ln=ln) return page(title="Circulation hold requests statistics", body=perform_display_keyevent('loan request statistics', argd, req, ln=ln), navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \ (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''), description="Invenio, Statistics, Circulation hold requests statistics", keywords="Invenio, statistics, Circulation hold requests statistics", req=req, lastupdated=__lastupdated__, navmenuid='circulation loan request statistics', language=ln) def loans_request_lists(self, req, form): """Number of loans request lists page.""" argd = wash_urlargd(form, {'udc': (str, ""), 'timespan': (str, "today"), 's_date': (str, ""), 'f_date': (str, ""), 'format': (str, SUITABLE_GRAPH_FORMAT), 'sql': (int, 0), 'ln': (str, CFG_SITE_LANG)}) ln = argd['ln'] user_info = collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin') if auth_code: return page_not_authorized(req, navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''}, text=auth_msg, navmenuid='circulation hold request lists', ln=ln) return page(title="Circulation loans request lists", body=perform_display_keyevent('loan request lists', argd, req, ln=ln), navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \ (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''), description="Invenio, Statistics, Circulation hold request lists", keywords="Invenio, 
statistics, Circulation hold request lists", req=req, lastupdated=__lastupdated__, navmenuid='circulation hold request lists', language=ln) def user_stats(self, req, form): """Number of loans statistics page.""" argd = wash_urlargd(form, {'timespan': (str, "today"), 's_date': (str, ""), 'f_date': (str, ""), 'format': (str, SUITABLE_GRAPH_FORMAT), 'sql': (int, 0), 'ln': (str, CFG_SITE_LANG)}) ln = argd['ln'] user_info = collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin') if auth_code: return page_not_authorized(req, navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''}, text=auth_msg, navmenuid='circulation user statistics', ln=ln) return page(title="Circulation users statistics", body=perform_display_keyevent('user statistics', argd, req, ln=ln), navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \ (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''), description="Invenio, Statistics, Circulation users statistics", keywords="Invenio, statistics, Circulation users statistics", req=req, lastupdated=__lastupdated__, navmenuid='circulation user statistics', language=ln) def user_lists(self, req, form): """Number of loans lists page.""" argd = wash_urlargd(form, {'timespan': (str, "today"), 's_date': (str, ""), 'f_date': (str, ""), 'format': (str, SUITABLE_GRAPH_FORMAT), 'sql': (int, 0), 'ln': (str, CFG_SITE_LANG)}) ln = argd['ln'] user_info = collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin') if auth_code: return page_not_authorized(req, navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''}, text=auth_msg, navmenuid='circulation users lists', ln=ln) return page(title="Circulation users lists", body=perform_display_keyevent('user lists', argd, req, ln=ln), navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \ (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''), 
description="Invenio, Statistics, Circulation users lists", keywords="Invenio, statistics, Circulation users lists", req=req, lastupdated=__lastupdated__, navmenuid='circulation users lists', language=ln) # CUSTOM EVENT SECTION def customevent(self, req, form): """Custom event statistics page""" arg_format = {'ids': (list, []), 'timespan': (str, "today"), 's_date': (str, ""), 'f_date': (str, ""), 'format': (str, SUITABLE_GRAPH_FORMAT), 'ln': (str, CFG_SITE_LANG)} for key in form.keys(): if key[:4] == 'cols': i = key[4:] arg_format['cols' + i] = (list, []) arg_format['col_value' + i] = (list, []) arg_format['bool' + i] = (list, []) argd = wash_urlargd(form, arg_format) ln = argd['ln'] user_info = collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin') if auth_code: return page_not_authorized(req, navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''}, text=auth_msg, navmenuid='custom event', ln=ln) body = perform_display_customevent(argd['ids'], argd, req=req, ln=ln) return page(title="Custom event", body=body, navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \ (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''), description="Invenio, Statistics, Custom event", keywords="Invenio, statistics, custom event", req=req, lastupdated=__lastupdated__, navmenuid='custom event', language=ln) def error_log(self, req, form): """Number of loans request lists page.""" argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)}) ln = argd['ln'] user_info = collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin') if auth_code: return page_not_authorized(req, navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''}, text=auth_msg, navmenuid='error log analyzer', ln=ln) return page(title="Error log analyzer", body=perform_display_error_log_analyzer(ln=ln), navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % 
\ (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''), description="Invenio, Statistics, Error log analyzer", keywords="Invenio, statistics, Error log analyzer", req=req, lastupdated=__lastupdated__, navmenuid='error log analyzer', language=ln) def customevent_help(self, req, form): """Custom event help page""" argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)}) ln = argd['ln'] user_info = collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin') if auth_code: return page_not_authorized(req, navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''}, text=auth_msg, navmenuid='custom event help', ln=ln) return page(title="Custom event help", body=perform_display_customevent_help(ln=ln), navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \ (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''), description="Invenio, Statistics, Custom event help", keywords="Invenio, statistics, custom event help", req=req, lastupdated=__lastupdated__, navmenuid='custom event help', language=ln) def customevent_register(self, req, form): """Register a customevent and reload to it defined url""" argd = wash_urlargd(form, {'event_id': (str, ""), 'arg': (str, ""), 'url': (str, ""), 'ln': (str, CFG_SITE_LANG)}) params = argd['arg'].split(',') if "WEBSTAT_IP" in params: index = params.index("WEBSTAT_IP") params[index] = str(req.remote_ip) register_customevent(argd['event_id'], params) return redirect_to_url(req, unquote(argd['url']), apache.HTTP_MOVED_PERMANENTLY) # CUSTOM REPORT SECTION def custom_summary(self, req, form): """Custom report page""" if CFG_CERN_SITE: # NOTE: See RQF0382332 query = "690C_a:CERN and year:%i" % (localtime()[0],) else: query = "" argd = wash_urlargd(form, {'query': (str, query), 'tag': (str, CFG_JOURNAL_TAG.replace("%", "p")), 'title': (str, "Publications"), 'ln': (str, CFG_SITE_LANG)}) ln = argd['ln'] user_info = collect_user_info(req) (auth_code, auth_msg) = 
acc_authorize_action(user_info, 'runwebstatadmin') if auth_code: return page_not_authorized(req, navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''}, text=auth_msg, navmenuid='custom query summary', ln=ln) return page(title="Custom query summary", body=perform_display_custom_summary(argd, ln=ln), navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \ (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''), description="Invenio, Statistics, Custom Query Summary", keywords="Invenio, statistics, custom query summary", req=req, lastupdated=__lastupdated__, navmenuid='custom query summary', language=ln) # COLLECTIONS SECTION def collection_stats(self, req, form): """Collection statistics list page""" argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)}) ln = argd['ln'] user_info = collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin') if auth_code: return page_not_authorized(req, navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''}, navmenuid='collections list', text=auth_msg, ln=ln) return page(title="Collection statistics", body=perform_display_coll_list(req, ln=ln), navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \ (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''), description="Invenio, Statistics, Collection statistics", keywords="Invenio, statistics", req=req, lastupdated=__lastupdated__, navmenuid='collections list', language=ln) def collections(self, req, form): """Collections statistics page""" argd = wash_urlargd(form, {'collection': (str, "All"), 'timespan': (str, "this month"), 's_date': (str, ""), 'f_date': (str, ""), 'format': (str, "flot"), 'ln': (str, CFG_SITE_LANG)}) ln = argd['ln'] user_info = collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin') if auth_code: return page_not_authorized(req, navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and 
'?ln=' + ln) or ''}, navmenuid='collections', text=auth_msg, ln=ln) if collection_restricted_p(argd['collection']): (auth_code_coll, auth_msg_coll) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=argd['collection']) if auth_code_coll: return page_not_authorized(req, navmenuid='collections', text=auth_msg_coll, ln=ln) return page(title="Statistics of %s" % argd['collection'], body=perform_display_stats_per_coll(argd, req, ln=ln), navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \ (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''), description="Invenio, Statistics, Collection %s" % argd['collection'], keywords="Invenio, statistics, %s" % argd['collection'], req=req, lastupdated=__lastupdated__, navmenuid='collections', language=ln) # EXPORT SECTION def export(self, req, form): """Exports data""" argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)}) ln = argd['ln'] user_info = collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin') if auth_code: return page_not_authorized(req, navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''}, text=auth_msg, navmenuid='export', ln=ln) argd = wash_urlargd(form, {"filename": (str, ""), "mime": (str, "")}) # Check that the particular file exists and that it's OK to export webstat_files = [x for x in os.listdir(CFG_TMPDIR) if x.startswith("webstat")] if argd["filename"] not in webstat_files: return "Bad file." # Set correct header type req.content_type = argd["mime"] req.send_http_header() # Rebuild path, send it to the user, and clean up. filename = CFG_TMPDIR + '/' + argd["filename"] req.sendfile(filename) os.remove(filename) index = __call__
gpl-2.0
mhotwagner/abackend
abackend-env/lib/python3.5/site-packages/pip/_vendor/progress/__init__.py
916
3023
# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com> # # Permission to use, copy, modify, and distribute this software for any # purpose with or without fee is hereby granted, provided that the above # copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. from __future__ import division from collections import deque from datetime import timedelta from math import ceil from sys import stderr from time import time __version__ = '1.2' class Infinite(object): file = stderr sma_window = 10 def __init__(self, *args, **kwargs): self.index = 0 self.start_ts = time() self._ts = self.start_ts self._dt = deque(maxlen=self.sma_window) for key, val in kwargs.items(): setattr(self, key, val) def __getitem__(self, key): if key.startswith('_'): return None return getattr(self, key, None) @property def avg(self): return sum(self._dt) / len(self._dt) if self._dt else 0 @property def elapsed(self): return int(time() - self.start_ts) @property def elapsed_td(self): return timedelta(seconds=self.elapsed) def update(self): pass def start(self): pass def finish(self): pass def next(self, n=1): if n > 0: now = time() dt = (now - self._ts) / n self._dt.append(dt) self._ts = now self.index = self.index + n self.update() def iter(self, it): for x in it: yield x self.next() self.finish() class Progress(Infinite): def __init__(self, *args, **kwargs): super(Progress, self).__init__(*args, **kwargs) self.max = kwargs.get('max', 100) @property def eta(self): return int(ceil(self.avg * 
self.remaining)) @property def eta_td(self): return timedelta(seconds=self.eta) @property def percent(self): return self.progress * 100 @property def progress(self): return min(1, self.index / self.max) @property def remaining(self): return max(self.max - self.index, 0) def start(self): self.update() def goto(self, index): incr = index - self.index self.next(incr) def iter(self, it): try: self.max = len(it) except TypeError: pass for x in it: yield x self.next() self.finish()
mit
tchernomax/ansible
lib/ansible/modules/cloud/google/gcp_sql_user.py
10
11081
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (C) 2017 Google # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # ---------------------------------------------------------------------------- # # *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** # # ---------------------------------------------------------------------------- # # This file is automatically generated by Magic Modules and manual # changes will be clobbered when the file is regenerated. # # Please read more about how to change this file at # https://www.github.com/GoogleCloudPlatform/magic-modules # # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function __metaclass__ = type ################################################################################ # Documentation ################################################################################ ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: gcp_sql_user description: - The Users resource represents a database user in a Cloud SQL instance. short_description: Creates a GCP User version_added: 2.7 author: Google Inc. (@googlecloudplatform) requirements: - python >= 2.6 - requests >= 2.18.4 - google-auth >= 1.3.0 options: state: description: - Whether the given object should exist in GCP choices: ['present', 'absent'] default: 'present' host: description: - The host name from which the user can connect. For insert operations, host defaults to an empty string. For update operations, host is specified as part of the request URL. The host name cannot be updated after insertion. required: true name: description: - The name of the user in the Cloud SQL instance. required: true instance: description: - The name of the Cloud SQL instance. This does not include the project ID. 
required: true password: description: - The password for the user. required: false extends_documentation_fragment: gcp ''' EXAMPLES = ''' - name: create a instance gcp_sql_instance: name: "instance-user" settings: ip_configuration: authorized_networks: - name: google dns server value: 8.8.8.8/32 tier: db-n1-standard-1 region: us-central1 project: "{{ gcp_project }}" auth_kind: "{{ gcp_cred_kind }}" service_account_file: "{{ gcp_cred_file }}" state: present register: instance - name: create a user gcp_sql_user: name: test-user host: 10.1.2.3 password: secret-password instance: "{{ instance }}" project: "test_project" auth_kind: "service_account" service_account_file: "/tmp/auth.pem" state: present ''' RETURN = ''' host: description: - The host name from which the user can connect. For insert operations, host defaults to an empty string. For update operations, host is specified as part of the request URL. The host name cannot be updated after insertion. returned: success type: str name: description: - The name of the user in the Cloud SQL instance. returned: success type: str instance: description: - The name of the Cloud SQL instance. This does not include the project ID. returned: success type: dict password: description: - The password for the user. 
returned: success type: str ''' ################################################################################ # Imports ################################################################################ from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict import json import time ################################################################################ # Main ################################################################################ def main(): """Main function""" module = GcpModule( argument_spec=dict( state=dict(default='present', choices=['present', 'absent'], type='str'), host=dict(required=True, type='str'), name=dict(required=True, type='str'), instance=dict(required=True, type='dict'), password=dict(type='str') ) ) if not module.params['scopes']: module.params['scopes'] = ['https://www.googleapis.com/auth/sqlservice.admin'] state = module.params['state'] kind = 'sql#user' fetch = fetch_wrapped_resource(module, 'sql#user', 'sql#usersList', 'items') changed = False if fetch: if state == 'present': if is_different(module, fetch): fetch = update(module, self_link(module), kind) changed = True else: delete(module, self_link(module), kind) fetch = {} changed = True else: if state == 'present': fetch = create(module, collection(module), kind) changed = True else: fetch = {} fetch.update({'changed': changed}) module.exit_json(**fetch) def create(module, link, kind): auth = GcpSession(module, 'sql') return wait_for_operation(module, auth.post(link, resource_to_request(module))) def update(module, link, kind): auth = GcpSession(module, 'sql') return wait_for_operation(module, auth.put(link, resource_to_request(module))) def delete(module, link, kind): auth = GcpSession(module, 'sql') return wait_for_operation(module, auth.delete(link)) def resource_to_request(module): request = { u'kind': 'sql#user', u'password': module.params.get('password'), u'host': module.params.get('host'), u'name': 
module.params.get('name') } return_vals = {} for k, v in request.items(): if v: return_vals[k] = v return return_vals def unwrap_resource_filter(module): return { 'host': module.params['host'], 'name': module.params['name'] } def unwrap_resource(result, module): query_predicate = unwrap_resource_filter(module) matched_items = [] for item in result: if all(item[k] == query_predicate[k] for k in query_predicate.keys()): matched_items.append(item) if len(matched_items) > 1: module.fail_json(msg="More than 1 result found: %s" % matched_items) if matched_items: return matched_items[0] else: return None def fetch_resource(module, link, kind): auth = GcpSession(module, 'sql') return return_if_object(module, auth.get(link), kind) def fetch_wrapped_resource(module, kind, wrap_kind, wrap_path): result = fetch_resource(module, self_link(module), wrap_kind) if result is None or wrap_path not in result: return None result = unwrap_resource(result[wrap_path], module) if result is None: return None if result['kind'] != kind: module.fail_json(msg="Incorrect result: {kind}".format(**result)) return result def self_link(module): res = { 'project': module.params['project'], 'instance': replace_resource_dict(module.params['instance'], 'name'), 'name': module.params['name'], 'host': module.params['host'] } return "https://www.googleapis.com/sql/v1beta4/projects/{project}/instances/{instance}/users?name={name}&host={host}".format(**res) def collection(module): res = { 'project': module.params['project'], 'instance': replace_resource_dict(module.params['instance'], 'name') } return "https://www.googleapis.com/sql/v1beta4/projects/{project}/instances/{instance}/users".format(**res) def return_if_object(module, response, kind): # If not found, return nothing. if response.status_code == 404: return None # If no content, return nothing. 
if response.status_code == 204: return None # SQL only: return on 403 if not exist if response.status_code == 403: return None try: result = response.json() except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: module.fail_json(msg="Invalid JSON response with error: %s" % inst) if navigate_hash(result, ['error', 'errors']): module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) if result['kind'] != kind: module.fail_json(msg="Incorrect result: {kind}".format(**result)) return result def is_different(module, response): request = resource_to_request(module) response = response_to_hash(module, response) # Remove all output-only from response. response_vals = {} for k, v in response.items(): if k in request: response_vals[k] = v request_vals = {} for k, v in request.items(): if k in response: request_vals[k] = v return GcpRequest(request_vals) != GcpRequest(response_vals) # Remove unnecessary properties from the response. # This is for doing comparisons with Ansible's current parameters. 
def response_to_hash(module, response): return { u'host': response.get(u'host'), u'name': response.get(u'name') } def async_op_url(module, extra_data=None): if extra_data is None: extra_data = {} url = "https://www.googleapis.com/sql/v1beta4/projects/{project}/operations/{op_id}" combined = extra_data.copy() combined.update(module.params) return url.format(**combined) def wait_for_operation(module, response): op_result = return_if_object(module, response, 'sql#operation') if op_result is None: return {} status = navigate_hash(op_result, ['status']) wait_for_completion(status, op_result, module) return fetch_wrapped_resource(module, 'sql#user', 'sql#usersList', 'items') def wait_for_completion(status, op_result, module): op_id = navigate_hash(op_result, ['name']) op_uri = async_op_url(module, {'op_id': op_id}) while status != 'DONE': raise_if_errors(op_result, ['error', 'errors'], 'message') time.sleep(1.0) if status not in ['PENDING', 'RUNNING', 'DONE']: module.fail_json(msg="Invalid result %s" % status) op_result = fetch_resource(module, op_uri, 'sql#operation') status = navigate_hash(op_result, ['status']) return op_result def raise_if_errors(response, err_path, module): errors = navigate_hash(response, err_path) if errors is not None: module.fail_json(msg=errors) if __name__ == '__main__': main()
gpl-3.0
Trult/youtube-dl
youtube_dl/extractor/expotv.py
129
2591
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( int_or_none, unified_strdate, ) class ExpoTVIE(InfoExtractor): _VALID_URL = r'https?://www\.expotv\.com/videos/[^?#]*/(?P<id>[0-9]+)($|[?#])' _TEST = { 'url': 'http://www.expotv.com/videos/reviews/1/24/LinneCardscom/17561', 'md5': '2985e6d7a392b2f7a05e0ca350fe41d0', 'info_dict': { 'id': '17561', 'ext': 'mp4', 'upload_date': '20060212', 'title': 'My Favorite Online Scrapbook Store', 'view_count': int, 'description': 'You\'ll find most everything you need at this virtual store front.', 'uploader': 'Anna T.', 'thumbnail': 're:^https?://.*\.jpg$', } } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') webpage = self._download_webpage(url, video_id) player_key = self._search_regex( r'<param name="playerKey" value="([^"]+)"', webpage, 'player key') config_url = 'http://client.expotv.com/video/config/%s/%s' % ( video_id, player_key) config = self._download_json( config_url, video_id, note='Downloading video configuration') formats = [{ 'url': fcfg['file'], 'height': int_or_none(fcfg.get('height')), 'format_note': fcfg.get('label'), 'ext': self._search_regex( r'filename=.*\.([a-z0-9_A-Z]+)&', fcfg['file'], 'file extension', default=None), } for fcfg in config['sources']] self._sort_formats(formats) title = self._og_search_title(webpage) description = self._og_search_description(webpage) thumbnail = config.get('image') view_count = int_or_none(self._search_regex( r'<h5>Plays: ([0-9]+)</h5>', webpage, 'view counts')) uploader = self._search_regex( r'<div class="reviewer">\s*<img alt="([^"]+)"', webpage, 'uploader', fatal=False) upload_date = unified_strdate(self._search_regex( r'<h5>Reviewed on ([0-9/.]+)</h5>', webpage, 'upload date', fatal=False)) return { 'id': video_id, 'formats': formats, 'title': title, 'description': description, 'view_count': view_count, 'thumbnail': thumbnail, 'uploader': uploader, 'upload_date': 
upload_date, }
unlicense
nextgis/NextGIS_QGIS_open
python/plugins/GdalTools/tools/doPctRgb.py
10
6666
# -*- coding: utf-8 -*- """ *************************************************************************** doPctRgb.py --------------------- Date : June 2010 Copyright : (C) 2010 by Giuseppe Sucameli Email : brush dot tyler at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Giuseppe Sucameli' __date__ = 'June 2010' __copyright__ = '(C) 2010, Giuseppe Sucameli' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' from PyQt4.QtCore import QObject, SIGNAL, QCoreApplication from PyQt4.QtGui import QWidget from ui_widgetConvert import Ui_GdalToolsWidget as Ui_Widget from widgetBatchBase import GdalToolsBaseBatchWidget as BaseBatchWidget import GdalTools_utils as Utils class GdalToolsDialog(QWidget, Ui_Widget, BaseBatchWidget): def __init__(self, iface): QWidget.__init__(self) self.iface = iface self.setupUi(self) BaseBatchWidget.__init__(self, self.iface, "pct2rgb.py") # we use one widget for two tools self.base.setWindowTitle( self.tr( "Convert paletted image to RGB" ) ) self.outSelector.setType( self.outSelector.FILE ) # set the default QSpinBoxes and QProgressBar value self.bandSpin.setValue(1) self.progressBar.setValue(0) self.progressBar.hide() self.outputFormat = Utils.fillRasterOutputFormat() self.setParamsStatus([ (self.inSelector, SIGNAL("filenameChanged()")), (self.outSelector, SIGNAL("filenameChanged()")), (self.colorsSpin, SIGNAL("valueChanged(int)"), self.colorsCheck, "-1"), # hide this option (self.bandSpin, SIGNAL("valueChanged(int)"), self.bandCheck) ]) self.connect(self.inSelector, SIGNAL("selectClicked()"), 
self.fillInputFile) self.connect(self.outSelector, SIGNAL("selectClicked()"), self.fillOutputFileEdit) self.connect( self.batchCheck, SIGNAL( "stateChanged( int )" ), self.switchToolMode ) # switch to batch or normal mode def switchToolMode( self ): self.setCommandViewerEnabled( not self.batchCheck.isChecked() ) self.progressBar.setVisible( self.batchCheck.isChecked() ) self.inSelector.setType( self.inSelector.FILE if self.batchCheck.isChecked() else self.inSelector.FILE_LAYER ) self.outSelector.clear() if self.batchCheck.isChecked(): self.inFileLabel = self.label.text() self.outFileLabel = self.label_2.text() self.label.setText( QCoreApplication.translate( "GdalTools", "&Input directory" ) ) self.label_2.setText( QCoreApplication.translate( "GdalTools", "&Output directory" ) ) QObject.disconnect( self.inSelector, SIGNAL( "selectClicked()" ), self.fillInputFile ) QObject.disconnect( self.outSelector, SIGNAL( "selectClicked()" ), self.fillOutputFileEdit ) QObject.connect( self.inSelector, SIGNAL( "selectClicked()" ), self.fillInputDir ) QObject.connect( self.outSelector, SIGNAL( "selectClicked()" ), self.fillOutputDir ) else: self.label.setText( self.inFileLabel ) self.label_2.setText( self.outFileLabel ) QObject.disconnect( self.inSelector, SIGNAL( "selectClicked()" ), self.fillInputDir ) QObject.disconnect( self.outSelector, SIGNAL( "selectClicked()" ), self.fillOutputDir ) QObject.connect( self.inSelector, SIGNAL( "selectClicked()" ), self.fillInputFile ) QObject.connect( self.outSelector, SIGNAL( "selectClicked()" ), self.fillOutputFileEdit ) def onLayersChanged(self): self.inSelector.setLayers( Utils.LayerRegistry.instance().getRasterLayers() ) def fillInputFile(self): lastUsedFilter = Utils.FileFilter.lastUsedRasterFilter() inputFile = Utils.FileDialog.getOpenFileName(self, self.tr( "Select the input file for convert" ), Utils.FileFilter.allRastersFilter(), lastUsedFilter ) if not inputFile: return Utils.FileFilter.setLastUsedRasterFilter(lastUsedFilter) 
self.inSelector.setFilename(inputFile) def fillOutputFileEdit(self): lastUsedFilter = Utils.FileFilter.lastUsedRasterFilter() outputFile = Utils.FileDialog.getSaveFileName(self, self.tr( "Select the raster file to save the results to" ), Utils.FileFilter.saveRastersFilter(), lastUsedFilter ) if not outputFile: return Utils.FileFilter.setLastUsedRasterFilter(lastUsedFilter) self.outputFormat = Utils.fillRasterOutputFormat( lastUsedFilter, outputFile ) self.outSelector.setFilename(outputFile) def fillInputDir( self ): inputDir = Utils.FileDialog.getExistingDirectory( self, self.tr( "Select the input directory with files for convert" )) if not inputDir: return self.inSelector.setFilename( inputDir ) def fillOutputDir( self ): outputDir = Utils.FileDialog.getExistingDirectory( self, self.tr( "Select the output directory to save the results to" )) if not outputDir: return self.outSelector.setFilename( outputDir ) def getArguments(self): arguments = [] if self.bandCheck.isChecked(): arguments.append( "-b") arguments.append( str( self.bandSpin.value() )) if self.isBatchEnabled(): return arguments outputFn = self.getOutputFileName() if outputFn: arguments.append( "-of") arguments.append( self.outputFormat) arguments.append( self.getInputFileName()) arguments.append( outputFn) return arguments def getInputFileName(self): return self.inSelector.filename() def getOutputFileName(self): return self.outSelector.filename() def addLayerIntoCanvas(self, fileInfo): self.iface.addRasterLayer(fileInfo.filePath()) def isBatchEnabled(self): return self.batchCheck.isChecked() def setProgressRange(self, maximum): self.progressBar.setRange(0, maximum) def updateProgress(self, index, total): if index < total: self.progressBar.setValue( index + 1 ) else: self.progressBar.setValue( 0 )
gpl-2.0
allenlavoie/tensorflow
tensorflow/python/debug/cli/curses_widgets.py
157
6418
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Widgets for Curses-based CLI.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.debug.cli import debugger_cli_common RL = debugger_cli_common.RichLine class NavigationHistoryItem(object): """Individual item in navigation history.""" def __init__(self, command, screen_output, scroll_position): """Constructor of NavigationHistoryItem. Args: command: (`str`) the command line text. screen_output: the screen output of the command. scroll_position: (`int`) scroll position in the screen output. """ self.command = command self.screen_output = screen_output self.scroll_position = scroll_position class CursesNavigationHistory(object): """Navigation history containing commands, outputs and scroll info.""" BACK_ARROW_TEXT = "<--" FORWARD_ARROW_TEXT = "-->" def __init__(self, capacity): """Constructor of CursesNavigationHistory. Args: capacity: (`int`) How many items this object can hold. Each item consists of a command stirng, an output RichTextLines object and a scroll position. Raises: ValueError: If capacity is not a positive number. 
""" if capacity <= 0: raise ValueError("In valid capacity value: %d" % capacity) self._capacity = capacity self._items = [] self._pointer = -1 def add_item(self, command, screen_output, scroll_position): """Add an item to the navigation histoyr. Args: command: command line text. screen_output: screen output produced for the command. scroll_position: (`int`) scroll position in the screen output. """ if self._pointer + 1 < len(self._items): self._items = self._items[:self._pointer + 1] self._items.append( NavigationHistoryItem(command, screen_output, scroll_position)) if len(self._items) > self._capacity: self._items = self._items[-self._capacity:] self._pointer = len(self._items) - 1 def update_scroll_position(self, new_scroll_position): """Update the scroll position of the currently-pointed-to history item. Args: new_scroll_position: (`int`) new scroll-position value. Raises: ValueError: If the history is empty. """ if not self._items: raise ValueError("Empty navigation history") self._items[self._pointer].scroll_position = new_scroll_position def size(self): return len(self._items) def pointer(self): return self._pointer def go_back(self): """Go back one place in the history, if possible. Decrease the pointer value by 1, if possible. Otherwise, the pointer value will be unchanged. Returns: The updated pointer value. Raises: ValueError: If history is empty. """ if not self._items: raise ValueError("Empty navigation history") if self.can_go_back(): self._pointer -= 1 return self._items[self._pointer] def go_forward(self): """Go forward one place in the history, if possible. Increase the pointer value by 1, if possible. Otherwise, the pointer value will be unchanged. Returns: The updated pointer value. Raises: ValueError: If history is empty. """ if not self._items: raise ValueError("Empty navigation history") if self.can_go_forward(): self._pointer += 1 return self._items[self._pointer] def can_go_back(self): """Test whether client can go back one place. 
Returns: (`bool`) Whether going back one place is possible. """ return self._pointer >= 1 def can_go_forward(self): """Test whether client can go forward one place. Returns: (`bool`) Whether going back one place is possible. """ return self._pointer + 1 < len(self._items) def render(self, max_length, backward_command, forward_command, latest_command_attribute="black_on_white", old_command_attribute="magenta_on_white"): """Render the rich text content of the single-line navigation bar. Args: max_length: (`int`) Maximum length of the navigation bar, in characters. backward_command: (`str`) command for going backward. Used to construct the shortcut menu item. forward_command: (`str`) command for going forward. Used to construct the shortcut menu item. latest_command_attribute: font attribute for lastest command. old_command_attribute: font attribute for old (non-latest) command. Returns: (`debugger_cli_common.RichTextLines`) the navigation bar text with attributes. """ output = RL("| ") output += RL( self.BACK_ARROW_TEXT, (debugger_cli_common.MenuItem(None, backward_command) if self.can_go_back() else None)) output += RL(" ") output += RL( self.FORWARD_ARROW_TEXT, (debugger_cli_common.MenuItem(None, forward_command) if self.can_go_forward() else None)) if self._items: command_attribute = (latest_command_attribute if (self._pointer == (len(self._items) - 1)) else old_command_attribute) output += RL(" | ") if self._pointer != len(self._items) - 1: output += RL("(-%d) " % (len(self._items) - 1 - self._pointer), command_attribute) if len(output) < max_length: maybe_truncated_command = self._items[self._pointer].command[ :(max_length - len(output))] output += RL(maybe_truncated_command, command_attribute) return debugger_cli_common.rich_text_lines_from_rich_line_list([output])
apache-2.0
tbarrongh/cosc-learning-labs
src/bootcamp/02_all_devices/mount.py
3
1682
#!/usr/bin/env python2.7 # Copyright 2015 Cisco Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. """Use basics.topology.mount to mount all unmounted devices in settings.config['network_device'] Print the function's documentation then apply the function to every device that is configured and not mounted. """ from __future__ import print_function import pydoc import settings from basics import topology def mount_device(device_name): """Mount a single device by inserting in the NETCONF topology.""" device_config = settings.config['network_device'][device_name] print('device_mount(' + device_name, *device_config.values(), sep=', ', end=')\n') topology.mount( device_name, device_config['address'], device_config['port'], device_config['username'], device_config['password']) def main(): print(pydoc.plain(pydoc.render_doc(topology.mount))) unmounted_list = topology.unmounted_nodes() if unmounted_list: for device_name in unmounted_list: mount_device(device_name) else: print('There are no (configured) devices unmounted.') if __name__ == "__main__": main()
apache-2.0
crazy-cat/incubator-mxnet
benchmark/python/sparse/memory_benchmark.py
53
3792
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Should be run with valgrind to get memory consumption for sparse format storage and dot operators. This script can be used for memory benchmarking on CPU only""" import ctypes import sys import argparse import mxnet as mx from mxnet.test_utils import rand_ndarray from mxnet.base import check_call, _LIB def parse_args(): """ Function to parse arguments """ parser = argparse.ArgumentParser() parser.add_argument("--lhs-row-dim", required=True, help="Provide batch_size") parser.add_argument("--lhs-col-dim", required=True, help="Provide feature_dim") parser.add_argument("--rhs-col-dim", required=True, help="Provide output_dim") parser.add_argument("--density", required=True, help="Density for lhs") parser.add_argument("--num-omp-threads", type=int, default=1, help="number of omp threads to set in MXNet") parser.add_argument("--lhs-stype", default="csr", choices=["csr", "default", "row_sparse"], help="stype for lhs", required=True) parser.add_argument("--rhs-stype", default="default", choices=["default", "row_sparse"], help="rhs stype", required=True) parser.add_argument("--only-storage", action="store_true", help="only storage") parser.add_argument("--rhs-density", help="rhs_density") return 
parser.parse_args() def main(): args = parse_args() lhs_row_dim = int(args.lhs_row_dim) lhs_col_dim = int(args.lhs_col_dim) rhs_col_dim = int(args.rhs_col_dim) density = float(args.density) lhs_stype = args.lhs_stype rhs_stype = args.rhs_stype if args.rhs_density: rhs_density = float(args.rhs_density) else: rhs_density = density dot_func = mx.nd.sparse.dot if lhs_stype == "csr" else mx.nd.dot check_call(_LIB.MXSetNumOMPThreads(ctypes.c_int(args.num_omp_threads))) bench_dot(lhs_row_dim, lhs_col_dim, rhs_col_dim, density, rhs_density, dot_func, False, lhs_stype, rhs_stype, args.only_storage) def bench_dot(lhs_row_dim, lhs_col_dim, rhs_col_dim, density, rhs_density, dot_func, trans_lhs, lhs_stype, rhs_stype, only_storage, distribution="uniform"): """ Benchmarking both storage and dot """ lhs_nd = rand_ndarray((lhs_row_dim, lhs_col_dim), lhs_stype, density, distribution=distribution) if not only_storage: rhs_nd = rand_ndarray((lhs_col_dim, rhs_col_dim), rhs_stype, density=rhs_density, distribution=distribution) out = dot_func(lhs_nd, rhs_nd, trans_lhs) mx.nd.waitall() if __name__ == '__main__': sys.exit(main())
apache-2.0
srimai/odoo
openerp/addons/test_new_api/tests/test_related.py
247
8446
# # test cases for related fields, etc. # import unittest from openerp.osv import fields from openerp.tests import common class TestRelatedField(common.TransactionCase): def setUp(self): super(TestRelatedField, self).setUp() self.partner = self.registry('res.partner') self.company = self.registry('res.company') def test_0_related(self): """ test an usual related field """ # find a company with a non-null partner_id ids = self.company.search(self.cr, self.uid, [('partner_id', '!=', False)], limit=1) id = ids[0] # find partners that satisfy [('partner_id.company_id', '=', id)] company_ids = self.company.search(self.cr, self.uid, [('partner_id', '=', id)]) partner_ids1 = self.partner.search(self.cr, self.uid, [('company_id', 'in', company_ids)]) partner_ids2 = self.partner.search(self.cr, self.uid, [('related_company_partner_id', '=', id)]) self.assertEqual(partner_ids1, partner_ids2) def do_test_company_field(self, field): # get a partner with a non-null company_id ids = self.partner.search(self.cr, self.uid, [('company_id', '!=', False)], limit=1) partner = self.partner.browse(self.cr, self.uid, ids[0]) # check reading related field self.assertEqual(partner[field], partner.company_id) # check that search on related field is equivalent to original field ids1 = self.partner.search(self.cr, self.uid, [('company_id', '=', partner.company_id.id)]) ids2 = self.partner.search(self.cr, self.uid, [(field, '=', partner.company_id.id)]) self.assertEqual(ids1, ids2) def test_1_single_related(self): """ test a related field with a single indirection like fields.related('foo') """ self.do_test_company_field('single_related_company_id') def test_2_related_related(self): """ test a related field referring to a related field """ self.do_test_company_field('related_related_company_id') def test_3_read_write(self): """ write on a related field """ # find a company with a non-null partner_id company_ids = self.company.search(self.cr, self.uid, [('partner_id', '!=', False)], limit=1) 
company = self.company.browse(self.cr, self.uid, company_ids[0]) # find partners that satisfy [('company_id.partner_id', '=', company.partner_id.id)] partner_ids = self.partner.search(self.cr, self.uid, [('related_company_partner_id', '=', company.partner_id.id)]) self.assertGreater(len(partner_ids), 0) partner = self.partner.browse(self.cr, self.uid, partner_ids[0]) # create a new partner, and assign it to company new_partner_id = self.partner.create(self.cr, self.uid, {'name': 'Foo'}) partner.write({'related_company_partner_id': new_partner_id}) company = self.company.browse(self.cr, self.uid, company_ids[0]) self.assertEqual(company.partner_id.id, new_partner_id) partner = self.partner.browse(self.cr, self.uid, partner_ids[0]) self.assertEqual(partner.related_company_partner_id.id, new_partner_id) class TestPropertyField(common.TransactionCase): def setUp(self): super(TestPropertyField, self).setUp() self.user = self.registry('res.users') self.partner = self.registry('res.partner') self.company = self.registry('res.company') self.country = self.registry('res.country') self.property = self.registry('ir.property') self.imd = self.registry('ir.model.data') @unittest.skip("invalid monkey-patching") def test_1_property_multicompany(self): cr, uid = self.cr, self.uid parent_company_id = self.imd.get_object_reference(cr, uid, 'base', 'main_company')[1] country_be = self.imd.get_object_reference(cr, uid, 'base', 'be')[1] country_fr = self.imd.get_object_reference(cr, uid, 'base', 'fr')[1] group_partner_manager = self.imd.get_object_reference(cr, uid, 'base', 'group_partner_manager')[1] group_multi_company = self.imd.get_object_reference(cr, uid, 'base', 'group_multi_company')[1] sub_company = self.company.create(cr, uid, {'name': 'MegaCorp', 'parent_id': parent_company_id}) alice = self.user.create(cr, uid, {'name': 'Alice', 'login':'alice', 'email':'alice@youcompany.com', 'company_id':parent_company_id, 'company_ids':[(6, 0, [parent_company_id, sub_company])], 
'country_id':country_be, 'groups_id': [(6, 0, [group_partner_manager, group_multi_company])] }) bob = self.user.create(cr, uid, {'name': 'Bob', 'login':'bob', 'email':'bob@megacorp.com', 'company_id':sub_company, 'company_ids':[(6, 0, [parent_company_id, sub_company])], 'country_id':country_fr, 'groups_id': [(6, 0, [group_partner_manager, group_multi_company])] }) self.partner._columns = dict(self.partner._columns) self.partner._columns.update({ 'property_country': fields.property(type='many2one', relation="res.country", string="Country by company"), }) self.partner._field_create(cr) partner_id = self.partner.create(cr, alice, { 'name': 'An International Partner', 'email': 'partner@example.com', 'company_id': parent_company_id, }) self.partner.write(cr, bob, [partner_id], {'property_country': country_fr}) self.assertEqual(self.partner.browse(cr, bob, partner_id).property_country.id, country_fr, "Bob does not see the value he has set on the property field") self.partner.write(cr, alice, [partner_id], {'property_country': country_be}) self.assertEqual(self.partner.browse(cr, alice, partner_id).property_country.id, country_be, "Alice does not see the value he has set on the property field") self.assertEqual(self.partner.browse(cr, bob, partner_id).property_country.id, country_fr, "Changes made by Alice have overwritten Bob's value") class TestHtmlField(common.TransactionCase): def setUp(self): super(TestHtmlField, self).setUp() self.partner = self.registry('res.partner') def test_00_sanitize(self): cr, uid, context = self.cr, self.uid, {} old_columns = self.partner._columns self.partner._columns = dict(old_columns) self.partner._columns.update({ 'comment': fields.html('Secure Html', sanitize=False), }) some_ugly_html = """<p>Oops this should maybe be sanitized % if object.some_field and not object.oriented: <table> % if object.other_field: <tr style="border: 10px solid black;"> ${object.mako_thing} <td> </tr> % endif <tr> %if object.dummy_field: <p>Youpie</p> 
%endif""" pid = self.partner.create(cr, uid, { 'name': 'Raoul Poilvache', 'comment': some_ugly_html, }, context=context) partner = self.partner.browse(cr, uid, pid, context=context) self.assertEqual(partner.comment, some_ugly_html, 'Error in HTML field: content was sanitized but field has sanitize=False') self.partner._columns.update({ 'comment': fields.html('Unsecure Html', sanitize=True), }) self.partner.write(cr, uid, [pid], { 'comment': some_ugly_html, }, context=context) partner = self.partner.browse(cr, uid, pid, context=context) # sanitize should have closed tags left open in the original html self.assertIn('</table>', partner.comment, 'Error in HTML field: content does not seem to have been sanitized despise sanitize=True') self.assertIn('</td>', partner.comment, 'Error in HTML field: content does not seem to have been sanitized despise sanitize=True') self.assertIn('<tr style="', partner.comment, 'Style attr should not have been stripped') self.partner._columns['comment'] = fields.html('Stripped Html', sanitize=True, strip_style=True) self.partner.write(cr, uid, [pid], {'comment': some_ugly_html}, context=context) partner = self.partner.browse(cr, uid, pid, context=context) self.assertNotIn('<tr style="', partner.comment, 'Style attr should have been stripped') self.partner._columns = old_columns
agpl-3.0
JohnnyKing94/pootle
tests/formats/mozilla_lang.py
5
2668
# -*- coding: utf-8 -*- # # Copyright (C) Pootle contributors. # # This file is a part of the Pootle project. It is distributed under the GPL3 # or later license. See the LICENSE file for a copy of the license and the # AUTHORS file for copyright and authorship information. import pytest from pytest_pootle.factories import StoreDBFactory from pootle_format.models import Format from pootle_store.constants import FUZZY, TRANSLATED from pootle_store.models import Unit @pytest.mark.django_db def test_mozlang_update(tp0): mozlang = Format.objects.get(name="lang") tp0.project.filetypes.add(mozlang) foo_lang = StoreDBFactory( name="foo.lang", filetype=mozlang, parent=tp0.directory, translation_project=tp0) store0 = tp0.stores.get(name="store0.po") # deserialize as source foo_lang.update(store0.deserialize(store0.serialize())) # get serialized lang store serialized = foo_lang.serialize() # mark a translated unit as fuzzy translated = foo_lang.units.filter(state=TRANSLATED).first() translated.state = FUZZY translated.save() # source is translated old_ttk = foo_lang.deserialize(serialized) foo_lang.update(old_ttk) # unit is still fuzzy translated.refresh_from_db() assert translated.state == FUZZY # source target changes state also gets updated old_ttk.findid(translated.getid()).target = "something else {ok}" foo_lang.update(old_ttk, store_revision=translated.revision) translated.refresh_from_db() assert translated.state == TRANSLATED translated = foo_lang.units.filter(state=TRANSLATED).first() translated.state = FUZZY translated.save() # set target == "" > untranslated ttk = foo_lang.deserialize(serialized) ttkunit = ttk.findid(translated.getid()) ttkunit.target = "" foo_lang.update(ttk) # unit stays FUZZY translated = Unit.objects.get(pk=translated.pk) assert translated.state == FUZZY @pytest.mark.django_db def test_mozlang_sync(tp0): mozlang = Format.objects.get(name="lang") tp0.project.filetypes.add(mozlang) foo_lang = StoreDBFactory( name="foo.lang", filetype=mozlang, 
parent=tp0.directory, translation_project=tp0) store0 = tp0.stores.get(name="store0.po") # deserialize as source foo_lang.update(store0.deserialize(store0.serialize())) # mark the unit as fuzzy unit = foo_lang.units.filter(state=TRANSLATED).first() unit.markfuzzy() unit.save() ttk = foo_lang.deserialize(foo_lang.serialize()) ttk_unit = ttk.findid(unit.getid()) assert not ttk_unit.istranslated()
gpl-3.0
OpenAcademy-OpenStack/nova-scheduler
nova/tests/virt/vmwareapi/test_volumeops.py
3
3079
# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import mock from nova import test from nova.tests.virt.vmwareapi import stubs from nova.virt.vmwareapi import driver from nova.virt.vmwareapi import fake as vmwareapi_fake from nova.virt.vmwareapi import volumeops class VMwareVolumeOpsTestCase(test.NoDBTestCase): def setUp(self): def fake_del(): return super(VMwareVolumeOpsTestCase, self).setUp() vmwareapi_fake.reset() stubs.set_stubs(self.stubs) self._session = driver.VMwareAPISession() self.stubs.Set(self._session, '__del__', fake_del) self._volumeops = volumeops.VMwareVolumeOps(self._session) self.instance = {'name': 'fake_name', 'uuid': 'fake_uuid'} def _test_detach_disk_from_vm(self, destroy_disk=False): def fake_call_method(module, method, *args, **kwargs): vmdk_detach_config_spec = kwargs.get('spec') virtual_device_config = vmdk_detach_config_spec.deviceChange[0] self.assertEqual('remove', virtual_device_config.operation) self.assertEqual('ns0:VirtualDeviceConfigSpec', virtual_device_config.obj_name) if destroy_disk: self.assertEqual('destroy', virtual_device_config.fileOperation) else: self.assertFalse(hasattr(virtual_device_config, 'fileOperation')) return 'fake_configure_task' with contextlib.nested( mock.patch.object(self._session, '_wait_for_task'), mock.patch.object(self._session, '_call_method', fake_call_method) ) as (_wait_for_task, _call_method): fake_device = vmwareapi_fake.DataObject() fake_device.backing = 
vmwareapi_fake.DataObject() fake_device.backing.fileName = 'fake_path' fake_device.key = 'fake_key' self._volumeops.detach_disk_from_vm('fake_vm_ref', self.instance, fake_device, destroy_disk) _wait_for_task.assert_has_calls([ mock.call(self.instance['uuid'], 'fake_configure_task')]) def test_detach_with_destroy_disk_from_vm(self): self._test_detach_disk_from_vm(destroy_disk=True) def test_detach_without_destroy_disk_from_vm(self): self._test_detach_disk_from_vm(destroy_disk=False)
apache-2.0
cstipkovic/spidermonkey-research
python/mozbuild/mozpack/files.py
1
37482
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. from __future__ import absolute_import import errno import os import platform import shutil import stat import subprocess import uuid import mozbuild.makeutil as makeutil from mozbuild.preprocessor import Preprocessor from mozbuild.util import FileAvoidWrite from mozpack.executables import ( is_executable, may_strip, strip, may_elfhack, elfhack, ) from mozpack.chrome.manifest import ManifestEntry from io import BytesIO from mozpack.errors import ( ErrorMessage, errors, ) from mozpack.mozjar import JarReader import mozpack.path as mozpath from collections import OrderedDict from jsmin import JavascriptMinify from tempfile import ( mkstemp, NamedTemporaryFile, ) try: import hglib except ImportError: hglib = None # For clean builds, copying files on win32 using CopyFile through ctypes is # ~2x as fast as using shutil.copyfile. if platform.system() != 'Windows': _copyfile = shutil.copyfile else: import ctypes _kernel32 = ctypes.windll.kernel32 _CopyFileA = _kernel32.CopyFileA _CopyFileW = _kernel32.CopyFileW def _copyfile(src, dest): # False indicates `dest` should be overwritten if it exists already. if isinstance(src, unicode) and isinstance(dest, unicode): _CopyFileW(src, dest, False) elif isinstance(src, str) and isinstance(dest, str): _CopyFileA(src, dest, False) else: raise TypeError('mismatched path types!') class Dest(object): ''' Helper interface for BaseFile.copy. The interface works as follows: - read() and write() can be used to sequentially read/write from the underlying file. - a call to read() after a write() will re-open the underlying file and read from it. - a call to write() after a read() will re-open the underlying file, emptying it, and write to it. 
''' def __init__(self, path): self.path = path self.mode = None @property def name(self): return self.path def read(self, length=-1): if self.mode != 'r': self.file = open(self.path, 'rb') self.mode = 'r' return self.file.read(length) def write(self, data): if self.mode != 'w': self.file = open(self.path, 'wb') self.mode = 'w' return self.file.write(data) def exists(self): return os.path.exists(self.path) def close(self): if self.mode: self.mode = None self.file.close() class BaseFile(object): ''' Base interface and helper for file copying. Derived class may implement their own copy function, or rely on BaseFile.copy using the open() member function and/or the path property. ''' @staticmethod def is_older(first, second): ''' Compares the modification time of two files, and returns whether the ``first`` file is older than the ``second`` file. ''' # os.path.getmtime returns a result in seconds with precision up to # the microsecond. But microsecond is too precise because # shutil.copystat only copies milliseconds, and seconds is not # enough precision. return int(os.path.getmtime(first) * 1000) \ <= int(os.path.getmtime(second) * 1000) @staticmethod def any_newer(dest, inputs): ''' Compares the modification time of ``dest`` to multiple input files, and returns whether any of the ``inputs`` is newer (has a later mtime) than ``dest``. ''' # os.path.getmtime returns a result in seconds with precision up to # the microsecond. But microsecond is too precise because # shutil.copystat only copies milliseconds, and seconds is not # enough precision. dest_mtime = int(os.path.getmtime(dest) * 1000) for input in inputs: if dest_mtime < int(os.path.getmtime(input) * 1000): return True return False def copy(self, dest, skip_if_older=True): ''' Copy the BaseFile content to the destination given as a string or a Dest instance. 
Avoids replacing existing files if the BaseFile content matches that of the destination, or in case of plain files, if the destination is newer than the original file. This latter behaviour is disabled when skip_if_older is False. Returns whether a copy was actually performed (True) or not (False). ''' if isinstance(dest, basestring): dest = Dest(dest) else: assert isinstance(dest, Dest) can_skip_content_check = False if not dest.exists(): can_skip_content_check = True elif getattr(self, 'path', None) and getattr(dest, 'path', None): if skip_if_older and BaseFile.is_older(self.path, dest.path): return False elif os.path.getsize(self.path) != os.path.getsize(dest.path): can_skip_content_check = True if can_skip_content_check: if getattr(self, 'path', None) and getattr(dest, 'path', None): _copyfile(self.path, dest.path) shutil.copystat(self.path, dest.path) else: # Ensure the file is always created if not dest.exists(): dest.write('') shutil.copyfileobj(self.open(), dest) return True src = self.open() copy_content = '' while True: dest_content = dest.read(32768) src_content = src.read(32768) copy_content += src_content if len(dest_content) == len(src_content) == 0: break # If the read content differs between origin and destination, # write what was read up to now, and copy the remainder. if dest_content != src_content: dest.write(copy_content) shutil.copyfileobj(src, dest) break if hasattr(self, 'path') and hasattr(dest, 'path'): shutil.copystat(self.path, dest.path) return True def open(self): ''' Return a file-like object allowing to read() the content of the associated file. This is meant to be overloaded in subclasses to return a custom file-like object. ''' assert self.path is not None return open(self.path, 'rb') def read(self): raise NotImplementedError('BaseFile.read() not implemented. Bug 1170329.') @property def mode(self): ''' Return the file's unix mode, or None if it has no meaning. ''' return None class File(BaseFile): ''' File class for plain files. 
''' def __init__(self, path): self.path = path @property def mode(self): ''' Return the file's unix mode, as returned by os.stat().st_mode. ''' if platform.system() == 'Windows': return None assert self.path is not None mode = os.stat(self.path).st_mode # Normalize file mode: # - keep file type (e.g. S_IFREG) ret = stat.S_IFMT(mode) # - expand user read and execute permissions to everyone if mode & 0400: ret |= 0444 if mode & 0100: ret |= 0111 # - keep user write permissions if mode & 0200: ret |= 0200 # - leave away sticky bit, setuid, setgid return ret def read(self): '''Return the contents of the file.''' with open(self.path, 'rb') as fh: return fh.read() class ExecutableFile(File): ''' File class for executable and library files on OS/2, OS/X and ELF systems. (see mozpack.executables.is_executable documentation). ''' def copy(self, dest, skip_if_older=True): real_dest = dest if not isinstance(dest, basestring): fd, dest = mkstemp() os.close(fd) os.remove(dest) assert isinstance(dest, basestring) # If File.copy didn't actually copy because dest is newer, check the # file sizes. If dest is smaller, it means it is already stripped and # elfhacked, so we can skip. if not File.copy(self, dest, skip_if_older) and \ os.path.getsize(self.path) > os.path.getsize(dest): return False try: if may_strip(dest): strip(dest) if may_elfhack(dest): elfhack(dest) except ErrorMessage: os.remove(dest) raise if real_dest != dest: f = File(dest) ret = f.copy(real_dest, skip_if_older) os.remove(dest) return ret return True class AbsoluteSymlinkFile(File): '''File class that is copied by symlinking (if available). This class only works if the target path is absolute. ''' def __init__(self, path): if not os.path.isabs(path): raise ValueError('Symlink target not absolute: %s' % path) File.__init__(self, path) def copy(self, dest, skip_if_older=True): assert isinstance(dest, basestring) # The logic in this function is complicated by the fact that symlinks # aren't universally supported. 
So, where symlinks aren't supported, we # fall back to file copying. Keep in mind that symlink support is # per-filesystem, not per-OS. # Handle the simple case where symlinks are definitely not supported by # falling back to file copy. if not hasattr(os, 'symlink'): return File.copy(self, dest, skip_if_older=skip_if_older) # Always verify the symlink target path exists. if not os.path.exists(self.path): raise ErrorMessage('Symlink target path does not exist: %s' % self.path) st = None try: st = os.lstat(dest) except OSError as ose: if ose.errno != errno.ENOENT: raise # If the dest is a symlink pointing to us, we have nothing to do. # If it's the wrong symlink, the filesystem must support symlinks, # so we replace with a proper symlink. if st and stat.S_ISLNK(st.st_mode): link = os.readlink(dest) if link == self.path: return False os.remove(dest) os.symlink(self.path, dest) return True # If the destination doesn't exist, we try to create a symlink. If that # fails, we fall back to copy code. if not st: try: os.symlink(self.path, dest) return True except OSError: return File.copy(self, dest, skip_if_older=skip_if_older) # Now the complicated part. If the destination exists, we could be # replacing a file with a symlink. Or, the filesystem may not support # symlinks. We want to minimize I/O overhead for performance reasons, # so we keep the existing destination file around as long as possible. # A lot of the system calls would be eliminated if we cached whether # symlinks are supported. However, even if we performed a single # up-front test of whether the root of the destination directory # supports symlinks, there's no guarantee that all operations for that # dest (or source) would be on the same filesystem and would support # symlinks. # # Our strategy is to attempt to create a new symlink with a random # name. If that fails, we fall back to copy mode. If that works, we # remove the old destination and move the newly-created symlink into # its place. 
temp_dest = os.path.join(os.path.dirname(dest), str(uuid.uuid4())) try: os.symlink(self.path, temp_dest) # TODO Figure out exactly how symlink creation fails and only trap # that. except EnvironmentError: return File.copy(self, dest, skip_if_older=skip_if_older) # If removing the original file fails, don't forget to clean up the # temporary symlink. try: os.remove(dest) except EnvironmentError: os.remove(temp_dest) raise os.rename(temp_dest, dest) return True class ExistingFile(BaseFile): ''' File class that represents a file that may exist but whose content comes from elsewhere. This purpose of this class is to account for files that are installed via external means. It is typically only used in manifests or in registries to account for files. When asked to copy, this class does nothing because nothing is known about the source file/data. Instances of this class come in two flavors: required and optional. If an existing file is required, it must exist during copy() or an error is raised. ''' def __init__(self, required): self.required = required def copy(self, dest, skip_if_older=True): if isinstance(dest, basestring): dest = Dest(dest) else: assert isinstance(dest, Dest) if not self.required: return if not dest.exists(): errors.fatal("Required existing file doesn't exist: %s" % dest.path) class PreprocessedFile(BaseFile): ''' File class for a file that is preprocessed. PreprocessedFile.copy() runs the preprocessor on the file to create the output. ''' def __init__(self, path, depfile_path, marker, defines, extra_depends=None, silence_missing_directive_warnings=False): self.path = path self.depfile = depfile_path self.marker = marker self.defines = defines self.extra_depends = list(extra_depends or []) self.silence_missing_directive_warnings = \ silence_missing_directive_warnings def copy(self, dest, skip_if_older=True): ''' Invokes the preprocessor to create the destination file. 
''' if isinstance(dest, basestring): dest = Dest(dest) else: assert isinstance(dest, Dest) # We have to account for the case where the destination exists and is a # symlink to something. Since we know the preprocessor is certainly not # going to create a symlink, we can just remove the existing one. If the # destination is not a symlink, we leave it alone, since we're going to # overwrite its contents anyway. # If symlinks aren't supported at all, we can skip this step. if hasattr(os, 'symlink'): if os.path.islink(dest.path): os.remove(dest.path) pp_deps = set(self.extra_depends) # If a dependency file was specified, and it exists, add any # dependencies from that file to our list. if self.depfile and os.path.exists(self.depfile): target = mozpath.normpath(dest.name) with open(self.depfile, 'rb') as fileobj: for rule in makeutil.read_dep_makefile(fileobj): if target in rule.targets(): pp_deps.update(rule.dependencies()) skip = False if dest.exists() and skip_if_older: # If a dependency file was specified, and it doesn't exist, # assume that the preprocessor needs to be rerun. That will # regenerate the dependency file. if self.depfile and not os.path.exists(self.depfile): skip = False else: skip = not BaseFile.any_newer(dest.path, pp_deps) if skip: return False deps_out = None if self.depfile: deps_out = FileAvoidWrite(self.depfile) pp = Preprocessor(defines=self.defines, marker=self.marker) pp.setSilenceDirectiveWarnings(self.silence_missing_directive_warnings) with open(self.path, 'rU') as input: pp.processFile(input=input, output=dest, depfile=deps_out) dest.close() if self.depfile: deps_out.close() return True class GeneratedFile(BaseFile): ''' File class for content with no previous existence on the filesystem. ''' def __init__(self, content): self.content = content def open(self): return BytesIO(self.content) class DeflatedFile(BaseFile): ''' File class for members of a jar archive. DeflatedFile.copy() effectively extracts the file from the jar archive. 
''' def __init__(self, file): from mozpack.mozjar import JarFileReader assert isinstance(file, JarFileReader) self.file = file def open(self): self.file.seek(0) return self.file class XPTFile(GeneratedFile): ''' File class for a linked XPT file. It takes several XPT files as input (using the add() and remove() member functions), and links them at copy() time. ''' def __init__(self): self._files = set() def add(self, xpt): ''' Add the given XPT file (as a BaseFile instance) to the list of XPTs to link. ''' assert isinstance(xpt, BaseFile) self._files.add(xpt) def remove(self, xpt): ''' Remove the given XPT file (as a BaseFile instance) from the list of XPTs to link. ''' assert isinstance(xpt, BaseFile) self._files.remove(xpt) def copy(self, dest, skip_if_older=True): ''' Link the registered XPTs and place the resulting linked XPT at the destination given as a string or a Dest instance. Avoids an expensive XPT linking if the interfaces in an existing destination match those of the individual XPTs to link. skip_if_older is ignored. ''' if isinstance(dest, basestring): dest = Dest(dest) assert isinstance(dest, Dest) from xpt import xpt_link, Typelib, Interface all_typelibs = [Typelib.read(f.open()) for f in self._files] if dest.exists(): # Typelib.read() needs to seek(), so use a BytesIO for dest # content. dest_interfaces = \ dict((i.name, i) for i in Typelib.read(BytesIO(dest.read())).interfaces if i.iid != Interface.UNRESOLVED_IID) identical = True for f in self._files: typelib = Typelib.read(f.open()) for i in typelib.interfaces: if i.iid != Interface.UNRESOLVED_IID and \ not (i.name in dest_interfaces and i == dest_interfaces[i.name]): identical = False break if identical: return False s = BytesIO() xpt_link(all_typelibs).write(s) dest.write(s.getvalue()) return True def open(self): raise RuntimeError("Unsupported") def isempty(self): ''' Return whether there are XPT files to link. 
''' return len(self._files) == 0 class ManifestFile(BaseFile): ''' File class for a manifest file. It takes individual manifest entries (using the add() and remove() member functions), and adjusts them to be relative to the base path for the manifest, given at creation. Example: There is a manifest entry "content foobar foobar/content/" relative to "foobar/chrome". When packaging, the entry will be stored in jar:foobar/omni.ja!/chrome/chrome.manifest, which means the entry will have to be relative to "chrome" instead of "foobar/chrome". This doesn't really matter when serializing the entry, since this base path is not written out, but it matters when moving the entry at the same time, e.g. to jar:foobar/omni.ja!/chrome.manifest, which we don't do currently but could in the future. ''' def __init__(self, base, entries=None): self._entries = entries if entries else [] self._base = base def add(self, entry): ''' Add the given entry to the manifest. Entries are rebased at open() time instead of add() time so that they can be more easily remove()d. ''' assert isinstance(entry, ManifestEntry) self._entries.append(entry) def remove(self, entry): ''' Remove the given entry from the manifest. ''' assert isinstance(entry, ManifestEntry) self._entries.remove(entry) def open(self): ''' Return a file-like object allowing to read() the serialized content of the manifest. ''' return BytesIO(''.join('%s\n' % e.rebase(self._base) for e in self._entries)) def __iter__(self): ''' Iterate over entries in the manifest file. ''' return iter(self._entries) def isempty(self): ''' Return whether there are manifest entries to write ''' return len(self._entries) == 0 class MinifiedProperties(BaseFile): ''' File class for minified properties. This wraps around a BaseFile instance, and removes lines starting with a # from its content. 
''' def __init__(self, file): assert isinstance(file, BaseFile) self._file = file def open(self): ''' Return a file-like object allowing to read() the minified content of the properties file. ''' return BytesIO(''.join(l for l in self._file.open().readlines() if not l.startswith('#'))) class MinifiedJavaScript(BaseFile): ''' File class for minifying JavaScript files. ''' def __init__(self, file, verify_command=None): assert isinstance(file, BaseFile) self._file = file self._verify_command = verify_command def open(self): output = BytesIO() minify = JavascriptMinify(self._file.open(), output, quote_chars="'\"`") minify.minify() output.seek(0) if not self._verify_command: return output input_source = self._file.open().read() output_source = output.getvalue() with NamedTemporaryFile() as fh1, NamedTemporaryFile() as fh2: fh1.write(input_source) fh2.write(output_source) fh1.flush() fh2.flush() try: args = list(self._verify_command) args.extend([fh1.name, fh2.name]) subprocess.check_output(args, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: errors.warn('JS minification verification failed for %s:' % (getattr(self._file, 'path', '<unknown>'))) # Prefix each line with "Warning:" so mozharness doesn't # think these error messages are real errors. for line in e.output.splitlines(): errors.warn(line) return self._file.open() return output class BaseFinder(object): def __init__(self, base, minify=False, minify_js=False, minify_js_verify_command=None): ''' Initializes the instance with a reference base directory. The optional minify argument specifies whether minification of code should occur. minify_js is an additional option to control minification of JavaScript. It requires minify to be True. minify_js_verify_command can be used to optionally verify the results of JavaScript minification. 
If defined, it is expected to be an iterable that will constitute the first arguments to a called process which will receive the filenames of the original and minified JavaScript files. The invoked process can then verify the results. If minification is rejected, the process exits with a non-0 exit code and the original JavaScript source is used. An example value for this argument is ('/path/to/js', '/path/to/verify/script.js'). ''' if minify_js and not minify: raise ValueError('minify_js requires minify.') self.base = base self._minify = minify self._minify_js = minify_js self._minify_js_verify_command = minify_js_verify_command def find(self, pattern): ''' Yield path, BaseFile_instance pairs for all files under the base directory and its subdirectories that match the given pattern. See the mozpack.path.match documentation for a description of the handled patterns. ''' while pattern.startswith('/'): pattern = pattern[1:] for p, f in self._find(pattern): yield p, self._minify_file(p, f) def get(self, path): """Obtain a single file. Where ``find`` is tailored towards matching multiple files, this method is used for retrieving a single file. Use this method when performance is critical. Returns a ``BaseFile`` if at most one file exists or ``None`` otherwise. """ files = list(self.find(path)) if len(files) != 1: return None return files[0][1] def __iter__(self): ''' Iterates over all files under the base directory (excluding files starting with a '.' and files at any level under a directory starting with a '.'). for path, file in finder: ... ''' return self.find('') def __contains__(self, pattern): raise RuntimeError("'in' operator forbidden for %s. Use contains()." % self.__class__.__name__) def contains(self, pattern): ''' Return whether some files under the base directory match the given pattern. See the mozpack.path.match documentation for a description of the handled patterns. 
''' return any(self.find(pattern)) def _minify_file(self, path, file): ''' Return an appropriate MinifiedSomething wrapper for the given BaseFile instance (file), according to the file type (determined by the given path), if the FileFinder was created with minification enabled. Otherwise, just return the given BaseFile instance. ''' if not self._minify or isinstance(file, ExecutableFile): return file if path.endswith('.properties'): return MinifiedProperties(file) if self._minify_js and path.endswith(('.js', '.jsm')): return MinifiedJavaScript(file, self._minify_js_verify_command) return file def _find_helper(self, pattern, files, file_getter): """Generic implementation of _find. A few *Finder implementations share logic for returning results. This function implements the custom logic. The ``file_getter`` argument is a callable that receives a path that is known to exist. The callable should return a ``BaseFile`` instance. """ if '*' in pattern: for p in files: if mozpath.match(p, pattern): yield p, file_getter(p) elif pattern == '': for p in files: yield p, file_getter(p) elif pattern in files: yield pattern, file_getter(pattern) else: for p in files: if mozpath.basedir(p, [pattern]) == pattern: yield p, file_getter(p) class FileFinder(BaseFinder): ''' Helper to get appropriate BaseFile instances from the file system. ''' def __init__(self, base, find_executables=True, ignore=(), find_dotfiles=False, **kargs): ''' Create a FileFinder for files under the given base directory. The find_executables argument determines whether the finder needs to try to guess whether files are executables. Disabling this guessing when not necessary can speed up the finder significantly. ``ignore`` accepts an iterable of patterns to ignore. Entries are strings that match paths relative to ``base`` using ``mozpath.match()``. This means if an entry corresponds to a directory, all files under that directory will be ignored. 
If an entry corresponds to a file, that particular file will be ignored. ''' BaseFinder.__init__(self, base, **kargs) self.find_dotfiles = find_dotfiles self.find_executables = find_executables self.ignore = ignore def _find(self, pattern): ''' Actual implementation of FileFinder.find(), dispatching to specialized member functions depending on what kind of pattern was given. Note all files with a name starting with a '.' are ignored when scanning directories, but are not ignored when explicitely requested. ''' if '*' in pattern: return self._find_glob('', mozpath.split(pattern)) elif os.path.isdir(os.path.join(self.base, pattern)): return self._find_dir(pattern) else: f = self.get(pattern) return ((pattern, f),) if f else () def _find_dir(self, path): ''' Actual implementation of FileFinder.find() when the given pattern corresponds to an existing directory under the base directory. Ignores file names starting with a '.' under the given path. If the path itself has leafs starting with a '.', they are not ignored. ''' for p in self.ignore: if mozpath.match(path, p): return # The sorted makes the output idempotent. Otherwise, we are # likely dependent on filesystem implementation details, such as # inode ordering. for p in sorted(os.listdir(os.path.join(self.base, path))): if p.startswith('.'): if p in ('.', '..'): continue if not self.find_dotfiles: continue for p_, f in self._find(mozpath.join(path, p)): yield p_, f def get(self, path): srcpath = os.path.join(self.base, path) if not os.path.exists(srcpath): return None for p in self.ignore: if mozpath.match(path, p): return None if self.find_executables and is_executable(srcpath): return ExecutableFile(srcpath) else: return File(srcpath) def _find_glob(self, base, pattern): ''' Actual implementation of FileFinder.find() when the given pattern contains globbing patterns ('*' or '**'). This is meant to be an equivalent of: for p, f in self: if mozpath.match(p, pattern): yield p, f but avoids scanning the entire tree. 
''' if not pattern: for p, f in self._find(base): yield p, f elif pattern[0] == '**': for p, f in self._find(base): if mozpath.match(p, mozpath.join(*pattern)): yield p, f elif '*' in pattern[0]: if not os.path.exists(os.path.join(self.base, base)): return for p in self.ignore: if mozpath.match(base, p): return # See above comment w.r.t. sorted() and idempotent behavior. for p in sorted(os.listdir(os.path.join(self.base, base))): if p.startswith('.') and not pattern[0].startswith('.'): continue if mozpath.match(p, pattern[0]): for p_, f in self._find_glob(mozpath.join(base, p), pattern[1:]): yield p_, f else: for p, f in self._find_glob(mozpath.join(base, pattern[0]), pattern[1:]): yield p, f class JarFinder(BaseFinder): ''' Helper to get appropriate DeflatedFile instances from a JarReader. ''' def __init__(self, base, reader, **kargs): ''' Create a JarFinder for files in the given JarReader. The base argument is used as an indication of the Jar file location. ''' assert isinstance(reader, JarReader) BaseFinder.__init__(self, base, **kargs) self._files = OrderedDict((f.filename, f) for f in reader) def _find(self, pattern): ''' Actual implementation of JarFinder.find(), dispatching to specialized member functions depending on what kind of pattern was given. ''' return self._find_helper(pattern, self._files, lambda x: DeflatedFile(self._files[x])) class ComposedFinder(BaseFinder): ''' Composes multiple File Finders in some sort of virtual file system. A ComposedFinder is initialized from a dictionary associating paths to *Finder instances. Note this could be optimized to be smarter than getting all the files in advance. ''' def __init__(self, finders): # Can't import globally, because of the dependency of mozpack.copier # on this module. 
from mozpack.copier import FileRegistry self.files = FileRegistry() for base, finder in sorted(finders.iteritems()): if self.files.contains(base): self.files.remove(base) for p, f in finder.find(''): self.files.add(mozpath.join(base, p), f) def find(self, pattern): for p in self.files.match(pattern): yield p, self.files[p] class MercurialFile(BaseFile): """File class for holding data from Mercurial.""" def __init__(self, client, rev, path): self._content = client.cat([path], rev=rev) def read(self): return self._content class MercurialRevisionFinder(BaseFinder): """A finder that operates on a specific Mercurial revision.""" def __init__(self, repo, rev='.', recognize_repo_paths=False, **kwargs): """Create a finder attached to a specific revision in a repository. If no revision is given, open the parent of the working directory. ``recognize_repo_paths`` will enable a mode where ``.get()`` will recognize full paths that include the repo's path. Typically Finder instances are "bound" to a base directory and paths are relative to that directory. This mode changes that. When this mode is activated, ``.find()`` will not work! This mode exists to support the moz.build reader, which uses absolute paths instead of relative paths. The reader should eventually be rewritten to use relative paths and this hack should be removed (TODO bug 1171069). """ if not hglib: raise Exception('hglib package not found') super(MercurialRevisionFinder, self).__init__(base=repo, **kwargs) self._root = mozpath.normpath(repo).rstrip('/') self._recognize_repo_paths = recognize_repo_paths # We change directories here otherwise we have to deal with relative # paths. oldcwd = os.getcwd() os.chdir(self._root) try: self._client = hglib.open(path=repo, encoding=b'utf-8') finally: os.chdir(oldcwd) self._rev = rev if rev is not None else b'.' self._files = OrderedDict() # Immediately populate the list of files in the repo since nearly every # operation requires this list. 
out = self._client.rawcommand([b'files', b'--rev', str(self._rev)]) for relpath in out.splitlines(): self._files[relpath] = None def _find(self, pattern): if self._recognize_repo_paths: raise NotImplementedError('cannot use find with recognize_repo_path') return self._find_helper(pattern, self._files, self._get) def get(self, path): if self._recognize_repo_paths: if not path.startswith(self._root): raise ValueError('lookups in recognize_repo_paths mode must be ' 'prefixed with repo path: %s' % path) path = path[len(self._root) + 1:] try: return self._get(path) except KeyError: return None def _get(self, path): # We lazy populate self._files because potentially creating tens of # thousands of MercurialFile instances for every file in the repo is # inefficient. f = self._files[path] if not f: f = MercurialFile(self._client, self._rev, path) self._files[path] = f return f
mpl-2.0
seize-the-dave/XlsxWriter
xlsxwriter/test/comparison/test_properties01.py
8
1587
############################################################################### # # Tests for XlsxWriter. # # Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org # from ..excel_comparsion_test import ExcelComparisonTest from ...workbook import Workbook class TestCompareXLSXFiles(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.maxDiff = None filename = 'properties01.xlsx' test_dir = 'xlsxwriter/test/comparison/' self.got_filename = test_dir + '_test_' + filename self.exp_filename = test_dir + 'xlsx_files/' + filename self.ignore_files = [] self.ignore_elements = {} def test_create_file(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() workbook.set_properties({ 'title': 'This is an example spreadsheet', 'subject': 'With document properties', 'author': 'Someone', 'manager': 'Dr. Heinz Doofenshmirtz', 'company': 'of Wolves', 'category': 'Example spreadsheets', 'keywords': 'Sample, Example, Properties', 'comments': 'Created with Perl and Excel::Writer::XLSX', 'status': 'Quo'}) worksheet.set_column('A:A', 70) worksheet.write('A1', "Select 'Office Button -> Prepare -> Properties' to see the file properties.") workbook.close() self.assertExcelEqual()
bsd-2-clause
SirenCreatorTeam/Siren_Zwei
MIDI_Converter/convert.py
1
1360
# -*-coding: utf-8-*- import os import pretty_midi as pm import sys if __name__=='__main__': try: try: os.remove(os.path.splitext(sys.argv[1])[0] + "_track1.mid") print("Removed {!r}".format(os.path.splitext(sys.argv[1])[0] + "_track1.mid")) except: pass pmi = pm.PrettyMIDI(resolution=960, initial_tempo=120) inr = pm.Instrument(0) prm = pm.PrettyMIDI() prm = pm.PrettyMIDI(sys.argv[1]) for i in prm.instruments[0].notes: #try: i.pitch -= int(sys.argv[2], 10) print(i.pitch) #except: # print("a error is occerred") # break; print(prm.instruments[0]) inr.notes = prm.instruments[0].notes inr.program = prm.instruments[0].program inr.name = prm.instruments[0].name print(inr) pmi.instruments.append(inr) pmi.write(os.path.splitext(sys.argv[1])[0] + "_track1.mid") except IndexError: print("ARGUMENTS WAS NOT FOUND, BUT 1 MORE ARGUMENT IS REQUIRED.") except FileNotFoundError: print("THE FILE WAS NOT FOUND. PREASE SEEK YOUR NEEDED FILE.") except TypeError: print("THE SECOND ARGUMENT WAS NOT AN INTEGER. PREASE CONFIRM THE ARGUMENT AND THEN TRY AGAIN.")
bsd-2-clause
cpyou/odoo
openerp/report/custom.py
338
25091
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import os import time import openerp import openerp.tools as tools from openerp.tools.safe_eval import safe_eval as eval import print_xml import render from interface import report_int import common from openerp.osv.osv import except_osv from openerp.osv.orm import BaseModel from pychart import * import misc import cStringIO from lxml import etree from openerp.tools.translate import _ class external_pdf(render.render): def __init__(self, pdf): render.render.__init__(self) self.pdf = pdf self.output_type='pdf' def _render(self): return self.pdf theme.use_color = 1 #TODO: devrait heriter de report_rml a la place de report_int # -> pourrait overrider que create_xml a la place de tout create # heuu, ca marche pas ds tous les cas car graphs sont generes en pdf directment # par pychart, et on passe donc pas par du rml class report_custom(report_int): def __init__(self, name): report_int.__init__(self, name) # # PRE: # fields = [['address','city'],['name'], ['zip']] # conditions = [[('zip','==','3'),(,)],(,),(,)] #same structure as fields # 
row_canvas = ['Rue', None, None] # POST: # [ ['ville','name','zip'] ] # def _row_get(self, cr, uid, objs, fields, conditions, row_canvas=None, group_by=None): result = [] for obj in objs: tobreak = False for cond in conditions: if cond and cond[0]: c = cond[0] temp = c[0](eval('obj.'+c[1],{'obj': obj})) if not eval('\''+temp+'\''+' '+c[2]+' '+'\''+str(c[3])+'\''): tobreak = True if tobreak: break levels = {} row = [] for i in range(len(fields)): if not fields[i]: row.append(row_canvas and row_canvas[i]) if row_canvas[i]: row_canvas[i]=False elif len(fields[i])==1: if obj: row.append(str(eval('obj.'+fields[i][0],{'obj': obj}))) else: row.append(None) else: row.append(None) levels[fields[i][0]]=True if not levels: result.append(row) else: # Process group_by data first key = [] if group_by is not None and fields[group_by] is not None: if fields[group_by][0] in levels.keys(): key.append(fields[group_by][0]) for l in levels.keys(): if l != fields[group_by][0]: key.append(l) else: key = levels.keys() for l in key: objs = eval('obj.'+l,{'obj': obj}) if not isinstance(objs, (BaseModel, list)): objs = [objs] field_new = [] cond_new = [] for f in range(len(fields)): if (fields[f] and fields[f][0])==l: field_new.append(fields[f][1:]) cond_new.append(conditions[f][1:]) else: field_new.append(None) cond_new.append(None) if len(objs): result += self._row_get(cr, uid, objs, field_new, cond_new, row, group_by) else: result.append(row) return result def create(self, cr, uid, ids, datas, context=None): if not context: context={} self.pool = openerp.registry(cr.dbname) report = self.pool['ir.report.custom'].browse(cr, uid, [datas['report_id']])[0] datas['model'] = report.model_id.model if report.menu_id: ids = self.pool[report.model_id.model].search(cr, uid, []) datas['ids'] = ids report_id = datas['report_id'] report = self.pool['ir.report.custom'].read(cr, uid, [report_id], context=context)[0] fields = self.pool['ir.report.custom.fields'].read(cr, uid, report['fields_child0'], 
context=context) fields.sort(lambda x,y : x['sequence'] - y['sequence']) if report['field_parent']: parent_field = self.pool['ir.model.fields'].read(cr, uid, [report['field_parent'][0]], ['model']) model_name = self.pool['ir.model'].read(cr, uid, [report['model_id'][0]], ['model'], context=context)[0]['model'] fct = { 'id': lambda x: x, 'gety': lambda x: x.split('-')[0], 'in': lambda x: x.split(',') } new_fields = [] new_cond = [] for f in fields: row = [] cond = [] for i in range(4): field_child = f['field_child'+str(i)] if field_child: row.append( self.pool['ir.model.fields'].read(cr, uid, [field_child[0]], ['name'], context=context)[0]['name'] ) if f['fc'+str(i)+'_operande']: fct_name = 'id' cond_op = f['fc'+str(i)+'_op'] if len(f['fc'+str(i)+'_op'].split(',')) == 2: cond_op = f['fc'+str(i)+'_op'].split(',')[1] fct_name = f['fc'+str(i)+'_op'].split(',')[0] cond.append((fct[fct_name], f['fc'+str(i)+'_operande'][1], cond_op, f['fc'+str(i)+'_condition'])) else: cond.append(None) new_fields.append(row) new_cond.append(cond) objs = self.pool[model_name].browse(cr, uid, ids) # Group by groupby = None idx = 0 for f in fields: if f['groupby']: groupby = idx idx += 1 results = [] if report['field_parent']: level = [] def build_tree(obj, level, depth): res = self._row_get(cr, uid,[obj], new_fields, new_cond) level.append(depth) new_obj = eval('obj.'+report['field_parent'][1],{'obj': obj}) if not isinstance(new_obj, list) : new_obj = [new_obj] for o in new_obj: if o: res += build_tree(o, level, depth+1) return res for obj in objs: results += build_tree(obj, level, 0) else: results = self._row_get(cr, uid,objs, new_fields, new_cond, group_by=groupby) fct = { 'calc_sum': lambda l: reduce(lambda x,y: float(x)+float(y), filter(None, l), 0), 'calc_avg': lambda l: reduce(lambda x,y: float(x)+float(y), filter(None, l), 0) / (len(filter(None, l)) or 1.0), 'calc_max': lambda l: reduce(lambda x,y: max(x,y), [(i or 0.0) for i in l], 0), 'calc_min': lambda l: reduce(lambda x,y: 
min(x,y), [(i or 0.0) for i in l], 0), 'calc_count': lambda l: len(filter(None, l)), 'False': lambda l: '\r\n'.join(filter(None, l)), 'groupby': lambda l: reduce(lambda x,y: x or y, l) } new_res = [] prev = None if groupby is not None: res_dic = {} for line in results: if not line[groupby] and prev in res_dic: res_dic[prev].append(line) else: prev = line[groupby] res_dic.setdefault(line[groupby], []) res_dic[line[groupby]].append(line) #we use the keys in results since they are ordered, whereas in res_dic.heys() they aren't for key in filter(None, [x[groupby] for x in results]): row = [] for col in range(len(fields)): if col == groupby: row.append(fct['groupby'](map(lambda x: x[col], res_dic[key]))) else: row.append(fct[str(fields[col]['operation'])](map(lambda x: x[col], res_dic[key]))) new_res.append(row) results = new_res if report['type']=='table': if report['field_parent']: res = self._create_tree(uid, ids, report, fields, level, results, context) else: sort_idx = 0 for idx in range(len(fields)): if fields[idx]['name'] == report['sortby']: sort_idx = idx break try : results.sort(lambda x,y : cmp(float(x[sort_idx]),float(y[sort_idx]))) except : results.sort(lambda x,y : cmp(x[sort_idx],y[sort_idx])) if report['limitt']: results = results[:int(report['limitt'])] res = self._create_table(uid, ids, report, fields, None, results, context) elif report['type'] in ('pie','bar', 'line'): results2 = [] prev = False for r in results: row = [] for j in range(len(r)): if j == 0 and not r[j]: row.append(prev) elif j == 0 and r[j]: prev = r[j] row.append(r[j]) else: try: row.append(float(r[j])) except Exception: row.append(r[j]) results2.append(row) if report['type']=='pie': res = self._create_pie(cr,uid, ids, report, fields, results2, context) elif report['type']=='bar': res = self._create_bars(cr,uid, ids, report, fields, results2, context) elif report['type']=='line': res = self._create_lines(cr,uid, ids, report, fields, results2, context) return self.obj.get(), 'pdf' def 
_create_tree(self, uid, ids, report, fields, level, results, context): pageSize=common.pageSize.get(report['print_format'], [210.0,297.0]) if report['print_orientation']=='landscape': pageSize=[pageSize[1],pageSize[0]] new_doc = etree.Element('report') config = etree.SubElement(new_doc, 'config') def _append_node(name, text): n = etree.SubElement(config, name) n.text = text _append_node('date', time.strftime('%d/%m/%Y')) _append_node('PageFormat', '%s' % report['print_format']) _append_node('PageSize', '%.2fmm,%.2fmm' % tuple(pageSize)) _append_node('PageWidth', '%.2f' % (pageSize[0] * 2.8346,)) _append_node('PageHeight', '%.2f' %(pageSize[1] * 2.8346,)) length = pageSize[0]-30-reduce(lambda x,y:x+(y['width'] or 0), fields, 0) count = 0 for f in fields: if not f['width']: count+=1 for f in fields: if not f['width']: f['width']=round((float(length)/count)-0.5) _append_node('tableSize', '%s' % ','.join(map(lambda x: '%.2fmm' % (x['width'],), fields))) _append_node('report-header', '%s' % (report['title'],)) _append_node('report-footer', '%s' % (report['footer'],)) header = etree.SubElement(new_doc, 'header') for f in fields: field = etree.SubElement(header, 'field') field.text = f['name'] lines = etree.SubElement(new_doc, 'lines') level.reverse() for line in results: shift = level.pop() node_line = etree.SubElement(lines, 'row') prefix = '+' for f in range(len(fields)): col = etree.SubElement(node_line, 'col') if f == 0: col.attrib.update(para='yes', tree='yes', space=str(3*shift)+'mm') if line[f] is not None: col.text = prefix+str(line[f]) or '' else: col.text = '/' prefix = '' transform = etree.XSLT( etree.parse(os.path.join(tools.config['root_path'], 'addons/base/report/custom_new.xsl'))) rml = etree.tostring(transform(new_doc)) self.obj = render.rml(rml) self.obj.render() return True def _create_lines(self, cr, uid, ids, report, fields, results, context): pool = openerp.registry(cr.dbname) pdf_string = cStringIO.StringIO() can = canvas.init(fname=pdf_string, 
format='pdf') can.show(80,380,'/16/H'+report['title']) ar = area.T(size=(350,350), #x_coord = category_coord.T(['2005-09-01','2005-10-22'],0), x_axis = axis.X(label = fields[0]['name'], format="/a-30{}%s"), y_axis = axis.Y(label = ', '.join(map(lambda x : x['name'], fields[1:])))) process_date = { 'D': lambda x: reduce(lambda xx, yy: xx + '-' + yy, x.split('-')[1:3]), 'M': lambda x: x.split('-')[1], 'Y': lambda x: x.split('-')[0] } order_date = { 'D': lambda x: time.mktime((2005, int(x.split('-')[0]), int(x.split('-')[1]), 0, 0, 0, 0, 0, 0)), 'M': lambda x: x, 'Y': lambda x: x } abscissa = [] idx = 0 date_idx = None fct = {} for f in fields: field_id = (f['field_child3'] and f['field_child3'][0]) or (f['field_child2'] and f['field_child2'][0]) or (f['field_child1'] and f['field_child1'][0]) or (f['field_child0'] and f['field_child0'][0]) if field_id: type = pool['ir.model.fields'].read(cr, uid, [field_id],['ttype']) if type[0]['ttype'] == 'date': date_idx = idx fct[idx] = process_date[report['frequency']] else: fct[idx] = lambda x : x else: fct[idx] = lambda x : x idx+=1 # plots are usually displayed year by year # so we do so if the first field is a date data_by_year = {} if date_idx is not None: for r in results: key = process_date['Y'](r[date_idx]) if key not in data_by_year: data_by_year[key] = [] for i in range(len(r)): r[i] = fct[i](r[i]) data_by_year[key].append(r) else: data_by_year[''] = results idx0 = 0 nb_bar = len(data_by_year)*(len(fields)-1) colors = map(lambda x:line_style.T(color=x), misc.choice_colors(nb_bar)) abscissa = {} for line in data_by_year.keys(): fields_bar = [] # sum data and save it in a list. 
An item for a fields for d in data_by_year[line]: for idx in range(len(fields)-1): fields_bar.append({}) if d[0] in fields_bar[idx]: fields_bar[idx][d[0]] += d[idx+1] else: fields_bar[idx][d[0]] = d[idx+1] for idx in range(len(fields)-1): data = {} for k in fields_bar[idx].keys(): if k in data: data[k] += fields_bar[idx][k] else: data[k] = fields_bar[idx][k] data_cum = [] prev = 0.0 keys = data.keys() keys.sort() # cumulate if necessary for k in keys: data_cum.append([k, float(data[k])+float(prev)]) if fields[idx+1]['cumulate']: prev += data[k] idx0 = 0 plot = line_plot.T(label=fields[idx+1]['name']+' '+str(line), data = data_cum, line_style=colors[idx0*(len(fields)-1)+idx]) ar.add_plot(plot) abscissa.update(fields_bar[idx]) idx0 += 1 abscissa = map(lambda x : [x, None], abscissa) ar.x_coord = category_coord.T(abscissa,0) ar.draw(can) can.close() self.obj = external_pdf(pdf_string.getvalue()) self.obj.render() pdf_string.close() return True def _create_bars(self, cr, uid, ids, report, fields, results, context): pool = openerp.registry(cr.dbname) pdf_string = cStringIO.StringIO() can = canvas.init(fname=pdf_string, format='pdf') can.show(80,380,'/16/H'+report['title']) process_date = { 'D': lambda x: reduce(lambda xx, yy: xx + '-' + yy, x.split('-')[1:3]), 'M': lambda x: x.split('-')[1], 'Y': lambda x: x.split('-')[0] } order_date = { 'D': lambda x: time.mktime((2005, int(x.split('-')[0]), int(x.split('-')[1]), 0, 0, 0, 0, 0, 0)), 'M': lambda x: x, 'Y': lambda x: x } ar = area.T(size=(350,350), x_axis = axis.X(label = fields[0]['name'], format="/a-30{}%s"), y_axis = axis.Y(label = ', '.join(map(lambda x : x['name'], fields[1:])))) idx = 0 date_idx = None fct = {} for f in fields: field_id = (f['field_child3'] and f['field_child3'][0]) or (f['field_child2'] and f['field_child2'][0]) or (f['field_child1'] and f['field_child1'][0]) or (f['field_child0'] and f['field_child0'][0]) if field_id: type = pool['ir.model.fields'].read(cr, uid, [field_id],['ttype']) if 
type[0]['ttype'] == 'date': date_idx = idx fct[idx] = process_date[report['frequency']] else: fct[idx] = lambda x : x else: fct[idx] = lambda x : x idx+=1 # plot are usually displayed year by year # so we do so if the first field is a date data_by_year = {} if date_idx is not None: for r in results: key = process_date['Y'](r[date_idx]) if key not in data_by_year: data_by_year[key] = [] for i in range(len(r)): r[i] = fct[i](r[i]) data_by_year[key].append(r) else: data_by_year[''] = results nb_bar = len(data_by_year)*(len(fields)-1) colors = map(lambda x:fill_style.Plain(bgcolor=x), misc.choice_colors(nb_bar)) abscissa = {} for line in data_by_year.keys(): fields_bar = [] # sum data and save it in a list. An item for a fields for d in data_by_year[line]: for idx in range(len(fields)-1): fields_bar.append({}) if d[0] in fields_bar[idx]: fields_bar[idx][d[0]] += d[idx+1] else: fields_bar[idx][d[0]] = d[idx+1] for idx in range(len(fields)-1): data = {} for k in fields_bar[idx].keys(): if k in data: data[k] += fields_bar[idx][k] else: data[k] = fields_bar[idx][k] data_cum = [] prev = 0.0 keys = data.keys() keys.sort() # cumulate if necessary for k in keys: data_cum.append([k, float(data[k])+float(prev)]) if fields[idx+1]['cumulate']: prev += data[k] idx0 = 0 plot = bar_plot.T(label=fields[idx+1]['name']+' '+str(line), data = data_cum, cluster=(idx0*(len(fields)-1)+idx,nb_bar), fill_style=colors[idx0*(len(fields)-1)+idx]) ar.add_plot(plot) abscissa.update(fields_bar[idx]) idx0 += 1 abscissa = map(lambda x : [x, None], abscissa) abscissa.sort() ar.x_coord = category_coord.T(abscissa,0) ar.draw(can) can.close() self.obj = external_pdf(pdf_string.getvalue()) self.obj.render() pdf_string.close() return True def _create_pie(self, cr, uid, ids, report, fields, results, context): pdf_string = cStringIO.StringIO() can = canvas.init(fname=pdf_string, format='pdf') ar = area.T(size=(350,350), legend=legend.T(), x_grid_style = None, y_grid_style = None) colors = map(lambda 
x:fill_style.Plain(bgcolor=x), misc.choice_colors(len(results))) if reduce(lambda x,y : x+y, map(lambda x : x[1],results)) == 0.0: raise except_osv(_('Error'), _("The sum of the data (2nd field) is null.\nWe can't draw a pie chart !")) plot = pie_plot.T(data=results, arc_offsets=[0,10,0,10], shadow = (2, -2, fill_style.gray50), label_offset = 25, arrow_style = arrow.a3, fill_styles=colors) ar.add_plot(plot) ar.draw(can) can.close() self.obj = external_pdf(pdf_string.getvalue()) self.obj.render() pdf_string.close() return True def _create_table(self, uid, ids, report, fields, tree, results, context): pageSize=common.pageSize.get(report['print_format'], [210.0,297.0]) if report['print_orientation']=='landscape': pageSize=[pageSize[1],pageSize[0]] new_doc = etree.Element('report') config = etree.SubElement(new_doc, 'config') def _append_node(name, text): n = etree.SubElement(config, name) n.text = text _append_node('date', time.strftime('%d/%m/%Y')) _append_node('PageSize', '%.2fmm,%.2fmm' % tuple(pageSize)) _append_node('PageFormat', '%s' % report['print_format']) _append_node('PageWidth', '%.2f' % (pageSize[0] * 2.8346,)) _append_node('PageHeight', '%.2f' %(pageSize[1] * 2.8346,)) length = pageSize[0]-30-reduce(lambda x,y:x+(y['width'] or 0), fields, 0) count = 0 for f in fields: if not f['width']: count+=1 for f in fields: if not f['width']: f['width']=round((float(length)/count)-0.5) _append_node('tableSize', '%s' % ','.join(map(lambda x: '%.2fmm' % (x['width'],), fields))) _append_node('report-header', '%s' % (report['title'],)) _append_node('report-footer', '%s' % (report['footer'],)) header = etree.SubElement(new_doc, 'header') for f in fields: field = etree.SubElement(header, 'field') field.text = f['name'] lines = etree.SubElement(new_doc, 'lines') for line in results: node_line = etree.SubElement(lines, 'row') for f in range(len(fields)): col = etree.SubElement(node_line, 'col', tree='no') if line[f] is not None: col.text = line[f] or '' else: col.text = '/' 
transform = etree.XSLT( etree.parse(os.path.join(tools.config['root_path'], 'addons/base/report/custom_new.xsl'))) rml = etree.tostring(transform(new_doc)) self.obj = render.rml(rml) self.obj.render() return True report_custom('report.custom') # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
cjaymes/pyscap
src/scap/model/oval_5/sc/windows/EntityItemRegistryTypeType.py
1
1455
# Copyright 2016 Casey Jaymes # This file is part of PySCAP. # # PySCAP is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # PySCAP is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with PySCAP. If not, see <http://www.gnu.org/licenses/>. import logging from scap.model.oval_5.sc.EntityItemType import EntityItemType logger = logging.getLogger(__name__) class EntityItemRegistryTypeType(EntityItemType): MODEL_MAP = { 'elements': [ ], 'attributes': { }, } def get_value_enum(self): return [ 'reg_binary', 'reg_dword', 'reg_dword_little_endian', 'reg_dword_big_endian', 'reg_expand_sz', 'reg_link', 'reg_multi_sz', 'reg_none', 'reg_qword', 'reg_qword_little_endian', 'reg_sz', 'reg_resource_list', 'reg_full_resource_descriptor', 'reg_resource_requirements_list', '', ]
gpl-3.0
denz/swarm
swarm/app.py
1
7179
from functools import wraps import logging import sys import os from inspect import isgeneratorfunction import signal from Queue import Empty from multiprocessing import TimeoutError, cpu_count from collections import Sequence from contextlib import contextmanager from . import signals from .helpers import (_PackageBoundObject, find_package, cached_property, obj_converter, obj_list_converter) from .globals import _swarm_ctx_stack, _atom_ctx_stack from .context import SwarmContext from .atomize import enqueue from .datastructures import ImmutableDict from .config import Config, ConfigAttribute from .storage import InstanceStorageMixin from .exceptions import Cancel NONE = object() def default_atomizer(args, kwargs): return ((args, kwargs),) class BaseSwarm(_PackageBoundObject, InstanceStorageMixin): default_config = ImmutableDict({ 'LOG_LEVEL': logging.ERROR, 'LOG_FORMAT': '%(asctime)s - %(name)s - %(funcName)s - %(processName)s - %(process)d - %(levelname)s - %(message)s', 'SAVE_STATE': False, # equal to interval hardcoded into `multiprocessing.Pool` 'RESOLUTION': 0.01, 'COMPETITORS': cpu_count(), #can be an import string 'ATOMIZER': default_atomizer, 'SWARM_CONTEXT_CLASS': 'swarm.context.SwarmContext', 'ATOM_CONTEXT_CLASS': 'swarm.context.AtomContext', 'RELOADED_MODULE':None, 'CORE_SIGNAL_MODULES':['swarm.signals.%s'%n for n in ('items', 'logging', 'state', )], }) swarm_context_class = ConfigAttribute('SWARM_CONTEXT_CLASS', obj_converter) atom_context_class = ConfigAttribute('ATOM_CONTEXT_CLASS', obj_converter) reloaded_module = ConfigAttribute('RELOADED_MODULE', get_converter=obj_list_converter) atomizer = ConfigAttribute('ATOMIZER', obj_converter) def __init__(self, import_name, instance_path=None, instance_relative_config=False, log_handler=None): _PackageBoundObject.__init__(self, import_name) if instance_path is None: instance_path = self.auto_find_instance_path() self.instance_path = instance_path self.config = self.make_config(instance_relative_config) 
@cached_property def name(self): """The name of the application. This is usually the import name with the difference that it's guessed from the run file if the import name is main. This name is used as a display name when Swarm needs the name of the application. It can be set and overriden to change the value. """ if self.import_name == '__main__': fn = getattr(sys.modules['__main__'], '__file__', None) if fn is None: return '__main__' return os.path.splitext(os.path.basename(fn))[0] return self.import_name def make_config(self, instance_relative=False): """Used to create the config attribute by the Flask constructor. The `instance_relative` parameter is passed in from the constructor of Flask (there named `instance_relative_config`) and indicates if the config should be relative to the instance path or the root path of the application. """ root_path = self.root_path if instance_relative: root_path = self.instance_path return Config(root_path, self.default_config) def connection(self): return () @contextmanager def swarm_context(self): if _swarm_ctx_stack.top is None: ctx = self.swarm_context_class(self, *self.connection()) with ctx.context(): yield ctx def sync(self, func): '''Decorator to mark syncronized functions''' @wraps(func) def wrapper(*args, **kwargs): topatoms = [] while _atom_ctx_stack.top: topatoms.append(_atom_ctx_stack.pop()) with self.swarm_context() as ctx: ctx.signal('before-start').send(ctx) ctx.log.debug('Starting synchronized %s'%func) for item in self.synchronized(ctx, func, args, kwargs): yield item ctx.log.debug('Finishing synchronized %s'%func) while topatoms: _atom_ctx_stack.push(topatoms.pop()) return wrapper def iteritems(self, items, ctx): ''' iter queue items until queue is not empty for `RESOLUTION` interval ''' try: while True: item = items.get(True, self.config['RESOLUTION']) if signals.cancel(ctx.signal('got-new-item').send(item)): continue yield item except Empty: pass def generate_subtasks(self, items, ctx): ''' Try to generate 
new tasks or yield from `items` queue ''' while ctx.results: try: result = ctx.results.popleft() tasks = result.get(self.config['RESOLUTION']) if signals.cancel(ctx.signal('got-new-tasks').send(tasks)): continue for task in tasks: enqueue(ctx, task) except TimeoutError: ctx.results.append(result) finally: for item in self.iteritems(items, ctx): yield item for item in self.iteritems(items, ctx): yield item def synchronized(self, ctx, func, args, kwargs): ''' Wrap syncronized function with generator and start subtasks generation ''' if isgeneratorfunction(func): def generator(items): for item in func(*args, **kwargs): yield item for item in self.generate_subtasks(items, ctx): yield item else: def generator(items): func(*args, **kwargs) for item in self.generate_subtasks(items, ctx): yield item items = ctx.manager.Queue() ctx.manager.setup_items(items) ctx.signal('before-syncronized-generation').send(ctx) for item in generator(items): yield item @property def log_handler(self): ''' Top level log handler ''' return logging.StreamHandler(stream=sys.stderr) @property def log_formatter(self): return logging.Formatter(self.config['LOG_FORMAT'])
bsd-3-clause
akash1808/tempest
tempest/common/glance_http.py
11
14527
# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Originally copied from python-glanceclient import copy import hashlib import json import posixpath import re import socket import struct import OpenSSL from oslo_log import log as logging import six from six import moves from six.moves import http_client as httplib from six.moves.urllib import parse as urlparse from tempest_lib import exceptions as lib_exc from tempest import exceptions as exc LOG = logging.getLogger(__name__) USER_AGENT = 'tempest' CHUNKSIZE = 1024 * 64 # 64kB TOKEN_CHARS_RE = re.compile('^[-A-Za-z0-9+/=]*$') class HTTPClient(object): def __init__(self, auth_provider, filters, **kwargs): self.auth_provider = auth_provider self.filters = filters self.endpoint = auth_provider.base_url(filters) endpoint_parts = urlparse.urlparse(self.endpoint) self.endpoint_scheme = endpoint_parts.scheme self.endpoint_hostname = endpoint_parts.hostname self.endpoint_port = endpoint_parts.port self.endpoint_path = endpoint_parts.path self.connection_class = self.get_connection_class(self.endpoint_scheme) self.connection_kwargs = self.get_connection_kwargs( self.endpoint_scheme, **kwargs) @staticmethod def get_connection_class(scheme): if scheme == 'https': return VerifiedHTTPSConnection else: return httplib.HTTPConnection @staticmethod def get_connection_kwargs(scheme, **kwargs): _kwargs = {'timeout': float(kwargs.get('timeout', 600))} if scheme == 'https': 
_kwargs['ca_certs'] = kwargs.get('ca_certs', None) _kwargs['cert_file'] = kwargs.get('cert_file', None) _kwargs['key_file'] = kwargs.get('key_file', None) _kwargs['insecure'] = kwargs.get('insecure', False) _kwargs['ssl_compression'] = kwargs.get('ssl_compression', True) return _kwargs def get_connection(self): _class = self.connection_class try: return _class(self.endpoint_hostname, self.endpoint_port, **self.connection_kwargs) except httplib.InvalidURL: raise exc.EndpointNotFound def _http_request(self, url, method, **kwargs): """Send an http request with the specified characteristics. Wrapper around httplib.HTTP(S)Connection.request to handle tasks such as setting headers and error handling. """ # Copy the kwargs so we can reuse the original in case of redirects kwargs['headers'] = copy.deepcopy(kwargs.get('headers', {})) kwargs['headers'].setdefault('User-Agent', USER_AGENT) self._log_request(method, url, kwargs['headers']) conn = self.get_connection() try: url_parts = urlparse.urlparse(url) conn_url = posixpath.normpath(url_parts.path) LOG.debug('Actual Path: {path}'.format(path=conn_url)) if kwargs['headers'].get('Transfer-Encoding') == 'chunked': conn.putrequest(method, conn_url) for header, value in kwargs['headers'].items(): conn.putheader(header, value) conn.endheaders() chunk = kwargs['body'].read(CHUNKSIZE) # Chunk it, baby... 
while chunk: conn.send('%x\r\n%s\r\n' % (len(chunk), chunk)) chunk = kwargs['body'].read(CHUNKSIZE) conn.send('0\r\n\r\n') else: conn.request(method, conn_url, **kwargs) resp = conn.getresponse() except socket.gaierror as e: message = ("Error finding address for %(url)s: %(e)s" % {'url': url, 'e': e}) raise exc.EndpointNotFound(message) except (socket.error, socket.timeout) as e: message = ("Error communicating with %(endpoint)s %(e)s" % {'endpoint': self.endpoint, 'e': e}) raise exc.TimeoutException(message) body_iter = ResponseBodyIterator(resp) # Read body into string if it isn't obviously image data if resp.getheader('content-type', None) != 'application/octet-stream': body_str = ''.join([body_chunk for body_chunk in body_iter]) body_iter = six.StringIO(body_str) self._log_response(resp, None) else: self._log_response(resp, body_iter) return resp, body_iter def _log_request(self, method, url, headers): LOG.info('Request: ' + method + ' ' + url) if headers: headers_out = headers if 'X-Auth-Token' in headers and headers['X-Auth-Token']: token = headers['X-Auth-Token'] if len(token) > 64 and TOKEN_CHARS_RE.match(token): headers_out = headers.copy() headers_out['X-Auth-Token'] = "<Token omitted>" LOG.info('Request Headers: ' + str(headers_out)) def _log_response(self, resp, body): status = str(resp.status) LOG.info("Response Status: " + status) if resp.getheaders(): LOG.info('Response Headers: ' + str(resp.getheaders())) if body: str_body = str(body) length = len(body) LOG.info('Response Body: ' + str_body[:2048]) if length >= 2048: self.LOG.debug("Large body (%d) md5 summary: %s", length, hashlib.md5(str_body).hexdigest()) def json_request(self, method, url, **kwargs): kwargs.setdefault('headers', {}) kwargs['headers'].setdefault('Content-Type', 'application/json') if kwargs['headers']['Content-Type'] != 'application/json': msg = "Only application/json content-type is supported." 
raise lib_exc.InvalidContentType(msg) if 'body' in kwargs: kwargs['body'] = json.dumps(kwargs['body']) resp, body_iter = self._http_request(url, method, **kwargs) if 'application/json' in resp.getheader('content-type', ''): body = ''.join([chunk for chunk in body_iter]) try: body = json.loads(body) except ValueError: LOG.error('Could not decode response body as JSON') else: msg = "Only json/application content-type is supported." raise lib_exc.InvalidContentType(msg) return resp, body def raw_request(self, method, url, **kwargs): kwargs.setdefault('headers', {}) kwargs['headers'].setdefault('Content-Type', 'application/octet-stream') if 'body' in kwargs: if (hasattr(kwargs['body'], 'read') and method.lower() in ('post', 'put')): # We use 'Transfer-Encoding: chunked' because # body size may not always be known in advance. kwargs['headers']['Transfer-Encoding'] = 'chunked' # Decorate the request with auth req_url, kwargs['headers'], kwargs['body'] = \ self.auth_provider.auth_request( method=method, url=url, headers=kwargs['headers'], body=kwargs.get('body', None), filters=self.filters) return self._http_request(req_url, method, **kwargs) class OpenSSLConnectionDelegator(object): """ An OpenSSL.SSL.Connection delegator. Supplies an additional 'makefile' method which httplib requires and is not present in OpenSSL.SSL.Connection. Note: Since it is not possible to inherit from OpenSSL.SSL.Connection a delegator must be used. """ def __init__(self, *args, **kwargs): self.connection = OpenSSL.SSL.Connection(*args, **kwargs) def __getattr__(self, name): return getattr(self.connection, name) def makefile(self, *args, **kwargs): # Ensure the socket is closed when this file is closed kwargs['close'] = True return socket._fileobject(self.connection, *args, **kwargs) class VerifiedHTTPSConnection(httplib.HTTPSConnection): """ Extended HTTPSConnection which uses the OpenSSL library for enhanced SSL support. 
Note: Much of this functionality can eventually be replaced with native Python 3.3 code. """ def __init__(self, host, port=None, key_file=None, cert_file=None, ca_certs=None, timeout=None, insecure=False, ssl_compression=True): httplib.HTTPSConnection.__init__(self, host, port, key_file=key_file, cert_file=cert_file) self.key_file = key_file self.cert_file = cert_file self.timeout = timeout self.insecure = insecure self.ssl_compression = ssl_compression self.ca_certs = ca_certs self.setcontext() @staticmethod def host_matches_cert(host, x509): """ Verify that the the x509 certificate we have received from 'host' correctly identifies the server we are connecting to, ie that the certificate's Common Name or a Subject Alternative Name matches 'host'. """ # First see if we can match the CN if x509.get_subject().commonName == host: return True # Also try Subject Alternative Names for a match san_list = None for i in moves.xrange(x509.get_extension_count()): ext = x509.get_extension(i) if ext.get_short_name() == 'subjectAltName': san_list = str(ext) for san in ''.join(san_list.split()).split(','): if san == "DNS:%s" % host: return True # Server certificate does not match host msg = ('Host "%s" does not match x509 certificate contents: ' 'CommonName "%s"' % (host, x509.get_subject().commonName)) if san_list is not None: msg = msg + ', subjectAltName "%s"' % san_list raise exc.SSLCertificateError(msg) def verify_callback(self, connection, x509, errnum, depth, preverify_ok): if x509.has_expired(): msg = "SSL Certificate expired on '%s'" % x509.get_notAfter() raise exc.SSLCertificateError(msg) if depth == 0 and preverify_ok is True: # We verify that the host matches against the last # certificate in the chain return self.host_matches_cert(self.host, x509) else: # Pass through OpenSSL's default result return preverify_ok def setcontext(self): """ Set up the OpenSSL context. 
""" self.context = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD) if self.ssl_compression is False: self.context.set_options(0x20000) # SSL_OP_NO_COMPRESSION if self.insecure is not True: self.context.set_verify(OpenSSL.SSL.VERIFY_PEER, self.verify_callback) else: self.context.set_verify(OpenSSL.SSL.VERIFY_NONE, self.verify_callback) if self.cert_file: try: self.context.use_certificate_file(self.cert_file) except Exception as e: msg = 'Unable to load cert from "%s" %s' % (self.cert_file, e) raise exc.SSLConfigurationError(msg) if self.key_file is None: # We support having key and cert in same file try: self.context.use_privatekey_file(self.cert_file) except Exception as e: msg = ('No key file specified and unable to load key ' 'from "%s" %s' % (self.cert_file, e)) raise exc.SSLConfigurationError(msg) if self.key_file: try: self.context.use_privatekey_file(self.key_file) except Exception as e: msg = 'Unable to load key from "%s" %s' % (self.key_file, e) raise exc.SSLConfigurationError(msg) if self.ca_certs: try: self.context.load_verify_locations(self.ca_certs) except Exception as e: msg = 'Unable to load CA from "%s"' % (self.ca_certs, e) raise exc.SSLConfigurationError(msg) else: self.context.set_default_verify_paths() def connect(self): """ Connect to an SSL port using the OpenSSL library and apply per-connection parameters. """ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) if self.timeout is not None: # '0' microseconds sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO, struct.pack('LL', self.timeout, 0)) self.sock = OpenSSLConnectionDelegator(self.context, sock) self.sock.connect((self.host, self.port)) def close(self): if self.sock: # Remove the reference to the socket but don't close it yet. # Response close will close both socket and associated # file. Closing socket too soon will cause response # reads to fail with socket IO error 'Bad file descriptor'. 
self.sock = None httplib.HTTPSConnection.close(self) class ResponseBodyIterator(object): """A class that acts as an iterator over an HTTP response.""" def __init__(self, resp): self.resp = resp def __iter__(self): while True: yield self.next() def next(self): chunk = self.resp.read(CHUNKSIZE) if chunk: return chunk else: raise StopIteration()
apache-2.0
benzid-wael/djangorestframework-utils
drf_utils/metadata.py
1
3591
# -*- coding: utf-8 -*- from __future__ import unicode_literals import collections from django.utils.encoding import force_text from django.core.validators import RegexValidator from rest_framework import metadata from rest_framework import serializers from rest_framework.utils.field_mapping import ClassLookupDict from rest_framework.fields import empty class VerboseMetadata(metadata.SimpleMetadata): """ This is an enhanced metadata implementation. It returns an ad-hoc set of information about the view. Compared to default REST metadata class, this class will remove any confuse about the view, For example, ``SimpleMetadata`` represents both BooleanField and NullBooleanField in the same form, etc. """ label_lookup = ClassLookupDict({ serializers.Field: 'field', serializers.BooleanField: 'boolean', serializers.NullBooleanField: 'boolean', serializers.CharField: 'string', serializers.URLField: 'url', serializers.EmailField: 'email', serializers.RegexField: 'regex', serializers.SlugField: 'slug', serializers.IntegerField: 'integer', serializers.FloatField: 'float', serializers.DecimalField: 'decimal', serializers.DateField: 'date', serializers.DateTimeField: 'datetime', serializers.TimeField: 'time', serializers.ChoiceField: 'choice', serializers.MultipleChoiceField: 'multiple choice', serializers.FileField: 'file upload', serializers.ImageField: 'image upload', serializers.ListField: 'list', serializers.DictField: 'nested object', serializers.Serializer: 'nested object', }) def get_field_info(self, field): """ Given an instance of a serializer field, return a dictionary of metadata about it. 
""" field_info = collections.OrderedDict() field_info['type'] = self.label_lookup[field] field_info['required'] = getattr(field, 'required', False) attrs = [ 'read_only', 'label', 'help_text', 'allow_null', 'min_length', 'max_length', 'min_value', 'max_value', ] # Handle default attribute default_value = getattr(field, 'default') if (default_value is not empty): field_info['default'] = force_text( default_value, strings_only=True) for attr in attrs: value = getattr(field, attr, None) if value is not None and value != '': field_info[attr] = force_text(value, strings_only=True) if hasattr(field, 'choices'): field_info['choices'] = [ { 'value': choice_value, 'display_name': force_text(choice_name, strings_only=True) } for choice_value, choice_name in field.choices.items() ] # handle RegexField if isinstance(field, serializers.RegexField): pattern = None for validator in field.validators: if isinstance(validator, RegexValidator): pattern = validator.regex.pattern break field_info['pattern'] = force_text(pattern, strings_only=True) # handle DecimalField if isinstance(field, serializers.DecimalField): for attr in ('max_digits', 'decimal_places'): field_info[attr] = force_text( getattr(field, attr), strings_only=True) return field_info
isc
fitermay/intellij-community
python/lib/Lib/encodings/ptcp154.py
647
8950
""" Python Character Mapping Codec generated from 'PTCP154.txt' with gencodec.py. Written by Marc-Andre Lemburg (mal@lemburg.com). (c) Copyright CNRI, All Rights Reserved. NO WARRANTY. (c) Copyright 2000 Guido van Rossum. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_map) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_map) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_map)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_map)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='ptcp154', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Map decoding_map = codecs.make_identity_dict(range(256)) decoding_map.update({ 0x0080: 0x0496, # CYRILLIC CAPITAL LETTER ZHE WITH DESCENDER 0x0081: 0x0492, # CYRILLIC CAPITAL LETTER GHE WITH STROKE 0x0082: 0x04ee, # CYRILLIC CAPITAL LETTER U WITH MACRON 0x0083: 0x0493, # CYRILLIC SMALL LETTER GHE WITH STROKE 0x0084: 0x201e, # DOUBLE LOW-9 QUOTATION MARK 0x0085: 0x2026, # HORIZONTAL ELLIPSIS 0x0086: 0x04b6, # CYRILLIC CAPITAL LETTER CHE WITH DESCENDER 0x0087: 0x04ae, # CYRILLIC CAPITAL LETTER STRAIGHT U 0x0088: 0x04b2, # CYRILLIC CAPITAL LETTER HA WITH DESCENDER 0x0089: 0x04af, # CYRILLIC SMALL LETTER STRAIGHT U 0x008a: 0x04a0, # CYRILLIC CAPITAL LETTER BASHKIR KA 0x008b: 0x04e2, # CYRILLIC CAPITAL LETTER I WITH MACRON 0x008c: 0x04a2, # CYRILLIC CAPITAL LETTER EN WITH DESCENDER 0x008d: 0x049a, # CYRILLIC CAPITAL LETTER 
KA WITH DESCENDER 0x008e: 0x04ba, # CYRILLIC CAPITAL LETTER SHHA 0x008f: 0x04b8, # CYRILLIC CAPITAL LETTER CHE WITH VERTICAL STROKE 0x0090: 0x0497, # CYRILLIC SMALL LETTER ZHE WITH DESCENDER 0x0091: 0x2018, # LEFT SINGLE QUOTATION MARK 0x0092: 0x2019, # RIGHT SINGLE QUOTATION MARK 0x0093: 0x201c, # LEFT DOUBLE QUOTATION MARK 0x0094: 0x201d, # RIGHT DOUBLE QUOTATION MARK 0x0095: 0x2022, # BULLET 0x0096: 0x2013, # EN DASH 0x0097: 0x2014, # EM DASH 0x0098: 0x04b3, # CYRILLIC SMALL LETTER HA WITH DESCENDER 0x0099: 0x04b7, # CYRILLIC SMALL LETTER CHE WITH DESCENDER 0x009a: 0x04a1, # CYRILLIC SMALL LETTER BASHKIR KA 0x009b: 0x04e3, # CYRILLIC SMALL LETTER I WITH MACRON 0x009c: 0x04a3, # CYRILLIC SMALL LETTER EN WITH DESCENDER 0x009d: 0x049b, # CYRILLIC SMALL LETTER KA WITH DESCENDER 0x009e: 0x04bb, # CYRILLIC SMALL LETTER SHHA 0x009f: 0x04b9, # CYRILLIC SMALL LETTER CHE WITH VERTICAL STROKE 0x00a1: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U (Byelorussian) 0x00a2: 0x045e, # CYRILLIC SMALL LETTER SHORT U (Byelorussian) 0x00a3: 0x0408, # CYRILLIC CAPITAL LETTER JE 0x00a4: 0x04e8, # CYRILLIC CAPITAL LETTER BARRED O 0x00a5: 0x0498, # CYRILLIC CAPITAL LETTER ZE WITH DESCENDER 0x00a6: 0x04b0, # CYRILLIC CAPITAL LETTER STRAIGHT U WITH STROKE 0x00a8: 0x0401, # CYRILLIC CAPITAL LETTER IO 0x00aa: 0x04d8, # CYRILLIC CAPITAL LETTER SCHWA 0x00ad: 0x04ef, # CYRILLIC SMALL LETTER U WITH MACRON 0x00af: 0x049c, # CYRILLIC CAPITAL LETTER KA WITH VERTICAL STROKE 0x00b1: 0x04b1, # CYRILLIC SMALL LETTER STRAIGHT U WITH STROKE 0x00b2: 0x0406, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I 0x00b3: 0x0456, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I 0x00b4: 0x0499, # CYRILLIC SMALL LETTER ZE WITH DESCENDER 0x00b5: 0x04e9, # CYRILLIC SMALL LETTER BARRED O 0x00b8: 0x0451, # CYRILLIC SMALL LETTER IO 0x00b9: 0x2116, # NUMERO SIGN 0x00ba: 0x04d9, # CYRILLIC SMALL LETTER SCHWA 0x00bc: 0x0458, # CYRILLIC SMALL LETTER JE 0x00bd: 0x04aa, # CYRILLIC CAPITAL LETTER ES WITH DESCENDER 0x00be: 0x04ab, 
# CYRILLIC SMALL LETTER ES WITH DESCENDER 0x00bf: 0x049d, # CYRILLIC SMALL LETTER KA WITH VERTICAL STROKE 0x00c0: 0x0410, # CYRILLIC CAPITAL LETTER A 0x00c1: 0x0411, # CYRILLIC CAPITAL LETTER BE 0x00c2: 0x0412, # CYRILLIC CAPITAL LETTER VE 0x00c3: 0x0413, # CYRILLIC CAPITAL LETTER GHE 0x00c4: 0x0414, # CYRILLIC CAPITAL LETTER DE 0x00c5: 0x0415, # CYRILLIC CAPITAL LETTER IE 0x00c6: 0x0416, # CYRILLIC CAPITAL LETTER ZHE 0x00c7: 0x0417, # CYRILLIC CAPITAL LETTER ZE 0x00c8: 0x0418, # CYRILLIC CAPITAL LETTER I 0x00c9: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I 0x00ca: 0x041a, # CYRILLIC CAPITAL LETTER KA 0x00cb: 0x041b, # CYRILLIC CAPITAL LETTER EL 0x00cc: 0x041c, # CYRILLIC CAPITAL LETTER EM 0x00cd: 0x041d, # CYRILLIC CAPITAL LETTER EN 0x00ce: 0x041e, # CYRILLIC CAPITAL LETTER O 0x00cf: 0x041f, # CYRILLIC CAPITAL LETTER PE 0x00d0: 0x0420, # CYRILLIC CAPITAL LETTER ER 0x00d1: 0x0421, # CYRILLIC CAPITAL LETTER ES 0x00d2: 0x0422, # CYRILLIC CAPITAL LETTER TE 0x00d3: 0x0423, # CYRILLIC CAPITAL LETTER U 0x00d4: 0x0424, # CYRILLIC CAPITAL LETTER EF 0x00d5: 0x0425, # CYRILLIC CAPITAL LETTER HA 0x00d6: 0x0426, # CYRILLIC CAPITAL LETTER TSE 0x00d7: 0x0427, # CYRILLIC CAPITAL LETTER CHE 0x00d8: 0x0428, # CYRILLIC CAPITAL LETTER SHA 0x00d9: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA 0x00da: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN 0x00db: 0x042b, # CYRILLIC CAPITAL LETTER YERU 0x00dc: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN 0x00dd: 0x042d, # CYRILLIC CAPITAL LETTER E 0x00de: 0x042e, # CYRILLIC CAPITAL LETTER YU 0x00df: 0x042f, # CYRILLIC CAPITAL LETTER YA 0x00e0: 0x0430, # CYRILLIC SMALL LETTER A 0x00e1: 0x0431, # CYRILLIC SMALL LETTER BE 0x00e2: 0x0432, # CYRILLIC SMALL LETTER VE 0x00e3: 0x0433, # CYRILLIC SMALL LETTER GHE 0x00e4: 0x0434, # CYRILLIC SMALL LETTER DE 0x00e5: 0x0435, # CYRILLIC SMALL LETTER IE 0x00e6: 0x0436, # CYRILLIC SMALL LETTER ZHE 0x00e7: 0x0437, # CYRILLIC SMALL LETTER ZE 0x00e8: 0x0438, # CYRILLIC SMALL LETTER I 0x00e9: 0x0439, # CYRILLIC SMALL LETTER 
SHORT I 0x00ea: 0x043a, # CYRILLIC SMALL LETTER KA 0x00eb: 0x043b, # CYRILLIC SMALL LETTER EL 0x00ec: 0x043c, # CYRILLIC SMALL LETTER EM 0x00ed: 0x043d, # CYRILLIC SMALL LETTER EN 0x00ee: 0x043e, # CYRILLIC SMALL LETTER O 0x00ef: 0x043f, # CYRILLIC SMALL LETTER PE 0x00f0: 0x0440, # CYRILLIC SMALL LETTER ER 0x00f1: 0x0441, # CYRILLIC SMALL LETTER ES 0x00f2: 0x0442, # CYRILLIC SMALL LETTER TE 0x00f3: 0x0443, # CYRILLIC SMALL LETTER U 0x00f4: 0x0444, # CYRILLIC SMALL LETTER EF 0x00f5: 0x0445, # CYRILLIC SMALL LETTER HA 0x00f6: 0x0446, # CYRILLIC SMALL LETTER TSE 0x00f7: 0x0447, # CYRILLIC SMALL LETTER CHE 0x00f8: 0x0448, # CYRILLIC SMALL LETTER SHA 0x00f9: 0x0449, # CYRILLIC SMALL LETTER SHCHA 0x00fa: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN 0x00fb: 0x044b, # CYRILLIC SMALL LETTER YERU 0x00fc: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN 0x00fd: 0x044d, # CYRILLIC SMALL LETTER E 0x00fe: 0x044e, # CYRILLIC SMALL LETTER YU 0x00ff: 0x044f, # CYRILLIC SMALL LETTER YA }) ### Encoding Map encoding_map = codecs.make_encoding_map(decoding_map)
apache-2.0
mmbtba/odoo
addons/website_report/controllers/main.py
243
1460
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.addons.website.controllers.main import Website
from openerp.http import request, route


class Website(Website):

    @route()
    def customize_template_get(self, xml_id, full=False):
        """Extend the base customization list: when 'full' is requested,
        append the customizable views recorded in the session under
        'report_view_ids' (populated by the report preview machinery)."""
        parent = super(Website, self)
        res = parent.customize_template_get(xml_id, full=full)
        if not full:
            return res
        for view_info in request.session.get('report_view_ids', []):
            res += parent.customize_template_get(view_info.get('xml_id'),
                                                 full=full)
        return res
agpl-3.0
rupran/ansible
lib/ansible/modules/inventory/group_by.py
56
1483
# -*- mode: python -*- # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['stableinterface'], 'supported_by': 'core'} DOCUMENTATION = ''' --- module: group_by short_description: Create Ansible groups based on facts description: - Use facts to create ad-hoc groups that can be used later in a playbook. version_added: "0.9" options: key: description: - The variables whose values will be used as groups required: true author: "Jeroen Hoekx (@jhoekx)" notes: - Spaces in group names are converted to dashes '-'. ''' EXAMPLES = ''' # Create groups based on the machine architecture - group_by: key: machine_{{ ansible_machine }} # Create groups like 'kvm-host' - group_by: key: virt_{{ ansible_virtualization_type }}_{{ ansible_virtualization_role }} '''
gpl-3.0
adieu/django-nonrel
extras/csrf_migration_helper.py
58
13030
#!/usr/bin/env python # This script aims to help developers locate forms and view code that needs to # use the new CSRF protection in Django 1.2. It tries to find all the code that # may need the steps described in the CSRF documentation. It does not modify # any code directly, it merely attempts to locate it. Developers should be # aware of its limitations, described below. # # For each template that contains at least one POST form, the following info is printed: # # <Absolute path to template> # AKA: <Aliases (relative to template directory/directories that contain it)> # POST forms: <Number of POST forms> # With token: <Number of POST forms with the CSRF token already added> # Without token: # <File name and line number of form without token> # # Searching for: # <Template names that need to be searched for in view code # (includes templates that 'include' current template)> # # Found: # <File name and line number of any view code found> # # The format used allows this script to be used in Emacs grep mode: # M-x grep # Run grep (like this): /path/to/my/virtualenv/python /path/to/django/src/extras/csrf_migration_helper.py --settings=mysettings /path/to/my/srcs # Limitations # =========== # # - All templates must be stored on disk in '.html' or '.htm' files. # (extensions configurable below) # # - All Python code must be stored on disk in '.py' files. (extensions # configurable below) # # - All templates must be accessible from TEMPLATE_DIRS or from the 'templates/' # directory in apps specified in INSTALLED_APPS. Non-file based template # loaders are out of the picture, because there is no way to ask them to # return all templates. # # - It's impossible to programmatically determine which forms should and should # not have the token added. The developer must decide when to do this, # ensuring that the token is only added to internally targetted forms. # # - It's impossible to programmatically work out when a template is used. 
The # attempts to trace back to view functions are guesses, and could easily fail # in the following ways: # # * If the 'include' template tag is used with a variable # i.e. {% include tname %} where tname is a variable containing the actual # template name, rather than {% include "my_template.html" %}. # # * If the template name has been built up by view code instead of as a simple # string. For example, generic views and the admin both do this. (These # apps are both contrib and both use RequestContext already, as it happens). # # * If the 'ssl' tag (or any template tag other than 'include') is used to # include the template in another template. # # - All templates belonging to apps referenced in INSTALLED_APPS will be # searched, which may include third party apps or Django contrib. In some # cases, this will be a good thing, because even if the templates of these # apps have been fixed by someone else, your own view code may reference the # same template and may need to be updated. # # You may, however, wish to comment out some entries in INSTALLED_APPS or # TEMPLATE_DIRS before running this script. # Improvements to this script are welcome! # Configuration # ============= TEMPLATE_EXTENSIONS = [ ".html", ".htm", ] PYTHON_SOURCE_EXTENSIONS = [ ".py", ] TEMPLATE_ENCODING = "UTF-8" PYTHON_ENCODING = "UTF-8" # Method # ====== # Find templates: # - template dirs # - installed apps # # Search for POST forms # - Work out what the name of the template is, as it would appear in an # 'include' or get_template() call. This can be done by comparing template # filename to all template dirs. Some templates can have more than one # 'name' e.g. if a directory and one of its child directories are both in # TEMPLATE_DIRS. This is actually a common hack used for # overriding-and-extending admin templates. 
#
# For each POST form,
# - see if it already contains '{% csrf_token %}' immediately after <form>
# - work back to the view function(s):
#   - First, see if the form is included in any other templates, then
#     recursively compile a list of affected templates.
#   - Find any code function that references that template.  This is just a
#     brute force text search that can easily return false positives
#     and fail to find real instances.

import os
import sys
import re
from optparse import OptionParser

USAGE = """
This tool helps to locate forms that need CSRF tokens added and the
corresponding view code.  This processing is NOT fool proof, and you should read
the help contained in the script itself.  Also, this script may need configuring
(by editing the script) before use.

Usage:

python csrf_migration_helper.py [--settings=path.to.your.settings] /path/to/python/code [more paths...]

Paths can be specified as relative paths.

With no arguments, this help is printed.
"""

# Matches an opening <form> tag whose method attribute is POST (any quoting,
# case-insensitive); group 1 captures the whole tag.
_POST_FORM_RE = \
    re.compile(r'(<form\W[^>]*\bmethod\s*=\s*(\'|"|)POST(\'|"|)\b[^>]*>)', re.IGNORECASE)
_FORM_CLOSE_RE = re.compile(r'</form\s*>')
_TOKEN_RE = re.compile('\{% csrf_token')


def get_template_dirs():
    """
    Returns a set of all directories that contain project templates.
    """
    # Imported lazily so that --settings can be applied before Django's
    # settings machinery is triggered.
    from django.conf import settings
    dirs = set()
    if ('django.template.loaders.filesystem.load_template_source' in
        settings.TEMPLATE_LOADERS
        or 'django.template.loaders.filesystem.Loader' in
        settings.TEMPLATE_LOADERS):
        dirs.update(map(unicode, settings.TEMPLATE_DIRS))

    if ('django.template.loaders.app_directories.load_template_source' in
        settings.TEMPLATE_LOADERS
        or 'django.template.loaders.app_directories.Loader' in
        settings.TEMPLATE_LOADERS):
        from django.template.loaders.app_directories import app_template_dirs
        dirs.update(app_template_dirs)
    return dirs

def make_template_info(filename, root_dirs):
    """
    Creates a Template object for a filename, calculating the possible
    relative_filenames from the supplied filename and root template directories
    """
    return Template(filename,
                    [filename[len(d)+1:] for d in root_dirs if filename.startswith(d)])


class Template(object):
    # Represents one on-disk template file, addressable by one or more
    # relative names (one per template root that contains it).

    def __init__(self, absolute_filename, relative_filenames):
        self.absolute_filename, self.relative_filenames = absolute_filename, relative_filenames

    def content(self):
        # Lazily read and cache the decoded template source; the cache
        # attribute doubles as the "already loaded" flag (EAFP).
        try:
            return self._content
        except AttributeError:
            fd = open(self.absolute_filename)
            try:
                content = fd.read().decode(TEMPLATE_ENCODING)
            except UnicodeDecodeError, e:
                message = '%s in %s' % (
                    e[4], self.absolute_filename.encode('UTF-8', 'ignore'))
                raise UnicodeDecodeError(*(e.args[:4] + (message,)))
            fd.close()
            self._content = content
            return content
    content = property(content)

    def post_form_info(self):
        """
        Get information about any POST forms in the template.
        Returns [(linenumber, csrf_token added)]
        """
        forms = {}
        form_line = 0
        for ln, line in enumerate(self.content.split("\n")):
            if not form_line and _POST_FORM_RE.search(line):
                # record the form with no CSRF token yet
                form_line = ln + 1
                forms[form_line] = False
            if form_line and _TOKEN_RE.search(line):
                # found the CSRF token
                forms[form_line] = True
                form_line = 0
            if form_line and _FORM_CLOSE_RE.search(line):
                # no token found by form closing tag
                form_line = 0

        return forms.items()

    def includes_template(self, t):
        """
        Returns true if this template includes template 't' (via {% include %})
        """
        for r in t.relative_filenames:
            if re.search(r'\{%\s*include\s+(\'|")' + re.escape(r) + r'(\1)\s*%\}', self.content):
                return True
        return False

    def related_templates(self):
        """
        Returns all templates that include this one, recursively.  (starting
        with this one)
        """
        # Cached after first computation (EAFP, as with 'content' above).
        try:
            return self._related_templates
        except AttributeError:
            pass

        retval = set([self])
        for t in self.all_templates:
            if t.includes_template(self):
                # If two templates mutually include each other, directly or
                # indirectly, we have a problem here...
                retval = retval.union(t.related_templates())

        self._related_templates = retval
        return retval

    def __repr__(self):
        return repr(self.absolute_filename)

    def __eq__(self, other):
        return self.absolute_filename == other.absolute_filename

    def __hash__(self):
        return hash(self.absolute_filename)

def get_templates(dirs):
    """
    Returns all files in dirs that have template extensions, as Template
    objects.
    """
    templates = set()
    for root in dirs:
        for (dirpath, dirnames, filenames) in os.walk(root):
            for f in filenames:
                if len([True for e in TEMPLATE_EXTENSIONS if f.endswith(e)]) > 0:
                    t = make_template_info(os.path.join(dirpath, f), dirs)
                    # templates need to be able to search others:
                    t.all_templates = templates
                    templates.add(t)
    return templates

def get_python_code(paths):
    """
    Returns all Python code, as a list of tuples, each one being:
     (filename, list of lines)
    """
    retval = []
    for p in paths:
        if not os.path.isdir(p):
            raise Exception("'%s' is not a directory." % p)
        for (dirpath, dirnames, filenames) in os.walk(p):
            for f in filenames:
                if len([True for e in PYTHON_SOURCE_EXTENSIONS if f.endswith(e)]) > 0:
                    fn = os.path.join(dirpath, f)
                    fd = open(fn)
                    content = [l.decode(PYTHON_ENCODING) for l in fd.readlines()]
                    fd.close()
                    retval.append((fn, content))
    return retval

def search_python_list(python_code, template_names):
    """
    Searches python code for a list of template names.
    Returns a list of tuples, each one being:
     (filename, line number)
    """
    retval = []
    for tn in template_names:
        retval.extend(search_python(python_code, tn))
    # Deduplicate hits shared between template aliases.
    retval = list(set(retval))
    retval.sort()
    return retval

def search_python(python_code, template_name):
    """
    Searches Python code for a template name.
    Returns a list of tuples, each one being:
     (filename, line number)
    """
    # Brute-force text search for the quoted template name (either quote
    # style); see the caveats in the module header.
    retval = []
    for fn, content in python_code:
        for ln, line in enumerate(content):
            if ((u'"%s"' % template_name) in line) or \
               ((u"'%s'" % template_name) in line):
                retval.append((fn, ln + 1))
    return retval

def main(pythonpaths):
    template_dirs = get_template_dirs()
    templates = get_templates(template_dirs)
    python_code = get_python_code(pythonpaths)
    for t in templates:
        # Logic
        form_matches = t.post_form_info()
        num_post_forms = len(form_matches)
        form_lines_without_token = [ln for (ln, has_token) in form_matches if not has_token]
        if num_post_forms == 0:
            continue
        to_search = [rf for rt in t.related_templates() for rf in rt.relative_filenames]
        found = search_python_list(python_code, to_search)

        # Display:
        print t.absolute_filename
        for r in t.relative_filenames:
            print u"  AKA %s" % r
        print u"  POST forms: %s" % num_post_forms
        print u"  With token: %s" % (num_post_forms - len(form_lines_without_token))
        if form_lines_without_token:
            print u"  Without token:"
            for ln in form_lines_without_token:
                # filename:line: format is understood by editors (grep mode)
                print "%s:%d:" % (t.absolute_filename, ln)
        print
        print u"  Searching for:"
        for r in to_search:
            print u"    " + r
        print
        print u"  Found:"
        if len(found) == 0:
            print "    Nothing"
        else:
            for fn, ln in found:
                print "%s:%d:" % (fn, ln)

        print
        print "----"


parser = OptionParser(usage=USAGE)
parser.add_option("", "--settings", action="store", dest="settings",
                  help="Dotted path to settings file")

if __name__ == '__main__':
    options, args = parser.parse_args()
    if len(args) == 0:
        parser.print_help()
        sys.exit(1)
    settings = getattr(options, 'settings', None)
    if settings is None:
        if os.environ.get("DJANGO_SETTINGS_MODULE", None) is None:
            print "You need to set DJANGO_SETTINGS_MODULE or use the '--settings' parameter"
            sys.exit(1)
    else:
        os.environ["DJANGO_SETTINGS_MODULE"] = settings

    main(args)
bsd-3-clause
PennartLoettring/Poettrix
rootfs/usr/lib/python3.4/ctypes/test/test_keeprefs.py
65
4022
from ctypes import *
import unittest

# These tests pin down the CPython-internal '_objects' bookkeeping that
# ctypes instances use to keep referenced Python objects alive for as long
# as the foreign-memory view needs them.

class SimpleTestCase(unittest.TestCase):
    def test_cint(self):
        # Plain integers are stored by value, so no references are kept.
        x = c_int()
        self.assertEqual(x._objects, None)
        x.value = 42
        self.assertEqual(x._objects, None)
        x = c_int(99)
        self.assertEqual(x._objects, None)

    def test_ccharp(self):
        # c_char_p holds a pointer, so the bytes object must be kept alive.
        x = c_char_p()
        self.assertEqual(x._objects, None)
        x.value = b"abc"
        self.assertEqual(x._objects, b"abc")
        x = c_char_p(b"spam")
        self.assertEqual(x._objects, b"spam")

class StructureTestCase(unittest.TestCase):
    def test_cint_struct(self):
        class X(Structure):
            _fields_ = [("a", c_int),
                        ("b", c_int)]

        x = X()
        self.assertEqual(x._objects, None)
        x.a = 42
        x.b = 99
        self.assertEqual(x._objects, None)

    def test_ccharp_struct(self):
        # Pointer-typed fields record kept objects keyed by field index.
        class X(Structure):
            _fields_ = [("a", c_char_p),
                        ("b", c_char_p)]
        x = X()
        self.assertEqual(x._objects, None)

        x.a = b"spam"
        x.b = b"foo"
        self.assertEqual(x._objects, {"0": b"spam", "1": b"foo"})

    def test_struct_struct(self):
        class POINT(Structure):
            _fields_ = [("x", c_int), ("y", c_int)]
        class RECT(Structure):
            _fields_ = [("ul", POINT), ("lr", POINT)]

        r = RECT()
        r.ul.x = 0
        r.ul.y = 1
        r.lr.x = 2
        r.lr.y = 3
        self.assertEqual(r._objects, None)

        # Assigning a whole sub-structure records an (empty) entry for it.
        r = RECT()
        pt = POINT(1, 2)
        r.ul = pt
        self.assertEqual(r._objects, {'0': {}})
        r.ul.x = 22
        r.ul.y = 44
        self.assertEqual(r._objects, {'0': {}})
        r.lr = POINT()
        self.assertEqual(r._objects, {'0': {}, '1': {}})

class ArrayTestCase(unittest.TestCase):
    def test_cint_array(self):
        INTARR = c_int * 3

        ia = INTARR()
        self.assertEqual(ia._objects, None)
        ia[0] = 1
        ia[1] = 2
        ia[2] = 3
        self.assertEqual(ia._objects, None)

        class X(Structure):
            _fields_ = [("x", c_int),
                        ("a", INTARR)]

        x = X()
        x.x = 1000
        x.a[0] = 42
        x.a[1] = 96
        self.assertEqual(x._objects, None)
        x.a = ia
        self.assertEqual(x._objects, {'1': {}})

class PointerTestCase(unittest.TestCase):
    def test_p_cint(self):
        # pointer() must keep the pointed-to object alive.
        i = c_int(42)
        x = pointer(i)
        self.assertEqual(x._objects, {'1': i})

class DeletePointerTestCase(unittest.TestCase):
    # NOTE: the 'X_' prefix means this test is disabled (unittest only
    # collects methods starting with 'test'); it is kept for manual
    # experimentation and still contains Python 2 remnants.
    def X_test(self):
        class X(Structure):
            _fields_ = [("p", POINTER(c_char_p))]
        x = X()
        i = c_char_p("abc def")
        from sys import getrefcount as grc
        print("2?", grc(i))
        x.p = pointer(i)
        print("3?", grc(i))
        for i in range(320):
            c_int(99)
            x.p[0]
        print(x.p[0])
##        del x
##        print "2?", grc(i)
##        del i
        import gc
        gc.collect()
        for i in range(320):
            c_int(99)
            x.p[0]
        print(x.p[0])
        print(x.p.contents)
##        print x._objects

        x.p[0] = "spam spam"
##        print x.p[0]
        print("+" * 42)
        print(x._objects)

class PointerToStructure(unittest.TestCase):
    def test(self):
        class POINT(Structure):
            _fields_ = [("x", c_int), ("y", c_int)]
        class RECT(Structure):
            _fields_ = [("a", POINTER(POINT)),
                        ("b", POINTER(POINT))]
        r = RECT()
        p1 = POINT(1, 2)

        r.a = pointer(p1)
        r.b = pointer(p1)
##        from pprint import pprint as pp
##        pp(p1._objects)
##        pp(r._objects)

        r.a[0].x = 42
        r.a[0].y = 99

        # to avoid leaking when tests are run several times
        # clean up the types left in the cache.
        from ctypes import _pointer_type_cache
        del _pointer_type_cache[POINT]

if __name__ == "__main__":
    unittest.main()
gpl-2.0
HybridF5/jacket
jacket/objects/compute/virt_cpu_topology.py
1
1480
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from jacket.objects.compute import base
from jacket.objects.compute import fields

# Attribute names shared by from_dict()/to_dict(); they mirror the nested
# dict layout used to persist the CPU topology in the database.
_TOPOLOGY_KEYS = ('sockets', 'cores', 'threads')


@base.NovaObjectRegistry.register
class VirtCPUTopology(base.NovaObject):
    """Virtual CPU topology (sockets/cores/threads) as a versioned object."""

    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'sockets': fields.IntegerField(nullable=True, default=1),
        'cores': fields.IntegerField(nullable=True, default=1),
        'threads': fields.IntegerField(nullable=True, default=1),
    }

    # NOTE(jaypipes): for backward compatibility, the virt CPU topology
    # data is stored in the database as a nested dict.
    @classmethod
    def from_dict(cls, data):
        """Build a VirtCPUTopology from the legacy nested-dict DB format."""
        return cls(**{key: data.get(key) for key in _TOPOLOGY_KEYS})

    def to_dict(self):
        """Serialize this topology back to the legacy nested-dict format."""
        return {key: getattr(self, key) for key in _TOPOLOGY_KEYS}
apache-2.0
jamiethemorris/SPH-L710_Kernel
build-all.py
75
10510
#! /usr/bin/env python # Copyright (c) 2009-2011, The Linux Foundation. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of The Linux Foundation nor # the names of its contributors may be used to endorse or promote # products derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Build the kernel for all targets using the Android build environment. # # TODO: Accept arguments to indicate what to build. # # Modify for supporting of the Samsung JF targets. 
# import glob from optparse import OptionParser import subprocess import os import os.path import shutil import sys import tarfile version = 'build-all.py, version 0.01' build_dir = '../../output/all-kernels' make_command = ["zImage", "modules"] make_env = os.environ pwd = os.environ.get("PWD") make_env.update({ 'ARCH': 'arm', 'CROSS_COMPILE': pwd + '/../prebuilts/gcc/linux-x86/arm/arm-eabi-4.7/bin/arm-eabi-', 'KCONFIG_NOTIMESTAMP': 'true' }) all_options = {} def error(msg): sys.stderr.write("error: %s\n" % msg) def fail(msg): """Fail with a user-printed message""" error(msg) sys.exit(1) def check_kernel(): """Ensure that PWD is a kernel directory""" if (not os.path.isfile('MAINTAINERS') or not os.path.isfile('arch/arm/mach-msm/Kconfig')): fail("This doesn't seem to be an MSM kernel dir") def check_build(): """Ensure that the build directory is present.""" if not os.path.isdir(build_dir): try: os.makedirs(build_dir) except OSError as exc: if exc.errno == errno.EEXIST: pass else: raise def update_config(file, str): print 'Updating %s with \'%s\'\n' % (file, str) defconfig = open(file, 'a') defconfig.write(str + '\n') defconfig.close() def scan_configs(): """Get the full list of defconfigs appropriate for this tree.""" names = {} for n in glob.glob('arch/arm/configs/jf_???_defconfig'): names[os.path.basename(n)[:-10]] = n return names class Builder: def __init__(self, logname): self.logname = logname self.fd = open(logname, 'w') def run(self, args): devnull = open('/dev/null', 'r') proc = subprocess.Popen(args, stdin=devnull, env=make_env, bufsize=0, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) count = 0 # for line in proc.stdout: rawfd = proc.stdout.fileno() while True: line = os.read(rawfd, 1024) if not line: break self.fd.write(line) self.fd.flush() if all_options.verbose: sys.stdout.write(line) sys.stdout.flush() else: for i in range(line.count('\n')): count += 1 if count == 64: count = 0 print sys.stdout.write('.') sys.stdout.flush() print result = 
proc.wait() self.fd.close() return result failed_targets = [] def build(target): dest_dir = os.path.join(build_dir, target) log_name = '%s/log-%s.log' % (build_dir, target) zImage_name = '%s/arch/arm/boot/zImage' % (dest_dir) bootImage_name = '%s/arch/arm/boot/boot.img' % (dest_dir) signedImage_name = '%s/arch/arm/boot/signed_boot.img' % (dest_dir) tarball_name = '%s/%s.tar' % (build_dir, target) if target == 'jf_att': signing = "SGH-I337_NA_ATT_C" elif target == 'jactive_att': signing = "SGH-I537_NA_ATT_C" elif target == 'jf_tmo': signing = "SGH-M919_NA_TMB_C" elif target == 'jf_vzw': signing = "SCH-I545_NA_VZW_C" elif target == 'jf_spr': signing = "SPH-L720_NA_SPR_C" elif target == 'jf_cri': signing = "SCH-R970C_NA_CRI_C" elif target == 'jf_usc': signing = "SCH-R970_NA_USC_C" elif target == 'jf_eur': signing = "GT-I9505_EUR_XX_C" print 'Building %s in %s log %s' % (target, dest_dir, log_name) if not os.path.isdir(dest_dir): os.mkdir(dest_dir) defconfig = 'arch/arm/configs/%s_defconfig' % target[:-4] dotconfig = '%s/.config' % dest_dir savedefconfig = '%s/defconfig' % dest_dir shutil.copyfile(defconfig, dotconfig) devnull = open('/dev/null', 'r') subprocess.check_call(['make', 'O=%s' % dest_dir, 'VARIANT_DEFCONFIG=%s_defconfig' % target, 'DEBUG_DEFCONFIG=%seng_defconfig' % target[:-4], # 'SELINUX_DEFCONFIG=%sselinux_defconfig' % target[:-4], # 'SELINUX_LOG_DEFCONFIG=%sselinux_log_defconfig' % target[:-4], '%s_defconfig' % target[:-4]], env=make_env, stdin=devnull) devnull.close() if not all_options.updateconfigs: build = Builder(log_name) result = build.run(['make', 'O=%s' % dest_dir] + make_command) if result != 0: if all_options.keep_going: failed_targets.append(target) fail_or_error = error else: fail_or_error = fail fail_or_error("Failed to build %s, see %s" % (target, build.logname)) if result == 0: os.rename(zImage_name, bootImage_name) os.system('java -jar ../../buildscript/tools/signclient.jar -runtype ss_openssl_all -model %s -input %s -output %s' 
%(signing,bootImage_name,signedImage_name)) tar = tarfile.open(tarball_name, "w") tar.add(signedImage_name, arcname='boot.img') tar.close() # Copy the defconfig back. if all_options.configs or all_options.updateconfigs: devnull = open('/dev/null', 'r') subprocess.check_call(['make', 'O=%s' % dest_dir, 'savedefconfig'], env=make_env, stdin=devnull) devnull.close() shutil.copyfile(savedefconfig, defconfig) def build_many(allconf, targets): print "Building %d target(s)" % len(targets) for target in targets: if all_options.updateconfigs: update_config(allconf[target], all_options.updateconfigs) build(target) if failed_targets: fail('\n '.join(["Failed targets:"] + [target for target in failed_targets])) def main(): global make_command check_kernel() check_build() configs = scan_configs() usage = (""" %prog [options] all -- Build all targets %prog [options] jf_att jf_vzw jf_tmo jf_spr ... -- List specific targets""") parser = OptionParser(usage=usage, version=version) parser.add_option('--configs', action='store_true', dest='configs', help="Copy configs back into tree") parser.add_option('--list', action='store_true', dest='list', help='List available targets') parser.add_option('-v', '--verbose', action='store_true', dest='verbose', help='Output to stdout in addition to log file') parser.add_option('--oldconfig', action='store_true', dest='oldconfig', help='Only process "make oldconfig"') parser.add_option('--updateconfigs', dest='updateconfigs', help="Update defconfigs with provided option setting, " "e.g. 
--updateconfigs=\'CONFIG_USE_THING=y\'") parser.add_option('-j', '--jobs', type='int', dest="jobs", help="Number of simultaneous jobs") parser.add_option('-l', '--load-average', type='int', dest='load_average', help="Don't start multiple jobs unless load is below LOAD_AVERAGE") parser.add_option('-k', '--keep-going', action='store_true', dest='keep_going', default=False, help="Keep building other targets if a target fails") parser.add_option('-m', '--make-target', action='append', help='Build the indicated make target (default: %s)' % ' '.join(make_command)) parser.add_option('-i', '--ignore-errors', action='store_true', dest="ignore", help="Ignore errors from commands") (options, args) = parser.parse_args() global all_options all_options = options if options.list: print "Available targets:" for target in configs.keys(): print " %s" % target sys.exit(0) if options.oldconfig: make_command = ["oldconfig"] elif options.make_target: make_command = options.make_target if options.jobs: make_command.append("-j%d" % options.jobs) if options.load_average: make_command.append("-l%d" % options.load_average) if options.ignore: make_command.append("-i") make_command.append("-k") if args == ['all']: build_many(configs, configs.keys()) elif len(args) > 0: targets = [] for t in args: if t not in configs.keys(): parser.error("Target '%s' not one of %s" % (t, configs.keys())) targets.append(t) build_many(configs, targets) else: parser.error("Must specify a target to build, or 'all'") if __name__ == "__main__": main()
gpl-2.0
manthansharma/kivy
examples/3Drendering/objloader.py
17
4665
class MeshData(object): def __init__(self, **kwargs): self.name = kwargs.get("name") self.vertex_format = [ (b'v_pos', 3, 'float'), (b'v_normal', 3, 'float'), (b'v_tc0', 2, 'float')] self.vertices = [] self.indices = [] def calculate_normals(self): for i in range(len(self.indices) / (3)): fi = i * 3 v1i = self.indices[fi] v2i = self.indices[fi + 1] v3i = self.indices[fi + 2] vs = self.vertices p1 = [vs[v1i + c] for c in range(3)] p2 = [vs[v2i + c] for c in range(3)] p3 = [vs[v3i + c] for c in range(3)] u, v = [0, 0, 0], [0, 0, 0] for j in range(3): v[j] = p2[j] - p1[j] u[j] = p3[j] - p1[j] n = [0, 0, 0] n[0] = u[1] * v[2] - u[2] * v[1] n[1] = u[2] * v[0] - u[0] * v[2] n[2] = u[0] * v[1] - u[1] * v[0] for k in range(3): self.vertices[v1i + 3 + k] = n[k] self.vertices[v2i + 3 + k] = n[k] self.vertices[v3i + 3 + k] = n[k] class ObjFile: def finish_object(self): if self._current_object is None: return mesh = MeshData() idx = 0 for f in self.faces: verts = f[0] norms = f[1] tcs = f[2] for i in range(3): # get normal components n = (0.0, 0.0, 0.0) if norms[i] != -1: n = self.normals[norms[i] - 1] # get texture coordinate components t = (0.0, 0.0) if tcs[i] != -1: t = self.texcoords[tcs[i] - 1] # get vertex components v = self.vertices[verts[i] - 1] data = [v[0], v[1], v[2], n[0], n[1], n[2], t[0], t[1]] mesh.vertices.extend(data) tri = [idx, idx + 1, idx + 2] mesh.indices.extend(tri) idx += 3 self.objects[self._current_object] = mesh # mesh.calculate_normals() self.faces = [] def __init__(self, filename, swapyz=False): """Loads a Wavefront OBJ file. 
""" self.objects = {} self.vertices = [] self.normals = [] self.texcoords = [] self.faces = [] self._current_object = None material = None for line in open(filename, "r"): if line.startswith('#'): continue if line.startswith('s'): continue values = line.split() if not values: continue if values[0] == 'o': self.finish_object() self._current_object = values[1] # elif values[0] == 'mtllib': # self.mtl = MTL(values[1]) # elif values[0] in ('usemtl', 'usemat'): # material = values[1] if values[0] == 'v': v = list(map(float, values[1:4])) if swapyz: v = v[0], v[2], v[1] self.vertices.append(v) elif values[0] == 'vn': v = list(map(float, values[1:4])) if swapyz: v = v[0], v[2], v[1] self.normals.append(v) elif values[0] == 'vt': self.texcoords.append(map(float, values[1:3])) elif values[0] == 'f': face = [] texcoords = [] norms = [] for v in values[1:]: w = v.split('/') face.append(int(w[0])) if len(w) >= 2 and len(w[1]) > 0: texcoords.append(int(w[1])) else: texcoords.append(-1) if len(w) >= 3 and len(w[2]) > 0: norms.append(int(w[2])) else: norms.append(-1) self.faces.append((face, norms, texcoords, material)) self.finish_object() def MTL(filename): contents = {} mtl = None return for line in open(filename, "r"): if line.startswith('#'): continue values = line.split() if not values: continue if values[0] == 'newmtl': mtl = contents[values[1]] = {} elif mtl is None: raise ValueError("mtl file doesn't start with newmtl stmt") mtl[values[0]] = values[1:] return contents
mit
puremourning/YouCompleteMe
python/ycm/tests/client/completion_request_test.py
6
9447
# Copyright (C) 2015-2019 YouCompleteMe Contributors # # This file is part of YouCompleteMe. # # YouCompleteMe is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # YouCompleteMe is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>. import json from hamcrest import assert_that, equal_to from ycm.tests.conftest import UserOptions from ycm.tests.test_utils import MockVimModule vim_mock = MockVimModule() from ycm.client import completion_request class ConvertCompletionResponseToVimDatas_test: """ This class tests the completion_request.ConvertCompletionResponseToVimDatas method """ def _Check( self, completion_data, expected_vim_data ): vim_data = completion_request.ConvertCompletionDataToVimData( completion_data ) try: assert_that( vim_data, equal_to( expected_vim_data ) ) except Exception: print( "Expected:\n" f"'{ expected_vim_data }'\n" "when parsing:\n'" f"{ completion_data }'\n" "But found:\n" f"'{ vim_data }'" ) raise def AllFields_test( self ): extra_data = { 'doc_string': 'DOC STRING', } self._Check( { 'insertion_text': 'INSERTION TEXT', 'menu_text': 'MENU TEXT', 'extra_menu_info': 'EXTRA MENU INFO', 'kind': 'K', 'detailed_info': 'DETAILED INFO', 'extra_data': extra_data, }, { 'word' : 'INSERTION TEXT', 'abbr' : 'MENU TEXT', 'menu' : 'EXTRA MENU INFO', 'kind' : 'k', 'info' : 'DETAILED INFO\nDOC STRING', 'equal' : 1, 'dup' : 1, 'empty' : 1, 'user_data': json.dumps( extra_data ), } ) def OnlyInsertionTextField_test( self ): self._Check( { 'insertion_text': 'INSERTION TEXT' 
}, { 'word' : 'INSERTION TEXT', 'abbr' : '', 'menu' : '', 'kind' : '', 'info' : '', 'equal' : 1, 'dup' : 1, 'empty' : 1, 'user_data': '{}', } ) def JustDetailedInfo_test( self ): self._Check( { 'insertion_text': 'INSERTION TEXT', 'menu_text': 'MENU TEXT', 'extra_menu_info': 'EXTRA MENU INFO', 'kind': 'K', 'detailed_info': 'DETAILED INFO', }, { 'word' : 'INSERTION TEXT', 'abbr' : 'MENU TEXT', 'menu' : 'EXTRA MENU INFO', 'kind' : 'k', 'info' : 'DETAILED INFO', 'equal' : 1, 'dup' : 1, 'empty' : 1, 'user_data': '{}', } ) def JustDocString_test( self ): extra_data = { 'doc_string': 'DOC STRING', } self._Check( { 'insertion_text': 'INSERTION TEXT', 'menu_text': 'MENU TEXT', 'extra_menu_info': 'EXTRA MENU INFO', 'kind': 'K', 'extra_data': extra_data, }, { 'word' : 'INSERTION TEXT', 'abbr' : 'MENU TEXT', 'menu' : 'EXTRA MENU INFO', 'kind' : 'k', 'info' : 'DOC STRING', 'equal' : 1, 'dup' : 1, 'empty' : 1, 'user_data': json.dumps( extra_data ), } ) def ExtraInfoNoDocString_test( self ): self._Check( { 'insertion_text': 'INSERTION TEXT', 'menu_text': 'MENU TEXT', 'extra_menu_info': 'EXTRA MENU INFO', 'kind': 'K', 'extra_data': { }, }, { 'word' : 'INSERTION TEXT', 'abbr' : 'MENU TEXT', 'menu' : 'EXTRA MENU INFO', 'kind' : 'k', 'info' : '', 'equal' : 1, 'dup' : 1, 'empty' : 1, 'user_data': '{}', } ) def NullCharactersInExtraInfoAndDocString_test( self ): extra_data = { 'doc_string': 'DOC\x00STRING' } self._Check( { 'insertion_text': 'INSERTION TEXT', 'menu_text': 'MENU TEXT', 'extra_menu_info': 'EXTRA MENU INFO', 'kind': 'K', 'detailed_info': 'DETAILED\x00INFO', 'extra_data': extra_data, }, { 'word' : 'INSERTION TEXT', 'abbr' : 'MENU TEXT', 'menu' : 'EXTRA MENU INFO', 'kind' : 'k', 'info' : 'DETAILEDINFO\nDOCSTRING', 'equal' : 1, 'dup' : 1, 'empty' : 1, 'user_data': json.dumps( extra_data ), } ) def ExtraInfoNoDocStringWithDetailedInfo_test( self ): self._Check( { 'insertion_text': 'INSERTION TEXT', 'menu_text': 'MENU TEXT', 'extra_menu_info': 'EXTRA MENU INFO', 'kind': 'K', 
'detailed_info': 'DETAILED INFO', 'extra_data': { }, }, { 'word' : 'INSERTION TEXT', 'abbr' : 'MENU TEXT', 'menu' : 'EXTRA MENU INFO', 'kind' : 'k', 'info' : 'DETAILED INFO', 'equal' : 1, 'dup' : 1, 'empty' : 1, 'user_data': '{}', } ) def EmptyInsertionText_test( self ): extra_data = { 'doc_string': 'DOC STRING', } self._Check( { 'insertion_text': '', 'menu_text': 'MENU TEXT', 'extra_menu_info': 'EXTRA MENU INFO', 'kind': 'K', 'detailed_info': 'DETAILED INFO', 'extra_data': extra_data, }, { 'word' : '', 'abbr' : 'MENU TEXT', 'menu' : 'EXTRA MENU INFO', 'kind' : 'k', 'info' : 'DETAILED INFO\nDOC STRING', 'equal' : 1, 'dup' : 1, 'empty' : 1, 'user_data': json.dumps( extra_data ), } ) def TruncateForPopup_test( self, *args ): with UserOptions( { '&columns': 60, '&completeopt': b'popup,menuone' } ): extra_data = { 'doc_string': 'DOC STRING', } self._Check( { 'insertion_text': '', 'menu_text': 'MENU TEXT', 'extra_menu_info': 'ESPECIALLY LONG EXTRA MENU INFO LOREM IPSUM DOLOR', 'kind': 'K', 'detailed_info': 'DETAILED INFO', 'extra_data': extra_data, }, { 'word' : '', 'abbr' : 'MENU TEXT', 'menu' : 'ESPECIALLY LONG E...', 'kind' : 'k', 'info' : 'ESPECIALLY LONG EXTRA MENU INFO LOREM IPSUM DOLOR\n\n' + 'DETAILED INFO\nDOC STRING', 'equal' : 1, 'dup' : 1, 'empty' : 1, 'user_data': json.dumps( extra_data ), } ) def OnlyTruncateForPopupIfNecessary_test( self, *args ): with UserOptions( { '&columns': 60, '&completeopt': b'popup,menuone' } ): extra_data = { 'doc_string': 'DOC STRING', } self._Check( { 'insertion_text': '', 'menu_text': 'MENU TEXT', 'extra_menu_info': 'EXTRA MENU INFO', 'kind': 'K', 'detailed_info': 'DETAILED INFO', 'extra_data': extra_data, }, { 'word' : '', 'abbr' : 'MENU TEXT', 'menu' : 'EXTRA MENU INFO', 'kind' : 'k', 'info' : 'DETAILED INFO\nDOC STRING', 'equal' : 1, 'dup' : 1, 'empty' : 1, 'user_data': json.dumps( extra_data ), } ) def DontTruncateIfNotPopup_test( self, *args ): with UserOptions( { '&columns': 60, '&completeopt': b'preview,menuone' } ): 
extra_data = { 'doc_string': 'DOC STRING', } self._Check( { 'insertion_text': '', 'menu_text': 'MENU TEXT', 'extra_menu_info': 'ESPECIALLY LONG EXTRA MENU INFO LOREM IPSUM DOLOR', 'kind': 'K', 'detailed_info': 'DETAILED INFO', 'extra_data': extra_data, }, { 'word' : '', 'abbr' : 'MENU TEXT', 'menu' : 'ESPECIALLY LONG EXTRA MENU INFO LOREM IPSUM DOLOR', 'kind' : 'k', 'info' : 'DETAILED INFO\nDOC STRING', 'equal' : 1, 'dup' : 1, 'empty' : 1, 'user_data': json.dumps( extra_data ), } ) def TruncateForPopupWithoutDuplication_test( self, *args ): with UserOptions( { '&columns': 60, '&completeopt': b'popup,menuone' } ): extra_data = { 'doc_string': 'DOC STRING', } self._Check( { 'insertion_text': '', 'menu_text': 'MENU TEXT', 'extra_menu_info': 'ESPECIALLY LONG METHOD SIGNATURE LOREM IPSUM', 'kind': 'K', 'detailed_info': 'ESPECIALLY LONG METHOD SIGNATURE LOREM IPSUM', 'extra_data': extra_data, }, { 'word' : '', 'abbr' : 'MENU TEXT', 'menu' : 'ESPECIALLY LONG M...', 'kind' : 'k', 'info' : 'ESPECIALLY LONG METHOD SIGNATURE LOREM IPSUM\n' + 'DOC STRING', 'equal' : 1, 'dup' : 1, 'empty' : 1, 'user_data': json.dumps( extra_data ), } )
gpl-3.0
Endika/omim
3party/protobuf/python/google/protobuf/descriptor_pool.py
70
22797
# Protocol Buffers - Google's data interchange format # Copyright 2008 Google Inc. All rights reserved. # https://developers.google.com/protocol-buffers/ # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Provides DescriptorPool to use as a container for proto2 descriptors. The DescriptorPool is used in conjection with a DescriptorDatabase to maintain a collection of protocol buffer descriptors for use when dynamically creating message types at runtime. For most applications protocol buffers should be used via modules generated by the protocol buffer compiler tool. 
This should only be used when the type of protocol buffers used in an application or library cannot be predetermined. Below is a straightforward example on how to use this class: pool = DescriptorPool() file_descriptor_protos = [ ... ] for file_descriptor_proto in file_descriptor_protos: pool.Add(file_descriptor_proto) my_message_descriptor = pool.FindMessageTypeByName('some.package.MessageType') The message descriptor can be used in conjunction with the message_factory module in order to create a protocol buffer class that can be encoded and decoded. If you want to get a Python class for the specified proto, use the helper functions inside google.protobuf.message_factory directly instead of this class. """ __author__ = 'matthewtoia@google.com (Matt Toia)' import sys from google.protobuf import descriptor from google.protobuf import descriptor_database from google.protobuf import text_encoding def _NormalizeFullyQualifiedName(name): """Remove leading period from fully-qualified type name. Due to b/13860351 in descriptor_database.py, types in the root namespace are generated with a leading period. This function removes that prefix. Args: name: A str, the fully-qualified symbol name. Returns: A str, the normalized fully-qualified symbol name. """ return name.lstrip('.') class DescriptorPool(object): """A collection of protobufs dynamically constructed by descriptor protos.""" def __init__(self, descriptor_db=None): """Initializes a Pool of proto buffs. The descriptor_db argument to the constructor is provided to allow specialized file descriptor proto lookup code to be triggered on demand. An example would be an implementation which will read and compile a file specified in a call to FindFileByName() and not require the call to Add() at all. Results from this database will be cached internally here as well. Args: descriptor_db: A secondary source of file descriptors. 
""" self._internal_db = descriptor_database.DescriptorDatabase() self._descriptor_db = descriptor_db self._descriptors = {} self._enum_descriptors = {} self._file_descriptors = {} def Add(self, file_desc_proto): """Adds the FileDescriptorProto and its types to this pool. Args: file_desc_proto: The FileDescriptorProto to add. """ self._internal_db.Add(file_desc_proto) def AddDescriptor(self, desc): """Adds a Descriptor to the pool, non-recursively. If the Descriptor contains nested messages or enums, the caller must explicitly register them. This method also registers the FileDescriptor associated with the message. Args: desc: A Descriptor. """ if not isinstance(desc, descriptor.Descriptor): raise TypeError('Expected instance of descriptor.Descriptor.') self._descriptors[desc.full_name] = desc self.AddFileDescriptor(desc.file) def AddEnumDescriptor(self, enum_desc): """Adds an EnumDescriptor to the pool. This method also registers the FileDescriptor associated with the message. Args: enum_desc: An EnumDescriptor. """ if not isinstance(enum_desc, descriptor.EnumDescriptor): raise TypeError('Expected instance of descriptor.EnumDescriptor.') self._enum_descriptors[enum_desc.full_name] = enum_desc self.AddFileDescriptor(enum_desc.file) def AddFileDescriptor(self, file_desc): """Adds a FileDescriptor to the pool, non-recursively. If the FileDescriptor contains messages or enums, the caller must explicitly register them. Args: file_desc: A FileDescriptor. """ if not isinstance(file_desc, descriptor.FileDescriptor): raise TypeError('Expected instance of descriptor.FileDescriptor.') self._file_descriptors[file_desc.name] = file_desc def FindFileByName(self, file_name): """Gets a FileDescriptor by file name. Args: file_name: The path to the file to get a descriptor for. Returns: A FileDescriptor for the named file. Raises: KeyError: if the file can not be found in the pool. 
""" try: return self._file_descriptors[file_name] except KeyError: pass try: file_proto = self._internal_db.FindFileByName(file_name) except KeyError: _, error, _ = sys.exc_info() #PY25 compatible for GAE. if self._descriptor_db: file_proto = self._descriptor_db.FindFileByName(file_name) else: raise error if not file_proto: raise KeyError('Cannot find a file named %s' % file_name) return self._ConvertFileProtoToFileDescriptor(file_proto) def FindFileContainingSymbol(self, symbol): """Gets the FileDescriptor for the file containing the specified symbol. Args: symbol: The name of the symbol to search for. Returns: A FileDescriptor that contains the specified symbol. Raises: KeyError: if the file can not be found in the pool. """ symbol = _NormalizeFullyQualifiedName(symbol) try: return self._descriptors[symbol].file except KeyError: pass try: return self._enum_descriptors[symbol].file except KeyError: pass try: file_proto = self._internal_db.FindFileContainingSymbol(symbol) except KeyError: _, error, _ = sys.exc_info() #PY25 compatible for GAE. if self._descriptor_db: file_proto = self._descriptor_db.FindFileContainingSymbol(symbol) else: raise error if not file_proto: raise KeyError('Cannot find a file containing %s' % symbol) return self._ConvertFileProtoToFileDescriptor(file_proto) def FindMessageTypeByName(self, full_name): """Loads the named descriptor from the pool. Args: full_name: The full name of the descriptor to load. Returns: The descriptor for the named type. """ full_name = _NormalizeFullyQualifiedName(full_name) if full_name not in self._descriptors: self.FindFileContainingSymbol(full_name) return self._descriptors[full_name] def FindEnumTypeByName(self, full_name): """Loads the named enum descriptor from the pool. Args: full_name: The full name of the enum descriptor to load. Returns: The enum descriptor for the named type. 
""" full_name = _NormalizeFullyQualifiedName(full_name) if full_name not in self._enum_descriptors: self.FindFileContainingSymbol(full_name) return self._enum_descriptors[full_name] def _ConvertFileProtoToFileDescriptor(self, file_proto): """Creates a FileDescriptor from a proto or returns a cached copy. This method also has the side effect of loading all the symbols found in the file into the appropriate dictionaries in the pool. Args: file_proto: The proto to convert. Returns: A FileDescriptor matching the passed in proto. """ if file_proto.name not in self._file_descriptors: built_deps = list(self._GetDeps(file_proto.dependency)) direct_deps = [self.FindFileByName(n) for n in file_proto.dependency] file_descriptor = descriptor.FileDescriptor( name=file_proto.name, package=file_proto.package, options=file_proto.options, serialized_pb=file_proto.SerializeToString(), dependencies=direct_deps) scope = {} # This loop extracts all the message and enum types from all the # dependencoes of the file_proto. This is necessary to create the # scope of available message types when defining the passed in # file proto. 
for dependency in built_deps: scope.update(self._ExtractSymbols( dependency.message_types_by_name.values())) scope.update((_PrefixWithDot(enum.full_name), enum) for enum in dependency.enum_types_by_name.values()) for message_type in file_proto.message_type: message_desc = self._ConvertMessageDescriptor( message_type, file_proto.package, file_descriptor, scope) file_descriptor.message_types_by_name[message_desc.name] = message_desc for enum_type in file_proto.enum_type: file_descriptor.enum_types_by_name[enum_type.name] = ( self._ConvertEnumDescriptor(enum_type, file_proto.package, file_descriptor, None, scope)) for index, extension_proto in enumerate(file_proto.extension): extension_desc = self.MakeFieldDescriptor( extension_proto, file_proto.package, index, is_extension=True) extension_desc.containing_type = self._GetTypeFromScope( file_descriptor.package, extension_proto.extendee, scope) self.SetFieldType(extension_proto, extension_desc, file_descriptor.package, scope) file_descriptor.extensions_by_name[extension_desc.name] = extension_desc for desc_proto in file_proto.message_type: self.SetAllFieldTypes(file_proto.package, desc_proto, scope) if file_proto.package: desc_proto_prefix = _PrefixWithDot(file_proto.package) else: desc_proto_prefix = '' for desc_proto in file_proto.message_type: desc = self._GetTypeFromScope(desc_proto_prefix, desc_proto.name, scope) file_descriptor.message_types_by_name[desc_proto.name] = desc self.Add(file_proto) self._file_descriptors[file_proto.name] = file_descriptor return self._file_descriptors[file_proto.name] def _ConvertMessageDescriptor(self, desc_proto, package=None, file_desc=None, scope=None): """Adds the proto to the pool in the specified package. Args: desc_proto: The descriptor_pb2.DescriptorProto protobuf message. package: The package the proto should be located in. file_desc: The file containing this message. scope: Dict mapping short and full symbols to message and enum types. Returns: The added descriptor. 
""" if package: desc_name = '.'.join((package, desc_proto.name)) else: desc_name = desc_proto.name if file_desc is None: file_name = None else: file_name = file_desc.name if scope is None: scope = {} nested = [ self._ConvertMessageDescriptor(nested, desc_name, file_desc, scope) for nested in desc_proto.nested_type] enums = [ self._ConvertEnumDescriptor(enum, desc_name, file_desc, None, scope) for enum in desc_proto.enum_type] fields = [self.MakeFieldDescriptor(field, desc_name, index) for index, field in enumerate(desc_proto.field)] extensions = [ self.MakeFieldDescriptor(extension, desc_name, index, is_extension=True) for index, extension in enumerate(desc_proto.extension)] oneofs = [ descriptor.OneofDescriptor(desc.name, '.'.join((desc_name, desc.name)), index, None, []) for index, desc in enumerate(desc_proto.oneof_decl)] extension_ranges = [(r.start, r.end) for r in desc_proto.extension_range] if extension_ranges: is_extendable = True else: is_extendable = False desc = descriptor.Descriptor( name=desc_proto.name, full_name=desc_name, filename=file_name, containing_type=None, fields=fields, oneofs=oneofs, nested_types=nested, enum_types=enums, extensions=extensions, options=desc_proto.options, is_extendable=is_extendable, extension_ranges=extension_ranges, file=file_desc, serialized_start=None, serialized_end=None) for nested in desc.nested_types: nested.containing_type = desc for enum in desc.enum_types: enum.containing_type = desc for field_index, field_desc in enumerate(desc_proto.field): if field_desc.HasField('oneof_index'): oneof_index = field_desc.oneof_index oneofs[oneof_index].fields.append(fields[field_index]) fields[field_index].containing_oneof = oneofs[oneof_index] scope[_PrefixWithDot(desc_name)] = desc self._descriptors[desc_name] = desc return desc def _ConvertEnumDescriptor(self, enum_proto, package=None, file_desc=None, containing_type=None, scope=None): """Make a protobuf EnumDescriptor given an EnumDescriptorProto protobuf. 
Args: enum_proto: The descriptor_pb2.EnumDescriptorProto protobuf message. package: Optional package name for the new message EnumDescriptor. file_desc: The file containing the enum descriptor. containing_type: The type containing this enum. scope: Scope containing available types. Returns: The added descriptor """ if package: enum_name = '.'.join((package, enum_proto.name)) else: enum_name = enum_proto.name if file_desc is None: file_name = None else: file_name = file_desc.name values = [self._MakeEnumValueDescriptor(value, index) for index, value in enumerate(enum_proto.value)] desc = descriptor.EnumDescriptor(name=enum_proto.name, full_name=enum_name, filename=file_name, file=file_desc, values=values, containing_type=containing_type, options=enum_proto.options) scope['.%s' % enum_name] = desc self._enum_descriptors[enum_name] = desc return desc def MakeFieldDescriptor(self, field_proto, message_name, index, is_extension=False): """Creates a field descriptor from a FieldDescriptorProto. For message and enum type fields, this method will do a look up in the pool for the appropriate descriptor for that type. If it is unavailable, it will fall back to the _source function to create it. If this type is still unavailable, construction will fail. Args: field_proto: The proto describing the field. message_name: The name of the containing message. index: Index of the field is_extension: Indication that this field is for an extension. 
Returns: An initialized FieldDescriptor object """ if message_name: full_name = '.'.join((message_name, field_proto.name)) else: full_name = field_proto.name return descriptor.FieldDescriptor( name=field_proto.name, full_name=full_name, index=index, number=field_proto.number, type=field_proto.type, cpp_type=None, message_type=None, enum_type=None, containing_type=None, label=field_proto.label, has_default_value=False, default_value=None, is_extension=is_extension, extension_scope=None, options=field_proto.options) def SetAllFieldTypes(self, package, desc_proto, scope): """Sets all the descriptor's fields's types. This method also sets the containing types on any extensions. Args: package: The current package of desc_proto. desc_proto: The message descriptor to update. scope: Enclosing scope of available types. """ package = _PrefixWithDot(package) main_desc = self._GetTypeFromScope(package, desc_proto.name, scope) if package == '.': nested_package = _PrefixWithDot(desc_proto.name) else: nested_package = '.'.join([package, desc_proto.name]) for field_proto, field_desc in zip(desc_proto.field, main_desc.fields): self.SetFieldType(field_proto, field_desc, nested_package, scope) for extension_proto, extension_desc in ( zip(desc_proto.extension, main_desc.extensions)): extension_desc.containing_type = self._GetTypeFromScope( nested_package, extension_proto.extendee, scope) self.SetFieldType(extension_proto, extension_desc, nested_package, scope) for nested_type in desc_proto.nested_type: self.SetAllFieldTypes(nested_package, nested_type, scope) def SetFieldType(self, field_proto, field_desc, package, scope): """Sets the field's type, cpp_type, message_type and enum_type. Args: field_proto: Data about the field in proto format. field_desc: The descriptor to modiy. package: The package the field's container is in. scope: Enclosing scope of available types. 
""" if field_proto.type_name: desc = self._GetTypeFromScope(package, field_proto.type_name, scope) else: desc = None if not field_proto.HasField('type'): if isinstance(desc, descriptor.Descriptor): field_proto.type = descriptor.FieldDescriptor.TYPE_MESSAGE else: field_proto.type = descriptor.FieldDescriptor.TYPE_ENUM field_desc.cpp_type = descriptor.FieldDescriptor.ProtoTypeToCppProtoType( field_proto.type) if (field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE or field_proto.type == descriptor.FieldDescriptor.TYPE_GROUP): field_desc.message_type = desc if field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM: field_desc.enum_type = desc if field_proto.label == descriptor.FieldDescriptor.LABEL_REPEATED: field_desc.has_default_value = False field_desc.default_value = [] elif field_proto.HasField('default_value'): field_desc.has_default_value = True if (field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE or field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT): field_desc.default_value = float(field_proto.default_value) elif field_proto.type == descriptor.FieldDescriptor.TYPE_STRING: field_desc.default_value = field_proto.default_value elif field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL: field_desc.default_value = field_proto.default_value.lower() == 'true' elif field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM: field_desc.default_value = field_desc.enum_type.values_by_name[ field_proto.default_value].index elif field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES: field_desc.default_value = text_encoding.CUnescape( field_proto.default_value) else: field_desc.default_value = int(field_proto.default_value) else: field_desc.has_default_value = False field_desc.default_value = None field_desc.type = field_proto.type def _MakeEnumValueDescriptor(self, value_proto, index): """Creates a enum value descriptor object from a enum value proto. Args: value_proto: The proto describing the enum value. 
index: The index of the enum value. Returns: An initialized EnumValueDescriptor object. """ return descriptor.EnumValueDescriptor( name=value_proto.name, index=index, number=value_proto.number, options=value_proto.options, type=None) def _ExtractSymbols(self, descriptors): """Pulls out all the symbols from descriptor protos. Args: descriptors: The messages to extract descriptors from. Yields: A two element tuple of the type name and descriptor object. """ for desc in descriptors: yield (_PrefixWithDot(desc.full_name), desc) for symbol in self._ExtractSymbols(desc.nested_types): yield symbol for enum in desc.enum_types: yield (_PrefixWithDot(enum.full_name), enum) def _GetDeps(self, dependencies): """Recursively finds dependencies for file protos. Args: dependencies: The names of the files being depended on. Yields: Each direct and indirect dependency. """ for dependency in dependencies: dep_desc = self.FindFileByName(dependency) yield dep_desc for parent_dep in dep_desc.dependencies: yield parent_dep def _GetTypeFromScope(self, package, type_name, scope): """Finds a given type name in the current scope. Args: package: The package the proto should be located in. type_name: The name of the type to be found in the scope. scope: Dict mapping short and full symbols to message and enum types. Returns: The descriptor for the requested type. """ if type_name not in scope: components = _PrefixWithDot(package).split('.') while components: possible_match = '.'.join(components + [type_name]) if possible_match in scope: type_name = possible_match break else: components.pop(-1) return scope[type_name] def _PrefixWithDot(name): return name if name.startswith('.') else '.%s' % name
apache-2.0
StefanRijnhart/OpenUpgrade
addons/account_asset/wizard/account_asset_change_duration.py
40
6014
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from lxml import etree from openerp.osv import fields, osv class asset_modify(osv.osv_memory): _name = 'asset.modify' _description = 'Modify Asset' _columns = { 'name': fields.char('Reason', size=64, required=True), 'method_number': fields.integer('Number of Depreciations', required=True), 'method_period': fields.integer('Period Length'), 'method_end': fields.date('Ending date'), 'note': fields.text('Notes'), } def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False): """ Returns views and fields for current model. @param cr: A database cursor @param user: ID of the user currently logged in @param view_id: list of fields, which required to read signatures @param view_type: defines a view type. 
it can be one of (form, tree, graph, calender, gantt, search, mdx) @param context: context arguments, like lang, time zone @param toolbar: contains a list of reports, wizards, and links related to current model @return: Returns a dictionary that contains definition for fields, views, and toolbars """ if not context: context = {} asset_obj = self.pool.get('account.asset.asset') result = super(asset_modify, self).fields_view_get(cr, uid, view_id, view_type, context=context, toolbar=toolbar, submenu=submenu) asset_id = context.get('active_id', False) active_model = context.get('active_model', '') if active_model == 'account.asset.asset' and asset_id: asset = asset_obj.browse(cr, uid, asset_id, context=context) doc = etree.XML(result['arch']) if asset.method_time == 'number': node = doc.xpath("//field[@name='method_end']")[0] node.set('invisible', '1') elif asset.method_time == 'end': node = doc.xpath("//field[@name='method_number']")[0] node.set('invisible', '1') result['arch'] = etree.tostring(doc) return result def default_get(self, cr, uid, fields, context=None): """ To get default values for the object. @param self: The object pointer. @param cr: A database cursor @param uid: ID of the user currently logged in @param fields: List of fields for which we want default values @param context: A standard dictionary @return: A dictionary which of fields with values. 
""" if not context: context = {} asset_obj = self.pool.get('account.asset.asset') res = super(asset_modify, self).default_get(cr, uid, fields, context=context) asset_id = context.get('active_id', False) asset = asset_obj.browse(cr, uid, asset_id, context=context) if 'name' in fields: res.update({'name': asset.name}) if 'method_number' in fields and asset.method_time == 'number': res.update({'method_number': asset.method_number}) if 'method_period' in fields: res.update({'method_period': asset.method_period}) if 'method_end' in fields and asset.method_time == 'end': res.update({'method_end': asset.method_end}) return res def modify(self, cr, uid, ids, context=None): """ Modifies the duration of asset for calculating depreciation and maintains the history of old values. @param self: The object pointer. @param cr: A database cursor @param uid: ID of the user currently logged in @param ids: List of Ids @param context: A standard dictionary @return: Close the wizard. """ if not context: context = {} asset_obj = self.pool.get('account.asset.asset') history_obj = self.pool.get('account.asset.history') asset_id = context.get('active_id', False) asset = asset_obj.browse(cr, uid, asset_id, context=context) data = self.browse(cr, uid, ids[0], context=context) history_vals = { 'asset_id': asset_id, 'name': data.name, 'method_time': asset.method_time, 'method_number': asset.method_number, 'method_period': asset.method_period, 'method_end': asset.method_end, 'user_id': uid, 'date': time.strftime('%Y-%m-%d'), 'note': data.note, } history_obj.create(cr, uid, history_vals, context=context) asset_vals = { 'method_number': data.method_number, 'method_period': data.method_period, 'method_end': data.method_end, } asset_obj.write(cr, uid, [asset_id], asset_vals, context=context) asset_obj.compute_depreciation_board(cr, uid, [asset_id], context=context) return {'type': 'ir.actions.act_window_close'} # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
laurenrevere/osf.io
website/project/views/tag.py
12
1748
import httplib as http from flask import request from django.core.exceptions import ValidationError from framework.auth.decorators import collect_auth from osf.models import AbstractNode from website.exceptions import InvalidTagError, NodeStateError, TagNotFoundError from website.project.decorators import ( must_be_valid_project, must_have_permission, must_not_be_registration ) # Disabled for now. Should implement pagination, or at least cap the number of # nodes serialized, before re-enabling. @collect_auth def project_tag(tag, auth, **kwargs): nodes = AbstractNode.objects.filter(tags___id=tag).can_view(auth.user).values('title', 'url') return { 'nodes': [ { 'title': node['title'], 'url': node['url'], } for node in nodes ], 'tag': tag, } @must_be_valid_project # injects project @must_have_permission('write') @must_not_be_registration def project_add_tag(auth, node, **kwargs): data = request.get_json() tag = data['tag'] if tag: try: node.add_tag(tag=tag, auth=auth) return {'status': 'success'}, http.CREATED except ValidationError: return {'status': 'error'}, http.BAD_REQUEST @must_be_valid_project # injects project @must_have_permission('write') @must_not_be_registration def project_remove_tag(auth, node, **kwargs): data = request.get_json() try: node.remove_tag(tag=data['tag'], auth=auth) except TagNotFoundError: return {'status': 'failure'}, http.CONFLICT except (InvalidTagError, NodeStateError): return {'status': 'failure'}, http.BAD_REQUEST else: return {'status': 'success'}, http.OK
apache-2.0
jalavik/invenio
invenio/legacy/websubmit/engine.py
13
82527
# This file is part of Invenio. # Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 CERN. # # Invenio is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """WebSubmit: the mechanism for the submission of new records into Invenio via a Web interface. """ __revision__ = "$Id$" # import interesting modules: import traceback import string import os import sys import time import types import re import pprint from urllib import quote_plus from cgi import escape from invenio.config import \ CFG_SITE_LANG, \ CFG_SITE_NAME, \ CFG_SITE_URL, \ CFG_WEBSUBMIT_STORAGEDIR, \ CFG_DEVEL_SITE, \ CFG_SITE_SECURE_URL, \ CFG_WEBSUBMIT_USE_MATHJAX from invenio.legacy.dbquery import Error from invenio.modules.access.engine import acc_authorize_action from invenio.legacy.webpage import page, error_page, warning_page from invenio.legacy.webuser import getUid, get_email, collect_user_info, isGuestUser, \ page_not_authorized from invenio.legacy.websubmit.config import CFG_RESERVED_SUBMISSION_FILENAMES, \ InvenioWebSubmitFunctionError, InvenioWebSubmitFunctionStop, \ InvenioWebSubmitFunctionWarning from invenio.base.i18n import gettext_set_language, wash_language from invenio.legacy.webstat.api import register_customevent from invenio.ext.logging import register_exception from invenio.utils.url import make_canonical_urlargd, redirect_to_url from 
invenio.legacy.websubmit.admin_engine import string_is_alphanumeric_including_underscore from invenio.utils.html import get_mathjax_header from invenio.legacy.websubmit.db_layer import \ get_storage_directory_of_action, \ get_longname_of_doctype, \ get_longname_of_action, \ get_num_pages_of_submission, \ get_parameter_value_for_doctype, \ submission_exists_in_log, \ log_new_pending_submission, \ log_new_completed_submission, \ update_submission_modified_date_in_log, \ update_submission_reference_in_log, \ update_submission_reference_and_status_in_log, \ get_form_fields_on_submission_page, \ get_element_description, \ get_element_check_description, \ get_form_fields_not_on_submission_page, \ function_step_is_last, \ get_collection_children_of_submission_collection, \ get_submission_collection_name, \ get_doctype_children_of_submission_collection, \ get_categories_of_doctype, \ get_doctype_details, \ get_actions_on_submission_page_for_doctype, \ get_action_details, \ get_parameters_of_function, \ get_details_of_submission, \ get_functions_for_submission_step, \ get_submissions_at_level_X_with_score_above_N, \ submission_is_finished import invenio.legacy.template websubmit_templates = invenio.legacy.template.load('websubmit') def interface(req, c=CFG_SITE_NAME, ln=CFG_SITE_LANG, doctype="", act="", startPg=1, access="", mainmenu="", fromdir="", nextPg="", nbPg="", curpage=1): """This function is called after a user has visited a document type's "homepage" and selected the type of "action" to perform. Having clicked an action-button (e.g. "Submit a New Record"), this function will be called . It performs the task of initialising a new submission session (retrieving information about the submission, creating a working submission-directory, etc), and "drawing" a submission page containing the WebSubmit form that the user uses to input the metadata to be submitted. 
When a user moves between pages in the submission interface, this function is recalled so that it can save the metadata entered into the previous page by the user, and draw the current submission-page. Note: During a submission, for each page refresh, this function will be called while the variable "step" (a form variable, seen by websubmit_webinterface, which calls this function) is 0 (ZERO). In other words, this function handles the FRONT-END phase of a submission, BEFORE the WebSubmit functions are called. @param req: (apache request object) *** NOTE: Added into this object, is a variable called "form" (req.form). This is added into the object in the index function of websubmit_webinterface. It contains a "mod_python.util.FieldStorage" instance, that contains the form-fields found on the previous submission page. @param c: (string), defaulted to CFG_SITE_NAME. The name of the Invenio installation. @param ln: (string), defaulted to CFG_SITE_LANG. The language in which to display the pages. @param doctype: (string) - the doctype ID of the doctype for which the submission is being made. @param act: (string) - The ID of the action being performed (e.g. submission of bibliographic information; modification of bibliographic information, etc). @param startPg: (integer) - Starting page for the submission? Defaults to 1. @param indir: (string) - the directory used to store all submissions of the given "type" of this submission. For example, if the submission is of the type "modify bibliographic information", this variable would contain "modify". @param access: (string) - the "access" number for the submission (e.g. 1174062451_7010). This number is also used as the name for the current working submission directory. @param mainmenu: (string) - contains the URL (minus the Invenio home stem) for the submission's home-page. (E.g. If this submission is "PICT", the "mainmenu" file would contain "/submit?doctype=PICT". 
@param fromdir: (integer) @param nextPg: (string) @param nbPg: (string) @param curpage: (integer) - the current submission page number. Defaults to 1. """ ln = wash_language(ln) # load the right message language _ = gettext_set_language(ln) sys.stdout = req # get user ID: user_info = collect_user_info(req) uid = user_info['uid'] uid_email = user_info['email'] # variable initialisation t = "" field = [] fieldhtml = [] level = [] fullDesc = [] text = '' check = [] select = [] radio = [] upload = [] txt = [] noPage = [] # Preliminary tasks if not access: # In some cases we want to take the users directly to the submit-form. # This fix makes this possible - as it generates the required access # parameter if it is not present. pid = os.getpid() now = time.time() access = "%i_%s" % (now, pid) # check we have minimum fields if not doctype or not act or not access: ## We don't have all the necessary information to go ahead ## with this submission: return warning_page(_("Not enough information to go ahead with the submission."), req, ln) try: assert(not access or re.match('\d+_\d+', access)) except AssertionError: register_exception(req=req, prefix='doctype="%s", access="%s"' % (doctype, access)) return warning_page(_("Invalid parameters"), req, ln) if doctype and act: ## Let's clean the input details = get_details_of_submission(doctype, act) if not details: return warning_page(_("Invalid doctype and act parameters"), req, ln) doctype = details[0] act = details[1] ## Before continuing to display the submission form interface, ## verify that this submission has not already been completed: if submission_is_finished(doctype, act, access, uid_email): ## This submission has already been completed. ## This situation can arise when, having completed a submission, ## the user uses the browser's back-button to go back to the form ## stage of the submission and then tries to submit once more. ## This is unsafe and should not be allowed. 
Instead of re-displaying ## the submission forms, display an error message to the user: wrnmsg = """<b>This submission has been completed. Please go to the""" \ """ <a href="/submit?doctype=%(doctype)s&amp;ln=%(ln)s">""" \ """main menu</a> to start a new submission.</b>""" \ % { 'doctype' : quote_plus(doctype), 'ln' : ln } return warning_page(wrnmsg, req, ln) ## retrieve the action and doctype data: ## Concatenate action ID and doctype ID to make the submission ID: subname = "%s%s" % (act, doctype) ## Get the submission storage directory from the DB: submission_dir = get_storage_directory_of_action(act) if submission_dir: indir = submission_dir else: ## Unable to determine the submission-directory: return warning_page(_("Unable to find the submission directory for the action: %(x_dir)s", x_dir=escape(str(act))), req, ln) ## get the document type's long-name: doctype_lname = get_longname_of_doctype(doctype) if doctype_lname is not None: ## Got the doctype long-name: replace spaces with HTML chars: docname = doctype_lname.replace(" ", "&nbsp;") else: ## Unknown document type: return warning_page(_("Unknown document type"), req, ln) ## get the action's long-name: actname = get_longname_of_action(act) if actname is None: ## Unknown action: return warning_page(_("Unknown action"), req, ln) ## Get the number of pages for this submission: num_submission_pages = get_num_pages_of_submission(subname) if num_submission_pages is not None: nbpages = num_submission_pages else: ## Unable to determine the number of pages for this submission: return warning_page(_("Unable to determine the number of submission pages."), req, ln) ## If unknown, get the current page of submission: if startPg != "" and curpage in ("", 0): curpage = startPg ## retrieve the name of the file in which the reference of ## the submitted document will be stored rn_filename = get_parameter_value_for_doctype(doctype, "edsrn") if rn_filename is not None: edsrn = rn_filename else: ## Unknown value for edsrn - set 
it to an empty string: edsrn = "" ## This defines the path to the directory containing the action data curdir = os.path.join(CFG_WEBSUBMIT_STORAGEDIR, indir, doctype, access) try: assert(curdir == os.path.abspath(curdir)) except AssertionError: register_exception(req=req, prefix='indir="%s", doctype="%s", access="%s"' % (indir, doctype, access)) return warning_page(_("Invalid parameters"), req, ln) ## if this submission comes from another one (fromdir is then set) ## We retrieve the previous submission directory and put it in the proper one if fromdir != "": olddir = os.path.join(CFG_WEBSUBMIT_STORAGEDIR, fromdir, doctype, access) try: assert(olddir == os.path.abspath(olddir)) except AssertionError: register_exception(req=req, prefix='fromdir="%s", doctype="%s", access="%s"' % (fromdir, doctype, access)) return warning_page(_("Invalid parameters"), req, ln) if os.path.exists(olddir): os.rename(olddir, curdir) ## If the submission directory still does not exist, we create it if not os.path.exists(curdir): try: os.makedirs(curdir) except Exception as e: register_exception(req=req, alert_admin=True) return warning_page(_("Unable to create a directory for this submission. 
The administrator has been alerted."), req, ln) ## Retrieve the previous page, as submitted to curdir (before we ## overwrite it with our curpage as declared from the incoming ## form) try: fp = open(os.path.join(curdir, "curpage")) previous_page_from_disk = fp.read() fp.close() except: previous_page_from_disk = "1" # retrieve the original main menu url and save it in the "mainmenu" file if mainmenu != "": fp = open(os.path.join(curdir, "mainmenu"), "w") fp.write(mainmenu) fp.close() # and if the file containing the URL to the main menu exists # we retrieve it and store it in the $mainmenu variable if os.path.exists(os.path.join(curdir, "mainmenu")): fp = open(os.path.join(curdir, "mainmenu"), "r"); mainmenu = fp.read() fp.close() else: mainmenu = "%s/submit" % (CFG_SITE_URL,) # various authentication related tasks... if uid_email != "guest" and uid_email != "": #First save the username (email address) in the SuE file. This way bibconvert will be able to use it if needed fp = open(os.path.join(curdir, "SuE"), "w") fp.write(uid_email) fp.close() if os.path.exists(os.path.join(curdir, "combo%s" % doctype)): fp = open(os.path.join(curdir, "combo%s" % doctype), "r"); categ = fp.read() fp.close() else: categ = req.form.get('combo%s' % doctype, '*') # is user authorized to perform this action? (auth_code, auth_message) = acc_authorize_action(req, 'submit', \ authorized_if_no_roles=not isGuestUser(uid), \ verbose=0, \ doctype=doctype, \ act=act, \ categ=categ) if not auth_code == 0: return warning_page("""<center><font color="red">%s</font></center>""" % auth_message, req, ln) ## update the "journal of submission": ## Does the submission already exist in the log? 
submission_exists = \ submission_exists_in_log(doctype, act, access, uid_email) if submission_exists == 1: ## update the modification-date of this submission in the log: update_submission_modified_date_in_log(doctype, act, access, uid_email) else: ## Submission doesn't exist in log - create it: log_new_pending_submission(doctype, act, access, uid_email) ## Let's write in curdir file under curdir the curdir value ## in case e.g. it is needed in FFT. fp = open(os.path.join(curdir, "curdir"), "w") fp.write(curdir) fp.close() ## Let's write in ln file the current language fp = open(os.path.join(curdir, "ln"), "w") fp.write(ln) fp.close() # Save the form fields entered in the previous submission page # If the form was sent with the GET method form = dict(req.form) value = "" # we parse all the form variables for key, formfields in form.items(): filename = key.replace("[]", "") file_to_open = os.path.join(curdir, filename) try: assert(file_to_open == os.path.abspath(file_to_open)) except AssertionError: register_exception(req=req, prefix='curdir="%s", filename="%s"' % (curdir, filename)) return warning_page(_("Invalid parameters"), req, ln) # Do not write reserved filenames to disk if filename in CFG_RESERVED_SUBMISSION_FILENAMES: # Unless there is really an element with that name on this # page or previous one (either visited, or declared to be # visited), which means that admin authorized it. 
if not ((str(curpage).isdigit() and \ filename in [submission_field[3] for submission_field in \ get_form_fields_on_submission_page(subname, curpage)]) or \ (str(curpage).isdigit() and int(curpage) > 1 and \ filename in [submission_field[3] for submission_field in \ get_form_fields_on_submission_page(subname, int(curpage) - 1)]) or \ (previous_page_from_disk.isdigit() and \ filename in [submission_field[3] for submission_field in \ get_form_fields_on_submission_page(subname, int(previous_page_from_disk))])): # Still this will filter out reserved field names that # might have been called by functions such as # Create_Modify_Interface function in MBI step, or # dynamic fields in response elements, but that is # unlikely to be a problem. continue # Skip variables containing characters that are not allowed in # WebSubmit elements if not string_is_alphanumeric_including_underscore(filename): continue # the field is an array if isinstance(formfields, types.ListType): fp = open(file_to_open, "w") for formfield in formfields: #stripslashes(value) value = specialchars(formfield) fp.write(value+"\n") fp.close() # the field is a normal string elif isinstance(formfields, types.StringTypes) and formfields != "": value = formfields fp = open(file_to_open, "w") fp.write(specialchars(value)) fp.close() # the field is a file elif hasattr(formfields,"filename") and formfields.filename: dir_to_open = os.path.join(curdir, 'files', key) try: assert(dir_to_open == os.path.abspath(dir_to_open)) assert(dir_to_open.startswith(CFG_WEBSUBMIT_STORAGEDIR)) except AssertionError: register_exception(req=req, prefix='curdir="%s", key="%s"' % (curdir, key)) return warning_page(_("Invalid parameters"), req, ln) if not os.path.exists(dir_to_open): try: os.makedirs(dir_to_open) except: register_exception(req=req, alert_admin=True) return warning_page(_("Cannot create submission directory. 
The administrator has been alerted."), req, ln) filename = formfields.filename ## Before saving the file to disc, wash the filename (in particular ## washing away UNIX and Windows (e.g. DFS) paths): filename = os.path.basename(filename.split('\\')[-1]) filename = filename.strip() if filename != "": fp = open(os.path.join(dir_to_open, filename), "w") while True: buf = formfields.read(10240) if buf: fp.write(buf) else: break fp.close() fp = open(os.path.join(curdir, "lastuploadedfile"), "w") fp.write(filename) fp.close() fp = open(file_to_open, "w") fp.write(filename) fp.close() else: return warning_page(_("No file uploaded?"), req, ln) ## if the found field is the reference of the document, ## save this value in the "journal of submissions": if uid_email != "" and uid_email != "guest": if key == edsrn: update_submission_reference_in_log(doctype, access, uid_email, value) ## create the interface: subname = "%s%s" % (act, doctype) ## Get all of the form fields that appear on this page, ordered by fieldnum: form_fields = get_form_fields_on_submission_page(subname, curpage) full_fields = [] values = [] the_globals = { 'doctype' : doctype, 'action' : action, 'access' : access, 'ln' : ln, 'curdir' : curdir, 'uid' : uid, 'uid_email' : uid_email, 'form' : form, 'act' : act, 'action' : act, ## for backward compatibility 'req' : req, 'user_info' : user_info, 'InvenioWebSubmitFunctionError' : InvenioWebSubmitFunctionError, '__websubmit_in_jail__' : True, '__builtins__' : globals()['__builtins__'] } for field_instance in form_fields: full_field = {} ## Retrieve the field's description: element_descr = get_element_description(field_instance[3]) try: assert(element_descr is not None) except AssertionError: msg = _("Unknown form field found on submission page.") register_exception(req=req, alert_admin=True, prefix=msg) ## The form field doesn't seem to exist - return with error message: return warning_page(_("Unknown form field found on submission page."), req, ln) if 
element_descr[8] is None: val = "" else: val = element_descr[8] ## we also retrieve and add the javascript code of the checking function, if needed ## Set it to empty string to begin with: full_field['javascript'] = '' if field_instance[7] != '': check_descr = get_element_check_description(field_instance[7]) if check_descr is not None: ## Retrieved the check description: full_field['javascript'] = check_descr full_field['type'] = element_descr[3] full_field['name'] = field_instance[3] full_field['rows'] = element_descr[5] full_field['cols'] = element_descr[6] full_field['val'] = val full_field['size'] = element_descr[4] full_field['maxlength'] = element_descr[7] full_field['htmlcode'] = element_descr[9] full_field['typename'] = field_instance[1] ## TODO: Investigate this, Not used? ## It also seems to refer to pagenum. # The 'R' fields must be executed in the engine's environment, # as the runtime functions access some global and local # variables. if full_field ['type'] == 'R': try: co = compile (full_field ['htmlcode'].replace("\r\n","\n"), "<string>", "exec") the_globals['text'] = '' the_globals['custom_level'] = None exec co in the_globals text = the_globals['text'] # Also get the custom_level if it's define in the element description custom_level = the_globals.get('custom_level') # Make sure custom_level has an appropriate value or default to 'O' if custom_level not in ('M', 'O', None): custom_level = 'O' except: register_exception(req=req, alert_admin=True, prefix="Error in evaluating response element %s with globals %s" % (pprint.pformat(full_field), pprint.pformat(the_globals))) raise else: text = websubmit_templates.tmpl_submit_field (ln = ln, field = full_field) # Provide a default value for the custom_level custom_level = None # we now determine the exact type of the created field if full_field['type'] not in [ 'D','R']: field.append(full_field['name']) level.append(custom_level is None and field_instance[5] or custom_level) 
fullDesc.append(field_instance[4]) txt.append(field_instance[6]) check.append(field_instance[7]) # If the field is not user-defined, we try to determine its type # (select, radio, file upload...) # check whether it is a select field or not if re.search("SELECT", text, re.IGNORECASE) is not None: select.append(1) else: select.append(0) # checks whether it is a radio field or not if re.search(r"TYPE=[\"']?radio", text, re.IGNORECASE) is not None: radio.append(1) else: radio.append(0) # checks whether it is a file upload or not if re.search(r"TYPE=[\"']?file", text, re.IGNORECASE) is not None: upload.append(1) else: upload.append(0) # if the field description contains the "<COMBO>" string, replace # it by the category selected on the document page submission page combofile = "combo%s" % doctype if os.path.exists("%s/%s" % (curdir, combofile)): f = open("%s/%s" % (curdir, combofile), "r") combo = f.read() f.close() else: combo = "" text = text.replace("<COMBO>", combo) # if there is a <YYYY> tag in it, replace it by the current year year = time.strftime("%Y"); text = text.replace("<YYYY>", year) # if there is a <TODAY> tag in it, replace it by the current year today = time.strftime("%d/%m/%Y"); text = text.replace("<TODAY>", today) fieldhtml.append(text) else: select.append(0) radio.append(0) upload.append(0) # field.append(value) - initial version, not working with JS, taking a submitted value field.append(field_instance[3]) level.append(custom_level is None and field_instance[5] or custom_level) txt.append(field_instance[6]) fullDesc.append(field_instance[4]) check.append(field_instance[7]) fieldhtml.append(text) full_field['fullDesc'] = field_instance[4] full_field['text'] = text # If a file exists with the name of the field we extract the saved value text = '' if os.path.exists(os.path.join(curdir, full_field['name'])): file = open(os.path.join(curdir, full_field['name']), "r"); text = file.read() file.close() values.append(text) full_fields.append(full_field) 
returnto = {} if int(curpage) == int(nbpages): subname = "%s%s" % (act, doctype) other_form_fields = \ get_form_fields_not_on_submission_page(subname, curpage) nbFields = 0 message = "" fullcheck_select = [] fullcheck_radio = [] fullcheck_upload = [] fullcheck_field = [] fullcheck_level = [] fullcheck_txt = [] fullcheck_noPage = [] fullcheck_check = [] for field_instance in other_form_fields: if field_instance[5] == "M": ## If this field is mandatory, get its description: element_descr = get_element_description(field_instance[3]) try: assert(element_descr is not None) except AssertionError: msg = _("Unknown form field found on submission page.") register_exception(req=req, alert_admin=True, prefix=msg) ## The form field doesn't seem to exist - return with error message: return warning_page(_("Unknown form field found on submission page."), req, ln) if element_descr[3] in ['D', 'R']: if element_descr[3] == "D": text = element_descr[9] else: text = eval(element_descr[9]) formfields = text.split(">") for formfield in formfields: match = re.match("name=([^ <>]+)", formfield, re.IGNORECASE) if match is not None: names = match.groups for value in names: if value != "": value = re.compile("[\"']+").sub("", value) fullcheck_field.append(value) fullcheck_level.append(field_instance[5]) fullcheck_txt.append(field_instance[6]) fullcheck_noPage.append(field_instance[1]) fullcheck_check.append(field_instance[7]) nbFields = nbFields + 1 else: fullcheck_noPage.append(field_instance[1]) fullcheck_field.append(field_instance[3]) fullcheck_level.append(field_instance[5]) fullcheck_txt.append(field_instance[6]) fullcheck_check.append(field_instance[7]) nbFields = nbFields+1 # tests each mandatory field fld = 0 res = 1 for i in xrange(nbFields): res = 1 if not os.path.exists(os.path.join(curdir, fullcheck_field[i])): res = 0 else: file = open(os.path.join(curdir, fullcheck_field[i]), "r") text = file.read() if text == '': res = 0 else: if text == "Select:": res = 0 if res == 0: fld = 
i
                break

    if not res:
        ## A mandatory field was left empty: tell the interface JS which
        ## field/page to send the user back to:
        returnto = {
            'field' : fullcheck_txt[fld],
            'page'  : fullcheck_noPage[fld],
        }

    t += websubmit_templates.tmpl_page_interface(
          ln = ln,
          docname = docname,
          actname = actname,
          curpage = curpage,
          nbpages = nbpages,
          nextPg = nextPg,
          access = access,
          nbPg = nbPg,
          doctype = doctype,
          act = act,
          fields = full_fields,
          javascript = websubmit_templates.tmpl_page_interface_js(
                        ln = ln,
                        upload = upload,
                        field = field,
                        fieldhtml = fieldhtml,
                        txt = txt,
                        check = check,
                        level = level,
                        curdir = curdir,
                        values = values,
                        select = select,
                        radio = radio,
                        curpage = curpage,
                        nbpages = nbpages,
                        returnto = returnto,
                       ),
          mainmenu = mainmenu,
         )
    t += websubmit_templates.tmpl_page_do_not_leave_submission_js(ln)

    # start display:
    req.content_type = "text/html"
    req.send_http_header()
    p_navtrail = """<a href="/submit?ln=%(ln)s" class="navtrail">%(submit)s</a>&nbsp;>&nbsp;<a href="/submit?doctype=%(doctype)s&amp;ln=%(ln)s" class="navtrail">%(docname)s</a>&nbsp;""" % {
                   'submit' : _("Submit"),
                   'doctype' : quote_plus(doctype),
                   'docname' : docname,
                   'ln' : ln
                 }
    ## add MathJax if wanted
    if CFG_WEBSUBMIT_USE_MATHJAX:
        metaheaderadd = get_mathjax_header(req.is_https())
        metaheaderadd += websubmit_templates.tmpl_mathpreview_header(ln, req.is_https())
    else:
        metaheaderadd = ''
    return page(title= actname,
                body = t,
                navtrail = p_navtrail,
                description = "submit documents",
                keywords = "submit",
                uid = uid,
                language = ln,
                req = req,
                navmenuid='submit',
                metaheaderadd=metaheaderadd)

def endaction(req,
              c=CFG_SITE_NAME,
              ln=CFG_SITE_LANG,
              doctype="",
              act="",
              startPg=1,
              access="",
              mainmenu="",
              fromdir="",
              nextPg="",
              nbPg="",
              curpage=1,
              step=1,
              mode="U"):
    """Having filled-in the WebSubmit form created for metadata by the interface
       function, the user clicks a button to either "finish the submission" or
       to "proceed" to the next stage of the submission. At this point, a
       variable called "step" will be given a value of 1 or above, which means
       that this function is called by websubmit_webinterface.
       So, during all non-zero steps of the submission, this function is called.

       In other words, this function is called during the BACK-END phase of a
       submission, in which WebSubmit *functions* are being called.

       The function first ensures that all of the WebSubmit form field values
       have been saved in the current working submission directory, in text-
       files with the same name as the field elements have. It then determines
       the functions to be called for the given step of the submission, and
       executes them.
       Following this, if this is the last step of the submission, it logs the
       submission as "finished" in the journal of submissions.

       @param req: (apache request object) *** NOTE: Added into this object, is
        a variable called "form" (req.form). This is added into the object in
        the index function of websubmit_webinterface. It contains a
        "mod_python.util.FieldStorage" instance, that contains the form-fields
        found on the previous submission page.
       @param c: (string), defaulted to CFG_SITE_NAME. The name of the Invenio
        installation.
       @param ln: (string), defaulted to CFG_SITE_LANG. The language in which
        to display the pages.
       @param doctype: (string) - the doctype ID of the doctype for which the
        submission is being made.
       @param act: (string) - The ID of the action being performed (e.g.
        submission of bibliographic information; modification of bibliographic
        information, etc).
       @param startPg: (integer) - Starting page for the submission? Defaults
        to 1.
       @param access: (string) - the "access" number for the submission (e.g.
        1174062451_7010). This number is also used as the name for the current
        working submission directory.
       @param mainmenu: (string) - contains the URL (minus the Invenio home
        stem) for the submission's home-page. (E.g. if this submission is
        "PICT", the "mainmenu" file would contain "/submit?doctype=PICT".
       @param fromdir:
       @param nextPg:
       @param nbPg:
       @param curpage: (integer) - the current submission page number. Defaults
        to 1.
       @param step: (integer) - the current step of the submission. Defaults to
        1.
       @param mode:
       @return: (string) - the HTML of the "end-action" page to be displayed.
        NOTE(review): the storage sub-directory ("indir") is not a parameter
        here - it is looked up from the DB via get_storage_directory_of_action.
    """
    # load the right message language
    _ = gettext_set_language(ln)
    dismode = mode
    ln = wash_language(ln)
    # NOTE(review): stdout is rebound to the request object - presumably so
    # that legacy WebSubmit functions that "print" write into the response;
    # confirm before changing.
    sys.stdout = req
    rn = ""
    t = ""
    # get user ID:
    uid = getUid(req)
    uid_email = get_email(uid)
    ## Get the submission storage directory from the DB:
    submission_dir = get_storage_directory_of_action(act)
    if submission_dir:
        indir = submission_dir
    else:
        ## Unable to determine the submission-directory:
        return warning_page(_("Unable to find the submission directory for the action: %(x_dir)s", x_dir=escape(str(act))), req, ln)
    curdir = os.path.join(CFG_WEBSUBMIT_STORAGEDIR, indir, doctype, access)
    ## Recover the category chosen on a previous page, if it was saved to
    ## disk; otherwise fall back to the submitted form value (default "*"):
    if os.path.exists(os.path.join(curdir, "combo%s" % doctype)):
        fp = open(os.path.join(curdir, "combo%s" % doctype), "r");
        categ = fp.read()
        fp.close()
    else:
        categ = req.form.get('combo%s' % doctype, '*')

    # is user authorized to perform this action?
    (auth_code, auth_message) = acc_authorize_action(req, 'submit', \
                                                    authorized_if_no_roles=not isGuestUser(uid), \
                                                    verbose=0, \
                                                    doctype=doctype, \
                                                    act=act, \
                                                    categ=categ)
    if not auth_code == 0:
        return warning_page("""<center><font color="red">%s</font></center>""" % auth_message, req, ln)

    # Preliminary tasks
    ## check we have minimum fields
    if not doctype or not act or not access:
        ## We don't have all the necessary information to go ahead
        ## with this submission:
        return warning_page(_("Not enough information to go ahead with the submission."), req, ln)

    if doctype and act:
        ## Let's clean the input (canonicalise doctype/act case via the DB):
        details = get_details_of_submission(doctype, act)
        if not details:
            return warning_page(_("Invalid doctype and act parameters"), req, ln)
        doctype = details[0]
        act = details[1]

    try:
        ## "access" must look like "<digits>_<digits>" (or be empty):
        assert(not access or re.match('\d+_\d+', access))
    except AssertionError:
        register_exception(req=req, prefix='doctype="%s", access="%s"' % (doctype, access))
        return warning_page(_("Invalid parameters"), req, ln)

    ## Before continuing to process the submitted data, verify that
    ## this submission has not already been completed:
    if submission_is_finished(doctype, act, access, uid_email):
        ## This submission has already been completed.
        ## This situation can arise when, having completed a submission,
        ## the user uses the browser's back-button to go back to the form
        ## stage of the submission and then tries to submit once more.
        ## This is unsafe and should not be allowed. Instead of re-processing
        ## the submitted data, display an error message to the user:
        wrnmsg = """<b>This submission has been completed. Please go to the""" \
                 """ <a href="/submit?doctype=%(doctype)s&amp;ln=%(ln)s">""" \
                 """main menu</a> to start a new submission.</b>""" \
                 % { 'doctype' : quote_plus(doctype), 'ln' : ln }
        return warning_page(wrnmsg, req, ln)

    ## Get the number of pages for this submission:
    subname = "%s%s" % (act, doctype)

    ## retrieve the action and doctype data
    ## Get the submission storage directory from the DB:
    submission_dir = get_storage_directory_of_action(act)
    if submission_dir:
        indir = submission_dir
    else:
        ## Unable to determine the submission-directory:
        return warning_page(_("Unable to find the submission directory for the action: %(x_dir)s", x_dir=escape(str(act))), req, ln)

    # The following words are reserved and should not be used as field names
    # NOTE(review): this list appears unused below - the reserved-name check
    # later uses CFG_RESERVED_SUBMISSION_FILENAMES instead; confirm before
    # removing.
    reserved_words = ["stop", "file", "nextPg", "startPg", "access", "curpage", "nbPg", "act", \
                      "indir", "doctype", "mode", "step", "deleted", "file_path", "userfile_name"]

    # This defines the path to the directory containing the action data
    curdir = os.path.join(CFG_WEBSUBMIT_STORAGEDIR, indir, doctype, access)
    try:
        ## Guard against path-traversal in the components of curdir:
        assert(curdir == os.path.abspath(curdir))
    except AssertionError:
        register_exception(req=req, prefix='indir="%s", doctype=%s, access=%s' % (indir, doctype, access))
        return warning_page(_("Invalid parameters"), req, ln)

    ## If the submission directory still does not exist, we create it
    if not os.path.exists(curdir):
        try:
            os.makedirs(curdir)
        except Exception as e:
            register_exception(req=req, alert_admin=True)
            return warning_page(_("Unable to create a directory for this submission. The administrator has been alerted."), req, ln)

    # retrieve the original main menu url and save it in the "mainmenu" file
    if mainmenu != "":
        fp = open(os.path.join(curdir, "mainmenu"), "w")
        fp.write(mainmenu)
        fp.close()
    # and if the file containing the URL to the main menu exists
    # we retrieve it and store it in the $mainmenu variable
    if os.path.exists(os.path.join(curdir, "mainmenu")):
        fp = open(os.path.join(curdir, "mainmenu"), "r");
        mainmenu = fp.read()
        fp.close()
    else:
        mainmenu = "%s/submit" % (CFG_SITE_URL,)

    num_submission_pages = get_num_pages_of_submission(subname)
    if num_submission_pages is not None:
        nbpages = num_submission_pages
    else:
        ## Unable to determine the number of pages for this submission:
        return warning_page(_("Unable to determine the number of submission pages."), \
                            req, ln)

    ## Retrieve the previous page, as submitted to curdir (before we
    ## overwrite it with our curpage as declared from the incoming
    ## form)
    try:
        fp = open(os.path.join(curdir, "curpage"))
        previous_page_from_disk = fp.read()
        fp.close()
    except:
        # NOTE(review): bare except - any failure (missing file, IO error)
        # silently falls back to the last page number.
        previous_page_from_disk = str(num_submission_pages)

    ## retrieve the name of the file in which the reference of
    ## the submitted document will be stored
    rn_filename = get_parameter_value_for_doctype(doctype, "edsrn")
    if rn_filename is not None:
        edsrn = rn_filename
    else:
        ## Unknown value for edsrn - set it to an empty string:
        edsrn = ""

    ## Determine whether the action is finished
    ## (ie there are no other steps after the current one):
    finished = function_step_is_last(doctype, act, step)

    ## Let's write in curdir file under curdir the curdir value
    ## in case e.g. it is needed in FFT.
    fp = open(os.path.join(curdir, "curdir"), "w")
    fp.write(curdir)
    fp.close()

    ## Let's write in ln file the current language
    fp = open(os.path.join(curdir, "ln"), "w")
    fp.write(ln)
    fp.close()

    # Save the form fields entered in the previous submission page
    # If the form was sent with the GET method
    form = req.form
    value = ""
    # we parse all the form variables
    for key in form.keys():
        formfields = form[key]
        filename = key.replace("[]", "")
        file_to_open = os.path.join(curdir, filename)
        try:
            ## Guard against path-traversal via the form-field name:
            assert(file_to_open == os.path.abspath(file_to_open))
            assert(file_to_open.startswith(CFG_WEBSUBMIT_STORAGEDIR))
        except AssertionError:
            register_exception(req=req, prefix='curdir="%s", filename="%s"' % (curdir, filename))
            return warning_page(_("Invalid parameters"), req, ln)

        # Do not write reserved filenames to disk
        if filename in CFG_RESERVED_SUBMISSION_FILENAMES:
            # Unless there is really an element with that name on this
            # page, or on the previously visited one, which means that
            # admin authorized it. Note that in endaction() curpage is
            # equivalent to the "previous" page value
            if not ((previous_page_from_disk.isdigit() and \
                     filename in [submission_field[3] for submission_field in \
                                  get_form_fields_on_submission_page(subname, int(previous_page_from_disk))]) or \
                    (str(curpage).isdigit() and int(curpage) > 1 and \
                     filename in [submission_field[3] for submission_field in \
                                  get_form_fields_on_submission_page(subname, int(curpage) - 1)])):
                # might have been called by functions such as
                # Create_Modify_Interface function in MBI step, or
                # dynamic fields in response elements, but that is
                # unlikely to be a problem.
                continue

        # Skip variables containing characters that are not allowed in
        # WebSubmit elements
        if not string_is_alphanumeric_including_underscore(filename):
            continue

        # the field is an array: write one escaped value per line
        if isinstance(formfields, types.ListType):
            fp = open(file_to_open, "w")
            for formfield in formfields:
                #stripslashes(value)
                value = specialchars(formfield)
                fp.write(value+"\n")
            fp.close()
        # the field is a normal string
        elif isinstance(formfields, types.StringTypes) and formfields != "":
            value = formfields
            fp = open(file_to_open, "w")
            fp.write(specialchars(value))
            fp.close()
        # the field is a file: save the upload under curdir/files/<key>/
        elif hasattr(formfields, "filename") and formfields.filename:
            dir_to_open = os.path.join(curdir, 'files', key)
            try:
                assert(dir_to_open == os.path.abspath(dir_to_open))
                assert(dir_to_open.startswith(CFG_WEBSUBMIT_STORAGEDIR))
            except AssertionError:
                register_exception(req=req, prefix='curdir="%s", key="%s"' % (curdir, key))
                return warning_page(_("Invalid parameters"), req, ln)
            if not os.path.exists(dir_to_open):
                try:
                    os.makedirs(dir_to_open)
                except:
                    register_exception(req=req, alert_admin=True)
                    return warning_page(_("Cannot create submission directory. The administrator has been alerted."), req, ln)
            filename = formfields.filename
            ## Before saving the file to disc, wash the filename (in particular
            ## washing away UNIX and Windows (e.g. DFS) paths):
            filename = os.path.basename(filename.split('\\')[-1])
            filename = filename.strip()
            if filename != "":
                ## Copy the upload in 10KB chunks:
                fp = open(os.path.join(dir_to_open, filename), "w")
                while True:
                    buf = formfields.file.read(10240)
                    if buf:
                        fp.write(buf)
                    else:
                        break
                fp.close()
                fp = open(os.path.join(curdir, "lastuploadedfile"), "w")
                fp.write(filename)
                fp.close()
                fp = open(file_to_open, "w")
                fp.write(filename)
                fp.close()
            else:
                return warning_page(_("No file uploaded?"), req, ln)

        ## if the found field is the reference of the document
        ## we save this value in the "journal of submissions"
        if uid_email != "" and uid_email != "guest":
            if key == edsrn:
                update_submission_reference_in_log(doctype, access, uid_email, value)

    ## get the document type's long-name:
    doctype_lname = get_longname_of_doctype(doctype)
    if doctype_lname is not None:
        ## Got the doctype long-name: replace spaces with HTML chars:
        docname = doctype_lname.replace(" ", "&nbsp;")
    else:
        ## Unknown document type:
        return warning_page(_("Unknown document type"), req, ln)

    ## get the action's long-name:
    actname = get_longname_of_action(act)
    if actname is None:
        ## Unknown action:
        return warning_page(_("Unknown action"), req, ln)

    ## Determine whether the action is finished
    ## (ie there are no other steps after the current one):
    last_step = function_step_is_last(doctype, act, step)

    next_action = '' ## The next action to be proposed to the user

    # Prints the action details, returning the mandatory score
    action_score = action_details(doctype, act)
    current_level = get_level(doctype, act)

    # Calls all the function's actions
    function_content = ''
    try:
        ## Handle the execution of the functions for this
        ## submission/step:
        start_time = time.time()
        (function_content, last_step, action_score, rn) = \
                           print_function_calls(req=req,
                                                doctype=doctype,
                                                action=act,
                                                step=step,
                                                form=form,
                                                start_time=start_time,
                                                access=access,
                                                curdir=curdir,
                                                dismode=mode,
                                                rn=rn,
                                                last_step=last_step,
                                                action_score=action_score,
                                                ln=ln)
    except InvenioWebSubmitFunctionError as e:
        register_exception(req=req, alert_admin=True, prefix='doctype="%s", action="%s", step="%s", form="%s", start_time="%s"' % (doctype, act, step, form, start_time))
        ## There was a serious function-error. Execution ends.
        if CFG_DEVEL_SITE:
            raise
        else:
            return warning_page(_("A serious function-error has been encountered. Adminstrators have been alerted. <br /><em>Please not that this might be due to wrong characters inserted into the form</em> (e.g. by copy and pasting some text from a PDF file)."), req, ln)
    except InvenioWebSubmitFunctionStop as e:
        ## For one reason or another, one of the functions has determined that
        ## the data-processing phase (i.e. the functions execution) should be
        ## halted and the user should be returned to the form interface once
        ## more. (NOTE: Redirecting the user to the Web-form interface is
        ## currently done using JavaScript. The "InvenioWebSubmitFunctionStop"
        ## exception contains a "value" string, which is effectively JavaScript
        ## - probably an alert box and a form that is submitted). **THIS WILL
        ## CHANGE IN THE FUTURE WHEN JavaScript IS REMOVED!**
        if e.value is not None:
            function_content = e.value
        else:
            # NOTE(review): the exception object itself becomes the page
            # content here - presumably relying on its string representation.
            function_content = e
    else:
        ## No function exceptions (InvenioWebSubmitFunctionStop,
        ## InvenioWebSubmitFunctionError) were raised by the functions. Propose
        ## the next action (if applicable), and log the submission as finished:

        ## If the action was mandatory we propose the next
        ## mandatory action (if any)
        if action_score != -1 and last_step == 1:
            next_action = Propose_Next_Action(doctype, \
                                              action_score, \
                                              access, \
                                              current_level, \
                                              indir)

        ## If we are in the last step of an action, we can update
        ## the "journal of submissions"
        if last_step == 1:
            if uid_email != "" and uid_email != "guest":
                ## update the "journal of submission":
                ## Does the submission already exist in the log?
                submission_exists = \
                     submission_exists_in_log(doctype, act, access, uid_email)
                if submission_exists == 1:
                    ## update the rn and status to finished for this submission
                    ## in the log:
                    update_submission_reference_and_status_in_log(doctype, \
                                                                  act, \
                                                                  access, \
                                                                  uid_email, \
                                                                  rn, \
                                                                  "finished")
                else:
                    ## Submission doesn't exist in log - create it:
                    log_new_completed_submission(doctype, \
                                                 act, \
                                                 access, \
                                                 uid_email, \
                                                 rn)

    ## Having executed the functions, create the page that will be displayed
    ## to the user:
    t = websubmit_templates.tmpl_page_endaction(
          ln = ln,
          # these fields are necessary for the navigation
          nextPg = nextPg,
          startPg = startPg,
          access = access,
          curpage = curpage,
          nbPg = nbPg,
          nbpages = nbpages,
          doctype = doctype,
          act = act,
          docname = docname,
          actname = actname,
          mainmenu = mainmenu,
          finished = finished,
          function_content = function_content,
          next_action = next_action,
        )

    if finished:
        # register event in webstat
        try:
            register_customevent("websubmissions", [get_longname_of_doctype(doctype)])
        except:
            register_exception(suffix="Do the webstat tables exists? Try with 'webstatadmin --load-config'")
    else:
        t += websubmit_templates.tmpl_page_do_not_leave_submission_js(ln)

    # start display:
    req.content_type = "text/html"
    req.send_http_header()

    p_navtrail = '<a href="/submit?ln='+ln+'" class="navtrail">' + _("Submit") +\
                 """</a>&nbsp;>&nbsp;<a href="/submit?doctype=%(doctype)s&amp;ln=%(ln)s" class="navtrail">%(docname)s</a>""" % {
                   'doctype' : quote_plus(doctype),
                   'docname' : docname,
                   'ln' : ln,
                 }

    ## add MathJax if wanted
    if CFG_WEBSUBMIT_USE_MATHJAX:
        metaheaderadd = get_mathjax_header(req.is_https())
        metaheaderadd += websubmit_templates.tmpl_mathpreview_header(ln, req.is_https())
    else:
        metaheaderadd = ''

    return page(title= actname,
                body = t,
                navtrail = p_navtrail,
                description="submit documents",
                keywords="submit",
                uid = uid,
                language = ln,
                req = req,
                navmenuid='submit',
                metaheaderadd=metaheaderadd)

def home(req, catalogues_text, c=CFG_SITE_NAME, ln=CFG_SITE_LANG):
    """This function generates the WebSubmit "home page".
       Basically, this page contains a list of submission-collections
       in WebSubmit, and gives links to the various document-type
       submissions.
       Document-types only appear on this page when they have been
       connected to a submission-collection in WebSubmit.
       @param req: (apache request object)
       @param catalogues_text (string): the computed catalogues tree
       @param c: (string) - defaults to CFG_SITE_NAME
       @param ln: (string) - The Invenio interface language of choice.
        Defaults to CFG_SITE_LANG (the default language of the installation).
       @return: (string) - the Web page to be displayed.
""" ln = wash_language(ln) # get user ID: try: uid = getUid(req) user_info = collect_user_info(req) except Error as e: return error_page(e, req, ln) # load the right message language _ = gettext_set_language(ln) finaltext = websubmit_templates.tmpl_submit_home_page( ln = ln, catalogues = catalogues_text, user_info = user_info, ) return page(title=_("Submit"), body=finaltext, navtrail=[], description="submit documents", keywords="submit", uid=uid, language=ln, req=req, navmenuid='submit' ) def makeCataloguesTable(req, ln=CFG_SITE_LANG): """Build the 'catalogues' (submission-collections) tree for the WebSubmit home-page. This tree contains the links to the various document types in WebSubmit. @param req: (dict) - the user request object in order to decide whether to display a submission. @param ln: (string) - the language of the interface. (defaults to 'CFG_SITE_LANG'). @return: (string, bool, bool) - the submission-collections tree. True if there is at least one submission authorized for the user True if there is at least one submission """ def is_at_least_one_submission_authorized(cats): for cat in cats: if cat['docs']: return True if is_at_least_one_submission_authorized(cat['sons']): return True return False text = "" catalogues = [] ## Get the submission-collections attached at the top level ## of the submission-collection tree: top_level_collctns = get_collection_children_of_submission_collection(0) if len(top_level_collctns) != 0: ## There are submission-collections attatched to the top level. 
## retrieve their details for displaying: for child_collctn in top_level_collctns: catalogues.append(getCatalogueBranch(child_collctn[0], 1, req)) text = websubmit_templates.tmpl_submit_home_catalogs( ln=ln, catalogs=catalogues) submissions_exist = True at_least_one_submission_authorized = is_at_least_one_submission_authorized(catalogues) else: text = websubmit_templates.tmpl_submit_home_catalog_no_content(ln=ln) submissions_exist = False at_least_one_submission_authorized = False return text, at_least_one_submission_authorized, submissions_exist def getCatalogueBranch(id_father, level, req): """Build up a given branch of the submission-collection tree. I.e. given a parent submission-collection ID, build up the tree below it. This tree will include doctype-children, as well as other submission- collections and their children. Finally, return the branch as a dictionary. @param id_father: (integer) - the ID of the submission-collection from which to begin building the branch. @param level: (integer) - the level of the current submission- collection branch. @param req: (dict) - the user request object in order to decide whether to display a submission. @return: (dictionary) - the branch and its sub-branches. """ elem = {} ## The dictionary to contain this branch of the tree. 
    ## First, get the submission-collection-details:
    collctn_name = get_submission_collection_name(id_father)
    if collctn_name is not None:
        ## Got the submission-collection's name:
        elem['name'] = collctn_name
    else:
        ## The submission-collection is unknown to the DB
        ## set its name as empty:
        elem['name'] = ""
    elem['id'] = id_father
    elem['level'] = level

    ## Now get details of the doctype-children of this
    ## submission-collection:
    elem['docs'] = [] ## List to hold the doctype-children
                      ## of the submission-collection
    doctype_children = \
       get_doctype_children_of_submission_collection(id_father)
    user_info = collect_user_info(req)
    for child_doctype in doctype_children:
        ## To get access to a submission pipeline for a logged in user,
        ## it is decided by any authorization. If none are defined for the action
        ## then a logged in user will get access.
        ## If user is not logged in, a specific rule to allow the action is needed
        if acc_authorize_action(req, 'submit', \
                                authorized_if_no_roles=not isGuestUser(user_info['uid']), \
                                doctype=child_doctype[0])[0] == 0:
            elem['docs'].append(getDoctypeBranch(child_doctype[0]))

    ## Now, get the collection-children of this submission-collection:
    elem['sons'] = []
    collctn_children = \
       get_collection_children_of_submission_collection(id_father)
    for child_collctn in collctn_children:
        ## Recurse one level deeper for each child collection:
        elem['sons'].append(getCatalogueBranch(child_collctn[0], level + 1, req))

    ## Now return this branch of the built-up 'collection-tree':
    return elem

def getDoctypeBranch(doctype):
    """Create a document-type 'leaf-node' for the submission-collections
       tree. Basically, this leaf is a dictionary containing the name
       and ID of the document-type submission to which it links.
       @param doctype: (string) - the ID of the document type.
       @return: (dictionary) - the document-type 'leaf node'. Contains
        the following values:
          + id:   (string) - the document-type ID.
          + name: (string) - the (long) name of the document-type.
    """
    ldocname = get_longname_of_doctype(doctype)
    if ldocname is None:
        ldocname = "Unknown Document Type"
    return { 'id' : doctype, 'name' : ldocname, }

def displayCatalogueBranch(id_father, level, catalogues):
    # Render one submission-collection branch (and its children) as an
    # HTML list fragment; "catalogues" accumulates visited collection IDs.
    text = ""
    collctn_name = get_submission_collection_name(id_father)
    if collctn_name is None:
        ## If this submission-collection wasn't known in the DB,
        ## give it the name "Unknown Submission-Collection" to
        ## avoid errors:
        collctn_name = "Unknown Submission-Collection"
    ## Now, create the display for this submission-collection:
    if level == 1:
        text = "<LI><font size=\"+1\"><strong>%s</strong></font>\n" \
               % collctn_name
    else:
        ## TODO: These are the same (and the if is ugly.) Why?
        if level == 2:
            text = "<LI>%s\n" % collctn_name
        else:
            if level > 2:
                text = "<LI>%s\n" % collctn_name

    ## Now display the children document-types that are attached
    ## to this submission-collection:
    ## First, get the children:
    doctype_children = get_doctype_children_of_submission_collection(id_father)
    collctn_children = get_collection_children_of_submission_collection(id_father)

    if len(doctype_children) > 0 or len(collctn_children) > 0:
        ## There is something to display, so open a list:
        text = text + "<UL>\n"
    ## First, add the doctype leaves of this branch:
    for child_doctype in doctype_children:
        ## Add the doctype 'leaf-node':
        text = text + displayDoctypeBranch(child_doctype[0], catalogues)

    ## Now add the submission-collection sub-branches:
    for child_collctn in collctn_children:
        catalogues.append(child_collctn[0])
        text = text + displayCatalogueBranch(child_collctn[0], level+1, catalogues)

    ## Finally, close up the list if there were nodes to display
    ## at this branch:
    if len(doctype_children) > 0 or len(collctn_children) > 0:
        text = text + "</UL>\n"

    return text

def displayDoctypeBranch(doctype, catalogues):
    # Render a single document-type leaf as an HTML list item whose link
    # submits the first form on the page with this doctype selected.
    text = ""
    ldocname = get_longname_of_doctype(doctype)
    if ldocname is None:
        ldocname = "Unknown Document Type"
    text = "<LI><a href=\"\" onmouseover=\"javascript:" \
           "popUpTextWindow('%s',true,event);\" onmouseout" \
           "=\"javascript:popUpTextWindow('%s',false,event);\" " \
           "onClick=\"document.forms[0].doctype.value='%s';" \
           "document.forms[0].submit();return false;\">%s</a>\n" \
           % (doctype, doctype, doctype, ldocname)
    return text

def action(req, c=CFG_SITE_NAME, ln=CFG_SITE_LANG, doctype=""):
    # Display the "action" page of a document type: its categories and the
    # submission actions (SBI, MBI, ...) available to the current user.
    # load the right message language
    _ = gettext_set_language(ln)

    nbCateg = 0
    snameCateg = []
    lnameCateg = []
    actionShortDesc = []
    indir = []
    actionbutton = []
    statustext = []
    t = ""
    ln = wash_language(ln)
    # get user ID:
    try:
        uid = getUid(req)
    except Error as e:
        return error_page(e, req, ln)
    #parses database to get all data
    ## first, get the list of categories
    doctype_categs = get_categories_of_doctype(doctype)
    for doctype_categ in doctype_categs:
        if not acc_authorize_action(req, 'submit', \
                                    authorized_if_no_roles=not isGuestUser(uid), \
                                    verbose=0, \
                                    doctype=doctype, \
                                    categ=doctype_categ[0])[0] == 0:
            # This category is restricted for this user, move on to the next categories.
            continue
        nbCateg = nbCateg+1
        snameCateg.append(doctype_categ[0])
        lnameCateg.append(doctype_categ[1])

    ## Now get the details of the document type:
    doctype_details = get_doctype_details(doctype)
    if doctype_details is None:
        ## Doctype doesn't exist - raise error:
        return warning_page(_("Unable to find document type: %(doctype)s", doctype=escape(str(doctype))), req, ln)
    else:
        docFullDesc  = doctype_details[0]
        # Also update the doctype as returned by the database, since
        # it might have a different case (eg.
        # DemOJrN->demoJRN)
        doctype = docShortDesc = doctype_details[1]
        description = doctype_details[4]

    ## Get the details of the actions supported by this document-type:
    doctype_actions = get_actions_on_submission_page_for_doctype(doctype)
    for doctype_action in doctype_actions:
        if not acc_authorize_action(req, 'submit', \
                                    authorized_if_no_roles=not isGuestUser(uid), \
                                    doctype=doctype, \
                                    act=doctype_action[0])[0] == 0:
            # This action is not authorized for this user, move on to the next actions.
            continue
        ## Get the details of this action:
        action_details = get_action_details(doctype_action[0])
        if action_details is not None:
            actionShortDesc.append(doctype_action[0])
            indir.append(action_details[1])
            actionbutton.append(action_details[4])
            statustext.append(action_details[5])

    if not snameCateg and not actionShortDesc:
        if isGuestUser(uid):
            # If user is guest and does not have access to any of the
            # categories, offer to login.
            return redirect_to_url(req, "%s/youraccount/login%s" % (
                CFG_SITE_SECURE_URL,
                make_canonical_urlargd({'referer' : CFG_SITE_SECURE_URL + req.unparsed_uri,
                                        'ln' : ln}, {})), norobot=True)
        else:
            return page_not_authorized(req, "../submit",
                                       uid=uid,
                                       text=_("You are not authorized to access this submission interface."),
                                       navmenuid='submit')

    ## Send the gathered information to the template so that the doctype's
    ## home-page can be displayed:
    t = websubmit_templates.tmpl_action_page(
          ln=ln,
          uid=uid,
          pid = os.getpid(),
          now = time.time(),
          doctype = doctype,
          description = description,
          docfulldesc = docFullDesc,
          snameCateg = snameCateg,
          lnameCateg = lnameCateg,
          actionShortDesc = actionShortDesc,
          indir = indir,
          # actionbutton = actionbutton,
          statustext = statustext,
        )

    p_navtrail = """<a href="/submit?ln=%(ln)s" class="navtrail">%(submit)s</a>""" % {'submit' : _("Submit"),
                                                                                     'ln' : ln}

    return page(title = docFullDesc,
                body=t,
                navtrail=p_navtrail,
                description="submit documents",
                keywords="submit",
                uid=uid,
                language=ln,
                req=req,
                navmenuid='submit'
               )

def Request_Print(m, txt):
    """The arguments to this function are the display mode (m) and the text
       to be displayed (txt).
    """
    return txt

def Evaluate_Parameter (field, doctype):
    # Returns the literal value of the parameter. Assumes that the value is
    # uniquely determined by the doctype, i.e. doctype is the primary key in
    # the table
    # If the table name is not null, evaluate the parameter

    ## TODO: The above comment looks like nonsense? This
    ## function only seems to get the values of parameters
    ## from the db...

    ## Get the value for the parameter:
    param_val = get_parameter_value_for_doctype(doctype, field)
    if param_val is None:
        ## Couldn't find a value for this parameter for this doctype.
        ## Instead, try with the default doctype (DEF):
        param_val = get_parameter_value_for_doctype("DEF", field)
    if param_val is None:
        ## There was no value for the parameter for the default doctype.
        ## Nothing can be done about it - return an empty string:
        return ""
    else:
        ## There was some kind of value for the parameter; return it:
        return param_val

def Get_Parameters (function, doctype):
    """For a given function of a given document type, a dictionary of the
       parameter names and values are returned.
       @param function: (string) - the name of the function for which the
        parameters are to be retrieved.
       @param doctype: (string) - the ID of the document type.
       @return: (dictionary) - of the parameters of the function. Keyed by
        the parameter name, values are of course the parameter values.
    """
    parray = {}
    ## Get the names of the parameters expected by this function:
    func_params = get_parameters_of_function(function)
    for func_param in func_params:
        ## For each of the parameters, get its value for this document-
        ## type and add it into the dictionary of parameters:
        parameter = func_param[0]
        parray[parameter] = Evaluate_Parameter (parameter, doctype)
    return parray

def get_level(doctype, action):
    """Get the level of a given submission. If unknown, return 0
       as the level.
       @param doctype: (string) - the ID of the document type.
       @param action: (string) - the ID of the action.
       @return: (integer) - the level of the submission; 0 otherwise.
        NOTE(review): despite the docstring, the success path returns
        subm_details[9] *as stored* (apparently a string) after merely
        checking it converts to int - callers may rely on that; confirm
        before coercing.
    """
    subm_details = get_details_of_submission(doctype, action)
    if subm_details is not None:
        ## Return the level of this action
        subm_level = subm_details[9]
        try:
            int(subm_level)
        except ValueError:
            return 0
        else:
            return subm_level
    else:
        return 0

def action_details (doctype, action):
    # Prints whether the action is mandatory or optional. The score of the
    # action is returned (-1 if the action was optional)
    subm_details = get_details_of_submission(doctype, action)
    if subm_details is not None:
        if subm_details[9] != "0":
            ## This action is mandatory; return the score:
            return subm_details[10]
        else:
            return -1
    else:
        return -1

def print_function_calls(req, doctype, action, step, form, start_time,
                         access, curdir, dismode, rn, last_step, action_score,
                         ln=CFG_SITE_LANG):
    """ Calls the functions required by an 'action'
    action on a 'doctype' document In supervisor mode, a table of the
    function calls is produced

    @return: (function_output_string, last_step, action_score, rn)
    """
    user_info = collect_user_info(req)
    # load the right message language
    _ = gettext_set_language(ln)
    t = ""
    ## Here follows the global protect environment.
the_globals = { 'doctype' : doctype, 'action' : action, 'act' : action, ## for backward compatibility 'step' : step, 'access' : access, 'ln' : ln, 'curdir' : curdir, 'uid' : user_info['uid'], 'uid_email' : user_info['email'], 'rn' : rn, 'last_step' : last_step, 'action_score' : action_score, '__websubmit_in_jail__' : True, 'form' : form, 'user_info' : user_info, '__builtins__' : globals()['__builtins__'], 'Request_Print': Request_Print } ## Get the list of functions to be called funcs_to_call = get_functions_for_submission_step(doctype, action, step) ## If no functions are found at this step for this doctype, ## get the functions for the DEF(ault) doctype: if len(funcs_to_call) == 0: funcs_to_call = get_functions_for_submission_step("DEF", action, step) if len(funcs_to_call) > 0: # while there are functions left... functions = [] for function in funcs_to_call: try: function_name = function[0] function_score = function[1] currfunction = { 'name' : function_name, 'score' : function_score, 'error' : 0, 'text' : '', } #FIXME: deprecated from invenio.legacy.websubmit import functions as legacy_functions function_path = os.path.join(legacy_functions.__path__[0], function_name + '.py') if os.path.exists(function_path): # import the function itself #function = getattr(invenio.legacy.websubmit.functions, function_name) execfile(function_path, the_globals) if function_name not in the_globals: currfunction['error'] = 1 else: the_globals['function'] = the_globals[function_name] # Evaluate the parameters, and place them in an array the_globals['parameters'] = Get_Parameters(function_name, doctype) # Call function: log_function(curdir, "Start %s" % function_name, start_time) try: try: ## Attempt to call the function with 4 arguments: ## ("parameters", "curdir" and "form" as usual), ## and "user_info" - the dictionary of user ## information: ## ## Note: The function should always be called with ## these keyword arguments because the "TypeError" ## except clause checks for a 
specific mention of ## the 'user_info' keyword argument when a legacy ## function (one that accepts only 'parameters', ## 'curdir' and 'form') has been called and if ## the error string doesn't contain this, ## the TypeError will be considered as a something ## that was incorrectly handled in the function and ## will be propagated as an ## InvenioWebSubmitFunctionError instead of the ## function being called again with the legacy 3 ## arguments. func_returnval = eval("function(parameters=parameters, curdir=curdir, form=form, user_info=user_info)", the_globals) except TypeError as err: ## If the error contains the string "got an ## unexpected keyword argument", it means that the ## function doesn't accept the "user_info" ## argument. Test for this: if "got an unexpected keyword argument 'user_info'" in \ str(err).lower(): ## As expected, the function doesn't accept ## the user_info keyword argument. Call it ## again with the legacy 3 arguments ## (parameters, curdir, form): func_returnval = eval("function(parameters=parameters, curdir=curdir, form=form)", the_globals) else: ## An unexpected "TypeError" was caught. ## It looks as though the function itself didn't ## handle something correctly. ## Convert this error into an ## InvenioWebSubmitFunctionError and raise it: msg = "Unhandled TypeError caught when " \ "calling [%s] WebSubmit function: " \ "[%s]: \n%s" % (function_name, str(err), traceback.format_exc()) raise InvenioWebSubmitFunctionError(msg) except InvenioWebSubmitFunctionWarning as err: ## There was an unexpected behaviour during the ## execution. 
Log the message into function's log ## and go to next function log_function(curdir, "***Warning*** from %s: %s" \ % (function_name, str(err)), start_time) ## Reset "func_returnval" to None: func_returnval = None register_exception(req=req, alert_admin=True, prefix="Warning in executing function %s with globals %s" % (pprint.pformat(currfunction), pprint.pformat(the_globals))) log_function(curdir, "End %s" % function_name, start_time) if func_returnval is not None: ## Append the returned value as a string: currfunction['text'] = str(func_returnval) else: ## The function the NoneType. Don't keep that value as ## the currfunction->text. Replace it with the empty ## string. currfunction['text'] = "" else: currfunction['error'] = 1 functions.append(currfunction) except InvenioWebSubmitFunctionStop as err: ## The submission asked to stop execution. This is ## ok. Do not alert admin, and raise exception further log_function(curdir, "***Stop*** from %s: %s" \ % (function_name, str(err)), start_time) raise except: register_exception(req=req, alert_admin=True, prefix="Error in executing function %s with globals %s" % (pprint.pformat(currfunction), pprint.pformat(the_globals))) raise t = websubmit_templates.tmpl_function_output( ln = ln, display_on = (dismode == 'S'), action = action, doctype = doctype, step = step, functions = functions, ) else : if dismode == 'S': t = "<br /><br /><b>" + _("The chosen action is not supported by the document type.") + "</b>" return (t, the_globals['last_step'], the_globals['action_score'], the_globals['rn']) def Propose_Next_Action (doctype, action_score, access, currentlevel, indir, ln=CFG_SITE_LANG): t = "" next_submissions = \ get_submissions_at_level_X_with_score_above_N(doctype, currentlevel, action_score) if len(next_submissions) > 0: actions = [] first_score = next_submissions[0][10] for action in next_submissions: if action[10] == first_score: ## Get the submission directory of this action: nextdir = 
get_storage_directory_of_action(action[1]) if nextdir is None: nextdir = "" curraction = { 'page' : action[11], 'action' : action[1], 'doctype' : doctype, 'nextdir' : nextdir, 'access' : access, 'indir' : indir, 'name' : action[12], } actions.append(curraction) t = websubmit_templates.tmpl_next_action( ln = ln, actions = actions, ) return t def specialchars(text): text = string.replace(text, "&#147;", "\042"); text = string.replace(text, "&#148;", "\042"); text = string.replace(text, "&#146;", "\047"); text = string.replace(text, "&#151;", "\055"); text = string.replace(text, "&#133;", "\056\056\056"); return text def log_function(curdir, message, start_time, filename="function_log"): """Write into file the message and the difference of time between starttime and current time @param curdir:(string) path to the destination dir @param message: (string) message to write into the file @param starttime: (float) time to compute from @param filname: (string) name of log file """ time_lap = "%.3f" % (time.time() - start_time) if os.access(curdir, os.F_OK|os.W_OK): fd = open("%s/%s" % (curdir, filename), "a+") fd.write("""%s --- %s\n""" % (message, time_lap)) fd.close()
gpl-2.0
davidcoallier/bigcouch
couchjs/scons/scons-local-2.0.1/SCons/Tool/suncc.py
61
1980
"""SCons.Tool.suncc Tool-specific initialization for Sun Solaris (Forte) CC and cc. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Tool/suncc.py 5134 2010/08/16 23:02:40 bdeegan" import SCons.Util import cc def generate(env): """ Add Builders and construction variables for Forte C and C++ compilers to an Environment. """ cc.generate(env) env['CXX'] = 'CC' env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS -KPIC') env['SHOBJPREFIX'] = 'so_' env['SHOBJSUFFIX'] = '.o' def exists(env): return env.Detect('CC') # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
apache-2.0
kumarshivam675/Mobile10X-Hack
yowsup/layers/coder/layer.py
52
1480
from yowsup.layers import YowLayer, YowLayerEvent
from yowsup.layers.network import YowNetworkLayer
from .encoder import WriteEncoder
from .decoder import ReadDecoder
from .tokendictionary import TokenDictionary


class YowCoderLayer(YowLayer):
    """Codec layer: encodes outgoing protocol tree nodes into bytes and
    decodes incoming bytes back into protocol tree nodes, using a token
    dictionary shared between the encoder and the decoder."""

    PROP_DOMAIN = "org.openwhatsapp.yowsup.prop.domain"
    PROP_RESOURCE = "org.openwhatsapp.yowsup.prop.resource"

    def __init__(self):
        YowLayer.__init__(self)
        # One dictionary instance is shared so both directions of the
        # stream agree on the token numbering.
        tokenDictionary = TokenDictionary()
        self.writer = WriteEncoder(tokenDictionary)
        self.reader = ReadDecoder(tokenDictionary)

    def onEvent(self, event):
        """On network connect, reset codec state and emit the stream-start
        bytes for the configured domain/resource."""
        if event.getName() == YowNetworkLayer.EVENT_STATE_CONNECTED:
            self.writer.reset()
            self.reader.reset()
            streamStartBytes = self.writer.getStreamStartBytes(
                self.getProp(self.__class__.PROP_DOMAIN),
                self.getProp(self.__class__.PROP_RESOURCE)
            )
            # The first 4 bytes are sent one at a time and the remainder as a
            # single chunk -- presumably the stream header must be framed this
            # way by the transport; confirm before changing.
            for _ in range(0, 4):
                self.write(streamStartBytes.pop(0))
            self.write(streamStartBytes)

    def send(self, data):
        """Encode a protocol tree node and pass the bytes downward."""
        self.write(self.writer.protocolTreeNodeToBytes(data))

    def receive(self, data):
        """Decode incoming bytes; forward the resulting node upward, if a
        complete node was produced."""
        node = self.reader.getProtocolTreeNode(data)
        if node:
            self.toUpper(node)

    def write(self, i):
        """Normalize *i* -- a single int byte value or a list/tuple of byte
        values -- to a bytearray and hand it to the lower layer."""
        # isinstance() is the idiomatic check and, unlike the original
        # ``type(i) in (list, tuple)``, also accepts list/tuple subclasses.
        if isinstance(i, (list, tuple)):
            self.toLower(bytearray(i))
        else:
            self.toLower(bytearray([i]))

    def __str__(self):
        return "Coder Layer"
gpl-3.0
JamesClough/networkx
networkx/algorithms/cycles.py
4
16726
""" ======================== Cycle finding algorithms ======================== """ # Copyright (C) 2010-2012 by # Aric Hagberg <hagberg@lanl.gov> # Dan Schult <dschult@colgate.edu> # Pieter Swart <swart@lanl.gov> # All rights reserved. # BSD license. from collections import defaultdict import networkx as nx from networkx.utils import * from networkx.algorithms.traversal.edgedfs import helper_funcs, edge_dfs __all__ = [ 'cycle_basis','simple_cycles','recursive_simple_cycles', 'find_cycle' ] __author__ = "\n".join(['Jon Olav Vik <jonovik@gmail.com>', 'Dan Schult <dschult@colgate.edu>', 'Aric Hagberg <hagberg@lanl.gov>']) @not_implemented_for('directed') @not_implemented_for('multigraph') def cycle_basis(G,root=None): """ Returns a list of cycles which form a basis for cycles of G. A basis for cycles of a network is a minimal collection of cycles such that any cycle in the network can be written as a sum of cycles in the basis. Here summation of cycles is defined as "exclusive or" of the edges. Cycle bases are useful, e.g. when deriving equations for electric circuits using Kirchhoff's Laws. Parameters ---------- G : NetworkX Graph root : node, optional Specify starting node for basis. Returns ------- A list of cycle lists. Each cycle list is a list of nodes which forms a cycle (loop) in G. Examples -------- >>> G=nx.Graph() >>> nx.add_cycle(G, [0, 1, 2, 3]) >>> nx.add_cycle(G, [0, 3, 4, 5]) >>> print(nx.cycle_basis(G,0)) [[3, 4, 5, 0], [1, 2, 3, 0]] Notes ----- This is adapted from algorithm CACM 491 [1]_. References ---------- .. [1] Paton, K. An algorithm for finding a fundamental set of cycles of a graph. Comm. ACM 12, 9 (Sept 1969), 514-518. 
See Also -------- simple_cycles """ gnodes=set(G.nodes()) cycles=[] while gnodes: # loop over connected components if root is None: root=gnodes.pop() stack=[root] pred={root:root} used={root:set()} while stack: # walk the spanning tree finding cycles z=stack.pop() # use last-in so cycles easier to find zused=used[z] for nbr in G[z]: if nbr not in used: # new node pred[nbr]=z stack.append(nbr) used[nbr]=set([z]) elif nbr == z: # self loops cycles.append([z]) elif nbr not in zused:# found a cycle pn=used[nbr] cycle=[nbr,z] p=pred[z] while p not in pn: cycle.append(p) p=pred[p] cycle.append(p) cycles.append(cycle) used[nbr].add(z) gnodes-=set(pred) root=None return cycles @not_implemented_for('undirected') def simple_cycles(G): """Find simple cycles (elementary circuits) of a directed graph. A `simple cycle`, or `elementary circuit`, is a closed path where no node appears twice. Two elementary circuits are distinct if they are not cyclic permutations of each other. This is a nonrecursive, iterator/generator version of Johnson's algorithm [1]_. There may be better algorithms for some cases [2]_ [3]_. Parameters ---------- G : NetworkX DiGraph A directed graph Returns ------- cycle_generator: generator A generator that produces elementary cycles of the graph. Each cycle is represented by a list of nodes along the cycle. Examples -------- >>> G = nx.DiGraph([(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)]) >>> len(list(nx.simple_cycles(G))) 5 To filter the cycles so that they don't include certain nodes or edges, copy your graph and eliminate those nodes or edges before calling >>> copyG = G.copy() >>> copyG.remove_nodes_from([1]) >>> copyG.remove_edges_from([(0, 1)]) >>> len(list(nx.simple_cycles(copyG))) 3 Notes ----- The implementation follows pp. 79-80 in [1]_. The time complexity is `O((n+e)(c+1))` for `n` nodes, `e` edges and `c` elementary circuits. References ---------- .. [1] Finding all the elementary circuits of a directed graph. D. B. 
Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975. http://dx.doi.org/10.1137/0204007 .. [2] Enumerating the cycles of a digraph: a new preprocessing strategy. G. Loizou and P. Thanish, Information Sciences, v. 27, 163-182, 1982. .. [3] A search strategy for the elementary cycles of a directed graph. J.L. Szwarcfiter and P.E. Lauer, BIT NUMERICAL MATHEMATICS, v. 16, no. 2, 192-204, 1976. See Also -------- cycle_basis """ def _unblock(thisnode,blocked,B): stack=set([thisnode]) while stack: node=stack.pop() if node in blocked: blocked.remove(node) stack.update(B[node]) B[node].clear() # Johnson's algorithm requires some ordering of the nodes. # We assign the arbitrary ordering given by the strongly connected comps # There is no need to track the ordering as each node removed as processed. subG = type(G)(G.edges()) # save the actual graph so we can mutate it here # We only take the edges because we do not want to # copy edge and node attributes here. sccs = list(nx.strongly_connected_components(subG)) while sccs: scc=sccs.pop() # order of scc determines ordering of nodes startnode = scc.pop() # Processing node runs "circuit" routine from recursive version path=[startnode] blocked = set() # vertex: blocked from search? closed = set() # nodes involved in a cycle blocked.add(startnode) B=defaultdict(set) # graph portions that yield no elementary circuit stack=[ (startnode,list(subG[startnode])) ] # subG gives component nbrs while stack: thisnode,nbrs = stack[-1] if nbrs: nextnode = nbrs.pop() # print thisnode,nbrs,":",nextnode,blocked,B,path,stack,startnode # f=raw_input("pause") if nextnode == startnode: yield path[:] closed.update(path) # print "Found a cycle",path,closed elif nextnode not in blocked: path.append(nextnode) stack.append( (nextnode,list(subG[nextnode])) ) closed.discard(nextnode) blocked.add(nextnode) continue # done with nextnode... 
look for more neighbors if not nbrs: # no more nbrs if thisnode in closed: _unblock(thisnode,blocked,B) else: for nbr in subG[thisnode]: if thisnode not in B[nbr]: B[nbr].add(thisnode) stack.pop() # assert path[-1]==thisnode path.pop() # done processing this node subG.remove_node(startnode) H=subG.subgraph(scc) # make smaller to avoid work in SCC routine sccs.extend(list(nx.strongly_connected_components(H))) @not_implemented_for('undirected') def recursive_simple_cycles(G): """Find simple cycles (elementary circuits) of a directed graph. A `simple cycle`, or `elementary circuit`, is a closed path where no node appears twice. Two elementary circuits are distinct if they are not cyclic permutations of each other. This version uses a recursive algorithm to build a list of cycles. You should probably use the iterator version called simple_cycles(). Warning: This recursive version uses lots of RAM! Parameters ---------- G : NetworkX DiGraph A directed graph Returns ------- A list of cycles, where each cycle is represented by a list of nodes along the cycle. Example: >>> G = nx.DiGraph([(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)]) >>> nx.recursive_simple_cycles(G) [[0], [0, 1, 2], [0, 2], [1, 2], [2]] See Also -------- cycle_basis (for undirected graphs) Notes ----- The implementation follows pp. 79-80 in [1]_. The time complexity is `O((n+e)(c+1))` for `n` nodes, `e` edges and `c` elementary circuits. References ---------- .. [1] Finding all the elementary circuits of a directed graph. D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975. 
http://dx.doi.org/10.1137/0204007 See Also -------- simple_cycles, cycle_basis """ # Jon Olav Vik, 2010-08-09 def _unblock(thisnode): """Recursively unblock and remove nodes from B[thisnode].""" if blocked[thisnode]: blocked[thisnode] = False while B[thisnode]: _unblock(B[thisnode].pop()) def circuit(thisnode, startnode, component): closed = False # set to True if elementary path is closed path.append(thisnode) blocked[thisnode] = True for nextnode in component[thisnode]: # direct successors of thisnode if nextnode == startnode: result.append(path[:]) closed = True elif not blocked[nextnode]: if circuit(nextnode, startnode, component): closed = True if closed: _unblock(thisnode) else: for nextnode in component[thisnode]: if thisnode not in B[nextnode]: # TODO: use set for speedup? B[nextnode].append(thisnode) path.pop() # remove thisnode from path return closed path = [] # stack of nodes in current path blocked = defaultdict(bool) # vertex: blocked from search? B = defaultdict(list) # graph portions that yield no elementary circuit result = [] # list to accumulate the circuits found # Johnson's algorithm requires some ordering of the nodes. # They might not be sortable so we assign an arbitrary ordering. 
ordering=dict(zip(G,range(len(G)))) for s in ordering: # Build the subgraph induced by s and following nodes in the ordering subgraph = G.subgraph(node for node in G if ordering[node] >= ordering[s]) # Find the strongly connected component in the subgraph # that contains the least node according to the ordering strongcomp = nx.strongly_connected_components(subgraph) mincomp=min(strongcomp, key=lambda nodes: min(ordering[n] for n in nodes)) component = G.subgraph(mincomp) if component: # smallest node in the component according to the ordering startnode = min(component,key=ordering.__getitem__) for node in component: blocked[node] = False B[node][:] = [] dummy=circuit(startnode, startnode, component) return result def find_cycle(G, source=None, orientation='original'): """ Returns the edges of a cycle found via a directed, depth-first traversal. Parameters ---------- G : graph A directed/undirected graph/multigraph. source : node, list of nodes The node from which the traversal begins. If None, then a source is chosen arbitrarily and repeatedly until all edges from each node in the graph are searched. orientation : 'original' | 'reverse' | 'ignore' For directed graphs and directed multigraphs, edge traversals need not respect the original orientation of the edges. When set to 'reverse', then every edge will be traversed in the reverse direction. When set to 'ignore', then each directed edge is treated as a single undirected edge that can be traversed in either direction. For undirected graphs and undirected multigraphs, this parameter is meaningless and is not consulted by the algorithm. Returns ------- edges : directed edges A list of directed edges indicating the path taken for the loop. If no cycle is found, then an exception is raised. For graphs, an edge is of the form `(u, v)` where `u` and `v` are the tail and head of the edge as determined by the traversal. For multigraphs, an edge is of the form `(u, v, key)`, where `key` is the key of the edge. 
When the graph is directed, then `u` and `v` are always in the order of the actual directed edge. If orientation is 'ignore', then an edge takes the form `(u, v, key, direction)` where direction indicates if the edge was followed in the forward (tail to head) or reverse (head to tail) direction. When the direction is forward, the value of `direction` is 'forward'. When the direction is reverse, the value of `direction` is 'reverse'. Raises ------ NetworkXNoCycle If no cycle was found. Examples -------- In this example, we construct a DAG and find, in the first call, that there are no directed cycles, and so an exception is raised. In the second call, we ignore edge orientations and find that there is an undirected cycle. Note that the second call finds a directed cycle while effectively traversing an undirected graph, and so, we found an "undirected cycle". This means that this DAG structure does not form a directed tree (which is also known as a polytree). >>> import networkx as nx >>> G = nx.DiGraph([(0,1), (0,2), (1,2)]) >>> try: ... find_cycle(G, orientation='original') ... except: ... pass ... >>> list(find_cycle(G, orientation='ignore')) [(0, 1, 'forward'), (1, 2, 'forward'), (0, 2, 'reverse')] """ out_edge, key, tailhead = helper_funcs(G, orientation) explored = set() cycle = [] final_node = None for start_node in G.nbunch_iter(source): if start_node in explored: # No loop is possible. continue edges = [] # All nodes seen in this iteration of edge_dfs seen = {start_node} # Nodes in active path. active_nodes = {start_node} previous_node = None for edge in edge_dfs(G, start_node, orientation): # Determine if this edge is a continuation of the active path. tail, head = tailhead(edge) if previous_node is not None and tail != previous_node: # This edge results from backtracking. # Pop until we get a node whose head equals the current tail. 
# So for example, we might have: # (0,1), (1,2), (2,3), (1,4) # which must become: # (0,1), (1,4) while True: try: popped_edge = edges.pop() except IndexError: edges = [] active_nodes = {tail} break else: popped_head = tailhead(popped_edge)[1] active_nodes.remove(popped_head) if edges: last_head = tailhead(edges[-1])[1] if tail == last_head: break edges.append(edge) if head in active_nodes: # We have a loop! cycle.extend(edges) final_node = head break elif head in explored: # Then we've already explored it. No loop is possible. break else: seen.add(head) active_nodes.add(head) previous_node = head if cycle: break else: explored.update(seen) else: assert(len(cycle) == 0) raise nx.exception.NetworkXNoCycle('No cycle found.') # We now have a list of edges which ends on a cycle. # So we need to remove from the beginning edges that are not relevant. for i, edge in enumerate(cycle): tail, head = tailhead(edge) if tail == final_node: break return cycle[i:]
bsd-3-clause
hzruandd/AutobahnPython
examples/twisted/websocket/echo/client_coroutines.py
9
2681
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################

from autobahn.twisted.websocket import WebSocketClientProtocol, \
    WebSocketClientFactory

from twisted.internet.defer import Deferred, inlineCallbacks


def sleep(delay):
    """Return a Deferred that fires with None after *delay* seconds.

    NOTE(review): this reads the module-global ``reactor`` name, which is
    only bound inside the ``__main__`` block below -- the module appears to
    be intended to run as a script only; confirm before importing elsewhere.
    """
    deferred = Deferred()
    reactor.callLater(delay, deferred.callback, None)
    return deferred


class MyClientProtocol(WebSocketClientProtocol):
    """Echo client that sends one text and one binary message per second and
    prints whatever the server sends back."""

    def onConnect(self, response):
        print("Server connected: {0}".format(response.peer))

    @inlineCallbacks
    def onOpen(self):
        print("WebSocket connection open.")

        # start sending messages every second ..
        while True:
            self.sendMessage(u"Hello, world!".encode('utf8'))
            self.sendMessage(b"\x00\x01\x03\x04", isBinary=True)
            yield sleep(1)

    def onMessage(self, payload, isBinary):
        # Report text and binary frames separately.
        if not isBinary:
            print("Text message received: {0}".format(payload.decode('utf8')))
        else:
            print("Binary message received: {0} bytes".format(len(payload)))

    def onClose(self, wasClean, code, reason):
        print("WebSocket connection closed: {0}".format(reason))


if __name__ == '__main__':

    import sys

    from twisted.python import log
    from twisted.internet import reactor

    log.startLogging(sys.stdout)

    factory = WebSocketClientFactory(u"ws://127.0.0.1:9000", debug=False)
    factory.protocol = MyClientProtocol

    reactor.connectTCP("127.0.0.1", 9000, factory)
    reactor.run()
mit
kushalbhola/MyStuff
Practice/PythonApplication/env/Lib/site-packages/pip/_vendor/html5lib/filters/inject_meta_charset.py
156
2945
from __future__ import absolute_import, division, unicode_literals from . import base class Filter(base.Filter): """Injects ``<meta charset=ENCODING>`` tag into head of document""" def __init__(self, source, encoding): """Creates a Filter :arg source: the source token stream :arg encoding: the encoding to set """ base.Filter.__init__(self, source) self.encoding = encoding def __iter__(self): state = "pre_head" meta_found = (self.encoding is None) pending = [] for token in base.Filter.__iter__(self): type = token["type"] if type == "StartTag": if token["name"].lower() == "head": state = "in_head" elif type == "EmptyTag": if token["name"].lower() == "meta": # replace charset with actual encoding has_http_equiv_content_type = False for (namespace, name), value in token["data"].items(): if namespace is not None: continue elif name.lower() == 'charset': token["data"][(namespace, name)] = self.encoding meta_found = True break elif name == 'http-equiv' and value.lower() == 'content-type': has_http_equiv_content_type = True else: if has_http_equiv_content_type and (None, "content") in token["data"]: token["data"][(None, "content")] = 'text/html; charset=%s' % self.encoding meta_found = True elif token["name"].lower() == "head" and not meta_found: # insert meta into empty head yield {"type": "StartTag", "name": "head", "data": token["data"]} yield {"type": "EmptyTag", "name": "meta", "data": {(None, "charset"): self.encoding}} yield {"type": "EndTag", "name": "head"} meta_found = True continue elif type == "EndTag": if token["name"].lower() == "head" and pending: # insert meta into head (if necessary) and flush pending queue yield pending.pop(0) if not meta_found: yield {"type": "EmptyTag", "name": "meta", "data": {(None, "charset"): self.encoding}} while pending: yield pending.pop(0) meta_found = True state = "post_head" if state == "in_head": pending.append(token) else: yield token
apache-2.0
hortonworks/hortonworks-sandbox
apps/beeswax/src/beeswax/conf.py
1
4895
#!/usr/bin/env python # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os.path from django.utils.translation import ugettext_lazy as _ from desktop.lib.conf import Config, UnspecifiedConfigSection, ConfigSection, coerce_bool QUERY_SERVERS = UnspecifiedConfigSection( "query_servers", help=_("One entry for each Query Server that can execute some queries."), each=ConfigSection( help=_("Information about a single Query Server"), members=dict( SERVER_HOST = Config( key="server_host", help=_("Host where the Query Server Thrift daemon is running."), private=True, default="localhost"), SERVER_PORT = Config( key="server_port", help=_("Configure the port the Query Server Thrift server."), default=8002, type=int), SUPPORT_DDL = Config( key='support_ddl', default=True, type=coerce_bool, help=_('If DDL queries are supported (e.g. DROP can be sent directly to this server).')) ) ) ) # Deprecated! To remove in Hue 3 # Multiple sections are now available in QUERY_SERVERS BEESWAX_SERVER_HOST = Config( key="beeswax_server_host", help=_("Host where Beeswax server Thrift daemon is running."), private=True, default="localhost") # Deprecated! 
To remove in Hue 3 # Multiple sections are now available in QUERY_SERVERS BEESWAX_SERVER_PORT = Config( key="beeswax_server_port", help=_("Configure the port the Beeswax Thrift server runs on."), default=8002, type=int) BEESWAX_META_SERVER_HOST = Config( key="beeswax_meta_server_host", help=_("Host where Beeswax internal metastore Thrift daemon is running."), private=True, default="localhost") BEESWAX_META_SERVER_PORT = Config( key="beeswax_meta_server_port", help=_("Configure the port the internal metastore daemon runs on. Used only if " "hive.metastore.local is true."), default=8003, type=int) BEESWAX_SERVER_BIN = Config( key="beeswax_server_bin", help=_("Path to beeswax_server.sh"), private=True, default=os.path.join(os.path.dirname(__file__), "..", "..", "beeswax_server.sh")) BEESWAX_SERVER_HEAPSIZE = Config( key="beeswax_server_heapsize", help=_("Maximum Java heapsize (in megabytes) used by Beeswax Server. " + \ "Note that the setting of HADOOP_HEAPSIZE in $HADOOP_CONF_DIR/hadoop-env.sh " + \ "may override this setting."), default="1000") BEESWAX_HIVE_HOME_DIR = Config( key="hive_home_dir", default=os.environ.get("HIVE_HOME", "/usr/lib/hive"), help=_("Path to the root of the Hive installation; " + "defaults to environment variable when not set.")) BEESWAX_HIVE_CONF_DIR = Config( key='hive_conf_dir', help=_('Hive configuration directory, where hive-site.xml is located.'), default=os.environ.get("HIVE_CONF_DIR", '/etc/hive/conf')) LOCAL_EXAMPLES_DATA_DIR = Config( key='local_examples_data_dir', default=os.path.join(os.path.dirname(__file__), "..", "..", "data"), help=_('The local filesystem path containing the Beeswax examples.')) BEESWAX_SERVER_CONN_TIMEOUT = Config( key='beeswax_server_conn_timeout', default=120, type=int, help=_('Timeout in seconds for Thrift calls to Beeswax service.')) METASTORE_CONN_TIMEOUT= Config( key='metastore_conn_timeout', default=10, type=int, help=_('Timeouts in seconds for Thrift calls to the Hive metastore. 
This timeout should take into account that the metastore could talk to an external database.')) BEESWAX_RUNNING_QUERY_LIFETIME = Config( key='beeswax_running_query_lifetime', default=604800000L, # 7*24*60*60*1000 (1 week) type=long, help=_('Time in seconds for Beeswax to persist queries in its cache.')) BROWSE_PARTITIONED_TABLE_LIMIT = Config( key='browse_partitioned_table_limit', default=250, type=int, help=_('Set a LIMIT clause when browsing a partitioned table. A positive value will be set as the LIMIT. If 0 or negative, do not set any limit.')) SHARE_SAVED_QUERIES = Config( key='share_saved_queries', default=True, type=coerce_bool, help=_('Share saved queries with all users. If set to false, saved queries are visible only to the owner and administrators.'))
apache-2.0
giorgiop/scipy
scipy/weave/inline_tools.py
97
21706
# should re-write compiled functions to take a local and global dict # as input. from __future__ import absolute_import, print_function import sys import os from . import ext_tools from . import catalog from . import common_info from numpy.core.multiarray import _get_ndarray_c_version ndarray_api_version = '/* NDARRAY API VERSION %x */' % (_get_ndarray_c_version(),) # not an easy way for the user_path_list to come in here. # the PYTHONCOMPILED environment variable offers the most hope. # If the user sets ``os.environ['PYTHONCOMPILED']``, that path will # be used to compile the extension in. Note that .cpp and .so files # will remain in that directory. See the docstring of ``catalog.catalog`` # for more details. function_catalog = catalog.catalog() class inline_ext_function(ext_tools.ext_function): # Some specialization is needed for inline extension functions def function_declaration_code(self): code = 'static PyObject* %s(PyObject*self, PyObject* args)\n{\n' return code % self.name def template_declaration_code(self): code = 'template<class T>\n' \ 'static PyObject* %s(PyObject*self, PyObject* args)\n{\n' return code % self.name def parse_tuple_code(self): """ Create code block for PyArg_ParseTuple. Variable declarations for all PyObjects are done also. This code got a lot uglier when I added local_dict... 
""" declare_return = 'py::object return_val;\n' \ 'int exception_occurred = 0;\n' \ 'PyObject *py__locals = NULL;\n' \ 'PyObject *py__globals = NULL;\n' py_objects = ', '.join(self.arg_specs.py_pointers()) if py_objects: declare_py_objects = 'PyObject ' + py_objects + ';\n' else: declare_py_objects = '' py_vars = ' = '.join(self.arg_specs.py_variables()) if py_vars: init_values = py_vars + ' = NULL;\n\n' else: init_values = '' parse_tuple = 'if(!PyArg_ParseTuple(args,"OO:compiled_func",'\ '&py__locals,'\ '&py__globals))\n'\ ' return NULL;\n' return declare_return + declare_py_objects + \ init_values + parse_tuple def arg_declaration_code(self): """Return the declaration code as a string.""" arg_strings = [arg.declaration_code(inline=1) for arg in self.arg_specs] return "".join(arg_strings) def arg_cleanup_code(self): """Return the cleanup code as a string.""" arg_strings = [arg.cleanup_code() for arg in self.arg_specs] return "".join(arg_strings) def arg_local_dict_code(self): """Return the code to create the local dict as a string.""" arg_strings = [arg.local_dict_code() for arg in self.arg_specs] return "".join(arg_strings) def function_code(self): from .ext_tools import indent decl_code = indent(self.arg_declaration_code(),4) cleanup_code = indent(self.arg_cleanup_code(),4) function_code = indent(self.code_block,4) # local_dict_code = indent(self.arg_local_dict_code(),4) try_code = \ ' try \n' \ ' { \n' \ '#if defined(__GNUC__) || defined(__ICC)\n' \ ' PyObject* raw_locals __attribute__ ((unused));\n' \ ' PyObject* raw_globals __attribute__ ((unused));\n' \ '#else\n' \ ' PyObject* raw_locals;\n' \ ' PyObject* raw_globals;\n' \ '#endif\n' \ ' raw_locals = py_to_raw_dict(py__locals,"_locals");\n' \ ' raw_globals = py_to_raw_dict(py__globals,"_globals");\n' \ ' /* argument conversion code */ \n' \ + decl_code + \ ' /* inline code */ \n' \ + function_code + \ ' /*I would like to fill in changed locals and globals here...*/ \n' \ ' }\n' catch_code = "catch(...) 
\n" \ "{ \n" + \ " return_val = py::object(); \n" \ " exception_occurred = 1; \n" \ "} \n" return_code = " /* cleanup code */ \n" + \ cleanup_code + \ " if(!(PyObject*)return_val && !exception_occurred)\n" \ " {\n \n" \ " return_val = Py_None; \n" \ " }\n \n" \ " return return_val.disown(); \n" \ "} \n" all_code = self.function_declaration_code() + \ indent(self.parse_tuple_code(),4) + \ try_code + \ indent(catch_code,4) + \ return_code return all_code def python_function_definition_code(self): args = (self.name, self.name) function_decls = '{"%s",(PyCFunction)%s , METH_VARARGS},\n' % args return function_decls class inline_ext_module(ext_tools.ext_module): def __init__(self,name,compiler=''): ext_tools.ext_module.__init__(self,name,compiler) self._build_information.append(common_info.inline_info()) function_cache = {} def inline(code,arg_names=[],local_dict=None, global_dict=None, force=0, compiler='', verbose=0, support_code=None, headers=[], customize=None, type_converters=None, auto_downcast=1, newarr_converter=0, **kw): """ Inline C/C++ code within Python scripts. ``inline()`` compiles and executes C/C++ code on the fly. Variables in the local and global Python scope are also available in the C/C++ code. Values are passed to the C/C++ code by assignment much like variables passed are passed into a standard Python function. Values are returned from the C/C++ code through a special argument called return_val. Also, the contents of mutable objects can be changed within the C/C++ code and the changes remain after the C code exits and returns to Python. inline has quite a few options as listed below. Also, the keyword arguments for distutils extension modules are accepted to specify extra information needed for compiling. Parameters ---------- code : string A string of valid C++ code. It should not specify a return statement. Instead it should assign results that need to be returned to Python in the `return_val`. 
arg_names : [str], optional A list of Python variable names that should be transferred from Python into the C/C++ code. It defaults to an empty string. local_dict : dict, optional If specified, it is a dictionary of values that should be used as the local scope for the C/C++ code. If local_dict is not specified the local dictionary of the calling function is used. global_dict : dict, optional If specified, it is a dictionary of values that should be used as the global scope for the C/C++ code. If `global_dict` is not specified, the global dictionary of the calling function is used. force : {0, 1}, optional If 1, the C++ code is compiled every time inline is called. This is really only useful for debugging, and probably only useful if your editing `support_code` a lot. compiler : str, optional The name of compiler to use when compiling. On windows, it understands 'msvc' and 'gcc' as well as all the compiler names understood by distutils. On Unix, it'll only understand the values understood by distutils. (I should add 'gcc' though to this). On windows, the compiler defaults to the Microsoft C++ compiler. If this isn't available, it looks for mingw32 (the gcc compiler). On Unix, it'll probably use the same compiler that was used when compiling Python. Cygwin's behavior should be similar. verbose : {0,1,2}, optional Specifies how much information is printed during the compile phase of inlining code. 0 is silent (except on windows with msvc where it still prints some garbage). 1 informs you when compiling starts, finishes, and how long it took. 2 prints out the command lines for the compilation process and can be useful if your having problems getting code to work. Its handy for finding the name of the .cpp file if you need to examine it. verbose has no effect if the compilation isn't necessary. support_code : str, optional A string of valid C++ code declaring extra code that might be needed by your compiled function. 
This could be declarations of functions, classes, or structures. headers : [str], optional A list of strings specifying header files to use when compiling the code. The list might look like ``["<vector>","'my_header'"]``. Note that the header strings need to be in a form than can be pasted at the end of a ``#include`` statement in the C++ code. customize : base_info.custom_info, optional An alternative way to specify `support_code`, `headers`, etc. needed by the function. See :mod:`scipy.weave.base_info` for more details. (not sure this'll be used much). type_converters : [type converters], optional These guys are what convert Python data types to C/C++ data types. If you'd like to use a different set of type conversions than the default, specify them here. Look in the type conversions section of the main documentation for examples. auto_downcast : {1,0}, optional This only affects functions that have numpy arrays as input variables. Setting this to 1 will cause all floating point values to be cast as float instead of double if all the Numeric arrays are of type float. If even one of the arrays has type double or double complex, all variables maintain their standard types. newarr_converter : int, optional Unused. Other Parameters ---------------- Relevant :mod:`distutils` keywords. These are duplicated from Greg Ward's :class:`distutils.extension.Extension` class for convenience: sources : [string] List of source filenames, relative to the distribution root (where the setup script lives), in Unix form (slash-separated) for portability. Source files may be C, C++, SWIG (.i), platform-specific resource files, or whatever else is recognized by the "build_ext" command as source for a Python extension. .. note:: The `module_path` file is always appended to the front of this list include_dirs : [string] List of directories to search for C/C++ header files (in Unix form for portability). 
define_macros : [(name : string, value : string|None)] List of macros to define; each macro is defined using a 2-tuple, where 'value' is either the string to define it to or None to define it without a particular value (equivalent of "#define FOO" in source or -DFOO on Unix C compiler command line). undef_macros : [string] List of macros to undefine explicitly. library_dirs : [string] List of directories to search for C/C++ libraries at link time. libraries : [string] List of library names (not filenames or paths) to link against. runtime_library_dirs : [string] List of directories to search for C/C++ libraries at run time (for shared extensions, this is when the extension is loaded). extra_objects : [string] List of extra files to link with (e.g. object files not implied by 'sources', static libraries that must be explicitly specified, binary resource files, etc.) extra_compile_args : [string] Any extra platform- and compiler-specific information to use when compiling the source files in 'sources'. For platforms and compilers where "command line" makes sense, this is typically a list of command-line arguments, but for other platforms it could be anything. extra_link_args : [string] Any extra platform- and compiler-specific information to use when linking object files together to create the extension (or to create a new static Python interpreter). Similar interpretation as for 'extra_compile_args'. export_symbols : [string] List of symbols to be exported from a shared extension. Not used on all platforms, and not generally necessary for Python extensions, which typically export exactly one symbol: "init" + extension_name. swig_opts : [string] Any extra options to pass to SWIG if a source file has the .i extension. depends : [string] List of files that the extension depends on. language : string Extension language (i.e. "c", "c++", "objc"). Will be detected from the source extensions if not provided. 
See Also -------- distutils.extension.Extension : Describes additional parameters. """ # this grabs the local variables from the *previous* call # frame -- that is the locals from the function that called # inline. global function_catalog call_frame = sys._getframe().f_back if local_dict is None: local_dict = call_frame.f_locals if global_dict is None: global_dict = call_frame.f_globals if force: module_dir = global_dict.get('__file__',None) func = compile_function(code,arg_names,local_dict, global_dict,module_dir, compiler=compiler, verbose=verbose, support_code=support_code, headers=headers, customize=customize, type_converters=type_converters, auto_downcast=auto_downcast, **kw) function_catalog.add_function(code,func,module_dir) results = attempt_function_call(code,local_dict,global_dict) else: # 1. try local cache try: results = apply(function_cache[code],(local_dict,global_dict)) return results except TypeError as msg: msg = str(msg).strip() if msg[:16] == "Conversion Error": pass else: raise TypeError(msg) except NameError as msg: msg = str(msg).strip() if msg[:16] == "Conversion Error": pass else: raise NameError(msg) except KeyError: pass # 2. try function catalog try: results = attempt_function_call(code,local_dict,global_dict) # 3. build the function except ValueError: # compile the library module_dir = global_dict.get('__file__',None) func = compile_function(code,arg_names,local_dict, global_dict,module_dir, compiler=compiler, verbose=verbose, support_code=support_code, headers=headers, customize=customize, type_converters=type_converters, auto_downcast=auto_downcast, **kw) function_catalog.add_function(code,func,module_dir) results = attempt_function_call(code,local_dict,global_dict) return results def attempt_function_call(code,local_dict,global_dict): # we try 3 levels here -- a local cache first, then the # catalog cache, and then persistent catalog. # global function_catalog # 1. 
try local cache try: results = apply(function_cache[code],(local_dict,global_dict)) return results except TypeError as msg: msg = str(msg).strip() if msg[:16] == "Conversion Error": pass else: raise TypeError(msg) except NameError as msg: msg = str(msg).strip() if msg[:16] == "Conversion Error": pass else: raise NameError(msg) except KeyError: pass # 2. try catalog cache. function_list = function_catalog.get_functions_fast(code) for func in function_list: try: results = apply(func,(local_dict,global_dict)) function_catalog.fast_cache(code,func) function_cache[code] = func return results except TypeError as msg: # should specify argument types here. # This should really have its own error type, instead of # checking the beginning of the message, but I don't know # how to define that yet. msg = str(msg) if msg[:16] == "Conversion Error": pass else: raise TypeError(msg) except NameError as msg: msg = str(msg).strip() if msg[:16] == "Conversion Error": pass else: raise NameError(msg) # 3. try persistent catalog module_dir = global_dict.get('__file__',None) function_list = function_catalog.get_functions(code,module_dir) for func in function_list: try: results = apply(func,(local_dict,global_dict)) function_catalog.fast_cache(code,func) function_cache[code] = func return results except: # should specify argument types here. pass # if we get here, the function wasn't found raise ValueError('function with correct signature not found') def inline_function_code(code,arg_names,local_dict=None, global_dict=None,auto_downcast=1, type_converters=None,compiler=''): call_frame = sys._getframe().f_back if local_dict is None: local_dict = call_frame.f_locals if global_dict is None: global_dict = call_frame.f_globals ext_func = inline_ext_function('compiled_func',code,arg_names, local_dict,global_dict,auto_downcast, type_converters=type_converters) from . 
import build_tools compiler = build_tools.choose_compiler(compiler) ext_func.set_compiler(compiler) return ext_func.function_code() def compile_function(code,arg_names,local_dict,global_dict, module_dir, compiler='', verbose=1, support_code=None, headers=[], customize=None, type_converters=None, auto_downcast=1, **kw): # figure out where to store and what to name the extension module # that will contain the function. # storage_dir = catalog.intermediate_dir() code = ndarray_api_version + '\n' + code module_path = function_catalog.unique_module_name(code, module_dir) storage_dir, module_name = os.path.split(module_path) mod = inline_ext_module(module_name,compiler) # create the function. This relies on the auto_downcast and # type factories setting ext_func = inline_ext_function('compiled_func',code,arg_names, local_dict,global_dict,auto_downcast, type_converters=type_converters) mod.add_function(ext_func) # if customize (a custom_info object), then set the module customization. if customize: mod.customize = customize # add the extra "support code" needed by the function to the module. if support_code: mod.customize.add_support_code(support_code) # add the extra headers needed by the function to the module. for header in headers: mod.customize.add_header(header) # it's nice to let the users know when anything gets compiled, as the # slowdown is very noticeable. if verbose > 0: print('<weave: compiling>') # compile code in correct location, with the given compiler and verbosity # setting. All input keywords are passed through to distutils mod.compile(location=storage_dir,compiler=compiler, verbose=verbose, **kw) # import the module and return the function. Make sure # the directory where it lives is in the python path. try: sys.path.insert(0,storage_dir) exec('import ' + module_name) func = eval(module_name+'.compiled_func') finally: del sys.path[0] return func
bsd-3-clause
JonSteinn/Kattis-Solutions
src/Muzicari/Python 3/main.py
1
1877
from collections import deque, Counter class StopRecursion(Exception): pass class Queue: def __init__(self, t): self.total = t self.used = 0 self.queue = deque() def push(self,index,time): if time + self.used > self.total: return False self.queue.append((index,self.used)) self.used += time return True def undo_push(self,time): self.queue.pop() self.used -= time def pop(self): return self.queue.popleft() def empty(self): return not self.queue class Queues: def __init__(self, t): self.first = Queue(t) self.second = Queue(t) def __iter__(self): yield self.first yield self.second def used(self): return self.first.used, self.second.used def __str__(self): return ' '.join(f'{x}' for _,x in sorted(self.first.queue + self.second.queue, key=lambda z: z[0])) def recursive_search(rest,queues,counter,mem): if not rest: print(queues) raise StopRecursion() stamp = (*queues.used(),counter[rest[-1][1]]) if stamp in mem: return index, time = rest.pop() for queue in queues: if queue.push(index,time): counter[time] -= 1 recursive_search(rest,queues,counter,mem) queue.undo_push(time) counter[time] += 1 rest.append((index,time)) mem.add(stamp) def schedule_rest(t,rest,counter): try: queues = Queues(t) memory = set() recursive_search(rest,queues,counter,memory) except StopRecursion: pass def main(): t,_ = map(int,input().split()) rest = sorted(enumerate(map(int,input().split())),key=lambda z: z[1]) counter = Counter(x for _,x in rest) schedule_rest(t,rest,counter) if __name__ == "__main__": main()
gpl-3.0
volyrique/FrameworkBenchmarks
frameworks/Python/crax/hello/app.py
8
3164
import os from operator import itemgetter from random import randint import asyncpg from crax import Crax from crax.response_types import BaseResponse, JSONResponse from crax.urls import Route, Url from crax.views import JSONView, TemplateView READ_ROW_SQL = 'SELECT "id", "randomnumber" FROM "world" WHERE id = $1' WRITE_ROW_SQL = 'UPDATE "world" SET "randomnumber"=$1 WHERE id=$2' async def setup_database(): global connection_pool connection_pool = await asyncpg.create_pool( user=os.getenv('PGUSER', 'benchmarkdbuser'), password=os.getenv('PGPASS', 'benchmarkdbpass'), database='hello_world', host='tfb-database', port=5432 ) def get_num_queries(request): try: query_count = int(request.query["queries"][0]) except (KeyError, IndexError, ValueError): return 1 if query_count < 1: return 1 if query_count > 500: return 500 return query_count class TestSingleQuery(JSONView): async def get(self): row_id = randint(1, 10000) async with connection_pool.acquire() as connection: if self.request.path == '/db': res = await connection.fetchval(READ_ROW_SQL, row_id) self.context = {'id': row_id, 'randomNumber': res} class TestMultiQueries(JSONView): async def get(self): row_ids = [randint(1, 10000) for _ in range(get_num_queries(self.request))] worlds = [] async with connection_pool.acquire() as connection: statement = await connection.prepare(READ_ROW_SQL) for row_id in row_ids: number = await statement.fetchval(row_id) worlds.append({'id': row_id, 'randomNumber': number}) self.context = worlds class TestUpdates(JSONView): async def get(self): updates = [(randint(1, 10000), randint(1, 10000)) for _ in range(get_num_queries(self.request))] worlds = [{'id': row_id, 'randomNumber': number} for row_id, number in updates] async with connection_pool.acquire() as connection: statement = await connection.prepare(READ_ROW_SQL) for row_id, number in updates: await statement.fetchval(row_id) await connection.executemany(WRITE_ROW_SQL, updates) self.context = worlds class 
TestSingleFortunes(TemplateView): template = "fortune.html" async def get(self): async with connection_pool.acquire() as connection: fortunes = await connection.fetch('SELECT * FROM Fortune') fortunes.append([0, 'Additional fortune added at request time.']) fortunes.sort(key=itemgetter(1)) self.context["fortunes"] = fortunes APPLICATIONS = ["hello"] URL_PATTERNS = [ Route(Url('/json'), JSONResponse(None, {'message': 'Hello, world!'})), Route(Url('/plaintext'), BaseResponse(None, b'Hello, world!')), Route(Url('/db'), TestSingleQuery), Route(Url('/queries'), TestMultiQueries), Route(Url('/updates'), TestUpdates), Route(Url('/fortunes'), TestSingleFortunes) ] app = Crax('hello.app', debug=True, on_startup=setup_database)
bsd-3-clause
msrb/samba
third_party/dnspython/dns/renderer.py
58
11910
# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose with or without fee is hereby granted, # provided that the above copyright notice and this permission notice # appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """Help for building DNS wire format messages""" import cStringIO import struct import random import time import dns.exception import dns.tsig QUESTION = 0 ANSWER = 1 AUTHORITY = 2 ADDITIONAL = 3 class Renderer(object): """Helper class for building DNS wire-format messages. Most applications can use the higher-level L{dns.message.Message} class and its to_wire() method to generate wire-format messages. This class is for those applications which need finer control over the generation of messages. 
Typical use:: r = dns.renderer.Renderer(id=1, flags=0x80, max_size=512) r.add_question(qname, qtype, qclass) r.add_rrset(dns.renderer.ANSWER, rrset_1) r.add_rrset(dns.renderer.ANSWER, rrset_2) r.add_rrset(dns.renderer.AUTHORITY, ns_rrset) r.add_edns(0, 0, 4096) r.add_rrset(dns.renderer.ADDTIONAL, ad_rrset_1) r.add_rrset(dns.renderer.ADDTIONAL, ad_rrset_2) r.write_header() r.add_tsig(keyname, secret, 300, 1, 0, '', request_mac) wire = r.get_wire() @ivar output: where rendering is written @type output: cStringIO.StringIO object @ivar id: the message id @type id: int @ivar flags: the message flags @type flags: int @ivar max_size: the maximum size of the message @type max_size: int @ivar origin: the origin to use when rendering relative names @type origin: dns.name.Name object @ivar compress: the compression table @type compress: dict @ivar section: the section currently being rendered @type section: int (dns.renderer.QUESTION, dns.renderer.ANSWER, dns.renderer.AUTHORITY, or dns.renderer.ADDITIONAL) @ivar counts: list of the number of RRs in each section @type counts: int list of length 4 @ivar mac: the MAC of the rendered message (if TSIG was used) @type mac: string """ def __init__(self, id=None, flags=0, max_size=65535, origin=None): """Initialize a new renderer. @param id: the message id @type id: int @param flags: the DNS message flags @type flags: int @param max_size: the maximum message size; the default is 65535. If rendering results in a message greater than I{max_size}, then L{dns.exception.TooBig} will be raised. @type max_size: int @param origin: the origin to use when rendering relative names @type origin: dns.name.Namem or None. 
""" self.output = cStringIO.StringIO() if id is None: self.id = random.randint(0, 65535) else: self.id = id self.flags = flags self.max_size = max_size self.origin = origin self.compress = {} self.section = QUESTION self.counts = [0, 0, 0, 0] self.output.write('\x00' * 12) self.mac = '' def _rollback(self, where): """Truncate the output buffer at offset I{where}, and remove any compression table entries that pointed beyond the truncation point. @param where: the offset @type where: int """ self.output.seek(where) self.output.truncate() keys_to_delete = [] for k, v in self.compress.iteritems(): if v >= where: keys_to_delete.append(k) for k in keys_to_delete: del self.compress[k] def _set_section(self, section): """Set the renderer's current section. Sections must be rendered order: QUESTION, ANSWER, AUTHORITY, ADDITIONAL. Sections may be empty. @param section: the section @type section: int @raises dns.exception.FormError: an attempt was made to set a section value less than the current section. """ if self.section != section: if self.section > section: raise dns.exception.FormError self.section = section def add_question(self, qname, rdtype, rdclass=dns.rdataclass.IN): """Add a question to the message. @param qname: the question name @type qname: dns.name.Name @param rdtype: the question rdata type @type rdtype: int @param rdclass: the question rdata class @type rdclass: int """ self._set_section(QUESTION) before = self.output.tell() qname.to_wire(self.output, self.compress, self.origin) self.output.write(struct.pack("!HH", rdtype, rdclass)) after = self.output.tell() if after >= self.max_size: self._rollback(before) raise dns.exception.TooBig self.counts[QUESTION] += 1 def add_rrset(self, section, rrset, **kw): """Add the rrset to the specified section. Any keyword arguments are passed on to the rdataset's to_wire() routine. 
@param section: the section @type section: int @param rrset: the rrset @type rrset: dns.rrset.RRset object """ self._set_section(section) before = self.output.tell() n = rrset.to_wire(self.output, self.compress, self.origin, **kw) after = self.output.tell() if after >= self.max_size: self._rollback(before) raise dns.exception.TooBig self.counts[section] += n def add_rdataset(self, section, name, rdataset, **kw): """Add the rdataset to the specified section, using the specified name as the owner name. Any keyword arguments are passed on to the rdataset's to_wire() routine. @param section: the section @type section: int @param name: the owner name @type name: dns.name.Name object @param rdataset: the rdataset @type rdataset: dns.rdataset.Rdataset object """ self._set_section(section) before = self.output.tell() n = rdataset.to_wire(name, self.output, self.compress, self.origin, **kw) after = self.output.tell() if after >= self.max_size: self._rollback(before) raise dns.exception.TooBig self.counts[section] += n def add_edns(self, edns, ednsflags, payload, options=None): """Add an EDNS OPT record to the message. @param edns: The EDNS level to use. @type edns: int @param ednsflags: EDNS flag values. @type ednsflags: int @param payload: The EDNS sender's payload field, which is the maximum size of UDP datagram the sender can handle. 
@type payload: int @param options: The EDNS options list @type options: list of dns.edns.Option instances @see: RFC 2671 """ # make sure the EDNS version in ednsflags agrees with edns ednsflags &= 0xFF00FFFFL ednsflags |= (edns << 16) self._set_section(ADDITIONAL) before = self.output.tell() self.output.write(struct.pack('!BHHIH', 0, dns.rdatatype.OPT, payload, ednsflags, 0)) if not options is None: lstart = self.output.tell() for opt in options: stuff = struct.pack("!HH", opt.otype, 0) self.output.write(stuff) start = self.output.tell() opt.to_wire(self.output) end = self.output.tell() assert end - start < 65536 self.output.seek(start - 2) stuff = struct.pack("!H", end - start) self.output.write(stuff) self.output.seek(0, 2) lend = self.output.tell() assert lend - lstart < 65536 self.output.seek(lstart - 2) stuff = struct.pack("!H", lend - lstart) self.output.write(stuff) self.output.seek(0, 2) after = self.output.tell() if after >= self.max_size: self._rollback(before) raise dns.exception.TooBig self.counts[ADDITIONAL] += 1 def add_tsig(self, keyname, secret, fudge, id, tsig_error, other_data, request_mac, algorithm=dns.tsig.default_algorithm): """Add a TSIG signature to the message. @param keyname: the TSIG key name @type keyname: dns.name.Name object @param secret: the secret to use @type secret: string @param fudge: TSIG time fudge @type fudge: int @param id: the message id to encode in the tsig signature @type id: int @param tsig_error: TSIG error code; default is 0. @type tsig_error: int @param other_data: TSIG other data. @type other_data: string @param request_mac: This message is a response to the request which had the specified MAC. 
@type request_mac: string @param algorithm: the TSIG algorithm to use @type algorithm: dns.name.Name object """ self._set_section(ADDITIONAL) before = self.output.tell() s = self.output.getvalue() (tsig_rdata, self.mac, ctx) = dns.tsig.sign(s, keyname, secret, int(time.time()), fudge, id, tsig_error, other_data, request_mac, algorithm=algorithm) keyname.to_wire(self.output, self.compress, self.origin) self.output.write(struct.pack('!HHIH', dns.rdatatype.TSIG, dns.rdataclass.ANY, 0, 0)) rdata_start = self.output.tell() self.output.write(tsig_rdata) after = self.output.tell() assert after - rdata_start < 65536 if after >= self.max_size: self._rollback(before) raise dns.exception.TooBig self.output.seek(rdata_start - 2) self.output.write(struct.pack('!H', after - rdata_start)) self.counts[ADDITIONAL] += 1 self.output.seek(10) self.output.write(struct.pack('!H', self.counts[ADDITIONAL])) self.output.seek(0, 2) def write_header(self): """Write the DNS message header. Writing the DNS message header is done asfter all sections have been rendered, but before the optional TSIG signature is added. """ self.output.seek(0) self.output.write(struct.pack('!HHHHHH', self.id, self.flags, self.counts[0], self.counts[1], self.counts[2], self.counts[3])) self.output.seek(0, 2) def get_wire(self): """Return the wire format message. @rtype: string """ return self.output.getvalue()
gpl-3.0
tiagofrepereira2012/bob.extension
bob/extension/boost.py
1
6850
#!/usr/bin/env python
# encoding: utf-8
# Andre Anjos <andre.anjos@idiap.ch>
# Thu Mar 20 12:38:14 CET 2014

"""Helps looking for Boost on stock file-system locations"""

import os
import re
import sys
import glob

from distutils.version import LooseVersion

from .utils import uniq, egrep, find_header, find_library


def boost_version(version_hpp):
  """Returns the Boost version encoded in a ``boost/version.hpp`` file.

  Parses the ``BOOST_VERSION`` macro, which encodes the version as
  ``major*100000 + minor*100 + patch``, and returns it as a dotted string
  such as ``'1.55.0'``, or ``None`` if the macro cannot be found.
  """

  matches = egrep(version_hpp, r"^#\s*define\s+BOOST_VERSION\s+(\d+)\s*$")
  if not len(matches): return None

  # we have a match, produce a string version of the version number
  version_int = int(matches[0].group(1))
  version_tuple = (
      version_int // 100000,
      (version_int // 100) % 1000,
      version_int % 100,
      )

  return '.'.join([str(k) for k in version_tuple])


class boost:
  """A class for capturing configuration information from boost

  Example usage:

  .. doctest::
     :options: +NORMALIZE_WHITESPACE +ELLIPSIS

     >>> from bob.extension import boost
     >>> pkg = boost('>= 1.35')
     >>> pkg.include_directory
     '...'
     >>> pkg.version
     '...'

  You can also use this class to retrieve information about installed Boost
  libraries and link information:

  .. doctest::
     :options: +NORMALIZE_WHITESPACE +ELLIPSIS

     >>> from bob.extension import boost
     >>> pkg = boost('>= 1.35')
     >>> pkg.libconfig(['python', 'system'])
     (...)

  """

  def __init__ (self, requirement=''):
    """
    Searches for the Boost library in stock locations. Allows user to override.

    If the user sets the environment variable BOB_PREFIX_PATH, that prefixes
    the standard path locations.

    @param requirement: optional version constraint such as ``'>= 1.35'``;
      when empty, the first Boost installation found is accepted.
    @raises RuntimeError: if no (matching) Boost installation can be found.
    """

    candidates = find_header('version.hpp', subpaths=['boost', 'boost?*'])

    if not candidates:
      raise RuntimeError("could not find boost's `version.hpp' - have you installed Boost on this machine?")

    found = False

    if not requirement:
      # since we use boost headers **including the boost/ directory**, we need
      # to go one level lower
      self.include_directory = os.path.dirname(os.path.dirname(candidates[0]))
      self.version = boost_version(candidates[0])
      found = True

    else:

      # requirement is 'operator' 'version'
      operator, required = [k.strip() for k in requirement.split(' ', 1)]

      # now check for user requirements
      for path in candidates:
        version = boost_version(path)
        available = LooseVersion(version)
        if (operator == '<' and available < required) or \
           (operator == '<=' and available <= required) or \
           (operator == '>' and available > required) or \
           (operator == '>=' and available >= required) or \
           (operator == '==' and available == required):
          # Bug fix: this branch used to assign the path of `version.hpp`
          # itself to `include_directory`.  Store the actual include
          # directory instead (two levels up, so `#include <boost/...>`
          # resolves), consistent with the unconstrained branch above.
          self.include_directory = os.path.dirname(os.path.dirname(path))
          self.version = version
          found = True
          break

    if not found:
      raise RuntimeError("could not find the required (%s) version of boost on the file system (looked at: %s)" % (requirement, ', '.join(candidates)))

    # normalize
    self.include_directory = os.path.normpath(self.include_directory)

  def libconfig(self, modules, only_static=False,
      templates=['boost_%(name)s-mt-%(py)s', 'boost_%(name)s-%(py)s',
        'boost_%(name)s-mt', 'boost_%(name)s']):
    """Returns a tuple containing the library configuration for requested
    modules.

    This function respects the path location where the include files for
    Boost are installed.

    Parameters:

    modules (list of strings)
      A list of strings specifying the requested libraries to search for. For
      example, to search for `libboost_mpi.so`, pass only ``mpi``.

    only_static (bool)
      A boolean, indicating if we should try only to search for static
      versions of the libraries. If not set, any would do.

    templates (list of template strings)
      A list that defines in which order to search for libraries on the
      default search path, defined by ``self.include_directory``. Tune this
      list if you have compiled specific versions of Boost with support to
      multi-threading (``-mt``), debug (``-g``), STLPORT (``-p``) or required
      to insert compiler, the underlying thread API used or your own
      namespace.

      Here are the keywords you can use:

      %(name)s
        resolves to the module name you are searching for

      %(ver)s
        resolves to the current boost version string (e.g. ``'1.50.0'``)

      %(py)s
        resolves to the string ``'pyXY'`` where ``XY`` represent the major
        and minor versions of the current python interpreter.

      Example templates:

      * ``'boost_%(name)s-mt'``
      * ``'boost_%(name)s'``
      * ``'boost_%(name)s-gcc43-%(ver)s'``

    Returns:

    directories (list of strings)
      A list of directories indicating where the libraries are installed

    libs (list of strings)
      A list of strings indicating the names of the libraries you can use

    @raises RuntimeError: if one of the requested modules cannot be found.
    """

    # make the include header prefix preferential
    prefix = os.path.dirname(self.include_directory)

    py = 'py%d%d' % sys.version_info[:2]

    filenames = []
    for module in modules:
      candidates = []
      modnames = [k % dict(name=module, ver=self.version, py=py) for k in
          templates]

      for modname in modnames:
        candidates += find_library(modname, version=self.version,
            prefixes=[prefix], only_static=only_static)

      if not candidates:
        raise RuntimeError("cannot find required boost module `%s' - make sure boost is installed on `%s' and that this module is named %s on the filesystem" % (module, prefix, ' or '.join(modnames)))

      # take the first choice that includes the prefix (or the absolute first
      # choice otherwise)
      index = 0
      for i, candidate in enumerate(candidates):
        if candidate.find(prefix) == 0:
          index = i
          break
      filenames.append(candidates[index])

    # libraries
    libraries = []
    for f in filenames:
      name, ext = os.path.splitext(os.path.basename(f))
      if ext in ['.so', '.a', '.dylib', '.dll']:
        libraries.append(name[3:]) #strip 'lib' from the name
      else: #link against the whole thing
        libraries.append(':' + os.path.basename(f))

    # library paths
    libpaths = [os.path.dirname(k) for k in filenames]

    return uniq(libpaths), uniq(libraries)

  def macros(self):
    """Returns package availability and version number macros

    This method returns a python list with 2 macros indicating package
    availability and a version number, using standard GNU compatible names.
    Example:

    .. doctest::
       :options: +NORMALIZE_WHITESPACE +ELLIPSIS

       >>> from bob.extension import boost
       >>> pkg = boost('>= 1.34')
       >>> pkg.macros()
       [('HAVE_BOOST', '1'), ('BOOST_VERSION', '"..."')]

    """
    return [
        ('HAVE_BOOST', '1'),
        ('BOOST_VERSION', '"%s"' % self.version),
        ]
bsd-3-clause
CESNET/pOCCI
doc/conf.py
3
7678
# -*- coding: utf-8 -*- # import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('..')) import pOCCI # -- General configuration ------------------------------------------------ # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.viewcode', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'pOCCI' copyright = u'2015, CESNET' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # version = pOCCI.__version__ release = pOCCI.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. 
#add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. 
These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'poccidoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). 
latex_documents = [ ('index', 'pocci.tex', u'pOCCI Documentation', u'CESNET', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'pocci', u'pOCCI Documentation', [u'CESNET'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'pocci', u'pOCCI Documentation', u'CESNET', 'pocci', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False
mit
kcpawan/django
tests/get_object_or_404/models.py
409
1133
""" DB-API Shortcuts ``get_object_or_404()`` is a shortcut function to be used in view functions for performing a ``get()`` lookup and raising a ``Http404`` exception if a ``DoesNotExist`` exception was raised during the ``get()`` call. ``get_list_or_404()`` is a shortcut function to be used in view functions for performing a ``filter()`` lookup and raising a ``Http404`` exception if a ``DoesNotExist`` exception was raised during the ``filter()`` call. """ from django.db import models from django.utils.encoding import python_2_unicode_compatible @python_2_unicode_compatible class Author(models.Model): name = models.CharField(max_length=50) def __str__(self): return self.name class ArticleManager(models.Manager): def get_queryset(self): return super(ArticleManager, self).get_queryset().filter(authors__name__icontains='sir') @python_2_unicode_compatible class Article(models.Model): authors = models.ManyToManyField(Author) title = models.CharField(max_length=50) objects = models.Manager() by_a_sir = ArticleManager() def __str__(self): return self.title
bsd-3-clause
pipermerriam/django
tests/ordering/models.py
261
1379
""" Specifying ordering Specify default ordering for a model using the ``ordering`` attribute, which should be a list or tuple of field names. This tells Django how to order ``QuerySet`` results. If a field name in ``ordering`` starts with a hyphen, that field will be ordered in descending order. Otherwise, it'll be ordered in ascending order. The special-case field name ``"?"`` specifies random order. The ordering attribute is not required. If you leave it off, ordering will be undefined -- not random, just undefined. """ from django.db import models from django.utils.encoding import python_2_unicode_compatible class Author(models.Model): class Meta: ordering = ('-pk',) @python_2_unicode_compatible class Article(models.Model): author = models.ForeignKey(Author, models.SET_NULL, null=True) second_author = models.ForeignKey(Author, models.SET_NULL, null=True) headline = models.CharField(max_length=100) pub_date = models.DateTimeField() class Meta: ordering = ('-pub_date', 'headline') def __str__(self): return self.headline class OrderedByAuthorArticle(Article): class Meta: proxy = True ordering = ('author', 'second_author') class Reference(models.Model): article = models.ForeignKey(OrderedByAuthorArticle, models.CASCADE) class Meta: ordering = ('article',)
bsd-3-clause
ryano144/intellij-community
python/lib/Lib/encodings/iso2022_jp_3.py
816
1061
#
# iso2022_jp_3.py: Python Unicode Codec for ISO2022_JP_3
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#

# The actual ISO-2022-JP-3 state machine lives in the C extension; this
# module only wires that codec into the standard `codecs` class hierarchy.
import _codecs_iso2022
import _multibytecodec as mbc
import codecs

# Shared low-level codec object backing every class below.
codec = _codecs_iso2022.getcodec('iso2022_jp_3')


class Codec(codecs.Codec):
    # Stateless one-shot encode/decode entry points.
    encode = codec.encode
    decode = codec.decode


class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    codec = codec


class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    codec = codec


class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    codec = codec


class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    codec = codec


def getregentry():
    """Build the CodecInfo record expected by the codec registry."""
    entry = codecs.CodecInfo(
        name='iso2022_jp_3',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
    return entry
apache-2.0
lihui7115/ChromiumGStreamerBackend
chrome/common/extensions/docs/server2/content_provider_test.py
77
7815
#!/usr/bin/env python # Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from cStringIO import StringIO import json import unittest from zipfile import ZipFile from compiled_file_system import CompiledFileSystem from content_provider import ContentProvider from file_system import FileNotFoundError from object_store_creator import ObjectStoreCreator from path_canonicalizer import PathCanonicalizer from test_file_system import TestFileSystem from third_party.motemplate import Motemplate _REDIRECTS_JSON = json.dumps({ 'oldfile.html': 'storage.html', 'index.html': 'https://developers.google.com/chrome', }) _MARKDOWN_CONTENT = ( ('# Header 1 #', u'<h1 id="header-1">Header 1</h1>'), ('1. Foo\n', u'<ol>\n<li>Foo</li>\n</ol>'), ('![alt text](/path/img.jpg "Title")\n', '<p><img alt="alt text" src="/path/img.jpg" title="Title" /></p>'), ('* Unordered item 1', u'<ul>\n<li>Unordered item 1</li>\n</ul>') ) # Test file system data which exercises many different mimetypes. 
_TEST_DATA = { 'dir': { 'a.txt': 'a.txt content', 'b.txt': 'b.txt content', 'c': { 'd.txt': 'd.txt content', }, }, 'dir2': { 'dir3': { 'a.txt': 'a.txt content', 'b.txt': 'b.txt content', 'c': { 'd.txt': 'd.txt content', }, }, }, 'dir4': { 'index.html': 'index.html content 1' }, 'dir5': { 'index.html': 'index.html content 2' }, 'dir6': { 'notindex.html': 'notindex.html content' }, 'dir7': { 'index.md': '\n'.join(text[0] for text in _MARKDOWN_CONTENT) }, 'dir.txt': 'dir.txt content', 'dir5.html': 'dir5.html content', 'img.png': 'img.png content', 'index.html': 'index.html content', 'read.txt': 'read.txt content', 'redirects.json': _REDIRECTS_JSON, 'noextension': 'noextension content', 'run.js': 'run.js content', 'site.css': 'site.css content', 'storage.html': 'storage.html content', 'markdown.md': '\n'.join(text[0] for text in _MARKDOWN_CONTENT) } class ContentProviderUnittest(unittest.TestCase): def setUp(self): self._test_file_system = TestFileSystem(_TEST_DATA) self._content_provider = self._CreateContentProvider() def _CreateContentProvider(self, supports_zip=False): object_store_creator = ObjectStoreCreator.ForTest() return ContentProvider( 'foo', CompiledFileSystem.Factory(object_store_creator), self._test_file_system, object_store_creator, default_extensions=('.html', '.md'), # TODO(kalman): Test supports_templates=False. supports_templates=True, supports_zip=supports_zip) def _assertContent(self, content, content_type, content_and_type): # Assert type so that str is differentiated from unicode. 
self.assertEqual(type(content), type(content_and_type.content)) self.assertEqual(content, content_and_type.content) self.assertEqual(content_type, content_and_type.content_type) def _assertTemplateContent(self, content, path, version): content_and_type = self._content_provider.GetContentAndType(path).Get() self.assertEqual(Motemplate, type(content_and_type.content)) content_and_type.content = content_and_type.content.source self._assertContent(content, 'text/html', content_and_type) self.assertEqual(version, self._content_provider.GetVersion(path).Get()) def _assertMarkdownContent(self, content, path, version): content_and_type = self._content_provider.GetContentAndType(path).Get() content_and_type.content = content_and_type.content.source self._assertContent(content, 'text/html', content_and_type) self.assertEqual(version, self._content_provider.GetVersion(path).Get()) def testPlainText(self): self._assertContent( u'a.txt content', 'text/plain', self._content_provider.GetContentAndType('dir/a.txt').Get()) self._assertContent( u'd.txt content', 'text/plain', self._content_provider.GetContentAndType('dir/c/d.txt').Get()) self._assertContent( u'read.txt content', 'text/plain', self._content_provider.GetContentAndType('read.txt').Get()) self._assertContent( unicode(_REDIRECTS_JSON, 'utf-8'), 'application/json', self._content_provider.GetContentAndType('redirects.json').Get()) self._assertContent( u'run.js content', 'application/javascript', self._content_provider.GetContentAndType('run.js').Get()) self._assertContent( u'site.css content', 'text/css', self._content_provider.GetContentAndType('site.css').Get()) def testTemplate(self): self._assertTemplateContent(u'storage.html content', 'storage.html', '0') self._test_file_system.IncrementStat('storage.html') self._assertTemplateContent(u'storage.html content', 'storage.html', '1') def testImage(self): self._assertContent( 'img.png content', 'image/png', self._content_provider.GetContentAndType('img.png').Get()) def 
testZipTopLevel(self): zip_content_provider = self._CreateContentProvider(supports_zip=True) content_and_type = zip_content_provider.GetContentAndType('dir.zip').Get() zipfile = ZipFile(StringIO(content_and_type.content)) content_and_type.content = zipfile.namelist() self._assertContent( ['dir/a.txt', 'dir/b.txt', 'dir/c/d.txt'], 'application/zip', content_and_type) def testZip2ndLevel(self): zip_content_provider = self._CreateContentProvider(supports_zip=True) content_and_type = zip_content_provider.GetContentAndType( 'dir2/dir3.zip').Get() zipfile = ZipFile(StringIO(content_and_type.content)) content_and_type.content = zipfile.namelist() self._assertContent( ['dir3/a.txt', 'dir3/b.txt', 'dir3/c/d.txt'], 'application/zip', content_and_type) def testCanonicalZipPaths(self): # Without supports_zip the path is canonicalized as a file. self.assertEqual( 'dir.txt', self._content_provider.GetCanonicalPath('dir.zip')) self.assertEqual( 'dir.txt', self._content_provider.GetCanonicalPath('diR.zip')) # With supports_zip the path is canonicalized as the zip file which # corresponds to the canonical directory. 
zip_content_provider = self._CreateContentProvider(supports_zip=True) self.assertEqual( 'dir.zip', zip_content_provider.GetCanonicalPath('dir.zip')) self.assertEqual( 'dir.zip', zip_content_provider.GetCanonicalPath('diR.zip')) def testMarkdown(self): expected_content = '\n'.join(text[1] for text in _MARKDOWN_CONTENT) self._assertMarkdownContent(expected_content, 'markdown', '0') self._test_file_system.IncrementStat('markdown.md') self._assertMarkdownContent(expected_content, 'markdown', '1') def testNotFound(self): self.assertRaises( FileNotFoundError, self._content_provider.GetContentAndType('oops').Get) def testIndexRedirect(self): self._assertTemplateContent(u'index.html content', '', '0') self._assertTemplateContent(u'index.html content 1', 'dir4', '0') self._assertTemplateContent(u'dir5.html content', 'dir5', '0') self._assertMarkdownContent( '\n'.join(text[1] for text in _MARKDOWN_CONTENT), 'dir7', '0') self._assertContent( 'noextension content', 'text/plain', self._content_provider.GetContentAndType('noextension').Get()) self.assertRaises( FileNotFoundError, self._content_provider.GetContentAndType('dir6').Get) def testRefresh(self): # Not entirely sure what to test here, but get some code coverage. self._content_provider.Refresh().Get() if __name__ == '__main__': unittest.main()
bsd-3-clause
fthuin/artificial-intelligence
assignment2/aima-python3/utils.py
3
2677
"""Provide some widely useful utilities. "from utils import *". """ import bisect #______________________________________________________________________________ # Queues: Stack, FIFOQueue, PriorityQueue class Queue: """Queue is an abstract class/interface. There are three types: Stack(): A Last In First Out Queue. FIFOQueue(): A First In First Out Queue. PriorityQueue(lt): Queue where items are sorted by lt, (default <). Each type supports the following methods and functions: q.append(item) -- add an item to the queue q.extend(items) -- equivalent to: for item in items: q.append(item) q.pop() -- return the top item from the queue len(q) -- number of items in q (also q.__len()) Note that isinstance(Stack(), Queue) is false, because we implement stacks as lists. If Python ever gets interfaces, Queue will be an interface.""" def __init__(self): abstract def extend(self, items): for item in items: self.append(item) def Stack(): """Return an empty list, suitable as a Last-In-First-Out Queue.""" return [] class FIFOQueue(Queue): """A First-In-First-Out Queue.""" def __init__(self): self.A = []; self.start = 0 def append(self, item): self.A.append(item) def __len__(self): return len(self.A) - self.start def extend(self, items): self.A.extend(items) def pop(self): e = self.A[self.start] self.start += 1 if self.start > 5 and self.start > len(self.A)/2: self.A = self.A[self.start:] self.start = 0 return e class PriorityQueueElmt: """ The elements of the priority queue """ def __init__(self,val,e): self.val = val self.e = e def __lt__(self,other): return self.val < other.val def value(self): return self.val def elem(self): return self.e class PriorityQueue(Queue): """A queue in which the minimum (or maximum) element (as determined by f and order) is returned first. 
If order is min, the item with minimum f(x) is returned first; if order is max, then it is the item with maximum f(x).""" def __init__(self, f, order=min): self.A=[] self.order=order self.f=f def append(self, item): queueElmt = PriorityQueueElmt(self.f(item),item) bisect.insort(self.A, queueElmt) def __len__(self): return len(self.A) def pop(self): if self.order == min: return self.A.pop(0).elem() else: return self.A.pop().elem()
mit
jaez/finna-be-octo-robot
share/qt/make_spinner.py
4415
1035
#!/usr/bin/env python
# W.J. van der Laan, 2011
"""Make spinning .mng animation from a .png.

Renders NUMFRAMES rotated, downscaled copies of SRC into TMPDIR, then invokes
ImageMagick's `convert` to assemble them into the DST animation.
Requires imagemagick 6.7+ and PIL/Pillow.
"""
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen

SRC='img/reload_scaled.png'                        # source image
DST='../../src/qt/res/movies/update_spinner.mng'   # output animation
TMPDIR='/tmp'                                      # where frames are written
TMPNAME='tmp-%03i.png'                             # frame filename pattern
NUMFRAMES=35                                       # rotation steps per turn
FRAMERATE=10.0                                     # -delay passed to convert
CONVERT='convert'                                  # ImageMagick binary
CLOCKWISE=True                                     # spin direction
DSIZE=(16,16)                                      # output frame size

im_src = Image.open(SRC)

# Mirroring first makes the positive-angle rotation below look clockwise.
if CLOCKWISE:
    im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)

def frame_to_filename(frame):
    """Return the temporary-file path for frame number `frame`."""
    return path.join(TMPDIR, TMPNAME % frame)

frame_files = []
# Bug fix: `xrange` is Python-2-only; `range` behaves identically here and
# also works under Python 3.
for frame in range(NUMFRAMES):
    # Sample the rotation at the middle of each step.
    rotation = (frame + 0.5) / NUMFRAMES * 360.0
    if CLOCKWISE:
        rotation = -rotation
    im_new = im_src.rotate(rotation, Image.BICUBIC)
    # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; when upgrading
    # Pillow, switch to Image.LANCZOS (same filter) — left untouched here
    # since this script targets old PIL.
    im_new.thumbnail(DSIZE, Image.ANTIALIAS)
    outfile = frame_to_filename(frame)
    im_new.save(outfile, 'png')
    frame_files.append(outfile)

p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
mit
adborden/django-calaccess-raw-data
calaccess_raw/management/commands/loadcalaccessrawfile.py
27
6471
from __future__ import unicode_literals
import six
from csvkit import CSVKitReader
from django.db import connection
from django.conf import settings
from postgres_copy import CopyMapping
from django.db.models.loading import get_model
from calaccess_raw.management.commands import CalAccessCommand
from django.core.management.base import LabelCommand, CommandError


class Command(CalAccessCommand, LabelCommand):
    """Load a cleaned CAL-ACCESS CSV into its corresponding database model.

    Dispatches to a backend-specific bulk loader (MySQL ``LOAD DATA INFILE``
    or PostgreSQL ``COPY`` via postgres_copy), and optionally mirrors the
    data into a dat store first.
    """
    help = 'Load clean CAL-ACCESS file into its corresponding database model'
    args = '<model name>'

    # Trick for reformating date strings in source data so that they can
    # be gobbled up by MySQL. You'll see how below.
    date_sql = "DATE_FORMAT(str_to_date(@`%s`, '%%c/%%e/%%Y'), '%%Y-%%m-%%d')"
    datetime_sql = "DATE_FORMAT(str_to_date(@`%s`, '%%c/%%e/%%Y \
%%h:%%i:%%s %%p'), '%%Y-%%m-%%d %%H:%%i:%%s')"

    def handle_label(self, label, **options):
        """Entry point: load the CSV for the model named by ``label``."""
        self.verbosity = options.get("verbosity")
        self.cursor = connection.cursor()
        self.load(label)

    def load(self, model_name):
        """
        Loads the source CSV for the provided model.

        Raises CommandError when the configured database backend is neither
        MySQL nor PostgreSQL.
        """
        if self.verbosity > 2:
            self.log(" Loading %s" % model_name)

        model = get_model("calaccess_raw", model_name)
        csv_path = model.objects.get_csv_path()

        # Optionally mirror into dat first (datpy is Python 2 only); the
        # regular database load below still runs afterwards.
        if settings.DATABASES.get('dat') and six.PY2:
            self.load_dat(model, csv_path)

        engine = settings.DATABASES['default']['ENGINE']
        if engine == 'django.db.backends.mysql':
            self.load_mysql(model, csv_path)
        # BUG FIX: the original tuple was missing the comma between the two
        # engine strings, so Python concatenated them into ONE string and the
        # `in` test became an accidental substring match. Now a real tuple.
        elif engine in (
            'django.db.backends.postgresql_psycopg2',
            'django.contrib.gis.db.backends.postgis',
        ):
            self.load_postgresql(model, csv_path)
        else:
            self.failure("Sorry your database engine is unsupported")
            raise CommandError(
                "Only MySQL and PostgresSQL backends supported."
            )

    def load_dat(self, model, csv_path):
        """
        Takes a model and a csv_path and loads it into dat
        """
        import datpy
        dat_source = settings.DATABASES.get('dat')
        self.dat = datpy.Dat(dat_source['source'])
        dataset = self.dat.dataset(model._meta.db_table)
        try:
            dataset.import_file(csv_path, format='csv')
            dat_status = self.dat.status()
            model_count = dat_status['rows']
            csv_count = self.get_row_count(csv_path)
            self.finish_load_message(model_count, csv_count)
        except datpy.DatException:
            raise CommandError(
                'Failed to load dat for %s, %s' % (
                    model._meta.db_table,
                    csv_path
                )
            )

    def load_mysql(self, model, csv_path):
        """
        Bulk-load the CSV into MySQL with LOAD DATA INFILE, reformatting
        date/datetime columns on the way in.
        """
        import warnings
        import MySQLdb
        warnings.filterwarnings("ignore", category=MySQLdb.Warning)

        # Flush the target model
        self.cursor.execute('TRUNCATE TABLE %s' % model._meta.db_table)

        # Build the MySQL LOAD DATA INFILE command
        bulk_sql_load_part_1 = """
            LOAD DATA LOCAL INFILE '%s'
            INTO TABLE %s
            FIELDS TERMINATED BY ','
            OPTIONALLY ENCLOSED BY '"'
            LINES TERMINATED BY '\\n'
            IGNORE 1 LINES
            (
        """ % (
            csv_path,
            model._meta.db_table
        )

        # Get the headers and the row count from the source CSV
        csv_headers = self.get_headers(csv_path)
        csv_record_cnt = self.get_row_count(csv_path)

        header_sql_list = []
        field_types = dict(
            (f.db_column, f.db_type(connection))
            for f in model._meta.fields
        )
        date_set_list = []

        for h in csv_headers:
            # Pull the data type of the field
            data_type = field_types[h]
            # If it is a date field, we need to reformat the data
            # so that MySQL will properly parse it on the way in.
            if data_type == 'date':
                header_sql_list.append('@`%s`' % h)
                date_set_list.append(
                    "`%s` =  %s" % (h, self.date_sql % h)
                )
            elif data_type == 'datetime':
                header_sql_list.append('@`%s`' % h)
                date_set_list.append(
                    "`%s` = %s" % (h, self.datetime_sql % h)
                )
            else:
                header_sql_list.append('`%s`' % h)

        bulk_sql_load = bulk_sql_load_part_1 + ','.join(header_sql_list) + ')'
        if date_set_list:
            bulk_sql_load += " set %s" % ",".join(date_set_list)

        # Run the query
        cnt = self.cursor.execute(bulk_sql_load)

        # Report back on how we did
        self.finish_load_message(cnt, csv_record_cnt)

    def load_postgresql(self, model, csv_path):
        """
        Takes a model and a csv_path and loads it into postgresql
        """
        # Drop all the records from the target model's real table
        self.cursor.execute('TRUNCATE TABLE "%s" CASCADE' % (
            model._meta.db_table
        ))

        # COPY the CSV in, mapping model field names to CSV columns.
        c = CopyMapping(
            model,
            csv_path,
            dict((f.name, f.db_column) for f in model._meta.fields),
        )
        c.save(silent=True)

        # Print out the results
        csv_count = self.get_row_count(csv_path)
        model_count = model.objects.count()
        self.finish_load_message(model_count, csv_count)

    def get_headers(self, csv_path):
        """
        Returns the column headers from the csv as a list.
        """
        with open(csv_path, 'r') as infile:
            csv_reader = CSVKitReader(infile)
            headers = next(csv_reader)
        return headers

    def get_row_count(self, csv_path):
        """
        Returns the number of rows in the file, not counting headers.
        """
        with open(csv_path) as infile:
            row_count = len(infile.readlines()) - 1
        return row_count

    def finish_load_message(self, model_count, csv_count):
        """
        The message displayed about whether or not a load finished
        successfully.
        """
        if self.verbosity:
            if model_count != csv_count and self.verbosity > 2:
                msg = '  Table record count doesn\'t match CSV. \
Table: %s\tCSV: %s'
                self.failure(msg % (
                    model_count,
                    csv_count,
                ))
mit
archlinux/arch-security-tracker
tracker/view/show.py
2
27377
from collections import OrderedDict from collections import defaultdict from flask import redirect from flask import render_template from flask_login import current_user from jinja2.utils import escape from sqlalchemy import and_ from sqlalchemy_continuum import version_class from sqlalchemy_continuum import versioning_manager from config import TRACKER_ADVISORY_URL from config import TRACKER_BUGTRACKER_URL from config import TRACKER_GROUP_URL from config import TRACKER_ISSUE_URL from config import TRACKER_LOG_ENTRIES_PER_PAGE from config import TRACKER_SUMMARY_LENGTH_MAX from tracker import db from tracker import tracker from tracker.advisory import advisory_escape_html from tracker.advisory import advisory_extend_html from tracker.advisory import advisory_format_issue_listing from tracker.form.advisory import AdvisoryForm from tracker.model import CVE from tracker.model import Advisory from tracker.model import CVEGroup from tracker.model import CVEGroupEntry from tracker.model import CVEGroupPackage from tracker.model import Package from tracker.model.advisory import advisory_regex from tracker.model.cve import cve_id_regex from tracker.model.cvegroup import pkgname_regex from tracker.model.cvegroup import vulnerability_group_regex from tracker.model.enum import Publication from tracker.model.enum import Remote from tracker.model.enum import Status from tracker.model.package import filter_duplicate_packages from tracker.model.package import sort_packages from tracker.user import user_can_delete_group from tracker.user import user_can_delete_issue from tracker.user import user_can_edit_group from tracker.user import user_can_edit_issue from tracker.user import user_can_handle_advisory from tracker.user import user_can_watch_log from tracker.user import user_can_watch_user_log from tracker.util import json_response from tracker.util import multiline_to_list from tracker.view.error import not_found def get_bug_project(databases): bug_project_mapping = { 1: ['core', 
'extra', 'testing'], 5: ['community', 'community-testing', 'multilib', 'multilib-testing'] } for category, repos in bug_project_mapping.items(): if all((database in repos for database in databases)): return category # Fallback return 1 def get_bug_data(cves, pkgs, versions, group): references = [] references = [ref for ref in multiline_to_list(group.reference) if ref not in references] list(map(lambda issue: references.extend( [ref for ref in multiline_to_list(issue.reference) if ref not in references]), cves)) severity_sorted_issues = sorted(cves, key=lambda issue: issue.issue_type) severity_sorted_issues = sorted(severity_sorted_issues, key=lambda issue: issue.severity) unique_issue_types = [] for issue in severity_sorted_issues: if issue.issue_type not in unique_issue_types: unique_issue_types.append(issue.issue_type) bug_desc = render_template('bug.txt', cves=cves, group=group, references=references, pkgs=pkgs, unique_issue_types=unique_issue_types, TRACKER_ISSUE_URL=TRACKER_ISSUE_URL, TRACKER_GROUP_URL=TRACKER_GROUP_URL) pkg_str = ' '.join((pkg.pkgname for pkg in pkgs)) group_type = 'multiple issues' if len(unique_issue_types) > 1 else unique_issue_types[0] summary = '[{}] [Security] {} ({})'.format(pkg_str, group_type, ' '.join([cve.id for cve in cves])) if TRACKER_SUMMARY_LENGTH_MAX != 0 and len(summary) > TRACKER_SUMMARY_LENGTH_MAX: summary = "[{}] [Security] {} (Multiple CVE's)".format(pkg_str, group_type) # 5: critical, 4: high, 3: medium, 2: low, 1: very low. 
severitiy_mapping = { 'unknown': 3, 'critical': 5, 'high': 4, 'medium': 3, 'low': 2, } task_severity = severitiy_mapping.get(group.severity.name) project = get_bug_project((pkg.database for pkg in versions)) return { 'project': project, 'product_category': 13, # security 'item_summary': summary, 'task_severity': task_severity, 'detailed_desc': bug_desc } def get_cve_data(cve): cve_model = CVE.query.get(cve) if not cve_model: return None entries = (db.session.query(CVEGroupEntry, CVEGroup, CVEGroupPackage, Advisory) .filter_by(cve=cve_model) .join(CVEGroup, CVEGroupEntry.group) .join(CVEGroupPackage, CVEGroup.packages) .outerjoin(Advisory, Advisory.group_package_id == CVEGroupPackage.id) .order_by(CVEGroup.created.desc()).order_by(CVEGroupPackage.pkgname)).all() group_packages = defaultdict(set) advisories = set() groups = set() for cve, group, pkg, advisory in entries: group_packages[group].add(pkg.pkgname) groups.add(group) if advisory: advisories.add(advisory) groups = sorted(groups, key=lambda item: item.created, reverse=True) groups = sorted(groups, key=lambda item: item.status) advisories = sorted(advisories, key=lambda item: item.id, reverse=True) group_packages = dict(map(lambda item: (item[0], sorted(item[1])), group_packages.items())) return {'issue': cve_model, 'groups': groups, 'group_packages': group_packages, 'advisories': advisories} @tracker.route('/<regex("((issues?|cve)/)?"):path><regex("{}"):cve><regex("[./]json"):suffix>'.format(cve_id_regex[1:-1]), methods=['GET']) @json_response def show_cve_json(cve, path=None, suffix=None): data = get_cve_data(cve) if not data: return not_found(json=True) cve = data['issue'] references = cve.reference.replace('\r', '').split('\n') if cve.reference else [] packages = list(set(sorted([item for sublist in data['group_packages'].values() for item in sublist]))) advisories = data['advisories'] if not current_user.role.is_reporter: advisories = list(filter(lambda advisory: advisory.publication == 
Publication.published, advisories)) json_data = OrderedDict() json_data['name'] = cve.id json_data['type'] = cve.issue_type json_data['severity'] = cve.severity.label json_data['vector'] = cve.remote.label json_data['description'] = cve.description json_data['groups'] = [str(group) for group in data['groups']] json_data['packages'] = packages json_data['advisories'] = [advisory.id for advisory in advisories] json_data['references'] = references json_data['notes'] = cve.notes if cve.notes else None return json_data @tracker.route('/<regex("((issues?|cve)/)?"):path><regex("{}"):cve>'.format(cve_id_regex[1:]), methods=['GET']) def show_cve(cve, path=None): data = get_cve_data(cve) if not data: return not_found() packages = list(set(sorted([item for sublist in data['group_packages'].values() for item in sublist]))) title = '{} - {}'.format(data['issue'].id, ' '.join(packages)) \ if len(packages) else \ '{}'.format(data['issue'].id) advisories = data['advisories'] if not current_user.role.is_reporter: advisories = list(filter(lambda advisory: advisory.publication == Publication.published, advisories)) return render_template('cve.html', title=title, issue=data['issue'], groups=data['groups'], group_packages=data['group_packages'], advisories=advisories, can_watch_log=user_can_watch_log(), can_edit=user_can_edit_issue(advisories), can_delete=user_can_delete_issue(advisories)) @tracker.route('/<regex("((issues?|cve)/)?"):path><regex("{}"):cve>/log'.format(cve_id_regex[1:-1]), methods=['GET']) def show_cve_log(cve, path=None): data = get_cve_data(cve) if not data: return not_found() title = '{} - log'.format(data['issue'].id) return render_template('log/cve_log.html', title=title, issue=data['issue'], can_watch_user_log=user_can_watch_user_log()) def get_group_data(avg): avg_id = int(avg.replace('AVG-', '')) entries = (db.session.query(CVEGroup, CVE, CVEGroupPackage, Advisory, Package) .filter(CVEGroup.id == avg_id) .join(CVEGroupEntry, CVEGroup.issues) .join(CVE, 
CVEGroupEntry.cve) .join(CVEGroupPackage, CVEGroup.packages) .outerjoin(Package, Package.name == CVEGroupPackage.pkgname) .outerjoin(Advisory, Advisory.group_package_id == CVEGroupPackage.id)).all() if not entries: return None group = None issues = set() packages = set() advisories = set() issue_types = set() versions = set() for group_entry, cve, pkg, advisory, package in entries: group = group_entry issues.add(cve) issue_types.add(cve.issue_type) packages.add(pkg) if package: versions.add(package) if advisory: advisories.add(advisory) advisories = sorted(advisories, key=lambda item: item.id, reverse=True) issue_types = list(issue_types) issues = sorted(issues, key=lambda item: item, reverse=True) packages = sorted(packages, key=lambda item: item.pkgname) versions = filter_duplicate_packages(sort_packages(list(versions)), True) advisories_pending = group.status == Status.fixed and group.advisory_qualified and len(advisories) <= 0 return { 'group': group, 'packages': packages, 'versions': versions, 'issues': issues, 'issue_types': issue_types, 'advisories': advisories, 'advisories_pending': advisories_pending } @tracker.route('/group/<regex("{}"):avg><regex("[./]json"):postfix>'.format(vulnerability_group_regex[1:-1]), methods=['GET']) @tracker.route('/avg/<regex("{}"):avg><regex("[./]json"):postfix>'.format(vulnerability_group_regex[1:-1]), methods=['GET']) @tracker.route('/<regex("{}"):avg><regex("[./]json"):postfix>'.format(vulnerability_group_regex[1:-1]), methods=['GET']) @json_response def show_group_json(avg, postfix=None): data = get_group_data(avg) if not data: return not_found(json=True) group = data['group'] advisories = data['advisories'] if not current_user.role.is_reporter: advisories = list(filter(lambda advisory: advisory.publication == Publication.published, advisories)) issues = data['issues'] packages = data['packages'] issue_types = data['issue_types'] references = group.reference.replace('\r', '').split('\n') if group.reference else [] 
json_data = OrderedDict() json_data['name'] = group.name json_data['packages'] = [package.pkgname for package in packages] json_data['status'] = group.status.label json_data['severity'] = group.severity.label json_data['type'] = 'multiple issues' if len(issue_types) > 1 else issue_types[0] json_data['affected'] = group.affected json_data['fixed'] = group.fixed if group.fixed else None json_data['ticket'] = group.bug_ticket if group.bug_ticket else None json_data['issues'] = [str(cve) for cve in issues] json_data['advisories'] = [advisory.id for advisory in advisories] json_data['references'] = references json_data['notes'] = group.notes if group.notes else None return json_data @tracker.route('/group/<regex("{}"):avg>'.format(vulnerability_group_regex[1:]), methods=['GET']) @tracker.route('/avg/<regex("{}"):avg>'.format(vulnerability_group_regex[1:]), methods=['GET']) @tracker.route('/<regex("{}"):avg>'.format(vulnerability_group_regex[1:]), methods=['GET']) def show_group(avg): data = get_group_data(avg) if not data: return not_found() group = data['group'] advisories = data['advisories'] if not current_user.role.is_reporter: advisories = list(filter(lambda advisory: advisory.publication == Publication.published, advisories)) issues = data['issues'] packages = data['packages'] issue_types = data['issue_types'] versions = data['versions'] issue_type = 'multiple issues' if len(issue_types) > 1 else issue_types[0] pkgnames = list(set(sorted([pkg.pkgname for pkg in packages]))) form = AdvisoryForm() form.advisory_type.data = issue_type return render_template('group.html', title='{} - {}'.format(group, ' '.join(pkgnames)), form=form, group=group, packages=packages, issues=issues, advisories=advisories, versions=versions, Status=Status, issue_type=issue_type, bug_data=get_bug_data(issues, packages, versions, group), advisories_pending=data['advisories_pending'], can_edit=user_can_edit_group(advisories), can_delete=user_can_delete_group(advisories), 
can_handle_advisory=user_can_handle_advisory(), can_watch_log=user_can_watch_log()) def get_package_data(pkgname): entries = (db.session.query(Package, CVEGroup, CVE, Advisory) .filter(Package.name == pkgname) .outerjoin(CVEGroupPackage, CVEGroupPackage.pkgname == Package.name) .outerjoin(CVEGroup, CVEGroupPackage.group) .outerjoin(CVEGroupEntry, CVEGroup.issues) .outerjoin(CVE, CVEGroupEntry.cve) .outerjoin(Advisory, and_(Advisory.group_package_id == CVEGroupPackage.id, Advisory.publication == Publication.published)) ).all() # fallback for dropped packages if not entries: entries = (db.session.query(CVEGroupPackage, CVEGroup, CVE, Advisory) .filter(CVEGroupPackage.pkgname == pkgname) .join(CVEGroup, CVEGroupPackage.group) .join(CVEGroupEntry, CVEGroup.issues) .join(CVE, CVEGroupEntry.cve) .outerjoin(Advisory, and_(Advisory.group_package_id == CVEGroupPackage.id, Advisory.publication == Publication.published)) ).all() if not entries: return None groups = set() issues = set() advisories = set() versions = set() for package, group, cve, advisory in entries: if isinstance(package, Package): versions.add(package) if group: groups.add(group) if cve: issues.add((cve, group)) if advisory: advisories.add(advisory) issues = [{'issue': e[0], 'group': e[1]} for e in issues] issues = sorted(issues, key=lambda item: item['issue'], reverse=True) issues = sorted(issues, key=lambda item: item['group'].status) groups = sorted(groups, key=lambda item: item.id, reverse=True) groups = sorted(groups, key=lambda item: item.status) advisories = sorted(advisories, key=lambda item: item.id, reverse=True) versions = filter_duplicate_packages(sort_packages(list(versions)), True) package = versions[0] if versions else None return { 'package': package, 'pkgname': pkgname, 'versions': versions, 'groups': groups, 'issues': issues, 'advisories': advisories } @tracker.route('/group/<regex("{}"):avg>/log'.format(vulnerability_group_regex[1:-1]), methods=['GET']) 
@tracker.route('/avg/<regex("{}"):avg>/log'.format(vulnerability_group_regex[1:-1]), methods=['GET']) @tracker.route('/<regex("{}"):avg>/log'.format(vulnerability_group_regex[1:-1]), methods=['GET']) def show_group_log(avg): data = get_group_data(avg) if not data: return not_found(json=True) group = data['group'] return render_template('log/group_log.html', title='{} - log'.format(group), group=group, Status=Status, advisories_pending=data['advisories_pending'], can_watch_user_log=user_can_watch_user_log()) @tracker.route('/package/<regex("{}"):pkgname><regex("[./]json"):suffix>'.format(pkgname_regex[1:-1]), methods=['GET']) @json_response def show_package_json(pkgname, suffix=None): data = get_package_data(pkgname) if not data: return not_found(json=True) advisories = data['advisories'] versions = data['versions'] groups = data['groups'] issues = data['issues'] json_advisory = [] for advisory in advisories: entry = OrderedDict() entry['name'] = advisory.id entry['date'] = advisory.created.strftime('%Y-%m-%d') entry['severity'] = advisory.group_package.group.severity.label entry['type'] = advisory.advisory_type entry['reference'] = advisory.reference if advisory.reference else None json_advisory.append(entry) json_versions = [] for version in versions: entry = OrderedDict() entry['version'] = version.version entry['database'] = version.database json_versions.append(entry) json_groups = [] for group in groups: entry = OrderedDict() entry['name'] = group.name entry['status'] = group.status.label entry['severity'] = group.severity.label json_groups.append(entry) json_issues = [] for issue in issues: group = issue['group'] issue = issue['issue'] entry = OrderedDict() entry['name'] = issue.id entry['severity'] = issue.severity.label entry['type'] = issue.issue_type entry['status'] = group.status.label json_issues.append(entry) json_data = OrderedDict() json_data['name'] = pkgname json_data['versions'] = json_versions json_data['advisories'] = json_advisory 
json_data['groups'] = json_groups json_data['issues'] = json_issues return json_data @tracker.route('/package/<regex("{}"):pkgname>'.format(pkgname_regex[1:]), methods=['GET']) def show_package(pkgname): data = get_package_data(pkgname) if not data: return not_found() groups = data['groups'] data['groups'] = {'open': list(filter(lambda group: group.status.open(), groups)), 'resolved': list(filter(lambda group: group.status.resolved(), groups))} issues = data['issues'] data['issues'] = {'open': list(filter(lambda issue: issue['group'].status.open(), issues)), 'resolved': list(filter(lambda issue: issue['group'].status.resolved(), issues))} return render_template('package.html', title='{}'.format(pkgname), package=data) def render_html_advisory(advisory, package, group, raw_asa, generated): return render_template('advisory.html', title='[{}] {}: {}'.format(advisory.id, package.pkgname, advisory.advisory_type), advisory=advisory, package=package, raw_asa=raw_asa, generated=generated, can_handle_advisory=user_can_handle_advisory(), Publication=Publication) @tracker.route('/advisory/<regex("{}"):advisory_id>/raw'.format(advisory_regex[1:-1]), methods=['GET']) @tracker.route('/<regex("{}"):advisory_id>/raw'.format(advisory_regex[1:-1]), methods=['GET']) def show_advisory_raw(advisory_id): result = show_advisory(advisory_id, raw=True) if isinstance(result, tuple): return result if not isinstance(result, str): return result return result, 200, {'Content-Type': 'text/plain; charset=utf-8'} @tracker.route('/advisory/<regex("{}"):advisory_id>/generate/raw'.format(advisory_regex[1:-1]), methods=['GET']) @tracker.route('/<regex("{}"):advisory_id>/generate/raw'.format(advisory_regex[1:-1]), methods=['GET']) def show_generated_advisory_raw(advisory_id): result = show_generated_advisory(advisory_id, raw=True) if isinstance(result, tuple): return result if not isinstance(result, str): return result return result, 200, {'Content-Type': 'text/plain; charset=utf-8'} 
@tracker.route('/advisory/<regex("{}"):advisory_id>'.format(advisory_regex[1:]), methods=['GET']) @tracker.route('/<regex("{}"):advisory_id>'.format(advisory_regex[1:]), methods=['GET']) def show_advisory(advisory_id, raw=False): entries = (db.session.query(Advisory, CVEGroup, CVEGroupPackage, CVE) .filter(Advisory.id == advisory_id) .join(CVEGroupPackage, Advisory.group_package) .join(CVEGroup, CVEGroupPackage.group) .join(CVEGroupEntry, CVEGroup.issues) .join(CVE, CVEGroupEntry.cve) .order_by(CVE.id) ).all() if not entries: return not_found() advisory = entries[0][0] group = entries[0][1] package = entries[0][2] issues = [issue for (advisory, group, package, issue) in entries] if not advisory.content: if raw: return redirect('/{}/generate/raw'.format(advisory_id)) return redirect('/{}/generate'.format(advisory_id)) if raw: return advisory.content asa = advisory_extend_html(advisory_escape_html(advisory.content), issues, package) return render_html_advisory(advisory=advisory, package=package, group=group, raw_asa=asa, generated=False) @tracker.route('/advisory/<regex("{}"):advisory_id>/generate'.format(advisory_regex[1:-1]), methods=['GET']) @tracker.route('/<regex("{}"):advisory_id>/generate'.format(advisory_regex[1:-1]), methods=['GET']) def show_generated_advisory(advisory_id, raw=False): entries = (db.session.query(Advisory, CVEGroup, CVEGroupPackage, CVE) .filter(Advisory.id == advisory_id) .join(CVEGroupPackage, Advisory.group_package) .join(CVEGroup, CVEGroupPackage.group) .join(CVEGroupEntry, CVEGroup.issues) .join(CVE, CVEGroupEntry.cve) .order_by(CVE.id) ).all() if not entries: return not_found() advisory = entries[0][0] group = entries[0][1] package = entries[0][2] issues = sorted([issue for (advisory, group, package, issue) in entries]) severity_sorted_issues = sorted(issues, key=lambda issue: issue.issue_type) severity_sorted_issues = sorted(severity_sorted_issues, key=lambda issue: issue.severity) remote = any([issue.remote is Remote.remote for issue 
in issues]) issue_listing_formatted = advisory_format_issue_listing([issue.id for issue in issues]) link = TRACKER_ADVISORY_URL.format(advisory.id, group.id) upstream_released = group.affected.split('-')[0].split('+')[0] != group.fixed.split('-')[0].split('+')[0] upstream_version = group.fixed.split('-')[0].split('+')[0] if ':' in upstream_version: upstream_version = upstream_version[upstream_version.index(':') + 1:] unique_issue_types = [] for issue in severity_sorted_issues: if issue.issue_type not in unique_issue_types: unique_issue_types.append(issue.issue_type) references = [] if group.bug_ticket: references.append(TRACKER_BUGTRACKER_URL.format(group.bug_ticket)) references.extend([ref for ref in multiline_to_list(group.reference) if ref not in references]) list(map(lambda issue: references.extend( [ref for ref in multiline_to_list(issue.reference) if ref not in references]), issues)) raw_asa = render_template('advisory.txt', advisory=advisory, group=group, package=package, issues=issues, remote=remote, issue_listing_formatted=issue_listing_formatted, link=link, workaround=advisory.workaround, impact=advisory.impact, upstream_released=upstream_released, upstream_version=upstream_version, unique_issue_types=unique_issue_types, references=references, TRACKER_ISSUE_URL=TRACKER_ISSUE_URL, TRACKER_GROUP_URL=TRACKER_GROUP_URL) if raw: return raw_asa raw_asa = '\n'.join(raw_asa.split('\n')[2:]) raw_asa = str(escape(raw_asa)) raw_asa = advisory_extend_html(raw_asa, issues, package) return render_html_advisory(advisory=advisory, package=package, group=group, raw_asa=raw_asa, generated=True) @tracker.route('/advisory/<regex("{}"):advisory_id>/log'.format(advisory_regex[1:-1]), methods=['GET']) @tracker.route('/<regex("{}"):advisory_id>/log'.format(advisory_regex[1:-1]), methods=['GET']) def show_advisory_log(advisory_id, path=None): advisory = (db.session.query(Advisory) .filter(Advisory.id == advisory_id) ).first() if not advisory: return not_found() return 
render_template('log/advisory_log.html', title='{} - log'.format(advisory_id), advisory=advisory, can_watch_user_log=user_can_watch_user_log()) # TODO: define permission to view this @tracker.route('/log', defaults={'page': 1}, methods=['GET']) @tracker.route('/log/page/<int:page>', methods=['GET']) def show_log(page=1): Transaction = versioning_manager.transaction_cls VersionClassCVE = version_class(CVE) VersionClassGroup = version_class(CVEGroup) VersionClassAdvisory = version_class(Advisory) pagination = (db.session.query(Transaction, VersionClassCVE, VersionClassGroup, VersionClassAdvisory) .outerjoin(VersionClassCVE, Transaction.id == VersionClassCVE.transaction_id) .outerjoin(VersionClassGroup, Transaction.id == VersionClassGroup.transaction_id) .outerjoin(VersionClassAdvisory, Transaction.id == VersionClassAdvisory.transaction_id) .order_by(Transaction.issued_at.desc()) .filter((VersionClassCVE.transaction_id) | (VersionClassGroup.transaction_id) | (VersionClassAdvisory.transaction_id)) ).paginate(page, TRACKER_LOG_ENTRIES_PER_PAGE, True) return render_template('log/log.html', title=f'Log', can_watch_user_log=user_can_watch_user_log(), pagination=pagination, CVE=CVE, CVEGroup=CVEGroup, Advisory=Advisory)
mit
llvm-mirror/lldb
packages/Python/lldbsuite/test/lldbutil.py
3
46126
""" This LLDB module contains miscellaneous utilities. Some of the test suite takes advantage of the utility functions defined here. They can also be useful for general purpose lldb scripting. """ from __future__ import print_function from __future__ import absolute_import # System modules import errno import os import re import sys # Third-party modules from six import StringIO as SixStringIO import six # LLDB modules import lldb # =================================================== # Utilities for locating/checking executable programs # =================================================== def is_exe(fpath): """Returns True if fpath is an executable.""" return os.path.isfile(fpath) and os.access(fpath, os.X_OK) def which(program): """Returns the full path to a program; None otherwise.""" fpath, fname = os.path.split(program) if fpath: if is_exe(program): return program else: for path in os.environ["PATH"].split(os.pathsep): exe_file = os.path.join(path, program) if is_exe(exe_file): return exe_file return None def mkdir_p(path): try: os.makedirs(path) except OSError as e: if e.errno != errno.EEXIST: raise if not os.path.isdir(path): raise OSError(errno.ENOTDIR, "%s is not a directory"%path) # =================================================== # Disassembly for an SBFunction or an SBSymbol object # =================================================== def disassemble(target, function_or_symbol): """Disassemble the function or symbol given a target. It returns the disassembly content in a string object. """ buf = SixStringIO() insts = function_or_symbol.GetInstructions(target) for i in insts: print(i, file=buf) return buf.getvalue() # ========================================================== # Integer (byte size 1, 2, 4, and 8) to bytearray conversion # ========================================================== def int_to_bytearray(val, bytesize): """Utility function to convert an integer into a bytearray. It returns the bytearray in the little endian format. 
It is easy to get the big endian format, just do ba.reverse() on the returned object. """ import struct if bytesize == 1: return bytearray([val]) # Little endian followed by a format character. template = "<%c" if bytesize == 2: fmt = template % 'h' elif bytesize == 4: fmt = template % 'i' elif bytesize == 4: fmt = template % 'q' else: return None packed = struct.pack(fmt, val) return bytearray(packed) def bytearray_to_int(bytes, bytesize): """Utility function to convert a bytearray into an integer. It interprets the bytearray in the little endian format. For a big endian bytearray, just do ba.reverse() on the object before passing it in. """ import struct if bytesize == 1: return bytes[0] # Little endian followed by a format character. template = "<%c" if bytesize == 2: fmt = template % 'h' elif bytesize == 4: fmt = template % 'i' elif bytesize == 4: fmt = template % 'q' else: return None unpacked = struct.unpack_from(fmt, bytes) return unpacked[0] # ============================================================== # Get the description of an lldb object or None if not available # ============================================================== def get_description(obj, option=None): """Calls lldb_obj.GetDescription() and returns a string, or None. 
For SBTarget, SBBreakpointLocation, and SBWatchpoint lldb objects, an extra option can be passed in to describe the detailed level of description desired: o lldb.eDescriptionLevelBrief o lldb.eDescriptionLevelFull o lldb.eDescriptionLevelVerbose """ method = getattr(obj, 'GetDescription') if not method: return None tuple = (lldb.SBTarget, lldb.SBBreakpointLocation, lldb.SBWatchpoint) if isinstance(obj, tuple): if option is None: option = lldb.eDescriptionLevelBrief stream = lldb.SBStream() if option is None: success = method(stream) else: success = method(stream, option) if not success: return None return stream.GetData() # ================================================= # Convert some enum value to its string counterpart # ================================================= def state_type_to_str(enum): """Returns the stateType string given an enum.""" if enum == lldb.eStateInvalid: return "invalid" elif enum == lldb.eStateUnloaded: return "unloaded" elif enum == lldb.eStateConnected: return "connected" elif enum == lldb.eStateAttaching: return "attaching" elif enum == lldb.eStateLaunching: return "launching" elif enum == lldb.eStateStopped: return "stopped" elif enum == lldb.eStateRunning: return "running" elif enum == lldb.eStateStepping: return "stepping" elif enum == lldb.eStateCrashed: return "crashed" elif enum == lldb.eStateDetached: return "detached" elif enum == lldb.eStateExited: return "exited" elif enum == lldb.eStateSuspended: return "suspended" else: raise Exception("Unknown StateType enum") def stop_reason_to_str(enum): """Returns the stopReason string given an enum.""" if enum == lldb.eStopReasonInvalid: return "invalid" elif enum == lldb.eStopReasonNone: return "none" elif enum == lldb.eStopReasonTrace: return "trace" elif enum == lldb.eStopReasonBreakpoint: return "breakpoint" elif enum == lldb.eStopReasonWatchpoint: return "watchpoint" elif enum == lldb.eStopReasonExec: return "exec" elif enum == lldb.eStopReasonSignal: return "signal" elif enum 
== lldb.eStopReasonException: return "exception" elif enum == lldb.eStopReasonPlanComplete: return "plancomplete" elif enum == lldb.eStopReasonThreadExiting: return "threadexiting" else: raise Exception("Unknown StopReason enum") def symbol_type_to_str(enum): """Returns the symbolType string given an enum.""" if enum == lldb.eSymbolTypeInvalid: return "invalid" elif enum == lldb.eSymbolTypeAbsolute: return "absolute" elif enum == lldb.eSymbolTypeCode: return "code" elif enum == lldb.eSymbolTypeData: return "data" elif enum == lldb.eSymbolTypeTrampoline: return "trampoline" elif enum == lldb.eSymbolTypeRuntime: return "runtime" elif enum == lldb.eSymbolTypeException: return "exception" elif enum == lldb.eSymbolTypeSourceFile: return "sourcefile" elif enum == lldb.eSymbolTypeHeaderFile: return "headerfile" elif enum == lldb.eSymbolTypeObjectFile: return "objectfile" elif enum == lldb.eSymbolTypeCommonBlock: return "commonblock" elif enum == lldb.eSymbolTypeBlock: return "block" elif enum == lldb.eSymbolTypeLocal: return "local" elif enum == lldb.eSymbolTypeParam: return "param" elif enum == lldb.eSymbolTypeVariable: return "variable" elif enum == lldb.eSymbolTypeVariableType: return "variabletype" elif enum == lldb.eSymbolTypeLineEntry: return "lineentry" elif enum == lldb.eSymbolTypeLineHeader: return "lineheader" elif enum == lldb.eSymbolTypeScopeBegin: return "scopebegin" elif enum == lldb.eSymbolTypeScopeEnd: return "scopeend" elif enum == lldb.eSymbolTypeAdditional: return "additional" elif enum == lldb.eSymbolTypeCompiler: return "compiler" elif enum == lldb.eSymbolTypeInstrumentation: return "instrumentation" elif enum == lldb.eSymbolTypeUndefined: return "undefined" def value_type_to_str(enum): """Returns the valueType string given an enum.""" if enum == lldb.eValueTypeInvalid: return "invalid" elif enum == lldb.eValueTypeVariableGlobal: return "global_variable" elif enum == lldb.eValueTypeVariableStatic: return "static_variable" elif enum == 
lldb.eValueTypeVariableArgument: return "argument_variable" elif enum == lldb.eValueTypeVariableLocal: return "local_variable" elif enum == lldb.eValueTypeRegister: return "register" elif enum == lldb.eValueTypeRegisterSet: return "register_set" elif enum == lldb.eValueTypeConstResult: return "constant_result" else: raise Exception("Unknown ValueType enum") # ================================================== # Get stopped threads due to each stop reason. # ================================================== def sort_stopped_threads(process, breakpoint_threads=None, crashed_threads=None, watchpoint_threads=None, signal_threads=None, exiting_threads=None, other_threads=None): """ Fills array *_threads with threads stopped for the corresponding stop reason. """ for lst in [breakpoint_threads, watchpoint_threads, signal_threads, exiting_threads, other_threads]: if lst is not None: lst[:] = [] for thread in process: dispatched = False for (reason, list) in [(lldb.eStopReasonBreakpoint, breakpoint_threads), (lldb.eStopReasonException, crashed_threads), (lldb.eStopReasonWatchpoint, watchpoint_threads), (lldb.eStopReasonSignal, signal_threads), (lldb.eStopReasonThreadExiting, exiting_threads), (None, other_threads)]: if not dispatched and list is not None: if thread.GetStopReason() == reason or reason is None: list.append(thread) dispatched = True # ================================================== # Utility functions for setting breakpoints # ================================================== def run_break_set_by_script( test, class_name, extra_options=None, num_expected_locations=1): """Set a scripted breakpoint. 
Check that it got the right number of locations.""" test.assertTrue(class_name is not None, "Must pass in a class name.") command = "breakpoint set -P " + class_name if extra_options is not None: command += " " + extra_options break_results = run_break_set_command(test, command) check_breakpoint_result(test, break_results, num_locations=num_expected_locations) return get_bpno_from_match(break_results) def run_break_set_by_file_and_line( test, file_name, line_number, extra_options=None, num_expected_locations=1, loc_exact=False, module_name=None): """Set a breakpoint by file and line, returning the breakpoint number. If extra_options is not None, then we append it to the breakpoint set command. If num_expected_locations is -1, we check that we got AT LEAST one location. If num_expected_locations is -2, we don't check the actual number at all. Otherwise, we check that num_expected_locations equals the number of locations. If loc_exact is true, we check that there is one location, and that location must be at the input file and line number.""" if file_name is None: command = 'breakpoint set -l %d' % (line_number) else: command = 'breakpoint set -f "%s" -l %d' % (file_name, line_number) if module_name: command += " --shlib '%s'" % (module_name) if extra_options: command += " " + extra_options break_results = run_break_set_command(test, command) if num_expected_locations == 1 and loc_exact: check_breakpoint_result( test, break_results, num_locations=num_expected_locations, file_name=file_name, line_number=line_number, module_name=module_name) else: check_breakpoint_result( test, break_results, num_locations=num_expected_locations) return get_bpno_from_match(break_results) def run_break_set_by_symbol( test, symbol, extra_options=None, num_expected_locations=-1, sym_exact=False, module_name=None): """Set a breakpoint by symbol name. Common options are the same as run_break_set_by_file_and_line. 
If sym_exact is true, then the output symbol must match the input exactly, otherwise we do a substring match.""" command = 'breakpoint set -n "%s"' % (symbol) if module_name: command += " --shlib '%s'" % (module_name) if extra_options: command += " " + extra_options break_results = run_break_set_command(test, command) if num_expected_locations == 1 and sym_exact: check_breakpoint_result( test, break_results, num_locations=num_expected_locations, symbol_name=symbol, module_name=module_name) else: check_breakpoint_result( test, break_results, num_locations=num_expected_locations) return get_bpno_from_match(break_results) def run_break_set_by_selector( test, selector, extra_options=None, num_expected_locations=-1, module_name=None): """Set a breakpoint by selector. Common options are the same as run_break_set_by_file_and_line.""" command = 'breakpoint set -S "%s"' % (selector) if module_name: command += ' --shlib "%s"' % (module_name) if extra_options: command += " " + extra_options break_results = run_break_set_command(test, command) if num_expected_locations == 1: check_breakpoint_result( test, break_results, num_locations=num_expected_locations, symbol_name=selector, symbol_match_exact=False, module_name=module_name) else: check_breakpoint_result( test, break_results, num_locations=num_expected_locations) return get_bpno_from_match(break_results) def run_break_set_by_regexp( test, regexp, extra_options=None, num_expected_locations=-1): """Set a breakpoint by regular expression match on symbol name. 
Common options are the same as run_break_set_by_file_and_line.""" command = 'breakpoint set -r "%s"' % (regexp) if extra_options: command += " " + extra_options break_results = run_break_set_command(test, command) check_breakpoint_result( test, break_results, num_locations=num_expected_locations) return get_bpno_from_match(break_results) def run_break_set_by_source_regexp( test, regexp, extra_options=None, num_expected_locations=-1): """Set a breakpoint by source regular expression. Common options are the same as run_break_set_by_file_and_line.""" command = 'breakpoint set -p "%s"' % (regexp) if extra_options: command += " " + extra_options break_results = run_break_set_command(test, command) check_breakpoint_result( test, break_results, num_locations=num_expected_locations) return get_bpno_from_match(break_results) def run_break_set_command(test, command): """Run the command passed in - it must be some break set variant - and analyze the result. Returns a dictionary of information gleaned from the command-line results. Will assert if the breakpoint setting fails altogether. Dictionary will contain: bpno - breakpoint of the newly created breakpoint, -1 on error. num_locations - number of locations set for the breakpoint. 
If there is only one location, the dictionary MAY contain: file - source file name line_no - source line number symbol - symbol name inline_symbol - inlined symbol name offset - offset from the original symbol module - module address - address at which the breakpoint was set.""" patterns = [ r"^Breakpoint (?P<bpno>[0-9]+): (?P<num_locations>[0-9]+) locations\.$", r"^Breakpoint (?P<bpno>[0-9]+): (?P<num_locations>no) locations \(pending\)\.", r"^Breakpoint (?P<bpno>[0-9]+): where = (?P<module>.*)`(?P<symbol>[+\-]{0,1}[^+]+)( \+ (?P<offset>[0-9]+)){0,1}( \[inlined\] (?P<inline_symbol>.*)){0,1} at (?P<file>[^:]+):(?P<line_no>[0-9]+)(?P<column>(:[0-9]+)?), address = (?P<address>0x[0-9a-fA-F]+)$", r"^Breakpoint (?P<bpno>[0-9]+): where = (?P<module>.*)`(?P<symbol>.*)( \+ (?P<offset>[0-9]+)){0,1}, address = (?P<address>0x[0-9a-fA-F]+)$"] match_object = test.match(command, patterns) break_results = match_object.groupdict() # We always insert the breakpoint number, setting it to -1 if we couldn't find it # Also, make sure it gets stored as an integer. if not 'bpno' in break_results: break_results['bpno'] = -1 else: break_results['bpno'] = int(break_results['bpno']) # We always insert the number of locations # If ONE location is set for the breakpoint, then the output doesn't mention locations, but it has to be 1... # We also make sure it is an integer. 
if not 'num_locations' in break_results: num_locations = 1 else: num_locations = break_results['num_locations'] if num_locations == 'no': num_locations = 0 else: num_locations = int(break_results['num_locations']) break_results['num_locations'] = num_locations if 'line_no' in break_results: break_results['line_no'] = int(break_results['line_no']) return break_results def get_bpno_from_match(break_results): return int(break_results['bpno']) def check_breakpoint_result( test, break_results, file_name=None, line_number=-1, symbol_name=None, symbol_match_exact=True, module_name=None, offset=-1, num_locations=-1): out_num_locations = break_results['num_locations'] if num_locations == -1: test.assertTrue(out_num_locations > 0, "Expecting one or more locations, got none.") elif num_locations != -2: test.assertTrue( num_locations == out_num_locations, "Expecting %d locations, got %d." % (num_locations, out_num_locations)) if file_name: out_file_name = "" if 'file' in break_results: out_file_name = break_results['file'] test.assertTrue( file_name.endswith(out_file_name), "Breakpoint file name '%s' doesn't match resultant name '%s'." % (file_name, out_file_name)) if line_number != -1: out_line_number = -1 if 'line_no' in break_results: out_line_number = break_results['line_no'] test.assertTrue( line_number == out_line_number, "Breakpoint line number %s doesn't match resultant line %s." % (line_number, out_line_number)) if symbol_name: out_symbol_name = "" # Look first for the inlined symbol name, otherwise use the symbol # name: if 'inline_symbol' in break_results and break_results['inline_symbol']: out_symbol_name = break_results['inline_symbol'] elif 'symbol' in break_results: out_symbol_name = break_results['symbol'] if symbol_match_exact: test.assertTrue( symbol_name == out_symbol_name, "Symbol name '%s' doesn't match resultant symbol '%s'." 
% (symbol_name, out_symbol_name)) else: test.assertTrue( out_symbol_name.find(symbol_name) != - 1, "Symbol name '%s' isn't in resultant symbol '%s'." % (symbol_name, out_symbol_name)) if module_name: out_module_name = None if 'module' in break_results: out_module_name = break_results['module'] test.assertTrue( module_name.find(out_module_name) != - 1, "Symbol module name '%s' isn't in expected module name '%s'." % (out_module_name, module_name)) # ================================================== # Utility functions related to Threads and Processes # ================================================== def get_stopped_threads(process, reason): """Returns the thread(s) with the specified stop reason in a list. The list can be empty if no such thread exists. """ threads = [] for t in process: if t.GetStopReason() == reason: threads.append(t) return threads def get_stopped_thread(process, reason): """A convenience function which returns the first thread with the given stop reason or None. Example usages: 1. Get the stopped thread due to a breakpoint condition ... from lldbutil import get_stopped_thread thread = get_stopped_thread(process, lldb.eStopReasonPlanComplete) self.assertTrue(thread.IsValid(), "There should be a thread stopped due to breakpoint condition") ... 2. Get the thread stopped due to a breakpoint ... from lldbutil import get_stopped_thread thread = get_stopped_thread(process, lldb.eStopReasonBreakpoint) self.assertTrue(thread.IsValid(), "There should be a thread stopped due to breakpoint") ... 
""" threads = get_stopped_threads(process, reason) if len(threads) == 0: return None return threads[0] def get_threads_stopped_at_breakpoint_id(process, bpid): """ For a stopped process returns the thread stopped at the breakpoint passed in bkpt""" stopped_threads = [] threads = [] stopped_threads = get_stopped_threads(process, lldb.eStopReasonBreakpoint) if len(stopped_threads) == 0: return threads for thread in stopped_threads: # Make sure we've hit our breakpoint... break_id = thread.GetStopReasonDataAtIndex(0) if break_id == bpid: threads.append(thread) return threads def get_threads_stopped_at_breakpoint(process, bkpt): return get_threads_stopped_at_breakpoint_id(process, bkpt.GetID()) def get_one_thread_stopped_at_breakpoint_id( process, bpid, require_exactly_one=True): threads = get_threads_stopped_at_breakpoint_id(process, bpid) if len(threads) == 0: return None if require_exactly_one and len(threads) != 1: return None return threads[0] def get_one_thread_stopped_at_breakpoint( process, bkpt, require_exactly_one=True): return get_one_thread_stopped_at_breakpoint_id( process, bkpt.GetID(), require_exactly_one) def is_thread_crashed(test, thread): """In the test suite we dereference a null pointer to simulate a crash. 
The way this is reported depends on the platform.""" if test.platformIsDarwin(): return thread.GetStopReason( ) == lldb.eStopReasonException and "EXC_BAD_ACCESS" in thread.GetStopDescription(100) elif test.getPlatform() == "linux": return thread.GetStopReason() == lldb.eStopReasonSignal and thread.GetStopReasonDataAtIndex( 0) == thread.GetProcess().GetUnixSignals().GetSignalNumberFromName("SIGSEGV") elif test.getPlatform() == "windows": return "Exception 0xc0000005" in thread.GetStopDescription(200) else: return "invalid address" in thread.GetStopDescription(100) def get_crashed_threads(test, process): threads = [] if process.GetState() != lldb.eStateStopped: return threads for thread in process: if is_thread_crashed(test, thread): threads.append(thread) return threads # Helper functions for run_to_{source,name}_breakpoint: def run_to_breakpoint_make_target(test, exe_name = "a.out", in_cwd = True): if in_cwd: exe = test.getBuildArtifact(exe_name) # Create the target target = test.dbg.CreateTarget(exe) test.assertTrue(target, "Target: %s is not valid."%(exe_name)) return target def run_to_breakpoint_do_run(test, target, bkpt, launch_info = None): # Launch the process, and do not stop at the entry point. if not launch_info: launch_info = lldb.SBLaunchInfo(None) launch_info.SetWorkingDirectory(test.get_process_working_directory()) error = lldb.SBError() process = target.Launch(launch_info, error) test.assertTrue(process, "Could not create a valid process for %s: %s"%(target.GetExecutable().GetFilename(), error.GetCString())) # Frame #0 should be at our breakpoint. 
threads = get_threads_stopped_at_breakpoint( process, bkpt) test.assertTrue(len(threads) == 1, "Expected 1 thread to stop at breakpoint, %d did."%(len(threads))) thread = threads[0] return (target, process, thread, bkpt) def run_to_name_breakpoint (test, bkpt_name, launch_info = None, exe_name = "a.out", bkpt_module = None, in_cwd = True): """Start up a target, using exe_name as the executable, and run it to a breakpoint set by name on bkpt_name restricted to bkpt_module. If you want to pass in launch arguments or environment variables, you can optionally pass in an SBLaunchInfo. If you do that, remember to set the working directory as well. If your executable isn't called a.out, you can pass that in. And if your executable isn't in the CWD, pass in the absolute path to the executable in exe_name, and set in_cwd to False. If you need to restrict the breakpoint to a particular module, pass the module name (a string not a FileSpec) in bkpt_module. If nothing is passed in setting will be unrestricted. If the target isn't valid, the breakpoint isn't found, or hit, the function will cause a testsuite failure. If successful it returns a tuple with the target process and thread that hit the breakpoint, and the breakpoint that we set for you. """ target = run_to_breakpoint_make_target(test, exe_name, in_cwd) breakpoint = target.BreakpointCreateByName(bkpt_name, bkpt_module) test.assertTrue(breakpoint.GetNumLocations() > 0, "No locations found for name breakpoint: '%s'."%(bkpt_name)) return run_to_breakpoint_do_run(test, target, breakpoint, launch_info) def run_to_source_breakpoint(test, bkpt_pattern, source_spec, launch_info = None, exe_name = "a.out", bkpt_module = None, in_cwd = True): """Start up a target, using exe_name as the executable, and run it to a breakpoint set by source regex bkpt_pattern. The rest of the behavior is the same as run_to_name_breakpoint. 
""" target = run_to_breakpoint_make_target(test, exe_name, in_cwd) # Set the breakpoints breakpoint = target.BreakpointCreateBySourceRegex( bkpt_pattern, source_spec, bkpt_module) test.assertTrue(breakpoint.GetNumLocations() > 0, 'No locations found for source breakpoint: "%s", file: "%s", dir: "%s"' %(bkpt_pattern, source_spec.GetFilename(), source_spec.GetDirectory())) return run_to_breakpoint_do_run(test, target, breakpoint, launch_info) def run_to_line_breakpoint(test, source_spec, line_number, column = 0, launch_info = None, exe_name = "a.out", bkpt_module = None, in_cwd = True): """Start up a target, using exe_name as the executable, and run it to a breakpoint set by (source_spec, line_number(, column)). The rest of the behavior is the same as run_to_name_breakpoint. """ target = run_to_breakpoint_make_target(test, exe_name, in_cwd) # Set the breakpoints breakpoint = target.BreakpointCreateByLocation( source_spec, line_number, column, 0, lldb.SBFileSpecList()) test.assertTrue(breakpoint.GetNumLocations() > 0, 'No locations found for line breakpoint: "%s:%d(:%d)", dir: "%s"' %(source_spec.GetFilename(), line_number, column, source_spec.GetDirectory())) return run_to_breakpoint_do_run(test, target, breakpoint, launch_info) def continue_to_breakpoint(process, bkpt): """ Continues the process, if it stops, returns the threads stopped at bkpt; otherwise, returns None""" process.Continue() if process.GetState() != lldb.eStateStopped: return None else: return get_threads_stopped_at_breakpoint(process, bkpt) def get_caller_symbol(thread): """ Returns the symbol name for the call site of the leaf function. """ depth = thread.GetNumFrames() if depth <= 1: return None caller = thread.GetFrameAtIndex(1).GetSymbol() if caller: return caller.GetName() else: return None def get_function_names(thread): """ Returns a sequence of function names from the stack frames of this thread. 
""" def GetFuncName(i): return thread.GetFrameAtIndex(i).GetFunctionName() return list(map(GetFuncName, list(range(thread.GetNumFrames())))) def get_symbol_names(thread): """ Returns a sequence of symbols for this thread. """ def GetSymbol(i): return thread.GetFrameAtIndex(i).GetSymbol().GetName() return list(map(GetSymbol, list(range(thread.GetNumFrames())))) def get_pc_addresses(thread): """ Returns a sequence of pc addresses for this thread. """ def GetPCAddress(i): return thread.GetFrameAtIndex(i).GetPCAddress() return list(map(GetPCAddress, list(range(thread.GetNumFrames())))) def get_filenames(thread): """ Returns a sequence of file names from the stack frames of this thread. """ def GetFilename(i): return thread.GetFrameAtIndex( i).GetLineEntry().GetFileSpec().GetFilename() return list(map(GetFilename, list(range(thread.GetNumFrames())))) def get_line_numbers(thread): """ Returns a sequence of line numbers from the stack frames of this thread. """ def GetLineNumber(i): return thread.GetFrameAtIndex(i).GetLineEntry().GetLine() return list(map(GetLineNumber, list(range(thread.GetNumFrames())))) def get_module_names(thread): """ Returns a sequence of module names from the stack frames of this thread. """ def GetModuleName(i): return thread.GetFrameAtIndex( i).GetModule().GetFileSpec().GetFilename() return list(map(GetModuleName, list(range(thread.GetNumFrames())))) def get_stack_frames(thread): """ Returns a sequence of stack frames for this thread. 
""" def GetStackFrame(i): return thread.GetFrameAtIndex(i) return list(map(GetStackFrame, list(range(thread.GetNumFrames())))) def print_stacktrace(thread, string_buffer=False): """Prints a simple stack trace of this thread.""" output = SixStringIO() if string_buffer else sys.stdout target = thread.GetProcess().GetTarget() depth = thread.GetNumFrames() mods = get_module_names(thread) funcs = get_function_names(thread) symbols = get_symbol_names(thread) files = get_filenames(thread) lines = get_line_numbers(thread) addrs = get_pc_addresses(thread) if thread.GetStopReason() != lldb.eStopReasonInvalid: desc = "stop reason=" + stop_reason_to_str(thread.GetStopReason()) else: desc = "" print( "Stack trace for thread id={0:#x} name={1} queue={2} ".format( thread.GetThreadID(), thread.GetName(), thread.GetQueueName()) + desc, file=output) for i in range(depth): frame = thread.GetFrameAtIndex(i) function = frame.GetFunction() load_addr = addrs[i].GetLoadAddress(target) if not function: file_addr = addrs[i].GetFileAddress() start_addr = frame.GetSymbol().GetStartAddress().GetFileAddress() symbol_offset = file_addr - start_addr print( " frame #{num}: {addr:#016x} {mod}`{symbol} + {offset}".format( num=i, addr=load_addr, mod=mods[i], symbol=symbols[i], offset=symbol_offset), file=output) else: print( " frame #{num}: {addr:#016x} {mod}`{func} at {file}:{line} {args}".format( num=i, addr=load_addr, mod=mods[i], func='%s [inlined]' % funcs[i] if frame.IsInlined() else funcs[i], file=files[i], line=lines[i], args=get_args_as_string( frame, showFuncName=False) if not frame.IsInlined() else '()'), file=output) if string_buffer: return output.getvalue() def print_stacktraces(process, string_buffer=False): """Prints the stack traces of all the threads.""" output = SixStringIO() if string_buffer else sys.stdout print("Stack traces for " + str(process), file=output) for thread in process: print(print_stacktrace(thread, string_buffer=True), file=output) if string_buffer: return 
output.getvalue() def expect_state_changes(test, listener, process, states, timeout=5): """Listens for state changed events on the listener and makes sure they match what we expect. Stop-and-restart events (where GetRestartedFromEvent() returns true) are ignored.""" for expected_state in states: def get_next_event(): event = lldb.SBEvent() if not listener.WaitForEventForBroadcasterWithType( timeout, process.GetBroadcaster(), lldb.SBProcess.eBroadcastBitStateChanged, event): test.fail( "Timed out while waiting for a transition to state %s" % lldb.SBDebugger.StateAsCString(expected_state)) return event event = get_next_event() while (lldb.SBProcess.GetStateFromEvent(event) == lldb.eStateStopped and lldb.SBProcess.GetRestartedFromEvent(event)): # Ignore restarted event and the subsequent running event. event = get_next_event() test.assertEqual( lldb.SBProcess.GetStateFromEvent(event), lldb.eStateRunning, "Restarted event followed by a running event") event = get_next_event() test.assertEqual( lldb.SBProcess.GetStateFromEvent(event), expected_state) # =================================== # Utility functions related to Frames # =================================== def get_parent_frame(frame): """ Returns the parent frame of the input frame object; None if not available. """ thread = frame.GetThread() parent_found = False for f in thread: if parent_found: return f if f.GetFrameID() == frame.GetFrameID(): parent_found = True # If we reach here, no parent has been found, return None. return None def get_args_as_string(frame, showFuncName=True): """ Returns the args of the input frame object as a string. 
""" # arguments => True # locals => False # statics => False # in_scope_only => True vars = frame.GetVariables(True, False, False, True) # type of SBValueList args = [] # list of strings for var in vars: args.append("(%s)%s=%s" % (var.GetTypeName(), var.GetName(), var.GetValue())) if frame.GetFunction(): name = frame.GetFunction().GetName() elif frame.GetSymbol(): name = frame.GetSymbol().GetName() else: name = "" if showFuncName: return "%s(%s)" % (name, ", ".join(args)) else: return "(%s)" % (", ".join(args)) def print_registers(frame, string_buffer=False): """Prints all the register sets of the frame.""" output = SixStringIO() if string_buffer else sys.stdout print("Register sets for " + str(frame), file=output) registerSet = frame.GetRegisters() # Return type of SBValueList. print("Frame registers (size of register set = %d):" % registerSet.GetSize(), file=output) for value in registerSet: #print(value, file=output) print("%s (number of children = %d):" % (value.GetName(), value.GetNumChildren()), file=output) for child in value: print( "Name: %s, Value: %s" % (child.GetName(), child.GetValue()), file=output) if string_buffer: return output.getvalue() def get_registers(frame, kind): """Returns the registers given the frame and the kind of registers desired. Returns None if there's no such kind. """ registerSet = frame.GetRegisters() # Return type of SBValueList. for value in registerSet: if kind.lower() in value.GetName().lower(): return value return None def get_GPRs(frame): """Returns the general purpose registers of the frame as an SBValue. The returned SBValue object is iterable. An example: ... from lldbutil import get_GPRs regs = get_GPRs(frame) for reg in regs: print("%s => %s" % (reg.GetName(), reg.GetValue())) ... """ return get_registers(frame, "general purpose") def get_FPRs(frame): """Returns the floating point registers of the frame as an SBValue. The returned SBValue object is iterable. An example: ... 
from lldbutil import get_FPRs regs = get_FPRs(frame) for reg in regs: print("%s => %s" % (reg.GetName(), reg.GetValue())) ... """ return get_registers(frame, "floating point") def get_ESRs(frame): """Returns the exception state registers of the frame as an SBValue. The returned SBValue object is iterable. An example: ... from lldbutil import get_ESRs regs = get_ESRs(frame) for reg in regs: print("%s => %s" % (reg.GetName(), reg.GetValue())) ... """ return get_registers(frame, "exception state") # ====================================== # Utility classes/functions for SBValues # ====================================== class BasicFormatter(object): """The basic formatter inspects the value object and prints the value.""" def format(self, value, buffer=None, indent=0): if not buffer: output = SixStringIO() else: output = buffer # If there is a summary, it suffices. val = value.GetSummary() # Otherwise, get the value. if val is None: val = value.GetValue() if val is None and value.GetNumChildren() > 0: val = "%s (location)" % value.GetLocation() print("{indentation}({type}) {name} = {value}".format( indentation=' ' * indent, type=value.GetTypeName(), name=value.GetName(), value=val), file=output) return output.getvalue() class ChildVisitingFormatter(BasicFormatter): """The child visiting formatter prints the value and its immediate children. The constructor takes a keyword arg: indent_child, which defaults to 2. """ def __init__(self, indent_child=2): """Default indentation of 2 SPC's for the children.""" self.cindent = indent_child def format(self, value, buffer=None): if not buffer: output = SixStringIO() else: output = buffer BasicFormatter.format(self, value, buffer=output) for child in value: BasicFormatter.format( self, child, buffer=output, indent=self.cindent) return output.getvalue() class RecursiveDecentFormatter(BasicFormatter): """The recursive decent formatter prints the value and the decendents. 
The constructor takes two keyword args: indent_level, which defaults to 0, and indent_child, which defaults to 2. The current indentation level is determined by indent_level, while the immediate children has an additional indentation by inden_child. """ def __init__(self, indent_level=0, indent_child=2): self.lindent = indent_level self.cindent = indent_child def format(self, value, buffer=None): if not buffer: output = SixStringIO() else: output = buffer BasicFormatter.format(self, value, buffer=output, indent=self.lindent) new_indent = self.lindent + self.cindent for child in value: if child.GetSummary() is not None: BasicFormatter.format( self, child, buffer=output, indent=new_indent) else: if child.GetNumChildren() > 0: rdf = RecursiveDecentFormatter(indent_level=new_indent) rdf.format(child, buffer=output) else: BasicFormatter.format( self, child, buffer=output, indent=new_indent) return output.getvalue() # =========================================================== # Utility functions for path manipulation on remote platforms # =========================================================== def join_remote_paths(*paths): # TODO: update with actual platform name for remote windows once it exists if lldb.remote_platform.GetName() == 'remote-windows': return os.path.join(*paths).replace(os.path.sep, '\\') return os.path.join(*paths).replace(os.path.sep, '/') def append_to_process_working_directory(test, *paths): remote = lldb.remote_platform if remote: return join_remote_paths(remote.GetWorkingDirectory(), *paths) return os.path.join(test.getBuildDir(), *paths) # ================================================== # Utility functions to get the correct signal number # ================================================== import signal def get_signal_number(signal_name): platform = lldb.remote_platform if platform and platform.IsValid(): signals = platform.GetUnixSignals() if signals.IsValid(): signal_number = signals.GetSignalNumberFromName(signal_name) if signal_number 
> 0: return signal_number # No remote platform; fall back to using local python signals. return getattr(signal, signal_name) class PrintableRegex(object): def __init__(self, text): self.regex = re.compile(text) self.text = text def match(self, str): return self.regex.match(str) def __str__(self): return "%s" % (self.text) def __repr__(self): return "re.compile(%s) -> %s" % (self.text, self.regex) def skip_if_callable(test, mycallable, reason): if six.callable(mycallable): if mycallable(test): test.skipTest(reason) return True return False def skip_if_library_missing(test, target, library): def find_library(target, library): for module in target.modules: filename = module.file.GetFilename() if isinstance(library, str): if library == filename: return False elif hasattr(library, 'match'): if library.match(filename): return False return True def find_library_callable(test): return find_library(target, library) return skip_if_callable( test, find_library_callable, "could not find library matching '%s' in target %s" % (library, target)) def read_file_on_target(test, remote): if lldb.remote_platform: local = test.getBuildArtifact("file_from_target") error = lldb.remote_platform.Get(lldb.SBFileSpec(remote, False), lldb.SBFileSpec(local, True)) test.assertTrue(error.Success(), "Reading file {0} failed: {1}".format(remote, error)) else: local = remote with open(local, 'r') as f: return f.read() def read_file_from_process_wd(test, name): path = append_to_process_working_directory(test, name) return read_file_on_target(test, path) def wait_for_file_on_target(testcase, file_path, max_attempts=6): for i in range(max_attempts): err, retcode, msg = testcase.run_platform_command("ls %s" % file_path) if err.Success() and retcode == 0: break if i < max_attempts: # Exponential backoff! import time time.sleep(pow(2, i) * 0.25) else: testcase.fail( "File %s not found even after %d attempts." % (file_path, max_attempts)) return read_file_on_target(testcase, file_path)
apache-2.0
benoitsteiner/tensorflow-xsmm
tensorflow/python/keras/layers/recurrent_test.py
9
20464
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for recurrent layers functionality other than GRU, LSTM, SimpleRNN. See also: lstm_test.py, gru_test.py, simplernn_test.py. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python import keras from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import state_ops from tensorflow.python.platform import test class RNNTest(test.TestCase): def test_minimal_rnn_cell_non_layer(self): class MinimalRNNCell(object): def __init__(self, units, input_dim): self.units = units self.state_size = units self.kernel = keras.backend.variable( np.random.random((input_dim, units))) def call(self, inputs, states): prev_output = states[0] output = keras.backend.dot(inputs, self.kernel) + prev_output return output, [output] with self.test_session(): # Basic test case. cell = MinimalRNNCell(32, 5) x = keras.Input((None, 5)) layer = keras.layers.RNN(cell) y = layer(x) model = keras.models.Model(x, y) model.compile(optimizer='rmsprop', loss='mse') model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) # Test stacking. 
cells = [MinimalRNNCell(8, 5), MinimalRNNCell(32, 8), MinimalRNNCell(32, 32)] layer = keras.layers.RNN(cells) y = layer(x) model = keras.models.Model(x, y) model.compile(optimizer='rmsprop', loss='mse') model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) def test_minimal_rnn_cell_non_layer_multiple_states(self): class MinimalRNNCell(object): def __init__(self, units, input_dim): self.units = units self.state_size = (units, units) self.kernel = keras.backend.variable( np.random.random((input_dim, units))) def call(self, inputs, states): prev_output_1 = states[0] prev_output_2 = states[1] output = keras.backend.dot(inputs, self.kernel) output += prev_output_1 output -= prev_output_2 return output, [output * 2, output * 3] with self.test_session(): # Basic test case. cell = MinimalRNNCell(32, 5) x = keras.Input((None, 5)) layer = keras.layers.RNN(cell) y = layer(x) model = keras.models.Model(x, y) model.compile(optimizer='rmsprop', loss='mse') model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) # Test stacking. 
cells = [MinimalRNNCell(8, 5), MinimalRNNCell(16, 8), MinimalRNNCell(32, 16)] layer = keras.layers.RNN(cells) assert layer.cell.state_size == (32, 32, 16, 16, 8, 8) y = layer(x) model = keras.models.Model(x, y) model.compile(optimizer='rmsprop', loss='mse') model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) def test_minimal_rnn_cell_layer(self): class MinimalRNNCell(keras.layers.Layer): def __init__(self, units, **kwargs): self.units = units self.state_size = units super(MinimalRNNCell, self).__init__(**kwargs) def build(self, input_shape): self.kernel = self.add_weight(shape=(input_shape[-1], self.units), initializer='uniform', name='kernel') self.recurrent_kernel = self.add_weight( shape=(self.units, self.units), initializer='uniform', name='recurrent_kernel') self.built = True def call(self, inputs, states): prev_output = states[0] h = keras.backend.dot(inputs, self.kernel) output = h + keras.backend.dot(prev_output, self.recurrent_kernel) return output, [output] def get_config(self): config = {'units': self.units} base_config = super(MinimalRNNCell, self).get_config() return dict(list(base_config.items()) + list(config.items())) with self.test_session(): # Test basic case. x = keras.Input((None, 5)) cell = MinimalRNNCell(32) layer = keras.layers.RNN(cell) y = layer(x) model = keras.models.Model(x, y) model.compile(optimizer='rmsprop', loss='mse') model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) # Test basic case serialization. x_np = np.random.random((6, 5, 5)) y_np = model.predict(x_np) weights = model.get_weights() config = layer.get_config() with keras.utils.CustomObjectScope({'MinimalRNNCell': MinimalRNNCell}): layer = keras.layers.RNN.from_config(config) y = layer(x) model = keras.models.Model(x, y) model.set_weights(weights) y_np_2 = model.predict(x_np) self.assertAllClose(y_np, y_np_2, atol=1e-4) # Test stacking. 
cells = [MinimalRNNCell(8), MinimalRNNCell(12), MinimalRNNCell(32)] layer = keras.layers.RNN(cells) y = layer(x) model = keras.models.Model(x, y) model.compile(optimizer='rmsprop', loss='mse') model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) # Test stacked RNN serialization. x_np = np.random.random((6, 5, 5)) y_np = model.predict(x_np) weights = model.get_weights() config = layer.get_config() with keras.utils.CustomObjectScope({'MinimalRNNCell': MinimalRNNCell}): layer = keras.layers.RNN.from_config(config) y = layer(x) model = keras.models.Model(x, y) model.set_weights(weights) y_np_2 = model.predict(x_np) self.assertAllClose(y_np, y_np_2, atol=1e-4) def test_rnn_cell_with_constants_layer(self): class RNNCellWithConstants(keras.layers.Layer): def __init__(self, units, **kwargs): self.units = units self.state_size = units super(RNNCellWithConstants, self).__init__(**kwargs) def build(self, input_shape): if not isinstance(input_shape, list): raise TypeError('expects constants shape') [input_shape, constant_shape] = input_shape # will (and should) raise if more than one constant passed self.input_kernel = self.add_weight( shape=(input_shape[-1], self.units), initializer='uniform', name='kernel') self.recurrent_kernel = self.add_weight( shape=(self.units, self.units), initializer='uniform', name='recurrent_kernel') self.constant_kernel = self.add_weight( shape=(constant_shape[-1], self.units), initializer='uniform', name='constant_kernel') self.built = True def call(self, inputs, states, constants): [prev_output] = states [constant] = constants h_input = keras.backend.dot(inputs, self.input_kernel) h_state = keras.backend.dot(prev_output, self.recurrent_kernel) h_const = keras.backend.dot(constant, self.constant_kernel) output = h_input + h_state + h_const return output, [output] def get_config(self): config = {'units': self.units} base_config = super(RNNCellWithConstants, self).get_config() return dict(list(base_config.items()) + list(config.items())) 
with self.test_session(): # Test basic case. x = keras.Input((None, 5)) c = keras.Input((3,)) cell = RNNCellWithConstants(32) layer = keras.layers.RNN(cell) y = layer(x, constants=c) model = keras.models.Model([x, c], y) model.compile(optimizer='rmsprop', loss='mse') model.train_on_batch( [np.zeros((6, 5, 5)), np.zeros((6, 3))], np.zeros((6, 32)) ) with self.test_session(): # Test basic case serialization. x_np = np.random.random((6, 5, 5)) c_np = np.random.random((6, 3)) y_np = model.predict([x_np, c_np]) weights = model.get_weights() config = layer.get_config() custom_objects = {'RNNCellWithConstants': RNNCellWithConstants} with keras.utils.CustomObjectScope(custom_objects): layer = keras.layers.RNN.from_config(config.copy()) y = layer(x, constants=c) model = keras.models.Model([x, c], y) model.set_weights(weights) y_np_2 = model.predict([x_np, c_np]) self.assertAllClose(y_np, y_np_2, atol=1e-4) with self.test_session(): # test flat list inputs. with keras.utils.CustomObjectScope(custom_objects): layer = keras.layers.RNN.from_config(config.copy()) y = layer([x, c]) model = keras.models.Model([x, c], y) model.set_weights(weights) y_np_3 = model.predict([x_np, c_np]) self.assertAllClose(y_np, y_np_3, atol=1e-4) with self.test_session(): # Test stacking. cells = [keras.layers.recurrent.GRUCell(8), RNNCellWithConstants(12), RNNCellWithConstants(32)] layer = keras.layers.recurrent.RNN(cells) y = layer(x, constants=c) model = keras.models.Model([x, c], y) model.compile(optimizer='rmsprop', loss='mse') model.train_on_batch( [np.zeros((6, 5, 5)), np.zeros((6, 3))], np.zeros((6, 32)) ) with self.test_session(): # Test GRUCell reset_after property. 
x = keras.Input((None, 5)) c = keras.Input((3,)) cells = [keras.layers.recurrent.GRUCell(32, reset_after=True)] layer = keras.layers.recurrent.RNN(cells) y = layer(x, constants=c) model = keras.models.Model([x, c], y) model.compile(optimizer='rmsprop', loss='mse') model.train_on_batch( [np.zeros((6, 5, 5)), np.zeros((6, 3))], np.zeros((6, 32)) ) with self.test_session(): # Test stacked RNN serialization x_np = np.random.random((6, 5, 5)) c_np = np.random.random((6, 3)) y_np = model.predict([x_np, c_np]) weights = model.get_weights() config = layer.get_config() with keras.utils.CustomObjectScope(custom_objects): layer = keras.layers.recurrent.RNN.from_config(config.copy()) y = layer(x, constants=c) model = keras.models.Model([x, c], y) model.set_weights(weights) y_np_2 = model.predict([x_np, c_np]) self.assertAllClose(y_np, y_np_2, atol=1e-4) def test_rnn_cell_with_constants_layer_passing_initial_state(self): class RNNCellWithConstants(keras.layers.Layer): def __init__(self, units, **kwargs): self.units = units self.state_size = units super(RNNCellWithConstants, self).__init__(**kwargs) def build(self, input_shape): if not isinstance(input_shape, list): raise TypeError('expects constants shape') [input_shape, constant_shape] = input_shape # will (and should) raise if more than one constant passed self.input_kernel = self.add_weight( shape=(input_shape[-1], self.units), initializer='uniform', name='kernel') self.recurrent_kernel = self.add_weight( shape=(self.units, self.units), initializer='uniform', name='recurrent_kernel') self.constant_kernel = self.add_weight( shape=(constant_shape[-1], self.units), initializer='uniform', name='constant_kernel') self.built = True def call(self, inputs, states, constants): [prev_output] = states [constant] = constants h_input = keras.backend.dot(inputs, self.input_kernel) h_state = keras.backend.dot(prev_output, self.recurrent_kernel) h_const = keras.backend.dot(constant, self.constant_kernel) output = h_input + h_state + h_const 
return output, [output] def get_config(self): config = {'units': self.units} base_config = super(RNNCellWithConstants, self).get_config() return dict(list(base_config.items()) + list(config.items())) with self.test_session(): # Test basic case. x = keras.Input((None, 5)) c = keras.Input((3,)) s = keras.Input((32,)) cell = RNNCellWithConstants(32) layer = keras.layers.RNN(cell) y = layer(x, initial_state=s, constants=c) model = keras.models.Model([x, s, c], y) model.compile(optimizer='rmsprop', loss='mse') model.train_on_batch( [np.zeros((6, 5, 5)), np.zeros((6, 32)), np.zeros((6, 3))], np.zeros((6, 32)) ) with self.test_session(): # Test basic case serialization. x_np = np.random.random((6, 5, 5)) s_np = np.random.random((6, 32)) c_np = np.random.random((6, 3)) y_np = model.predict([x_np, s_np, c_np]) weights = model.get_weights() config = layer.get_config() custom_objects = {'RNNCellWithConstants': RNNCellWithConstants} with keras.utils.CustomObjectScope(custom_objects): layer = keras.layers.RNN.from_config(config.copy()) y = layer(x, initial_state=s, constants=c) model = keras.models.Model([x, s, c], y) model.set_weights(weights) y_np_2 = model.predict([x_np, s_np, c_np]) self.assertAllClose(y_np, y_np_2, atol=1e-4) # verify that state is used y_np_2_different_s = model.predict([x_np, s_np + 10., c_np]) with self.assertRaises(AssertionError): self.assertAllClose(y_np, y_np_2_different_s, atol=1e-4) with self.test_session(): # test flat list inputs with keras.utils.CustomObjectScope(custom_objects): layer = keras.layers.RNN.from_config(config.copy()) y = layer([x, s, c]) model = keras.models.Model([x, s, c], y) model.set_weights(weights) y_np_3 = model.predict([x_np, s_np, c_np]) self.assertAllClose(y_np, y_np_3, atol=1e-4) def test_stacked_rnn_attributes(self): cells = [keras.layers.LSTMCell(1), keras.layers.LSTMCell(1)] layer = keras.layers.RNN(cells) layer.build((None, None, 1)) # Test weights self.assertEqual(len(layer.trainable_weights), 6) cells[0].trainable 
= False self.assertEqual(len(layer.trainable_weights), 3) self.assertEqual(len(layer.non_trainable_weights), 3) # Test `get_losses_for` and `losses` x = keras.Input((None, 1)) loss_1 = math_ops.reduce_sum(x) loss_2 = math_ops.reduce_sum(cells[0].kernel) cells[0].add_loss(loss_1, inputs=x) cells[0].add_loss(loss_2) self.assertEqual(len(layer.losses), 2) self.assertEqual(layer.get_losses_for(None), [loss_2]) self.assertEqual(layer.get_losses_for(x), [loss_1]) # Test `get_updates_for` and `updates` cells = [keras.layers.LSTMCell(1), keras.layers.LSTMCell(1)] layer = keras.layers.RNN(cells) layer.build((None, None, 1)) x = keras.Input((None, 1)) update_1 = state_ops.assign_add(cells[0].kernel, x[0, 0, 0] * cells[0].kernel) update_2 = state_ops.assign_add(cells[0].kernel, array_ops.ones_like(cells[0].kernel)) cells[0].add_update(update_1, inputs=x) cells[0].add_update(update_2) self.assertEqual(len(layer.updates), 2) self.assertEqual(len(layer.get_updates_for(None)), 1) self.assertEqual(len(layer.get_updates_for(x)), 1) def test_rnn_dynamic_trainability(self): layer_class = keras.layers.SimpleRNN embedding_dim = 4 units = 3 layer = layer_class(units) layer.build((None, None, embedding_dim)) self.assertEqual(len(layer.weights), 3) self.assertEqual(len(layer.trainable_weights), 3) self.assertEqual(len(layer.non_trainable_weights), 0) layer.trainable = False self.assertEqual(len(layer.weights), 3) self.assertEqual(len(layer.trainable_weights), 0) self.assertEqual(len(layer.non_trainable_weights), 3) layer.trainable = True self.assertEqual(len(layer.weights), 3) self.assertEqual(len(layer.trainable_weights), 3) self.assertEqual(len(layer.non_trainable_weights), 0) def test_state_reuse_with_dropout(self): layer_class = keras.layers.SimpleRNN embedding_dim = 4 units = 3 timesteps = 2 num_samples = 2 with self.test_session(): input1 = keras.Input(batch_shape=(num_samples, timesteps, embedding_dim)) layer = layer_class(units, return_state=True, return_sequences=True, 
dropout=0.2) state = layer(input1)[1:] input2 = keras.Input(batch_shape=(num_samples, timesteps, embedding_dim)) output = layer_class(units)(input2, initial_state=state) model = keras.Model([input1, input2], output) inputs = [np.random.random((num_samples, timesteps, embedding_dim)), np.random.random((num_samples, timesteps, embedding_dim))] model.predict(inputs) def test_builtin_rnn_cell_serialization(self): for cell_class in [keras.layers.SimpleRNNCell, keras.layers.GRUCell, keras.layers.LSTMCell]: with self.test_session(): # Test basic case. x = keras.Input((None, 5)) cell = cell_class(32) layer = keras.layers.RNN(cell) y = layer(x) model = keras.models.Model(x, y) model.compile(optimizer='rmsprop', loss='mse') # Test basic case serialization. x_np = np.random.random((6, 5, 5)) y_np = model.predict(x_np) weights = model.get_weights() config = layer.get_config() layer = keras.layers.RNN.from_config(config) y = layer(x) model = keras.models.Model(x, y) model.set_weights(weights) y_np_2 = model.predict(x_np) self.assertAllClose(y_np, y_np_2, atol=1e-4) # Test stacking. cells = [cell_class(8), cell_class(12), cell_class(32)] layer = keras.layers.RNN(cells) y = layer(x) model = keras.models.Model(x, y) model.compile(optimizer='rmsprop', loss='mse') # Test stacked RNN serialization. 
x_np = np.random.random((6, 5, 5)) y_np = model.predict(x_np) weights = model.get_weights() config = layer.get_config() layer = keras.layers.RNN.from_config(config) y = layer(x) model = keras.models.Model(x, y) model.set_weights(weights) y_np_2 = model.predict(x_np) self.assertAllClose(y_np, y_np_2, atol=1e-4) def test_stacked_rnn_dropout(self): cells = [keras.layers.LSTMCell(3, dropout=0.1, recurrent_dropout=0.1), keras.layers.LSTMCell(3, dropout=0.1, recurrent_dropout=0.1)] layer = keras.layers.RNN(cells) with self.test_session(): x = keras.Input((None, 5)) y = layer(x) model = keras.models.Model(x, y) model.compile('sgd', 'mse') x_np = np.random.random((6, 5, 5)) y_np = np.random.random((6, 3)) model.train_on_batch(x_np, y_np) def test_stacked_rnn_compute_output_shape(self): cells = [keras.layers.LSTMCell(3), keras.layers.LSTMCell(6)] embedding_dim = 4 timesteps = 2 layer = keras.layers.RNN(cells, return_state=True, return_sequences=True) output_shape = layer.compute_output_shape((None, timesteps, embedding_dim)) expected_output_shape = [(None, timesteps, 6), (None, 6), (None, 6), (None, 3), (None, 3)] self.assertEqual( [tuple(o.as_list()) for o in output_shape], expected_output_shape) if __name__ == '__main__': test.main()
apache-2.0
dmccue/ansible
contrib/inventory/openshift.py
132
3564
#!/usr/bin/env python
# (c) 2013, Michael Scherer <misc@zarb.org>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
inventory: openshift
short_description: Openshift gears external inventory script
description:
- Generates inventory of Openshift gears using the REST interface
- this permit to reuse playbook to setup an Openshift gear
version_added: None
author: Michael Scherer
'''

# NOTE: this is Python 2 code (print statements, urllib2, ConfigParser,
# StringIO).  The script runs its main logic at import time, which is the
# convention for Ansible dynamic-inventory scripts of this era.
import urllib2
try:
    import json
except ImportError:
    import simplejson as json
import os
import os.path
import sys
import ConfigParser
import StringIO

# Lazily-initialised parser for ~/.openshift/express.conf; module-level so
# the file is read at most once across calls.
configparser = None


def get_from_rhc_config(variable):
    """Return *variable* from the user's rhc config file, or None.

    Reads ``~/.openshift/express.conf`` on first use.  The file has no
    section header, so a fake ``[root]`` section is prepended to make it
    parseable by ConfigParser.  Returns None when the file does not exist
    or the option is missing.
    """
    global configparser
    CONF_FILE = os.path.expanduser('~/.openshift/express.conf')
    if os.path.exists(CONF_FILE):
        if not configparser:
            # ConfigParser requires at least one section; inject one.
            ini_str = '[root]\n' + open(CONF_FILE, 'r').read()
            configparser = ConfigParser.SafeConfigParser()
            configparser.readfp(StringIO.StringIO(ini_str))
        try:
            return configparser.get('root', variable)
        except ConfigParser.NoOptionError:
            return None


def get_config(env_var, config_var):
    """Return a required setting, preferring the environment variable
    *env_var* and falling back to *config_var* from the rhc config file.

    Exits the process (status 1) with an Ansible-style failure message
    when the setting is found in neither place.
    """
    result = os.getenv(env_var)
    if not result:
        result = get_from_rhc_config(config_var)
    if not result:
        print "failed=True msg='missing %s'" % env_var
        sys.exit(1)
    return result


def get_json_from_api(url):
    """GET *url* from the Openshift broker and return the ``data`` field
    of the decoded JSON response.  Relies on the opener installed by
    passwd_setup() for HTTP basic auth.
    """
    req = urllib2.Request(url, None, {'Accept': 'application/json; version=1.5'})
    response = urllib2.urlopen(req)
    return json.loads(response.read())['data']


def passwd_setup(top_level_url, username, password):
    """Install a global urllib2 opener that answers HTTP basic-auth
    challenges for *top_level_url* with the given credentials.
    """
    # create a password manager
    password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
    password_mgr.add_password(None, top_level_url, username, password)
    handler = urllib2.HTTPBasicAuthHandler(password_mgr)
    opener = urllib2.build_opener(handler)
    urllib2.install_opener(opener)


# --- main script body (runs at import, as Ansible inventory scripts do) ---

username = get_config('ANSIBLE_OPENSHIFT_USERNAME', 'default_rhlogin')
password = get_config('ANSIBLE_OPENSHIFT_PASSWORD', 'password')
broker_url = 'https://%s/broker/rest/' % get_config('ANSIBLE_OPENSHIFT_BROKER',
                                                    'libra_server')

passwd_setup(broker_url, username, password)

# First call lists domains; only the first domain is inspected — presumably
# a single-domain account is assumed (TODO confirm against broker API).
response = get_json_from_api(broker_url + '/domains')

response = get_json_from_api("%s/domains/%s/applications" %
                             (broker_url, response[0]['id']))

# Build the Ansible inventory: one group per application, containing the
# gear's hostname and the ssh user to reach it.
result = {}
for app in response:

    # ssh://520311404832ce3e570000ff@blog-johndoe.example.org
    (user, host) = app['ssh_url'][6:].split('@')
    app_name = host.split('-')[0]

    result[app_name] = {}
    result[app_name]['hosts'] = []
    result[app_name]['hosts'].append(host)
    result[app_name]['vars'] = {}
    result[app_name]['vars']['ansible_ssh_user'] = user

# Standard dynamic-inventory CLI contract: --list dumps all groups,
# --host <host> dumps per-host vars (none here, hence the empty dict).
if len(sys.argv) == 2 and sys.argv[1] == '--list':
    print json.dumps(result)
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
    print json.dumps({})
else:
    print "Need an argument, either --list or --host <host>"
gpl-3.0
sungeunbae/resbaz_lessons
r-novice-inflammation/tools/filters/blockquote2div.py
19
4937
#!/usr/bin/env python
"""Pandoc filter that rewrites specially-marked blockquotes as styled divs.

Usage:

    pandoc source.md --filter=blockquote2div.py --output=output.html

A blockquote is converted when it begins with a header that either matches
one of the special titles or carries exactly one attribute class from
``SPECIAL_CLASSES`` (e.g. ``> ## Callout time! {.callout}``).  The result is
a Bootstrap panel: the header (with a glyphicon prepended) becomes the panel
heading and the remaining blocks become the panel body.  ``callout`` blocks
are wrapped in ``<aside>``; every other kind in ``<section>``.

For debugging, the filter can be exercised on its own:

    pandoc source.md --to json | python blockquote2div.py | pandoc --from json
"""

import pandocfilters as pf

# Trigger classes: each maps to (Bootstrap panel kind, glyphicon name).
# A blockquote whose leading header has exactly one of these classes is
# converted to a div.
SPECIAL_CLASSES = {
    "callout": ("panel-info", "glyphicon-pushpin"),
    "challenge": ("panel-success", "glyphicon-pencil"),
    "prereq": ("panel-warning", "glyphicon-education"),
    "getready": ("panel-warning", "glyphicon-check"),
    "objectives": ("panel-warning", "glyphicon-certificate"),
}


def find_header(blockquote):
    """Return ``(level, attr, inlines)`` of a header that is the very first
    element of *blockquote*, or None when the quote does not start with a
    header.  ``attr`` is ``[id, classes, key-value pairs]``.
    """
    first = blockquote[0]
    if first['t'] != 'Header':
        return None
    level, attr, inlines = first['c']
    return level, attr, inlines


def blockquote2div(key, value, format, meta):
    """Pandoc filter callback: convert a qualifying BlockQuote into a
    Bootstrap panel.  Suitable for passing straight to
    ``pandocfilters.toJSONFilter``.  Returns None (element unchanged)
    unless the blockquote leads with a header carrying a single class
    from SPECIAL_CLASSES.
    """
    if key != 'BlockQuote':
        return None

    blockquote = value
    found = find_header(blockquote)
    if not found:
        return None

    _level, (_ident, classes, _kvs), _inlines = found
    if len(classes) != 1 or classes[0] not in SPECIAL_CLASSES:
        return None

    panel_kind, glyphicon_kind = SPECIAL_CLASSES[classes[0]]

    h_level, h_attr, h_inlines = blockquote[0]['c']
    # Prepend an icon span to the header's inline content.  This mutates
    # the inline list of the original AST node in place, which is fine
    # because that node is re-emitted below.
    icon = pf.Span(["", ["glyphicon", glyphicon_kind], []], [])
    h_inlines.insert(0, icon)
    heading = pf.Header(h_level, [h_attr[0], [], []], h_inlines)

    # Only the header goes into the panel heading; every remaining block
    # of the quote becomes the panel body.  (A blockquote is just a list
    # of blocks, so the tail slice can be handed to Div directly.)
    panel_heading = pf.Div(("", ["panel-heading"], []), [heading])
    panel_body = pf.Div(("", ["panel-body"], []), blockquote[1:])

    # Tack the Bootstrap panel classes onto the trigger class; note this
    # also mutates the header's class list.
    classes.append("panel")
    classes.append(panel_kind)

    if classes[0] == "callout":
        opening = {"t": "RawBlock",
                   "c": ["html",
                         "<aside class=\"{0}\">".format(' '.join(classes))]}
        closing = {"t": "RawBlock", "c": ["html", "</aside>"]}
    else:
        opening = {"t": "RawBlock",
                   "c": ["html",
                         "<section class=\"{0}\">".format(' '.join(classes))]}
        closing = {"t": "RawBlock", "c": ["html", "</section>"]}

    return [opening, panel_heading, panel_body, closing]


if __name__ == '__main__':
    # toJSONFilter reads pandoc's JSON AST from stdin, walks the tree
    # applying the callback to each element as (key, value, format,
    # metadata), and writes the JSON back to stdout.  Callback return
    # values: None keeps the element, [] deletes it, anything else
    # replaces it.
    pf.toJSONFilter(blockquote2div)
mit
darrencheng0817/AlgorithmLearning
Python/leetcode/NumberOfIslandsIi.py
1
2426
'''
Created on 1.12.2016

@author: Darren
'''

'''
Number of Islands II.

A 2d grid map of m rows and n columns is initially filled with water. We may
perform an addLand operation which turns the water at position (row, col)
into a land. Given a list of positions to operate, count the number of
islands after each addLand operation. An island is surrounded by water and
is formed by connecting adjacent lands horizontally or vertically. You may
assume all four edges of the grid are all surrounded by water.

Example: m = 3, n = 3, positions = [[0,0], [0,1], [1,2], [2,1]]
yields [1, 1, 2, 3].

Challenge: time complexity O(k log mn), where k is the length of positions.
'''


class Solution(object):
    def numIslands2(self, m, n, positions):
        """Return the island count after each addLand operation.

        :type m: int
        :type n: int
        :type positions: List[List[int]]
        :rtype: List[int]

        Uses a union-find over the set of land cells: each new land cell
        starts as its own island and is merged with any of its four
        neighbours that are already land.  Positions are trusted to lie
        inside the m x n grid (m and n are not otherwise needed, since
        out-of-grid neighbours can never appear in the land set).

        Bug fix over the original: adding land where land already exists
        is now a no-op.  Previously a duplicate position re-initialised
        the cell in the union-find (resetting its parent and bumping the
        component count), so e.g. [[0,0], [0,0]] answered [1, 2] instead
        of [1, 1].
        """
        ans = []
        islands = Union()
        for p in map(tuple, positions):
            if p in islands.id:
                # Duplicate addLand: the grid is unchanged, so the
                # island count is simply repeated.
                ans.append(islands.count)
                continue
            islands.add(p)
            for dp in (0, 1), (0, -1), (1, 0), (-1, 0):
                q = (p[0] + dp[0], p[1] + dp[1])
                # Merge only with neighbours that are already land.
                if q in islands.id:
                    islands.unite(p, q)
            ans.append(islands.count)
        return ans


class Union(object):
    """Weighted quick-union with path halving over hashable items."""

    def __init__(self):
        self.id = {}     # item -> parent item (a root points to itself)
        self.sz = {}     # root item -> size of its component
        self.count = 0   # number of disjoint components

    def add(self, p):
        """Insert p as a new singleton component."""
        self.id[p] = p
        self.sz[p] = 1
        self.count += 1

    def root(self, i):
        """Return the root of i's component, halving paths on the way up."""
        while i != self.id[i]:
            self.id[i] = self.id[self.id[i]]  # path halving
            i = self.id[i]
        return i

    def unite(self, p, q):
        """Merge the components of p and q (smaller hangs under larger).

        No-op when they already share a root.
        """
        i, j = self.root(p), self.root(q)
        if i == j:
            return
        if self.sz[i] > self.sz[j]:
            i, j = j, i
        self.id[i] = j
        self.sz[j] += self.sz[i]
        self.count -= 1
mit
apple/llvm-project
lldb/test/API/commands/expression/import-std-module/forward_decl_from_module/TestForwardDeclFromStdModule.py
5
1586
""" Tests forward declarations coming from the `std` module. """ from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * from lldbsuite.test import lldbutil import os class TestCase(TestBase): mydir = TestBase.compute_mydir(__file__) # We only emulate a fake libc++ in this test and don't use the real libc++, # but we still add the libc++ category so that this test is only run in # test configurations where libc++ is actually supposed to be tested. @add_test_categories(["libc++"]) @skipIfRemote @skipIf(compiler=no_match("clang")) def test(self): self.build() sysroot = os.path.join(os.getcwd(), "root") # Set the sysroot where our dummy libc++ exists. self.runCmd("platform select --sysroot '" + sysroot + "' host", CURRENT_EXECUTABLE_SET) lldbutil.run_to_source_breakpoint(self, "// Set break point at this line.", lldb.SBFileSpec("main.cpp")) self.runCmd("settings set target.import-std-module true") # Print the dummy `std::vector`. It only has the dummy member in it # so the standard `std::vector` formatter can't format it. Instead use # the raw output so LLDB has to show the member variable. # Both `std::vector` and the type of the member have forward # declarations before their definitions. self.expect("expr --raw -- v", substrs=['(std::__1::vector<int>) $0 = {', 'f = nullptr', '}'])
apache-2.0
837468220/python-for-android
python-build/python-libs/gdata/src/gdata/Crypto/Protocol/AllOrNothing.py
226
10952
"""This file implements all-or-nothing package transformations. An all-or-nothing package transformation is one in which some text is transformed into message blocks, such that all blocks must be obtained before the reverse transformation can be applied. Thus, if any blocks are corrupted or lost, the original message cannot be reproduced. An all-or-nothing package transformation is not encryption, although a block cipher algorithm is used. The encryption key is randomly generated and is extractable from the message blocks. This class implements the All-Or-Nothing package transformation algorithm described in: Ronald L. Rivest. "All-Or-Nothing Encryption and The Package Transform" http://theory.lcs.mit.edu/~rivest/fusion.pdf """ __revision__ = "$Id: AllOrNothing.py,v 1.8 2003/02/28 15:23:20 akuchling Exp $" import operator import string from Crypto.Util.number import bytes_to_long, long_to_bytes class AllOrNothing: """Class implementing the All-or-Nothing package transform. Methods for subclassing: _inventkey(key_size): Returns a randomly generated key. Subclasses can use this to implement better random key generating algorithms. The default algorithm is probably not very cryptographically secure. """ def __init__(self, ciphermodule, mode=None, IV=None): """AllOrNothing(ciphermodule, mode=None, IV=None) ciphermodule is a module implementing the cipher algorithm to use. It must provide the PEP272 interface. Note that the encryption key is randomly generated automatically when needed. Optional arguments mode and IV are passed directly through to the ciphermodule.new() method; they are the feedback mode and initialization vector to use. All three arguments must be the same for the object used to create the digest, and to undigest'ify the message blocks. 
""" self.__ciphermodule = ciphermodule self.__mode = mode self.__IV = IV self.__key_size = ciphermodule.key_size if self.__key_size == 0: self.__key_size = 16 __K0digit = chr(0x69) def digest(self, text): """digest(text:string) : [string] Perform the All-or-Nothing package transform on the given string. Output is a list of message blocks describing the transformed text, where each block is a string of bit length equal to the ciphermodule's block_size. """ # generate a random session key and K0, the key used to encrypt the # hash blocks. Rivest calls this a fixed, publically-known encryption # key, but says nothing about the security implications of this key or # how to choose it. key = self._inventkey(self.__key_size) K0 = self.__K0digit * self.__key_size # we need two cipher objects here, one that is used to encrypt the # message blocks and one that is used to encrypt the hashes. The # former uses the randomly generated key, while the latter uses the # well-known key. mcipher = self.__newcipher(key) hcipher = self.__newcipher(K0) # Pad the text so that its length is a multiple of the cipher's # block_size. Pad with trailing spaces, which will be eliminated in # the undigest() step. block_size = self.__ciphermodule.block_size padbytes = block_size - (len(text) % block_size) text = text + ' ' * padbytes # Run through the algorithm: # s: number of message blocks (size of text / block_size) # input sequence: m1, m2, ... ms # random key K' (`key' in the code) # Compute output sequence: m'1, m'2, ... m's' for s' = s + 1 # Let m'i = mi ^ E(K', i) for i = 1, 2, 3, ..., s # Let m's' = K' ^ h1 ^ h2 ^ ... hs # where hi = E(K0, m'i ^ i) for i = 1, 2, ... 
s # # The one complication I add is that the last message block is hard # coded to the number of padbytes added, so that these can be stripped # during the undigest() step s = len(text) / block_size blocks = [] hashes = [] for i in range(1, s+1): start = (i-1) * block_size end = start + block_size mi = text[start:end] assert len(mi) == block_size cipherblock = mcipher.encrypt(long_to_bytes(i, block_size)) mticki = bytes_to_long(mi) ^ bytes_to_long(cipherblock) blocks.append(mticki) # calculate the hash block for this block hi = hcipher.encrypt(long_to_bytes(mticki ^ i, block_size)) hashes.append(bytes_to_long(hi)) # Add the padbytes length as a message block i = i + 1 cipherblock = mcipher.encrypt(long_to_bytes(i, block_size)) mticki = padbytes ^ bytes_to_long(cipherblock) blocks.append(mticki) # calculate this block's hash hi = hcipher.encrypt(long_to_bytes(mticki ^ i, block_size)) hashes.append(bytes_to_long(hi)) # Now calculate the last message block of the sequence 1..s'. This # will contain the random session key XOR'd with all the hash blocks, # so that for undigest(), once all the hash blocks are calculated, the # session key can be trivially extracted. Calculating all the hash # blocks requires that all the message blocks be received, thus the # All-or-Nothing algorithm succeeds. mtick_stick = bytes_to_long(key) ^ reduce(operator.xor, hashes) blocks.append(mtick_stick) # we convert the blocks to strings since in Python, byte sequences are # always represented as strings. This is more consistent with the # model that encryption and hash algorithms always operate on strings. return map(long_to_bytes, blocks) def undigest(self, blocks): """undigest(blocks : [string]) : string Perform the reverse package transformation on a list of message blocks. Note that the ciphermodule used for both transformations must be the same. blocks is a list of strings of bit length equal to the ciphermodule's block_size. 
""" # better have at least 2 blocks, for the padbytes package and the hash # block accumulator if len(blocks) < 2: raise ValueError, "List must be at least length 2." # blocks is a list of strings. We need to deal with them as long # integers blocks = map(bytes_to_long, blocks) # Calculate the well-known key, to which the hash blocks are # encrypted, and create the hash cipher. K0 = self.__K0digit * self.__key_size hcipher = self.__newcipher(K0) # Since we have all the blocks (or this method would have been called # prematurely), we can calcualte all the hash blocks. hashes = [] for i in range(1, len(blocks)): mticki = blocks[i-1] ^ i hi = hcipher.encrypt(long_to_bytes(mticki)) hashes.append(bytes_to_long(hi)) # now we can calculate K' (key). remember the last block contains # m's' which we don't include here key = blocks[-1] ^ reduce(operator.xor, hashes) # and now we can create the cipher object mcipher = self.__newcipher(long_to_bytes(key)) block_size = self.__ciphermodule.block_size # And we can now decode the original message blocks parts = [] for i in range(1, len(blocks)): cipherblock = mcipher.encrypt(long_to_bytes(i, block_size)) mi = blocks[i-1] ^ bytes_to_long(cipherblock) parts.append(mi) # The last message block contains the number of pad bytes appended to # the original text string, such that its length was an even multiple # of the cipher's block_size. This number should be small enough that # the conversion from long integer to integer should never overflow padbytes = int(parts[-1]) text = string.join(map(long_to_bytes, parts[:-1]), '') return text[:-padbytes] def _inventkey(self, key_size): # TBD: Not a very secure algorithm. Eventually, I'd like to use JHy's # kernelrand module import time from Crypto.Util import randpool # TBD: key_size * 2 to work around possible bug in RandomPool? 
pool = randpool.RandomPool(key_size * 2) while key_size > pool.entropy: pool.add_event() # we now have enough entropy in the pool to get a key_size'd key return pool.get_bytes(key_size) def __newcipher(self, key): if self.__mode is None and self.__IV is None: return self.__ciphermodule.new(key) elif self.__IV is None: return self.__ciphermodule.new(key, self.__mode) else: return self.__ciphermodule.new(key, self.__mode, self.__IV) if __name__ == '__main__': import sys import getopt import base64 usagemsg = '''\ Test module usage: %(program)s [-c cipher] [-l] [-h] Where: --cipher module -c module Cipher module to use. Default: %(ciphermodule)s --aslong -l Print the encoded message blocks as long integers instead of base64 encoded strings --help -h Print this help message ''' ciphermodule = 'AES' aslong = 0 def usage(code, msg=None): if msg: print msg print usagemsg % {'program': sys.argv[0], 'ciphermodule': ciphermodule} sys.exit(code) try: opts, args = getopt.getopt(sys.argv[1:], 'c:l', ['cipher=', 'aslong']) except getopt.error, msg: usage(1, msg) if args: usage(1, 'Too many arguments') for opt, arg in opts: if opt in ('-h', '--help'): usage(0) elif opt in ('-c', '--cipher'): ciphermodule = arg elif opt in ('-l', '--aslong'): aslong = 1 # ugly hack to force __import__ to give us the end-path module module = __import__('Crypto.Cipher.'+ciphermodule, None, None, ['new']) a = AllOrNothing(module) print 'Original text:\n==========' print __doc__ print '==========' msgblocks = a.digest(__doc__) print 'message blocks:' for i, blk in map(None, range(len(msgblocks)), msgblocks): # base64 adds a trailing newline print ' %3d' % i, if aslong: print bytes_to_long(blk) else: print base64.encodestring(blk)[:-1] # # get a new undigest-only object so there's no leakage b = AllOrNothing(module) text = b.undigest(msgblocks) if text == __doc__: print 'They match!' else: print 'They differ!'
apache-2.0
mindbody/API-Examples
SDKs/Python/swagger_client/models/get_class_visits_request.py
1
4453
# coding: utf-8

"""
    MINDBODY Public API

    No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)  # noqa: E501

    OpenAPI spec version: v6

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

import pprint
import re  # noqa: F401


class GetClassVisitsRequest(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'class_id': 'int',
        'last_modified_date': 'datetime'
    }

    attribute_map = {
        'class_id': 'ClassID',
        'last_modified_date': 'LastModifiedDate'
    }

    def __init__(self, class_id=None, last_modified_date=None):  # noqa: E501
        """GetClassVisitsRequest - a model defined in Swagger"""  # noqa: E501

        self._class_id = None
        self._last_modified_date = None
        self.discriminator = None

        if class_id is not None:
            self.class_id = class_id
        if last_modified_date is not None:
            self.last_modified_date = last_modified_date

    @property
    def class_id(self):
        """Gets the class_id of this GetClassVisitsRequest.  # noqa: E501

        The class ID.  # noqa: E501

        :return: The class_id of this GetClassVisitsRequest.  # noqa: E501
        :rtype: int
        """
        return self._class_id

    @class_id.setter
    def class_id(self, class_id):
        """Sets the class_id of this GetClassVisitsRequest.

        The class ID.  # noqa: E501

        :param class_id: The class_id of this GetClassVisitsRequest.  # noqa: E501
        :type: int
        """

        self._class_id = class_id

    @property
    def last_modified_date(self):
        """Gets the last_modified_date of this GetClassVisitsRequest.  # noqa: E501

        When included in the request, only records modified on or after the `LastModifiedDate` specified are included in the response.  # noqa: E501

        :return: The last_modified_date of this GetClassVisitsRequest.  # noqa: E501
        :rtype: datetime
        """
        return self._last_modified_date

    @last_modified_date.setter
    def last_modified_date(self, last_modified_date):
        """Sets the last_modified_date of this GetClassVisitsRequest.

        When included in the request, only records modified on or after the `LastModifiedDate` specified are included in the response.  # noqa: E501

        :param last_modified_date: The last_modified_date of this GetClassVisitsRequest.  # noqa: E501
        :type: datetime
        """

        self._last_modified_date = last_modified_date

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # dict.items() works on both Python 2 and 3; the `six` dependency
        # was only used for this single iteration and has been removed.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(GetClassVisitsRequest, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, GetClassVisitsRequest):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
bsd-2-clause
sarvex/tensorflow
tensorflow/python/training/basic_session_run_hooks.py
18
42161
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Some common SessionRunHook classes. Note that the symbols that are exported to v1 tf.train namespace are also exported to v2 in tf.estimator namespace. See https://github.com/tensorflow/estimator/blob/master/tensorflow_estimator/python/estimator/hooks/basic_session_run_hooks.py """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import time import numpy as np import six from tensorflow.core.framework.summary_pb2 import Summary from tensorflow.core.protobuf import config_pb2 from tensorflow.core.util.event_pb2 import SessionLog from tensorflow.python.client import timeline from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import meta_graph from tensorflow.python.framework import ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import variable_scope from tensorflow.python.platform import gfile from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training import session_run_hook from tensorflow.python.training import training_util from tensorflow.python.training.session_run_hook import SessionRunArgs from tensorflow.python.training.summary_io import SummaryWriterCache from 
tensorflow.python.util.tf_export import tf_export _HOOKS = "hooks" _STEPS_PER_RUN_VAR = "steps_per_run" class _HookTimer(object): """Base timer for determining when Hooks should trigger. Should not be instantiated directly. """ def __init__(self): pass def reset(self): """Resets the timer.""" pass def should_trigger_for_step(self, step): """Return true if the timer should trigger for the specified step.""" raise NotImplementedError def update_last_triggered_step(self, step): """Update the last triggered time and step number. Args: step: The current step. Returns: A pair `(elapsed_time, elapsed_steps)`, where `elapsed_time` is the number of seconds between the current trigger and the last one (a float), and `elapsed_steps` is the number of steps between the current trigger and the last one. Both values will be set to `None` on the first trigger. """ raise NotImplementedError def last_triggered_step(self): """Returns the last triggered time step or None if never triggered.""" raise NotImplementedError @tf_export(v1=["train.SecondOrStepTimer"]) class SecondOrStepTimer(_HookTimer): """Timer that triggers at most once every N seconds or once every N steps. This symbol is also exported to v2 in tf.estimator namespace. See https://github.com/tensorflow/estimator/blob/master/tensorflow_estimator/python/estimator/hooks/basic_session_run_hooks.py """ def __init__(self, every_secs=None, every_steps=None): self.reset() self._every_secs = every_secs self._every_steps = every_steps if self._every_secs is None and self._every_steps is None: raise ValueError("Either every_secs or every_steps should be provided.") if (self._every_secs is not None) and (self._every_steps is not None): raise ValueError("Can not provide both every_secs and every_steps.") super(SecondOrStepTimer, self).__init__() def reset(self): self._last_triggered_step = None self._last_triggered_time = None def should_trigger_for_step(self, step): """Return true if the timer should trigger for the specified step. 
Args: step: Training step to trigger on. Returns: True if the difference between the current time and the time of the last trigger exceeds `every_secs`, or if the difference between the current step and the last triggered step exceeds `every_steps`. False otherwise. """ if self._last_triggered_step is None: return True if self._last_triggered_step == step: return False if self._every_secs is not None: if time.time() >= self._last_triggered_time + self._every_secs: return True if self._every_steps is not None: if step >= self._last_triggered_step + self._every_steps: return True return False def update_last_triggered_step(self, step): current_time = time.time() if self._last_triggered_time is None: elapsed_secs = None elapsed_steps = None else: elapsed_secs = current_time - self._last_triggered_time elapsed_steps = step - self._last_triggered_step self._last_triggered_time = current_time self._last_triggered_step = step return (elapsed_secs, elapsed_steps) def last_triggered_step(self): return self._last_triggered_step class NeverTriggerTimer(_HookTimer): """Timer that never triggers.""" def should_trigger_for_step(self, step): _ = step return False def update_last_triggered_step(self, step): _ = step return (None, None) def last_triggered_step(self): return None @tf_export(v1=["train.LoggingTensorHook"]) class LoggingTensorHook(session_run_hook.SessionRunHook): """Prints the given tensors every N local steps, every N seconds, or at end. The tensors will be printed to the log, with `INFO` severity. If you are not seeing the logs, you might want to add the following line after your imports: ```python tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO) ``` Note that if `at_end` is True, `tensors` should not include any tensor whose evaluation produces a side effect such as consuming additional inputs. """ def __init__(self, tensors, every_n_iter=None, every_n_secs=None, at_end=False, formatter=None): """Initializes a `LoggingTensorHook`. 
Args: tensors: `dict` that maps string-valued tags to tensors/tensor names, or `iterable` of tensors/tensor names. every_n_iter: `int`, print the values of `tensors` once every N local steps taken on the current worker. every_n_secs: `int` or `float`, print the values of `tensors` once every N seconds. Exactly one of `every_n_iter` and `every_n_secs` should be provided. at_end: `bool` specifying whether to print the values of `tensors` at the end of the run. formatter: function, takes dict of `tag`->`Tensor` and returns a string. If `None` uses default printing all tensors. Raises: ValueError: if `every_n_iter` is non-positive. """ only_log_at_end = ( at_end and (every_n_iter is None) and (every_n_secs is None)) if (not only_log_at_end and (every_n_iter is None) == (every_n_secs is None)): raise ValueError( "either at_end and/or exactly one of every_n_iter and every_n_secs " "must be provided.") if every_n_iter is not None and every_n_iter <= 0: raise ValueError("invalid every_n_iter=%s." 
% every_n_iter) if not isinstance(tensors, dict): self._tag_order = tensors tensors = {item: item for item in tensors} else: self._tag_order = sorted(tensors.keys()) self._tensors = tensors self._formatter = formatter self._timer = ( NeverTriggerTimer() if only_log_at_end else SecondOrStepTimer( every_secs=every_n_secs, every_steps=every_n_iter)) self._log_at_end = at_end def begin(self): self._timer.reset() self._iter_count = 0 # Convert names to tensors if given self._current_tensors = { tag: _as_graph_element(tensor) for (tag, tensor) in self._tensors.items() } def before_run(self, run_context): # pylint: disable=unused-argument self._should_trigger = self._timer.should_trigger_for_step(self._iter_count) if self._should_trigger: return SessionRunArgs(self._current_tensors) else: return None def _log_tensors(self, tensor_values): original = np.get_printoptions() np.set_printoptions(suppress=True) elapsed_secs, _ = self._timer.update_last_triggered_step(self._iter_count) if self._formatter: logging.info(self._formatter(tensor_values)) else: stats = [] for tag in self._tag_order: stats.append("%s = %s" % (tag, tensor_values[tag])) if elapsed_secs is not None: logging.info("%s (%.3f sec)", ", ".join(stats), elapsed_secs) else: logging.info("%s", ", ".join(stats)) np.set_printoptions(**original) def after_run(self, run_context, run_values): _ = run_context if self._should_trigger: self._log_tensors(run_values.results) self._iter_count += 1 def end(self, session): if self._log_at_end: values = session.run(self._current_tensors) self._log_tensors(values) def get_or_create_steps_per_run_variable(): """Gets or creates the steps_per_run variable. In Estimator, the user provided computation, the model_fn, is wrapped inside a tf.while_loop for peak performance. The iterations of the loop are specified by this variable, which adjusts its value on the CPU after each device program execution and before the next execution. 
The purpose of using a variable, rather than a constant, is to allow Estimator adapt the device training iterations according to the final steps specified by users. For example, if the user sets the steps_per_run as 4 and steps as 10 in Estimator.train(), the steps_per_run variable will have the following value before each training run. - 1-st execution: steps_per_run = 4 - 2-nd execution: steps_per_run = 4 - 3-rd execution: steps_per_run = 2 As model_fn increases the global step once per train_op invocation, the global step is 10 after all executions, matching the steps=10 inputs passed in by users. Returns: A TF non-trainable resource variable. Raises: RuntimeError: If multi steps_per_run variables were found. """ graph = ops.get_default_graph() collection_name = "{}_{}".format(_HOOKS, _STEPS_PER_RUN_VAR) steps_per_run_vars = graph.get_collection(collection_name) if len(steps_per_run_vars) == 1: return steps_per_run_vars[0] elif len(steps_per_run_vars) > 1: raise RuntimeError("Multiple steps_per_run_var in collection.") with variable_scope.variable_scope(_HOOKS, reuse=variable_scope.AUTO_REUSE): return variable_scope.get_variable( _STEPS_PER_RUN_VAR, initializer=init_ops.ones_initializer(), shape=[], dtype=dtypes.int32, trainable=False, collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES], use_resource=True) class _MultiStepStopAtStepHook(session_run_hook.SessionRunHook): """Hook that requests stop at a specified step.""" def __init__(self, num_steps=None, last_step=None, steps_per_run=1): """Initializes a `MultiStepStopAtStepHook`. This hook requests stop after either a number of steps have been executed or a last step has been reached. Only one of the two options can be specified. if `num_steps` is specified, it indicates the number of steps to execute after `begin()` is called. If instead `last_step` is specified, it indicates the last step we want to execute, as passed to the `after_run()` call. 
In Estimator, the user provided computation, the model_fn, is wrapped inside a tf.while_loop for peak performance. The steps_per_run variable determines the number of iterations of the loop before returning to the CPU. Args: num_steps: Number of steps to execute. last_step: Step after which to stop. steps_per_run: Number of steps executed per run call. Raises: ValueError: If one of the arguments is invalid. """ if num_steps is None and last_step is None: raise ValueError("One of num_steps or last_step must be specified.") if num_steps is not None and last_step is not None: raise ValueError("Only one of num_steps or last_step can be specified.") if steps_per_run is None or steps_per_run < 1: raise ValueError("steps_per_run should be greater than 0") self._num_steps = num_steps self._last_step = last_step self._steps_per_run_initial_value = steps_per_run def begin(self): self._global_step_tensor = training_util.get_global_step() if self._global_step_tensor is None: raise RuntimeError("Global step should be created to use StopAtStepHook.") self._steps_per_run_variable = get_or_create_steps_per_run_variable() def _update_steps_per_run_variable(self, global_step, session): steps = min(self._last_step - global_step, self._steps_per_run_initial_value) self._steps_per_run_variable.load(steps, session=session) def after_create_session(self, session, coord): global_step = session.run(self._global_step_tensor) if self._last_step is None: self._last_step = global_step + self._num_steps self._update_steps_per_run_variable(global_step, session) def after_run(self, run_context, run_values): # Global step cannot be retrieved via SessionRunArgs and before_run due to # race condition in hook execution. 
global_step = run_context.session.run(self._global_step_tensor) if global_step >= self._last_step: run_context.request_stop() else: self._update_steps_per_run_variable(global_step, run_context.session) @tf_export(v1=["train.StopAtStepHook"]) class StopAtStepHook(session_run_hook.SessionRunHook): """Hook that requests stop at a specified step.""" def __init__(self, num_steps=None, last_step=None): """Initializes a `StopAtStepHook`. This hook requests stop after either a number of steps have been executed or a last step has been reached. Only one of the two options can be specified. if `num_steps` is specified, it indicates the number of steps to execute after `begin()` is called. If instead `last_step` is specified, it indicates the last step we want to execute, as passed to the `after_run()` call. Args: num_steps: Number of steps to execute. last_step: Step after which to stop. Raises: ValueError: If one of the arguments is invalid. """ if num_steps is None and last_step is None: raise ValueError("One of num_steps or last_step must be specified.") if num_steps is not None and last_step is not None: raise ValueError("Only one of num_steps or last_step can be specified.") self._num_steps = num_steps self._last_step = last_step def begin(self): self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access if self._global_step_tensor is None: raise RuntimeError("Global step should be created to use StopAtStepHook.") def after_create_session(self, session, coord): if self._last_step is None: global_step = session.run(self._global_step_tensor) self._last_step = global_step + self._num_steps def before_run(self, run_context): # pylint: disable=unused-argument return SessionRunArgs(self._global_step_tensor) def after_run(self, run_context, run_values): global_step = run_values.results + 1 if global_step >= self._last_step: # Check latest global step to ensure that the targeted last step is # reached. 
global_step read tensor is the value of global step # before running the operation. We're not sure whether current session.run # incremented the global_step or not. Here we're checking it. step = run_context.session.run(self._global_step_tensor) if step >= self._last_step: run_context.request_stop() @tf_export(v1=["train.CheckpointSaverListener"]) class CheckpointSaverListener(object): """Interface for listeners that take action before or after checkpoint save. `CheckpointSaverListener` triggers only in steps when `CheckpointSaverHook` is triggered, and provides callbacks at the following points: - before using the session - before each call to `Saver.save()` - after each call to `Saver.save()` - at the end of session To use a listener, implement a class and pass the listener to a `CheckpointSaverHook`, as in this example: ```python class ExampleCheckpointSaverListener(CheckpointSaverListener): def begin(self): # You can add ops to the graph here. print('Starting the session.') self.your_tensor = ... def before_save(self, session, global_step_value): print('About to write a checkpoint') def after_save(self, session, global_step_value): print('Done writing checkpoint.') if decided_to_stop_training(): return True def end(self, session, global_step_value): print('Done with the session.') ... listener = ExampleCheckpointSaverListener() saver_hook = tf.estimator.CheckpointSaverHook( checkpoint_dir, listeners=[listener]) with tf.compat.v1.train.MonitoredTrainingSession(chief_only_hooks=[saver_hook]): ... ``` A `CheckpointSaverListener` may simply take some action after every checkpoint save. It is also possible for the listener to use its own schedule to act less frequently, e.g. based on global_step_value. In this case, implementors should implement the `end()` method to handle actions related to the last checkpoint save. But the listener should not act twice if `after_save()` already handled this last checkpoint save. 
A `CheckpointSaverListener` can request training to be stopped, by returning True in `after_save`. Please note that, in replicated distributed training setting, only `chief` should use this behavior. Otherwise each worker will do their own evaluation, which may be wasteful of resources. """ def begin(self): pass def before_save(self, session, global_step_value): pass def after_save(self, session, global_step_value): pass def end(self, session, global_step_value): pass @tf_export(v1=["train.CheckpointSaverHook"]) class CheckpointSaverHook(session_run_hook.SessionRunHook): """Saves checkpoints every N steps or seconds.""" def __init__(self, checkpoint_dir, save_secs=None, save_steps=None, saver=None, checkpoint_basename="model.ckpt", scaffold=None, listeners=None, save_graph_def=True): """Initializes a `CheckpointSaverHook`. Args: checkpoint_dir: `str`, base directory for the checkpoint files. save_secs: `int`, save every N secs. save_steps: `int`, save every N steps. saver: `Saver` object, used for saving. checkpoint_basename: `str`, base name for the checkpoint files. scaffold: `Scaffold`, use to get saver object. listeners: List of `CheckpointSaverListener` subclass instances. Used for callbacks that run immediately before or after this hook saves the checkpoint. save_graph_def: Whether to save the GraphDef and MetaGraphDef to `checkpoint_dir`. The GraphDef is saved after the session is created as `graph.pbtxt`. MetaGraphDefs are saved out for every checkpoint as `model.ckpt-*.meta`. Raises: ValueError: One of `save_steps` or `save_secs` should be set. ValueError: At most one of `saver` or `scaffold` should be set. 
""" logging.info("Create CheckpointSaverHook.") if saver is not None and scaffold is not None: raise ValueError("You cannot provide both saver and scaffold.") self._saver = saver self._checkpoint_dir = checkpoint_dir self._save_path = os.path.join(checkpoint_dir, checkpoint_basename) self._scaffold = scaffold self._timer = SecondOrStepTimer( every_secs=save_secs, every_steps=save_steps) self._listeners = listeners or [] self._steps_per_run = 1 self._save_graph_def = save_graph_def def _set_steps_per_run(self, steps_per_run): self._steps_per_run = steps_per_run def begin(self): self._summary_writer = SummaryWriterCache.get(self._checkpoint_dir) self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access if self._global_step_tensor is None: raise RuntimeError( "Global step should be created to use CheckpointSaverHook.") for l in self._listeners: l.begin() def after_create_session(self, session, coord): global_step = session.run(self._global_step_tensor) if self._save_graph_def: # We do write graph and saver_def at the first call of before_run. # We cannot do this in begin, since we let other hooks to change graph and # add variables in begin. Graph is finalized after all begin calls. training_util.write_graph( ops.get_default_graph().as_graph_def(add_shapes=True), self._checkpoint_dir, "graph.pbtxt") saver_def = self._get_saver().saver_def if self._get_saver() else None graph = ops.get_default_graph() meta_graph_def = meta_graph.create_meta_graph_def( graph_def=graph.as_graph_def(add_shapes=True), saver_def=saver_def) self._summary_writer.add_graph(graph) self._summary_writer.add_meta_graph(meta_graph_def) # The checkpoint saved here is the state at step "global_step". 
self._save(session, global_step) self._timer.update_last_triggered_step(global_step) def before_run(self, run_context): # pylint: disable=unused-argument return SessionRunArgs(self._global_step_tensor) def after_run(self, run_context, run_values): stale_global_step = run_values.results if self._timer.should_trigger_for_step(stale_global_step + self._steps_per_run): # get the real value after train op. global_step = run_context.session.run(self._global_step_tensor) if self._timer.should_trigger_for_step(global_step): self._timer.update_last_triggered_step(global_step) if self._save(run_context.session, global_step): run_context.request_stop() def end(self, session): last_step = session.run(self._global_step_tensor) if last_step != self._timer.last_triggered_step(): self._save(session, last_step) for l in self._listeners: l.end(session, last_step) def _save(self, session, step): """Saves the latest checkpoint, returns should_stop.""" logging.info("Calling checkpoint listeners before saving checkpoint %d...", step) for l in self._listeners: l.before_save(session, step) logging.info("Saving checkpoints for %d into %s.", step, self._save_path) self._get_saver().save(session, self._save_path, global_step=step, write_meta_graph=self._save_graph_def) self._summary_writer.add_session_log( SessionLog( status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path), step) logging.info("Calling checkpoint listeners after saving checkpoint %d...", step) should_stop = False for l in self._listeners: if l.after_save(session, step): logging.info( "A CheckpointSaverListener requested that training be stopped. " "listener: {}".format(l)) should_stop = True return should_stop def _get_saver(self): if self._saver is not None: return self._saver elif self._scaffold is not None: return self._scaffold.saver # Get saver from the SAVERS collection if present. 
collection_key = ops.GraphKeys.SAVERS savers = ops.get_collection(collection_key) if not savers: raise RuntimeError( "No items in collection {}. Please add a saver to the collection " "or provide a saver or scaffold.".format(collection_key)) elif len(savers) > 1: raise RuntimeError( "More than one item in collection {}. " "Please indicate which one to use by passing it to the constructor." .format(collection_key)) self._saver = savers[0] return savers[0] @tf_export(v1=["train.StepCounterHook"]) class StepCounterHook(session_run_hook.SessionRunHook): """Hook that counts steps per second.""" def __init__(self, every_n_steps=100, every_n_secs=None, output_dir=None, summary_writer=None): if (every_n_steps is None) == (every_n_secs is None): raise ValueError( "exactly one of every_n_steps and every_n_secs should be provided.") self._timer = SecondOrStepTimer( every_steps=every_n_steps, every_secs=every_n_secs) self._summary_writer = summary_writer self._output_dir = output_dir self._last_global_step = None self._steps_per_run = 1 def _set_steps_per_run(self, steps_per_run): self._steps_per_run = steps_per_run def begin(self): if self._summary_writer is None and self._output_dir: self._summary_writer = SummaryWriterCache.get(self._output_dir) self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access if self._global_step_tensor is None: raise RuntimeError( "Global step should be created to use StepCounterHook.") self._summary_tag = training_util.get_global_step().op.name + "/sec" def before_run(self, run_context): # pylint: disable=unused-argument return SessionRunArgs(self._global_step_tensor) def _log_and_record(self, elapsed_steps, elapsed_time, global_step): steps_per_sec = elapsed_steps / elapsed_time if self._summary_writer is not None: summary = Summary(value=[ Summary.Value(tag=self._summary_tag, simple_value=steps_per_sec) ]) self._summary_writer.add_summary(summary, global_step) logging.info("%s: %g", 
self._summary_tag, steps_per_sec) def after_run(self, run_context, run_values): _ = run_context stale_global_step = run_values.results if self._timer.should_trigger_for_step(stale_global_step + self._steps_per_run): # get the real value after train op. global_step = run_context.session.run(self._global_step_tensor) if self._timer.should_trigger_for_step(global_step): elapsed_time, elapsed_steps = self._timer.update_last_triggered_step( global_step) if elapsed_time is not None: self._log_and_record(elapsed_steps, elapsed_time, global_step) # Check whether the global step has been increased. Here, we do not use the # timer.last_triggered_step as the timer might record a different global # step value such that the comparison could be unreliable. For simplicity, # we just compare the stale_global_step with previously recorded version. if stale_global_step == self._last_global_step: # Here, we give a warning in the first 5 times if we have observed that # the global step has not been increased. For some Optimizers, the global # step is not increased each time by design. For example, # SyncReplicaOptimizer doesn't increase the global step in worker's main # train step. logging.log_first_n( logging.WARN, "It seems that global step (tf.train.get_global_step) has not " "been increased. Current value (could be stable): %s vs previous " "value: %s. You could increase the global step by passing " "tf.train.get_global_step() to Optimizer.apply_gradients or " "Optimizer.minimize.", 5, stale_global_step, self._last_global_step) self._last_global_step = stale_global_step @tf_export(v1=["train.NanLossDuringTrainingError"]) class NanLossDuringTrainingError(RuntimeError): def __str__(self): return "NaN loss during training." @tf_export(v1=["train.NanTensorHook"]) class NanTensorHook(session_run_hook.SessionRunHook): """Monitors the loss tensor and stops training if loss is NaN. Can either fail with exception or just stop training. 
""" def __init__(self, loss_tensor, fail_on_nan_loss=True): """Initializes a `NanTensorHook`. Args: loss_tensor: `Tensor`, the loss tensor. fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN. """ self._loss_tensor = loss_tensor self._fail_on_nan_loss = fail_on_nan_loss def before_run(self, run_context): # pylint: disable=unused-argument return SessionRunArgs(self._loss_tensor) def after_run(self, run_context, run_values): if np.isnan(run_values.results): failure_message = "Model diverged with loss = NaN." if self._fail_on_nan_loss: logging.error(failure_message) raise NanLossDuringTrainingError else: logging.warning(failure_message) # We don't raise an error but we request stop without an exception. run_context.request_stop() @tf_export(v1=["train.SummarySaverHook"]) class SummarySaverHook(session_run_hook.SessionRunHook): """Saves summaries every N steps.""" def __init__(self, save_steps=None, save_secs=None, output_dir=None, summary_writer=None, scaffold=None, summary_op=None): """Initializes a `SummarySaverHook`. Args: save_steps: `int`, save summaries every N steps. Exactly one of `save_secs` and `save_steps` should be set. save_secs: `int`, save summaries every N seconds. output_dir: `string`, the directory to save the summaries to. Only used if no `summary_writer` is supplied. summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed, one will be created accordingly. scaffold: `Scaffold` to get summary_op if it's not provided. summary_op: `Tensor` of type `string` containing the serialized `Summary` protocol buffer or a list of `Tensor`. They are most likely an output by TF summary methods like `tf.compat.v1.summary.scalar` or `tf.compat.v1.summary.merge_all`. It can be passed in as one tensor; if more than one, they must be passed in as a list. Raises: ValueError: Exactly one of scaffold or summary_op should be set. 
""" if ((scaffold is None and summary_op is None) or (scaffold is not None and summary_op is not None)): raise ValueError( "Exactly one of scaffold or summary_op must be provided.") self._summary_op = summary_op self._summary_writer = summary_writer self._output_dir = output_dir self._scaffold = scaffold self._timer = SecondOrStepTimer( every_secs=save_secs, every_steps=save_steps) # TODO(mdan): Throw an error if output_dir and summary_writer are None. def begin(self): if self._summary_writer is None and self._output_dir: self._summary_writer = SummaryWriterCache.get(self._output_dir) self._next_step = None self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access if self._global_step_tensor is None: raise RuntimeError( "Global step should be created to use SummarySaverHook.") def before_run(self, run_context): # pylint: disable=unused-argument self._request_summary = ( self._next_step is None or self._timer.should_trigger_for_step(self._next_step)) requests = {"global_step": self._global_step_tensor} if self._request_summary: if self._get_summary_op() is not None: requests["summary"] = self._get_summary_op() return SessionRunArgs(requests) def after_run(self, run_context, run_values): _ = run_context if not self._summary_writer: return stale_global_step = run_values.results["global_step"] global_step = stale_global_step + 1 if self._next_step is None or self._request_summary: global_step = run_context.session.run(self._global_step_tensor) if self._next_step is None: self._summary_writer.add_session_log( SessionLog(status=SessionLog.START), global_step) if self._request_summary: self._timer.update_last_triggered_step(global_step) if "summary" in run_values.results: for summary in run_values.results["summary"]: self._summary_writer.add_summary(summary, global_step) self._next_step = global_step + 1 def end(self, session=None): if self._summary_writer: self._summary_writer.flush() def _get_summary_op(self): 
"""Fetches the summary op either from self._summary_op or self._scaffold. Returns: Returns a list of summary `Tensor`. """ summary_op = None if self._summary_op is not None: summary_op = self._summary_op elif self._scaffold.summary_op is not None: summary_op = self._scaffold.summary_op if summary_op is None: return None if not isinstance(summary_op, list): return [summary_op] return summary_op @tf_export(v1=["train.GlobalStepWaiterHook"]) class GlobalStepWaiterHook(session_run_hook.SessionRunHook): """Delays execution until global step reaches `wait_until_step`. This hook delays execution until global step reaches to `wait_until_step`. It is used to gradually start workers in distributed settings. One example usage would be setting `wait_until_step=int(K*log(task_id+1))` assuming that task_id=0 is the chief. """ def __init__(self, wait_until_step): """Initializes a `GlobalStepWaiterHook`. Args: wait_until_step: an `int` shows until which global step should we wait. """ self._wait_until_step = wait_until_step def begin(self): self._worker_is_started = False self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access if self._global_step_tensor is None: raise RuntimeError( "Global step should be created to use _GlobalStepWaiterHook.") def before_run(self, run_context): if self._worker_is_started: return None if self._wait_until_step <= 0: self._worker_is_started = True return None logging.info("Waiting for global step %d before starting training.", self._wait_until_step) last_logged_step = 0 while True: current_step = run_context.session.run(self._global_step_tensor) if current_step >= self._wait_until_step: self._worker_is_started = True return None if current_step - last_logged_step > 1000: logging.info( "Waiting for global step %d before starting training. 
" "Current step is %d.", self._wait_until_step, current_step) last_logged_step = current_step time.sleep(0.5) @tf_export(v1=["train.FinalOpsHook"]) class FinalOpsHook(session_run_hook.SessionRunHook): """A hook which evaluates `Tensors` at the end of a session.""" def __init__(self, final_ops, final_ops_feed_dict=None): """Initializes `FinalOpHook` with ops to run at the end of the session. Args: final_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names to `Tensors`. final_ops_feed_dict: A feed dictionary to use when running `final_ops_dict`. """ self._final_ops = final_ops self._final_ops_feed_dict = final_ops_feed_dict self._final_ops_values = None @property def final_ops_values(self): return self._final_ops_values def end(self, session): if self._final_ops is not None: try: self._final_ops_values = session.run( self._final_ops, feed_dict=self._final_ops_feed_dict) except (errors.OutOfRangeError, StopIteration) as e: logging.warning( "An OutOfRangeError or StopIteration exception is raised by the " "code in FinalOpsHook. This typically means the Ops running by the " "FinalOpsHook have a dependency back to some input source, which " "should not happen. For example, for metrics in " "tf.estimator.Estimator, all metrics functions return two Ops: " "`value_op` and `update_op`. Estimator.evaluate calls the " "`update_op` for each batch of the data in input source and, once " "it is exhausted, it call the `value_op` to get the metric values. " "The `value_op` here should have dependency back to variables " "reading only, rather than reading another batch from input. " "Otherwise, the `value_op`, executed by `FinalOpsHook`, triggers " "another data reading, which ends OutOfRangeError/StopIteration. " "Please fix that.") raise e @tf_export(v1=["train.FeedFnHook"]) class FeedFnHook(session_run_hook.SessionRunHook): """Runs `feed_fn` and sets the `feed_dict` accordingly.""" def __init__(self, feed_fn): """Initializes a `FeedFnHook`. 
Args: feed_fn: function that takes no arguments and returns `dict` of `Tensor` to feed. """ self.feed_fn = feed_fn def before_run(self, run_context): # pylint: disable=unused-argument return session_run_hook.SessionRunArgs( fetches=None, feed_dict=self.feed_fn()) @tf_export(v1=["train.ProfilerHook"]) class ProfilerHook(session_run_hook.SessionRunHook): """Captures CPU/GPU profiling information every N steps or seconds. This produces files called "timeline-<step>.json", which are in Chrome Trace format. For more information see: https://github.com/catapult-project/catapult/blob/master/tracing/README.md """ def __init__(self, save_steps=None, save_secs=None, output_dir="", show_dataflow=True, show_memory=False): """Initializes a hook that takes periodic profiling snapshots. `options.run_metadata` argument of `tf.Session.Run` is used to collect metadata about execution. This hook sets the metadata and dumps it in Chrome Trace format. Args: save_steps: `int`, save profile traces every N steps. Exactly one of `save_secs` and `save_steps` should be set. save_secs: `int` or `float`, save profile traces every N seconds. output_dir: `string`, the directory to save the profile traces to. Defaults to the current directory. show_dataflow: `bool`, if True, add flow events to the trace connecting producers and consumers of tensors. show_memory: `bool`, if True, add object snapshot events to the trace showing the sizes and lifetimes of tensors. 
""" self._output_file = os.path.join(output_dir, "timeline-{}.json") self._file_writer = SummaryWriterCache.get(output_dir) self._show_dataflow = show_dataflow self._show_memory = show_memory self._timer = SecondOrStepTimer( every_secs=save_secs, every_steps=save_steps) def begin(self): self._next_step = None self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access if self._global_step_tensor is None: raise RuntimeError("Global step should be created to use ProfilerHook.") def before_run(self, run_context): self._request_summary = ( self._next_step is not None and self._timer.should_trigger_for_step(self._next_step)) requests = {"global_step": self._global_step_tensor} opts = ( config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE) if self._request_summary else None) return SessionRunArgs(requests, options=opts) def after_run(self, run_context, run_values): stale_global_step = run_values.results["global_step"] if self._next_step is None: # Update the timer so that it does not activate until N steps or seconds # have passed. 
self._timer.update_last_triggered_step(stale_global_step) global_step = stale_global_step + 1 if self._request_summary: global_step = run_context.session.run(self._global_step_tensor) self._timer.update_last_triggered_step(global_step) self._save(global_step, self._output_file.format(global_step), run_values.run_metadata.step_stats) self._file_writer.add_run_metadata(run_values.run_metadata, "step_%d" % global_step) self._next_step = global_step + 1 def _save(self, step, save_path, step_stats): logging.info("Saving timeline for %d into '%s'.", step, save_path) with gfile.Open(save_path, "w") as f: trace = timeline.Timeline(step_stats) f.write( trace.generate_chrome_trace_format( show_dataflow=self._show_dataflow, show_memory=self._show_memory)) def _as_graph_element(obj): """Retrieves Graph element.""" graph = ops.get_default_graph() if not isinstance(obj, six.string_types): if not hasattr(obj, "graph") or obj.graph != graph: raise ValueError("Passed %s should have graph attribute that is equal " "to current graph %s." % (obj, graph)) return obj if ":" in obj: element = graph.as_graph_element(obj) else: element = graph.as_graph_element(obj + ":0") # Check that there is no :1 (e.g. it's single output). try: graph.as_graph_element(obj + ":1") except (KeyError, ValueError): pass else: raise ValueError("Name %s is ambiguous, " "as this `Operation` has multiple outputs " "(at least 2)." % obj) return element
apache-2.0
tannishk/airmozilla
airmozilla/manage/views/groups.py
6
2799
from django.contrib.auth.models import Group
from django.contrib import messages
from django.shortcuts import get_object_or_404, redirect, render
from django.db import transaction

from jsonview.decorators import json_view

from airmozilla.base import mozillians
from airmozilla.manage import forms

from .decorators import (
    staff_required,
    permission_required,
    cancel_redirect
)


@staff_required
@permission_required('auth.change_group')
def groups(request):
    """Group editor: view groups and change group permissions."""
    groups = Group.objects.all()
    return render(request, 'manage/groups.html', {'groups': groups})


@staff_required
@permission_required('auth.change_group')
@cancel_redirect('manage:groups')
@transaction.commit_on_success
def group_edit(request, id):
    """Edit an individual group.

    GET renders the edit form; a valid POST saves the form and
    redirects back to the group list.
    """
    # get_object_or_404 turns an unknown id into an HTTP 404 instead of
    # letting Group.DoesNotExist bubble up as a 500.
    group = get_object_or_404(Group, id=id)
    if request.method == 'POST':
        form = forms.GroupEditForm(request.POST, instance=group)
        if form.is_valid():
            form.save()
            messages.info(request, 'Group "%s" saved.' % group.name)
            return redirect('manage:groups')
        # invalid POST: fall through and re-render with the bound form
    else:
        form = forms.GroupEditForm(instance=group)
    return render(request, 'manage/group_edit.html',
                  {'form': form, 'group': group})


@staff_required
@permission_required('auth.add_group')
@transaction.commit_on_success
def group_new(request):
    """Add a new group."""
    group = Group()
    if request.method == 'POST':
        form = forms.GroupEditForm(request.POST, instance=group)
        if form.is_valid():
            form.save()
            messages.success(request, 'Group "%s" created.' % group.name)
            return redirect('manage:groups')
    else:
        form = forms.GroupEditForm(instance=group)
    return render(request, 'manage/group_new.html', {'form': form})


@staff_required
@permission_required('auth.delete_group')
@transaction.commit_on_success
def group_remove(request, id):
    """Delete a group (POST only); always redirect to the group list."""
    if request.method == 'POST':
        # 404 on an unknown id rather than a 500 from Group.DoesNotExist.
        group = get_object_or_404(Group, id=id)
        name = group.name  # capture before delete for the flash message
        group.delete()
        messages.info(request, 'Group "%s" removed.' % name)
    return redirect('manage:groups')


@permission_required('main.change_event')
@json_view
def curated_groups_autocomplete(request):
    """Return ``{'groups': [(name, label), ...]}`` for the ``?q=`` query.

    Matching is a case-insensitive substring test against the Mozillians
    group name; an empty or whitespace-only query returns an empty list.
    """
    q = request.GET.get('q', '').strip()
    if not q:
        return {'groups': []}

    # Renamed from `all` to avoid shadowing the builtin of the same name.
    all_groups = mozillians.get_all_groups_cached()

    def describe_group(group):
        # Label a group with its member count, singular/plural aware.
        if group['number_of_members'] == 1:
            return '%s (1 member)' % (group['name'],)
        else:
            return (
                '%s (%s members)' % (group['name'], group['number_of_members'])
            )

    q_lower = q.lower()  # hoisted out of the loop
    groups = [
        (x['name'], describe_group(x))
        for x in all_groups
        if q_lower in x['name'].lower()
    ]
    return {'groups': groups}
bsd-3-clause