repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
Jorge-Rodriguez/ansible | lib/ansible/modules/net_tools/nios/nios_fixed_address.py | 31 | 8390 | #!/usr/bin/python
# Copyright (c) 2018 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: nios_fixed_address
version_added: "2.8"
author: "Sumit Jaiswal (@sjaiswal)"
short_description: Configure Infoblox NIOS DHCP Fixed Address
description:
- A fixed address is a specific IP address that a DHCP server
always assigns when a lease request comes from a particular
      MAC address of the client.
- Supports both IPV4 and IPV6 internet protocols
requirements:
- infoblox-client
extends_documentation_fragment: nios
options:
name:
description:
- Specifies the hostname with which fixed DHCP ip-address is stored
for respective mac.
required: false
ipaddr:
description:
- IPV4/V6 address of the fixed address.
required: true
mac:
description:
- The MAC address of the interface.
required: true
network:
description:
- Specifies the network range in which ipaddr exists.
required: true
aliases:
- network
network_view:
description:
- Configures the name of the network view to associate with this
configured instance.
required: false
default: default
options:
description:
- Configures the set of DHCP options to be included as part of
the configured network instance. This argument accepts a list
of values (see suboptions). When configuring suboptions at
least one of C(name) or C(num) must be specified.
suboptions:
name:
description:
- The name of the DHCP option to configure
num:
description:
- The number of the DHCP option to configure
value:
description:
- The value of the DHCP option specified by C(name)
required: true
use_option:
description:
- Only applies to a subset of options (see NIOS API documentation)
type: bool
default: 'yes'
vendor_class:
description:
- The name of the space this DHCP option is associated to
default: DHCP
extattrs:
description:
- Allows for the configuration of Extensible Attributes on the
instance of the object. This argument accepts a set of key / value
pairs for configuration.
comment:
description:
- Configures a text string comment to be associated with the instance
of this object. The provided text string will be configured on the
object instance.
state:
description:
- Configures the intended state of the instance of the object on
the NIOS server. When this value is set to C(present), the object
is configured on the device and when this value is set to C(absent)
the value is removed (if necessary) from the device.
default: present
choices:
- present
- absent
'''
EXAMPLES = '''
- name: configure ipv4 dhcp fixed address
nios_fixed_address:
name: ipv4_fixed
ipaddr: 192.168.10.1
mac: 08:6d:41:e8:fd:e8
network: 192.168.10.0/24
network_view: default
comment: this is a test comment
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: configure a ipv6 dhcp fixed address
nios_fixed_address:
name: ipv6_fixed
ipaddr: fe80::1/10
mac: 08:6d:41:e8:fd:e8
network: fe80::/64
network_view: default
comment: this is a test comment
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: set dhcp options for a ipv4 fixed address
nios_fixed_address:
name: ipv4_fixed
ipaddr: 192.168.10.1
mac: 08:6d:41:e8:fd:e8
network: 192.168.10.0/24
network_view: default
comment: this is a test comment
options:
- name: domain-name
value: ansible.com
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: remove a ipv4 dhcp fixed address
nios_fixed_address:
name: ipv4_fixed
ipaddr: 192.168.10.1
mac: 08:6d:41:e8:fd:e8
network: 192.168.10.0/24
network_view: default
state: absent
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.net_tools.nios.api import WapiModule
from ansible.module_utils.network.common.utils import validate_ip_address, validate_ip_v6_address
from ansible.module_utils.net_tools.nios.api import NIOS_IPV4_FIXED_ADDRESS, NIOS_IPV6_FIXED_ADDRESS
def options(module):
    ''' Transforms the module argument into a valid WAPI struct

    This function will transform the options argument into a structure that
    is a valid WAPI structure in the format of:

        {
            name: <value>,
            num: <value>,
            value: <value>,
            use_option: <value>,
            vendor_class: <value>
        }

    It will remove any options that are set to None since WAPI will error on
    that condition.  It will also verify that either `name` or `num` is
    set in the structure but does not validate the values are equal.

    The remainder of the value validation is performed by WAPI

    :param module: AnsibleModule instance; ``module.params['options']`` holds
        the raw list of DHCP option dicts supplied by the user.
    :returns: list of option dicts with all None-valued keys stripped.
    '''
    wapi_options = list()
    for item in module.params['options']:
        # Drop keys whose value is None -- WAPI rejects explicit nulls.
        # dict.items() works on both Python 2 and 3, so the six.iteritems
        # helper is unnecessary here.
        opt = dict((k, v) for k, v in item.items() if v is not None)
        if 'name' not in opt and 'num' not in opt:
            module.fail_json(msg='one of `name` or `num` is required for option value')
        wapi_options.append(opt)
    return wapi_options
def validate_ip_addr_type(ip, arg_spec, module):
    '''Check whether the argument ip is IPv4 or IPv6 and return the
    matching Infoblox fixed-address object type.

    Renames the generic ``ipaddr`` key to ``ipv4addr``/``ipv6addr`` both in
    ``arg_spec`` and in ``module.params`` (in place), since that is the
    field name WAPI expects.

    :param ip: IP address string, optionally with a /prefix part.
    :param arg_spec: the ib_spec dict to rename the key in.
    :param module: AnsibleModule instance whose params are renamed too.
    :returns: tuple of (NIOS object type constant, arg_spec, module).
    '''
    # Strip any CIDR suffix before validating the address itself.
    check_ip = ip.split('/')

    if validate_ip_address(check_ip[0]) and 'ipaddr' in arg_spec:
        arg_spec['ipv4addr'] = arg_spec.pop('ipaddr')
        module.params['ipv4addr'] = module.params.pop('ipaddr')
        return NIOS_IPV4_FIXED_ADDRESS, arg_spec, module
    elif validate_ip_v6_address(check_ip[0]) and 'ipaddr' in arg_spec:
        arg_spec['ipv6addr'] = arg_spec.pop('ipaddr')
        module.params['ipv6addr'] = module.params.pop('ipaddr')
        return NIOS_IPV6_FIXED_ADDRESS, arg_spec, module
    # Previously this function fell off the end and implicitly returned
    # None, which made the caller crash with a TypeError while unpacking
    # the result.  Fail with a clear message instead.
    module.fail_json(msg='Invalid IP address supplied for ipaddr: %s' % ip)
def main():
    ''' Main entry point for module execution

    Builds the argument spec, instantiates the AnsibleModule, resolves
    whether the supplied address is IPv4 or IPv6, and delegates the actual
    NIOS work to WapiModule.
    '''
    option_spec = dict(
        # one of name or num is required; enforced by the function options()
        name=dict(),
        num=dict(type='int'),
        value=dict(required=True),

        use_option=dict(type='bool', default=True),
        vendor_class=dict(default='DHCP')
    )

    # NOTE: the original spec carried aliases identical to the option names
    # (e.g. ipaddr=dict(aliases=['ipaddr'])).  Such self-aliases are no-ops
    # and cause "alias already exists" warnings on recent Ansible, so they
    # have been removed; the accepted argument names are unchanged.
    ib_spec = dict(
        name=dict(required=True),
        ipaddr=dict(required=True, ib_req=True),
        mac=dict(required=True, ib_req=True),
        network=dict(required=True, ib_req=True),
        network_view=dict(default='default'),

        options=dict(type='list', elements='dict', options=option_spec, transform=options),

        extattrs=dict(type='dict'),
        comment=dict()
    )

    argument_spec = dict(
        provider=dict(required=True),
        state=dict(default='present', choices=['present', 'absent'])
    )

    argument_spec.update(ib_spec)
    argument_spec.update(WapiModule.provider_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    # to get the argument ipaddr
    obj_filter = dict([(k, module.params[k]) for k, v in iteritems(ib_spec) if v.get('ib_req')])

    # to modify argument based on ipaddr type i.e. IPV4/IPV6
    fixed_address_ip_type, ib_spec, module = validate_ip_addr_type(obj_filter['ipaddr'], ib_spec, module)

    wapi = WapiModule(module)
    result = wapi.run(fixed_address_ip_type, ib_spec)

    module.exit_json(**result)
# Run the module entry point when executed directly by Ansible.
if __name__ == '__main__':
    main()
| gpl-3.0 |
chiefspace/udemy-rest-api | udemy_rest_api_section4/env/lib/python3.4/site-packages/aniso8601/interval.py | 5 | 5596 | # -*- coding: utf-8 -*-
# Copyright (c) 2016, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
from datetime import datetime
from aniso8601.duration import parse_duration
from aniso8601.time import parse_datetime
from aniso8601.date import parse_date
def parse_interval(isointervalstr, intervaldelimiter='/', datetimedelimiter='T', relative=False):
    #Given a string representing an ISO 8601 interval, return a
    #tuple of datetime.date or date.datetime objects representing the beginning
    #and end of the specified interval. Valid formats are:
    #
    #<start>/<end>
    #<start>/<duration>
    #<duration>/<end>
    #
    #The <start> and <end> values can represent dates, or datetimes,
    #not times.
    #
    #The format:
    #
    #<duration>
    #
    #Is expressly not supported as there is no way to provide the additional
    #required context.
    #
    #:param isointervalstr: the ISO 8601 interval string to parse
    #:param intervaldelimiter: separator between the two interval halves
    #:param datetimedelimiter: separator between date and time parts
    #:param relative: passed through to parse_duration (dateutil-style math)

    firstpart, secondpart = isointervalstr.split(intervaldelimiter)

    #A leading 'P' marks an ISO 8601 duration
    if firstpart[0] == 'P':
        #<duration>/<end>
        #Notice that these are not returned 'in order' (earlier to later), this
        #is to maintain consistency with parsing <start>/<end> durations, as
        #well as making repeating interval code cleaner. Users who desire
        #durations to be in order can use the 'sorted' operator.

        #We need to figure out if <end> is a date, or a datetime
        if secondpart.find(datetimedelimiter) != -1:
            #<end> is a datetime
            duration = parse_duration(firstpart, relative=relative)
            enddatetime = parse_datetime(secondpart, delimiter=datetimedelimiter)

            return (enddatetime, enddatetime - duration)
        else:
            #<end> must just be a date
            duration = parse_duration(firstpart, relative=relative)
            enddate = parse_date(secondpart)

            #See if we need to upconvert to datetime to preserve resolution
            #(a 'T' inside the duration string means it carries time components)
            if firstpart.find(datetimedelimiter) != -1:
                return (enddate, datetime.combine(enddate, datetime.min.time()) - duration)
            else:
                return (enddate, enddate - duration)
    elif secondpart[0] == 'P':
        #<start>/<duration>
        #We need to figure out if <start> is a date, or a datetime
        if firstpart.find(datetimedelimiter) != -1:
            #<end> is a datetime
            duration = parse_duration(secondpart, relative=relative)
            startdatetime = parse_datetime(firstpart, delimiter=datetimedelimiter)

            return (startdatetime, startdatetime + duration)
        else:
            #<start> must just be a date
            duration = parse_duration(secondpart, relative=relative)
            startdate = parse_date(firstpart)

            #See if we need to upconvert to datetime to preserve resolution
            #(a 'T' inside the duration string means it carries time components)
            if secondpart.find(datetimedelimiter) != -1:
                return (startdate, datetime.combine(startdate, datetime.min.time()) + duration)
            else:
                return (startdate, startdate + duration)
    else:
        #<start>/<end>
        if firstpart.find(datetimedelimiter) != -1 and secondpart.find(datetimedelimiter) != -1:
            #Both parts are datetimes
            return (parse_datetime(firstpart, delimiter=datetimedelimiter), parse_datetime(secondpart, delimiter=datetimedelimiter))
        elif firstpart.find(datetimedelimiter) != -1 and secondpart.find(datetimedelimiter) == -1:
            #First part is a datetime, second part is a date
            return (parse_datetime(firstpart, delimiter=datetimedelimiter), parse_date(secondpart))
        elif firstpart.find(datetimedelimiter) == -1 and secondpart.find(datetimedelimiter) != -1:
            #First part is a date, second part is a datetime
            return (parse_date(firstpart), parse_datetime(secondpart, delimiter=datetimedelimiter))
        else:
            #Both parts are dates
            return (parse_date(firstpart), parse_date(secondpart))
def parse_repeating_interval(isointervalstr, intervaldelimiter='/', datetimedelimiter='T', relative=False):
    #Given a string representing an ISO 8601 repeating interval, return a
    #generator of datetime.date or date.datetime objects representing the
    #dates specified by the repeating interval. Valid formats are:
    #
    #Rnn/<interval>
    #R/<interval>
    #
    #:param isointervalstr: the repeating interval string, starting with 'R'
    #:param intervaldelimiter: separator between repeat count and interval
    #:param datetimedelimiter: separator between date and time parts
    #:param relative: passed through to the interval/duration parsers
    #:raises ValueError: if the string does not start with 'R'

    if isointervalstr[0] != 'R':
        raise ValueError('ISO 8601 repeating interval must start with an R.')

    #Parse the number of iterations; maxsplit=1 keeps the interval half intact
    iterationpart, intervalpart = isointervalstr.split(intervaldelimiter, 1)

    if len(iterationpart) > 1:
        iterations = int(iterationpart[1:])
    else:
        #A bare 'R' means repeat without bound
        iterations = None

    interval = parse_interval(intervalpart, intervaldelimiter, datetimedelimiter, relative=relative)

    intervaltimedelta = interval[1] - interval[0]

    #Now, build and return the generator
    #(identity comparison with None per PEP 8, instead of '!= None')
    if iterations is not None:
        return _date_generator(interval[0], intervaltimedelta, iterations)
    else:
        return _date_generator_unbounded(interval[0], intervaltimedelta)
def _date_generator(startdate, timedelta, iterations):
currentdate = startdate
currentiteration = 0
while currentiteration < iterations:
yield currentdate
#Update the values
currentdate += timedelta
currentiteration += 1
def _date_generator_unbounded(startdate, timedelta):
currentdate = startdate
while True:
yield currentdate
#Update the value
currentdate += timedelta
| gpl-2.0 |
ebar0n/django | django/core/management/commands/squashmigrations.py | 17 | 9351 | from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS, connections, migrations
from django.db.migrations.loader import AmbiguityError, MigrationLoader
from django.db.migrations.migration import SwappableTuple
from django.db.migrations.optimizer import MigrationOptimizer
from django.db.migrations.writer import MigrationWriter
from django.utils.version import get_docs_version
class Command(BaseCommand):
    """Management command that squashes a contiguous run of migrations for
    one app into a single replacement migration file."""

    help = "Squashes an existing set of migrations (from first until specified) into a single new one."

    def add_arguments(self, parser):
        """Register the command-line arguments for squashmigrations."""
        parser.add_argument(
            'app_label',
            help='App label of the application to squash migrations for.',
        )
        parser.add_argument(
            'start_migration_name', default=None, nargs='?',
            help='Migrations will be squashed starting from and including this migration.',
        )
        parser.add_argument(
            'migration_name',
            help='Migrations will be squashed until and including this migration.',
        )
        parser.add_argument(
            '--no-optimize', action='store_true', dest='no_optimize',
            help='Do not try to optimize the squashed operations.',
        )
        parser.add_argument(
            '--noinput', '--no-input', action='store_false', dest='interactive',
            help='Tells Django to NOT prompt the user for input of any kind.',
        )
        parser.add_argument(
            '--squashed-name', dest='squashed_name',
            help='Sets the name of the new squashed migration.',
        )

    def handle(self, **options):
        """Collect the migrations to squash, optionally optimize their
        operations, and write the new squashed migration to disk.

        Raises CommandError when the app has no migrations, when a named
        migration cannot be resolved, or when attempting to re-squash an
        already-squashed migration.
        """
        self.verbosity = options['verbosity']
        self.interactive = options['interactive']
        app_label = options['app_label']
        start_migration_name = options['start_migration_name']
        migration_name = options['migration_name']
        no_optimize = options['no_optimize']
        squashed_name = options['squashed_name']

        # Load the current graph state, check the app and migration they asked for exists
        loader = MigrationLoader(connections[DEFAULT_DB_ALIAS])
        if app_label not in loader.migrated_apps:
            raise CommandError(
                "App '%s' does not have migrations (so squashmigrations on "
                "it makes no sense)" % app_label
            )

        migration = self.find_migration(loader, app_label, migration_name)

        # Work out the list of predecessor migrations
        # (everything in this app on the forwards plan up to the target)
        migrations_to_squash = [
            loader.get_migration(al, mn)
            for al, mn in loader.graph.forwards_plan((migration.app_label, migration.name))
            if al == migration.app_label
        ]

        if start_migration_name:
            # Trim the list so squashing starts at the requested migration.
            start_migration = self.find_migration(loader, app_label, start_migration_name)
            start = loader.get_migration(start_migration.app_label, start_migration.name)
            try:
                start_index = migrations_to_squash.index(start)
                migrations_to_squash = migrations_to_squash[start_index:]
            except ValueError:
                raise CommandError(
                    "The migration '%s' cannot be found. Maybe it comes after "
                    "the migration '%s'?\n"
                    "Have a look at:\n"
                    "  python manage.py showmigrations %s\n"
                    "to debug this issue." % (start_migration, migration, app_label)
                )

        # Tell them what we're doing and optionally ask if we should proceed
        if self.verbosity > 0 or self.interactive:
            self.stdout.write(self.style.MIGRATE_HEADING("Will squash the following migrations:"))
            for migration in migrations_to_squash:
                self.stdout.write(" - %s" % migration.name)

            if self.interactive:
                answer = None
                # Loop until the user types something starting with 'y' or 'n';
                # an empty answer defaults to 'n'.
                while not answer or answer not in "yn":
                    answer = input("Do you wish to proceed? [yN] ")
                    if not answer:
                        answer = "n"
                        break
                    else:
                        answer = answer[0].lower()
                if answer != "y":
                    return

        # Load the operations from all those migrations and concat together,
        # along with collecting external dependencies and detecting
        # double-squashing
        operations = []
        dependencies = set()
        # We need to take all dependencies from the first migration in the list
        # as it may be 0002 depending on 0001
        first_migration = True
        for smigration in migrations_to_squash:
            if smigration.replaces:
                raise CommandError(
                    "You cannot squash squashed migrations! Please transition "
                    "it to a normal migration first: "
                    "https://docs.djangoproject.com/en/%s/topics/migrations/#squashing-migrations" % get_docs_version()
                )
            operations.extend(smigration.operations)
            for dependency in smigration.dependencies:
                if isinstance(dependency, SwappableTuple):
                    # Re-express swappable-model dependencies as a setting
                    # reference when they point at AUTH_USER_MODEL.
                    if settings.AUTH_USER_MODEL == dependency.setting:
                        dependencies.add(("__setting__", "AUTH_USER_MODEL"))
                    else:
                        dependencies.add(dependency)
                elif dependency[0] != smigration.app_label or first_migration:
                    dependencies.add(dependency)
            first_migration = False

        if no_optimize:
            if self.verbosity > 0:
                self.stdout.write(self.style.MIGRATE_HEADING("(Skipping optimization.)"))
            new_operations = operations
        else:
            if self.verbosity > 0:
                self.stdout.write(self.style.MIGRATE_HEADING("Optimizing..."))

            optimizer = MigrationOptimizer()
            new_operations = optimizer.optimize(operations, migration.app_label)

            if self.verbosity > 0:
                if len(new_operations) == len(operations):
                    self.stdout.write("  No optimizations possible.")
                else:
                    self.stdout.write(
                        "  Optimized from %s operations to %s operations." %
                        (len(operations), len(new_operations))
                    )

        # Work out the value of replaces (any squashed ones we're re-squashing)
        # need to feed their replaces into ours
        replaces = []
        for migration in migrations_to_squash:
            if migration.replaces:
                replaces.extend(migration.replaces)
            else:
                replaces.append((migration.app_label, migration.name))

        # Make a new migration with those operations
        subclass = type("Migration", (migrations.Migration, ), {
            "dependencies": dependencies,
            "operations": new_operations,
            "replaces": replaces,
        })
        if start_migration_name:
            if squashed_name:
                # Use the name from --squashed-name.
                prefix, _ = start_migration.name.split('_', 1)
                name = '%s_%s' % (prefix, squashed_name)
            else:
                # Generate a name.
                name = '%s_squashed_%s' % (start_migration.name, migration.name)
            new_migration = subclass(name, app_label)
        else:
            name = '0001_%s' % (squashed_name or 'squashed_%s' % migration.name)
            new_migration = subclass(name, app_label)
            # Squashing from the very beginning produces an initial migration.
            new_migration.initial = True

        # Write out the new migration file
        writer = MigrationWriter(new_migration)
        with open(writer.path, "w", encoding='utf-8') as fh:
            fh.write(writer.as_string())

        if self.verbosity > 0:
            self.stdout.write(self.style.MIGRATE_HEADING("Created new squashed migration %s" % writer.path))
            self.stdout.write("  You should commit this migration but leave the old ones in place;")
            self.stdout.write("  the new migration will be used for new installs. Once you are sure")
            self.stdout.write("  all instances of the codebase have applied the migrations you squashed,")
            self.stdout.write("  you can delete them.")
            if writer.needs_manual_porting:
                self.stdout.write(self.style.MIGRATE_HEADING("Manual porting required"))
                self.stdout.write("  Your migrations contained functions that must be manually copied over,")
                self.stdout.write("  as we could not safely copy their implementation.")
                self.stdout.write("  See the comment at the top of the squashed migration for details.")

    def find_migration(self, loader, app_label, name):
        """Resolve a (possibly abbreviated) migration name within an app,
        raising CommandError on ambiguity or no match."""
        try:
            return loader.get_migration_by_prefix(app_label, name)
        except AmbiguityError:
            raise CommandError(
                "More than one migration matches '%s' in app '%s'. Please be "
                "more specific." % (name, app_label)
            )
        except KeyError:
            raise CommandError(
                "Cannot find a migration matching '%s' from app '%s'." %
                (name, app_label)
            )
| bsd-3-clause |
zi-w/Structural-Kernel-Learning-for-HDBBO | test_functions/python_related/generate_simudata4.py | 2 | 1054 | #!/usr/bin/env python
# Copyright (c) 2017 Zi Wang
from push_world import *
import sys
# difference to generate_simudata is an input that control angle of push
if __name__ == '__main__':
    # Command-line arguments (note: argv[3] is consumed out of order):
    #   argv[1], argv[2] -> rx, ry: pusher start position
    #   argv[3]          -> push duration in seconds (10 sim steps per second)
    #   argv[4], argv[5] -> gx, gy: goal position
    #   argv[6]          -> initial hand angle controlling the push direction
    rx = float(sys.argv[1])
    ry = float(sys.argv[2])
    gx = float(sys.argv[4])
    gy = float(sys.argv[5])
    init_angle = float(sys.argv[6])
    simu_steps = int(float(sys.argv[3]) * 10)
    # Set the parameter to True if need gui
    world = b2WorldInterface(False)
    oshape, osize, ofriction, odensity, bfriction, hand_shape, hand_size = 'circle', 1, 0.01, 0.05, 0.01, 'rectangle', (0.3,1)
    thing,base = make_thing(500, 500, world, oshape, osize, ofriction, odensity, bfriction, (0,0))
    # Push velocity points from the start position toward the origin,
    # normalized to a speed of 10.
    xvel = -rx;
    yvel = -ry;
    regu = np.linalg.norm([xvel,yvel])
    xvel = xvel / regu * 10;
    yvel = yvel / regu * 10;
    robot = end_effector(world, (rx,ry), base, init_angle, hand_shape, hand_size)
    # simu_push2 presumably returns the object's final (x, y) position --
    # TODO confirm against push_world.
    ret = simu_push2(world, thing, robot, base, xvel, yvel, simu_steps)
    # Objective value: distance between the goal and the final position,
    # written to stdout for the calling (MATLAB) harness to read.
    ret = np.linalg.norm(np.array([gx, gy]) - ret)
    sys.stdout.write(str(ret))
BT-ojossen/partner-contact | partner_contact_gender/__openerp__.py | 9 | 1252 | # -*- encoding: utf-8 -*-
# Odoo, Open Source Management Solution
# Copyright (C) 2014-2015 Grupo ESOC <www.grupoesoc.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Odoo/OpenERP addon manifest describing the "Contact gender" module.
{
    "name": "Contact gender",
    "version": "1.0",
    "category": "Customer Relationship Management",
    "author": "Odoo Community Association (OCA), Grupo ESOC",
    "license": "AGPL-3",
    "website": "https://odoo-community.org/",
    "installable": True,
    # Not a standalone application; extends the partner contact model.
    "application": False,
    "summary": "Add gender field to contacts",
    "depends": [
        "partner_contact_personal_information_page",
    ],
    # XML data files loaded on install/update.
    "data": [
        "views/res_partner.xml",
    ],
}
| agpl-3.0 |
paplorinc/intellij-community | python/helpers/pycharm/_jb_pytest_runner.py | 1 | 1689 | # coding=utf-8
import sys
import pytest
from _pytest.config import get_plugin_manager
from _pytest import config
from pkg_resources import iter_entry_points
from _jb_runner_tools import jb_patch_separator, jb_doc_args, JB_DISABLE_BUFFERING, start_protocol, parse_arguments, \
set_parallel_mode
from teamcity import pytest_plugin
if __name__ == '__main__':
    # Keep a reference to the real config-preparation function; it is
    # monkey-patched below so pytest reuses the config prepared here.
    real_prepare_config = config._prepareconfig
    path, targets, additional_args = parse_arguments()
    sys.argv += additional_args
    joined_targets = jb_patch_separator(targets, fs_glue="/", python_glue="::", fs_to_python_glue=".py::")
    # When file is launched in pytest it should be file.py: you can't provide it as bare module
    joined_targets = [t + ".py" if ":" not in t else t for t in joined_targets]
    sys.argv += [path] if path else joined_targets

    # plugin is discovered automatically in 3, but not in 2
    # to prevent "plugin already registered" problem we check it first
    plugins_to_load = []
    if not get_plugin_manager().hasplugin("pytest-teamcity"):
        if "pytest-teamcity" not in map(lambda e: e.name, iter_entry_points(group='pytest11', name=None)):
            plugins_to_load.append(pytest_plugin)

    args = sys.argv[1:]
    # -s disables output capturing so the IDE sees the test output live.
    if JB_DISABLE_BUFFERING and "-s" not in args:
        args += ["-s"]
    jb_doc_args("pytest", args)

    # We need to preparse numprocesses because user may set it using ini file
    config_result = real_prepare_config(args, plugins_to_load)
    if getattr(config_result.option, "numprocesses", None):
        set_parallel_mode()
    # Hand the already-prepared config back to pytest.main instead of
    # letting it parse the arguments a second time.
    config._prepareconfig = lambda _, __: config_result
    start_protocol()
    pytest.main(args, plugins_to_load)
| apache-2.0 |
richokarl/Printrun | printrun/gui/__init__.py | 15 | 13974 | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import logging
try:
import wx
except:
logging.error(_("WX is not installed. This program requires WX to run."))
raise
from printrun.utils import install_locale
install_locale('pronterface')
from .controls import ControlsSizer, add_extra_controls
from .viz import VizPane
from .log import LogPane
from .toolbar import MainToolbar
class ToggleablePane(wx.BoxSizer):
    """Horizontal sizer holding a sub-panel plus a small button that shows
    or hides that panel.  Subclasses add the panel contents and implement
    on_show/on_hide to relayout their parents."""

    def __init__(self, root, label, parentpanel, parentsizers):
        # root: the MainWindow; label: initial button glyph ("<" or ">");
        # parentsizers: sizers/splitters to relayout when toggled.
        super(ToggleablePane, self).__init__(wx.HORIZONTAL)
        if not parentpanel: parentpanel = root.panel
        self.root = root
        self.visible = True
        self.parentpanel = parentpanel
        self.parentsizers = parentsizers
        self.panepanel = root.newPanel(parentpanel)
        self.button = wx.Button(parentpanel, -1, label, size = (22, 18), style = wx.BU_EXACTFIT)
        self.button.Bind(wx.EVT_BUTTON, self.toggle)

    def toggle(self, event):
        """Flip the pane's visibility and swap the button glyph between
        "<" and ">"."""
        if self.visible:
            self.Hide(self.panepanel)
            self.on_hide()
        else:
            self.Show(self.panepanel)
            self.on_show()
        self.visible = not self.visible
        self.button.SetLabel(">" if self.button.GetLabel() == "<" else "<")
class LeftPaneToggleable(ToggleablePane):
    """Collapsible left-hand pane (printer controls) with the toggle button
    on its right edge."""

    def __init__(self, root, parentpanel, parentsizers):
        super(LeftPaneToggleable, self).__init__(root, "<", parentpanel, parentsizers)
        self.Add(self.panepanel, 0, wx.EXPAND)
        self.Add(self.button, 0)

    def set_sizer(self, sizer):
        # Install the sizer holding the pane's actual contents.
        self.panepanel.SetSizer(sizer)

    def on_show(self):
        for sizer in self.parentsizers:
            sizer.Layout()

    def on_hide(self):
        for sizer in self.parentsizers:
            # Expand right splitterwindow
            if isinstance(sizer, wx.SplitterWindow):
                if sizer.shrinked:
                    # Keep only the toggle button's width visible.
                    button_width = self.button.GetSize()[0]
                    sizer.SetSashPosition(sizer.GetSize()[0] - button_width)
            else:
                sizer.Layout()
class LogPaneToggleable(ToggleablePane):
    """Collapsible log pane living in the right half of a wx.SplitterWindow;
    collapsing shrinks the splitter down to just the toggle button."""

    def __init__(self, root, parentpanel, parentsizers):
        super(LogPaneToggleable, self).__init__(root, ">", parentpanel, parentsizers)
        self.Add(self.button, 0)
        pane = LogPane(root, self.panepanel)
        self.panepanel.SetSizer(pane)
        self.Add(self.panepanel, 1, wx.EXPAND)
        # The splitter is the grandparent window of this pane's panel.
        self.splitter = self.parentpanel.GetParent()

    def on_show(self):
        # Restore the sash position and splitter settings saved by on_hide.
        self.splitter.shrinked = False
        self.splitter.SetSashPosition(self.splitter.GetSize()[0] - self.orig_width)
        self.splitter.SetMinimumPaneSize(self.orig_min_size)
        self.splitter.SetSashGravity(self.orig_gravity)
        # SetSashSize/SetSashInvisible only exist on some wx versions.
        if hasattr(self.splitter, "SetSashSize"): self.splitter.SetSashSize(self.orig_sash_size)
        if hasattr(self.splitter, "SetSashInvisible"): self.splitter.SetSashInvisible(False)
        for sizer in self.parentsizers:
            sizer.Layout()

    def on_hide(self):
        # Save the current geometry so on_show can restore it, then shrink
        # the right pane down to the width of the toggle button.
        self.splitter.shrinked = True
        self.orig_width = self.splitter.GetSize()[0] - self.splitter.GetSashPosition()
        button_width = self.button.GetSize()[0]
        self.orig_min_size = self.splitter.GetMinimumPaneSize()
        self.orig_gravity = self.splitter.GetSashGravity()
        self.splitter.SetMinimumPaneSize(button_width)
        self.splitter.SetSashGravity(1)
        self.splitter.SetSashPosition(self.splitter.GetSize()[0] - button_width)
        if hasattr(self.splitter, "SetSashSize"):
            self.orig_sash_size = self.splitter.GetSashSize()
            self.splitter.SetSashSize(0)
        if hasattr(self.splitter, "SetSashInvisible"): self.splitter.SetSashInvisible(True)
        for sizer in self.parentsizers:
            sizer.Layout()
class MainWindow(wx.Frame):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
# this list will contain all controls that should be only enabled
# when we're connected to a printer
self.panel = wx.Panel(self, -1)
self.reset_ui()
self.statefulControls = []
def reset_ui(self):
self.panels = []
self.printerControls = []
def newPanel(self, parent, add_to_list = True):
panel = wx.Panel(parent)
self.registerPanel(panel, add_to_list)
return panel
def registerPanel(self, panel, add_to_list = True):
panel.SetBackgroundColour(self.bgcolor)
if add_to_list: self.panels.append(panel)
def createTabbedGui(self):
self.notesizer = wx.BoxSizer(wx.VERTICAL)
self.notebook = wx.Notebook(self.panel)
self.notebook.SetBackgroundColour(self.bgcolor)
page1panel = self.newPanel(self.notebook)
page2panel = self.newPanel(self.notebook)
self.mainsizer_page1 = wx.BoxSizer(wx.VERTICAL)
page1panel1 = self.newPanel(page1panel)
page1panel2 = self.newPanel(page1panel)
self.toolbarsizer = MainToolbar(self, page1panel1, use_wrapsizer = True)
page1panel1.SetSizer(self.toolbarsizer)
self.mainsizer_page1.Add(page1panel1, 0, wx.EXPAND)
self.lowersizer = wx.BoxSizer(wx.HORIZONTAL)
page1panel2.SetSizer(self.lowersizer)
leftsizer = wx.BoxSizer(wx.VERTICAL)
controls_sizer = ControlsSizer(self, page1panel2, True)
leftsizer.Add(controls_sizer, 1, wx.ALIGN_CENTER)
rightsizer = wx.BoxSizer(wx.VERTICAL)
extracontrols = wx.GridBagSizer()
add_extra_controls(extracontrols, self, page1panel2, controls_sizer.extra_buttons)
rightsizer.AddStretchSpacer()
rightsizer.Add(extracontrols, 0, wx.ALIGN_CENTER)
self.lowersizer.Add(leftsizer, 0, wx.ALIGN_CENTER | wx.RIGHT, border = 10)
self.lowersizer.Add(rightsizer, 1, wx.ALIGN_CENTER)
self.mainsizer_page1.Add(page1panel2, 1)
self.mainsizer = wx.BoxSizer(wx.HORIZONTAL)
self.splitterwindow = wx.SplitterWindow(page2panel, style = wx.SP_3D)
page2sizer1 = wx.BoxSizer(wx.HORIZONTAL)
page2panel1 = self.newPanel(self.splitterwindow)
page2sizer2 = wx.BoxSizer(wx.HORIZONTAL)
page2panel2 = self.newPanel(self.splitterwindow)
vizpane = VizPane(self, page2panel1)
page2sizer1.Add(vizpane, 1, wx.EXPAND)
page2sizer2.Add(LogPane(self, page2panel2), 1, wx.EXPAND)
page2panel1.SetSizer(page2sizer1)
page2panel2.SetSizer(page2sizer2)
self.splitterwindow.SetMinimumPaneSize(1)
self.splitterwindow.SetSashGravity(0.5)
self.splitterwindow.SplitVertically(page2panel1, page2panel2,
self.settings.last_sash_position)
self.mainsizer.Add(self.splitterwindow, 1, wx.EXPAND)
page1panel.SetSizer(self.mainsizer_page1)
page2panel.SetSizer(self.mainsizer)
self.notesizer.Add(self.notebook, 1, wx.EXPAND)
self.notebook.AddPage(page1panel, _("Commands"))
self.notebook.AddPage(page2panel, _("Status"))
if self.settings.uimode == _("Tabbed with platers"):
from printrun.stlplater import StlPlaterPanel
from printrun.gcodeplater import GcodePlaterPanel
page3panel = StlPlaterPanel(parent = self.notebook,
callback = self.platecb,
build_dimensions = self.build_dimensions_list,
circular_platform = self.settings.circular_bed,
simarrange_path = self.settings.simarrange_path,
antialias_samples = int(self.settings.antialias3dsamples))
page4panel = GcodePlaterPanel(parent = self.notebook,
callback = self.platecb,
build_dimensions = self.build_dimensions_list,
circular_platform = self.settings.circular_bed,
antialias_samples = int(self.settings.antialias3dsamples))
self.registerPanel(page3panel)
self.registerPanel(page4panel)
self.notebook.AddPage(page3panel, _("Plater"))
self.notebook.AddPage(page4panel, _("G-Code Plater"))
self.panel.SetSizer(self.notesizer)
self.panel.Bind(wx.EVT_MOUSE_EVENTS, self.editbutton)
self.Bind(wx.EVT_CLOSE, self.kill)
# Custom buttons
if wx.VERSION > (2, 9): self.cbuttonssizer = wx.WrapSizer(wx.HORIZONTAL)
else: self.cbuttonssizer = wx.GridBagSizer()
self.cbuttonssizer = wx.GridBagSizer()
self.centerpanel = self.newPanel(page1panel2)
self.centerpanel.SetSizer(self.cbuttonssizer)
rightsizer.Add(self.centerpanel, 0, wx.ALIGN_CENTER)
rightsizer.AddStretchSpacer()
self.panel.SetSizerAndFit(self.notesizer)
self.cbuttons_reload()
minsize = self.lowersizer.GetMinSize() # lower pane
minsize[1] = self.notebook.GetSize()[1]
self.SetMinSize(self.ClientToWindowSize(minsize)) # client to window
self.Fit()
    def createGui(self, compact = False, mini = False):
        """Build the classic (non-tabbed) main window layout.

        A toolbar sits on top; below it, a collapsible left pane holds the
        printer controls next to the visualization and log areas.  In
        non-compact mode viz and log share a vertical splitter on the
        right; in compact mode the log is stacked under the controls and
        the viz panel sits directly in the lower sizer.

        compact -- if True, merge the log into the left column instead of
                   using a splitter window.
        mini    -- passed through to ControlsSizer to request the reduced
                   control set.
        """
        self.mainsizer = wx.BoxSizer(wx.VERTICAL)
        self.lowersizer = wx.BoxSizer(wx.HORIZONTAL)
        # Top toolbar row.
        upperpanel = self.newPanel(self.panel, False)
        self.toolbarsizer = MainToolbar(self, upperpanel)
        lowerpanel = self.newPanel(self.panel)
        upperpanel.SetSizer(self.toolbarsizer)
        lowerpanel.SetSizer(self.lowersizer)
        # Collapsible left pane containing the printer controls.
        leftpanel = self.newPanel(lowerpanel)
        left_pane = LeftPaneToggleable(self, leftpanel, [self.lowersizer])
        leftpanel.SetSizer(left_pane)
        left_real_panel = left_pane.panepanel
        controls_panel = self.newPanel(left_real_panel)
        controls_sizer = ControlsSizer(self, controls_panel, mini_mode = mini)
        controls_panel.SetSizer(controls_sizer)
        left_sizer = wx.BoxSizer(wx.VERTICAL)
        left_sizer.Add(controls_panel, 1, wx.EXPAND)
        left_pane.set_sizer(left_sizer)
        self.lowersizer.Add(leftpanel, 0, wx.EXPAND)
        if not compact: # Use a splitterwindow to group viz and log
            rightpanel = self.newPanel(lowerpanel)
            rightsizer = wx.BoxSizer(wx.VERTICAL)
            rightpanel.SetSizer(rightsizer)
            self.splitterwindow = wx.SplitterWindow(rightpanel, style = wx.SP_3D)
            self.splitterwindow.SetMinimumPaneSize(150)
            self.splitterwindow.SetSashGravity(0.8)
            rightsizer.Add(self.splitterwindow, 1, wx.EXPAND)
            vizpanel = self.newPanel(self.splitterwindow)
            logpanel = self.newPanel(self.splitterwindow)
            self.splitterwindow.SplitVertically(vizpanel, logpanel,
                                                self.settings.last_sash_position)
            self.splitterwindow.shrinked = False
        else:
            vizpanel = self.newPanel(lowerpanel)
            logpanel = self.newPanel(left_real_panel)
        viz_pane = VizPane(self, vizpanel)
        # Custom buttons
        # wx.WrapSizer only exists on wxPython >= 2.9; older releases fall
        # back to a GridBagSizer.
        if wx.VERSION > (2, 9): self.cbuttonssizer = wx.WrapSizer(wx.HORIZONTAL)
        else: self.cbuttonssizer = wx.GridBagSizer()
        self.centerpanel = self.newPanel(vizpanel)
        self.centerpanel.SetSizer(self.cbuttonssizer)
        viz_pane.Add(self.centerpanel, 0, flag = wx.ALIGN_CENTER)
        vizpanel.SetSizer(viz_pane)
        if compact:
            log_pane = LogPane(self, logpanel)
        else:
            log_pane = LogPaneToggleable(self, logpanel, [self.lowersizer])
            # The left pane also needs to manage the splitter when toggling.
            left_pane.parentsizers.append(self.splitterwindow)
        logpanel.SetSizer(log_pane)
        if not compact:
            self.lowersizer.Add(rightpanel, 1, wx.EXPAND)
        else:
            left_sizer.Add(logpanel, 1, wx.EXPAND)
            self.lowersizer.Add(vizpanel, 1, wx.EXPAND)
        self.mainsizer.Add(upperpanel, 0, wx.EXPAND)
        self.mainsizer.Add(lowerpanel, 1, wx.EXPAND)
        self.panel.SetSizer(self.mainsizer)
        self.panel.Bind(wx.EVT_MOUSE_EVENTS, self.editbutton)
        self.Bind(wx.EVT_CLOSE, self.kill)
        self.mainsizer.Layout()
        # This prevents resizing below a reasonnable value
        # We sum the lowersizer (left pane / viz / log) min size
        # the toolbar height and the statusbar/menubar sizes
        minsize = [0, 0]
        minsize[0] = self.lowersizer.GetMinSize()[0] # lower pane
        minsize[1] = max(viz_pane.GetMinSize()[1], controls_sizer.GetMinSize()[1])
        minsize[1] += self.toolbarsizer.GetMinSize()[1] # toolbar height
        displaysize = wx.DisplaySize()
        # Never ask for a minimum size larger than the physical display.
        minsize[0] = min(minsize[0], displaysize[0])
        minsize[1] = min(minsize[1], displaysize[1])
        self.SetMinSize(self.ClientToWindowSize(minsize)) # client to window
        self.cbuttons_reload()
def gui_set_connected(self):
self.xyb.enable()
self.zb.enable()
for control in self.printerControls:
control.Enable()
def gui_set_disconnected(self):
self.printbtn.Disable()
self.pausebtn.Disable()
self.recoverbtn.Disable()
for control in self.printerControls:
control.Disable()
self.xyb.disable()
self.zb.disable()
| gpl-3.0 |
rolando-contrib/scrapy | scrapy/utils/project.py | 26 | 2590 | import os
from six.moves import cPickle as pickle
import warnings
from importlib import import_module
from os.path import join, dirname, abspath, isabs, exists
from scrapy.utils.conf import closest_scrapy_cfg, get_config, init_env
from scrapy.settings import Settings
from scrapy.exceptions import NotConfigured
# Environment variable pointing at the project's settings module.
ENVVAR = 'SCRAPY_SETTINGS_MODULE'
# scrapy.cfg section that may map a project name to a custom data directory.
DATADIR_CFG_SECTION = 'datadir'
def inside_project():
    """Return True when running inside a Scrapy project.

    A project is detected either through an importable settings module
    named by SCRAPY_SETTINGS_MODULE, or by locating a scrapy.cfg file in
    the current directory or one of its ancestors.
    """
    settings_module = os.environ.get('SCRAPY_SETTINGS_MODULE')
    if settings_module is None:
        return bool(closest_scrapy_cfg())
    try:
        import_module(settings_module)
    except ImportError as exc:
        # An unimportable settings module is only a warning; fall back to
        # the scrapy.cfg lookup below.
        warnings.warn("Cannot import scrapy settings module %s: %s" % (settings_module, exc))
        return bool(closest_scrapy_cfg())
    return True
def project_data_dir(project='default'):
    """Return the current project data dir, creating it if it doesn't exist"""
    if not inside_project():
        raise NotConfigured("Not inside a project")
    cfg = get_config()
    if cfg.has_option(DATADIR_CFG_SECTION, project):
        # An explicit data dir is configured for this project in scrapy.cfg.
        datadir = cfg.get(DATADIR_CFG_SECTION, project)
    else:
        # Otherwise infer a ".scrapy" dir next to the closest scrapy.cfg.
        scrapy_cfg = closest_scrapy_cfg()
        if not scrapy_cfg:
            raise NotConfigured("Unable to find scrapy.cfg file to infer project data dir")
        datadir = abspath(join(dirname(scrapy_cfg), '.scrapy'))
    if not exists(datadir):
        os.makedirs(datadir)
    return datadir
def data_path(path, createdir=False):
    """
    Return the given path joined with the .scrapy data directory.
    If given an absolute path, return it unmodified.
    """
    if isabs(path):
        result = path
    elif inside_project():
        result = join(project_data_dir(), path)
    else:
        # Outside a project, fall back to a local ".scrapy" directory.
        result = join('.scrapy', path)
    if createdir and not exists(result):
        os.makedirs(result)
    return result
def get_project_settings():
    """Return a Settings object populated for the active project.

    Bootstraps the project environment when SCRAPY_SETTINGS_MODULE is not
    yet set, then layers the settings module, pickled overrides and any
    SCRAPY_* environment variables on top of the defaults.
    """
    if ENVVAR not in os.environ:
        init_env(os.environ.get('SCRAPY_PROJECT', 'default'))
    settings = Settings()
    settings_module_path = os.environ.get(ENVVAR)
    if settings_module_path:
        settings.setmodule(settings_module_path, priority='project')
    # XXX: remove this hack
    pickled_settings = os.environ.get("SCRAPY_PICKLED_SETTINGS_TO_OVERRIDE")
    if pickled_settings:
        settings.setdict(pickle.loads(pickled_settings), priority='project')
    # XXX: deprecate and remove this functionality
    env_overrides = {key[len('SCRAPY_'):]: value
                     for key, value in os.environ.items()
                     if key.startswith('SCRAPY_')}
    if env_overrides:
        settings.setdict(env_overrides, priority='project')
    return settings
| bsd-3-clause |
madhavajay/nd889 | 1_foundations/1_sudoku/tests/udacity/solution_test.py | 9 | 8615 | import solution
import unittest
class TestNakedTwins(unittest.TestCase):
    """Regression tests for solution.naked_twins.

    Each fixture is a board mapping box name -> candidate string.  The
    result of one naked-twins pass must equal one of the accepted boards
    listed for that fixture.
    """

    # Board before applying the naked-twins elimination (fixture 1).
    before_naked_twins_1 = {'I6': '4', 'H9': '3', 'I2': '6', 'E8': '1', 'H3': '5', 'H7': '8', 'I7': '1', 'I4': '8',
                            'H5': '6', 'F9': '7', 'G7': '6', 'G6': '3', 'G5': '2', 'E1': '8', 'G3': '1', 'G2': '8',
                            'G1': '7', 'I1': '23', 'C8': '5', 'I3': '23', 'E5': '347', 'I5': '5', 'C9': '1', 'G9': '5',
                            'G8': '4', 'A1': '1', 'A3': '4', 'A2': '237', 'A5': '9', 'A4': '2357', 'A7': '27',
                            'A6': '257', 'C3': '8', 'C2': '237', 'C1': '23', 'E6': '579', 'C7': '9', 'C6': '6',
                            'C5': '37', 'C4': '4', 'I9': '9', 'D8': '8', 'I8': '7', 'E4': '6', 'D9': '6', 'H8': '2',
                            'F6': '125', 'A9': '8', 'G4': '9', 'A8': '6', 'E7': '345', 'E3': '379', 'F1': '6',
                            'F2': '4', 'F3': '23', 'F4': '1235', 'F5': '8', 'E2': '37', 'F7': '35', 'F8': '9',
                            'D2': '1', 'H1': '4', 'H6': '17', 'H2': '9', 'H4': '17', 'D3': '2379', 'B4': '27',
                            'B5': '1', 'B6': '8', 'B7': '27', 'E9': '2', 'B1': '9', 'B2': '5', 'B3': '6', 'D6': '279',
                            'D7': '34', 'D4': '237', 'D5': '347', 'B8': '3', 'B9': '4', 'D1': '5'}

    # Accepted outcomes for fixture 1.
    possible_solutions_1 = [
        {'G7': '6', 'G6': '3', 'G5': '2', 'G4': '9', 'G3': '1', 'G2': '8', 'G1': '7', 'G9': '5', 'G8': '4', 'C9': '1',
         'C8': '5', 'C3': '8', 'C2': '237', 'C1': '23', 'C7': '9', 'C6': '6', 'C5': '37', 'A4': '2357', 'A9': '8',
         'A8': '6', 'F1': '6', 'F2': '4', 'F3': '23', 'F4': '1235', 'F5': '8', 'F6': '125', 'F7': '35', 'F8': '9',
         'F9': '7', 'B4': '27', 'B5': '1', 'B6': '8', 'B7': '27', 'E9': '2', 'B1': '9', 'B2': '5', 'B3': '6', 'C4': '4',
         'B8': '3', 'B9': '4', 'I9': '9', 'I8': '7', 'I1': '23', 'I3': '23', 'I2': '6', 'I5': '5', 'I4': '8', 'I7': '1',
         'I6': '4', 'A1': '1', 'A3': '4', 'A2': '237', 'A5': '9', 'E8': '1', 'A7': '27', 'A6': '257', 'E5': '347',
         'E4': '6', 'E7': '345', 'E6': '579', 'E1': '8', 'E3': '79', 'E2': '37', 'H8': '2', 'H9': '3', 'H2': '9',
         'H3': '5', 'H1': '4', 'H6': '17', 'H7': '8', 'H4': '17', 'H5': '6', 'D8': '8', 'D9': '6', 'D6': '279',
         'D7': '34', 'D4': '237', 'D5': '347', 'D2': '1', 'D3': '79', 'D1': '5'},
        {'I6': '4', 'H9': '3', 'I2': '6', 'E8': '1', 'H3': '5', 'H7': '8', 'I7': '1', 'I4': '8', 'H5': '6', 'F9': '7',
         'G7': '6', 'G6': '3', 'G5': '2', 'E1': '8', 'G3': '1', 'G2': '8', 'G1': '7', 'I1': '23', 'C8': '5', 'I3': '23',
         'E5': '347', 'I5': '5', 'C9': '1', 'G9': '5', 'G8': '4', 'A1': '1', 'A3': '4', 'A2': '237', 'A5': '9',
         'A4': '2357', 'A7': '27', 'A6': '257', 'C3': '8', 'C2': '237', 'C1': '23', 'E6': '579', 'C7': '9', 'C6': '6',
         'C5': '37', 'C4': '4', 'I9': '9', 'D8': '8', 'I8': '7', 'E4': '6', 'D9': '6', 'H8': '2', 'F6': '125',
         'A9': '8', 'G4': '9', 'A8': '6', 'E7': '345', 'E3': '79', 'F1': '6', 'F2': '4', 'F3': '23', 'F4': '1235',
         'F5': '8', 'E2': '3', 'F7': '35', 'F8': '9', 'D2': '1', 'H1': '4', 'H6': '17', 'H2': '9', 'H4': '17',
         'D3': '79', 'B4': '27', 'B5': '1', 'B6': '8', 'B7': '27', 'E9': '2', 'B1': '9', 'B2': '5', 'B3': '6',
         'D6': '279', 'D7': '34', 'D4': '237', 'D5': '347', 'B8': '3', 'B9': '4', 'D1': '5'}
    ]

    # Board before applying the naked-twins elimination (fixture 2).
    before_naked_twins_2 = {'A1': '23', 'A2': '4', 'A3': '7', 'A4': '6', 'A5': '8', 'A6': '5', 'A7': '23', 'A8': '9',
                            'A9': '1', 'B1': '6', 'B2': '9', 'B3': '8', 'B4': '4', 'B5': '37', 'B6': '1', 'B7': '237',
                            'B8': '5', 'B9': '237', 'C1': '23', 'C2': '5', 'C3': '1', 'C4': '23', 'C5': '379',
                            'C6': '2379', 'C7': '8', 'C8': '6', 'C9': '4', 'D1': '8', 'D2': '17', 'D3': '9',
                            'D4': '1235', 'D5': '6', 'D6': '237', 'D7': '4', 'D8': '27', 'D9': '2357', 'E1': '5',
                            'E2': '6', 'E3': '2', 'E4': '8', 'E5': '347', 'E6': '347', 'E7': '37', 'E8': '1', 'E9': '9',
                            'F1': '4', 'F2': '17', 'F3': '3', 'F4': '125', 'F5': '579', 'F6': '279', 'F7': '6',
                            'F8': '8', 'F9': '257', 'G1': '1', 'G2': '8', 'G3': '6', 'G4': '35', 'G5': '345',
                            'G6': '34', 'G7': '9', 'G8': '27', 'G9': '27', 'H1': '7', 'H2': '2', 'H3': '4', 'H4': '9',
                            'H5': '1', 'H6': '8', 'H7': '5', 'H8': '3', 'H9': '6', 'I1': '9', 'I2': '3', 'I3': '5',
                            'I4': '7', 'I5': '2', 'I6': '6', 'I7': '1', 'I8': '4', 'I9': '8'}

    # Accepted outcomes for fixture 2.
    possible_solutions_2 = [
        {'A1': '23', 'A2': '4', 'A3': '7', 'A4': '6', 'A5': '8', 'A6': '5', 'A7': '23', 'A8': '9', 'A9': '1', 'B1': '6',
         'B2': '9', 'B3': '8', 'B4': '4', 'B5': '37', 'B6': '1', 'B7': '237', 'B8': '5', 'B9': '237', 'C1': '23',
         'C2': '5', 'C3': '1', 'C4': '23', 'C5': '79', 'C6': '79', 'C7': '8', 'C8': '6', 'C9': '4', 'D1': '8',
         'D2': '17', 'D3': '9', 'D4': '1235', 'D5': '6', 'D6': '237', 'D7': '4', 'D8': '27', 'D9': '2357', 'E1': '5',
         'E2': '6', 'E3': '2', 'E4': '8', 'E5': '347', 'E6': '347', 'E7': '37', 'E8': '1', 'E9': '9', 'F1': '4',
         'F2': '17', 'F3': '3', 'F4': '125', 'F5': '579', 'F6': '279', 'F7': '6', 'F8': '8', 'F9': '257', 'G1': '1',
         'G2': '8', 'G3': '6', 'G4': '35', 'G5': '345', 'G6': '34', 'G7': '9', 'G8': '27', 'G9': '27', 'H1': '7',
         'H2': '2', 'H3': '4', 'H4': '9', 'H5': '1', 'H6': '8', 'H7': '5', 'H8': '3', 'H9': '6', 'I1': '9', 'I2': '3',
         'I3': '5', 'I4': '7', 'I5': '2', 'I6': '6', 'I7': '1', 'I8': '4', 'I9': '8'},
        {'A1': '23', 'A2': '4', 'A3': '7', 'A4': '6', 'A5': '8', 'A6': '5', 'A7': '23', 'A8': '9', 'A9': '1', 'B1': '6',
         'B2': '9', 'B3': '8', 'B4': '4', 'B5': '3', 'B6': '1', 'B7': '237', 'B8': '5', 'B9': '237', 'C1': '23',
         'C2': '5', 'C3': '1', 'C4': '23', 'C5': '79', 'C6': '79', 'C7': '8', 'C8': '6', 'C9': '4', 'D1': '8',
         'D2': '17', 'D3': '9', 'D4': '1235', 'D5': '6', 'D6': '237', 'D7': '4', 'D8': '27', 'D9': '2357', 'E1': '5',
         'E2': '6', 'E3': '2', 'E4': '8', 'E5': '347', 'E6': '347', 'E7': '37', 'E8': '1', 'E9': '9', 'F1': '4',
         'F2': '17', 'F3': '3', 'F4': '125', 'F5': '579', 'F6': '279', 'F7': '6', 'F8': '8', 'F9': '257', 'G1': '1',
         'G2': '8', 'G3': '6', 'G4': '35', 'G5': '345', 'G6': '34', 'G7': '9', 'G8': '27', 'G9': '27', 'H1': '7',
         'H2': '2', 'H3': '4', 'H4': '9', 'H5': '1', 'H6': '8', 'H7': '5', 'H8': '3', 'H9': '6', 'I1': '9', 'I2': '3',
         'I3': '5', 'I4': '7', 'I5': '2', 'I6': '6', 'I7': '1', 'I8': '4', 'I9': '8'}
    ]

    def test_naked_twins(self):
        # The result must match one of the accepted boards for fixture 1.
        self.assertTrue(solution.naked_twins(self.before_naked_twins_1) in self.possible_solutions_1,
                        "Your naked_twins function produced an unexpected board.")

    def test_naked_twins2(self):
        # The result must match one of the accepted boards for fixture 2.
        self.assertTrue(solution.naked_twins(self.before_naked_twins_2) in self.possible_solutions_2,
                        "Your naked_twins function produced an unexpected board.")
class TestDiagonalSudoku(unittest.TestCase):
    """End-to-end check: solution.solve must produce the exact solved board."""

    # 81-character puzzle string; '.' marks an empty cell.
    diagonal_grid = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'

    # The fully solved board the solver is expected to return.
    solved_diag_sudoku = {'G7': '8', 'G6': '9', 'G5': '7', 'G4': '3', 'G3': '2', 'G2': '4', 'G1': '6', 'G9': '5',
                          'G8': '1', 'C9': '6', 'C8': '7', 'C3': '1', 'C2': '9', 'C1': '4', 'C7': '5', 'C6': '3',
                          'C5': '2', 'C4': '8', 'E5': '9', 'E4': '1', 'F1': '1', 'F2': '2', 'F3': '9', 'F4': '6',
                          'F5': '5', 'F6': '7', 'F7': '4', 'F8': '3', 'F9': '8', 'B4': '7', 'B5': '1', 'B6': '6',
                          'B7': '2', 'B1': '8', 'B2': '5', 'B3': '3', 'B8': '4', 'B9': '9', 'I9': '3', 'I8': '2',
                          'I1': '7', 'I3': '8', 'I2': '1', 'I5': '6', 'I4': '5', 'I7': '9', 'I6': '4', 'A1': '2',
                          'A3': '7', 'A2': '6', 'E9': '7', 'A4': '9', 'A7': '3', 'A6': '5', 'A9': '1', 'A8': '8',
                          'E7': '6', 'E6': '2', 'E1': '3', 'E3': '4', 'E2': '8', 'E8': '5', 'A5': '4', 'H8': '6',
                          'H9': '4', 'H2': '3', 'H3': '5', 'H1': '9', 'H6': '1', 'H7': '7', 'H4': '2', 'H5': '8',
                          'D8': '9', 'D9': '2', 'D6': '8', 'D7': '1', 'D4': '4', 'D5': '3', 'D2': '7', 'D3': '6',
                          'D1': '5'}

    def test_solve(self):
        # Solving the diagonal grid must reproduce the solved board exactly.
        self.assertEqual(solution.solve(self.diagonal_grid), self.solved_diag_sudoku)
# Allow running this suite directly with `python solution_test.py`.
if __name__ == '__main__':
    unittest.main()
| mit |
sarvex/tensorflow | tensorflow/examples/adding_an_op/zero_out_op_3.py | 23 | 1063 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ZeroOut op Python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import tensorflow as tf
# Load the compiled custom-op kernel; the shared library is expected to sit
# in this package's data-files directory (built from the C++ kernel source).
_zero_out_module = tf.load_op_library(
    os.path.join(tf.compat.v1.resource_loader.get_data_files_path(),
                 'zero_out_op_kernel_3.so'))
# Re-export the generated op wrapper under a friendlier name.
zero_out = _zero_out_module.zero_out
| apache-2.0 |
F1ashhimself/robotframework-selenium2library | src/Selenium2Library/keywords/_waiting.py | 26 | 4678 | import time
import robot
from keywordgroup import KeywordGroup
class _WaitingKeywords(KeywordGroup):
    """Keyword-group mixin implementing the "Wait Until ..." keywords by
    polling a condition until it holds or a timeout expires."""

    # Public

    def wait_for_condition(self, condition, timeout=None, error=None):
        """Waits until the given `condition` is true or `timeout` expires.

        `code` may contain multiple lines of code but must contain a
        return statement (with the value to be returned) at the end

        The `condition` can be arbitrary JavaScript expression but must contain a
        return statement (with the value to be returned) at the end.
        See `Execute JavaScript` for information about accessing the
        actual contents of the window through JavaScript.

        `error` can be used to override the default error message.

        See `introduction` for more information about `timeout` and its
        default value.

        See also `Wait Until Page Contains`, `Wait Until Page Contains
        Element`, `Wait Until Element Is Visible` and BuiltIn keyword
        `Wait Until Keyword Succeeds`.
        """
        if not error:
            error = "Condition '%s' did not become true in <TIMEOUT>" % condition
        # Re-evaluate the JavaScript expression in the browser on each poll.
        self._wait_until(timeout, error,
                         lambda: self._current_browser().execute_script(condition) == True)

    def wait_until_page_contains(self, text, timeout=None, error=None):
        """Waits until `text` appears on current page.

        Fails if `timeout` expires before the text appears. See
        `introduction` for more information about `timeout` and its
        default value.

        `error` can be used to override the default error message.

        See also `Wait Until Page Contains Element`, `Wait For Condition`,
        `Wait Until Element Is Visible` and BuiltIn keyword `Wait Until
        Keyword Succeeds`.
        """
        if not error:
            error = "Text '%s' did not appear in <TIMEOUT>" % text
        self._wait_until(timeout, error, self._is_text_present, text)

    def wait_until_page_contains_element(self, locator, timeout=None, error=None):
        """Waits until element specified with `locator` appears on current page.

        Fails if `timeout` expires before the element appears. See
        `introduction` for more information about `timeout` and its
        default value.

        `error` can be used to override the default error message.

        See also `Wait Until Page Contains`, `Wait For Condition`,
        `Wait Until Element Is Visible` and BuiltIn keyword `Wait Until
        Keyword Succeeds`.
        """
        if not error:
            error = "Element '%s' did not appear in <TIMEOUT>" % locator
        self._wait_until(timeout, error, self._is_element_present, locator)

    def wait_until_element_is_visible(self, locator, timeout=None, error=None):
        """Waits until element specified with `locator` is visible.

        Fails if `timeout` expires before the element is visible. See
        `introduction` for more information about `timeout` and its
        default value.

        `error` can be used to override the default error message.

        See also `Wait Until Page Contains`, `Wait Until Page Contains
        Element`, `Wait For Condition` and BuiltIn keyword `Wait Until Keyword
        Succeeds`.
        """
        def check_visibility():
            # _is_visible distinguishes three cases: visible (done), no
            # matching element (None), and matched-but-hidden; each failure
            # mode gets its own message.
            visible = self._is_visible(locator)
            if visible:
                return
            elif visible is None:
                return error or "Element locator '%s' did not match any elements after %s" % (locator, self._format_timeout(timeout))
            else:
                return error or "Element '%s' was not visible in %s" % (locator, self._format_timeout(timeout))
        self._wait_until_no_error(timeout, check_visibility)

    # Private

    def _wait_until(self, timeout, error, function, *args):
        # Adapt a boolean predicate to the protocol _wait_until_no_error
        # expects: None on success, an error message on failure.
        error = error.replace('<TIMEOUT>', self._format_timeout(timeout))
        def wait_func():
            return None if function(*args) else error
        self._wait_until_no_error(timeout, wait_func)

    def _wait_until_no_error(self, timeout, wait_func, *args):
        # Poll wait_func every 0.2 s until it returns a falsy value; once
        # the deadline passes, raise the last error message it returned.
        timeout = robot.utils.timestr_to_secs(timeout) if timeout is not None else self._timeout_in_secs
        maxtime = time.time() + timeout
        while True:
            timeout_error = wait_func(*args)
            if not timeout_error: return
            if time.time() > maxtime:
                raise AssertionError(timeout_error)
            time.sleep(0.2)

    def _format_timeout(self, timeout):
        # Render the effective timeout as a human-readable Robot time string.
        timeout = robot.utils.timestr_to_secs(timeout) if timeout is not None else self._timeout_in_secs
        return robot.utils.secs_to_timestr(timeout)
| apache-2.0 |
gangadhar-kadam/verve_live_erp | erpnext/selling/doctype/customer/customer.py | 6 | 8497 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.naming import make_autoname
from frappe import _, msgprint, throw
import frappe.defaults
from frappe.utils import flt
from erpnext.utilities.transaction_base import TransactionBase
from erpnext.utilities.address_and_contact import load_address_and_contact
class Customer(TransactionBase):
	"""Controller for the Customer master DocType."""

	def get_feed(self):
		# Text shown for this document in the activity feed.
		return self.customer_name

	def onload(self):
		"""Load address and contacts in `__onload`"""
		load_address_and_contact(self, "customer")

	def autoname(self):
		# Name the record after the customer or by naming series, depending
		# on the "cust_master_name" global default.
		cust_master_name = frappe.defaults.get_global_default('cust_master_name')
		if cust_master_name == 'Customer Name':
			self.name = self.customer_name
		else:
			self.name = make_autoname(self.naming_series+'.#####')

	def validate_values(self):
		# A naming series is mandatory when records are named by series.
		if frappe.defaults.get_global_default('cust_master_name') == 'Naming Series' and not self.naming_series:
			frappe.throw(_("Series is mandatory"), frappe.MandatoryError)

	def validate(self):
		self.validate_values()

	def update_lead_status(self):
		# Mark the originating lead as converted once a customer exists.
		if self.lead_name:
			frappe.db.sql("update `tabLead` set status='Converted' where name = %s", self.lead_name)

	def update_address(self):
		# Keep linked Address records in sync with the customer name.
		frappe.db.sql("""update `tabAddress` set customer_name=%s, modified=NOW()
			where customer=%s""", (self.customer_name, self.name))

	def update_contact(self):
		# Keep linked Contact records in sync with the customer name.
		frappe.db.sql("""update `tabContact` set customer_name=%s, modified=NOW()
			where customer=%s""", (self.customer_name, self.name))

	def create_lead_address_contact(self):
		# Re-link the lead's address to this customer and create a primary
		# contact from the lead's details (skipped if one already exists).
		if self.lead_name:
			if not frappe.db.get_value("Address", {"lead": self.lead_name, "customer": self.name}):
				frappe.db.sql("""update `tabAddress` set customer=%s, customer_name=%s where lead=%s""",
					(self.name, self.customer_name, self.lead_name))

			lead = frappe.db.get_value("Lead", self.lead_name, ["lead_name", "email_id", "phone", "mobile_no"], as_dict=True)

			c = frappe.new_doc('Contact')
			c.first_name = lead.lead_name
			c.email_id = lead.email_id
			c.phone = lead.phone
			c.mobile_no = lead.mobile_no
			c.customer = self.name
			c.customer_name = self.customer_name
			c.is_primary_contact = 1
			c.flags.ignore_permissions = self.flags.ignore_permissions
			c.autoname()
			if not frappe.db.exists("Contact", c.name):
				c.insert()

	def on_update(self):
		self.validate_name_with_customer_group()
		self.update_lead_status()
		self.update_address()
		self.update_contact()
		self.create_lead_address_contact()

	def validate_name_with_customer_group(self):
		# Customer and Customer Group share a link-field namespace, so the
		# names must not collide.
		if frappe.db.exists("Customer Group", self.name):
			frappe.throw(_("A Customer Group exists with same name please change the Customer name or rename the Customer Group"))

	def delete_customer_address(self):
		# Unlink lead-owned addresses; delete addresses owned solely by
		# this customer.
		addresses = frappe.db.sql("""select name, lead from `tabAddress`
			where customer=%s""", (self.name,))

		for name, lead in addresses:
			if lead:
				frappe.db.sql("""update `tabAddress` set customer=null, customer_name=null
					where name=%s""", name)
			else:
				frappe.db.sql("""delete from `tabAddress` where name=%s""", name)

	def delete_customer_contact(self):
		for contact in frappe.db.sql_list("""select name from `tabContact`
			where customer=%s""", self.name):
			frappe.delete_doc("Contact", contact)

	def on_trash(self):
		# Clean up linked records and revert the lead to its pre-conversion
		# status.
		self.delete_customer_address()
		self.delete_customer_contact()
		if self.lead_name:
			frappe.db.sql("update `tabLead` set status='Interested' where name=%s",self.lead_name)

	def after_rename(self, olddn, newdn, merge=False):
		# When records are named by customer name, propagate the new name
		# to the customer_name field and to linked contacts/addresses.
		set_field = ''
		if frappe.defaults.get_global_default('cust_master_name') == 'Customer Name':
			frappe.db.set(self, "customer_name", newdn)
			self.update_contact()
			set_field = ", customer_name=%(newdn)s"
		self.update_customer_address(newdn, set_field)

	def update_customer_address(self, newdn, set_field):
		frappe.db.sql("""update `tabAddress` set address_title=%(newdn)s
			{set_field} where customer=%(newdn)s"""\
			.format(set_field=set_field), ({"newdn": newdn}))
@frappe.whitelist()
def get_dashboard_info(customer):
	"""Return document counts and billing totals for the customer dashboard."""
	if not frappe.has_permission("Customer", "read", customer):
		frappe.msgprint(_("Not permitted"), raise_exception=True)

	out = {}
	# Count linked transactions of each type (cancelled docs excluded).
	for doctype in ["Opportunity", "Quotation", "Sales Order", "Delivery Note", "Sales Invoice"]:
		out[doctype] = frappe.db.get_value(doctype,
			{"customer": customer, "docstatus": ["!=", 2] }, "count(*)")

	# Billed and outstanding totals for the default fiscal year.
	billing = frappe.db.sql("""select sum(base_grand_total), sum(outstanding_amount)
		from `tabSales Invoice`
		where customer=%s
			and docstatus = 1
			and fiscal_year = %s""", (customer, frappe.db.get_default("fiscal_year")))

	out["total_billing"] = billing[0][0]
	out["total_unpaid"] = billing[0][1]
	out["company_currency"] = frappe.db.sql_list("select distinct default_currency from tabCompany")

	return out
def get_customer_list(doctype, txt, searchfield, start, page_len, filters):
	# Link-field search query.  Prefix/substring matches on the name sort
	# before matches on customer_name; the outer %-format injects the
	# column list and searchfield, the inner %s placeholders are bound
	# with the escaped search text.
	if frappe.db.get_default("cust_master_name") == "Customer Name":
		fields = ["name", "customer_group", "territory"]
	else:
		fields = ["name", "customer_name", "customer_group", "territory"]

	return frappe.db.sql("""select %s from `tabCustomer` where docstatus < 2
		and (%s like %s or customer_name like %s) order by
		case when name like %s then 0 else 1 end,
		case when customer_name like %s then 0 else 1 end,
		name, customer_name limit %s, %s""" %
		(", ".join(fields), searchfield, "%s", "%s", "%s", "%s", "%s", "%s"),
		("%%%s%%" % txt, "%%%s%%" % txt, "%%%s%%" % txt, "%%%s%%" % txt, start, page_len))
def check_credit_limit(customer, company):
	"""Block the transaction when the customer's outstanding exceeds their
	credit limit, unless the current user holds the credit controller role."""
	customer_outstanding = get_customer_outstanding(customer, company)

	credit_limit = get_credit_limit(customer, company)
	if credit_limit > 0 and flt(customer_outstanding) > credit_limit:
		msgprint(_("Credit limit has been crossed for customer {0} {1}/{2}")
			.format(customer, customer_outstanding, credit_limit))

		# If not authorized person raise exception
		credit_controller = frappe.db.get_value('Accounts Settings', None, 'credit_controller')
		if not credit_controller or credit_controller not in frappe.user.get_roles():
			throw(_("Please contact to the user who have Sales Master Manager {0} role")
				.format(" / " + credit_controller if credit_controller else ""))
def get_customer_outstanding(customer, company):
	"""Return the customer's total receivable exposure for a company.

	Sums the posted ledger balance, the unbilled portion of submitted
	sales orders, and delivered-but-unbilled delivery note amounts.
	"""
	# Outstanding based on GL Entries
	outstanding_based_on_gle = frappe.db.sql("""select sum(ifnull(debit, 0)) - sum(ifnull(credit, 0))
		from `tabGL Entry` where party_type = 'Customer' and party = %s and company=%s""", (customer, company))

	outstanding_based_on_gle = flt(outstanding_based_on_gle[0][0]) if outstanding_based_on_gle else 0

	# Outstanding based on Sales Order
	outstanding_based_on_so = frappe.db.sql("""
		select sum(base_grand_total*(100 - ifnull(per_billed, 0))/100)
		from `tabSales Order`
		where customer=%s and docstatus = 1 and company=%s
		and ifnull(per_billed, 0) < 100 and status != 'Stopped'""", (customer, company))

	outstanding_based_on_so = flt(outstanding_based_on_so[0][0]) if outstanding_based_on_so else 0.0

	# Outstanding based on Delivery Note
	# Only items not already linked to a sales order/invoice, scaled by the
	# grand-total-to-net-total ratio to include taxes.
	outstanding_based_on_dn = frappe.db.sql("""
		select
			sum(
				(
					(ifnull(dn_item.amount, 0) - ifnull((select sum(ifnull(amount, 0))
						from `tabSales Invoice Item`
						where ifnull(dn_detail, '') = dn_item.name and docstatus = 1), 0)
					)/dn.base_net_total
				)*dn.base_grand_total
			)
		from `tabDelivery Note` dn, `tabDelivery Note Item` dn_item
		where
			dn.name = dn_item.parent and dn.customer=%s and dn.company=%s
			and dn.docstatus = 1 and dn.status != 'Stopped'
			and ifnull(dn_item.against_sales_order, '') = ''
			and ifnull(dn_item.against_sales_invoice, '') = ''
			and ifnull(dn_item.amount, 0) > ifnull((select sum(ifnull(amount, 0))
				from `tabSales Invoice Item`
				where ifnull(dn_detail, '') = dn_item.name and docstatus = 1), 0)""", (customer, company))

	outstanding_based_on_dn = flt(outstanding_based_on_dn[0][0]) if outstanding_based_on_dn else 0.0

	return outstanding_based_on_gle + outstanding_based_on_so + outstanding_based_on_dn
def get_credit_limit(customer, company):
	"""Return the effective credit limit for a customer.

	Resolution order: the customer's own limit, then the customer
	group's limit, then the company default.
	"""
	credit_limit, customer_group = frappe.db.get_value("Customer", customer, ["credit_limit", "customer_group"])
	if credit_limit:
		return credit_limit
	return (frappe.db.get_value("Customer Group", customer_group, "credit_limit")
		or frappe.db.get_value("Company", company, "credit_limit"))
| agpl-3.0 |
larrybradley/astropy | astropy/table/__init__.py | 8 | 3387 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import config as _config
from .column import Column, MaskedColumn, StringTruncateWarning, ColumnInfo
__all__ = ['BST', 'Column', 'ColumnGroups', 'ColumnInfo', 'Conf',
'JSViewer', 'MaskedColumn', 'NdarrayMixin', 'QTable', 'Row',
'SCEngine', 'SerializedColumn', 'SortedArray', 'StringTruncateWarning',
'Table', 'TableAttribute', 'TableColumns', 'TableFormatter',
'TableGroups', 'TableMergeError', 'TableReplaceWarning', 'conf',
'connect', 'hstack', 'join', 'registry', 'represent_mixins_as_columns',
'setdiff', 'unique', 'vstack', 'dstack', 'conf', 'join_skycoord',
'join_distance', 'PprintIncludeExclude']
class Conf(_config.ConfigNamespace): # noqa
    """
    Configuration parameters for `astropy.table`.
    """

    # Format template for auto-generated column names ("col0", "col1", ...).
    auto_colname = _config.ConfigItem(
        'col{0}',
        'The template that determines the name of a column if it cannot be '
        'determined. Uses new-style (format method) string formatting.',
        aliases=['astropy.table.column.auto_colname'])

    # Bootstrap CSS classes applied to HTML tables shown in notebooks.
    default_notebook_table_class = _config.ConfigItem(
        'table-striped table-bordered table-condensed',
        'The table class to be used in Jupyter notebooks when displaying '
        'tables (and not overridden). See <https://getbootstrap.com/css/#tables '
        'for a list of useful bootstrap classes.')

    # Conditions under which replacing a column via setitem emits a warning.
    replace_warnings = _config.ConfigItem(
        [],
        'List of conditions for issuing a warning when replacing a table '
        "column using setitem, e.g. t['a'] = value. Allowed options are "
        "'always', 'slice', 'refcount', 'attributes'.",
        'list')

    # Switch forcing in-place column updates on setitem (scheduled for
    # deprecation per its own description).
    replace_inplace = _config.ConfigItem(
        False,
        'Always use in-place update of a table column when using setitem, '
        "e.g. t['a'] = value. This overrides the default behavior of "
        "replacing the column entirely with the new value when possible. "
        "This configuration option will be deprecated and then removed in "
        "subsequent major releases.")
# Module-level singleton through which users access astropy.table config.
conf = Conf() # noqa
from . import connect # noqa: E402
from .groups import TableGroups, ColumnGroups # noqa: E402
from .table import (Table, QTable, TableColumns, Row, TableFormatter,
NdarrayMixin, TableReplaceWarning, TableAttribute,
PprintIncludeExclude) # noqa: E402
from .operations import (join, setdiff, hstack, dstack, vstack, unique, # noqa: E402
TableMergeError, join_skycoord, join_distance) # noqa: E402
from .bst import BST # noqa: E402
from .sorted_array import SortedArray # noqa: E402
from .soco import SCEngine # noqa: E402
from .serialize import SerializedColumn, represent_mixins_as_columns # noqa: E402
# Finally import the formats for the read and write method but delay building
# the documentation until all are loaded. (#5275)
from astropy.io import registry # noqa: E402
# Rebuilding Table's read/write docstrings is deferred until every I/O
# format below has been registered (see #5275, noted above).
with registry.delay_doc_updates(Table):
    # Import routines that connect readers/writers to astropy.table
    from .jsviewer import JSViewer
    import astropy.io.ascii.connect
    import astropy.io.fits.connect
    import astropy.io.misc.connect
    import astropy.io.votable.connect
    import astropy.io.misc.asdf.connect
    import astropy.io.misc.pandas.connect # noqa: F401
| bsd-3-clause |
RenaudParis/servo | tests/wpt/web-platform-tests/tools/wptserve/wptserve/handlers.py | 86 | 12804 | import cgi
import json
import os
import traceback
import urllib
import urlparse
from constants import content_types
from pipes import Pipeline, template
from ranges import RangeParser
from request import Authentication
from response import MultipartContent
from utils import HTTPException
__all__ = ["file_handler", "python_script_handler",
"FunctionHandler", "handler", "json_handler",
"as_is_handler", "ErrorHandler", "BasicAuthHandler"]
def guess_content_type(path):
    """Map a file's extension to a MIME type, defaulting to octet-stream."""
    extension = os.path.splitext(path)[1].lstrip(".")
    return content_types.get(extension, "application/octet-stream")
def filesystem_path(base_path, request, url_base="/"):
    """Map a request URL path onto a filesystem path under base_path.

    base_path defaults to the request's document root.  url_base is the
    URL prefix the handler is mounted at; it is stripped from the request
    path before joining.

    Raises HTTPException(404) for paths containing ".." or resolving
    outside base_path.
    """
    if base_path is None:
        base_path = request.doc_root

    path = request.url_parts.path

    if path.startswith(url_base):
        path = path[len(url_base):]

    if ".." in path:
        raise HTTPException(404)

    new_path = os.path.join(base_path, path)

    # Ensure the resolved path stays inside the root.  A bare prefix test
    # is unsafe here: with base "/a/b", a raw startswith() check would
    # wrongly accept the sibling "/a/bc" (reachable e.g. via a request
    # path with a doubled leading slash).  Normalise both sides and
    # require an exact match or a match up to a path separator.
    norm_base = os.path.normpath(base_path)
    norm_new = os.path.normpath(new_path)
    if norm_new != norm_base and \
       not norm_new.startswith(norm_base.rstrip(os.path.sep) + os.path.sep):
        raise HTTPException(404)

    return new_path
class DirectoryHandler(object):
    """Handler that renders an HTML directory listing for a URL path.

    base_path is the filesystem root (defaults to the request's doc_root
    when None); url_base is the URL prefix the handler is mounted at.
    """

    def __init__(self, base_path=None, url_base="/"):
        self.base_path = base_path
        self.url_base = url_base

    def __repr__(self):
        return "<%s base_path:%s url_base:%s>" % (self.__class__.__name__, self.base_path, self.url_base)

    def __call__(self, request, response):
        """Populate response with a listing, or raise HTTPException(404)."""
        # Only directory URLs (trailing slash) are served.
        if not request.url_parts.path.endswith("/"):
            raise HTTPException(404)

        path = filesystem_path(self.base_path, request, self.url_base)

        if not os.path.isdir(path):
            raise HTTPException(404, "%s is not a directory" % path)

        response.headers = [("Content-Type", "text/html")]
        # Fixed: the list was previously closed with a stray </li> instead
        # of </ul>, producing invalid HTML.
        response.content = """<!doctype html>
<meta name="viewport" content="width=device-width">
<title>Directory listing for %(path)s</title>
<h1>Directory listing for %(path)s</h1>
<ul>
%(items)s
</ul>
""" % {"path": cgi.escape(request.url_parts.path),
       "items": "\n".join(self.list_items(request, path))}

    def list_items(self, request, path):
        """Yield one <li> fragment per directory entry (plus a ".." link)."""
        # TODO: this won't actually list all routes, only the
        # ones that correspond to a real filesystem path. It's
        # not possible to list every route that will match
        # something, but it should be possible to at least list the
        # statically defined ones
        base_path = request.url_parts.path
        if not base_path.endswith("/"):
            base_path += "/"
        # Link to the parent directory, except at the site root.
        if base_path != "/":
            link = urlparse.urljoin(base_path, "..")
            yield ("""<li class="dir"><a href="%(link)s">%(name)s</a>""" %
                   {"link": link, "name": ".."})
        for item in sorted(os.listdir(path)):
            link = cgi.escape(urllib.quote(item))
            if os.path.isdir(os.path.join(path, item)):
                link += "/"
                class_ = "dir"
            else:
                class_ = "file"
            yield ("""<li class="%(class)s"><a href="%(link)s">%(name)s</a>""" %
                   {"link": link, "name": cgi.escape(item), "class": class_})
directory_handler = DirectoryHandler()
class FileHandler(object):
    """Serve static files from disk.

    Supports Range requests, extra headers loaded from per-file and
    per-directory ".headers"/".sub.headers" files, and response pipes
    (selected by a "pipe" query parameter or a *.sub.* filename).
    Directory paths are delegated to a DirectoryHandler.
    """
    def __init__(self, base_path=None, url_base="/"):
        self.base_path = base_path
        self.url_base = url_base
        self.directory_handler = DirectoryHandler(self.base_path, self.url_base)
    def __repr__(self):
        return "<%s base_path:%s url_base:%s>" % (self.__class__.__name__, self.base_path, self.url_base)
    def __call__(self, request, response):
        path = filesystem_path(self.base_path, request, self.url_base)
        if os.path.isdir(path):
            # Directories get an HTML index rather than raw content.
            return self.directory_handler(request, response)
        try:
            #This is probably racy with some other process trying to change the file
            file_size = os.stat(path).st_size
            response.headers.update(self.get_headers(request, path))
            if "Range" in request.headers:
                try:
                    byte_ranges = RangeParser()(request.headers['Range'], file_size)
                except HTTPException as e:
                    if e.code == 416:
                        # Advertise the full size for unsatisfiable ranges.
                        response.headers.set("Content-Range", "bytes */%i" % file_size)
                    raise
            else:
                byte_ranges = None
            data = self.get_data(response, path, byte_ranges)
            response.content = data
            query = urlparse.parse_qs(request.url_parts.query)
            pipeline = None
            if "pipe" in query:
                # An explicit pipe specification in the query string wins.
                pipeline = Pipeline(query["pipe"][-1])
            elif os.path.splitext(path)[0].endswith(".sub"):
                # Files named *.sub.<ext> implicitly get template substitution.
                pipeline = Pipeline("sub")
            if pipeline is not None:
                response = pipeline(request, response)
            return response
        except (OSError, IOError):
            raise HTTPException(404)
    def get_headers(self, request, path):
        # Default headers first, then per-directory (__dir__), then per-file,
        # so later entries can override earlier ones.
        rv = self.default_headers(path)
        rv.extend(self.load_headers(request, os.path.join(os.path.split(path)[0], "__dir__")))
        rv.extend(self.load_headers(request, path))
        return rv
    def load_headers(self, request, path):
        """Parse extra headers from <path>.sub.headers or <path>.headers.

        The .sub.headers variant is run through template substitution first.
        Returns a list of (name, value) tuples; a missing file yields [].
        """
        headers_path = path + ".sub.headers"
        if os.path.exists(headers_path):
            use_sub = True
        else:
            headers_path = path + ".headers"
            use_sub = False
        try:
            with open(headers_path) as headers_file:
                data = headers_file.read()
        except IOError:
            return []
        else:
            if use_sub:
                data = template(request, data)
            return [tuple(item.strip() for item in line.split(":", 1))
                    for line in data.splitlines() if line]
    def get_data(self, response, path, byte_ranges):
        """Return the full or partial file content for the response.

        Any range request sets status 206.  Multiple ranges produce a
        multipart/byteranges body; a single range sets Content-Range directly.
        """
        with open(path, 'rb') as f:
            if byte_ranges is None:
                return f.read()
            else:
                response.status = 206
                if len(byte_ranges) > 1:
                    parts_content_type, content = self.set_response_multipart(response,
                                                                              byte_ranges,
                                                                              f)
                    for byte_range in byte_ranges:
                        content.append_part(self.get_range_data(f, byte_range),
                                            parts_content_type,
                                            [("Content-Range", byte_range.header_value())])
                    return content
                else:
                    response.headers.set("Content-Range", byte_ranges[0].header_value())
                    return self.get_range_data(f, byte_ranges[0])
    def set_response_multipart(self, response, ranges, f):
        """Switch the response to multipart/byteranges.

        Returns the original Content-Type (reused for each part) and the
        MultipartContent container the parts are appended to.
        """
        parts_content_type = response.headers.get("Content-Type")
        if parts_content_type:
            # The [-1] indexing implies headers.get returns a sequence of
            # values; presumably the most recent value wins — TODO confirm
            # against the response.headers API.
            parts_content_type = parts_content_type[-1]
        else:
            parts_content_type = None
        content = MultipartContent()
        response.headers.set("Content-Type", "multipart/byteranges; boundary=%s" % content.boundary)
        return parts_content_type, content
    def get_range_data(self, f, byte_range):
        # Read the half-open [lower, upper) byte slice of the file.
        f.seek(byte_range.lower)
        return f.read(byte_range.upper - byte_range.lower)
    def default_headers(self, path):
        return [("Content-Type", guess_content_type(path))]
file_handler = FileHandler()
class PythonScriptHandler(object):
    """Execute a Python file on disk and delegate to its `main` function.

    The script is executed in a fresh namespace; its `main(request, response)`
    is then wrapped in a FunctionHandler and invoked.
    """
    def __init__(self, base_path=None, url_base="/"):
        self.base_path = base_path
        self.url_base = url_base
    def __repr__(self):
        return "<%s base_path:%s url_base:%s>" % (self.__class__.__name__, self.base_path, self.url_base)
    def __call__(self, request, response):
        path = filesystem_path(self.base_path, request, self.url_base)
        try:
            environ = {"__file__": path}
            execfile(path, environ, environ)
            if "main" not in environ:
                raise HTTPException(500, "No main function in script %s" % path)
            FunctionHandler(environ["main"])(request, response)
        except IOError:
            raise HTTPException(404)
python_script_handler = PythonScriptHandler()
class FunctionHandler(object):
    """Adapter exposing a plain function as a wptserve handler.

    The wrapped function may return None (response already filled in), a
    bare body, a (headers, body) pair, or a (status, headers, body) triple.
    Any exception it raises is converted into an HTTP 500 carrying the
    traceback text.
    """
    def __init__(self, func):
        self.func = func
    def __call__(self, request, response):
        try:
            result = self.func(request, response)
        except Exception:
            raise HTTPException(500, message=traceback.format_exc())
        if result is None:
            return
        if not isinstance(result, tuple):
            # Bare return value: treat it as the response body.
            response.content = result
            return
        if len(result) == 3:
            response.status, headers, content = result
        elif len(result) == 2:
            headers, content = result
        else:
            raise HTTPException(500)
        response.headers.update(headers)
        response.content = content
#The generic name here is so that this can be used as a decorator
def handler(func):
    """Wrap *func* in a FunctionHandler (usable as @handler)."""
    return FunctionHandler(func)
class JsonHandler(object):
    """Handler that JSON-encodes the wrapped function's return value.

    Sets Content-Type to application/json and Content-Length to the length
    of the encoded body.  If the function returns a tuple, only its last
    element (the body) is encoded; the leading elements pass through.
    """
    def __init__(self, func):
        self.func = func
    def __call__(self, request, response):
        return FunctionHandler(self.handle_request)(request, response)
    def handle_request(self, request, response):
        result = self.func(request, response)
        response.headers.set("Content-Type", "application/json")
        if isinstance(result, tuple):
            pieces = list(result)
            pieces[-1] = json.dumps(pieces[-1])
            value = tuple(pieces)
            length = len(value[-1])
        else:
            value = json.dumps(result)
            length = len(value)
        response.headers.set("Content-Length", length)
        return value
def json_handler(func):
    """Wrap *func* in a JsonHandler (usable as @json_handler)."""
    return JsonHandler(func)
class AsIsHandler(object):
    """Serve a file's bytes verbatim, assuming it already contains a complete
    HTTP response (status line, headers and body)."""
    def __init__(self, base_path=None, url_base="/"):
        self.base_path = base_path
        self.url_base = url_base
    def __call__(self, request, response):
        path = filesystem_path(self.base_path, request, self.url_base)
        try:
            with open(path) as f:
                # Write the raw file content straight to the wire; the
                # server cannot know the response framing itself, so the
                # connection is closed afterwards.
                response.writer.write_content(f.read())
            response.close_connection = True
        except IOError:
            raise HTTPException(404)
as_is_handler = AsIsHandler()
class BasicAuthHandler(object):
    def __init__(self, handler, user, password):
        """
        A Basic Auth handler

        :Args:
        - handler: a secondary handler for the request after authentication is successful (example file_handler)
        - user: string of the valid user name or None if any / all credentials are allowed
        - password: string of the password required
        """
        self.user = user
        self.password = password
        self.handler = handler
    def __call__(self, request, response):
        if "authorization" not in request.headers:
            # No credentials supplied: challenge the client.
            response.status = 401
            response.headers.set("WWW-Authenticate", "Basic")
            return response
        else:
            auth = Authentication(request.headers)
            # user=None means any credentials are accepted.
            if self.user is not None and (self.user != auth.username or self.password != auth.password):
                response.set_error(403, "Invalid username or password")
                return response
            return self.handler(request, response)
basic_auth_handler = BasicAuthHandler(file_handler, None, None)
class ErrorHandler(object):
    """Handler that unconditionally responds with a fixed HTTP error status."""
    def __init__(self, status):
        self.status = status
    def __call__(self, request, response):
        response.set_error(self.status)
class StaticHandler(object):
    def __init__(self, path, format_args, content_type, **headers):
        """Handler that reads a file from a path and substitutes some fixed data

        :param path: Path to the template file to use
        :param format_args: Dictionary of values to substitute into the template file
        :param content_type: Content type header to serve the response with
        :param headers: Extra headers to send with responses (underscores in
                        keyword names are mapped to dashes, e.g. Cache_Control)
        """
        with open(path) as f:
            self.data = f.read() % format_args
        self.resp_headers = [("Content-Type", content_type)]
        # NOTE: must append to self.resp_headers; the previous bare
        # "resp_headers" name raised NameError whenever extra headers were
        # passed.  items() works identically on Python 2 and 3.
        for k, v in headers.items():
            self.resp_headers.append((k.replace("_", "-"), v))
        self.handler = handler(self.handle_request)
    def handle_request(self, request, response):
        # (headers, body) pair consumed by FunctionHandler.
        return self.resp_headers, self.data
    def __call__(self, request, response):
        rv = self.handler(request, response)
        return rv
| mpl-2.0 |
mohamedhagag/community-addons | report_xls/ir_report.py | 8 | 1180 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2014 Noviat nv/sa (www.noviat.com). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
class IrActionsReportXml(models.Model):
    """Extend report actions with an additional 'XLS' report type."""
    _inherit = 'ir.actions.report.xml'
    # selection_add appends 'xls' without dropping the existing choices.
    report_type = fields.Selection(selection_add=[('xls', 'XLS')])
| agpl-3.0 |
google-research/google-research | summae/process_rocstories.py | 1 | 3243 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Process ROCStories data into tfrecords.
process_rocstories -- --raw_dir=$ROC_FILES \
--output_base=/tmp/rocstories_spring_winter_train --vocab_file=$VOCAB
We produce 20 shards of data, and will use shards 0-17 for train, 18 for valid,
19 for test.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf # tf
from summae import data_util
from summae import util
FLAGS = flags.FLAGS
# Directory containing the two ROCStories CSV releases (spring 2016,
# winter 2017); see the paths built in main().
flags.DEFINE_string('raw_dir', '',
                    'paths to rocstories raw data.')
# 20 shards; per the module docstring, 0-17 are train, 18 valid, 19 test.
flags.DEFINE_integer('num_shards', 20, 'Number of shards')
flags.DEFINE_string('output_base', '',
                    ('Will output output_base.x.tfrecord for'
                     'x=0, ... num_shards-1.'))
flags.DEFINE_string('vocab_file', '',
                    'If specified, encode using t2t SubwordTextEncoder.')
def main(argv):
  """Read the ROCStories CSVs and write tokenized, sharded TFRecord files.

  Each CSV row (storyid, title, five sentences) becomes one SequenceExample;
  rows are assigned to shards deterministically by story id.
  """
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')
  paths = [os.path.join(FLAGS.raw_dir, x) for x in [
      'ROCStories__spring2016 - ROCStories_spring2016.csv',
      'ROCStories_winter2017 - ROCStories_winter2017.csv']]
  assert paths, FLAGS.raw_dir
  logging.info('Reading from: %s', paths)
  logging.info('Loading vocabulary file from %s', FLAGS.vocab_file)
  tk = util.get_tokenizer(FLAGS.vocab_file)
  assert tk
  writers = data_util.get_filewriters(FLAGS.output_base, 'all',
                                      FLAGS.num_shards)
  # Hash-based sharder keyed on the story id text.
  sharder = data_util.get_text_sharder(FLAGS.num_shards)
  count = 0
  for p in paths:
    logging.info('Opening %s', p)
    with tf.gfile.Open(p) as f:
      reader = csv.reader(f)
      next(reader)  # Header
      for r in reader:
        # Row layout: storyid, storytitle, then exactly five sentences.
        assert len(r) == 7
        storyid = r[0]
        storytitle = r[1]
        sentences = r[2:7]
        context_features = tf.train.Features(feature={
            'storyid': data_util.tf_bytes_feature(storyid),
            'storytitle': data_util.tf_bytes_feature(storytitle),
        })
        seq_ex = data_util.sents2seqex(sentences,
                                       tk,
                                       context_features=context_features,
                                       add_eos=False,
                                       add_untokenized=True)
        writers[sharder(storyid)].write(seq_ex.SerializeToString())
        count += 1
  data_util.close_writers(writers)
  logging.info('Wrote %d records to %d shards.', count, FLAGS.num_shards)
if __name__ == '__main__':
  app.run(main)
| apache-2.0 |
susuchina/ERPNEXT | erpnext/projects/report/daily_time_log_summary/daily_time_log_summary.py | 64 | 2651 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt
def execute(filters=None):
	"""Return (columns, data) for the Daily Time Log Summary report.

	Time logs are fetched ordered by owner; per-employee subtotal rows are
	appended whenever the owner changes, plus a final subtotal for the last
	employee and a grand-total row.
	"""
	if not filters:
		filters = {}
	elif filters.get("from_date") or filters.get("to_date"):
		# Date-only filters are widened to cover the whole day.
		filters["from_time"] = "00:00:00"
		filters["to_time"] = "24:00:00"
	columns = [_("Time Log") + ":Link/Time Log:120", _("Employee") + "::150", _("From Datetime") + "::140",
		_("To Datetime") + "::140", _("Hours") + "::70", _("Activity Type") + "::120", _("Task") + ":Link/Task:150",
		_("Task Subject") + "::180", _("Project") + ":Link/Project:120", _("Status") + "::70"]
	user_map = get_user_map()
	task_map = get_task_map()
	conditions = build_conditions(filters)
	time_logs = frappe.db.sql("""select * from `tabTime Log`
		where docstatus < 2 %s order by owner asc""" % (conditions, ), filters, as_dict=1)
	if time_logs:
		users = [time_logs[0].owner]
	data = []
	total_hours = total_employee_hours = count = 0
	for tl in time_logs:
		if tl.owner not in users:
			# Owner changed: emit a subtotal row for the previous employee.
			users.append(tl.owner)
			data.append(["", "", "", "Total", total_employee_hours, "", "", "", "", ""])
			total_employee_hours = 0
		data.append([tl.name, user_map[tl.owner], tl.from_time, tl.to_time, tl.hours,
			tl.activity_type, tl.task, task_map.get(tl.task), tl.project, tl.status])
		count += 1
		total_hours += flt(tl.hours)
		total_employee_hours += flt(tl.hours)
		if count == len(time_logs):
			# Last record overall: subtotal for the final employee.
			data.append(["", "", "", "Total Hours", total_employee_hours, "", "", "", "", ""])
	if total_hours:
		data.append(["", "", "", "Grand Total", total_hours, "", "", "", "", ""])
	return columns, data
def get_user_map():
	"""Return a map from User.name to a one-element list with the full name.

	The full name is built in SQL.  CONCAT is used for the optional last
	name: the previous "(' ' + last_name)" made MySQL perform *numeric*
	addition (the + operator never concatenates strings in MySQL), so last
	names were silently dropped/garbled.
	"""
	users = frappe.db.sql("""select name,
		concat(first_name, if(last_name, concat(' ', last_name), '')) as fullname
		from tabUser""", as_dict=1)
	user_map = {}
	for p in users:
		user_map.setdefault(p.name, []).append(p.fullname)
	return user_map
def get_task_map():
	"""Return a map from Task.name to a one-element list with the subject."""
	mapping = {}
	for task in frappe.db.sql("""select name, subject from tabTask""", as_dict=1):
		mapping.setdefault(task.name, []).append(task.subject)
	return mapping
def build_conditions(filters):
	"""Build the SQL WHERE-clause fragment for the report filters.

	Returns a string of " and ..." conditions using %(name)s placeholders,
	so the caller must pass `filters` as the query parameters.  Permission
	match conditions for Time Log are appended as well.
	"""
	conditions = ""
	if filters.get("from_date"):
		conditions += " and from_time >= timestamp(%(from_date)s, %(from_time)s)"
	if filters.get("to_date"):
		conditions += " and to_time <= timestamp(%(to_date)s, %(to_time)s)"
	# NOTE(review): imported inside the function, presumably to avoid a
	# circular import at module load time — confirm.
	from frappe.desk.reportview import build_match_conditions
	match_conditions = build_match_conditions("Time Log")
	if match_conditions:
		conditions += " and %s" % match_conditions
	return conditions
| agpl-3.0 |
MQQiang/kbengine | kbe/src/lib/python/Lib/encodings/iso8859_11.py | 272 | 12335 | """ Python Character Mapping Codec iso8859_11 generated from 'MAPPINGS/ISO8859/8859-11.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless ISO-8859-11 codec backed by the generated charmap tables."""
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Chunked encoder; charmap encoding keeps no state between calls."""
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Chunked decoder; charmap decoding keeps no state between calls."""
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Behaviour fully provided by Codec + codecs.StreamWriter.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Behaviour fully provided by Codec + codecs.StreamReader.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry registered for the 'iso8859-11' encoding."""
    return codecs.CodecInfo(
        name='iso8859-11',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u0e01' # 0xA1 -> THAI CHARACTER KO KAI
'\u0e02' # 0xA2 -> THAI CHARACTER KHO KHAI
'\u0e03' # 0xA3 -> THAI CHARACTER KHO KHUAT
'\u0e04' # 0xA4 -> THAI CHARACTER KHO KHWAI
'\u0e05' # 0xA5 -> THAI CHARACTER KHO KHON
'\u0e06' # 0xA6 -> THAI CHARACTER KHO RAKHANG
'\u0e07' # 0xA7 -> THAI CHARACTER NGO NGU
'\u0e08' # 0xA8 -> THAI CHARACTER CHO CHAN
'\u0e09' # 0xA9 -> THAI CHARACTER CHO CHING
'\u0e0a' # 0xAA -> THAI CHARACTER CHO CHANG
'\u0e0b' # 0xAB -> THAI CHARACTER SO SO
'\u0e0c' # 0xAC -> THAI CHARACTER CHO CHOE
'\u0e0d' # 0xAD -> THAI CHARACTER YO YING
'\u0e0e' # 0xAE -> THAI CHARACTER DO CHADA
'\u0e0f' # 0xAF -> THAI CHARACTER TO PATAK
'\u0e10' # 0xB0 -> THAI CHARACTER THO THAN
'\u0e11' # 0xB1 -> THAI CHARACTER THO NANGMONTHO
'\u0e12' # 0xB2 -> THAI CHARACTER THO PHUTHAO
'\u0e13' # 0xB3 -> THAI CHARACTER NO NEN
'\u0e14' # 0xB4 -> THAI CHARACTER DO DEK
'\u0e15' # 0xB5 -> THAI CHARACTER TO TAO
'\u0e16' # 0xB6 -> THAI CHARACTER THO THUNG
'\u0e17' # 0xB7 -> THAI CHARACTER THO THAHAN
'\u0e18' # 0xB8 -> THAI CHARACTER THO THONG
'\u0e19' # 0xB9 -> THAI CHARACTER NO NU
'\u0e1a' # 0xBA -> THAI CHARACTER BO BAIMAI
'\u0e1b' # 0xBB -> THAI CHARACTER PO PLA
'\u0e1c' # 0xBC -> THAI CHARACTER PHO PHUNG
'\u0e1d' # 0xBD -> THAI CHARACTER FO FA
'\u0e1e' # 0xBE -> THAI CHARACTER PHO PHAN
'\u0e1f' # 0xBF -> THAI CHARACTER FO FAN
'\u0e20' # 0xC0 -> THAI CHARACTER PHO SAMPHAO
'\u0e21' # 0xC1 -> THAI CHARACTER MO MA
'\u0e22' # 0xC2 -> THAI CHARACTER YO YAK
'\u0e23' # 0xC3 -> THAI CHARACTER RO RUA
'\u0e24' # 0xC4 -> THAI CHARACTER RU
'\u0e25' # 0xC5 -> THAI CHARACTER LO LING
'\u0e26' # 0xC6 -> THAI CHARACTER LU
'\u0e27' # 0xC7 -> THAI CHARACTER WO WAEN
'\u0e28' # 0xC8 -> THAI CHARACTER SO SALA
'\u0e29' # 0xC9 -> THAI CHARACTER SO RUSI
'\u0e2a' # 0xCA -> THAI CHARACTER SO SUA
'\u0e2b' # 0xCB -> THAI CHARACTER HO HIP
'\u0e2c' # 0xCC -> THAI CHARACTER LO CHULA
'\u0e2d' # 0xCD -> THAI CHARACTER O ANG
'\u0e2e' # 0xCE -> THAI CHARACTER HO NOKHUK
'\u0e2f' # 0xCF -> THAI CHARACTER PAIYANNOI
'\u0e30' # 0xD0 -> THAI CHARACTER SARA A
'\u0e31' # 0xD1 -> THAI CHARACTER MAI HAN-AKAT
'\u0e32' # 0xD2 -> THAI CHARACTER SARA AA
'\u0e33' # 0xD3 -> THAI CHARACTER SARA AM
'\u0e34' # 0xD4 -> THAI CHARACTER SARA I
'\u0e35' # 0xD5 -> THAI CHARACTER SARA II
'\u0e36' # 0xD6 -> THAI CHARACTER SARA UE
'\u0e37' # 0xD7 -> THAI CHARACTER SARA UEE
'\u0e38' # 0xD8 -> THAI CHARACTER SARA U
'\u0e39' # 0xD9 -> THAI CHARACTER SARA UU
'\u0e3a' # 0xDA -> THAI CHARACTER PHINTHU
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\u0e3f' # 0xDF -> THAI CURRENCY SYMBOL BAHT
'\u0e40' # 0xE0 -> THAI CHARACTER SARA E
'\u0e41' # 0xE1 -> THAI CHARACTER SARA AE
'\u0e42' # 0xE2 -> THAI CHARACTER SARA O
'\u0e43' # 0xE3 -> THAI CHARACTER SARA AI MAIMUAN
'\u0e44' # 0xE4 -> THAI CHARACTER SARA AI MAIMALAI
'\u0e45' # 0xE5 -> THAI CHARACTER LAKKHANGYAO
'\u0e46' # 0xE6 -> THAI CHARACTER MAIYAMOK
'\u0e47' # 0xE7 -> THAI CHARACTER MAITAIKHU
'\u0e48' # 0xE8 -> THAI CHARACTER MAI EK
'\u0e49' # 0xE9 -> THAI CHARACTER MAI THO
'\u0e4a' # 0xEA -> THAI CHARACTER MAI TRI
'\u0e4b' # 0xEB -> THAI CHARACTER MAI CHATTAWA
'\u0e4c' # 0xEC -> THAI CHARACTER THANTHAKHAT
'\u0e4d' # 0xED -> THAI CHARACTER NIKHAHIT
'\u0e4e' # 0xEE -> THAI CHARACTER YAMAKKAN
'\u0e4f' # 0xEF -> THAI CHARACTER FONGMAN
'\u0e50' # 0xF0 -> THAI DIGIT ZERO
'\u0e51' # 0xF1 -> THAI DIGIT ONE
'\u0e52' # 0xF2 -> THAI DIGIT TWO
'\u0e53' # 0xF3 -> THAI DIGIT THREE
'\u0e54' # 0xF4 -> THAI DIGIT FOUR
'\u0e55' # 0xF5 -> THAI DIGIT FIVE
'\u0e56' # 0xF6 -> THAI DIGIT SIX
'\u0e57' # 0xF7 -> THAI DIGIT SEVEN
'\u0e58' # 0xF8 -> THAI DIGIT EIGHT
'\u0e59' # 0xF9 -> THAI DIGIT NINE
'\u0e5a' # 0xFA -> THAI CHARACTER ANGKHANKHU
'\u0e5b' # 0xFB -> THAI CHARACTER KHOMUT
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| lgpl-3.0 |
stonelake/pyoptimization | pyopt/packing/rectangular/test_rpacker.py | 1 | 3203 | from unittest import TestCase
from rpacker import RPacker
from box import Box
__author__ = 'Alex Baranov'
class TestRPacker(TestCase):
    """
    Test for RPacker
    """
    def setUp(self):
        """
        The setup test method.
        """
        self.packer = RPacker()
    def test_included_box_remove(self):
        """
        Verify the included containers are correctly removed
        """
        # containers[2] lies wholly inside containers[1] and must be removed.
        containers = [Box(size=(2, 1)),
                      Box(size=(1, 3), bottom_left=(1, 0)),
                      Box(size=(1, 2), bottom_left=(1, 1)),
                      Box(size=(1, 1), bottom_left=(3, 3))]
        not_included_containers = self.packer.remove_included_containers(containers)
        self.assertEqual(len(not_included_containers), 3)
        self.assertIn(containers[0], not_included_containers)
        self.assertIn(containers[1], not_included_containers)
        self.assertIn(containers[3], not_included_containers)
        self.assertNotIn(containers[2], not_included_containers)
    def test_split_included_containers(self):
        """
        Verify the included containers remove function.
        """
        containers = [Box(size=(4, 3)),
                      Box(size=(2, 5))]
        element = Box(size=(1, 1), bottom_left=(1, 4))
        # Splitting around the element yields three residual containers.
        split_containers = self.packer.split_intersected_containers(containers, element)
        self.assertEqual(len(split_containers), 3)
        self.assertEqual(split_containers[0].size, (4, 3))
        self.assertEqual(split_containers[0].polus, (0, 0))
        self.assertEqual(split_containers[1].size, (1, 5))
        self.assertEqual(split_containers[1].polus, (0, 0))
        self.assertEqual(split_containers[2].size, (2, 4))
        self.assertEqual(split_containers[2].polus, (0, 0))
    def test_pack_sample_scenarios(self):
        """
        Verify the packing sample scenarios.
        """
        c = Box(size=(2, 2), bottom_left=(1, 1))
        p = Box(size=(1, 1))
        packed, params = self.packer.pack(boxes=(p, ), containers=(c, ))
        self.assertEqual(len(packed), 1)
        p1 = packed.pop()
        self.assertIsNotNone(p1)
        self.assertEqual(p1.size, (1, 1))
        # The box is placed at the container's bottom-left corner.
        self.assertEqual(p1.polus, (1, 1))
    def test_pack_sample_scenario2(self):
        """
        Verify the packing sample scenarios #2.
        """
        c = Box(size=(5, 8), bottom_left=(0, 0))
        elements = [Box(size=(4, 2)),
                    Box(size=(3, 3)),
                    Box(size=(2, 4))]
        packed, params = self.packer.pack(elements, containers=(c, ))
        self.assertEqual(len(packed), 3)
        p1, p2, p3 = packed
        # check first packed
        self.assertIsNotNone(p1)
        self.assertEqual(p1.size, (4, 2))
        self.assertEqual(p1.polus, (0, 0))
        # check second packed.
        self.assertIsNotNone(p2)
        self.assertEqual(p2.size, (3, 3))
        self.assertEqual(p2.polus, (0, 2))
        # check third packed.
        self.assertIsNotNone(p3)
        self.assertEqual(p3.size, (2, 4))
self.assertEqual(p3.polus, (3, 2)) | apache-2.0 |
maruschin/l-system | lsystem/lfigure.py | 1 | 2037 | # L-System package
import numpy as np
class LFigure:
    """Geometric realisation of an L-system.

    The expanded rule string is turned into a list of 2-D line segments
    scaled to fit the given canvas size.  ('angel' is the upstream spelling
    of 'angle'.)
    """
    def __init__(self, lsystem, size):
        self.lsystem = lsystem
        self.rule = self.lsystem.rule
        self.angel = self.lsystem.angel
        self.make_dimensions(size)
    def make_dimensions(self, size):
        # The unqualified name below resolves to the module-level
        # make_dimensions() function, not this method.
        lines = make_dimensions(self.angel, self.rule, size)
        self.size = size
        self.lines = lines
def rotate(point, phi):
    """Rotate a 2-D point counter-clockwise by *phi* radians."""
    c, s = np.cos(phi), np.sin(phi)
    return point.dot(np.array([[c, s],
                               [-s, c]]))
def rad_to_euc(r, phi):
    # NOTE(review): despite the name suggesting a polar->Euclidean
    # conversion, this rotates the point (r, r) by phi — confirm this is
    # the intended step geometry for the turtle.
    x, y = rotate(np.array([r, r]), phi)
    return x, y
def make_dimensions(angel, rule, size):
    '''Count step length and start point to fit in canvas size'''
    # Turtle-graphics interpretation of the rule string:
    #   f/F : draw one step forward;  -/+ : turn by `angel`;
    #   [ ] : push/pop (heading, position);  other symbols are ignored.
    x_dim = y_dim = np.array((0, 0))
    start_point = np.array((0,0), dtype='float64')
    curr_angel = np.pi
    save_angel = []
    step_length = 1
    lines = []
    for r in rule:
        if r in 'fF':
            end_point = start_point + np.array(rad_to_euc(step_length, curr_angel))
            lines.append(np.array([start_point, end_point]))
            start_point = end_point
            # Find min and max border of our figure
            dimension = lambda dim, dot: (min(dim[0], dot), max(dim[1], dot))
            x_dim = dimension(x_dim, end_point[0])
            y_dim = dimension(y_dim, end_point[1])
        elif r == '-':
            curr_angel -= angel
        elif r == '+':
            curr_angel += angel
        elif r == '[':
            save_angel.append((curr_angel, start_point.copy()))
        elif r == ']':
            curr_angel, start_point = save_angel.pop()
        else:
            pass
    # Move points to beginning of coordinate
    lines = [line-[x_dim[0], y_dim[0]] for line in lines]
    # Scale figure to fit in canvas
    # NOTE(review): if the rule draws nothing along an axis, its extent is 0
    # and this division raises ZeroDivisionError — confirm callers always
    # supply a rule that draws in both dimensions.
    scale = min(size[0]/sum([abs(x) for x in x_dim]),
                size[1]/sum([abs(y) for y in y_dim]))
    lines = [line*scale for line in lines]
    return lines
if __name__ == "__main__":
pass | mit |
espadrine/opera | chromium/src/third_party/chromite/lib/partial_mock.py | 2 | 17522 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Contains functionality used to implement a partial mock."""
import collections
import logging
import mock
import os
import re
from chromite.lib import cros_build_lib
from chromite.lib import osutils
class Comparator(object):
  """Base class for all comparators."""

  def Match(self, arg):
    """Match the comparator against an argument.

    Arguments:
      arg: The value to match against.

    Returns:
      Whether the argument matches.
    """
    # Use the call form of raise; the legacy "raise Exc, msg" comma syntax
    # is a SyntaxError under Python 3 (the call form is identical in
    # Python 2).
    raise NotImplementedError('method must be implemented by a subclass.')

  def Equals(self, rhs):
    """Returns whether rhs compares the same thing."""
    return type(self) == type(rhs) and self.__dict__ == rhs.__dict__

  def __eq__(self, rhs):
    return self.Equals(rhs)

  def __ne__(self, rhs):
    return not self.Equals(rhs)
class In(Comparator):
  """Checks whether an item (or key) is in a list (or dict) parameter."""

  def __init__(self, key):
    """Initialize.

    Arguments:
      key: Any thing that could be in a list or a key in a dict
    """
    Comparator.__init__(self)
    self._key = key

  def Match(self, arg):
    """Return True if |arg| contains the stored item/key."""
    try:
      found = self._key in arg
    except TypeError:
      # |arg| does not support membership tests at all.
      found = False
    return found

  def __repr__(self):
    return '<sequence or map containing %r>' % str(self._key)
class Regex(Comparator):
  """Checks if a string matches a regular expression."""

  def __init__(self, pattern, flags=0):
    """Initialize.

    Arguments:
      pattern: is the regular expression to search for
      flags: passed to re.compile function as the second argument
    """
    Comparator.__init__(self)
    self.pattern = pattern
    self.flags = flags
    self.regex = re.compile(pattern, flags=flags)

  def Match(self, arg):
    """Return True if the pattern is found anywhere in |arg|."""
    try:
      found = self.regex.search(arg)
    except TypeError:
      # Non-string arguments never match.
      return False
    return found is not None

  def __repr__(self):
    pieces = ['<regular expression %r' % self.regex.pattern]
    if self.regex.flags:
      pieces.append(', flags=%d' % self.regex.flags)
    pieces.append('>')
    return ''.join(pieces)
class ListRegex(Regex):
  """Checks if an iterable of strings matches a regular expression."""

  @staticmethod
  def _ProcessArg(arg):
    # Join non-string iterables into one space-separated string so the
    # pattern can match across elements (e.g. a command argv list).
    # NOTE: basestring is Python 2 only.
    if not isinstance(arg, basestring):
      return ' '.join(arg)
    return arg

  def Match(self, arg):
    try:
      return self.regex.search(self._ProcessArg(arg)) is not None
    except TypeError:
      # Unjoinable/unsearchable arguments never match.
      return False
class Ignore(Comparator):
  """Used when we don't care about an argument of a method call."""

  def Match(self, _arg):
    # Unconditionally matches any argument.
    return True

  def __repr__(self):
    return '<IgnoreArg>'
def _RecursiveCompare(lhs, rhs):
  """Compare parameter specs recursively.

  Comparator instances on the left-hand side are applied with Match();
  tuples/lists are compared element-wise, dicts by sorted items, and
  everything else with ==.

  Arguments:
    lhs, rhs: Function parameter specs to compare.

  Returns:
    Whether |rhs| matches |lhs|.
  """
  if isinstance(lhs, Comparator):
    return lhs.Match(rhs)
  elif type(lhs) != type(rhs):
    return False
  elif isinstance(lhs, (tuple, list)):
    return (len(lhs) == len(rhs) and
            all(_RecursiveCompare(i, j) for i, j in zip(lhs, rhs)))
  elif isinstance(lhs, dict):
    # Sort items so that key ordering does not affect the comparison.
    return _RecursiveCompare(sorted(lhs.iteritems()), sorted(rhs.iteritems()))
  else:
    return lhs == rhs
def ListContains(small, big, strict=False):
  """Looks for a sublist within a bigger list.

  Arguments:
    small: The sublist to search for.
    big: The list to search in.
    strict: If True, all items in list must be adjacent.
  """
  if strict:
    # Slide a window of len(small) over |big| and compare each window.
    for i in xrange(len(big) - len(small) + 1):
      if _RecursiveCompare(small, big[i:i + len(small)]):
        return True
    return False
  else:
    # Non-strict mode: the items of |small| must appear in |big| in order,
    # but other elements may be interleaved.  |j| deliberately persists
    # across outer iterations so the search for each subsequent item of
    # |small| resumes just past the previous match.
    j = 0
    for i in xrange(len(small)):
      for j in xrange(j, len(big)):
        if _RecursiveCompare(small[i], big[j]):
          j += 1
          break
      else:
        # Inner loop exhausted without a match: small[i] is missing.
        return False
    return True
def DictContains(small, big):
  """Looks for a subset within a dictionary.

  Arguments:
    small: The sub-dict to search for.
    big: The dict to search in.

  Returns:
    True if every key of |small| exists in |big| with a matching value.
  """
  # dict.items() (instead of Python 2-only iteritems()) works on both
  # Python 2 and Python 3 with identical semantics here.
  for k, v in small.items():
    if k not in big or not _RecursiveCompare(v, big[k]):
      return False
  return True
class MockedCallResults(object):
  """Implements internal result specification for partial mocks.

  Used with the PartialMock class.

  Internal results are different from external results (return values,
  side effects, exceptions, etc.) for functions.  Internal results are
  *used* by the partial mock to generate external results.  Often internal
  results represent the external results of the dependencies of the function
  being partially mocked.  Of course, the partial mock can just pass through
  the internal results to become external results.
  """
  # (args, kwargs) tuple describing one recorded invocation spec.
  Params = collections.namedtuple('Params', ['args', 'kwargs'])
  # One registered mock entry: the params to match, strictness, and results.
  MockedCall = collections.namedtuple(
      'MockedCall', ['params', 'strict', 'result', 'side_effect'])

  def __init__(self, name):
    """Initialize.

    Arguments:
      name: The name given to the mock.  Will be used in debug output.
    """
    self.name = name
    self.mocked_calls = []
    self.default_result, self.default_side_effect = None, None

  @staticmethod
  def AssertArgs(args, kwargs):
    """Verify arguments are of expected type."""
    assert isinstance(args, (tuple))
    if kwargs:
      assert isinstance(kwargs, dict)

  def AddResultForParams(self, args, result, kwargs=None, side_effect=None,
                         strict=True):
    """Record the internal results of a given partial mock call.

    Arguments:
      args: A list containing the positional args an invocation must have for
        it to match the internal result.  The list can contain instances of
        meta-args (such as IgnoreArg, Regex, In, etc.).  Positional argument
        matching is always *strict*, meaning extra positional arguments in
        the invocation are not allowed.
      result: The internal result that will be matched for the command
        invocation specified.
      kwargs: A dictionary containing the keyword args an invocation must have
        for it to match the internal result.  The dictionary can contain
        instances of meta-args (such as IgnoreArg, Regex, In, etc.).  Keyword
        argument matching is by default *strict*, but can be modified by the
        |strict| argument.
      side_effect: A functor that gets called every time a partially mocked
        function is invoked.  The arguments the partial mock is invoked with
        are passed to the functor.  This is similar to how side effects work
        for mocks.
      strict: Specifies whether keyword are matched strictly.  With strict
        matching turned on, any keyword args a partial mock is invoked with
        that are not specified in |kwargs| will cause the match to fail.
    """
    self.AssertArgs(args, kwargs)
    if kwargs is None:
      kwargs = {}
    params = self.Params(args=args, kwargs=kwargs)
    # Replace any previously recorded entry with identical params; keep
    # everything else in registration order.
    dup, filtered = cros_build_lib.PredicateSplit(
        lambda mc: mc.params == params, self.mocked_calls)
    new = self.MockedCall(params=params, strict=strict, result=result,
                          side_effect=side_effect)
    filtered.append(new)
    self.mocked_calls = filtered
    if dup:
      logging.debug('%s: replacing mock for arguments %r:\n%r -> %r',
                    self.name, params, dup, new)

  def SetDefaultResult(self, result, side_effect=None):
    """Set the default result for an unmatched partial mock call.

    Arguments:
      result, side_effect: See AddResultsForParams.
    """
    self.default_result, self.default_side_effect = result, side_effect

  def LookupResult(self, args, kwargs=None, hook_args=None, hook_kwargs=None):
    """For a given mocked function call lookup the recorded internal results.

    Arguments:
      args: A list containing positional args the function was called with.
      kwargs: A dict containing keyword args the function was called with.
      hook_args: A list of positional args to call the hook with.
      hook_kwargs: A dict of key/value args to call the hook with.

    Returns:
      The recorded result for the invocation.

    Raises:
      AssertionError when the call is not mocked, or when there is more
      than one mock that matches.
    """
    def filter_fn(mc):
      # Strict entries compare args AND kwargs exactly; non-strict entries
      # only require the recorded kwargs to be a subset of the actual ones.
      if mc.strict:
        return _RecursiveCompare(mc.params, params)
      return (DictContains(mc.params.kwargs, kwargs) and
              _RecursiveCompare(mc.params.args, args))

    self.AssertArgs(args, kwargs)
    if kwargs is None:
      kwargs = {}
    params = self.Params(args, kwargs)
    matched, _ = cros_build_lib.PredicateSplit(filter_fn, self.mocked_calls)
    if len(matched) > 1:
      raise AssertionError(
          "%s: args %r matches more than one mock:\n%s"
          % (self.name, params, '\n'.join([repr(c) for c in matched])))
    elif matched:
      side_effect, result = matched[0].side_effect, matched[0].result
    elif (self.default_result, self.default_side_effect) != (None, None):
      side_effect, result = self.default_side_effect, self.default_result
    else:
      raise AssertionError("%s: %r not mocked!" % (self.name, params))

    if side_effect:
      # A side effect may override the recorded result by returning non-None.
      assert(hook_args is not None)
      assert(hook_kwargs is not None)
      hook_result = side_effect(*hook_args, **hook_kwargs)
      if hook_result is not None:
        return hook_result
    return result
class PartialMock(object):
  """Provides functionality for partially mocking out a function or method.

  Partial mocking is useful in cases where the side effects of a function or
  method are complex, and so re-using the logic of the function with
  *dependencies* mocked out is preferred over mocking out the entire function
  and re-implementing the side effect (return value, state modification) logic
  in the test.  It is also useful for creating re-usable mocks.
  """
  # Subclasses set TARGET to a 'module.Class' string and ATTRS to the tuple
  # of attribute names on that class to patch.  Both or neither must be set.
  TARGET = None
  ATTRS = None

  def __init__(self, create_tempdir=False):
    """Initialize.

    Arguments:
      create_tempdir: If set to True, the partial mock will create its own
        temporary directory when start() is called, and will set self.tempdir
        to the path of the directory.  The directory is deleted when stop() is
        called.
    """
    self.backup = {}
    self.patchers = {}
    self.patched = {}
    self.external_patchers = []
    self.create_tempdir = create_tempdir
    # Set when start() is called.
    self._tempdir_obj = None
    self.tempdir = None
    self.__saved_env__ = None
    self.started = False
    self._results = {}
    # TARGET and ATTRS must be configured together or not at all.
    if not all([self.TARGET, self.ATTRS]) and any([self.TARGET, self.ATTRS]):
      raise AssertionError('TARGET=%r but ATTRS=%r!'
                           % (self.TARGET, self.ATTRS))
    if self.ATTRS is not None:
      for attr in self.ATTRS:
        self._results[attr] = MockedCallResults(attr)

  def __enter__(self):
    # Context-manager support: `with SomePartialMock() as m:`.
    return self.start()

  def __exit__(self, exc_type, exc_value, traceback):
    self.stop()

  def PreStart(self):
    """Called at the beginning of start(). Child classes can override this.

    If __init__ was called with |create_tempdir| set, then self.tempdir will
    point to an existing temporary directory when this function is called.
    """

  def PreStop(self):
    """Called at the beginning of stop().  Child classes can override this.

    If __init__ was called with |create_tempdir| set, then self.tempdir will
    not be deleted until after this function returns.
    """

  def StartPatcher(self, patcher):
    """PartialMock will stop the patcher when stop() is called."""
    patcher.start()
    self.external_patchers.append(patcher)

  def _start(self):
    # Patch each attribute in ATTRS on the TARGET class, routing callables
    # through the correspondingly-named method on this mock object.
    if not all([self.TARGET, self.ATTRS]):
      return
    chunks = self.TARGET.rsplit('.', 1)
    module = cros_build_lib.load_module(chunks[0])
    cls = getattr(module, chunks[1])
    for attr in self.ATTRS:
      self.backup[attr] = getattr(cls, attr)
      # Dunder attrs are provided on the mock as '_target__<name>' to avoid
      # Python name mangling of the '__' prefix.
      src_attr = '_target%s' % attr if attr.startswith('__') else attr
      if hasattr(self.backup[attr], 'reset_mock'):
        # The attribute is already a mock object, i.e. another mock context
        # is active on it.
        raise AssertionError(
            'You are trying to nest mock contexts - this is currently '
            'unsupported by PartialMock.')
      if callable(self.backup[attr]):
        patcher = mock.patch.object(cls, attr, autospec=True,
                                    side_effect=getattr(self, src_attr))
      else:
        patcher = mock.patch.object(cls, attr, getattr(self, src_attr))
      self.patched[attr] = patcher.start()
      self.patchers[attr] = patcher
    return self

  def start(self):
    """Activates the mock context."""
    try:
      # Snapshot the environment so stop() can restore it.
      self.__saved_env__ = os.environ.copy()
      self.tempdir = None
      if self.create_tempdir:
        self._tempdir_obj = osutils.TempDir(set_global=True)
        self.tempdir = self._tempdir_obj.tempdir
      self.started = True
      self.PreStart()
      return self._start()
    except:
      # Bare except is intentional: undo any partial setup, then re-raise
      # whatever went wrong (including KeyboardInterrupt).
      self.stop()
      raise

  def stop(self):
    """Restores namespace to the unmocked state."""
    try:
      if self.__saved_env__ is not None:
        osutils.SetEnvironment(self.__saved_env__)
      # NOTE(review): itervalues() is Python 2 only; this module targets
      # Python 2.  SafeRun executes every task even if some raise.
      tasks = ([self.PreStop] + [p.stop for p in self.patchers.itervalues()] +
               [p.stop for p in self.external_patchers])
      if self._tempdir_obj is not None:
        tasks += [self._tempdir_obj.Cleanup]
      cros_build_lib.SafeRun(tasks)
    finally:
      self.started = False
      self.tempdir, self._tempdir_obj = None, None

  def UnMockAttr(self, attr):
    """Unsetting the mock of an attribute/function."""
    self.patchers.pop(attr).stop()
def CheckAttr(f):
  """Automatically set mock_attr based on class default.

  This function decorator automatically sets the mock_attr keyword argument
  based on the class default.  The mock_attr specifies which mocked attribute
  a given function is referring to.

  Raises an AssertionError if mock_attr is left unspecified.
  """
  def new_f(self, *args, **kwargs):
    chosen = kwargs.pop('mock_attr', None)
    if chosen is None:
      # Fall back to the class-level default attribute.
      chosen = self.DEFAULT_ATTR
      if self.DEFAULT_ATTR is None:
        raise AssertionError(
            'mock_attr not specified, and no default configured.')
    return f(self, *args, mock_attr=chosen, **kwargs)
  return new_f
class PartialCmdMock(PartialMock):
  """Base class for mocking functions that wrap command line functionality.

  Implements mocking for functions that shell out.  The internal results are
  'returncode', 'output', 'error'.
  """
  # Lightweight stand-in for a command result object.
  CmdResult = collections.namedtuple(
      'MockResult', ['returncode', 'output', 'error'])

  # Subclasses set this to the ATTRS entry that CheckAttr should default to.
  DEFAULT_ATTR = None

  @CheckAttr
  def SetDefaultCmdResult(self, returncode=0, output='', error='',
                          side_effect=None, mock_attr=None):
    """Specify the default command result if no command is matched.

    Arguments:
      returncode, output, error: See AddCmdResult.
      side_effect: See MockedCallResults.AddResultForParams
      mock_attr: Which mocked attribute the result applies to (defaults to
        DEFAULT_ATTR via the CheckAttr decorator).
    """
    result = self.CmdResult(returncode, output, error)
    self._results[mock_attr].SetDefaultResult(result, side_effect)

  @CheckAttr
  def AddCmdResult(self, cmd, returncode=0, output='', error='',
                   kwargs=None, strict=False, side_effect=None, mock_attr=None):
    """Specify the result to simulate for a given command.

    Arguments:
      cmd: The command string or list to record a result for.
      returncode: The returncode of the command (on the command line).
      output: The stdout output of the command.
      error: The stderr output of the command.
      kwargs: Keyword arguments that the function needs to be invoked with.
      strict: Defaults to False.  See MockedCallResults.AddResultForParams.
      side_effect: See MockedCallResults.AddResultForParams
      mock_attr: Which mocked attribute the result applies to.
    """
    result = self.CmdResult(returncode, output, error)
    self._results[mock_attr].AddResultForParams(
        (cmd,), result, kwargs=kwargs, side_effect=side_effect, strict=strict)

  @CheckAttr
  def CommandContains(self, args, cmd_arg_index=-1, mock_attr=None, **kwargs):
    """Verify that at least one command contains the specified args.

    Arguments:
      args: Set of expected command-line arguments.
      cmd_arg_index: The index of the command list in the positional
        call_args.  Defaults to the last positional argument.
      kwargs: Set of expected keyword arguments.
    """
    for call_args, call_kwargs in self.patched[mock_attr].call_args_list:
      if (ListContains(args, call_args[cmd_arg_index]) and
          DictContains(kwargs, call_kwargs)):
        return True
    return False

  @CheckAttr
  def assertCommandContains(self, args=(), expected=True, mock_attr=None,
                            **kwargs):
    """Assert that RunCommand was called with the specified args.

    This verifies that at least one of the RunCommand calls contains the
    specified arguments on the command line.

    Arguments:
      args: Set of expected command-line arguments.
      expected: If False, instead verify that none of the RunCommand calls
        contained the specified arguments.
      **kwargs: Set of expected keyword arguments.
    """
    if bool(expected) != self.CommandContains(args, **kwargs):
      if expected:
        msg = 'Expected to find %r in any of:\n%s'
      else:
        msg = 'Expected to not find %r in any of:\n%s'
      patched = self.patched[mock_attr]
      cmds = '\n'.join(repr(x) for x in patched.call_args_list)
      raise AssertionError(msg % (mock.call(args, **kwargs), cmds))
| bsd-3-clause |
arista-eosplus/ansible | lib/ansible/plugins/callback/logstash.py | 12 | 6893 | # (C) 2016, Ievgen Khmelenko <ujenmr@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import socket
import uuid
import logging
try:
import logstash
HAS_LOGSTASH = True
except ImportError:
HAS_LOGSTASH = False
from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
    """
    ansible logstash callback plugin
    ansible.cfg:
        callback_plugins   = <path_to_callback_plugins_folder>
        callback_whitelist = logstash
    and put the plugin in <path_to_callback_plugins_folder>
    logstash config:
        input {
            tcp {
                port => 5000
                codec => json
            }
        }
    Requires:
        python-logstash
    This plugin makes use of the following environment variables:
        LOGSTASH_SERVER   (optional): defaults to localhost
        LOGSTASH_PORT     (optional): defaults to 5000
        LOGSTASH_TYPE     (optional): defaults to ansible
    """
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'aggregate'
    CALLBACK_NAME = 'logstash'
    CALLBACK_NEEDS_WHITELIST = True

    def __init__(self):
        super(CallbackModule, self).__init__()

        if not HAS_LOGSTASH:
            # Disable ourselves rather than crash when the dependency is
            # missing; ansible keeps running without this callback.
            self.disabled = True
            self._display.warning("The required python-logstash is not installed. "
                                  "pip install python-logstash")
        else:
            # Single TCP handler shipping every event to logstash; DEBUG
            # level so nothing is filtered out locally.
            self.logger = logging.getLogger('python-logstash-logger')
            self.logger.setLevel(logging.DEBUG)

            self.handler = logstash.TCPLogstashHandler(
                os.getenv('LOGSTASH_SERVER', 'localhost'),
                int(os.getenv('LOGSTASH_PORT', 5000)),
                version=1,
                message_type=os.getenv('LOGSTASH_TYPE', 'ansible')
            )

            self.logger.addHandler(self.handler)
            self.hostname = socket.gethostname()
            self.session = str(uuid.uuid1())
            self.errors = 0

    def _event_data(self, status, ansible_type, **extra):
        """Build the payload fields shared by every event we emit.

        Arguments:
            status: summary string, e.g. "OK", "FAILED", "SKIPPED".
            ansible_type: event category ("start", "task", "finish", "import").
            extra: event-specific fields merged into the payload.
        """
        data = {
            'status': status,
            'host': self.hostname,
            'session': self.session,
            'ansible_type': ansible_type,
            'ansible_playbook': self.playbook,
        }
        data.update(extra)
        return data

    def v2_playbook_on_start(self, playbook):
        self.playbook = playbook._file_name
        self.logger.info("ansible start", extra=self._event_data("OK", "start"))

    def v2_playbook_on_stats(self, stats):
        # Summarize per-host stats; overall status is FAILED if any task
        # failed during the run.
        summarize_stat = {}
        for host in stats.processed.keys():
            summarize_stat[host] = stats.summarize(host)
        status = "OK" if self.errors == 0 else "FAILED"
        data = self._event_data(status, "finish",
                                ansible_result=json.dumps(summarize_stat))
        self.logger.info("ansible stats", extra=data)

    def v2_runner_on_ok(self, result, **kwargs):
        data = self._event_data("OK", "task",
                                ansible_host=result._host.name,
                                ansible_task=result._task,
                                ansible_result=self._dump_results(result._result))
        self.logger.info("ansible ok", extra=data)

    def v2_runner_on_skipped(self, result, **kwargs):
        data = self._event_data("SKIPPED", "task",
                                ansible_task=result._task,
                                ansible_host=result._host.name)
        self.logger.info("ansible skipped", extra=data)

    def v2_playbook_on_import_for_host(self, result, imported_file):
        data = self._event_data("IMPORTED", "import",
                                ansible_host=result._host.name,
                                imported_file=imported_file)
        self.logger.info("ansible import", extra=data)

    def v2_playbook_on_not_import_for_host(self, result, missing_file):
        data = self._event_data("NOT IMPORTED", "import",
                                ansible_host=result._host.name,
                                missing_file=missing_file)
        self.logger.info("ansible import", extra=data)

    def v2_runner_on_failed(self, result, **kwargs):
        data = self._event_data("FAILED", "task",
                                ansible_host=result._host.name,
                                ansible_task=result._task,
                                ansible_result=self._dump_results(result._result))
        self.errors += 1
        self.logger.error("ansible failed", extra=data)

    def v2_runner_on_unreachable(self, result, **kwargs):
        data = self._event_data("UNREACHABLE", "task",
                                ansible_host=result._host.name,
                                ansible_task=result._task,
                                ansible_result=self._dump_results(result._result))
        # Fixed typo in the emitted message (was "ansbile unreachable").
        self.logger.error("ansible unreachable", extra=data)

    def v2_runner_on_async_failed(self, result, **kwargs):
        data = self._event_data("FAILED", "task",
                                ansible_host=result._host.name,
                                ansible_task=result._task,
                                ansible_result=self._dump_results(result._result))
        self.errors += 1
        self.logger.error("ansible async", extra=data)
| gpl-3.0 |
fqhuy/minimind | prototypes/test_image_upsampling.py | 1 | 1261 | import numpy as np
import scipy as sp
from scipy.stats import multivariate_normal
from matplotlib import pyplot as plt
def k(x, a=-0.75):
    """Keys bicubic interpolation kernel evaluated at offset ``x``.

    ``a`` is the kernel's free parameter (-0.75 is a common choice);
    support is [-2, 2], zero outside.
    """
    ax = abs(x)
    if ax <= 1:
        return (a + 2.) * ax ** 3 - (a + 3.) * ax ** 2. + 1
    if 1 < ax <= 2:
        return a * ax ** 3. - 5. * a * ax ** 2. + 8. * a * ax - 4. * a
    return 0.0
# x, y = np.mgrid[-1:1:.01, -1:1:.01]
# pos = np.empty(x.shape + (2,))
# pos[:, :, 0] = x; pos[:, :, 1] = y
# rv = multivariate_normal([0.0, 0.0], [[2.0, 0.3], [0.3, 2.0]])
# plt.contourf(x, y, rv.pdf(pos))

# Source image: random 5x5, upsampled by integer factor |alpha|.
A = np.random.rand(5, 5)
B = A.copy()
# A = np.kron(A, np.ones((2, 2), dtype=float))
alpha = 5
C = np.zeros((A.shape[0] * alpha, A.shape[1] * alpha), dtype=float)
# Scatter the source samples onto the upsampled grid (zeros in between).
for i in range(A.shape[0]):
    for j in range(A.shape[1]):
        C[i * alpha, j * alpha] = A[i, j]

# K = np.array([k(xx) for xx in [-2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2]])
K = np.array([k(xx) for xx in np.linspace(-2, 2, 11)])
# Separable reconstruction: filter every row, then every column.
for r in range(C.shape[0]):
    C[r, :] = np.convolve(C[r], K, 'same')
# BUG FIX: this loop used to iterate range(A.shape[1]) and therefore only
# filtered the first 5 of C's 25 columns; it must cover all columns of C.
for c in range(C.shape[1]):
    C[:, c] = np.convolve(C[:, c].flatten(), K, 'same')

plt.subplot(121)
plt.imshow(C, interpolation='bicubic')
plt.subplot(122)
plt.imshow(C, interpolation='nearest')
plt.show()
davislg/Google-s-Python-Class | basic/davis_solution/string2.py | 1 | 2910 | #!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic string exercises
# D. verbing
# Given a string, if its length is at least 3,
# add 'ing' to its end.
# Unless it already ends in 'ing', in which case
# add 'ly' instead.
# If the string length is less than 3, leave it unchanged.
# Return the resulting string.
def verbing(s):
  """Append 'ing' to s (or 'ly' if it already ends in 'ing').

  Strings shorter than 3 characters are returned unchanged.
  """
  if len(s) < 3:
    return s
  suffix = 'ly' if s.endswith('ing') else 'ing'
  return s + suffix
# E. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
  """Replace the first 'not'...'bad' span in s with 'good'.

  The replacement only happens when both substrings are present and the
  first 'bad' appears after the first 'not'; otherwise s is unchanged.
  """
  n = s.find('not')
  b = s.find('bad')
  # find() returns -1 when absent; both checks are required, otherwise a
  # missing 'not' (n == -1) would still satisfy b > n.
  if n != -1 and b != -1 and b > n:
    s = s[:n] + 'good' + s[b + 3:]
  return s
# F. front_back
# Consider dividing a string into two halves.
# If the length is even, the front and back halves are the same length.
# If the length is odd, we'll say that the extra char goes in the front half.
# e.g. 'abcde', the front half is 'abc', the back half 'de'.
# Given 2 strings, a and b, return a string of the form
# a-front + b-front + a-back + b-back
def front_back(a, b):
  """Return a-front + b-front + a-back + b-back.

  Each string is split in half; for odd lengths the extra character goes
  in the front half (e.g. 'abcde' -> 'abc' + 'de').
  """
  # (len + 1) // 2 rounds up, giving the front half the extra character.
  a_mid = (len(a) + 1) // 2
  b_mid = (len(b) + 1) // 2
  return a[:a_mid] + b[:b_mid] + a[a_mid:] + b[b_mid:]
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# main() calls the above functions with interesting inputs,
# using the above test() to check if the result is correct or not.
def main():
  """Exercise the functions above with interesting inputs via test()."""
  # print(...) with a single argument behaves identically on Python 2 and 3,
  # unlike the former Python 2-only print statements.
  print('verbing')
  test(verbing('hail'), 'hailing')
  test(verbing('swiming'), 'swimingly')
  test(verbing('do'), 'do')
  print('')
  print('not_bad')
  test(not_bad('This movie is not so bad'), 'This movie is good')
  test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
  test(not_bad('This tea is not hot'), 'This tea is not hot')
  test(not_bad("It's bad yet not"), "It's bad yet not")
  test(not_bad("This food is bad"), "This food is bad")
  print('')
  print('front_back')
  test(front_back('abcd', 'xy'), 'abxcdy')
  test(front_back('abcde', 'xyz'), 'abcxydez')
  test(front_back('Kitten', 'Donut'), 'KitDontenut')

if __name__ == '__main__':
  main()
Dining-Engineers/left-luggage-detection | misc/demo/ipython/calibkinect.py | 2 | 3942 | """
These are some functions to help work with kinect camera calibration and projective
geometry.
Tasks:
- Convert the kinect depth image to a metric 3D point cloud
- Convert the 3D point cloud to texture coordinates in the RGB image
Notes about the coordinate systems:
There are three coordinate systems to worry about.
1. Kinect depth image:
u,v,depth
u and v are image coordinates, (0,0) is the top left corner of the image
(640,480) is the bottom right corner of the image
depth is the raw 11-bit image from the kinect, where 0 is infinitely far away
and larger numbers are closer to the camera
(2047 indicates an error pixel)
2. Kinect rgb image:
u,v
u and v are image coordinates (0,0) is the top left corner
(640,480) is the bottom right corner
3. XYZ world coordinates:
x,y,z
The 3D world coordinates, in meters, relative to the depth camera.
(0,0,0) is the camera center.
Negative Z values are in front of the camera, and the positive Z direction points
towards the camera.
The X axis points to the right, and the Y axis points up. This is the standard
right-handed coordinate system used by OpenGL.
"""
import numpy as np
def depth2xyzuv(depth, u=None, v=None):
  """
  Return a point cloud, an Nx3 array, made by projecting the kinect depth map
  through intrinsic / extrinsic calibration matrices

  Parameters:
    depth - comes directly from the kinect
    u,v - are image coordinates, same size as depth (default is the original image)

  Returns:
    xyz - 3D world coordinates in meters (Nx3)
    uv - image coordinates for the RGB image (Nx3)

  You can provide only a portion of the depth image, or a downsampled version of
  the depth image if you want; just make sure to provide the correct coordinates
  in the u,v arguments.

  Example:
    # This downsamples the depth image by 2 and then projects to metric point cloud
    u,v = mgrid[:480:2,:640:2]
    xyz,uv = depth2xyzuv(freenect.sync_get_depth()[::2,::2], u, v)

    # This projects only a small region of interest in the upper corner of the depth image
    u,v = mgrid[10:120,50:80]
    xyz,uv = depth2xyzuv(freenect.sync_get_depth()[v,u], u, v)
  """
  if u is None or v is None:
    u,v = np.mgrid[:480,:640]

  # Build a 3xN matrix of the d,u,v data
  # (last row is all ones: homogeneous coordinates)
  C = np.vstack((u.flatten(), v.flatten(), depth.flatten(), 0*u.flatten()+1))

  # Project the duv matrix into xyz using xyz_matrix()
  X,Y,Z,W = np.dot(xyz_matrix(),C)
  # Perspective divide to come back from homogeneous coordinates.
  X,Y,Z = X/W, Y/W, Z/W
  xyz = np.vstack((X,Y,Z)).transpose()
  # Keep only points in front of the camera (negative Z per module docstring).
  xyz = xyz[Z<0,:]

  # Project the duv matrix into U,V rgb coordinates using rgb_matrix() and xyz_matrix()
  U,V,_,W = np.dot(np.dot(uv_matrix(), xyz_matrix()),C)
  U,V = U/W, V/W
  uv = np.vstack((U,V)).transpose()
  # Apply the same in-front-of-camera mask so xyz and uv stay aligned.
  uv = uv[Z<0,:]

  # Return both the XYZ coordinates and the UV coordinates
  return xyz, uv
def uv_matrix():
  """
  Returns a matrix you can use to project XYZ coordinates (in meters) into
  U,V coordinates in the kinect RGB image
  """
  # Depth-to-RGB extrinsics: rotation and translation between the sensors.
  rot = np.array([[ 9.99846e-01, -1.26353e-03, 1.74872e-02],
                  [-1.4779096e-03, -9.999238e-01, 1.225138e-02],
                  [1.747042e-02, -1.227534e-02, -9.99772e-01]])
  trans = np.array([[1.9985e-02, -7.44237e-04,-1.0916736e-02]])
  extrinsics = np.vstack((np.hstack((rot, -trans.transpose())),
                          np.array([[0,0,0,1]])))
  # RGB camera intrinsics (focal lengths and principal point).
  KK = np.array([[529.2, 0, 329, 0],
                 [0, 525.6, 267.5, 0],
                 [0, 0, 0, 1],
                 [0, 0, 1, 0]])
  return np.dot(KK, extrinsics)
def xyz_matrix():
  """Projective matrix mapping kinect (u, v, depth, 1) to homogeneous XYZW."""
  # Depth-camera intrinsics: focal lengths, raw-depth-to-meters linearization
  # coefficients, and principal point.
  fx, fy = 594.21, 591.04
  a, b = -0.0030711, 3.3309495
  cx, cy = 339.5, 242.7
  return np.array([[1 / fx, 0, 0, -cx / fx],
                   [0, -1 / fy, 0, cy / fy],
                   [0, 0, 0, -1],
                   [0, 0, a, b]])
Leoniela/nipype | nipype/interfaces/cmtk/tests/test_auto_CreateMatrix.py | 14 | 1926 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.cmtk.cmtk import CreateMatrix
def test_CreateMatrix_inputs():
    # AUTO-GENERATED (tools/checkspecs.py): verifies that every declared
    # input trait of CreateMatrix carries the expected metadata flags.
    input_map = dict(count_region_intersections=dict(usedefault=True,
    ),
    out_endpoint_array_name=dict(genfile=True,
    ),
    out_fiber_length_std_matrix_mat_file=dict(genfile=True,
    ),
    out_intersection_matrix_mat_file=dict(genfile=True,
    ),
    out_matrix_file=dict(genfile=True,
    ),
    out_matrix_mat_file=dict(usedefault=True,
    ),
    out_mean_fiber_length_matrix_mat_file=dict(genfile=True,
    ),
    out_median_fiber_length_matrix_mat_file=dict(genfile=True,
    ),
    resolution_network_file=dict(mandatory=True,
    ),
    roi_file=dict(mandatory=True,
    ),
    tract_file=dict(mandatory=True,
    ),
    )
    inputs = CreateMatrix.input_spec()

    # nose-style generator test: one assertion per (trait, metadata) pair.
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_CreateMatrix_outputs():
    # AUTO-GENERATED (tools/checkspecs.py): verifies that CreateMatrix
    # declares exactly these output traits (no extra metadata expected).
    output_map = dict(endpoint_file=dict(),
    endpoint_file_mm=dict(),
    fiber_label_file=dict(),
    fiber_labels_noorphans=dict(),
    fiber_length_file=dict(),
    fiber_length_std_matrix_mat_file=dict(),
    filtered_tractographies=dict(),
    filtered_tractography=dict(),
    filtered_tractography_by_intersections=dict(),
    intersection_matrix_file=dict(),
    intersection_matrix_mat_file=dict(),
    matlab_matrix_files=dict(),
    matrix_file=dict(),
    matrix_files=dict(),
    matrix_mat_file=dict(),
    mean_fiber_length_matrix_mat_file=dict(),
    median_fiber_length_matrix_mat_file=dict(),
    stats_file=dict(),
    )
    outputs = CreateMatrix.output_spec()

    # nose-style generator test: one assertion per (trait, metadata) pair.
    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
brentdax/swift | utils/gyb_syntax_support/Trivia.py | 3 | 3161 | from Node import error
from kinds import lowercase_first_word
class Trivia(object):
    """Metadata describing one kind of syntax trivia (whitespace, comments).

    Attributes mirror the constructor arguments; lower_name is derived for
    code generation.
    """

    def __init__(self, name, comment, serialization_code, characters=None,
                 swift_characters=None, is_new_line=False, is_comment=False):
        self.name = name
        self.comment = comment
        self.serialization_code = serialization_code
        # FIX: previous signature used mutable list defaults ([]), which are
        # shared across all calls; None sentinels avoid that pitfall while
        # remaining backward compatible for every caller.
        self.characters = characters if characters is not None else []
        self.lower_name = lowercase_first_word(name)
        self.is_new_line = is_new_line
        self.is_comment = is_comment

        # Swift sometimes doesn't support escaped characters like \f or \v;
        # we should allow specifying alternatives explicitly.
        if swift_characters:
            self.swift_characters = swift_characters
        else:
            self.swift_characters = self.characters
        assert len(self.swift_characters) == len(self.characters)

    def characters_len(self):
        """Number of characters this trivia kind consists of."""
        return len(self.characters)

    def is_collection(self):
        """True if the trivia has a fixed character representation."""
        return self.characters_len() > 0
# Master list of all trivia kinds; serialization codes must be unique
# (checked at import time by verify_no_duplicate_serialization_codes).
TRIVIAS = [
    Trivia('Space', 'A space \' \' character.', characters=[' '],
           serialization_code=0),
    Trivia('Tab', 'A tab \'\\t\' character.', characters=['\\t'],
           serialization_code=1),
    Trivia('VerticalTab', 'A vertical tab \'\\v\' character.',
           characters=['\\v'], swift_characters=['\\u{2B7F}'],
           serialization_code=2),
    Trivia('Formfeed', 'A form-feed \'f\' character.', characters=['\\f'],
           swift_characters=['\\u{240C}'], serialization_code=3),
    Trivia('Newline', 'A newline \'\\n\' character.', characters=['\\n'],
           is_new_line=True, serialization_code=4),
    Trivia('CarriageReturn', 'A newline \'\\r\' character.',
           characters=['\\r'], is_new_line=True, serialization_code=5),
    Trivia('CarriageReturnLineFeed',
           'A newline consists of contiguous \'\\r\' and \'\\n\' characters.',
           characters=['\\r', '\\n'], is_new_line=True, serialization_code=6),
    Trivia('Backtick',
           'A backtick \'`\' character, used to escape identifiers.',
           characters=['`'], serialization_code=7),
    Trivia('LineComment', 'A developer line comment, starting with \'//\'',
           is_comment=True, serialization_code=8),
    Trivia('BlockComment',
           'A developer block comment, starting with \'/*\' and ending with'
           ' \'*/\'.',
           is_comment=True, serialization_code=9),
    Trivia('DocLineComment',
           'A documentation line comment, starting with \'///\'.',
           is_comment=True, serialization_code=10),
    Trivia('DocBlockComment',
           'A documentation block comment, starting with \'/**\' and ending '
           'with \'*/\'.',
           is_comment=True, serialization_code=11),
    Trivia('GarbageText', 'Any skipped garbage text.', serialization_code=12),
]
def verify_no_duplicate_serialization_codes(trivias):
    """Report an error if two trivia kinds share a serialization code."""
    seen = set()
    for trivia in trivias:
        code = trivia.serialization_code
        if code in seen:
            error("Serialization code %d used twice for trivia" % code)
        seen.add(code)
# Sanity-check TRIVIAS at import time.
verify_no_duplicate_serialization_codes(TRIVIAS)
| apache-2.0 |
MarcoXerox/NetSDK | FacebookSession.py | 1 | 6023 | import re, itertools, utils, Base
from bs4 import BeautifulSoup as BS
class FacebookSession(Base.WebSession):
''' Session for Facebook. Check attribute METHODS for methods it supports.
FacebookHandle comes with multisession support.
'''
# Constant values of FacebookSession
HOME = 'https://m.facebook.com/'
PROF = HOME + 'profile.php'
TABS = 'timeline friends info photos likes followers'.split()
INFO_ATTRS = 'Mobile Address Facebook Birthday Gender Languages Hometown'.split()
METHODS = 'log_out profile is_private friends info likes shares'.split()
    def __init__(self, login, passwd):
        """Open a session logged in to m.facebook.com with the credentials."""
        url = self.HOME + 'login.php'
        # fb_noscript=0 requests the scripted flow instead of the noscript one.
        params = {'fb_noscript':'0', 'email':login, 'pass':passwd}
        super().__init__(url_login=url, url_auth=url, params=params)
    def log_out(self):
        """End the session by POSTing the logout link scraped from the home page."""
        doc = self.access(self.HOME)
        # The logout URL (which embeds a one-time token) appears inline in the
        # HTML; locate it by substring search.  NOTE(review): if the substring
        # is absent, find() returns -1 and the slice below is bogus — assumes
        # the page always contains '/logout.php' for a logged-in session.
        start = doc.find('/logout.php')
        end = doc.find('">', start)
        self.session.post(self.HOME + doc[start : end])
def profile(self, personID, tab):
''' Shortcut to access profile '''
assert tab in self.TABS
return self.access(self.PROF, params={'id':personID, 'v':tab})
    def id_from_vanity(self, vanity):
        ''' Return personID from given vanity.
            Significantly slower than its inverse.
        '''
        doc = self.access('%s%s?v=following' % (self.HOME, vanity))
        # The numeric id is embedded in a pagination link; 23 is the length
        # of the '/mbasic/more/?owner_id=' prefix being skipped.
        start = doc.find('/mbasic/more/?owner_id=')
        end = doc.find('"', start)
        return doc[start + 23 : end]
    def vanity_from_id(self, personID):
        ''' Return vanity from given personID.
            Significantly faster than its inverse.
        '''
        DESK_PROF = 'https://www.facebook.com/profile.php'
        resp = self.session.head(DESK_PROF, params={'id': personID})
        # A redirect's Location header is assumed to look like
        # 'https://www.facebook.com/<vanity>'; [25:] strips the 25-char
        # origin prefix.  Returns None when no redirect occurs.
        return resp.headers['location'][25:] if resp.is_redirect else None
    def _friends_document_from_tab(self, personID, mutual, startindex):
        '''Primitive method: return HTML from Friends Tab'''
        # startindex pages through the list; mutual toggles mutual-only view.
        return self.access(self.PROF, {'v':'friends', 'id':personID, 'mutual':mutual, 'startindex':startindex})
def friends(self, personID, mutual=None):
''' How to optimize further? '''
mutual = self.is_private(personID) if mutual is None else mutual
NFD = 24 # Number of friends displayed if include non-mutuals.
FPP = 36 # Friends per page
doc = self._friends_document_from_tab(personID, mutual, 0)
_n = re.search('Friends\s\(([0-9\,]*)\)', doc)
if _n is None:
return list()
num = int(_n.group(1).replace(',',''))
return self._friends_from_doc(doc) + [item for pg in range(FPP if mutual else NFD, num, FPP) for item in self._friends_from_tab(personID, mutual, pg)]
def _friends_from_tab(self, personID, mutual, startindex):
''' _friends_from_tab = _friends_from_doc . _friends_document_from_tab '''
return self._friends_from_doc(self._friends_document_from_tab(personID, mutual, startindex))
def _friends_from_doc(self, document):
soup = BS(document, Base.PARSER)
add_friend = '/a/mobile/friends/add_friend.php?id='
id_prefix = '/profile.php?id='
results = list()
for x in soup('table')[1:-4]:
try:
name = x.img['alt']
vnORid, *MaybeID = [y['href'] for y in x('a')]
except (TypeError, KeyError):
continue
pid = MaybeID.pop() if MaybeID else None
personID = None
if pid is not None and pid.startswith(add_friend):
amp_idx = pid.find('&')
personID = pid[36:amp_idx]
vanity = vnORid[1:vnORid.find('?fref')]
correct = '/' not in vanity and '?' not in vanity
result = None
if vnORid.startswith(id_prefix):
result = (name, vnORid[16:-21], None)
elif correct:
result = (name, personID, vanity)
if result is not None:
results.append(result)
return results
def info(self, personID):
''' Return a dictionary with all elements of INFO_ATTRS as keys.'''
soup = BS(self.profile(personID, 'info'), Base.PARSER)
info = dict()
def fn(string, attribute):
# Handles different attributes accordingly
q = utils.remove_substr(string, attribute)
d = {'Birthday' : utils.Birthdate,
'Facebook' : utils.drop_two,
'Languages': utils.list_langs}
if attribute not in d:
return q
return d[attribute](q)
for attribute in self.INFO_ATTRS:
query = soup.find('div', attrs={'title' : attribute})
info[attribute.lower()] = None if query is None else fn(query.text, attribute)
info['name'] = soup.title.text
return info
def likes(self, personID):
''' Return a set of names of pages liked by personID
Facebook has different profile IDs for
equivalent or similar pages.
'''
soup = BS(self.profile(personID, 'likes'), Base.PARSER)
extras = [soup.title.text, 'See more', '', 'Ask', 'Request sent', 'AskRequest sent']
return {t for t in map(lambda s: s.text, soup('span')) if t not in extras}
def shares(self, personID):
''' Return a list of lists of people whom personID
shared anything with. Currently only support front page.
'''
soup = BS(self.profile(personID, 'timeline'), Base.PARSER)
browse_regex = '\/browse\/users\/\?ids=([0-9C%]*)\&'
res = (re.match(browse_regex, t) for t in map(lambda x: x.get('href'), soup('a')) if t is not None)
return [s.group(1).split('%2C') for s in res if s]
def is_private(self, personID):
'''Return whether friend list of personID is private or not.'''
return not self._friends_from_tab(personID, False, 1)
| gpl-3.0 |
aerophile/django | django/utils/dateparse.py | 63 | 3985 | """Functions to parse datetime objects."""
# We're using regular expressions rather than time.strptime because:
# - They provide both validation and parsing.
# - They're more flexible for datetimes.
# - The date/datetime/time constructors produce friendlier error messages.
import datetime
import re
from django.utils import six
from django.utils.timezone import get_fixed_timezone, utc
# ISO-style date: YYYY-M-D (month/day may be one or two digits).
date_re = re.compile(
    r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})$'
)
# H:M[:S[.ffffff]] -- no time zone offset; extra fraction digits are dropped.
time_re = re.compile(
    r'(?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
    r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
)
# Date + 'T' or space + time, with an optional 'Z' / +-HH[:MM] offset.
datetime_re = re.compile(
    r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
    r'[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
    r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
    r'(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$'
)
# Django's own duration format: '%d %H:%M:%S.%f' (days and fraction optional).
standard_duration_re = re.compile(
    r'^'
    r'(?:(?P<days>-?\d+) )?'
    r'((?:(?P<hours>\d+):)(?=\d+:\d+))?'
    r'(?:(?P<minutes>\d+):)?'
    r'(?P<seconds>\d+)'
    r'(?:\.(?P<microseconds>\d{1,6})\d{0,6})?'
    r'$'
)
# Support the sections of ISO 8601 date representation that are accepted by
# timedelta
iso8601_duration_re = re.compile(
    r'^P'
    r'(?:(?P<days>\d+(.\d+)?)D)?'
    r'(?:T'
    r'(?:(?P<hours>\d+(.\d+)?)H)?'
    r'(?:(?P<minutes>\d+(.\d+)?)M)?'
    r'(?:(?P<seconds>\d+(.\d+)?)S)?'
    r')?'
    r'$'
)
def parse_date(value):
    """Parse a string into a datetime.date.

    Raises ValueError if the input is well formatted but not a valid date.
    Returns None if the input isn't well formatted.
    """
    matched = date_re.match(value)
    if not matched:
        return None
    fields = {name: int(text)
              for name, text in six.iteritems(matched.groupdict())}
    return datetime.date(**fields)
def parse_time(value):
    """Parse a string into a datetime.time.

    This function doesn't support time zone offsets.
    Raises ValueError if the input is well formatted but not a valid time.
    Returns None if the input isn't well formatted, in particular if it
    contains an offset.
    """
    matched = time_re.match(value)
    if not matched:
        return None
    groups = matched.groupdict()
    fraction = groups['microsecond']
    if fraction:
        # Right-pad the fraction so e.g. ".5" means 500000 microseconds.
        groups['microsecond'] = fraction.ljust(6, '0')
    fields = {name: int(text)
              for name, text in six.iteritems(groups) if text is not None}
    return datetime.time(**fields)
def parse_datetime(value):
    """Parse a string into a datetime.datetime.

    This function supports time zone offsets. When the input contains one,
    the output uses a timezone with a fixed offset from UTC.
    Raises ValueError if the input is well formatted but not a valid datetime.
    Returns None if the input isn't well formatted.
    """
    matched = datetime_re.match(value)
    if not matched:
        return None
    groups = matched.groupdict()
    if groups['microsecond']:
        # Right-pad the fraction so e.g. ".5" means 500000 microseconds.
        groups['microsecond'] = groups['microsecond'].ljust(6, '0')
    offset_text = groups.pop('tzinfo')
    if offset_text == 'Z':
        tz = utc
    elif offset_text is not None:
        # "+HH", "+HHMM" or "+HH:MM" -> signed offset in minutes.
        extra_minutes = int(offset_text[-2:]) if len(offset_text) > 3 else 0
        total_minutes = 60 * int(offset_text[1:3]) + extra_minutes
        if offset_text[0] == '-':
            total_minutes = -total_minutes
        tz = get_fixed_timezone(total_minutes)
    else:
        tz = None
    fields = {name: int(text)
              for name, text in six.iteritems(groups) if text is not None}
    fields['tzinfo'] = tz
    return datetime.datetime(**fields)
def parse_duration(value):
    """Parse a duration string and return a datetime.timedelta.

    The preferred format for durations in Django is '%d %H:%M:%S.%f'.
    Also supports ISO 8601 representation.
    """
    matched = standard_duration_re.match(value) or iso8601_duration_re.match(value)
    if not matched:
        return None
    groups = matched.groupdict()
    if groups.get('microseconds'):
        # Right-pad the fraction so e.g. ".5" means 500000 microseconds.
        groups['microseconds'] = groups['microseconds'].ljust(6, '0')
    fields = {name: float(text)
              for name, text in six.iteritems(groups) if text is not None}
    return datetime.timedelta(**fields)
| bsd-3-clause |
BeegorMif/HTPC-Manager | lib/hachoir_parser/audio/xm.py | 90 | 15154 | """
Parser of FastTrackerII Extended Module (XM) version 1.4
Documents:
- Modplug source code (file modplug/soundlib/Load_xm.cpp)
http://sourceforge.net/projects/modplug
- Dumb source code (files include/dumb.h and src/it/readxm.c
http://dumb.sf.net/
- Documents of "XM" format on Wotsit
http://www.wotsit.org
Author: Christophe GISQUET <christophe.gisquet@free.fr>
Creation: 8th February 2007
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (StaticFieldSet, FieldSet,
Bit, RawBits, Bits,
UInt32, UInt16, UInt8, Int8, Enum,
RawBytes, String, GenericVector)
from lib.hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN
from lib.hachoir_core.text_handler import textHandler, filesizeHandler, hexadecimal
from lib.hachoir_parser.audio.modplug import ParseModplugMetadata
from lib.hachoir_parser.common.tracker import NOTE_NAME
def parseSigned(val):
    """Render a field whose raw byte is biased by 128 as a signed string."""
    raw = val.value
    return "%i" % (raw - 128)
# From dumb
SEMITONE_BASE = 1.059463094359295309843105314939748495817  # 2**(1/12)
PITCH_BASE = 1.000225659305069791926712241547647863626  # 2**(1/3072): one fine-tune step
SAMPLE_LOOP_MODE = ("No loop", "Forward loop", "Ping-pong loop", "Undef")
class SampleType(FieldSet):
    """Sample flag byte: 8/16-bit width and loop mode."""
    static_size = 8
    def createFields(self):
        yield Bits(self, "unused[]", 4)
        yield Bit(self, "16bits")
        yield Bits(self, "unused[]", 1)
        yield Enum(Bits(self, "loop_mode", 2), SAMPLE_LOOP_MODE)
class SampleHeader(FieldSet):
    """40-byte sample header: length, loop points, volume, tuning and name."""
    static_size = 40*8
    def createFields(self):
        yield UInt32(self, "length")
        yield UInt32(self, "loop_start")
        yield UInt32(self, "loop_end")
        yield UInt8(self, "volume")
        yield Int8(self, "fine_tune")
        yield SampleType(self, "type")
        yield UInt8(self, "panning")
        yield Int8(self, "relative_note")
        yield UInt8(self, "reserved")
        yield String(self, "name", 22, charset="ASCII", strip=' \0')
    def createValue(self):
        # 16-bit samples store two bytes per sample point.
        bytes = 1+self["type/16bits"].value
        # C-5 playback rate derived from relative note and fine tune
        # (constants taken from the dumb library, see module docstring).
        C5_speed = int(16726.0*pow(SEMITONE_BASE, self["relative_note"].value)
                       *pow(PITCH_BASE, self["fine_tune"].value*2))
        return "%s, %ubits, %u samples, %uHz" % \
            (self["name"].display, 8*bytes, self["length"].value/bytes, C5_speed)
class StuffType(StaticFieldSet):
    """Envelope flag byte: enable ("on"), sustain and loop bits."""
    format = (
        (Bits, "unused", 5),
        (Bit, "loop"),
        (Bit, "sustain"),
        (Bit, "on")
    )
class InstrumentSecondHeader(FieldSet):
    """Second part of the instrument header: note->sample map, volume and
    panning envelopes, vibrato settings and fadeout (fixed 234 bytes)."""
    static_size = 234*8
    def createFields(self):
        yield UInt32(self, "sample_header_size")
        yield GenericVector(self, "notes", 96, UInt8, "sample")
        yield GenericVector(self, "volume_envelope", 24, UInt16, "point")
        yield GenericVector(self, "panning_envelope", 24, UInt16, "point")
        yield UInt8(self, "volume_points", r"Number of volume points")
        yield UInt8(self, "panning_points", r"Number of panning points")
        yield UInt8(self, "volume_sustain_point")
        yield UInt8(self, "volume_loop_start_point")
        yield UInt8(self, "volume_loop_end_point")
        yield UInt8(self, "panning_sustain_point")
        yield UInt8(self, "panning_loop_start_point")
        yield UInt8(self, "panning_loop_end_point")
        yield StuffType(self, "volume_type")
        yield StuffType(self, "panning_type")
        yield UInt8(self, "vibrato_type")
        yield UInt8(self, "vibrato_sweep")
        yield UInt8(self, "vibrato_depth")
        yield UInt8(self, "vibrato_rate")
        yield UInt16(self, "volume_fadeout")
        yield GenericVector(self, "reserved", 11, UInt16, "word")
def createInstrumentContentSize(s, addr):
    """Return the full size (in bits) of the instrument at bit address *addr*.

    Covers both header parts plus every sample payload; *s* is any field
    that gives access to the parser stream.
    """
    start = addr
    # Sample count lives 27 bytes into the instrument header.
    samples = s.stream.readBits(addr+27*8, 16, LITTLE_ENDIAN)
    # Seek to end of header (1st + 2nd part)
    addr += 8*s.stream.readBits(addr, 32, LITTLE_ENDIAN)
    sample_size = 0
    if samples:
        for index in xrange(samples):
            # Read the sample size from the header
            sample_size += s.stream.readBits(addr, 32, LITTLE_ENDIAN)
            # Seek to next sample header
            addr += SampleHeader.static_size
    return addr - start + 8*sample_size
class Instrument(FieldSet):
    """Instrument: headers, sample headers and delta-encoded sample data."""
    def __init__(self, parent, name):
        FieldSet.__init__(self, parent, name)
        # The total size must be computed up-front by peeking at the stream.
        self._size = createInstrumentContentSize(self, self.absolute_address)
        self.info(self.createDescription())
    # Seems to fix things...
    def fixInstrumentHeader(self):
        # Skip padding between the declared header size and what was parsed.
        size = self["size"].value - self.current_size//8
        if size:
            yield RawBytes(self, "unknown_data", size)
    def createFields(self):
        yield UInt32(self, "size")
        yield String(self, "name", 22, charset="ASCII", strip=" \0")
        # Doc says type is always 0, but I've found values of 24 and 96 for
        # the _same_ song here, just different download sources for the file
        yield UInt8(self, "type")
        yield UInt16(self, "samples")
        num = self["samples"].value
        self.info(self.createDescription())
        if num:
            yield InstrumentSecondHeader(self, "second_header")
            for field in self.fixInstrumentHeader():
                yield field
            # This part probably wrong
            sample_size = [ ]
            for index in xrange(num):
                sample = SampleHeader(self, "sample_header[]")
                yield sample
                sample_size.append(sample["length"].value)
            for size in sample_size:
                if size:
                    yield RawBytes(self, "sample_data[]", size, "Deltas")
        else:
            for field in self.fixInstrumentHeader():
                yield field
    def createDescription(self):
        return "Instrument '%s': %i samples, header %i bytes" % \
            (self["name"].value, self["samples"].value, self["size"].value)
# Volume-column effect names, indexed by (high nibble - 6) for values >= 0x60.
VOLUME_NAME = (
    "Volume slide down", "Volume slide up", "Fine volume slide down",
    "Fine volume slide up", "Set vibrato speed", "Vibrato",
    "Set panning", "Panning slide left", "Panning slide right",
    "Tone porta", "Unhandled")
def parseVolume(val):
    """Render the XM volume-column byte of field *val* as readable text.

    0x10-0x50 encodes a literal volume 0-64; higher values select one of
    the VOLUME_NAME effects by their high nibble.
    """
    val = val.value
    if 0x10<=val<=0x50:
        # Bug fix: '%' binds tighter than '-', so the original
        # '"Volume %i" % val-16' tried to subtract 16 from a string
        # (TypeError). Subtract before formatting.
        return "Volume %i" % (val - 16)
    else:
        # Floor division keeps integer indexing on both Python 2 and 3.
        return VOLUME_NAME[val // 16 - 6]
class RealBit(RawBits):
    """A single bit read most-significant-bit first (BIG_ENDIAN), unlike the
    parser's default little-endian bit order."""
    static_size = 1
    def __init__(self, parent, name, description=None):
        RawBits.__init__(self, parent, name, 1, description=description)
    def createValue(self):
        # Force BIG_ENDIAN so the flag is taken from the MSB side of the byte.
        return self._parent.stream.readBits(self.absolute_address, 1, BIG_ENDIAN)
class NoteInfo(StaticFieldSet):
    """Presence flags of a packed note: which of the five data bytes follow."""
    format = (
        (RawBits, "unused", 2),
        (RealBit, "has_parameter"),
        (RealBit, "has_type"),
        (RealBit, "has_volume"),
        (RealBit, "has_instrument"),
        (RealBit, "has_note")
    )
# Standard effect names indexed by effect number; the None entries (0x0E and
# the last slot) are rendered specially by Effect.createValue below.
EFFECT_NAME = (
    "Arppegio", "Porta up", "Porta down", "Tone porta", "Vibrato",
    "Tone porta+Volume slide", "Vibrato+Volume slide", "Tremolo",
    "Set panning", "Sample offset", "Volume slide", "Position jump",
    "Set volume", "Pattern break", None, "Set tempo/BPM",
    "Set global volume", "Global volume slide", "Unused", "Unused",
    "Unused", "Set envelope position", "Unused", "Unused",
    "Panning slide", "Unused", "Multi retrig note", "Unused",
    "Tremor", "Unused", "Unused", "Unused", None)
# Sub-effects of effect 0x0E, indexed by the parameter's high nibble.
EFFECT_E_NAME = (
    "Unknown", "Fine porta up", "Fine porta down",
    "Set gliss control", "Set vibrato control", "Set finetune",
    "Set loop begin/loop", "Set tremolo control", "Retrig note",
    "Fine volume slide up", "Fine volume slide down", "Note cut",
    "Note delay", "Pattern delay")
class Effect(RawBits):
    """Effect-type byte whose display value also consumes the following
    parameter byte to build a human-readable description."""
    def __init__(self, parent, name):
        RawBits.__init__(self, parent, name, 8)
    def createValue(self):
        t = self.parent.stream.readBits(self.absolute_address, 8, LITTLE_ENDIAN)
        param = self.parent.stream.readBits(self.absolute_address+8, 8, LITTLE_ENDIAN)
        if t == 0x0E:
            # NOTE(review): EFFECT_E_NAME has only 14 entries while param>>4
            # can be up to 15 (possible IndexError), and masking with 0x07
            # drops the top bit of the low nibble -- the XM spec keeps the
            # sub-parameter in the full low nibble (0x0F). Confirm intent.
            return EFFECT_E_NAME[param>>4] + " %i" % (param&0x07)
        elif t == 0x21:
            return ("Extra fine porta up", "Extra fine porta down")[param>>4]
        else:
            return EFFECT_NAME[t]
class Note(FieldSet):
    """One note cell: note/instrument/volume/effect/parameter, possibly
    packed (bit 7 of the first byte set => only flagged bytes follow)."""
    def __init__(self, parent, name, desc=None):
        FieldSet.__init__(self, parent, name, desc)
        self.flags = self.stream.readBits(self.absolute_address, 8, LITTLE_ENDIAN)
        if self.flags&0x80:
            # TODO: optimize bitcounting with a table:
            # http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetTable
            # Packed: one flag byte plus one byte per set presence bit.
            self._size = 8
            if self.flags&0x01: self._size += 8
            if self.flags&0x02: self._size += 8
            if self.flags&0x04: self._size += 8
            if self.flags&0x08: self._size += 8
            if self.flags&0x10: self._size += 8
        else:
            # Unpacked: all five bytes are present.
            self._size = 5*8
    def createFields(self):
        # This stupid shit gets the LSB, not the MSB...
        self.info("Note info: 0x%02X" %
                  self.stream.readBits(self.absolute_address, 8, LITTLE_ENDIAN))
        yield RealBit(self, "is_extended")
        if self["is_extended"].value:
            info = NoteInfo(self, "info")
            yield info
            if info["has_note"].value:
                yield Enum(UInt8(self, "note"), NOTE_NAME)
            if info["has_instrument"].value:
                yield UInt8(self, "instrument")
            if info["has_volume"].value:
                yield textHandler(UInt8(self, "volume"), parseVolume)
            if info["has_type"].value:
                yield Effect(self, "effect_type")
            if info["has_parameter"].value:
                yield textHandler(UInt8(self, "effect_parameter"), hexadecimal)
        else:
            yield Enum(Bits(self, "note", 7), NOTE_NAME)
            yield UInt8(self, "instrument")
            yield textHandler(UInt8(self, "volume"), parseVolume)
            yield Effect(self, "effect_type")
            yield textHandler(UInt8(self, "effect_parameter"), hexadecimal)
    def createDescription(self):
        if "info" in self:
            info = self["info"]
            desc = []
            if info["has_note"].value:
                desc.append(self["note"].display)
            if info["has_instrument"].value:
                desc.append("instrument %i" % self["instrument"].value)
            if info["has_volume"].value:
                # NOTE(review): likely meant self["volume"].display here --
                # "has_volume" is the presence flag, not the value; confirm.
                desc.append(self["has_volume"].display)
            if info["has_type"].value:
                desc.append("effect %s" % self["effect_type"].value)
            if info["has_parameter"].value:
                desc.append("parameter %i" % self["effect_parameter"].value)
        else:
            # NOTE(review): same "has_volume" concern as above.
            desc = (self["note"].display, "instrument %i" % self["instrument"].value,
                    self["has_volume"].display, "effect %s" % self["effect_type"].value,
                    "parameter %i" % self["effect_parameter"].value)
        if desc:
            return "Note %s" % ", ".join(desc)
        else:
            return "Note"
class Row(FieldSet):
    """One pattern row: a Note per channel (channel count from the header)."""
    def createFields(self):
        for index in xrange(self["/header/channels"].value):
            yield Note(self, "note[]")
def createPatternContentSize(s, addr):
    """Return the pattern size in bits: header length plus the packed data
    size stored 7 bytes into the pattern header."""
    return 8*(s.stream.readBits(addr, 32, LITTLE_ENDIAN) +
              s.stream.readBits(addr+7*8, 16, LITTLE_ENDIAN))
class Pattern(FieldSet):
    """A pattern: 9-byte header followed by its packed rows."""
    def __init__(self, parent, name, desc=None):
        FieldSet.__init__(self, parent, name, desc)
        # Total size must be known before the fields are generated.
        self._size = createPatternContentSize(self, self.absolute_address)
    def createFields(self):
        yield UInt32(self, "header_size", r"Header length (9)")
        yield UInt8(self, "packing_type", r"Packing type (always 0)")
        yield UInt16(self, "rows", r"Number of rows in pattern (1..256)")
        yield UInt16(self, "data_size", r"Packed patterndata size")
        rows = self["rows"].value
        self.info("Pattern: %i rows" % rows)
        for index in xrange(rows):
            yield Row(self, "row[]")
    def createDescription(self):
        return "Pattern with %i rows" % self["rows"].value
class Header(FieldSet):
    """Fixed 336-byte XM file header (signature, counts, tempo, order table)."""
    MAGIC = "Extended Module: "
    static_size = 336*8
    def createFields(self):
        yield String(self, "signature", 17, "XM signature", charset="ASCII")
        yield String(self, "title", 20, "XM title", charset="ASCII", strip=' ')
        yield UInt8(self, "marker", "Marker (0x1A)")
        yield String(self, "tracker_name", 20, "XM tracker name", charset="ASCII", strip=' ')
        yield UInt8(self, "format_minor")
        yield UInt8(self, "format_major")
        yield filesizeHandler(UInt32(self, "header_size", "Header size (276)"))
        yield UInt16(self, "song_length", "Length in patten order table")
        yield UInt16(self, "restart", "Restart position")
        yield UInt16(self, "channels", "Number of channels (2,4,6,8,10,...,32)")
        yield UInt16(self, "patterns", "Number of patterns (max 256)")
        yield UInt16(self, "instruments", "Number of instruments (max 128)")
        yield Bit(self, "amiga_ftable", "Amiga frequency table")
        yield Bit(self, "linear_ftable", "Linear frequency table")
        yield Bits(self, "unused", 14)
        yield UInt16(self, "tempo", "Default tempo")
        yield UInt16(self, "bpm", "Default BPM")
        yield GenericVector(self, "pattern_order", 256, UInt8, "order")
    def createDescription(self):
        return "'%s' by '%s'" % (
            self["title"].value, self["tracker_name"].value)
class XMModule(Parser):
    """Top-level parser for FastTrackerII Extended Module (XM) files."""
    PARSER_TAGS = {
        "id": "fasttracker2",
        "category": "audio",
        "file_ext": ("xm",),
        "mime": (
            u'audio/xm', u'audio/x-xm',
            u'audio/module-xm', u'audio/mod', u'audio/x-mod'),
        "magic": ((Header.MAGIC, 0),),
        "min_size": Header.static_size +29*8, # Header + 1 empty instrument
        "description": "FastTracker2 module"
    }
    endian = LITTLE_ENDIAN
    def validate(self):
        # Cheap sanity checks: magic string and the documented header size.
        header = self.stream.readBytes(0, 17)
        if header != Header.MAGIC:
            return "Invalid signature '%s'" % header
        if self["/header/header_size"].value != 276:
            return "Unknown header size (%u)" % self["/header/header_size"].value
        return True
    def createFields(self):
        yield Header(self, "header")
        for index in xrange(self["/header/patterns"].value):
            yield Pattern(self, "pattern[]")
        for index in xrange(self["/header/instruments"].value):
            yield Instrument(self, "instrument[]")
        # Metadata added by ModPlug - can be discarded
        for field in ParseModplugMetadata(self):
            yield field
    def createContentSize(self):
        # Header size
        size = Header.static_size
        # Add patterns size
        for index in xrange(self["/header/patterns"].value):
            size += createPatternContentSize(self, size)
        # Add instruments size
        for index in xrange(self["/header/instruments"].value):
            size += createInstrumentContentSize(self, size)
        # Not reporting Modplug metadata
        return size
    def createDescription(self):
        return self["header"].description
| gpl-3.0 |
dvliman/jaikuengine | .google_appengine/lib/cherrypy/cherrypy/test/test_static.py | 42 | 11354 | from cherrypy._cpcompat import HTTPConnection, HTTPSConnection, ntob
from cherrypy._cpcompat import BytesIO
import os
curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
# Fixture files created on the fly by StaticTest.setup_server().
has_space_filepath = os.path.join(curdir, 'static', 'has space.html')
bigfile_filepath = os.path.join(curdir, "static", "bigfile.log")
BIGFILE_SIZE = 1024 * 1024  # 1 MiB
import threading
import cherrypy
from cherrypy.lib import static
from cherrypy.test import helper
class StaticTest(helper.CPWebCase):
    """Functional tests for CherryPy's staticdir/staticfile tools."""

    def setup_server():
        # Create the fixture files (removed again in teardown_server).
        # Fix: use context managers so the file handles are not leaked.
        if not os.path.exists(has_space_filepath):
            with open(has_space_filepath, 'wb') as f:
                f.write(ntob('Hello, world\r\n'))
        if not os.path.exists(bigfile_filepath):
            with open(bigfile_filepath, 'wb') as f:
                f.write(ntob("x" * BIGFILE_SIZE))
        class Root:
            def bigfile(self):
                from cherrypy.lib import static
                self.f = static.serve_file(bigfile_filepath)
                return self.f
            bigfile.exposed = True
            bigfile._cp_config = {'response.stream': True}
            def tell(self):
                # Report how far the streamed file has been read, or '' once
                # the server has closed it.
                if self.f.input.closed:
                    return ''
                return repr(self.f.input.tell()).rstrip('L')
            tell.exposed = True
            def fileobj(self):
                f = open(os.path.join(curdir, 'style.css'), 'rb')
                return static.serve_fileobj(f, content_type='text/css')
            fileobj.exposed = True
            def bytesio(self):
                f = BytesIO(ntob('Fee\nfie\nfo\nfum'))
                return static.serve_fileobj(f, content_type='text/plain')
            bytesio.exposed = True
        class Static:
            def index(self):
                return 'You want the Baron? You can have the Baron!'
            index.exposed = True
            def dynamic(self):
                return "This is a DYNAMIC page"
            dynamic.exposed = True
        root = Root()
        root.static = Static()
        rootconf = {
            '/static': {
                'tools.staticdir.on': True,
                'tools.staticdir.dir': 'static',
                'tools.staticdir.root': curdir,
            },
            '/style.css': {
                'tools.staticfile.on': True,
                'tools.staticfile.filename': os.path.join(curdir, 'style.css'),
            },
            '/docroot': {
                'tools.staticdir.on': True,
                'tools.staticdir.root': curdir,
                'tools.staticdir.dir': 'static',
                'tools.staticdir.index': 'index.html',
            },
            '/error': {
                'tools.staticdir.on': True,
                'request.show_tracebacks': True,
            },
        }
        rootApp = cherrypy.Application(root)
        rootApp.merge(rootconf)
        test_app_conf = {
            '/test': {
                'tools.staticdir.index': 'index.html',
                'tools.staticdir.on': True,
                'tools.staticdir.root': curdir,
                'tools.staticdir.dir': 'static',
            },
        }
        testApp = cherrypy.Application(Static())
        testApp.merge(test_app_conf)
        vhost = cherrypy._cpwsgi.VirtualHost(rootApp, {'virt.net': testApp})
        cherrypy.tree.graft(vhost)
    setup_server = staticmethod(setup_server)

    def teardown_server():
        # Best-effort removal of the fixture files created in setup_server.
        for f in (has_space_filepath, bigfile_filepath):
            if os.path.exists(f):
                try:
                    os.unlink(f)
                except:
                    pass
    teardown_server = staticmethod(teardown_server)

    def testStatic(self):
        self.getPage("/static/index.html")
        self.assertStatus('200 OK')
        self.assertHeader('Content-Type', 'text/html')
        self.assertBody('Hello, world\r\n')
        # Using a staticdir.root value in a subdir...
        self.getPage("/docroot/index.html")
        self.assertStatus('200 OK')
        self.assertHeader('Content-Type', 'text/html')
        self.assertBody('Hello, world\r\n')
        # Check a filename with spaces in it
        self.getPage("/static/has%20space.html")
        self.assertStatus('200 OK')
        self.assertHeader('Content-Type', 'text/html')
        self.assertBody('Hello, world\r\n')
        self.getPage("/style.css")
        self.assertStatus('200 OK')
        self.assertHeader('Content-Type', 'text/css')
        # Note: The body should be exactly 'Dummy stylesheet\n', but
        # unfortunately some tools such as WinZip sometimes turn \n
        # into \r\n on Windows when extracting the CherryPy tarball so
        # we just check the content
        self.assertMatchesBody('^Dummy stylesheet')

    def test_fallthrough(self):
        # Test that NotFound will then try dynamic handlers (see [878]).
        self.getPage("/static/dynamic")
        self.assertBody("This is a DYNAMIC page")
        # Check a directory via fall-through to dynamic handler.
        self.getPage("/static/")
        self.assertStatus('200 OK')
        self.assertHeader('Content-Type', 'text/html;charset=utf-8')
        self.assertBody('You want the Baron? You can have the Baron!')

    def test_index(self):
        # Check a directory via "staticdir.index".
        self.getPage("/docroot/")
        self.assertStatus('200 OK')
        self.assertHeader('Content-Type', 'text/html')
        self.assertBody('Hello, world\r\n')
        # The same page should be returned even if redirected.
        self.getPage("/docroot")
        self.assertStatus(301)
        self.assertHeader('Location', '%s/docroot/' % self.base())
        self.assertMatchesBody("This resource .* <a href='%s/docroot/'>"
                               "%s/docroot/</a>." % (self.base(), self.base()))

    def test_config_errors(self):
        # Check that we get an error if no .file or .dir
        self.getPage("/error/thing.html")
        self.assertErrorPage(500)
        self.assertMatchesBody(ntob("TypeError: staticdir\(\) takes at least 2 "
                                    "(positional )?arguments \(0 given\)"))

    def test_security(self):
        # Test up-level security
        self.getPage("/static/../../test/style.css")
        self.assertStatus((400, 403))

    def test_modif(self):
        # Test modified-since on a reasonably-large file
        self.getPage("/static/dirback.jpg")
        self.assertStatus("200 OK")
        lastmod = ""
        for k, v in self.headers:
            if k == 'Last-Modified':
                lastmod = v
        ims = ("If-Modified-Since", lastmod)
        self.getPage("/static/dirback.jpg", headers=[ims])
        self.assertStatus(304)
        self.assertNoHeader("Content-Type")
        self.assertNoHeader("Content-Length")
        self.assertNoHeader("Content-Disposition")
        self.assertBody("")

    def test_755_vhost(self):
        self.getPage("/test/", [('Host', 'virt.net')])
        self.assertStatus(200)
        self.getPage("/test", [('Host', 'virt.net')])
        self.assertStatus(301)
        self.assertHeader('Location', self.scheme + '://virt.net/test/')

    def test_serve_fileobj(self):
        self.getPage("/fileobj")
        self.assertStatus('200 OK')
        self.assertHeader('Content-Type', 'text/css;charset=utf-8')
        self.assertMatchesBody('^Dummy stylesheet')

    def test_serve_bytesio(self):
        self.getPage("/bytesio")
        self.assertStatus('200 OK')
        self.assertHeader('Content-Type', 'text/plain;charset=utf-8')
        self.assertHeader('Content-Length', 14)
        self.assertMatchesBody('Fee\nfie\nfo\nfum')

    def test_file_stream(self):
        if cherrypy.server.protocol_version != "HTTP/1.1":
            return self.skip()
        self.PROTOCOL = "HTTP/1.1"
        # Make an initial request
        self.persistent = True
        conn = self.HTTP_CONN
        conn.putrequest("GET", "/bigfile", skip_host=True)
        conn.putheader("Host", self.HOST)
        conn.endheaders()
        response = conn.response_class(conn.sock, method="GET")
        response.begin()
        self.assertEqual(response.status, 200)
        body = ntob('')
        remaining = BIGFILE_SIZE
        while remaining > 0:
            data = response.fp.read(65536)
            if not data:
                break
            body += data
            remaining -= len(data)
            if self.scheme == "https":
                newconn = HTTPSConnection
            else:
                newconn = HTTPConnection
            s, h, b = helper.webtest.openURL(
                ntob("/tell"), headers=[], host=self.HOST, port=self.PORT,
                http_conn=newconn)
            if not b:
                # The file was closed on the server.
                tell_position = BIGFILE_SIZE
            else:
                tell_position = int(b)
            expected = len(body)
            if tell_position >= BIGFILE_SIZE:
                # We can't exactly control how much content the server asks for.
                # Fudge it by only checking the first half of the reads.
                if expected < (BIGFILE_SIZE / 2):
                    self.fail(
                        "The file should have advanced to position %r, but has "
                        "already advanced to the end of the file. It may not be "
                        "streamed as intended, or at the wrong chunk size (64k)" %
                        expected)
            elif tell_position < expected:
                self.fail(
                    "The file should have advanced to position %r, but has "
                    "only advanced to position %r. It may not be streamed "
                    "as intended, or at the wrong chunk size (65536)" %
                    (expected, tell_position))
        if body != ntob("x" * BIGFILE_SIZE):
            self.fail("Body != 'x' * %d. Got %r instead (%d bytes)." %
                      (BIGFILE_SIZE, body[:50], len(body)))
        conn.close()

    def test_file_stream_deadlock(self):
        if cherrypy.server.protocol_version != "HTTP/1.1":
            return self.skip()
        self.PROTOCOL = "HTTP/1.1"
        # Make an initial request but abort early.
        self.persistent = True
        conn = self.HTTP_CONN
        conn.putrequest("GET", "/bigfile", skip_host=True)
        conn.putheader("Host", self.HOST)
        conn.endheaders()
        response = conn.response_class(conn.sock, method="GET")
        response.begin()
        self.assertEqual(response.status, 200)
        body = response.fp.read(65536)
        if body != ntob("x" * len(body)):
            self.fail("Body != 'x' * %d. Got %r instead (%d bytes)." %
                      (65536, body[:50], len(body)))
        response.close()
        conn.close()
        # Make a second request, which should fetch the whole file.
        self.persistent = False
        self.getPage("/bigfile")
        if self.body != ntob("x" * BIGFILE_SIZE):
            # Bug fix: the failure message reported len(body) -- the size of
            # the first, aborted read -- instead of the actual second body.
            self.fail("Body != 'x' * %d. Got %r instead (%d bytes)." %
                      (BIGFILE_SIZE, self.body[:50], len(self.body)))
| apache-2.0 |
zestrada/nova-cs498cc | nova/virt/hyperv/livemigrationops.py | 10 | 4079 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for live migration VM operations.
"""
from oslo.config import cfg
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova.virt.hyperv import imagecache
from nova.virt.hyperv import livemigrationutils
from nova.virt.hyperv import pathutils
from nova.virt.hyperv import volumeops
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# Make nova.virt.driver's use_cow_images option available on this CONF.
CONF.import_opt('use_cow_images', 'nova.virt.driver')
class LiveMigrationOps(object):
    """Management class for Hyper-V live migration VM operations."""
    def __init__(self):
        self._pathutils = pathutils.PathUtils()
        self._livemigrutils = livemigrationutils.LiveMigrationUtils()
        self._volumeops = volumeops.VolumeOps()
        self._imagecache = imagecache.ImageCache()
    def live_migration(self, context, instance_ref, dest, post_method,
                       recover_method, block_migration=False,
                       migrate_data=None):
        """Live migrate the VM to *dest*, logging out of its iSCSI targets.

        Calls recover_method on failure (re-raising the error) and
        post_method on success, per the compute driver contract.
        """
        LOG.debug(_("live_migration called"), instance=instance_ref)
        instance_name = instance_ref["name"]
        try:
            iscsi_targets = self._livemigrutils.live_migrate_vm(instance_name,
                                                                dest)
            for (target_iqn, target_lun) in iscsi_targets:
                self._volumeops.logout_storage_target(target_iqn)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.debug(_("Calling live migration recover_method "
                            "for instance: %s"), instance_name)
                recover_method(context, instance_ref, dest, block_migration)
        LOG.debug(_("Calling live migration post_method for instance: %s"),
                  instance_name)
        post_method(context, instance_ref, dest, block_migration)
    def pre_live_migration(self, context, instance, block_device_info,
                           network_info):
        """Prepare this host: cache the image and log in to volume targets."""
        LOG.debug(_("pre_live_migration called"), instance=instance)
        self._livemigrutils.check_live_migration_config()
        if CONF.use_cow_images:
            boot_from_volume = self._volumeops.ebs_root_in_block_devices(
                block_device_info)
            if not boot_from_volume:
                self._imagecache.get_cached_image(context, instance)
        self._volumeops.login_storage_targets(block_device_info)
    def post_live_migration_at_destination(self, ctxt, instance_ref,
                                           network_info, block_migration):
        """No-op hook invoked on the destination after a migration."""
        LOG.debug(_("post_live_migration_at_destination called"),
                  instance=instance_ref)
    def check_can_live_migrate_destination(self, ctxt, instance_ref,
                                           src_compute_info, dst_compute_info,
                                           block_migration=False,
                                           disk_over_commit=False):
        """Return the (empty) dest_check_data dict; nothing to verify here."""
        # Bug fix: instance_ref was passed positionally, which logging treats
        # as a lazy %-format argument; the format string has no placeholder,
        # so pass it with the instance= keyword like the other methods do.
        LOG.debug(_("check_can_live_migrate_destination called"),
                  instance=instance_ref)
        return {}
    def check_can_live_migrate_destination_cleanup(self, ctxt,
                                                   dest_check_data):
        """No-op cleanup counterpart of check_can_live_migrate_destination."""
        LOG.debug(_("check_can_live_migrate_destination_cleanup called"))
    def check_can_live_migrate_source(self, ctxt, instance_ref,
                                      dest_check_data):
        """Source-side check: nothing to verify, pass the data through."""
        # Same positional-argument fix as check_can_live_migrate_destination.
        LOG.debug(_("check_can_live_migrate_source called"),
                  instance=instance_ref)
        return dest_check_data
| apache-2.0 |
ericdill/bokeh | bokeh/server/tests/test_utils.py | 4 | 2721 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2015, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import mock
import threading
import time
import uuid
import unittest
import requests
from requests.exceptions import ConnectionError
from .. import start, configure
from ..app import bokeh_app, app
from ..models import user
from ..settings import settings as server_settings
def wait_flask():
    """Poll the local Bokeh/Flask server until its ping endpoint answers.

    Returns True once /bokeh/ping responds (any HTTP response counts),
    False if wait_until's default timeout elapses first.
    """
    def helper():
        try:
            return requests.get('http://localhost:5006/bokeh/ping')
        except ConnectionError:
            return False
    return wait_until(helper)
def wait_until(func, timeout=1.0, interval=0.01):
    """Poll ``func`` until it returns a truthy value or time runs out.

    ``func`` is called every ``interval`` seconds; returns True as soon
    as it succeeds, or False once more than ``timeout`` seconds have
    elapsed since the first call.
    """
    deadline = time.time() + timeout
    while not func():
        if time.time() > deadline:
            return False
        time.sleep(interval)
    return True
class BaseBokehServerTestCase(unittest.TestCase):
    """Common base for server test cases; subclasses override ``options``."""
    # Per-subclass server_settings overrides applied during setUp().
    options = {}
class MemoryBokehServerTestCase(BaseBokehServerTestCase):
    """Runs a real bokeh server (in-memory backend) in a background thread."""
    def setUp(self):
        # clear tornado ioloop instance
        server_settings.reset()
        server_settings.model_backend = {'type' : 'memory'}
        # Apply per-test-class overrides declared on ``options``.
        for k,v in self.options.items():
            setattr(server_settings, k, v)
        bokeh_app.stdout = None
        bokeh_app.stderr = None
        # Start the server in a daemon-less thread and wait for HTTP to
        # come up before the test body runs.
        self.serverthread = threading.Thread(target=start.start_simple_server)
        self.serverthread.start()
        wait_flask()
        # not great - but no good way to wait for zmq to come up
        time.sleep(0.1)
        make_default_user(bokeh_app)
    def tearDown(self):
        # Stop the server and wait for the background thread to exit so
        # tests do not leak threads into each other.
        start.stop()
        self.serverthread.join()
# Default server test case used by the suite: the in-memory variant.
BokehServerTestCase = MemoryBokehServerTestCase
def make_default_user(bokeh_app):
    """Create and return the 'defaultuser' account used by server tests."""
    bokehuser = user.new_user(bokeh_app.servermodel_storage, "defaultuser",
                              str(uuid.uuid4()), apikey='nokey', docs=[])
    return bokehuser
class FlaskClientTestCase(BaseBokehServerTestCase):
    """Exercises the Flask app through its test client, without starting
    a real server thread (unlike MemoryBokehServerTestCase)."""
    def setUp(self):
        server_settings.reset()
        for k,v in self.options.items():
            setattr(server_settings, k, v)
        server_settings.model_backend = {'type' : 'memory'}
        configure.configure_flask()
        # Silence blueprint-registration logging during setup.
        with mock.patch('bokeh.server.configure.logging'):
            configure.register_blueprint()
        # NOTE(review): initializing app state by hand here; a proper
        # app factory would be cleaner.
        app.secret_key = server_settings.secret_key
        app.debug = True
        self.client = app.test_client()
    def tearDown(self):
        pass
| bsd-3-clause |
jazztpt/edx-platform | cms/lib/xblock/field_data.py | 234 | 1188 | """
:class:`~xblock.field_data.FieldData` subclasses used by the CMS
"""
from xblock.field_data import SplitFieldData
from xblock.fields import Scope
class CmsFieldData(SplitFieldData):
    """
    A :class:`~xblock.field_data.FieldData` that
    reads all UserScope.ONE and UserScope.ALL fields from `student_data`
    and all UserScope.NONE fields from `authored_data`. It allows writing
    to `authored_data`.

    Arguments:
        authored_data: FieldData holding course-author content/settings.
        student_data: FieldData holding per-student and aggregate state.
    """
    def __init__(self, authored_data, student_data):
        # Make sure that we don't repeatedly nest CmsFieldData instances
        if isinstance(authored_data, CmsFieldData):
            authored_data = authored_data._authored_data  # pylint: disable=protected-access
        self._authored_data = authored_data
        self._student_data = student_data
        # Route each field scope to the appropriate backing store.
        super(CmsFieldData, self).__init__({
            Scope.content: authored_data,
            Scope.settings: authored_data,
            Scope.parent: authored_data,
            Scope.children: authored_data,
            Scope.user_state_summary: student_data,
            Scope.user_state: student_data,
            Scope.user_info: student_data,
            Scope.preferences: student_data,
        })
| agpl-3.0 |
nhomar/odoo | addons/crm/crm.py | 267 | 7967 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.http import request
AVAILABLE_PRIORITIES = [
('0', 'Very Low'),
('1', 'Low'),
('2', 'Normal'),
('3', 'High'),
('4', 'Very High'),
]
class crm_tracking_medium(osv.Model):
    # OLD crm.case.channel
    """UTM delivery channel (e.g. postcard, email, banner ad)."""
    _name = "crm.tracking.medium"
    _description = "Channels"
    _order = 'name'
    _columns = {
        'name': fields.char('Channel Name', required=True),
        'active': fields.boolean('Active'),
    }
    _defaults = {
        # New channels are active by default.
        'active': lambda *a: 1,
    }
class crm_tracking_campaign(osv.Model):
    # OLD crm.case.resource.type
    """UTM campaign (e.g. Fall_Drive) grouping related marketing efforts."""
    _name = "crm.tracking.campaign"
    _description = "Campaign"
    _rec_name = "name"
    _columns = {
        'name': fields.char('Campaign Name', required=True, translate=True),
        'section_id': fields.many2one('crm.case.section', 'Sales Team'),
    }
class crm_tracking_source(osv.Model):
    """UTM traffic source (e.g. search engine, referring domain, mailing list)."""
    _name = "crm.tracking.source"
    _description = "Source"
    _rec_name = "name"
    _columns = {
        'name': fields.char('Source Name', required=True, translate=True),
    }
class crm_tracking_mixin(osv.AbstractModel):
    """Mixin class for objects which can be tracked by marketing. """
    _name = 'crm.tracking.mixin'

    _columns = {
        'campaign_id': fields.many2one(
            'crm.tracking.campaign', 'Campaign',
            # old domain ="['|',('section_id','=',section_id),('section_id','=',False)]"
            help="This is a name that helps you keep track of your different campaign efforts Ex: Fall_Drive, Christmas_Special"),
        'source_id': fields.many2one(
            'crm.tracking.source', 'Source',
            help="This is the source of the link Ex: Search Engine, another domain, or name of email list"),
        'medium_id': fields.many2one(
            'crm.tracking.medium', 'Channel',
            help="This is the method of delivery. Ex: Postcard, Email, or Banner Ad",
            oldname='channel_id'),
    }

    def tracking_fields(self):
        """Return the (UTM cookie key, model field name) pairs this mixin handles."""
        return [('utm_campaign', 'campaign_id'),
                ('utm_source', 'source_id'),
                ('utm_medium', 'medium_id')]

    def tracking_get_values(self, cr, uid, vals, context=None):
        """Fill the UTM fields of ``vals`` from the values given or, failing
        that, from the request cookies, resolving names to record ids."""
        for cookie_key, fname in self.tracking_fields():
            field = self._fields[fname]
            # params.get should be always in session by the dispatch from ir_http
            value = vals.get(fname) or (request and request.httprequest.cookies.get(cookie_key))
            if not (field.type == 'many2one' and isinstance(value, basestring)):
                # Already an id (or not a relational field): store as-is.
                vals[fname] = value
            elif value:
                # A non-empty name for a many2one: look the record up by
                # name, creating it on the fly when it does not exist yet.
                Model = self.pool[field.comodel_name]
                found = Model.name_search(cr, uid, value, context=context)
                if found:
                    vals[fname] = found[0][0]
                else:
                    vals[fname] = Model.create(cr, uid, {'name': value}, context=context)
        return vals

    def _get_default_track(self, cr, uid, field, context=None):
        """Default value for one tracking field, derived from the cookies."""
        return self.tracking_get_values(cr, uid, {}, context=context).get(field)

    _defaults = {
        'source_id': lambda self, cr, uid, ctx: self._get_default_track(cr, uid, 'source_id', ctx),
        'campaign_id': lambda self, cr, uid, ctx: self._get_default_track(cr, uid, 'campaign_id', ctx),
        'medium_id': lambda self, cr, uid, ctx: self._get_default_track(cr, uid, 'medium_id', ctx),
    }
class crm_case_stage(osv.osv):
    """ Model for case stages. This models the main stages of a document
        management flow. Main CRM objects (leads, opportunities, project
        issues, ...) will now use only stages, instead of state and stages.
        Stages are for example used to display the kanban view of records.
    """
    _name = "crm.case.stage"
    _description = "Stage of case"
    _rec_name = 'name'
    _order = "sequence"
    _columns = {
        'name': fields.char('Stage Name', required=True, translate=True),
        'sequence': fields.integer('Sequence', help="Used to order stages. Lower is better."),
        'probability': fields.float('Probability (%)', required=True, help="This percentage depicts the default/average probability of the Case for this stage to be a success"),
        'on_change': fields.boolean('Change Probability Automatically', help="Setting this stage will change the probability automatically on the opportunity."),
        'requirements': fields.text('Requirements'),
        # NOTE(review): "limitate" in the help text below is non-standard
        # English, but it is left unchanged because the exact source string
        # is the key for existing translations.
        'section_ids': fields.many2many('crm.case.section', 'section_stage_rel', 'stage_id', 'section_id', string='Sections',
                        help="Link between stages and sales teams. When set, this limitate the current stage to the selected sales teams."),
        'case_default': fields.boolean('Default to New Sales Team',
                        help="If you check this field, this stage will be proposed by default on each sales team. It will not assign this stage to existing teams."),
        # Bug fix: the two implicitly concatenated help fragments were
        # missing a separating space, rendering as "...view whenthere are...".
        'fold': fields.boolean('Folded in Kanban View',
                               help='This stage is folded in the kanban view when '
                                    'there are no records in that stage to display.'),
        'type': fields.selection([('lead', 'Lead'), ('opportunity', 'Opportunity'), ('both', 'Both')],
                                 string='Type', required=True,
                                 help="This field is used to distinguish stages related to Leads from stages related to Opportunities, or to specify stages available for both types."),
    }
    _defaults = {
        'sequence': 1,
        'probability': 0.0,
        'on_change': True,
        'fold': False,
        'type': 'both',
        'case_default': True,
    }
class crm_case_categ(osv.osv):
    """ Category of Case """
    _name = "crm.case.categ"
    _description = "Category of Case"
    _columns = {
        'name': fields.char('Name', required=True, translate=True),
        'section_id': fields.many2one('crm.case.section', 'Sales Team'),
        'object_id': fields.many2one('ir.model', 'Object Name'),
    }
    def _find_object_id(self, cr, uid, context=None):
        """Finds id for case object"""
        # Match either an explicit 'object_id' or an 'object_name' (model
        # name) coming from the context; returns False when neither matches.
        context = context or {}
        object_id = context.get('object_id', False)
        ids = self.pool.get('ir.model').search(cr, uid, ['|', ('id', '=', object_id), ('model', '=', context.get('object_name', False))])
        return ids and ids[0] or False
    _defaults = {
        'object_id': _find_object_id
    }
class crm_payment_mode(osv.osv):
    """ Payment Mode for Fund """
    _name = "crm.payment.mode"
    _description = "CRM Payment Mode"
    _columns = {
        'name': fields.char('Name', required=True),
        'section_id': fields.many2one('crm.case.section', 'Sales Team'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
BryceLohr/authentic | authentic2/saml/migrations/0005_auto__del_unique_libertyprovider_name.py | 3 | 16602 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from authentic2.compat import user_model_label
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: drop the unique constraint on LibertyProvider.name."""
        # Removing unique constraint on 'LibertyProvider', fields ['name']
        db.delete_unique('saml_libertyprovider', ['name'])
    def backwards(self, orm):
        """Revert the migration: restore the unique constraint on LibertyProvider.name."""
        # Adding unique constraint on 'LibertyProvider', fields ['name']
        db.create_unique('saml_libertyprovider', ['name'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': user_model_label.split('.')[-1]},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'saml.authorizationattributemap': {
'Meta': {'object_name': 'AuthorizationAttributeMap'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'})
},
'saml.authorizationattributemapping': {
'Meta': {'object_name': 'AuthorizationAttributeMapping'},
'attribute_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'attribute_value': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'attribute_value_format': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'map': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['saml.AuthorizationAttributeMap']"}),
'source_attribute_name': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'})
},
'saml.authorizationsppolicy': {
'Meta': {'object_name': 'AuthorizationSPPolicy'},
'attribute_map': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'authorization_attributes'", 'null': 'True', 'to': "orm['saml.AuthorizationAttributeMap']"}),
'default_denial_message': ('django.db.models.fields.CharField', [], {'default': "u'You are not authorized to access the service.'", 'max_length': '80'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'})
},
'saml.idpoptionssppolicy': {
'Meta': {'object_name': 'IdPOptionsSPPolicy'},
'allow_create': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'binding_for_sso_response': ('django.db.models.fields.CharField', [], {'default': "'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Artifact'", 'max_length': '60'}),
'enable_binding_for_sso_response': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enable_http_method_for_defederation_request': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enable_http_method_for_slo_request': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'http_method_for_defederation_request': ('django.db.models.fields.IntegerField', [], {'default': '5', 'max_length': '60'}),
'http_method_for_slo_request': ('django.db.models.fields.IntegerField', [], {'default': '4', 'max_length': '60'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'no_nameid_policy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'requested_name_id_format': ('django.db.models.fields.CharField', [], {'default': "'none'", 'max_length': '20'}),
'transient_is_persistent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user_consent': ('django.db.models.fields.CharField', [], {'default': "'urn:oasis:names:tc:SAML:2.0:consent:current-implicit'", 'max_length': '60'}),
'want_authn_request_signed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'want_force_authn_request': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'want_is_passive_authn_request': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'saml.keyvalue': {
'Meta': {'object_name': 'KeyValue'},
'key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'value': ('authentic2.saml.fields.PickledObjectField', [], {})
},
'saml.libertyartifact': {
'Meta': {'object_name': 'LibertyArtifact'},
'artifact': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'django_session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'provider_id': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'saml.libertyassertion': {
'Meta': {'object_name': 'LibertyAssertion'},
'assertion': ('django.db.models.fields.TextField', [], {}),
'assertion_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'provider_id': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'session_index': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'saml.libertyattributemap': {
'Meta': {'object_name': 'LibertyAttributeMap'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'})
},
'saml.libertyattributemapping': {
'Meta': {'object_name': 'LibertyAttributeMapping'},
'attribute_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'attribute_value_format': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'map': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['saml.LibertyAttributeMap']"}),
'source_attribute_name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'saml.libertyfederation': {
'Meta': {'unique_together': "(('name_id_qualifier', 'name_id_format', 'name_id_content', 'name_id_sp_name_qualifier'),)", 'object_name': 'LibertyFederation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'idp_id': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'name_id_content': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name_id_format': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name_id_qualifier': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
'name_id_sp_name_qualifier': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name_id_sp_provided_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'sp_id': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_model_label})
},
'saml.libertyidentitydump': {
'Meta': {'object_name': 'LibertyIdentityDump'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identity_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_model_label, 'unique': 'True'})
},
'saml.libertyidentityprovider': {
'Meta': {'object_name': 'LibertyIdentityProvider'},
'authorization_policy': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'authorization_policy'", 'null': 'True', 'to': "orm['saml.AuthorizationSPPolicy']"}),
'enable_following_authorization_policy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enable_following_idp_options_policy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'idp_options_policy': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'idp_options_policy'", 'null': 'True', 'to': "orm['saml.IdPOptionsSPPolicy']"}),
'liberty_provider': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'identity_provider'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['saml.LibertyProvider']"})
},
'saml.libertymanagedump': {
'Meta': {'object_name': 'LibertyManageDump'},
'django_session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'manage_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'saml.libertyprovider': {
'Meta': {'object_name': 'LibertyProvider'},
'ca_cert_chain': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'entity_id': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200'}),
'entity_id_sha1': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'federation_source': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'metadata': ('django.db.models.fields.TextField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'protocol_conformance': ('django.db.models.fields.IntegerField', [], {'max_length': '10'}),
'public_key': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'ssl_certificate': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'saml.libertyserviceprovider': {
'Meta': {'object_name': 'LibertyServiceProvider'},
'accepted_name_id_format': ('authentic2.saml.fields.MultiSelectField', [], {'max_length': '31', 'blank': 'True'}),
'ask_user_consent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'attribute_map': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'service_providers'", 'null': 'True', 'to': "orm['saml.LibertyAttributeMap']"}),
'authn_request_signed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'default_name_id_format': ('django.db.models.fields.CharField', [], {'default': "'none'", 'max_length': '20'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'encrypt_assertion': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'encrypt_nameid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'idp_initiated_sso': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'liberty_provider': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'service_provider'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['saml.LibertyProvider']"}),
'prefered_assertion_consumer_binding': ('django.db.models.fields.CharField', [], {'default': "'meta'", 'max_length': '4'})
},
'saml.libertysession': {
'Meta': {'object_name': 'LibertySession'},
'assertion': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['saml.LibertyAssertion']", 'null': 'True'}),
'creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'django_session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'federation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['saml.LibertyFederation']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name_id_content': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name_id_format': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'name_id_qualifier': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True'}),
'name_id_sp_name_qualifier': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'provider_id': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'session_index': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'saml.libertysessiondump': {
'Meta': {'object_name': 'LibertySessionDump'},
'django_session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'session_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'saml.libertysessionsp': {
'Meta': {'object_name': 'LibertySessionSP'},
'django_session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'federation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['saml.LibertyFederation']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'session_index': ('django.db.models.fields.CharField', [], {'max_length': '80'})
}
}
complete_apps = ['saml']
| agpl-3.0 |
teampopong/crawlers | election_commission/crawlers/local/base.py | 3 | 3397 | #!/usr/bin/python2.7
# -*- encoding=utf-8 -*-
import gevent
from gevent import monkey
import itertools
from urlparse import urljoin
from utils import flatten, get_json, get_xpath, parse_cell, sanitize, split
monkey.patch_all()
class BaseCrawler(object):
    """Scrapes election-candidate tables from info.nec.go.kr (Python 2).

    Subclasses define ``attrs`` (column names, in table order), ``nth``
    (election number) and ``level`` (election type).
    """
    # Base URL used to absolutize candidate photo paths.
    url_image_base = 'http://info.nec.go.kr'
    # Ordered attribute names, one per table cell; set by subclasses.
    attrs = []
    # Attributes whose cells are handled specially (not via parse_cell).
    attrs_exclude_parse_cell = ['image']
    def parse(self, url, city_name=None):
        # Flatten all <td> cells, then regroup them into one dict per
        # candidate (len(attrs) consecutive cells each).
        elems = get_xpath(url, '//td')
        num_attrs = len(self.attrs)
        members = (dict(zip(self.attrs, elems[i*num_attrs:(i+1)*num_attrs]))\
                for i in xrange(len(elems) / num_attrs))
        members = [self.parse_member(member, city_name) for member in members]
        print 'crawled #%d - %s(%d)...' % (self.nth, city_name, len(members))
        return members
    def parse_record(self, record):
        # Normalize every plain cell; excluded attrs keep the raw element.
        for attr in self.attrs:
            if attr not in self.attrs_exclude_parse_cell:
                record[attr] = parse_cell(record[attr])
    def parse_member(self, member, city_name):
        """Normalize one raw candidate dict in place and return it."""
        self.parse_record(member)
        # never change the order
        member['election_no'] = self.nth
        member['election_type'] = self.level
        self.parse_member_image(member)
        self.parse_member_name(member)
        self.parse_member_birth(member)
        self.parse_member_district(member, city_name)
        self.parse_member_party(member)
        self.parse_member_vote(member)
        return member
    def parse_member_image(self, member):
        # Turn the relative <img src> into an absolute URL.
        if 'image' not in member: return
        rel_path = member['image'].find("./img").attrib['src']
        member['image'] = urljoin(self.url_image_base, rel_path)
    def parse_member_name(self, member):
        # Split the combined name cell into Korean and Chinese-character parts.
        if 'name' not in member: return
        member['name_kr'], member['name_cn'] = map(sanitize, member['name'][:2])
        del member['name']
    def parse_member_birth(self, member):
        # Split "YYYY MM DD"-style birth cell into three fields.
        if 'birth' not in member: return
        member['birthyear'], member['birthmonth'], member['birthday'] =\
                split(member['birth'][0])
        del member['birth']
    def parse_member_district(self, member, city_name):
        # Prefix the district with its city when known.
        if city_name:
            member['district'] = '%s %s' % (city_name, member['district'])
    def parse_member_party(self, member):
        if 'party' not in member: return
        if isinstance(member['party'], list):
            member['party'] = member['party'][0]
    def parse_member_vote(self, member):
        # Split the vote cell into absolute count (commas stripped) and rate.
        if 'vote' not in member: return
        member['votenum'], member['voterate'] = map(sanitize, member['vote'][:2])
        member['votenum'] = member['votenum'].replace(',', '')
        del member['vote']
class MultiCityCrawler(BaseCrawler):
    """Crawler for elections whose result pages are split per city."""
    def city_ids(self):
        # List of (city code, city name) pairs from the JSON index.
        list_ = get_json(self.url_city_ids_json)['body']
        return map(lambda x: (x['CODE'], x['NAME']), list_)
    def url_list(self, city_id):
        # Result-page URL for one city.
        return self.url_list_base + str(city_id)
    def crawl(self):
        # Fetch every city's page concurrently with gevent greenlets,
        # then flatten the per-city candidate lists into one.
        jobs = []
        for city_id, city_name in self.city_ids():
            req_url = self.url_list(city_id)
            job = gevent.spawn(self.parse, req_url, city_name)
            jobs.append(job)
        gevent.joinall(jobs)
        people = flatten(job.get() for job in jobs)
        return people
class SinglePageCrawler(BaseCrawler):
    """Crawler for elections whose results fit on a single page."""
    def crawl(self):
        people = self.parse(self.url_list)
        return people
| agpl-3.0 |
adoosii/edx-platform | common/lib/xmodule/xmodule/progress.py | 110 | 5066 | '''
Progress class for modules. Represents where a student is in a module.
Useful things to know:
- Use Progress.to_js_status_str() to convert a progress into a simple
status string to pass to js.
- Use Progress.to_js_detail_str() to convert a progress into a more detailed
string to pass to js.
In particular, these functions have a canonical handing of None.
For most subclassing needs, you should only need to reimplement
frac() and __str__().
'''
import numbers
class Progress(object):
    '''Represents a progress of a/b (a out of b done)

    a and b must be numeric, but not necessarily integer, with
    0 <= a <= b and b > 0.

    Progress can only represent Progress for modules where that makes sense. Other
    modules (e.g. html) should return None from get_progress().

    TODO: add tag for module type?  Would allow for smarter merging.
    '''
    def __init__(self, a, b):
        '''Construct a Progress object for the fraction a/b.

        a and b must be numbers with b > 0; a is clamped into [0, b].

        Raises:
            TypeError: if a or b is not numeric.
            ValueError: if b <= 0.
        '''
        # Want to do all checking at construction time, so explicitly check types
        if not (isinstance(a, numbers.Number) and
                isinstance(b, numbers.Number)):
            raise TypeError('a and b must be numbers. Passed {0}/{1}'.format(a, b))
        # Clamp a into [0, b] rather than raising, to be forgiving of
        # slightly-out-of-range inputs.
        if a > b:
            a = b
        if a < 0:
            a = 0
        if b <= 0:
            raise ValueError('fraction a/b = {0}/{1} must have b > 0'.format(a, b))
        self._a = a
        self._b = b

    def frac(self):
        ''' Return tuple (a,b) representing progress of a/b'''
        return (self._a, self._b)

    def percent(self):
        ''' Returns a percentage progress as a float between 0 and 100.

        subclassing note: implemented in terms of frac(), assumes sanity
        checking is done at construction time.
        '''
        (a, b) = self.frac()
        return 100.0 * a / b

    def started(self):
        ''' Returns True if fractional progress is greater than 0.

        subclassing note: implemented in terms of frac(), assumes sanity
        checking is done at construction time.
        '''
        return self.frac()[0] > 0

    def inprogress(self):
        ''' Returns True if fractional progress is strictly between 0 and 1.

        subclassing note: implemented in terms of frac(), assumes sanity
        checking is done at construction time.
        '''
        (a, b) = self.frac()
        # Chained comparison replaces the original `a > 0 and a < b`.
        return 0 < a < b

    def done(self):
        ''' Return True if this represents done.

        subclassing note: implemented in terms of frac(), assumes sanity
        checking is done at construction time.
        '''
        (a, b) = self.frac()
        return a == b

    def ternary_str(self):
        ''' Return a string version of this progress: either
        "none", "in_progress", or "done".

        subclassing note: implemented in terms of frac()
        '''
        (a, b) = self.frac()
        if a == 0:
            return "none"
        if a < b:
            return "in_progress"
        return "done"

    def __eq__(self, other):
        ''' Two Progress objects are equal if they have identical values.
        Implemented in terms of frac()'''
        if not isinstance(other, Progress):
            return False
        return self.frac() == other.frac()

    def __ne__(self, other):
        ''' The opposite of equal'''
        return not self.__eq__(other)

    def __hash__(self):
        # Added alongside __eq__ so equal Progress objects hash equally
        # (a class overriding __eq__ should define a consistent __hash__).
        return hash(self.frac())

    def __str__(self):
        '''Return a string representation of this string. Rounds results to
        two decimal places, stripping out any trailing zeroes.

        subclassing note: implemented in terms of frac().
        '''
        def display(n):
            # Fixed 2-decimal formatting, then strip trailing zeroes and
            # a dangling decimal point ("1.00" -> "1", "0.50" -> "0.5").
            return '{:.2f}'.format(n).rstrip('0').rstrip('.')
        (a, b) = self.frac()
        return "{0}/{1}".format(display(a), display(b))

    @staticmethod
    def add_counts(a, b):
        '''Add two progress indicators, assuming that each represents items done:
        (a / b) + (c / d) = (a + c) / (b + d).
        If either is None, returns the other.
        '''
        if a is None:
            return b
        if b is None:
            return a
        # get numerators + denominators
        (n, d) = a.frac()
        (n2, d2) = b.frac()
        return Progress(n + n2, d + d2)

    @staticmethod
    def to_js_status_str(progress):
        '''
        Return the "status string" version of the passed Progress
        object that should be passed to js. Use this function when
        sending Progress objects to js to limit dependencies.
        '''
        if progress is None:
            return "0"
        return progress.ternary_str()

    @staticmethod
    def to_js_detail_str(progress):
        '''
        Return the "detail string" version of the passed Progress
        object that should be passed to js. Use this function when
        passing Progress objects to js to limit dependencies.
        '''
        if progress is None:
            return "0"
        return str(progress)
| agpl-3.0 |
kmolab/kmolab.github.io | data/Brython-3.3.4/Lib/test/test_pipes.py | 126 | 6473 | import pipes
import os
import string
import unittest
from test.support import TESTFN, run_unittest, unlink, reap_children
if os.name != 'posix':
raise unittest.SkipTest('pipes module only works on posix')
TESTFN2 = TESTFN + "2"
# tr a-z A-Z is not portable, so make the ranges explicit
s_command = 'tr %s %s' % (string.ascii_lowercase, string.ascii_uppercase)
class SimplePipeTests(unittest.TestCase):
    """Exercise pipes.Template against the real shell.

    Most tests build a small pipeline around ``tr`` (uppercasing its
    input) and verify the transformed output; the rest check argument
    validation on append/prepend/open.
    """
    def tearDown(self):
        # Remove any scratch files the test created.
        for f in (TESTFN, TESTFN2):
            unlink(f)
    def testSimplePipe1(self):
        # Write through a one-step stdin->stdout pipeline.
        t = pipes.Template()
        t.append(s_command, pipes.STDIN_STDOUT)
        f = t.open(TESTFN, 'w')
        f.write('hello world #1')
        f.close()
        with open(TESTFN) as f:
            self.assertEqual(f.read(), 'HELLO WORLD #1')
    def testSimplePipe2(self):
        # Copy between two files through a file-in/file-out step.
        with open(TESTFN, 'w') as f:
            f.write('hello world #2')
        t = pipes.Template()
        t.append(s_command + ' < $IN > $OUT', pipes.FILEIN_FILEOUT)
        t.copy(TESTFN, TESTFN2)
        with open(TESTFN2) as f:
            self.assertEqual(f.read(), 'HELLO WORLD #2')
    def testSimplePipe3(self):
        # Read through a file-in/stdout step.
        with open(TESTFN, 'w') as f:
            f.write('hello world #2')
        t = pipes.Template()
        t.append(s_command + ' < $IN', pipes.FILEIN_STDOUT)
        f = t.open(TESTFN, 'r')
        try:
            self.assertEqual(f.read(), 'HELLO WORLD #2')
        finally:
            f.close()
    def testEmptyPipeline1(self):
        # copy through empty pipe
        d = 'empty pipeline test COPY'
        with open(TESTFN, 'w') as f:
            f.write(d)
        with open(TESTFN2, 'w') as f:
            f.write('')
        t=pipes.Template()
        t.copy(TESTFN, TESTFN2)
        with open(TESTFN2) as f:
            self.assertEqual(f.read(), d)
    def testEmptyPipeline2(self):
        # read through empty pipe
        d = 'empty pipeline test READ'
        with open(TESTFN, 'w') as f:
            f.write(d)
        t=pipes.Template()
        f = t.open(TESTFN, 'r')
        try:
            self.assertEqual(f.read(), d)
        finally:
            f.close()
    def testEmptyPipeline3(self):
        # write through empty pipe
        d = 'empty pipeline test WRITE'
        t = pipes.Template()
        with t.open(TESTFN, 'w') as f:
            f.write(d)
        with open(TESTFN) as f:
            self.assertEqual(f.read(), d)
    def testRepr(self):
        # repr() should reflect the steps accumulated so far.
        t = pipes.Template()
        self.assertEqual(repr(t), "<Template instance, steps=[]>")
        t.append('tr a-z A-Z', pipes.STDIN_STDOUT)
        self.assertEqual(repr(t),
                         "<Template instance, steps=[('tr a-z A-Z', '--')]>")
    def testSetDebug(self):
        # debug() toggles the debugging flag.
        t = pipes.Template()
        t.debug(False)
        self.assertEqual(t.debugging, False)
        t.debug(True)
        self.assertEqual(t.debugging, True)
    def testReadOpenSink(self):
        # check calling open('r') on a pipe ending with
        # a sink raises ValueError
        t = pipes.Template()
        t.append('boguscmd', pipes.SINK)
        self.assertRaises(ValueError, t.open, 'bogusfile', 'r')
    def testWriteOpenSource(self):
        # check calling open('w') on a pipe ending with
        # a source raises ValueError
        t = pipes.Template()
        t.prepend('boguscmd', pipes.SOURCE)
        self.assertRaises(ValueError, t.open, 'bogusfile', 'w')
    def testBadAppendOptions(self):
        t = pipes.Template()
        # try a non-string command
        self.assertRaises(TypeError, t.append, 7, pipes.STDIN_STDOUT)
        # try a type that isn't recognized
        self.assertRaises(ValueError, t.append, 'boguscmd', 'xx')
        # shouldn't be able to append a source
        self.assertRaises(ValueError, t.append, 'boguscmd', pipes.SOURCE)
        # check appending two sinks
        t = pipes.Template()
        t.append('boguscmd', pipes.SINK)
        self.assertRaises(ValueError, t.append, 'boguscmd', pipes.SINK)
        # command needing file input but with no $IN
        t = pipes.Template()
        self.assertRaises(ValueError, t.append, 'boguscmd $OUT',
                          pipes.FILEIN_FILEOUT)
        t = pipes.Template()
        self.assertRaises(ValueError, t.append, 'boguscmd',
                          pipes.FILEIN_STDOUT)
        # command needing file output but with no $OUT
        t = pipes.Template()
        self.assertRaises(ValueError, t.append, 'boguscmd $IN',
                          pipes.FILEIN_FILEOUT)
        t = pipes.Template()
        self.assertRaises(ValueError, t.append, 'boguscmd',
                          pipes.STDIN_FILEOUT)
    def testBadPrependOptions(self):
        t = pipes.Template()
        # try a non-string command
        self.assertRaises(TypeError, t.prepend, 7, pipes.STDIN_STDOUT)
        # try a type that isn't recognized
        self.assertRaises(ValueError, t.prepend, 'tr a-z A-Z', 'xx')
        # shouldn't be able to prepend a sink
        self.assertRaises(ValueError, t.prepend, 'boguscmd', pipes.SINK)
        # check prepending two sources
        t = pipes.Template()
        t.prepend('boguscmd', pipes.SOURCE)
        self.assertRaises(ValueError, t.prepend, 'boguscmd', pipes.SOURCE)
        # command needing file input but with no $IN
        t = pipes.Template()
        self.assertRaises(ValueError, t.prepend, 'boguscmd $OUT',
                          pipes.FILEIN_FILEOUT)
        t = pipes.Template()
        self.assertRaises(ValueError, t.prepend, 'boguscmd',
                          pipes.FILEIN_STDOUT)
        # command needing file output but with no $OUT
        t = pipes.Template()
        self.assertRaises(ValueError, t.prepend, 'boguscmd $IN',
                          pipes.FILEIN_FILEOUT)
        t = pipes.Template()
        self.assertRaises(ValueError, t.prepend, 'boguscmd',
                          pipes.STDIN_FILEOUT)
    def testBadOpenMode(self):
        # Only 'r' and 'w' are valid open modes.
        t = pipes.Template()
        self.assertRaises(ValueError, t.open, 'bogusfile', 'x')
    def testClone(self):
        # clone() must copy the step list, not share it with the original.
        t = pipes.Template()
        t.append('tr a-z A-Z', pipes.STDIN_STDOUT)
        u = t.clone()
        self.assertNotEqual(id(t), id(u))
        self.assertEqual(t.steps, u.steps)
        self.assertNotEqual(id(t.steps), id(u.steps))
        self.assertEqual(t.debugging, u.debugging)
def test_main():
    # Run the suite, then reap the shell subprocesses that each pipeline
    # step spawned so the test run leaves no zombies behind.
    run_unittest(SimplePipeTests)
    reap_children()
if __name__ == "__main__":
    # Allow running this test file directly.
    test_main()
| agpl-3.0 |
nwjs/chromium.src | gpu/command_buffer/PRESUBMIT.py | 3 | 3329 | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Enforces command buffer autogen matches script output.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into depot_tools.
"""
import os.path
def _IsGLES2CmdBufferFile(file):
  """Returns True if |file| is an input or output of the GLES2 generator."""
  filename = os.path.basename(file.LocalPath())
  # Hand-maintained files that feed the GLES2 command buffer generator.
  special_files = (
      'build_cmd_buffer_lib.py', 'build_gles2_cmd_buffer.py',
      'gles2_cmd_buffer_functions.txt', 'gl2.h', 'gl2ext.h', 'gl3.h', 'gl31.h',
      'gl2chromium.h', 'gl2extchromium.h')
  if filename in special_files:
    return True
  # Generated headers follow a <prefix>..._autogen.h naming scheme.
  prefixes = ('gles2', 'context_state', 'client_context_state')
  return filename.startswith(prefixes) and filename.endswith('_autogen.h')
def _IsRasterCmdBufferFile(file):
  """Returns True if |file| is an input or output of the raster generator."""
  filename = os.path.basename(file.LocalPath())
  # Hand-maintained files that feed the raster command buffer generator.
  special_files = (
      'build_cmd_buffer_lib.py', 'build_raster_cmd_buffer.py',
      'raster_cmd_buffer_functions.txt')
  if filename in special_files:
    return True
  # Generated headers follow a raster..._autogen.h naming scheme.
  return filename.startswith('raster') and filename.endswith('_autogen.h')
def _IsWebGPUCmdBufferFile(file):
  """Returns True if |file| is an input or output of the WebGPU generator."""
  filename = os.path.basename(file.LocalPath())
  # Hand-maintained files that feed the WebGPU command buffer generator.
  special_files = (
      'build_cmd_buffer_lib.py', 'build_webgpu_cmd_buffer.py',
      'webgpu_cmd_buffer_functions.txt')
  if filename in special_files:
    return True
  # Generated headers follow a webgpu..._autogen.h naming scheme.
  return filename.startswith('webgpu') and filename.endswith('_autogen.h')
def CommonChecks(input_api, output_api):
  """Re-runs each command buffer generator whose inputs changed.

  Every generator is invoked with --check against a temporary output
  directory; any mismatch with the checked-in autogen files is reported
  as a presubmit error.
  """
  # Map each generator script (sans .py) to the filter detecting its files.
  generators = (
      ('build_gles2_cmd_buffer', _IsGLES2CmdBufferFile),
      ('build_raster_cmd_buffer', _IsRasterCmdBufferFile),
      ('build_webgpu_cmd_buffer', _IsWebGPUCmdBufferFile),
  )
  # Evaluate affected files up front, one entry per generator.
  affected = [(name, input_api.AffectedFiles(file_filter=file_filter))
              for name, file_filter in generators]
  messages = []
  with input_api.temporary_directory() as temp_dir:
    commands = []
    for name, files in affected:
      if not files:
        continue
      commands.append(
          input_api.Command(
              name=name,
              cmd=[
                  input_api.python_executable, name + '.py',
                  '--check', '--output-dir=' + temp_dir
              ],
              kwargs={},
              message=output_api.PresubmitError))
    if commands:
      messages.extend(input_api.RunTests(commands))
  return messages
def CheckChangeOnUpload(input_api, output_api):
  # Presubmit entry point invoked on upload; delegates to the shared checks.
  return CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
  # Presubmit entry point invoked on commit; delegates to the shared checks.
  return CommonChecks(input_api, output_api)
| bsd-3-clause |
petewarden/tensorflow | tensorflow/python/ops/default_gradient.py | 21 | 3070 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for computing default gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
def get_zeros_dtype(t):
  """Return the dtype for the default gradient for a Tensor."""
  # Non-resource tensors carry their own dtype.
  if t.dtype != dtypes.resource:
    return t.dtype
  # For resource variables, the dtype lives in the handle's shape/type data.
  handle_data = resource_variable_ops.get_eager_safe_handle_data(t)
  usable = (handle_data is not None and handle_data.is_set
            and len(handle_data.shape_and_type) == 1)
  if not usable:
    raise ValueError("Internal error: Tried to take gradients (or similar) "
                     "of a variable without handle data:\n%s" % str(t))
  return handle_data.shape_and_type[0].dtype
def shape_and_dtype(t):
  """Return the shape and dtype for the default gradient for a Tensor."""
  # Non-resource tensors carry their own shape and dtype.
  if t.dtype != dtypes.resource:
    return t.shape, t.dtype
  # For resource variables, both live in the handle's shape/type data.
  handle_data = resource_variable_ops.get_eager_safe_handle_data(t)
  usable = (handle_data is not None and handle_data.is_set
            and len(handle_data.shape_and_type) == 1)
  if not usable:
    raise ValueError("Internal error: Tried to take gradients (or similar) "
                     "of a variable without handle data:\n%s" % str(t))
  shape_and_type = handle_data.shape_and_type[0]
  return (tensor_shape.TensorShape(shape_and_type.shape),
          dtypes.as_dtype(shape_and_type.dtype))
def zeros_like(t):
  """Like array_ops.zeros_like, but respects resource handles."""
  # A resource handle has no usable shape/dtype itself; look them up.
  if t.dtype != dtypes.resource:
    return array_ops.zeros_like(t)
  return array_ops.zeros(*shape_and_dtype(t))
def ones_like(t):
  """Like array_ops.ones_like, but respects resource handles."""
  # A resource handle has no usable shape/dtype itself; look them up.
  if t.dtype != dtypes.resource:
    return array_ops.ones_like(t)
  return array_ops.ones(*shape_and_dtype(t))
def supports_default_grad(t):
  """Whether tensor `t` supports creating a default gradient.

  This function assumes that `t` is of a trainable type.

  Args:
    t: Tensor

  Returns:
    Bool
  """
  # Only resource tensors can lack the handle data a default grad needs.
  if t.dtype != dtypes.resource:
    return True
  handle_data = resource_variable_ops.get_eager_safe_handle_data(t)
  return (handle_data is not None and handle_data.is_set
          and len(handle_data.shape_and_type) == 1)
| apache-2.0 |
mtp1376/youtube-dl | youtube_dl/extractor/ellentv.py | 12 | 3239 | # coding: utf-8
from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..utils import (
ExtractorError,
parse_iso8601,
)
class EllenTVIE(InfoExtractor):
    """Extractor for single videos on ellentv.com / ellentube.com."""
    # Video pages live under /videos/<id> on either domain.
    _VALID_URL = r'https?://(?:www\.)?(?:ellentv|ellentube)\.com/videos/(?P<id>[a-z0-9_-]+)'
    _TESTS = [{
        'url': 'http://www.ellentv.com/videos/0-7jqrsr18/',
        'md5': 'e4af06f3bf0d5f471921a18db5764642',
        'info_dict': {
            'id': '0-7jqrsr18',
            'ext': 'mp4',
            'title': 'What\'s Wrong with These Photos? A Whole Lot',
            'description': 'md5:35f152dc66b587cf13e6d2cf4fa467f6',
            'timestamp': 1406876400,
            'upload_date': '20140801',
        }
    }, {
        'url': 'http://ellentube.com/videos/0-dvzmabd5/',
        'md5': '98238118eaa2bbdf6ad7f708e3e4f4eb',
        'info_dict': {
            'id': '0-dvzmabd5',
            'ext': 'mp4',
            'title': '1 year old twin sister makes her brother laugh',
            'description': '1 year old twin sister makes her brother laugh',
            'timestamp': 1419542075,
            'upload_date': '20141225',
        }
    }]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The page exposes the media URL in a <meta name="VideoURL"> tag.
        video_url = self._html_search_meta('VideoURL', webpage, 'url')
        # Fall back to the inline JS `pageName` variable if og:title is absent.
        title = self._og_search_title(webpage, default=None) or self._search_regex(
            r'pageName\s*=\s*"([^"]+)"', webpage, 'title')
        description = self._html_search_meta(
            'description', webpage, 'description') or self._og_search_description(webpage)
        # Publish time comes from the datetime attribute of the publish-date span.
        timestamp = parse_iso8601(self._search_regex(
            r'<span class="publish-date"><time datetime="([^"]+)">',
            webpage, 'timestamp'))
        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'description': description,
            'timestamp': timestamp,
        }
class EllenTVClipsIE(InfoExtractor):
    """Extractor for episode pages listing multiple clips on ellentv.com."""
    IE_NAME = 'EllenTV:clips'
    _VALID_URL = r'https?://(?:www\.)?ellentv\.com/episodes/(?P<id>[a-z0-9_-]+)'
    _TEST = {
        'url': 'http://www.ellentv.com/episodes/meryl-streep-vanessa-hudgens/',
        'info_dict': {
            'id': 'meryl-streep-vanessa-hudgens',
            'title': 'Meryl Streep, Vanessa Hudgens',
        },
        'playlist_mincount': 9,
    }
    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)
        playlist = self._extract_playlist(webpage)
        return {
            '_type': 'playlist',
            'id': playlist_id,
            'title': self._og_search_title(webpage),
            'entries': self._extract_entries(playlist)
        }
    def _extract_playlist(self, webpage):
        # The clip list is inlined as JS: playerView.addClips([{...}]);
        # The regex captures only the inner object text, so the JSON array
        # brackets/braces are re-added before parsing.
        json_string = self._search_regex(r'playerView.addClips\(\[\{(.*?)\}\]\);', webpage, 'json')
        try:
            return json.loads("[{" + json_string + "}]")
        except ValueError as ve:
            raise ExtractorError('Failed to download JSON', cause=ve)
    def _extract_entries(self, playlist):
        # Each clip URL is handed off to the single-video extractor above.
        return [self.url_result(item['url'], 'EllenTV') for item in playlist]
| unlicense |
tdsmith/pip | pip/req/req_file.py | 239 | 9670 | """
Requirements file parsing
"""
from __future__ import absolute_import
import os
import re
import shlex
import optparse
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves import filterfalse
import pip
from pip.download import get_file_content
from pip.req.req_install import InstallRequirement
from pip.exceptions import (RequirementsFileParseError)
from pip.utils import normalize_name
from pip import cmdoptions
__all__ = ['parse_requirements']
# Lines whose path starts with a URL scheme are fetched over the network.
SCHEME_RE = re.compile(r'^(http|https|file):', re.I)
# A '#' comment (at line start or preceded by whitespace) through end of line.
COMMENT_RE = re.compile(r'(^|\s)+#.*$')
# Option factories that may appear on a requirements-file line; these options
# configure the finder or nested-file handling rather than a requirement.
SUPPORTED_OPTIONS = [
    cmdoptions.constraints,
    cmdoptions.editable,
    cmdoptions.requirements,
    cmdoptions.no_index,
    cmdoptions.index_url,
    cmdoptions.find_links,
    cmdoptions.extra_index_url,
    cmdoptions.allow_external,
    cmdoptions.allow_all_external,
    cmdoptions.no_allow_external,
    cmdoptions.allow_unsafe,
    cmdoptions.no_allow_unsafe,
    cmdoptions.use_wheel,
    cmdoptions.no_use_wheel,
    cmdoptions.always_unzip,
    cmdoptions.no_binary,
    cmdoptions.only_binary,
]
# options to be passed to requirements
SUPPORTED_OPTIONS_REQ = [
    cmdoptions.install_options,
    cmdoptions.global_options
]
# the 'dest' string values
SUPPORTED_OPTIONS_REQ_DEST = [o().dest for o in SUPPORTED_OPTIONS_REQ]
def parse_requirements(filename, finder=None, comes_from=None, options=None,
                       session=None, constraint=False, wheel_cache=None):
    """Parse a requirements file and yield InstallRequirement instances.

    :param filename: Path or url of requirements file.
    :param finder: Instance of pip.index.PackageFinder.
    :param comes_from: Origin description of requirements.
    :param options: Global options.
    :param session: Instance of pip.download.PipSession.
    :param constraint: If true, parsing a constraint file rather than
        requirements file.
    :param wheel_cache: Instance of pip.wheel.WheelCache
    """
    if session is None:
        raise TypeError(
            "parse_requirements() missing 1 required keyword argument: "
            "'session'"
        )
    _, content = get_file_content(
        filename, comes_from=comes_from, session=session
    )
    # Preprocess in order: strip comments, join continued lines, then apply
    # the optional --skip-requirements-regex filter.
    lines = skip_regex(join_lines(ignore_comments(content.splitlines())),
                       options)
    for line_number, line in enumerate(lines, 1):
        for req in process_line(line, filename, line_number, finder,
                                comes_from, options, session, wheel_cache,
                                constraint=constraint):
            yield req
def process_line(line, filename, line_number, finder=None, comes_from=None,
                 options=None, session=None, wheel_cache=None,
                 constraint=False):
    """Process a single requirements line; This can result in creating/yielding
    requirements, or updating the finder.

    For lines that contain requirements, the only options that have an effect
    are from SUPPORTED_OPTIONS_REQ, and they are scoped to the
    requirement. Other options from SUPPORTED_OPTIONS may be present, but are
    ignored.

    For lines that do not contain requirements, the only options that have an
    effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may
    be present, but are ignored. These lines may contain multiple options
    (although our docs imply only one is supported), and all of them are
    parsed and affect the finder.

    :param line: One logical (continuation-joined) requirements-file line.
    :param filename: Path or URL of the file the line came from.
    :param line_number: 1-based line number, used in origin descriptions.
    :param finder: Optional pip.index.PackageFinder updated by option lines.
    :param comes_from: Origin description passed through to nested files.
    :param options: Global options.
    :param session: Instance of pip.download.PipSession.
    :param wheel_cache: Instance of pip.wheel.WheelCache.
    :param constraint: If True, parsing a constraints file.
    """
    # Only the option part of the line goes through shlex/optparse; the
    # requirement part may contain markers that shlex would mangle.
    parser = build_parser()
    defaults = parser.get_default_values()
    defaults.index_url = None
    if finder:
        # `finder.format_control` will be updated during parsing
        defaults.format_control = finder.format_control
    args_str, options_str = break_args_options(line)
    opts, _ = parser.parse_args(shlex.split(options_str), defaults)

    # preserve for the nested code path
    line_comes_from = '%s %s (line %s)' % (
        '-c' if constraint else '-r', filename, line_number)

    # yield a line requirement
    if args_str:
        isolated = options.isolated_mode if options else False
        if options:
            cmdoptions.check_install_build_global(options, opts)
        # get the options that apply to requirements
        req_options = {}
        for dest in SUPPORTED_OPTIONS_REQ_DEST:
            if dest in opts.__dict__ and opts.__dict__[dest]:
                req_options[dest] = opts.__dict__[dest]
        yield InstallRequirement.from_line(
            args_str, line_comes_from, constraint=constraint,
            isolated=isolated, options=req_options, wheel_cache=wheel_cache
        )

    # yield an editable requirement
    elif opts.editables:
        isolated = options.isolated_mode if options else False
        default_vcs = options.default_vcs if options else None
        yield InstallRequirement.from_editable(
            opts.editables[0], comes_from=line_comes_from,
            constraint=constraint, default_vcs=default_vcs, isolated=isolated,
            wheel_cache=wheel_cache
        )

    # parse a nested requirements file
    elif opts.requirements or opts.constraints:
        if opts.requirements:
            req_path = opts.requirements[0]
            nested_constraint = False
        else:
            req_path = opts.constraints[0]
            nested_constraint = True
        # original file is over http
        if SCHEME_RE.search(filename):
            # do a url join so relative paths work
            req_path = urllib_parse.urljoin(filename, req_path)
        # original file and nested file are paths
        elif not SCHEME_RE.search(req_path):
            # do a join so relative paths work
            # NOTE(review): req_dir is computed but unused here;
            # os.path.dirname(filename) is re-derived on the next line.
            req_dir = os.path.dirname(filename)
            req_path = os.path.join(os.path.dirname(filename), req_path)
        # TODO: Why not use `comes_from='-r {} (line {})'` here as well?
        parser = parse_requirements(
            req_path, finder, comes_from, options, session,
            constraint=nested_constraint, wheel_cache=wheel_cache
        )
        for req in parser:
            yield req

    # set finder options
    elif finder:
        if opts.index_url:
            finder.index_urls = [opts.index_url]
        if opts.use_wheel is False:
            finder.use_wheel = False
            pip.index.fmt_ctl_no_use_wheel(finder.format_control)
        if opts.no_index is True:
            finder.index_urls = []
        if opts.allow_all_external:
            finder.allow_all_external = opts.allow_all_external
        if opts.extra_index_urls:
            finder.index_urls.extend(opts.extra_index_urls)
        if opts.allow_external:
            finder.allow_external |= set(
                [normalize_name(v).lower() for v in opts.allow_external])
        if opts.allow_unverified:
            # Remove after 7.0
            finder.allow_unverified |= set(
                [normalize_name(v).lower() for v in opts.allow_unverified])
        if opts.find_links:
            # FIXME: it would be nice to keep track of the source
            # of the find_links: support a find-links local path
            # relative to a requirements file.
            value = opts.find_links[0]
            req_dir = os.path.dirname(os.path.abspath(filename))
            relative_to_reqs_file = os.path.join(req_dir, value)
            if os.path.exists(relative_to_reqs_file):
                value = relative_to_reqs_file
            finder.find_links.append(value)
def break_args_options(line):
    """Break up the line into an args and options string. We only want to shlex
    (and then optparse) the options, not the args. args can contain markers
    which are corrupted by shlex.

    :param line: a single logical requirements-file line.
    :return: an ``(args, options)`` tuple of space-joined strings; everything
        before the first ``-``/``--`` token is args, the rest is options.
    """
    tokens = line.split(' ')
    args = []
    options = tokens[:]
    for token in tokens:
        # A single '-' prefix check suffices: '--long' options start with
        # '-' too, so the old extra startswith('--') test was redundant.
        if token.startswith('-'):
            break
        args.append(token)
        options.pop(0)
    return ' '.join(args), ' '.join(options)
def build_parser():
    """
    Return a parser for parsing requirement lines
    """
    parser = optparse.OptionParser(add_help_option=False)
    # Register every per-line and per-requirement option factory.
    for make_option in SUPPORTED_OPTIONS + SUPPORTED_OPTIONS_REQ:
        parser.add_option(make_option())

    # By default optparse sys.exits on parsing errors. We want to wrap
    # that in our own exception.
    def parser_exit(self, msg):
        raise RequirementsFileParseError(msg)
    parser.exit = parser_exit

    return parser
def join_lines(iterator):
    """
    Joins a line ending in '\\' with the previous line.
    """
    buffered = []
    for line in iterator:
        if line.endswith('\\'):
            # Continuation: strip the backslash and keep accumulating.
            buffered.append(line.strip('\\'))
            # TODO: handle space after '\'.
            # TODO: handle '\' on last line.
        elif buffered:
            # End of a continued run: emit the joined logical line.
            buffered.append(line)
            yield ''.join(buffered)
            buffered = []
        else:
            yield line
def ignore_comments(iterator):
    """
    Strips and filters empty or commented lines.
    """
    for line in iterator:
        # Drop any trailing '#' comment, then surrounding whitespace.
        cleaned = COMMENT_RE.sub('', line).strip()
        if cleaned:
            yield cleaned
def skip_regex(lines, options):
    """
    Optionally exclude lines that match '--skip-requirements-regex'
    """
    # Avoid shadowing this function's name with the option value.
    pattern = options.skip_requirements_regex if options else None
    if pattern:
        lines = filterfalse(re.compile(pattern).search, lines)
    return lines
| mit |
rworkman/blueman | blueman/main/DBusProxies.py | 1 | 1419 | # coding=utf-8
from gi.repository import Gio, GLib
from gi.types import GObjectMeta
class DBusProxyFailed(Exception):
    """Raised when a D-Bus proxy fails to initialize (wraps GLib.Error)."""
    pass
class ProxyBaseMeta(GObjectMeta):
    """Metaclass that makes each proxy class a singleton.

    The `_instance` attribute here is only a fallback default: the first
    `cls._instance` lookup on a proxy class falls through to this metaclass
    attribute (None), and the created instance is then stored on the class
    itself, giving one instance per class.
    """
    # Fallback seen by classes that have not been instantiated yet.
    _instance = None
    def __call__(cls, *args, **kwargs):
        # NOTE(review): the attribute lookup also searches cls.__mro__, so an
        # instance stored on a base proxy class would be returned for its
        # subclasses too -- presumably ProxyBase itself is never instantiated
        # directly; confirm.
        if not cls._instance:
            cls._instance = super().__call__(*args, **kwargs)
        return cls._instance
class ProxyBase(Gio.DBusProxy, metaclass=ProxyBaseMeta):
    """Synchronously-initialized Gio.DBusProxy base for blueman services.

    One instance per subclass (enforced by the metaclass). Raises
    DBusProxyFailed if the proxy cannot be initialized.
    """
    def __init__(self, name, interface_name, object_path='/', systembus=False, *args, **kwargs):
        # System bus for privileged services, session bus otherwise.
        if systembus:
            bustype = Gio.BusType.SYSTEM
        else:
            bustype = Gio.BusType.SESSION
        super().__init__(
            g_name=name,
            g_interface_name=interface_name,
            g_object_path=object_path,
            g_bus_type=bustype,
            g_flags=Gio.DBusProxyFlags.NONE,
            *args, **kwargs
        )
        try:
            # Synchronous init: blocks until the proxy is ready or errors.
            self.init()
        except GLib.Error as e:
            # Surface a blueman-specific error so callers need not know GLib.
            raise DBusProxyFailed(e.message)
class Mechanism(ProxyBase):
    """Singleton proxy for the org.blueman.Mechanism service (system bus)."""
    def __init__(self, *args, **kwargs):
        super().__init__(name='org.blueman.Mechanism', interface_name='org.blueman.Mechanism',
                         systembus=True, *args, **kwargs)
class AppletService(ProxyBase):
    """Singleton proxy for the org.blueman.Applet service (session bus)."""
    def __init__(self, *args, **kwargs):
        super().__init__(name='org.blueman.Applet', interface_name='org.blueman.Applet',
                         *args, **kwargs)
| gpl-3.0 |
Ruide/angr-dev | angr/angr/analyses/cfg/cfg_utils.py | 3 | 7621 |
from collections import defaultdict
import networkx
class SCCPlaceholder(object):
    """Hashable stand-in node for a collapsed strongly-connected component.

    Two placeholders compare (and hash) equal iff they refer to the same
    SCC id, so they can safely be used as graph nodes and dict/set keys.
    """
    __slots__ = ['scc_id']

    def __init__(self, scc_id):
        # Index of the SCC this placeholder represents.
        self.scc_id = scc_id

    def __eq__(self, other):
        if not isinstance(other, SCCPlaceholder):
            return False
        return self.scc_id == other.scc_id

    def __hash__(self):
        return hash('scc_placeholder_%d' % self.scc_id)
class CFGUtils(object):
    """
    A helper class with some static methods and algorithms implemented, that in fact, might take more than just normal
    CFGs.
    """
    @staticmethod
    def find_merge_points(function_addr, function_endpoints, graph): # pylint:disable=unused-argument
        """
        Given a local transition graph of a function, find all merge points inside, and then perform a
        quasi-topological sort of those merge points.

        A merge point might be one of the following cases:
        - two or more paths come together, and ends at the same address.
        - end of the current function

        :param int function_addr: Address of the function.
        :param list function_endpoints: Endpoints of the function. They typically come from Function.endpoints.
        :param networkx.DiGraph graph: A local transition graph of a function. Normally it comes from Function.graph.
        :return: A list of ordered addresses of merge points.
        :rtype: list
        """
        merge_points = set()
        # NOTE(review): in_degree_to_nodes is populated but never read below;
        # it looks like leftover state -- confirm before removing.
        in_degree_to_nodes = defaultdict(set)
        for node in graph.nodes_iter():
            in_degree = graph.in_degree(node)
            in_degree_to_nodes[in_degree].add(node)
            # Any node with multiple predecessors is a merge point.
            if in_degree > 1:
                merge_points.add(node)
        # Order the merge points so earlier ones come first.
        ordered_merge_points = CFGUtils.quasi_topological_sort_nodes(graph, merge_points)
        addrs = [n.addr for n in ordered_merge_points]
        return addrs
    @staticmethod
    def find_widening_points(function_addr, function_endpoints, graph): # pylint: disable=unused-argument
        """
        Given a local transition graph of a function, find all widening points inside.

        Correctly choosing widening points is very important in order to not lose too much information during static
        analysis. We mainly consider merge points that has at least one loop back edges coming in as widening points.

        :param int function_addr: Address of the function.
        :param list function_endpoints: Endpoints of the function, typically coming from Function.endpoints.
        :param networkx.DiGraph graph: A local transition graph of a function, normally Function.graph.
        :return: A list of addresses of widening points.
        :rtype: list
        """
        sccs = networkx.strongly_connected_components(graph)
        widening_addrs = set()
        for scc in sccs:
            if len(scc) == 1:
                node = next(iter(scc))
                if graph.has_edge(node, node):
                    # self loop
                    widening_addrs.add(node.addr)
            else:
                # Pick a node in the SCC that has a predecessor outside of it,
                # i.e. the entry into the loop.
                for n in scc:
                    predecessors = graph.predecessors(n)
                    if any([ p not in scc for p in predecessors]):
                        widening_addrs.add(n.addr)
                        break
        return list(widening_addrs)
    @staticmethod
    def reverse_post_order_sort_nodes(graph, nodes=None):
        """
        Sort a given set of nodes in reverse post ordering.

        :param networkx.DiGraph graph: A local transition graph of a function.
        :param iterable nodes: A collection of nodes to sort.
        :return: A list of sorted nodes.
        :rtype: list
        """
        post_order = networkx.dfs_postorder_nodes(graph)
        if nodes is None:
            return reversed(list(post_order))
        # Rank every node by its post-order position, then sort the requested
        # subset by descending rank.
        addrs_to_index = {}
        for i, n in enumerate(post_order):
            addrs_to_index[n.addr] = i
        return sorted(nodes, key=lambda n: addrs_to_index[n.addr], reverse=True)
    @staticmethod
    def quasi_topological_sort_nodes(graph, nodes=None):
        """
        Sort a given set of nodes from a graph based on the following rules:

        # - if A -> B and not B -> A, then we have A < B
        # - if A -> B and B -> A, then the ordering is undefined

        Following the above rules gives us a quasi-topological sorting of nodes in the graph. It also works for cyclic
        graphs.

        :param networkx.DiGraph graph: A local transition graph of the function.
        :param iterable nodes: A list of nodes to sort. None if you want to sort all nodes inside the graph.
        :return: A list of ordered nodes.
        :rtype: list
        """
        # fast path for single node graphs
        if graph.number_of_nodes() == 1:
            return graph.nodes()
        # make a copy to the graph since we are gonna modify it
        graph_copy = networkx.DiGraph()
        # find all strongly connected components in the graph
        sccs = [ scc for scc in networkx.strongly_connected_components(graph) if len(scc) > 1 ]
        # collapse all strongly connected components
        # (each non-trivial SCC is replaced by a single SCCPlaceholder node so
        # the resulting graph is acyclic and can be topologically sorted)
        for src, dst in graph.edges_iter():
            scc_index = CFGUtils._components_index_node(sccs, src)
            if scc_index is not None:
                src = SCCPlaceholder(scc_index)
            scc_index = CFGUtils._components_index_node(sccs, dst)
            if scc_index is not None:
                dst = SCCPlaceholder(scc_index)
            # drop self-loops introduced by the collapsing
            if isinstance(src, SCCPlaceholder) and isinstance(dst, SCCPlaceholder) and src == dst:
                continue
            if src == dst:
                continue
            graph_copy.add_edge(src, dst)
        # topological sort on acyclic graph `graph_copy`
        tmp_nodes = networkx.topological_sort(graph_copy)
        ordered_nodes = [ ]
        for n in tmp_nodes:
            if isinstance(n, SCCPlaceholder):
                # expand the placeholder back into its member nodes, in order
                CFGUtils._append_scc(graph, ordered_nodes, sccs[n.scc_id])
            else:
                ordered_nodes.append(n)
        if nodes is None:
            return ordered_nodes
        nodes = set(nodes)
        ordered_nodes = [ n for n in ordered_nodes if n in nodes ]
        return ordered_nodes
    @staticmethod
    def _components_index_node(components, node):
        # Return the index of the component containing `node`, or None.
        for i, comp in enumerate(components):
            if node in comp:
                return i
        return None
    @staticmethod
    def _append_scc(graph, ordered_nodes, scc):
        """
        Append all nodes from a strongly connected component to a list of ordered nodes and ensure the topological
        order.

        :param networkx.DiGraph graph: The graph where all nodes belong to.
        :param list ordered_nodes: Ordered nodes.
        :param iterable scc: A set of nodes that forms a strongly connected component in the graph.
        :return: None
        """
        # find the first node in the strongly connected component that is the successor to any node in ordered_nodes
        loop_head = None
        for parent_node in reversed(ordered_nodes):
            for n in scc:
                if n in graph[parent_node]:
                    loop_head = n
                    break
            if loop_head is not None:
                break
        if loop_head is None:
            # randomly pick one
            loop_head = next(iter(scc))
        # Break the loop by removing the back edges into the head, then sort
        # the now-acyclic SCC subgraph recursively.
        subgraph = graph.subgraph(scc) # type: networkx.DiGraph
        for src, _ in subgraph.in_edges(loop_head):
            subgraph.remove_edge(src, loop_head)
        ordered_nodes.extend(CFGUtils.quasi_topological_sort_nodes(subgraph))
| bsd-2-clause |
stefanv/aandete | app/lib/nose/plugins/__init__.py | 97 | 6291 | """
Writing Plugins
---------------
nose supports plugins for test collection, selection, observation and
reporting. There are two basic rules for plugins:
* Plugin classes should subclass :class:`nose.plugins.Plugin`.
* Plugins may implement any of the methods described in the class
:doc:`IPluginInterface <interface>` in nose.plugins.base. Please note that
this class is for documentary purposes only; plugins may not subclass
IPluginInterface.
Hello World
===========
Here's a basic plugin. It doesn't do much so read on for more ideas or dive
into the :doc:`IPluginInterface <interface>` to see all available hooks.
.. code-block:: python
import logging
import os
from nose.plugins import Plugin
log = logging.getLogger('nose.plugins.helloworld')
class HelloWorld(Plugin):
name = 'helloworld'
def options(self, parser, env=os.environ):
super(HelloWorld, self).options(parser, env=env)
def configure(self, options, conf):
super(HelloWorld, self).configure(options, conf)
if not self.enabled:
return
def finalize(self, result):
log.info('Hello pluginized world!')
Registering
===========
.. Note::
Important note: the following applies only to the default
plugin manager. Other plugin managers may use different means to
locate and load plugins.
For nose to find a plugin, it must be part of a package that uses
setuptools_, and the plugin must be included in the entry points defined
in the setup.py for the package:
.. code-block:: python
setup(name='Some plugin',
# ...
entry_points = {
'nose.plugins.0.10': [
'someplugin = someplugin:SomePlugin'
]
},
# ...
)
Once the package is installed with install or develop, nose will be able
to load the plugin.
.. _setuptools: http://peak.telecommunity.com/DevCenter/setuptools
Registering a plugin without setuptools
=======================================
It is currently possible to register a plugin programmatically by
creating a custom nose runner like this :
.. code-block:: python
import nose
from yourplugin import YourPlugin
if __name__ == '__main__':
nose.main(addplugins=[YourPlugin()])
Defining options
================
All plugins must implement the methods ``options(self, parser, env)``
and ``configure(self, options, conf)``. Subclasses of nose.plugins.Plugin
that want the standard options should call the superclass methods.
nose uses optparse.OptionParser from the standard library to parse
arguments. A plugin's ``options()`` method receives a parser
instance. It's good form for a plugin to use that instance only to add
additional arguments that take only long arguments (--like-this). Most
of nose's built-in arguments get their default value from an environment
variable.
A plugin's ``configure()`` method receives the parsed ``OptionParser`` options
object, as well as the current config object. Plugins should configure their
behavior based on the user-selected settings, and may raise exceptions
if the configured behavior is nonsensical.
Logging
=======
nose uses the logging classes from the standard library. To enable users
to view debug messages easily, plugins should use ``logging.getLogger()`` to
acquire a logger in the ``nose.plugins`` namespace.
Recipes
=======
* Writing a plugin that monitors or controls test result output
Implement any or all of ``addError``, ``addFailure``, etc., to monitor test
results. If you also want to monitor output, implement
``setOutputStream`` and keep a reference to the output stream. If you
want to prevent the builtin ``TextTestResult`` output, implement
``setOutputStream`` and *return a dummy stream*. The default output will go
to the dummy stream, while you send your desired output to the real stream.
Example: `examples/html_plugin/htmlplug.py`_
* Writing a plugin that handles exceptions
Subclass :doc:`ErrorClassPlugin <errorclasses>`.
Examples: :doc:`nose.plugins.deprecated <deprecated>`,
:doc:`nose.plugins.skip <skip>`
* Writing a plugin that adds detail to error reports
Implement ``formatError`` and/or ``formatFailure``. The error tuple
you return (error class, error message, traceback) will replace the
original error tuple.
Examples: :doc:`nose.plugins.capture <capture>`,
:doc:`nose.plugins.failuredetail <failuredetail>`
* Writing a plugin that loads tests from files other than python modules
Implement ``wantFile`` and ``loadTestsFromFile``. In ``wantFile``,
return True for files that you want to examine for tests. In
``loadTestsFromFile``, for those files, return an iterable
containing TestCases (or yield them as you find them;
``loadTestsFromFile`` may also be a generator).
Example: :doc:`nose.plugins.doctests <doctests>`
* Writing a plugin that prints a report
Implement ``begin`` if you need to perform setup before testing
begins. Implement ``report`` and output your report to the provided stream.
Examples: :doc:`nose.plugins.cover <cover>`, :doc:`nose.plugins.prof <prof>`
* Writing a plugin that selects or rejects tests
Implement any or all ``want*`` methods. Return False to reject the test
candidate, True to accept it -- which means that the test candidate
will pass through the rest of the system, so you must be prepared to
load tests from it if tests can't be loaded by the core loader or
another plugin -- and None if you don't care.
Examples: :doc:`nose.plugins.attrib <attrib>`,
:doc:`nose.plugins.doctests <doctests>`, :doc:`nose.plugins.testid <testid>`
More Examples
=============
See any builtin plugin or example plugin in the examples_ directory in
the nose source distribution. There is a list of third-party plugins
`on jottit`_.
.. _examples/html_plugin/htmlplug.py: http://python-nose.googlecode.com/svn/trunk/examples/html_plugin/htmlplug.py
.. _examples: http://python-nose.googlecode.com/svn/trunk/examples
.. _on jottit: http://nose-plugins.jottit.com/
"""
from nose.plugins.base import Plugin
from nose.plugins.manager import *
from nose.plugins.plugintest import PluginTester
# When executed directly, run the examples embedded in this module's
# docstring as doctests.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
| bsd-3-clause |
nirmeshk/oh-mainline | vendor/packages/scrapy/scrapy/webservice.py | 18 | 3191 | """
Scrapy web services extension
See docs/topics/webservice.rst
"""
from twisted.web import server, error
from scrapy.xlib.pydispatch import dispatcher
from scrapy.exceptions import NotConfigured
from scrapy import log, signals
from scrapy.utils.jsonrpc import jsonrpc_server_call
from scrapy.utils.serialize import ScrapyJSONEncoder, ScrapyJSONDecoder
from scrapy.utils.misc import load_object
from scrapy.utils.txweb import JsonResource as JsonResource_
from scrapy.utils.reactor import listen_tcp
from scrapy.utils.conf import build_component_list
class JsonResource(JsonResource_):
    """Base resource for the web service that serializes responses as JSON.

    Extends the generic txweb JsonResource with a Scrapy-aware JSON
    encoder so crawler objects can be serialized in responses.
    """
    def __init__(self, crawler, target=None):
        JsonResource_.__init__(self)
        self.crawler = crawler
        # Crawler-aware encoder used when rendering Python objects to JSON.
        self.json_encoder = ScrapyJSONEncoder(crawler=crawler)
class JsonRpcResource(JsonResource):
    """Resource that exposes a target object over JSON-RPC.

    GET returns the target object itself (JSON-serialized by the parent
    class); POST dispatches a JSON-RPC request against the target.  Child
    path segments are resolved as attributes of the target, producing a
    nested JSON-RPC resource tree.
    """
    def __init__(self, crawler, target=None):
        JsonResource.__init__(self, crawler, target)
        self.json_decoder = ScrapyJSONDecoder(crawler=crawler)
        self.crawler = crawler
        self._target = target
    def render_GET(self, txrequest):
        # The returned object is serialized to JSON by the parent resource.
        return self.get_target()
    def render_POST(self, txrequest):
        # The POST body is the raw JSON-RPC request string.
        reqstr = txrequest.content.getvalue()
        target = self.get_target()
        return jsonrpc_server_call(target, reqstr, self.json_decoder)
    def getChild(self, name, txrequest):
        # Map the URL path segment to an attribute of the target, wrapping
        # it in a new JSON-RPC resource; unknown names become a 404.
        target = self.get_target()
        try:
            newtarget = getattr(target, name)
            return JsonRpcResource(self.crawler, newtarget)
        except AttributeError:
            return error.NoResource("No such child resource.")
    def get_target(self):
        # Hook point: subclasses may override to compute the target lazily.
        return self._target
class RootResource(JsonResource):
    """Root of the web service resource tree.

    A GET on the root advertises the names of every registered child
    resource; looking up an empty child path segment (e.g. a trailing
    slash) resolves back to the root itself.
    """
    def render_GET(self, txrequest):
        # List the registered child resources by name.
        names = self.children.keys()
        return {'resources': names}
    def getChild(self, name, txrequest):
        # An empty segment means "this resource"; everything else goes
        # through the default twisted resource lookup.
        if name != '':
            return JsonResource.getChild(self, name, txrequest)
        return self
class WebService(server.Site):
    """Scrapy web service: a JSON-RPC/JSON site exposing crawler resources.

    Built from crawler settings; raises NotConfigured (disabling the
    extension) unless WEBSERVICE_ENABLED is set.  Listening starts and
    stops with the engine via the dispatcher signals connected below.
    """
    def __init__(self, crawler):
        if not crawler.settings.getbool('WEBSERVICE_ENABLED'):
            raise NotConfigured
        self.crawler = crawler
        logfile = crawler.settings['WEBSERVICE_LOGFILE']
        # WEBSERVICE_PORT is a [min, max] range; listen_tcp picks a free port.
        self.portrange = map(int, crawler.settings.getlist('WEBSERVICE_PORT'))
        self.host = crawler.settings['WEBSERVICE_HOST']
        root = RootResource(crawler)
        # Merge base and user-defined resource classes into one ordered list.
        reslist = build_component_list(crawler.settings['WEBSERVICE_RESOURCES_BASE'], \
            crawler.settings['WEBSERVICE_RESOURCES'])
        for res_cls in map(load_object, reslist):
            res = res_cls(crawler)
            # Each resource declares its URL segment via its ws_name attribute.
            root.putChild(res.ws_name, res)
        server.Site.__init__(self, root, logPath=logfile)
        # Suppress twisted's noisy per-connection log messages.
        self.noisy = False
        dispatcher.connect(self.start_listening, signals.engine_started)
        dispatcher.connect(self.stop_listening, signals.engine_stopped)
    @classmethod
    def from_crawler(cls, crawler):
        """Standard Scrapy extension constructor."""
        return cls(crawler)
    def start_listening(self):
        """Bind the site to the first free port in the configured range."""
        self.port = listen_tcp(self.portrange, self.host, self)
        h = self.port.getHost()
        log.msg("Web service listening on %s:%d" % (h.host, h.port), log.DEBUG)
    def stop_listening(self):
        """Release the listening port when the engine stops."""
        self.port.stopListening()
| agpl-3.0 |
sunze/py_flask | venv/lib/python3.4/site-packages/sqlalchemy/orm/dynamic.py | 59 | 13283 | # orm/dynamic.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Dynamic collection API.
Dynamic collections act like Query() objects for read operations and support
basic add/delete mutation.
"""
from .. import log, util, exc
from ..sql import operators
from . import (
attributes, object_session, util as orm_util, strategies,
object_mapper, exc as orm_exc, properties
)
from .query import Query
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="dynamic")
class DynaLoader(strategies.AbstractRelationshipLoader):
    """Loader strategy for relationships configured with ``lazy='dynamic'``.

    Rather than loading a collection, it installs a
    :class:`.DynamicAttributeImpl` on the class, so attribute access
    returns a Query-like appender object.
    """
    def init_class_attribute(self, mapper):
        self.is_class_level = True
        if not self.uselist:
            # Dynamic loading only makes sense for collections, never for
            # scalar (many-to-one / one-to-one) relationships.
            raise exc.InvalidRequestError(
                "On relationship %s, 'dynamic' loaders cannot be used with "
                "many-to-one/one-to-one relationships and/or "
                "uselist=False." % self.parent_property)
        strategies._register_attribute(
            self,
            mapper,
            useobject=True,
            uselist=True,
            impl_class=DynamicAttributeImpl,
            target_mapper=self.parent_property.mapper,
            order_by=self.parent_property.order_by,
            query_class=self.parent_property.query_class,
            backref=self.parent_property.back_populates,
        )
class DynamicAttributeImpl(attributes.AttributeImpl):
    """Attribute implementation backing ``lazy='dynamic'`` relationships.

    Collection membership changes are tracked in a per-instance
    :class:`.CollectionHistory` (stored in ``state.committed_state``)
    instead of a loaded collection; reads are served either from that
    history or by constructing a new appender query.
    """
    uses_objects = True
    accepts_scalar_loader = False
    supports_population = False
    collection = False
    def __init__(self, class_, key, typecallable,
                 dispatch,
                 target_mapper, order_by, query_class=None, **kw):
        super(DynamicAttributeImpl, self).\
            __init__(class_, key, typecallable, dispatch, **kw)
        self.target_mapper = target_mapper
        self.order_by = order_by
        # Choose the query class for attribute access: the default
        # AppenderQuery, a user class that already mixes in AppenderMixin,
        # or a user class wrapped on the fly with the mixin.
        if not query_class:
            self.query_class = AppenderQuery
        elif AppenderMixin in query_class.mro():
            self.query_class = query_class
        else:
            self.query_class = mixin_user_query(query_class)
    def get(self, state, dict_, passive=attributes.PASSIVE_OFF):
        # Without SQL access, only pending (in-memory) additions are
        # visible; otherwise return a fresh appender query.
        if not passive & attributes.SQL_OK:
            return self._get_collection_history(
                state, attributes.PASSIVE_NO_INITIALIZE).added_items
        else:
            return self.query_class(self, state)
    def get_collection(self, state, dict_, user_data=None,
                       passive=attributes.PASSIVE_NO_INITIALIZE):
        if not passive & attributes.SQL_OK:
            return self._get_collection_history(state,
                                                passive).added_items
        else:
            history = self._get_collection_history(state, passive)
            return history.added_plus_unchanged
    @util.memoized_property
    def _append_token(self):
        # Sentinel initiator token identifying append operations from this
        # attribute, used to break backref event cycles.
        return attributes.Event(self, attributes.OP_APPEND)
    @util.memoized_property
    def _remove_token(self):
        # Sentinel initiator token for remove operations (see _append_token).
        return attributes.Event(self, attributes.OP_REMOVE)
    def fire_append_event(self, state, dict_, value, initiator,
                          collection_history=None):
        """Record *value* as added and invoke append listeners."""
        if collection_history is None:
            collection_history = self._modified_event(state, dict_)
        collection_history.add_added(value)
        # Listeners may replace the value (e.g. validators/coercion).
        for fn in self.dispatch.append:
            value = fn(state, value, initiator or self._append_token)
        if self.trackparent and value is not None:
            self.sethasparent(attributes.instance_state(value), state, True)
    def fire_remove_event(self, state, dict_, value, initiator,
                          collection_history=None):
        """Record *value* as removed and invoke remove listeners."""
        if collection_history is None:
            collection_history = self._modified_event(state, dict_)
        collection_history.add_removed(value)
        if self.trackparent and value is not None:
            self.sethasparent(attributes.instance_state(value), state, False)
        for fn in self.dispatch.remove:
            fn(state, value, initiator or self._remove_token)
    def _modified_event(self, state, dict_):
        # Ensure a CollectionHistory exists for this attribute and mark the
        # instance state as modified.
        if self.key not in state.committed_state:
            state.committed_state[self.key] = CollectionHistory(self, state)
        state._modified_event(dict_,
                              self,
                              attributes.NEVER_SET)
        # this is a hack to allow the fixtures.ComparableEntity fixture
        # to work
        dict_[self.key] = True
        return state.committed_state[self.key]
    def set(self, state, dict_, value, initiator,
            passive=attributes.PASSIVE_OFF,
            check_old=None, pop=False):
        # Ignore re-entrant sets originating from our own backref events.
        if initiator and initiator.parent_token is self.parent_token:
            return
        if pop and value is None:
            return
        self._set_iterable(state, dict_, value)
    def _set_iterable(self, state, dict_, iterable, adapter=None):
        """Replace the collection contents with *iterable*, firing
        append/remove events for the computed delta."""
        new_values = list(iterable)
        if state.has_identity:
            # Persistent instance: the "old" collection includes what's in
            # the database (loaded via self.get) plus pending additions.
            old_collection = util.IdentitySet(self.get(state, dict_))
        collection_history = self._modified_event(state, dict_)
        if not state.has_identity:
            old_collection = collection_history.added_items
        else:
            old_collection = old_collection.union(
                collection_history.added_items)
        idset = util.IdentitySet
        # Members present in both old and new are untouched; the rest are
        # appended or removed by identity.
        constants = old_collection.intersection(new_values)
        additions = idset(new_values).difference(constants)
        removals = old_collection.difference(constants)
        for member in new_values:
            if member in additions:
                self.fire_append_event(state, dict_, member, None,
                                       collection_history=collection_history)
        for member in removals:
            self.fire_remove_event(state, dict_, member, None,
                                   collection_history=collection_history)
    def delete(self, *args, **kwargs):
        raise NotImplementedError()
    def set_committed_value(self, state, dict_, value):
        raise NotImplementedError("Dynamic attributes don't support "
                                  "collection population.")
    def get_history(self, state, dict_, passive=attributes.PASSIVE_OFF):
        """Return the attributes.History triple for this collection."""
        c = self._get_collection_history(state, passive)
        return c.as_history()
    def get_all_pending(self, state, dict_,
                        passive=attributes.PASSIVE_NO_INITIALIZE):
        """Return (InstanceState, instance) pairs for all tracked members."""
        c = self._get_collection_history(
            state, passive)
        return [
            (attributes.instance_state(x), x)
            for x in
            c.all_items
        ]
    def _get_collection_history(self, state, passive=attributes.PASSIVE_OFF):
        # Fetch (or lazily create) the pending history; for persistent
        # instances with INIT_OK, build a reconciling history that merges
        # database contents with the pending changes.
        if self.key in state.committed_state:
            c = state.committed_state[self.key]
        else:
            c = CollectionHistory(self, state)
        if state.has_identity and (passive & attributes.INIT_OK):
            return CollectionHistory(self, state, apply_to=c)
        else:
            return c
    def append(self, state, dict_, value, initiator,
               passive=attributes.PASSIVE_OFF):
        if initiator is not self:
            self.fire_append_event(state, dict_, value, initiator)
    def remove(self, state, dict_, value, initiator,
               passive=attributes.PASSIVE_OFF):
        if initiator is not self:
            self.fire_remove_event(state, dict_, value, initiator)
    def pop(self, state, dict_, value, initiator,
            passive=attributes.PASSIVE_OFF):
        self.remove(state, dict_, value, initiator, passive=passive)
class AppenderMixin(object):
    """Query mixin providing collection-like mutation for dynamic loaders.

    Instances act as a pre-filtered Query bound to a parent instance,
    while also supporting ``append()``, ``remove()`` and ``extend()``
    that route through the owning DynamicAttributeImpl's event system.
    """
    # Overridden by mixin_user_query() to remember the user's query class.
    query_class = None
    def __init__(self, attr, state):
        super(AppenderMixin, self).__init__(attr.target_mapper, None)
        self.instance = instance = state.obj()
        self.attr = attr
        mapper = object_mapper(instance)
        prop = mapper._props[self.attr.key]
        # Pre-build the WHERE criterion restricting rows to this parent.
        self._criterion = prop._with_parent(
            instance,
            alias_secondary=False)
        if self.attr.order_by:
            self._order_by = self.attr.order_by
    def session(self):
        """Return the parent's session, or None if the parent has no
        identity (i.e. nothing persisted to query against)."""
        sess = object_session(self.instance)
        # Autoflush first so pending changes are visible to the query.
        if sess is not None and self.autoflush and sess.autoflush \
                and self.instance in sess:
            sess.flush()
        if not orm_util.has_identity(self.instance):
            return None
        else:
            return sess
    # Read-only property; the no-op setter swallows assignment.
    session = property(session, lambda s, x: None)
    def __iter__(self):
        sess = self.session
        if sess is None:
            # No session / transient parent: iterate only pending additions.
            return iter(self.attr._get_collection_history(
                attributes.instance_state(self.instance),
                attributes.PASSIVE_NO_INITIALIZE).added_items)
        else:
            return iter(self._clone(sess))
    def __getitem__(self, index):
        sess = self.session
        if sess is None:
            return self.attr._get_collection_history(
                attributes.instance_state(self.instance),
                attributes.PASSIVE_NO_INITIALIZE).indexed(index)
        else:
            return self._clone(sess).__getitem__(index)
    def count(self):
        sess = self.session
        if sess is None:
            return len(self.attr._get_collection_history(
                attributes.instance_state(self.instance),
                attributes.PASSIVE_NO_INITIALIZE).added_items)
        else:
            return self._clone(sess).count()
    def _clone(self, sess=None):
        # note we're returning an entirely new Query class instance
        # here without any assignment capabilities; the class of this
        # query is determined by the session.
        instance = self.instance
        if sess is None:
            sess = object_session(instance)
            if sess is None:
                raise orm_exc.DetachedInstanceError(
                    "Parent instance %s is not bound to a Session, and no "
                    "contextual session is established; lazy load operation "
                    "of attribute '%s' cannot proceed" % (
                        orm_util.instance_str(instance), self.attr.key))
        if self.query_class:
            query = self.query_class(self.attr.target_mapper, session=sess)
        else:
            query = sess.query(self.attr.target_mapper)
        # Carry over the parent-restriction and ordering onto the clone.
        query._criterion = self._criterion
        query._order_by = self._order_by
        return query
    def extend(self, iterator):
        """Append each item from *iterator* to the collection."""
        for item in iterator:
            self.attr.append(
                attributes.instance_state(self.instance),
                attributes.instance_dict(self.instance), item, None)
    def append(self, item):
        """Append *item*, firing the dynamic attribute's append events."""
        self.attr.append(
            attributes.instance_state(self.instance),
            attributes.instance_dict(self.instance), item, None)
    def remove(self, item):
        """Remove *item*, firing the dynamic attribute's remove events."""
        self.attr.remove(
            attributes.instance_state(self.instance),
            attributes.instance_dict(self.instance), item, None)
class AppenderQuery(AppenderMixin, Query):
    """A dynamic query that supports basic collection storage operations.

    Default query class used by DynamicAttributeImpl: a full ORM Query
    combined with AppenderMixin's append/remove/extend behavior.
    """
def mixin_user_query(cls):
    """Return a new class with AppenderQuery functionality layered over.

    The generated class derives from both :class:`.AppenderMixin` and the
    given user query class, and records the user class in ``query_class``
    so that clones of the appender query are constructed from it.
    """
    bases = (AppenderMixin, cls)
    namespace = {'query_class': cls}
    return type('Appender' + cls.__name__, bases, namespace)
class CollectionHistory(object):
    """Overrides AttributeHistory to receive append/remove events directly.

    Two modes: a plain pending-change tracker (no ``apply_to``), or a
    reconciling snapshot that loads the current database contents into
    ``unchanged_items`` and layers an existing history's added/deleted
    sets over them.
    """
    def __init__(self, attr, state, apply_to=None):
        if apply_to:
            # Load the persisted collection (autoflush off so we don't
            # recurse into a flush) as the "unchanged" baseline.
            coll = AppenderQuery(attr, state).autoflush(False)
            self.unchanged_items = util.OrderedIdentitySet(coll)
            self.added_items = apply_to.added_items
            self.deleted_items = apply_to.deleted_items
            self._reconcile_collection = True
        else:
            self.deleted_items = util.OrderedIdentitySet()
            self.added_items = util.OrderedIdentitySet()
            self.unchanged_items = util.OrderedIdentitySet()
            self._reconcile_collection = False
    @property
    def added_plus_unchanged(self):
        """All members currently considered present."""
        return list(self.added_items.union(self.unchanged_items))
    @property
    def all_items(self):
        """Every member ever tracked: added, unchanged and deleted."""
        return list(self.added_items.union(
            self.unchanged_items).union(self.deleted_items))
    def as_history(self):
        """Render as an attributes.History(added, unchanged, deleted)."""
        if self._reconcile_collection:
            # Anything already in the database isn't really "added"; a
            # delete only counts if the member was actually persisted.
            added = self.added_items.difference(self.unchanged_items)
            deleted = self.deleted_items.intersection(self.unchanged_items)
            unchanged = self.unchanged_items.difference(deleted)
        else:
            added, unchanged, deleted = self.added_items,\
                self.unchanged_items,\
                self.deleted_items
        return attributes.History(
            list(added),
            list(unchanged),
            list(deleted),
        )
    def indexed(self, index):
        # Index into pending additions only (used when no session exists).
        return list(self.added_items)[index]
    def add_added(self, value):
        self.added_items.add(value)
    def add_removed(self, value):
        # Removing a not-yet-flushed addition simply cancels it; otherwise
        # record a true deletion.
        if value in self.added_items:
            self.added_items.remove(value)
        else:
            self.deleted_items.add(value)
| mit |
guillochon/FriendlyFit | mosfit/modules/seds/sed.py | 5 | 3376 | """Definitions for the `SED` class."""
import numpy as np
from astropy import constants as c
from astropy import units as u
from mosfit.modules.module import Module
# Important: Only define one ``Module`` class per file.
class SED(Module):
    """Template class for SED Modules.
    Modules that inherit from the SED class should produce a `seds` key, which
    contains a spectral energy distribution for each time. The units of the SED
    should be in erg/s/Angstrom.
    """
    # Speed of light expressed in Angstrom/s (CGS), used to convert the
    # sample wavelengths into frequencies.
    C_OVER_ANG = (c.c / u.Angstrom).cgs.value
    def __init__(self, **kwargs):
        """Initialize module."""
        super(SED, self).__init__(**kwargs)
        # Default number of wavelength sample points per bandpass.
        self._N_PTS = 24 + 1
        self._sample_wavelengths = []
    def receive_requests(self, **requests):
        """Receive requests from other ``Module`` objects.

        If explicit sample wavelengths are not provided, construct a
        wavelength grid per bandpass from 'band_wave_ranges': N_PTS
        linearly spaced points spanning each range plus any interior
        wavelengths of the range, padded so every band has the same
        number of points (allowing a rectangular numpy array).
        """
        self._sample_wavelengths = requests.get('sample_wavelengths', [])
        if not self._sample_wavelengths:
            wave_ranges = requests.get('band_wave_ranges', [])
            if not wave_ranges:
                return
            max_len = 0
            for rng in wave_ranges:
                min_wav, max_wav = min(rng), max(rng)
                # Interior points of the range (endpoints removed) are kept
                # explicitly in the grid.
                rngc = list(rng)
                rngc.remove(min_wav)
                rngc.remove(max_wav)
                self._sample_wavelengths.append(np.unique(np.concatenate(
                    (np.linspace(min_wav, max_wav,
                                 self._N_PTS - len(rngc)), np.array(rngc)))))
                llen = len(self._sample_wavelengths[-1])
                if llen > max_len:
                    max_len = llen
            # Pad shorter grids with extra points (exponentially spaced
            # above the band minimum) so all rows share max_len entries.
            for wi, wavs in enumerate(self._sample_wavelengths):
                if len(wavs) != max_len:
                    self._sample_wavelengths[wi] = np.unique(np.concatenate(
                        (wavs, (max(wavs) - min(wavs)) * 1.0 / np.exp(
                            np.arange(1, 1 + max_len - len(
                                wavs))) + min(wavs))))
                    # np.unique may collapse duplicates, leaving the row
                    # short; treat that as an unrecoverable grid failure.
                    if len(self._sample_wavelengths[wi]) != max_len:
                        raise RuntimeError(
                            'Could not construct wavelengths for bandpass.')
        # Note: Many of these will just be 0 - 1, but faster to have a
        # single type numpy array than a ragged list of lists.
        self._sample_wavelengths = np.array(self._sample_wavelengths,
                                            dtype=float)
        self._sample_frequencies = self.C_OVER_ANG / self._sample_wavelengths
    def add_to_existing_seds(self, new_seds, **kwargs):
        """Add SED from module to existing ``seds`` key.
        Parameters
        ----------
        new_seds : array
            The new SEDs to add to the existing SEDs.
        Returns
        -------
        new_seds : array
            The result of summing the new and existing SEDs.
        """
        old_seds = kwargs.get('seds', None)
        if old_seds is not None:
            # Element-wise, in-place accumulation per time step.
            for i, sed in enumerate(old_seds):
                new_seds[i] += sed
        return new_seds
    def send_request(self, request):
        """Send a request."""
        if request == 'sample_wavelengths':
            return self._sample_wavelengths
        return []
    def set_data(self, band_sampling_points):
        """Set SED data."""
        # Override the default per-band sample count.
        self._N_PTS = band_sampling_points
        return True
| mit |
jimbydamonk/ansible-modules-core | cloud/amazon/ec2_key.py | 51 | 7841 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_key
version_added: "1.5"
short_description: maintain an ec2 key pair.
description:
- maintains ec2 key pairs. This module has a dependency on python-boto >= 2.5
options:
name:
description:
- Name of the key pair.
required: true
key_material:
description:
- Public key material.
required: false
state:
description:
- create or delete keypair
required: false
default: 'present'
aliases: []
wait:
description:
- Wait for the specified action to complete before returning.
required: false
default: false
aliases: []
version_added: "1.6"
wait_timeout:
description:
- How long before wait gives up, in seconds
required: false
default: 300
aliases: []
version_added: "1.6"
extends_documentation_fragment:
- aws
- ec2
author: "Vincent Viallet (@zbal)"
'''
EXAMPLES = '''
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Creates a new ec2 key pair named `example` if not present, returns generated
# private key
- name: example ec2 key
ec2_key:
name: example
# Creates a new ec2 key pair named `example` if not present using provided key
# material. This could use the 'file' lookup plugin to pull this off disk.
- name: example2 ec2 key
ec2_key:
name: example2
key_material: 'ssh-rsa AAAAxyz...== me@example.com'
state: present
# Creates a new ec2 key pair named `example` if not present using provided key
# material
- name: example3 ec2 key
ec2_key:
name: example3
key_material: "{{ item }}"
with_file: /path/to/public_key.id_rsa.pub
# Removes ec2 key pair by name
- name: remove example key
ec2_key:
name: example
state: absent
'''
try:
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
import random
import string
def main():
    """Ansible entry point: ensure an EC2 key pair is present or absent.

    NOTE: this is Python 2 style code (``except Exception, e``) as used
    by the Ansible 1.x/2.x module framework; do not modernize in place.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
            name=dict(required=True),
            key_material=dict(required=False),
            state = dict(default='present', choices=['present', 'absent']),
            wait = dict(type='bool', default=False),
            wait_timeout = dict(default=300),
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')
    name = module.params['name']
    state = module.params.get('state')
    key_material = module.params.get('key_material')
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))
    changed = False
    ec2 = ec2_connect(module)
    # find the key if present
    key = ec2.get_key_pair(name)
    # Ensure requested key is absent
    if state == 'absent':
        if key:
            '''found a match, delete it'''
            if not module.check_mode:
                try:
                    key.delete()
                    # Optionally poll until AWS no longer reports the key.
                    if wait:
                        start = time.time()
                        action_complete = False
                        while (time.time() - start) < wait_timeout:
                            if not ec2.get_key_pair(name):
                                action_complete = True
                                break
                            time.sleep(1)
                        if not action_complete:
                            module.fail_json(msg="timed out while waiting for the key to be removed")
                except Exception, e:
                    module.fail_json(msg="Unable to delete key pair '%s' - %s" % (key, e))
            key = None
            changed = True
    # Ensure requested key is present
    elif state == 'present':
        if key:
            # existing key found
            if key_material:
                # EC2's fingerprints are non-trivial to generate, so push this key
                # to a temporary name and make ec2 calculate the fingerprint for us.
                #
                # http://blog.jbrowne.com/?p=23
                # https://forums.aws.amazon.com/thread.jspa?messageID=352828
                # find an unused name
                test = 'empty'
                while test:
                    randomchars = [random.choice(string.ascii_letters + string.digits) for x in range(0,10)]
                    tmpkeyname = "ansible-" + ''.join(randomchars)
                    test = ec2.get_key_pair(tmpkeyname)
                # create tmp key
                tmpkey = ec2.import_key_pair(tmpkeyname, key_material)
                # get tmp key fingerprint
                tmpfingerprint = tmpkey.fingerprint
                # delete tmp key
                tmpkey.delete()
                # NOTE(review): the temporary key is created/deleted even in
                # check mode; only the replacement below honors check_mode.
                if key.fingerprint != tmpfingerprint:
                    if not module.check_mode:
                        key.delete()
                        key = ec2.import_key_pair(name, key_material)
                        if wait:
                            start = time.time()
                            action_complete = False
                            while (time.time() - start) < wait_timeout:
                                if ec2.get_key_pair(name):
                                    action_complete = True
                                    break
                                time.sleep(1)
                            if not action_complete:
                                module.fail_json(msg="timed out while waiting for the key to be re-created")
                    changed = True
            pass
        # if the key doesn't exist, create it now
        else:
            '''no match found, create it'''
            if not module.check_mode:
                if key_material:
                    '''We are providing the key, need to import'''
                    key = ec2.import_key_pair(name, key_material)
                else:
                    '''
                    No material provided, let AWS handle the key creation and
                    retrieve the private key
                    '''
                    key = ec2.create_key_pair(name)
                if wait:
                    start = time.time()
                    action_complete = False
                    while (time.time() - start) < wait_timeout:
                        if ec2.get_key_pair(name):
                            action_complete = True
                            break
                        time.sleep(1)
                    if not action_complete:
                        module.fail_json(msg="timed out while waiting for the key to be created")
            changed = True
    # Report the resulting key; private material is only available when the
    # key was created by AWS in this run (key.material set by create_key_pair).
    if key:
        data = {
            'name': key.name,
            'fingerprint': key.fingerprint
        }
        if key.material:
            data.update({'private_key': key.material})
        module.exit_json(changed=changed, key=data)
    else:
        module.exit_json(changed=changed, key=None)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| gpl-3.0 |
LarsFronius/ansible | lib/ansible/modules/network/openswitch/ops_facts.py | 70 | 12132 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ops_facts
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Collect device specific facts from OpenSwitch
description:
- Collects facts from devices running the OpenSwitch operating
system. Fact collection is supported over both Cli and Rest
transports. This module prepends all of the base network fact keys
with C(ansible_net_<fact>). The facts module will always collect a
base set of facts from the device and can enable or disable
collection of additional facts.
- The facts collected from pre Ansible 2.2 are still available and
are collected for backwards compatibility; however, these facts
should be considered deprecated and will be removed in a future
release.
extends_documentation_fragment: openswitch
options:
config:
description:
- When enabled, this argument will collect the current
running configuration from the remote device. If the
C(transport=rest) then the collected configuration will
be the full system configuration.
required: false
choices:
- true
- false
default: false
endpoints:
description:
- Accepts a list of endpoints to retrieve from the remote
device using the REST API. The endpoints should be valid
endpoints available on the device. This argument is only
valid when the C(transport=rest).
required: false
default: null
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, hardware, config, legacy, and interfaces. Can specify a
list of values to include a larger subset. Values can also be used
with an initial C(M(!)) to specify that a specific subset should
not be collected.
required: false
default: '!config'
version_added: "2.2"
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
---
vars:
cli:
host: "{{ inventory_hostname }}"
username: netop
password: netop
transport: cli
rest:
host: "{{ inventory_hostname }}"
username: netop
password: netop
transport: rest
---
- ops_facts:
gather_subset: all
provider: "{{ rest }}"
# Collect only the config and default facts
- ops_facts:
gather_subset: config
provider: "{{ cli }}"
# Do not collect config facts
- ops_facts:
gather_subset:
- "!config"
provider: "{{ cli }}"
- name: collect device facts
ops_facts:
provider: "{{ cli }}"
- name: include the config
ops_facts:
config: yes
provider: "{{ rest }}"
- name: include a set of rest endpoints
ops_facts:
endpoints:
- /system/interfaces/1
- /system/interfaces/2
provider: "{{ rest }}"
"""
RETURN = """
ansible_net_gather_subset:
description: The list of fact subsets collected from the device
returned: always
type: list
# default
ansible_net_model:
description: The model name returned from the device
returned: when transport is cli
type: str
ansible_net_serialnum:
description: The serial number of the remote device
returned: when transport is cli
type: str
ansible_net_version:
description: The operating system version running on the remote device
returned: always
type: str
ansible_net_hostname:
description: The configured hostname of the device
returned: always
type: string
ansible_net_image:
description: The image file the device is running
returned: when transport is cli
type: string
# config
ansible_net_config:
description: The current active config from the device
returned: when config is enabled
type: str
# legacy (pre Ansible 2.2)
config:
description: The current system configuration
returned: when enabled
type: string
sample: '....'
hostname:
description: returns the configured hostname
returned: always
type: string
sample: ops01
version:
description: The current version of OpenSwitch
returned: always
type: string
sample: '0.3.0'
endpoints:
description: The JSON response from the URL endpoint
returned: when endpoints argument is defined and transport is rest
type: list
sample: [{....}, {....}]
"""
import re
import ansible.module_utils.openswitch
from ansible.module_utils.netcli import CommandRunner, AddCommandError
from ansible.module_utils.network import NetworkModule
from ansible.module_utils.six import iteritems
def add_command(runner, command):
    """Register *command* with the runner, ignoring registration errors.

    The runner raises AddCommandError for any issue adding a command
    (typically a duplicate); that situation is harmless here, so the
    exception is deliberately swallowed.
    """
    try:
        runner.add_command(command)
    except AddCommandError:
        pass
class FactsBase(object):
    """Base class for fact collectors.

    Subclasses implement ``commands()`` to queue CLI commands and
    override ``cli()`` / ``rest()`` to populate ``self.facts`` for the
    respective transport.  ``populate()`` dispatches to the method named
    after the configured transport.
    """
    def __init__(self, module, runner):
        self.module = module
        self.runner = runner
        self.facts = dict()
        self.transport = module.params['transport']
        # The CLI transport registers its commands up front so the runner
        # can execute everything in a single pass.
        if self.transport == 'cli':
            self.commands()
    def commands(self):
        # Must be provided by subclasses supporting the CLI transport.
        raise NotImplementedError
    def populate(self):
        # Dispatch to self.cli() or self.rest() based on the transport name.
        getattr(self, self.transport)()
    def cli(self):
        pass
    def rest(self):
        pass
class Default(FactsBase):
    """Collects the always-gathered base facts (version, model, etc.)."""
    def commands(self):
        add_command(self.runner, 'show system')
        add_command(self.runner, 'show hostname')
    def rest(self):
        self.facts.update(self.get_system())
    def cli(self):
        # Parse the individual facts out of the 'show system' output.
        data = self.runner.get_command('show system')
        self.facts['version'] = self.parse_version(data)
        self.facts['serialnum'] = self.parse_serialnum(data)
        self.facts['model'] = self.parse_model(data)
        self.facts['image'] = self.parse_image(data)
        self.facts['hostname'] = self.runner.get_command('show hostname')
    def parse_version(self, data):
        # Returns None implicitly when the pattern is absent.
        match = re.search(r'OpenSwitch Version\s+: (\S+)', data)
        if match:
            return match.group(1)
    def parse_model(self, data):
        match = re.search(r'Platform\s+:\s(\S+)', data, re.M)
        if match:
            return match.group(1)
    def parse_image(self, data):
        match = re.search(r'\(Build: (\S+)\)', data, re.M)
        if match:
            return match.group(1)
    def parse_serialnum(self, data):
        match = re.search(r'Serial Number\s+: (\S+)', data)
        if match:
            return match.group(1)
    def get_system(self):
        """Fetch hostname and version via the REST /system endpoint."""
        response = self.module.connection.get('/system')
        return dict(
            hostname=response.json['configuration']['hostname'],
            version=response.json['status']['switch_version']
        )
class Config(FactsBase):
    """Collects the device's running configuration (CLI transport only)."""
    def commands(self):
        add_command(self.runner, 'show running-config')
    def cli(self):
        # The raw 'show running-config' output becomes the config fact.
        self.facts['config'] = self.runner.get_command('show running-config')
class Legacy(FactsBase):
    """Collects facts under the pre-2.2 ops_facts names.

    Fact keys carry a leading underscore; main() strips it so these appear
    as top-level (non ``ansible_net_*``) facts, matching ops_facts 2.1.
    """
    # facts from ops_facts 2.1
    def commands(self):
        add_command(self.runner, 'show system')
        add_command(self.runner, 'show hostname')
        # Only fetch the (potentially large) config when explicitly requested.
        if self.module.params['config']:
            add_command(self.runner, 'show running-config')
    def rest(self):
        self.facts['_endpoints'] = self.get_endpoints()
        self.facts.update(self.get_system())
        if self.module.params['config']:
            self.facts['_config'] = self.get_config()
    def cli(self):
        self.facts['_hostname'] = self.runner.get_command('show hostname')
        data = self.runner.get_command('show system')
        self.facts['_version'] = self.parse_version(data)
        if self.module.params['config']:
            self.facts['_config'] = self.runner.get_command('show running-config')
    def parse_version(self, data):
        # Returns None (implicit) when the version banner is absent.
        match = re.search(r'OpenSwitch Version\s+: (\S+)', data)
        if match:
            return match.group(1)
    def get_endpoints(self):
        responses = list()
        urls = self.module.params['endpoints'] or list()
        for ep in urls:
            response = self.module.connection.get(ep)
            # NOTE(review): headers['status'] is compared against the int 200;
            # assumes the transport stores it as an int -- confirm.
            if response.headers['status'] != 200:
                self.module.fail_json(msg=response.headers['msg'])
            responses.append(response.json)
        return responses
    def get_system(self):
        response = self.module.connection.get('/system')
        return dict(
            _hostname=response.json['configuration']['hostname'],
            _version=response.json['status']['switch_version']
        )
    def get_config(self):
        response = self.module.connection.get('/system/full-configuration')
        return response.json
def check_args(module, warnings):
    """Validate cross-argument constraints.

    Soft failures are appended to *warnings* rather than raised: requesting
    ``endpoints`` with a non-REST transport is tolerated but the endpoints
    are skipped.
    """
    endpoints_requested = bool(module.params['endpoints'])
    if endpoints_requested and module.params['transport'] != 'rest':
        warnings.append('Endpoints can only be collected when transport is '
                        'set to "rest". Endpoints will not be collected')
# Map each gather_subset name to its collector class; VALID_SUBSETS is the
# set of names accepted by the gather_subset module argument.
FACT_SUBSETS = dict(
    default=Default,
    config=Config,
    legacy=Legacy
)
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
def main():
    """Entry point: resolve the requested fact subsets, run the collectors,
    and exit with a namespaced ansible_facts dictionary."""
    spec = dict(
        gather_subset=dict(default=['!config'], type='list'),
        # the next two arguments are legacy from pre 2.2 ops_facts
        # these will be deprecated and ultimately removed
        config=dict(default=False, type='bool'),
        endpoints=dict(type='list'),
        transport=dict(default='cli', choices=['cli', 'rest'])
    )
    module = NetworkModule(argument_spec=spec, supports_check_mode=True)
    gather_subset = module.params['gather_subset']
    warnings = list()
    check_args(module, warnings)
    # Resolve the 'all' / '!name' spellings into include and exclude sets.
    runable_subsets = set()
    exclude_subsets = set()
    for subset in gather_subset:
        if subset == 'all':
            runable_subsets.update(VALID_SUBSETS)
            continue
        if subset.startswith('!'):
            subset = subset[1:]
            if subset == 'all':
                exclude_subsets.update(VALID_SUBSETS)
                continue
            exclude = True
        else:
            exclude = False
        if subset not in VALID_SUBSETS:
            module.fail_json(msg='Bad subset')
        if exclude:
            exclude_subsets.add(subset)
        else:
            runable_subsets.add(subset)
    # No explicit includes means "everything" (minus the excludes).
    if not runable_subsets:
        runable_subsets.update(VALID_SUBSETS)
    runable_subsets.difference_update(exclude_subsets)
    # 'default' and 'legacy' are always collected, even when excluded above.
    runable_subsets.add('default')
    runable_subsets.add('legacy')
    facts = dict()
    facts['gather_subset'] = list(runable_subsets)
    runner = CommandRunner(module)
    instances = list()
    for key in runable_subsets:
        instances.append(FACT_SUBSETS[key](module, runner))
    # Instantiating the collectors queued their CLI commands; execute them in
    # one batch before asking each collector to populate its facts.
    if module.params['transport'] == 'cli':
        runner.run()
    try:
        for inst in instances:
            inst.populate()
            facts.update(inst.facts)
    except Exception:
        # NOTE(review): any error during fact population is swallowed and the
        # module exits "successfully" with the raw runner output -- confirm
        # this is intentional rather than a debugging leftover.
        module.exit_json(out=module.from_json(runner.items))
    ansible_facts = dict()
    for key, value in iteritems(facts):
        # this is to maintain compatibility with ops_facts 2.1
        if key.startswith('_'):
            ansible_facts[key[1:]] = value
        else:
            key = 'ansible_net_%s' % key
            ansible_facts[key] = value
    module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
if __name__ == '__main__':
    main()
| gpl-3.0 |
trondhindenes/ansible | lib/ansible/plugins/connection/paramiko_ssh.py | 19 | 22714 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
author: Ansible Core Team
connection: paramiko
short_description: Run tasks via python ssh (paramiko)
description:
- Use the python ssh implementation (Paramiko) to connect to targets
- The paramiko transport is provided because many distributions, in particular EL6 and before do not support ControlPersist
in their SSH implementations.
- This is needed on the Ansible control machine to be reasonably efficient with connections.
Thus paramiko is faster for most users on these platforms.
Users with ControlPersist capability can consider using -c ssh or configuring the transport in the configuration file.
- This plugin also borrows a lot of settings from the ssh plugin as they both cover the same protocol.
version_added: "0.1"
options:
remote_addr:
description:
- Address of the remote target
default: inventory_hostname
vars:
- name: ansible_host
- name: ansible_ssh_host
- name: ansible_paramiko_host
remote_user:
description:
- User to login/authenticate as
- Can be set from the CLI via the C(--user) or C(-u) options.
vars:
- name: ansible_user
- name: ansible_ssh_user
- name: ansible_paramiko_user
env:
- name: ANSIBLE_REMOTE_USER
- name: ANSIBLE_PARAMIKO_REMOTE_USER
version_added: '2.5'
ini:
- section: defaults
key: remote_user
- section: paramiko_connection
key: remote_user
version_added: '2.5'
password:
description:
- Secret used to either login the ssh server or as a passphrase for ssh keys that require it
- Can be set from the CLI via the C(--ask-pass) option.
vars:
- name: ansible_password
- name: ansible_ssh_pass
- name: ansible_paramiko_pass
version_added: '2.5'
host_key_auto_add:
description: 'TODO: write it'
env: [{name: ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD}]
ini:
- {key: host_key_auto_add, section: paramiko_connection}
type: boolean
look_for_keys:
default: True
description: 'TODO: write it'
env: [{name: ANSIBLE_PARAMIKO_LOOK_FOR_KEYS}]
ini:
- {key: look_for_keys, section: paramiko_connection}
type: boolean
proxy_command:
default: ''
description:
- Proxy information for running the connection via a jumphost
- Also this plugin will scan 'ssh_args', 'ssh_extra_args' and 'ssh_common_args' from the 'ssh' plugin settings for proxy information if set.
env: [{name: ANSIBLE_PARAMIKO_PROXY_COMMAND}]
ini:
- {key: proxy_command, section: paramiko_connection}
pty:
default: True
description: 'TODO: write it'
env:
- name: ANSIBLE_PARAMIKO_PTY
ini:
- section: paramiko_connection
key: pty
type: boolean
record_host_keys:
default: True
description: 'TODO: write it'
env: [{name: ANSIBLE_PARAMIKO_RECORD_HOST_KEYS}]
ini:
- section: paramiko_connection
key: record_host_keys
type: boolean
host_key_checking:
description: 'Set this to "False" if you want to avoid host key checking by the underlying tools Ansible uses to connect to the host'
type: boolean
default: True
env:
- name: ANSIBLE_HOST_KEY_CHECKING
- name: ANSIBLE_SSH_HOST_KEY_CHECKING
version_added: '2.5'
- name: ANSIBLE_PARAMIKO_HOST_KEY_CHECKING
version_added: '2.5'
ini:
- section: defaults
key: host_key_checking
- section: paramiko_connection
key: host_key_checking
version_added: '2.5'
vars:
- name: ansible_host_key_checking
version_added: '2.5'
- name: ansible_ssh_host_key_checking
version_added: '2.5'
- name: ansible_paramiko_host_key_checking
version_added: '2.5'
use_persistent_connections:
description: 'Toggles the use of persistence for connections'
type: boolean
default: False
env:
- name: ANSIBLE_USE_PERSISTENT_CONNECTIONS
ini:
- section: defaults
key: use_persistent_connections
# TODO:
#timeout=self._play_context.timeout,
"""
import warnings
import os
import socket
import tempfile
import traceback
import fcntl
import sys
import re
from termios import tcflush, TCIFLUSH
from binascii import hexlify
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import input
from ansible.plugins.connection import ConnectionBase
from ansible.utils.path import makedirs_safe
from ansible.module_utils._text import to_bytes, to_native
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
# Prompt shown (or raised) when an unknown host key is encountered; the
# AUTHENTICITY_MSG[1:92] slice in MyAddPolicy depends on this exact layout.
AUTHENTICITY_MSG = """
paramiko: The authenticity of host '%s' can't be established.
The %s key fingerprint is %s.
Are you sure you want to continue connecting (yes/no)?
"""
# SSH Options Regex
SETTINGS_REGEX = re.compile(r'(\w+)(?:\s*=\s*|\s+)(.+)')
# prevent paramiko warning noise -- see http://stackoverflow.com/questions/3920502/
# HAVE_PARAMIKO records whether the optional paramiko dependency imported;
# _connect_uncached() raises a friendly error when it is False.
HAVE_PARAMIKO = False
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    try:
        import paramiko
        HAVE_PARAMIKO = True
    except ImportError:
        pass
class MyAddPolicy(object):
    """
    Based on AutoAddPolicy in paramiko so we can determine when keys are added
    and also prompt for input.
    Policy for automatically adding the hostname and new host key to the
    local L{HostKeys} object, and saving it. This is used by L{SSHClient}.
    """
    def __init__(self, new_stdin, connection):
        self._new_stdin = new_stdin
        self.connection = connection
        self._options = connection._options
    def missing_host_key(self, client, hostname, key):
        # Only prompt when host key checking is enabled and auto-add is off;
        # otherwise fall through and accept the key silently.
        if all((self._options['host_key_checking'], not self._options['host_key_auto_add'])):
            fingerprint = hexlify(key.get_fingerprint())
            ktype = key.get_name()
            if self.connection.get_option('use_persistent_connections') or self.connection.force_persistence:
                # don't print the prompt string since the user cannot respond
                # to the question anyway
                # (the [1:92] slice drops the leading newline and the trailing
                # yes/no question from AUTHENTICITY_MSG)
                raise AnsibleError(AUTHENTICITY_MSG[1:92] % (hostname, ktype, fingerprint))
            # Serialize interactive prompting across workers and swap in the
            # worker's stdin so the user can answer from the terminal.
            self.connection.connection_lock()
            old_stdin = sys.stdin
            sys.stdin = self._new_stdin
            # clear out any premature input on sys.stdin
            tcflush(sys.stdin, TCIFLUSH)
            inp = input(AUTHENTICITY_MSG % (hostname, ktype, fingerprint))
            sys.stdin = old_stdin
            self.connection.connection_unlock()
            if inp not in ['yes', 'y', '']:
                raise AnsibleError("host connection rejected by user")
        # Mark the key so Connection.close() knows to append it to known_hosts.
        key._added_by_ansible_this_time = True
        # existing implementation below:
        client._host_keys.add(hostname, key.get_name(), key)
        # host keys are actually saved in close() function below
        # in order to control ordering.
# keep connection objects on a per host basis to avoid repeated attempts to reconnect
# (keyed by "<remote_addr>__<remote_user>__", see Connection._cache_key)
SSH_CONNECTION_CACHE = {}
SFTP_CONNECTION_CACHE = {}
class Connection(ConnectionBase):
    ''' SSH based connections with Paramiko '''
    # name used by the connection loader to select this plugin
    transport = 'paramiko'
    # optional logger channel name, see _set_log_channel()
    _log_channel = None
    def _cache_key(self):
        # Key used for both module-level connection caches.
        return "%s__%s__" % (self._play_context.remote_addr, self._play_context.remote_user)
    def _connect(self):
        '''Attach a cached SSH connection for this host/user, creating one on miss.'''
        cache_key = self._cache_key()
        if cache_key in SSH_CONNECTION_CACHE:
            self.ssh = SSH_CONNECTION_CACHE[cache_key]
        else:
            self.ssh = SSH_CONNECTION_CACHE[cache_key] = self._connect_uncached()
        return self
    def _set_log_channel(self, name):
        '''Mimic paramiko.SSHClient.set_log_channel'''
        self._log_channel = name
    def _parse_proxy_command(self, port=22):
        '''Return the sock kwarg dict for ssh.connect(), built from any
        ProxyCommand found in the ssh plugin args or our own option.'''
        proxy_command = None
        # Parse ansible_ssh_common_args, specifically looking for ProxyCommand
        ssh_args = [
            getattr(self._play_context, 'ssh_extra_args', '') or '',
            getattr(self._play_context, 'ssh_common_args', '') or '',
            getattr(self._play_context, 'ssh_args', '') or '',
        ]
        # NOTE(review): ssh_args is a list literal, so this check is always
        # true -- probably a leftover guard.
        if ssh_args is not None:
            args = self._split_ssh_args(' '.join(ssh_args))
            for i, arg in enumerate(args):
                if arg.lower() == 'proxycommand':
                    # _split_ssh_args split ProxyCommand from the command itself
                    proxy_command = args[i + 1]
                else:
                    # ProxyCommand and the command itself are a single string
                    match = SETTINGS_REGEX.match(arg)
                    if match:
                        if match.group(1).lower() == 'proxycommand':
                            proxy_command = match.group(2)
                if proxy_command:
                    break
        proxy_command = proxy_command or self.get_option('proxy_command')
        sock_kwarg = {}
        if proxy_command:
            # Expand the same %h/%p/%r tokens OpenSSH supports in ProxyCommand.
            replacers = {
                '%h': self._play_context.remote_addr,
                '%p': port,
                '%r': self._play_context.remote_user
            }
            for find, replace in replacers.items():
                proxy_command = proxy_command.replace(find, str(replace))
            try:
                sock_kwarg = {'sock': paramiko.ProxyCommand(proxy_command)}
                display.vvv("CONFIGURE PROXY COMMAND FOR CONNECTION: %s" % proxy_command, host=self._play_context.remote_addr)
            except AttributeError:
                display.warning('Paramiko ProxyCommand support unavailable. '
                                'Please upgrade to Paramiko 1.9.0 or newer. '
                                'Not using configured ProxyCommand')
        return sock_kwarg
    def _connect_uncached(self):
        ''' activates the connection object '''
        if not HAVE_PARAMIKO:
            raise AnsibleError("paramiko is not installed")
        port = self._play_context.port or 22
        display.vvv("ESTABLISH CONNECTION FOR USER: %s on PORT %s TO %s" % (self._play_context.remote_user, port, self._play_context.remote_addr),
                    host=self._play_context.remote_addr)
        ssh = paramiko.SSHClient()
        # override paramiko's default logger name
        if self._log_channel is not None:
            ssh.set_log_channel(self._log_channel)
        self.keyfile = os.path.expanduser("~/.ssh/known_hosts")
        if self.get_option('host_key_checking'):
            for ssh_known_hosts in ("/etc/ssh/ssh_known_hosts", "/etc/openssh/ssh_known_hosts"):
                try:
                    # TODO: check if we need to look at several possible locations, possible for loop
                    ssh.load_system_host_keys(ssh_known_hosts)
                    break
                except IOError:
                    pass # file was not found, but not required to function
            # also load the user's own known_hosts (default location)
            ssh.load_system_host_keys()
        sock_kwarg = self._parse_proxy_command(port)
        ssh.set_missing_host_key_policy(MyAddPolicy(self._new_stdin, self))
        # A password implies password auth was chosen; skip the ssh agent so
        # paramiko does not try agent keys first.
        allow_agent = True
        if self._play_context.password is not None:
            allow_agent = False
        try:
            key_filename = None
            if self._play_context.private_key_file:
                key_filename = os.path.expanduser(self._play_context.private_key_file)
            ssh.connect(
                self._play_context.remote_addr.lower(),
                username=self._play_context.remote_user,
                allow_agent=allow_agent,
                look_for_keys=self.get_option('look_for_keys'),
                key_filename=key_filename,
                password=self._play_context.password,
                timeout=self._play_context.timeout,
                port=port,
                **sock_kwarg
            )
        except paramiko.ssh_exception.BadHostKeyException as e:
            raise AnsibleConnectionFailure('host key mismatch for %s' % e.hostname)
        except Exception as e:
            # Map a few known paramiko failure strings to friendlier errors.
            msg = str(e)
            if "PID check failed" in msg:
                raise AnsibleError("paramiko version issue, please upgrade paramiko on the machine running ansible")
            elif "Private key file is encrypted" in msg:
                msg = 'ssh %s@%s:%s : %s\nTo connect as a different user, use -u <username>.' % (
                    self._play_context.remote_user, self._play_context.remote_addr, port, msg)
                raise AnsibleConnectionFailure(msg)
            else:
                raise AnsibleConnectionFailure(msg)
        return ssh
    def exec_command(self, cmd, in_data=None, sudoable=True):
        ''' run a command on the remote host '''
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
        if in_data:
            raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
        bufsize = 4096
        try:
            self.ssh.get_transport().set_keepalive(5)
            chan = self.ssh.get_transport().open_session()
        except Exception as e:
            msg = "Failed to open session"
            if len(str(e)) > 0:
                msg += ": %s" % str(e)
            raise AnsibleConnectionFailure(msg)
        # sudo usually requires a PTY (cf. requiretty option), therefore
        # we give it one by default (pty=True in ansible.cfg), and we try
        # to initialise from the calling environment when sudoable is enabled
        if self.get_option('pty') and sudoable:
            chan.get_pty(term=os.getenv('TERM', 'vt100'), width=int(os.getenv('COLUMNS', 0)), height=int(os.getenv('LINES', 0)))
        display.vvv("EXEC %s" % cmd, host=self._play_context.remote_addr)
        cmd = to_bytes(cmd, errors='surrogate_or_strict')
        # Output captured while negotiating privilege escalation; prepended to
        # the final stdout/stderr so nothing read early is lost.
        no_prompt_out = b''
        no_prompt_err = b''
        become_output = b''
        try:
            chan.exec_command(cmd)
            if self._play_context.prompt:
                # Read until we either see the become success marker or the
                # password prompt (or the channel closes).
                passprompt = False
                become_sucess = False
                while not (become_sucess or passprompt):
                    display.debug('Waiting for Privilege Escalation input')
                    chunk = chan.recv(bufsize)
                    display.debug("chunk is: %s" % chunk)
                    if not chunk:
                        if b'unknown user' in become_output:
                            raise AnsibleError('user %s does not exist' % self._play_context.become_user)
                        else:
                            break
                            # raise AnsibleError('ssh connection closed waiting for password prompt')
                    become_output += chunk
                    # need to check every line because we might get lectured
                    # and we might get the middle of a line in a chunk
                    for l in become_output.splitlines(True):
                        if self.check_become_success(l):
                            become_sucess = True
                            break
                        elif self.check_password_prompt(l):
                            passprompt = True
                            break
                if passprompt:
                    if self._play_context.become and self._play_context.become_pass:
                        chan.sendall(to_bytes(self._play_context.become_pass) + b'\n')
                    else:
                        raise AnsibleError("A password is required but none was supplied")
                else:
                    no_prompt_out += become_output
                    no_prompt_err += become_output
        except socket.timeout:
            # NOTE(review): become_output is bytes, so this str + bytes
            # concatenation raises TypeError on Python 3 -- confirm and fix.
            raise AnsibleError('ssh timed out waiting for privilege escalation.\n' + become_output)
        stdout = b''.join(chan.makefile('rb', bufsize))
        stderr = b''.join(chan.makefile_stderr('rb', bufsize))
        # NOTE(review): stderr is prefixed with no_prompt_out here;
        # no_prompt_err looks like the intended prefix -- confirm.
        return (chan.recv_exit_status(), no_prompt_out + stdout, no_prompt_out + stderr)
    def put_file(self, in_path, out_path):
        ''' transfer a file from local to remote '''
        super(Connection, self).put_file(in_path, out_path)
        display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
        if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
            raise AnsibleFileNotFound("file or module does not exist: %s" % in_path)
        try:
            self.sftp = self.ssh.open_sftp()
        except Exception as e:
            raise AnsibleError("failed to open a SFTP connection (%s)" % e)
        try:
            self.sftp.put(to_bytes(in_path, errors='surrogate_or_strict'), to_bytes(out_path, errors='surrogate_or_strict'))
        except IOError:
            raise AnsibleError("failed to transfer file to %s" % out_path)
    def _connect_sftp(self):
        # SFTP sessions are cached per host/user alongside the SSH connections.
        cache_key = "%s__%s__" % (self._play_context.remote_addr, self._play_context.remote_user)
        if cache_key in SFTP_CONNECTION_CACHE:
            return SFTP_CONNECTION_CACHE[cache_key]
        else:
            result = SFTP_CONNECTION_CACHE[cache_key] = self._connect().ssh.open_sftp()
            return result
    def fetch_file(self, in_path, out_path):
        ''' save a remote file to the specified path '''
        super(Connection, self).fetch_file(in_path, out_path)
        display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
        try:
            self.sftp = self._connect_sftp()
        except Exception as e:
            raise AnsibleError("failed to open a SFTP connection (%s)" % to_native(e))
        try:
            self.sftp.get(to_bytes(in_path, errors='surrogate_or_strict'), to_bytes(out_path, errors='surrogate_or_strict'))
        except IOError:
            raise AnsibleError("failed to transfer file from %s" % in_path)
    def _any_keys_added(self):
        # True when at least one host key was learned during this run
        # (keys are marked by MyAddPolicy.missing_host_key).
        for hostname, keys in iteritems(self.ssh._host_keys):
            for keytype, key in iteritems(keys):
                added_this_time = getattr(key, '_added_by_ansible_this_time', False)
                if added_this_time:
                    return True
        return False
    def _save_ssh_host_keys(self, filename):
        '''
        not using the paramiko save_ssh_host_keys function as we want to add new SSH keys at the bottom so folks
        don't complain about it :)
        '''
        if not self._any_keys_added():
            return False
        path = os.path.expanduser("~/.ssh")
        makedirs_safe(path)
        f = open(filename, 'w')
        # First pass: pre-existing keys keep their original position.
        for hostname, keys in iteritems(self.ssh._host_keys):
            for keytype, key in iteritems(keys):
                # was f.write
                added_this_time = getattr(key, '_added_by_ansible_this_time', False)
                if not added_this_time:
                    f.write("%s %s %s\n" % (hostname, keytype, key.get_base64()))
        # Second pass: keys added during this run are appended at the bottom.
        for hostname, keys in iteritems(self.ssh._host_keys):
            for keytype, key in iteritems(keys):
                added_this_time = getattr(key, '_added_by_ansible_this_time', False)
                if added_this_time:
                    f.write("%s %s %s\n" % (hostname, keytype, key.get_base64()))
        f.close()
    def close(self):
        ''' terminate the connection '''
        cache_key = self._cache_key()
        SSH_CONNECTION_CACHE.pop(cache_key, None)
        SFTP_CONNECTION_CACHE.pop(cache_key, None)
        if hasattr(self, 'sftp'):
            if self.sftp is not None:
                self.sftp.close()
        if self.get_option('host_key_checking') and self.get_option('record_host_keys') and self._any_keys_added():
            # add any new SSH host keys -- warning -- this could be slow
            # (This doesn't acquire the connection lock because it needs
            # to exclude only other known_hosts writers, not connections
            # that are starting up.)
            lockfile = self.keyfile.replace("known_hosts", ".known_hosts.lock")
            dirname = os.path.dirname(self.keyfile)
            makedirs_safe(dirname)
            KEY_LOCK = open(lockfile, 'w')
            fcntl.lockf(KEY_LOCK, fcntl.LOCK_EX)
            try:
                # just in case any were added recently
                self.ssh.load_system_host_keys()
                self.ssh._host_keys.update(self.ssh._system_host_keys)
                # gather information about the current key file, so
                # we can ensure the new file has the correct mode/owner
                key_dir = os.path.dirname(self.keyfile)
                if os.path.exists(self.keyfile):
                    key_stat = os.stat(self.keyfile)
                    mode = key_stat.st_mode
                    uid = key_stat.st_uid
                    gid = key_stat.st_gid
                else:
                    # 33188 == 0o100644: regular file, rw-r--r--
                    mode = 33188
                    uid = os.getuid()
                    gid = os.getgid()
                # Save the new keys to a temporary file and move it into place
                # rather than rewriting the file. We set delete=False because
                # the file will be moved into place rather than cleaned up.
                tmp_keyfile = tempfile.NamedTemporaryFile(dir=key_dir, delete=False)
                os.chmod(tmp_keyfile.name, mode & 0o7777)
                os.chown(tmp_keyfile.name, uid, gid)
                self._save_ssh_host_keys(tmp_keyfile.name)
                tmp_keyfile.close()
                os.rename(tmp_keyfile.name, self.keyfile)
            # NOTE(review): bare except also swallows SystemExit and
            # KeyboardInterrupt; 'except Exception' looks intended.
            except:
                # unable to save keys, including scenario when key was invalid
                # and caught earlier
                traceback.print_exc()
            fcntl.lockf(KEY_LOCK, fcntl.LOCK_UN)
        self.ssh.close()
| gpl-3.0 |
roadmapper/ansible | lib/ansible/module_utils/aws/rds.py | 13 | 11301 | # Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils._text import to_text
from ansible.module_utils.aws.waiters import get_waiter
from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
from ansible.module_utils.ec2 import compare_aws_tags, AWSRetry, ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict
try:
from botocore.exceptions import BotoCoreError, ClientError, WaiterError
except ImportError:
pass
from collections import namedtuple
from time import sleep
# Describes how a whitelisted boto3 RDS call should be handled: which waiter
# to use afterwards and whether it targets a cluster or an instance.
Boto3ClientMethod = namedtuple('Boto3ClientMethod', ['name', 'waiter', 'operation_description', 'cluster', 'instance'])
# Whitelist boto3 client methods for cluster and instance resources
cluster_method_names = [
    'create_db_cluster', 'restore_db_cluster_from_db_snapshot', 'restore_db_cluster_from_s3',
    'restore_db_cluster_to_point_in_time', 'modify_db_cluster', 'delete_db_cluster', 'add_tags_to_resource',
    'remove_tags_from_resource', 'list_tags_for_resource', 'promote_read_replica_db_cluster'
]
instance_method_names = [
    'create_db_instance', 'restore_db_instance_to_point_in_time', 'restore_db_instance_from_s3',
    'restore_db_instance_from_db_snapshot', 'create_db_instance_read_replica', 'modify_db_instance',
    'delete_db_instance', 'add_tags_to_resource', 'remove_tags_from_resource', 'list_tags_for_resource',
    'promote_read_replica', 'stop_db_instance', 'start_db_instance', 'reboot_db_instance'
]
def get_rds_method_attribute(method_name, module):
    """Return the Boto3ClientMethod descriptor for *method_name*.

    The calling module's argument spec decides the resource kind: cluster
    modules declare ``new_db_cluster_identifier``, instance modules declare
    ``new_db_instance_identifier``. Raises NotImplementedError for any
    method that is not whitelisted for the calling module.
    """
    readable_op = method_name.replace('_', ' ').replace('db', 'DB')
    is_cluster = method_name in cluster_method_names and 'new_db_cluster_identifier' in module.params
    is_instance = method_name in instance_method_names and 'new_db_instance_identifier' in module.params
    if is_cluster:
        waiter = 'cluster_deleted' if method_name == 'delete_db_cluster' else 'cluster_available'
    elif is_instance:
        waiter = {
            'delete_db_instance': 'db_instance_deleted',
            'stop_db_instance': 'db_instance_stopped',
        }.get(method_name, 'db_instance_available')
    else:
        raise NotImplementedError("method {0} hasn't been added to the list of accepted methods to use a waiter in module_utils/aws/rds.py".format(method_name))
    return Boto3ClientMethod(name=method_name, waiter=waiter, operation_description=readable_op,
                             cluster=is_cluster, instance=is_instance)
def get_final_identifier(method_name, module):
    """Return the identifier the resource will carry once *method_name* completes.

    When a rename was requested (``new_db_*_identifier``) and will take
    effect immediately (``apply_immediately`` set, not check mode), the new
    identifier is returned; otherwise the current one.
    """
    apply_immediately = module.params['apply_immediately']
    method_attr = get_rds_method_attribute(method_name, module)
    if method_attr.cluster:
        identifier = module.params['db_cluster_identifier']
        updated = module.params['new_db_cluster_identifier']
    elif method_attr.instance:
        identifier = module.params['db_instance_identifier']
        updated = module.params['new_db_instance_identifier']
    else:
        raise NotImplementedError("method {0} hasn't been added to the list of accepted methods in module_utils/aws/rds.py".format(method_name))
    if updated and apply_immediately and not module.check_mode:
        return updated
    return identifier
def handle_errors(module, exception, method_name, parameters):
    """Translate AWS errors into either a "no change" result or a module failure.

    Returns ``changed`` (bool): known benign errors ("no modifications were
    requested", "not a read replica") return False; everything else calls
    ``module.fail_json_aws`` and does not return.
    """
    if not isinstance(exception, ClientError):
        module.fail_json_aws(exception, msg="Unexpected failure for method {0} with parameters {1}".format(method_name, parameters))
    changed = True
    error_code = exception.response['Error']['Code']
    if method_name == 'modify_db_instance' and error_code == 'InvalidParameterCombination':
        if 'No modifications were requested' in to_text(exception):
            changed = False
        elif 'ModifyDbCluster API' in to_text(exception):
            module.fail_json_aws(exception, msg='It appears you are trying to modify attributes that are managed at the cluster level. Please see rds_cluster')
        else:
            module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
    elif method_name == 'promote_read_replica' and error_code == 'InvalidDBInstanceState':
        # Promoting an already-promoted instance is treated as idempotent.
        if 'DB Instance is not a read replica' in to_text(exception):
            changed = False
        else:
            module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
    elif method_name == 'create_db_instance' and exception.response['Error']['Code'] == 'InvalidParameterValue':
        # Give a friendlier message when the engine name itself is invalid.
        accepted_engines = [
            'aurora', 'aurora-mysql', 'aurora-postgresql', 'mariadb', 'mysql', 'oracle-ee', 'oracle-se',
            'oracle-se1', 'oracle-se2', 'postgres', 'sqlserver-ee', 'sqlserver-ex', 'sqlserver-se', 'sqlserver-web'
        ]
        if parameters.get('Engine') not in accepted_engines:
            module.fail_json_aws(exception, msg='DB engine {0} should be one of {1}'.format(parameters.get('Engine'), accepted_engines))
        else:
            module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
    else:
        module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
    return changed
def call_method(client, module, method_name, parameters):
    """Invoke a whitelisted boto3 client method with retries and waiting.

    Returns ``(result, changed)``. In check mode nothing is called and an
    empty result with ``changed=True`` is returned.
    """
    result = {}
    changed = True
    if not module.check_mode:
        wait = module.params['wait']
        # TODO: stabilize by adding get_rds_method_attribute(method_name).extra_retry_codes
        method = getattr(client, method_name)
        try:
            if method_name == 'modify_db_instance':
                # check if instance is in an available state first, if possible
                if wait:
                    wait_for_status(client, module, module.params['db_instance_identifier'], method_name)
                result = AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidDBInstanceState'])(method)(**parameters)
            else:
                result = AWSRetry.jittered_backoff()(method)(**parameters)
        except (BotoCoreError, ClientError) as e:
            # handle_errors either downgrades the error to changed=False or
            # fails the module and never returns.
            changed = handle_errors(module, e, method_name, parameters)
        if wait and changed:
            # Wait on the identifier the resource will have after the call
            # (renames take effect when apply_immediately is set).
            identifier = get_final_identifier(method_name, module)
            wait_for_status(client, module, identifier, method_name)
    return result, changed
def wait_for_instance_status(client, module, db_instance_id, waiter_name):
    """Block until the DB instance reaches the state implied by *waiter_name*,
    failing the module on unexpected errors."""
    def wait(client, db_instance_id, waiter_name, extra_retry_codes):
        # NOTE(review): 'retry' is built but never applied to anything --
        # looks like a leftover; confirm before removing.
        retry = AWSRetry.jittered_backoff(catch_extra_error_codes=extra_retry_codes)
        try:
            waiter = client.get_waiter(waiter_name)
        except ValueError:
            # using a waiter in ansible.module_utils.aws.waiters
            waiter = get_waiter(client, waiter_name)
        waiter.wait(WaiterConfig={'Delay': 60, 'MaxAttempts': 60}, DBInstanceIdentifier=db_instance_id)
    waiter_expected_status = {
        'db_instance_deleted': 'deleted',
        'db_instance_stopped': 'stopped',
    }
    expected_status = waiter_expected_status.get(waiter_name, 'available')
    # A not-yet-visible instance is only retryable when we expect it to
    # (re)appear as 'available' (e.g. after a rename).
    if expected_status == 'available':
        extra_retry_codes = ['DBInstanceNotFound']
    else:
        extra_retry_codes = []
    for attempt_to_wait in range(0, 10):
        try:
            wait(client, db_instance_id, waiter_name, extra_retry_codes)
            break
        except WaiterError as e:
            # Instance may be renamed and AWSRetry doesn't handle WaiterError
            if e.last_response.get('Error', {}).get('Code') == 'DBInstanceNotFound':
                sleep(10)
                continue
            module.fail_json_aws(e, msg='Error while waiting for DB instance {0} to be {1}'.format(db_instance_id, expected_status))
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg='Unexpected error while waiting for DB instance {0} to be {1}'.format(
                db_instance_id, expected_status)
            )
    # NOTE(review): if all 10 attempts hit DBInstanceNotFound the loop falls
    # through silently without failing the module -- confirm intentional.
def wait_for_cluster_status(client, module, db_cluster_id, waiter_name):
    """Block until the DB cluster is deleted or available, failing the
    module with a waiter-specific message on error."""
    try:
        waiter = get_waiter(client, waiter_name).wait(DBClusterIdentifier=db_cluster_id)
    except WaiterError as e:
        if waiter_name == 'cluster_deleted':
            msg = "Failed to wait for DB cluster {0} to be deleted".format(db_cluster_id)
        else:
            msg = "Failed to wait for DB cluster {0} to be available".format(db_cluster_id)
        module.fail_json_aws(e, msg=msg)
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Failed with an unexpected error while waiting for the DB cluster {0}".format(db_cluster_id))
def wait_for_status(client, module, identifier, method_name):
    """Dispatch to the cluster or instance waiter appropriate for *method_name*."""
    method_attr = get_rds_method_attribute(method_name, module)
    if method_attr.cluster:
        wait_for_cluster_status(client, module, identifier, method_attr.waiter)
    elif method_attr.instance:
        wait_for_instance_status(client, module, identifier, method_attr.waiter)
    else:
        raise NotImplementedError("method {0} hasn't been added to the whitelist of handled methods".format(method_name))
def get_tags(client, module, cluster_arn):
    """Return the resource's tags as a plain Ansible dict, failing the
    module on any AWS error."""
    try:
        return boto3_tag_list_to_ansible_dict(
            client.list_tags_for_resource(ResourceName=cluster_arn)['TagList']
        )
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Unable to describe tags")
def arg_spec_to_rds_params(options_dict):
    """Convert snake_case module options into the CamelCase shape boto3 expects.

    Tags and processor features are carried over verbatim; AWS-specific
    casing (DB, IAM, AZ) is patched up after the generic camel conversion.
    NOTE(review): pops 'tags' without a default (KeyError when absent) and
    mutates the caller's dict -- presumably every caller passes a dict that
    contains 'tags'; confirm.
    """
    tags = options_dict.pop('tags')
    has_processor_features = False
    if 'processor_features' in options_dict:
        has_processor_features = True
        processor_features = options_dict.pop('processor_features')
    camel_options = snake_dict_to_camel_dict(options_dict, capitalize_first=True)
    # Fix AWS-style acronym casing that the generic converter cannot know about.
    for key in list(camel_options.keys()):
        for old, new in (('Db', 'DB'), ('Iam', 'IAM'), ('Az', 'AZ')):
            if old in key:
                camel_options[key.replace(old, new)] = camel_options.pop(key)
    camel_options['Tags'] = tags
    if has_processor_features:
        camel_options['ProcessorFeatures'] = processor_features
    return camel_options
def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags):
    """Reconcile the resource's tags with *tags*; return True when anything changed.

    A ``tags`` value of None means "leave tags alone" and is a no-op.
    """
    if tags is None:
        return False
    tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, tags, purge_tags)
    changed = bool(tags_to_add or tags_to_remove)
    if tags_to_add:
        call_method(
            client, module, method_name='add_tags_to_resource',
            parameters={'ResourceName': resource_arn, 'Tags': ansible_dict_to_boto3_tag_list(tags_to_add)}
        )
    if tags_to_remove:
        call_method(
            client, module, method_name='remove_tags_from_resource',
            parameters={'ResourceName': resource_arn, 'TagKeys': tags_to_remove}
        )
    return changed
| gpl-3.0 |
MaplePlan/djwp | django/contrib/auth/management/commands/changepassword.py | 126 | 1975 | import getpass
from optparse import make_option
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS
class Command(BaseCommand):
    """Interactively change a user's password from the command line."""
    option_list = BaseCommand.option_list + (
        make_option('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS, help='Specifies the database to use. Default is "default".'),
    )
    help = "Change a user's password for django.contrib.auth."
    requires_model_validation = False

    def _get_pass(self, prompt="Password: "):
        """Prompt for a password without echoing; abort on empty input."""
        entered = getpass.getpass(prompt=prompt)
        if not entered:
            raise CommandError("aborted")
        return entered

    def handle(self, *args, **options):
        """Resolve the target user, prompt twice for a new password (up to
        three attempts), then save it."""
        if len(args) > 1:
            raise CommandError("need exactly one or zero arguments for username")
        # Default to the OS login name when no username argument is given.
        username = args[0] if args else getpass.getuser()
        UserModel = get_user_model()
        try:
            u = UserModel._default_manager.using(options.get('database')).get(**{
                UserModel.USERNAME_FIELD: username
            })
        except UserModel.DoesNotExist:
            raise CommandError("user '%s' does not exist" % username)
        self.stdout.write("Changing password for user '%s'\n" % u)
        MAX_TRIES = 3
        for _attempt in range(MAX_TRIES):
            first = self._get_pass()
            second = self._get_pass("Password (again): ")
            if first == second:
                break
            self.stdout.write("Passwords do not match. Please try again.\n")
        else:
            # Every attempt mismatched.
            raise CommandError("Aborting password change for user '%s' after %s attempts" % (u, MAX_TRIES))
        u.set_password(first)
        u.save()
        return "Password changed successfully for user '%s'" % u
| lgpl-3.0 |
zoufishanmehdi/Uncharted | node_modules/node-gyp/gyp/pylib/gyp/ninja_syntax.py | 2485 | 5536 | # This file comes from
# https://github.com/martine/ninja/blob/master/misc/ninja_syntax.py
# Do not edit! Edit the upstream one instead.
"""Python module for generating .ninja files.
Note that this is emphatically not a required piece of Ninja; it's
just a helpful utility for build-file-generation systems that already
use Python.
"""
import textwrap
import re
def escape_path(word):
    """Escape a path for use in a ninja build line.

    Pre-escapes existing '$ ' pairs, then escapes spaces and colons.
    """
    escaped = word.replace('$ ', '$$ ')
    escaped = escaped.replace(' ', '$ ')
    return escaped.replace(':', '$:')
class Writer(object):
    """Emits well-formed .ninja syntax to the *output* file object.

    Lines longer than *width* columns are wrapped using ninja's '$'
    line-continuation syntax.
    """
    def __init__(self, output, width=78):
        self.output = output
        self.width = width
    def newline(self):
        """Write a blank separator line."""
        self.output.write('\n')
    def comment(self, text):
        """Write *text* as '#'-prefixed comment lines, word-wrapped."""
        for line in textwrap.wrap(text, self.width - 2):
            self.output.write('# ' + line + '\n')
    def variable(self, key, value, indent=0):
        """Write a 'key = value' binding.

        A None value writes nothing; a list value is joined with spaces.
        """
        if value is None:
            return
        if isinstance(value, list):
            value = ' '.join(filter(None, value))  # Filter out empty strings.
        self._line('%s = %s' % (key, value), indent)
    def pool(self, name, depth):
        """Declare a pool with the given concurrency *depth*."""
        self._line('pool %s' % name)
        self.variable('depth', depth, indent=1)
    def rule(self, name, command, description=None, depfile=None,
             generator=False, pool=None, restat=False, rspfile=None,
             rspfile_content=None, deps=None):
        """Declare a rule; each truthy keyword argument becomes an
        indented rule-level variable binding."""
        self._line('rule %s' % name)
        self.variable('command', command, indent=1)
        if description:
            self.variable('description', description, indent=1)
        if depfile:
            self.variable('depfile', depfile, indent=1)
        if generator:
            self.variable('generator', '1', indent=1)
        if pool:
            self.variable('pool', pool, indent=1)
        if restat:
            self.variable('restat', '1', indent=1)
        if rspfile:
            self.variable('rspfile', rspfile, indent=1)
        if rspfile_content:
            self.variable('rspfile_content', rspfile_content, indent=1)
        if deps:
            self.variable('deps', deps, indent=1)
    def build(self, outputs, rule, inputs=None, implicit=None, order_only=None,
              variables=None):
        """Write a build statement.

        *implicit* and *order_only* dependencies are appended after '|'
        and '||' markers respectively; *variables* (dict or iterable of
        pairs) become indented bindings.  Returns outputs as a list.
        """
        outputs = self._as_list(outputs)
        all_inputs = self._as_list(inputs)[:]
        out_outputs = list(map(escape_path, outputs))
        all_inputs = list(map(escape_path, all_inputs))
        if implicit:
            implicit = map(escape_path, self._as_list(implicit))
            # '|' introduces implicit dependencies in ninja syntax.
            all_inputs.append('|')
            all_inputs.extend(implicit)
        if order_only:
            order_only = map(escape_path, self._as_list(order_only))
            # '||' introduces order-only dependencies.
            all_inputs.append('||')
            all_inputs.extend(order_only)
        self._line('build %s: %s' % (' '.join(out_outputs),
                                     ' '.join([rule] + all_inputs)))
        if variables:
            if isinstance(variables, dict):
                iterator = iter(variables.items())
            else:
                iterator = iter(variables)
            for key, val in iterator:
                self.variable(key, val, indent=1)
        return outputs
    def include(self, path):
        """Write an 'include' directive."""
        self._line('include %s' % path)
    def subninja(self, path):
        """Write a 'subninja' directive."""
        self._line('subninja %s' % path)
    def default(self, paths):
        """Declare the default target(s)."""
        self._line('default %s' % ' '.join(self._as_list(paths)))
    def _count_dollars_before_index(self, s, i):
        """Returns the number of '$' characters right in front of s[i]."""
        dollar_count = 0
        dollar_index = i - 1
        while dollar_index > 0 and s[dollar_index] == '$':
            dollar_count += 1
            dollar_index -= 1
        return dollar_count
    def _line(self, text, indent=0):
        """Write 'text' word-wrapped at self.width characters."""
        leading_space = ' ' * indent
        while len(leading_space) + len(text) > self.width:
            # The text is too wide; wrap if possible.
            # Find the rightmost space that would obey our width constraint and
            # that's not an escaped space.
            available_space = self.width - len(leading_space) - len(' $')
            space = available_space
            while True:
                space = text.rfind(' ', 0, space)
                # A space preceded by an even number of '$' is unescaped.
                if space < 0 or \
                   self._count_dollars_before_index(text, space) % 2 == 0:
                    break
            if space < 0:
                # No such space; just use the first unescaped space we can find.
                space = available_space - 1
                while True:
                    space = text.find(' ', space + 1)
                    if space < 0 or \
                       self._count_dollars_before_index(text, space) % 2 == 0:
                        break
                if space < 0:
                    # Give up on breaking.
                    break
            self.output.write(leading_space + text[0:space] + ' $\n')
            text = text[space+1:]
            # Subsequent lines are continuations, so indent them.
            leading_space = ' ' * (indent+2)
        self.output.write(leading_space + text + '\n')
    def _as_list(self, input):
        """Normalize None / scalar / list into a list."""
        if input is None:
            return []
        if isinstance(input, list):
            return input
        return [input]
def escape(string):
    """Escape a string such that it can be embedded into a Ninja file without
    further interpretation."""
    assert '\n' not in string, 'Ninja syntax does not allow newlines'
    # '$' is ninja's only metacharacter; double every occurrence.
    return '$$'.join(string.split('$'))
| mit |
nathanielherman/silo | benchmarks/results/istc3-8-16-13_multipart_skew.py | 2 | 11299 | RESULTS = [({'disable_gc': False, 'scale_factor': 4, 'db': 'kvdb-st', 'par_load': False, 'threads': 1, 'log_compress': False, 'bench_opts': '--workload-mix 100,0,0,0,0 --enable-separate-tree-per-partition --enable-partition-locks ', 'log_fake_writes': False, 'retry': False, 'log_nofsync': False, 'name': 'multipart:skew', 'bench': 'tpcc', 'numa_memory': '16G', 'persist': False, 'disable_snapshots': False}, [(43468.0, 43468.0, 0.0229553, 0.0, 0.0), (42505.9, 42505.9, 0.0234737, 0.0, 0.0), (43154.0, 43154.0, 0.0231245, 0.0, 0.0)]), ({'disable_gc': False, 'scale_factor': 4, 'db': 'ndb-proto2', 'par_load': False, 'threads': 1, 'log_compress': False, 'bench_opts': '--workload-mix 100,0,0,0,0', 'log_fake_writes': False, 'retry': False, 'log_nofsync': False, 'name': 'multipart:skew', 'bench': 'tpcc', 'numa_memory': '4G', 'persist': False, 'disable_snapshots': False}, [(30754.7, 30754.7, 0.0324678, 0.0, 0.0), (30117.4, 30117.4, 0.0331507, 0.0, 0.0), (29800.0, 29800.0, 0.0335089, 0.0, 0.0)]), ({'disable_gc': False, 'scale_factor': 4, 'db': 'ndb-proto2', 'par_load': False, 'threads': 2, 'log_compress': False, 'bench_opts': '--workload-mix 100,0,0,0,0', 'log_fake_writes': False, 'retry': False, 'log_nofsync': False, 'name': 'multipart:skew', 'bench': 'tpcc', 'numa_memory': '8G', 'persist': False, 'disable_snapshots': False}, [(61867.5, 61867.5, 0.0322773, 0.0, 3.19998), (62408.2, 62408.2, 0.0319925, 0.0, 3.06664), (61668.9, 61668.9, 0.0323773, 0.0, 2.96665)]), ({'disable_gc': False, 'scale_factor': 4, 'db': 'ndb-proto2', 'par_load': False, 'threads': 4, 'log_compress': False, 'bench_opts': '--workload-mix 100,0,0,0,0', 'log_fake_writes': False, 'retry': False, 'log_nofsync': False, 'name': 'multipart:skew', 'bench': 'tpcc', 'numa_memory': '16G', 'persist': False, 'disable_snapshots': False}, [(116912.0, 116912.0, 0.0341545, 0.0, 17.1332), (113059.0, 113059.0, 0.0353051, 0.0, 17.3165), 
(115758.0, 115758.0, 0.0344966, 0.0, 17.2999)]), ({'disable_gc': False, 'scale_factor': 4, 'db': 'ndb-proto2', 'par_load': False, 'threads': 6, 'log_compress': False, 'bench_opts': '--workload-mix 100,0,0,0,0', 'log_fake_writes': False, 'retry': False, 'log_nofsync': False, 'name': 'multipart:skew', 'bench': 'tpcc', 'numa_memory': '24G', 'persist': False, 'disable_snapshots': False}, [(159447.0, 159447.0, 0.0350288, 0.0, 9514.06), (159379.0, 159379.0, 0.0350259, 0.0, 9480.49), (159677.0, 159677.0, 0.0349627, 0.0, 9492.04)]), ({'disable_gc': False, 'scale_factor': 4, 'db': 'ndb-proto2', 'par_load': False, 'threads': 8, 'log_compress': False, 'bench_opts': '--workload-mix 100,0,0,0,0', 'log_fake_writes': False, 'retry': False, 'log_nofsync': False, 'name': 'multipart:skew', 'bench': 'tpcc', 'numa_memory': '32G', 'persist': False, 'disable_snapshots': False}, [(199853.0, 199853.0, 0.0359222, 0.0, 18743.0), (198260.0, 198260.0, 0.0362176, 0.0, 18613.7), (197791.0, 197791.0, 0.0362914, 0.0, 18554.3)]), ({'disable_gc': False, 'scale_factor': 4, 'db': 'ndb-proto2', 'par_load': False, 'threads': 10, 'log_compress': False, 'bench_opts': '--workload-mix 100,0,0,0,0', 'log_fake_writes': False, 'retry': False, 'log_nofsync': False, 'name': 'multipart:skew', 'bench': 'tpcc', 'numa_memory': '40G', 'persist': False, 'disable_snapshots': False}, [(179239.0, 179239.0, 0.0465518, 0.0, 27440.2), (175376.0, 175376.0, 0.0474159, 0.0, 27047.5), (179595.0, 179595.0, 0.0464574, 0.0, 27482.7)]), ({'disable_gc': False, 'scale_factor': 4, 'db': 'ndb-proto2', 'par_load': False, 'threads': 12, 'log_compress': False, 'bench_opts': '--workload-mix 100,0,0,0,0', 'log_fake_writes': False, 'retry': False, 'log_nofsync': False, 'name': 'multipart:skew', 'bench': 'tpcc', 'numa_memory': '48G', 'persist': False, 'disable_snapshots': False}, [(137867.0, 137867.0, 0.0666319, 0.0, 28550.1), (137801.0, 137801.0, 0.0662968, 0.0, 28607.6), (140053.0, 140053.0, 0.0655534, 0.0, 29200.2)]), ({'disable_gc': 
False, 'scale_factor': 4, 'db': 'ndb-proto2', 'par_load': False, 'threads': 16, 'log_compress': False, 'bench_opts': '--workload-mix 100,0,0,0,0', 'log_fake_writes': False, 'retry': False, 'log_nofsync': False, 'name': 'multipart:skew', 'bench': 'tpcc', 'numa_memory': '64G', 'persist': False, 'disable_snapshots': False}, [(110439.0, 110439.0, 0.0982468, 0.0, 35981.9), (110248.0, 110248.0, 0.0982313, 0.0, 35980.5), (111519.0, 111519.0, 0.0973957, 0.0, 36321.1)]), ({'disable_gc': False, 'scale_factor': 4, 'db': 'ndb-proto2', 'par_load': False, 'threads': 20, 'log_compress': False, 'bench_opts': '--workload-mix 100,0,0,0,0', 'log_fake_writes': False, 'retry': False, 'log_nofsync': False, 'name': 'multipart:skew', 'bench': 'tpcc', 'numa_memory': '80G', 'persist': False, 'disable_snapshots': False}, [(58960.5, 58960.5, 0.189055, 0.0, 29293.0), (58409.4, 58409.4, 0.18975, 0.0, 28755.0), (56021.4, 56021.4, 0.199431, 0.0, 27870.6)]), ({'disable_gc': False, 'scale_factor': 4, 'db': 'ndb-proto2', 'par_load': False, 'threads': 24, 'log_compress': False, 'bench_opts': '--workload-mix 100,0,0,0,0', 'log_fake_writes': False, 'retry': False, 'log_nofsync': False, 'name': 'multipart:skew', 'bench': 'tpcc', 'numa_memory': '96G', 'persist': False, 'disable_snapshots': False}, [(48373.0, 48373.0, 0.249847, 0.0, 30793.9), (48672.1, 48672.1, 0.2476, 0.0, 31244.2), (48770.2, 48770.2, 0.246519, 0.0, 31328.5)]), ({'disable_gc': False, 'scale_factor': 4, 'db': 'ndb-proto2', 'par_load': False, 'threads': 28, 'log_compress': False, 'bench_opts': '--workload-mix 100,0,0,0,0', 'log_fake_writes': False, 'retry': False, 'log_nofsync': False, 'name': 'multipart:skew', 'bench': 'tpcc', 'numa_memory': '112G', 'persist': False, 'disable_snapshots': False}, [(27698.3, 27698.3, 0.429631, 0.0, 22725.1), (28482.3, 28482.3, 0.417004, 0.0, 23111.7), (27963.0, 27963.0, 0.427409, 0.0, 22980.8)])] + [({'disable_gc': False, 'scale_factor': 4, 'db': 'ndb-proto2', 'par_load': False, 'threads': 1, 
'log_compress': False, 'bench_opts': '--workload-mix 100,0,0,0,0 --new-order-fast-id-gen', 'log_fake_writes': False, 'retry': False, 'log_nofsync': False, 'name': 'multipart:skew', 'bench': 'tpcc', 'numa_memory': '4G', 'persist': False, 'disable_snapshots': False}, [(30943.9, 30943.9, 0.0322558, 0.0, 0.0), (31465.1, 31465.1, 0.0317251, 0.0, 0.0), (31357.4, 31357.4, 0.0318378, 0.0, 0.0)]), ({'disable_gc': False, 'scale_factor': 4, 'db': 'ndb-proto2', 'par_load': False, 'threads': 2, 'log_compress': False, 'bench_opts': '--workload-mix 100,0,0,0,0 --new-order-fast-id-gen', 'log_fake_writes': False, 'retry': False, 'log_nofsync': False, 'name': 'multipart:skew', 'bench': 'tpcc', 'numa_memory': '8G', 'persist': False, 'disable_snapshots': False}, [(62177.6, 62177.6, 0.0321066, 0.0, 2.89999), (62432.7, 62432.7, 0.0319747, 0.0, 2.94999), (62648.0, 62648.0, 0.0318621, 0.0, 3.38331)]), ({'disable_gc': False, 'scale_factor': 4, 'db': 'ndb-proto2', 'par_load': False, 'threads': 4, 'log_compress': False, 'bench_opts': '--workload-mix 100,0,0,0,0 --new-order-fast-id-gen', 'log_fake_writes': False, 'retry': False, 'log_nofsync': False, 'name': 'multipart:skew', 'bench': 'tpcc', 'numa_memory': '16G', 'persist': False, 'disable_snapshots': False}, [(118895.0, 118895.0, 0.0335782, 0.0, 17.9332), (117912.0, 117912.0, 0.0338585, 0.0, 17.7666), (118688.0, 118688.0, 0.0336305, 0.0, 17.6999)]), ({'disable_gc': False, 'scale_factor': 4, 'db': 'ndb-proto2', 'par_load': False, 'threads': 6, 'log_compress': False, 'bench_opts': '--workload-mix 100,0,0,0,0 --new-order-fast-id-gen', 'log_fake_writes': False, 'retry': False, 'log_nofsync': False, 'name': 'multipart:skew', 'bench': 'tpcc', 'numa_memory': '24G', 'persist': False, 'disable_snapshots': False}, [(171283.0, 171283.0, 0.0346632, 0.0, 890.859), (170433.0, 170433.0, 0.0348388, 0.0, 873.825), (171277.0, 171277.0, 0.0346634, 0.0, 892.642)]), ({'disable_gc': False, 'scale_factor': 4, 'db': 'ndb-proto2', 'par_load': False, 'threads': 8, 
'log_compress': False, 'bench_opts': '--workload-mix 100,0,0,0,0 --new-order-fast-id-gen', 'log_fake_writes': False, 'retry': False, 'log_nofsync': False, 'name': 'multipart:skew', 'bench': 'tpcc', 'numa_memory': '32G', 'persist': False, 'disable_snapshots': False}, [(219415.0, 219415.0, 0.0359304, 0.0, 1712.53), (219942.0, 219942.0, 0.0358451, 0.0, 1716.32), (218084.0, 218084.0, 0.0361459, 0.0, 1706.6)]), ({'disable_gc': False, 'scale_factor': 4, 'db': 'ndb-proto2', 'par_load': False, 'threads': 10, 'log_compress': False, 'bench_opts': '--workload-mix 100,0,0,0,0 --new-order-fast-id-gen', 'log_fake_writes': False, 'retry': False, 'log_nofsync': False, 'name': 'multipart:skew', 'bench': 'tpcc', 'numa_memory': '40G', 'persist': False, 'disable_snapshots': False}, [(206212.0, 206212.0, 0.0472844, 0.0, 2656.74), (205616.0, 205616.0, 0.047406, 0.0, 2657.5), (206888.0, 206888.0, 0.0471279, 0.0, 2660.49)]), ({'disable_gc': False, 'scale_factor': 4, 'db': 'ndb-proto2', 'par_load': False, 'threads': 12, 'log_compress': False, 'bench_opts': '--workload-mix 100,0,0,0,0 --new-order-fast-id-gen', 'log_fake_writes': False, 'retry': False, 'log_nofsync': False, 'name': 'multipart:skew', 'bench': 'tpcc', 'numa_memory': '48G', 'persist': False, 'disable_snapshots': False}, [(169345.0, 169345.0, 0.0685426, 0.0, 2789.03), (170003.0, 170003.0, 0.0683121, 0.0, 2803.62), (169983.0, 169983.0, 0.0683306, 0.0, 2804.06)]), ({'disable_gc': False, 'scale_factor': 4, 'db': 'ndb-proto2', 'par_load': False, 'threads': 16, 'log_compress': False, 'bench_opts': '--workload-mix 100,0,0,0,0 --new-order-fast-id-gen', 'log_fake_writes': False, 'retry': False, 'log_nofsync': False, 'name': 'multipart:skew', 'bench': 'tpcc', 'numa_memory': '64G', 'persist': False, 'disable_snapshots': False}, [(149677.0, 149677.0, 0.10188, 0.0, 3785.16), (145768.0, 145768.0, 0.104591, 0.0, 3691.9), (149610.0, 149610.0, 0.101906, 0.0, 3791.05)]), ({'disable_gc': False, 'scale_factor': 4, 'db': 'ndb-proto2', 'par_load': 
False, 'threads': 20, 'log_compress': False, 'bench_opts': '--workload-mix 100,0,0,0,0 --new-order-fast-id-gen', 'log_fake_writes': False, 'retry': False, 'log_nofsync': False, 'name': 'multipart:skew', 'bench': 'tpcc', 'numa_memory': '80G', 'persist': False, 'disable_snapshots': False}, [(83235.8, 83235.8, 0.22186, 0.0, 2809.16), (85806.4, 85806.4, 0.216144, 0.0, 2920.95), (88497.0, 88497.0, 0.209852, 0.0, 3036.64)]), ({'disable_gc': False, 'scale_factor': 4, 'db': 'ndb-proto2', 'par_load': False, 'threads': 24, 'log_compress': False, 'bench_opts': '--workload-mix 100,0,0,0,0 --new-order-fast-id-gen', 'log_fake_writes': False, 'retry': False, 'log_nofsync': False, 'name': 'multipart:skew', 'bench': 'tpcc', 'numa_memory': '96G', 'persist': False, 'disable_snapshots': False}, [(75410.7, 75410.7, 0.29186, 0.0, 3327.34), (81033.5, 81033.5, 0.271947, 0.0, 3516.03), (80770.0, 80770.0, 0.27314, 0.0, 3443.05)]), ({'disable_gc': False, 'scale_factor': 4, 'db': 'ndb-proto2', 'par_load': False, 'threads': 28, 'log_compress': False, 'bench_opts': '--workload-mix 100,0,0,0,0 --new-order-fast-id-gen', 'log_fake_writes': False, 'retry': False, 'log_nofsync': False, 'name': 'multipart:skew', 'bench': 'tpcc', 'numa_memory': '112G', 'persist': False, 'disable_snapshots': False}, [(52767.3, 52767.3, 0.47619, 0.0, 2730.02), (51759.0, 51759.0, 0.483218, 0.0, 2647.96), (51844.8, 51844.8, 0.483051, 0.0, 2664.81)])]
| mit |
hugovk/terroroftinytown | terroroftinytown/tracker/model.py | 1 | 27062 | # encoding=utf-8
import base64
import calendar
import contextlib
import datetime
import hashlib
import hmac
import json
import os
import random
import subprocess
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.orm.session import make_transient
from sqlalchemy.orm.util import object_state
from sqlalchemy.sql.expression import insert, select, delete, exists
from sqlalchemy.sql.functions import func
from sqlalchemy.sql.schema import Column, ForeignKey
from sqlalchemy.sql.sqltypes import String, LargeBinary, Float, Boolean, Integer, \
DateTime
from sqlalchemy.sql.type_api import TypeDecorator
from terroroftinytown.client import VERSION
from terroroftinytown.client.alphabet import str_to_int, int_to_str
from terroroftinytown.tracker.errors import NoItemAvailable, FullClaim, UpdateClient, \
InvalidClaim, NoResourcesAvailable
from terroroftinytown.tracker.stats import Stats
# Version-override constants, bumped manually on major API changes so that
# outdated clients are refused work.
MIN_VERSION_OVERRIDE = 45  # for the terroroftinytown.client library
MIN_CLIENT_VERSION_OVERRIDE = 7  # for terroroftinytown-client-grab/pipeline.py
# Thresholds for the tracker's dead-man's switch.
DEADMAN_MAX_ERROR_REPORTS = 4000
DEADMAN_MAX_RESULTS = 40000000
# SQLAlchemy declarative base and session factory shared by all models here.
Base = declarative_base()
Session = sessionmaker()
@contextlib.contextmanager
def new_session():
    """Yield a SQLAlchemy session that commits on clean exit.

    Any exception (including generator close) rolls the transaction back
    and re-raises; the session is always closed.
    """
    db = Session()
    try:
        yield db
        db.commit()
    except:  # deliberately bare: roll back on *any* interruption
        db.rollback()
        raise
    finally:
        db.close()
class JsonType(TypeDecorator):
    """Column type storing arbitrary JSON-serializable values as strings."""
    impl = String

    def process_bind_param(self, value, engine):
        # Serialize on the way into the database.
        return json.dumps(value)

    def process_result_value(self, value, engine):
        # Empty/NULL columns come back as None instead of raising.
        return json.loads(value) if value else None
class GlobalSetting(Base):
    '''Simple key/value store for tracker-wide settings.'''
    __tablename__ = 'global_settings'
    key = Column(String, primary_key=True)
    value = Column(JsonType)
    # Well-known setting key:
    AUTO_DELETE_ERROR_REPORTS = 'auto_delete_error_reports'
    @classmethod
    def set_value(cls, key, value):
        '''Insert or update the setting named *key*.'''
        with new_session() as session:
            row = session.query(GlobalSetting).filter_by(key=key).first()
            if row is None:
                session.add(GlobalSetting(key=key, value=value))
            else:
                row.value = value
    @classmethod
    def get_value(cls, key):
        '''Return the stored value for *key*, or None when unset.'''
        with new_session() as session:
            row = session.query(GlobalSetting).filter_by(key=key).first()
            if row is not None:
                return row.value
class User(Base):
    '''User accounts that manage the tracker.'''
    __tablename__ = 'users'
    username = Column(String, primary_key=True)
    # Random per-account HMAC key; regenerated on every password change.
    salt = Column(LargeBinary, nullable=False)
    hash = Column(LargeBinary, nullable=False)
    def set_password(self, password):
        '''Store *password* as a salted HMAC hash with a fresh salt.'''
        self.salt = new_salt()
        self.hash = make_hash(password, self.salt)
    def check_password(self, password):
        '''Return True if *password* matches the stored hash
        (constant-time comparison).'''
        test_hash = make_hash(password, self.salt)
        return compare_digest(self.hash, test_hash)
    def get_token(self):
        '''Derive the session token from the username and account salt.'''
        return make_hash(self.username, self.salt)
    def check_token(self, test_token):
        '''Constant-time comparison of *test_token* against the token.'''
        token = self.get_token()
        return compare_digest(token, test_token)
    @classmethod
    def no_users_exist(cls):
        '''Return True when the users table is empty.'''
        with new_session() as session:
            user = session.query(User).first()
            return user is None
    @classmethod
    def is_user_exists(cls, username):
        '''Return True when an account named *username* exists.'''
        with new_session() as session:
            user = session.query(User).filter_by(username=username).first()
            return user is not None
    @classmethod
    def all_usernames(cls):
        '''Return every account name as a list of strings.'''
        with new_session() as session:
            users = session.query(User.username)
            return list([user.username for user in users])
    @classmethod
    def save_new_user(cls, username, password):
        '''Create a new account with the given credentials.'''
        with new_session() as session:
            user = User(username=username)
            user.set_password(password)
            session.add(user)
    @classmethod
    def check_account(cls, username, password):
        '''Return True/False for a known account; None if it does not exist.'''
        with new_session() as session:
            user = session.query(User).filter_by(username=username).first()
            if user:
                return user.check_password(password)
    @classmethod
    def update_password(cls, username, password):
        '''Replace the password of an existing account.'''
        with new_session() as session:
            user = session.query(User).filter_by(username=username).first()
            user.set_password(password)
    @classmethod
    def delete_user(cls, username):
        '''Delete the account named *username* (no-op if absent).'''
        with new_session() as session:
            session.query(User).filter_by(username=username).delete()
    @classmethod
    def get_user_token(cls, username):
        '''Return the session token; AttributeError if the user is absent.'''
        with new_session() as session:
            return session.query(User).filter_by(username=username)\
                .first().get_token()
    @classmethod
    def check_account_session(cls, username, token):
        '''Return True/False for a known account's token; None if no user.'''
        with new_session() as session:
            user = session.query(User).filter_by(username=username).first()
            if not user:
                return
            return user.check_token(token)
class Project(Base):
    '''Project settings.

    One row per URL-shortener project: shortcode alphabet, HTTP status
    classification and queue tuning parameters.
    '''
    __tablename__ = 'projects'
    name = Column(String, primary_key=True)
    # Minimum library/pipeline versions a client must run to claim work.
    min_version = Column(Integer, default=VERSION, nullable=False)
    min_client_version = Column(Integer, default=MIN_CLIENT_VERSION_OVERRIDE, nullable=False)
    # Alphabet used to convert sequence numbers <-> shortcodes.
    alphabet = Column(String, default='0123456789abcdefghijklmnopqrstuvwxyz'
                      'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
                      nullable=False)
    url_template = Column(String, default='http://example.com/{shortcode}',
                          nullable=False)
    request_delay = Column(Float, default=0.5, nullable=False)
    # HTTP status code classification used by the scraper clients.
    redirect_codes = Column(JsonType, default=[301, 302, 303, 307],
                            nullable=False)
    no_redirect_codes = Column(JsonType, default=[404], nullable=False)
    unavailable_codes = Column(JsonType, default=[200])
    banned_codes = Column(JsonType, default=[403, 420, 429])
    body_regex = Column(String)
    location_anti_regex = Column(String)
    method = Column(String, default='head', nullable=False)
    enabled = Column(Boolean, default=False)
    autoqueue = Column(Boolean, default=False)
    num_count_per_item = Column(Integer, default=50, nullable=False)
    max_num_items = Column(Integer, default=100, nullable=False)
    lower_sequence_num = Column(Integer, default=0, nullable=False)
    # Seconds before a claimed item is auto-released; 0 disables
    # (release_old only considers projects with autorelease_time > 0).
    autorelease_time = Column(Integer, default=60 * 30)
    def to_dict(self, with_shortcode=False):
        '''Return the row as a plain dict of attribute values.'''
        ans = {x.key:x.value for x in object_state(self).attrs}
        if with_shortcode:
            ans['lower_shortcode'] = self.lower_shortcode()
        return ans
    def lower_shortcode(self):
        '''Return lower_sequence_num rendered in the project alphabet.'''
        return int_to_str(self.lower_sequence_num, self.alphabet)
    @classmethod
    def all_project_names(cls):
        '''Return the names of all projects.'''
        with new_session() as session:
            projects = session.query(Project.name)
            return list([project.name for project in projects])
    @classmethod
    def all_project_infos(cls):
        '''Return every project as a dict including lower_shortcode.'''
        with new_session() as session:
            projects = session.query(Project)
            return list([project.to_dict(with_shortcode=True) for project in projects])
    @classmethod
    def new_project(cls, name):
        '''Create a project with default settings.'''
        with new_session() as session:
            project = Project(name=name)
            session.add(project)
    @classmethod
    def get_plain(cls, name):
        '''Return a detached copy of the project, readable after the
        session closes.'''
        with new_session() as session:
            project = session.query(Project).filter_by(name=name).first()
            # Detach from the session so attributes remain accessible.
            make_transient(project)
            return project
    @classmethod
    @contextlib.contextmanager
    def get_session_object(cls, name):
        '''Context manager yielding the project attached to a live session.'''
        with new_session() as session:
            project = session.query(Project).filter_by(name=name).first()
            yield project
    @classmethod
    def delete_project(cls, name):
        '''Delete the project row.'''
        # FIXME: need to cascade the deletes
        with new_session() as session:
            session.query(Project).filter_by(name=name).delete()
class Item(Base):
    '''A claimable work unit: a range of sequence numbers for a project.'''
    __tablename__ = 'items'
    id = Column(Integer, primary_key=True)
    project_id = Column(Integer, ForeignKey('projects.name'), nullable=False)
    project = relationship('Project')
    # Range of shortcode sequence numbers covered by this work unit.
    lower_sequence_num = Column(Integer, nullable=False)
    upper_sequence_num = Column(Integer, nullable=False)
    # Claim bookkeeping; all NULL while the item is unclaimed.
    datetime_claimed = Column(DateTime)
    tamper_key = Column(String)
    username = Column(String)
    ip_address = Column(String)
    def to_dict(self, with_shortcode=False):
        '''Return the row as a dict; datetime_claimed becomes a Unix epoch.'''
        ans = {x.key:x.value for x in object_state(self).attrs}
        ans.update({
            'project': self.project.to_dict(),
            'datetime_claimed': calendar.timegm(self.datetime_claimed.utctimetuple()) if self.datetime_claimed else None,
        })
        if with_shortcode:
            ans['lower_shortcode'] = int_to_str(self.lower_sequence_num, self.project.alphabet)
            ans['upper_shortcode'] = int_to_str(self.upper_sequence_num, self.project.alphabet)
        return ans
    @classmethod
    def get_items(cls, project_id):
        '''Return all of a project's items as dicts, ordered by claim time.'''
        with new_session() as session:
            rows = session.query(Item).filter_by(project_id=project_id).order_by(Item.datetime_claimed)
            return list([item.to_dict(with_shortcode=True) for item in rows])
    @classmethod
    def add_items(cls, project_id, sequence_list):
        '''Bulk-insert items from (lower_num, upper_num) pairs.'''
        with new_session() as session:
            query = insert(Item)
            query_args = []
            for lower_num, upper_num in sequence_list:
                query_args.append({
                    'project_id': project_id,
                    'lower_sequence_num': lower_num,
                    'upper_sequence_num': upper_num,
                })
            # Single executemany-style INSERT covering all rows.
            session.execute(query, query_args)
    @classmethod
    def delete(cls, item_id):
        '''Delete one item by id.'''
        with new_session() as session:
            session.query(Item).filter_by(id=item_id).delete()
    @classmethod
    def release(cls, item_id):
        '''Clear one item's claim fields so it can be handed out again.'''
        with new_session() as session:
            item = session.query(Item).filter_by(id=item_id).first()
            item.datetime_claimed = None
            item.ip_address = None
            item.username = None
    @classmethod
    def release_all(cls, project_id=None, old_date=None):
        '''Release claims, optionally limited to one project and/or to
        claims made at or before *old_date*.'''
        with new_session() as session:
            query = session.query(Item)
            if project_id:
                query = query.filter_by(project_id=project_id)
            if old_date:
                query = query.filter(Item.datetime_claimed <= old_date)
            query.update({
                'datetime_claimed': None,
                'ip_address': None,
                'username': None,
            })
    @classmethod
    def release_old(cls, project_id=None, autoqueue_only=False):
        '''Release claims older than each project's autorelease_time.'''
        with new_session() as session:
            # we could probably write this in one query
            # but it would be non-portable across SQL dialects
            projects = session.query(Project) \
                .filter(Project.autorelease_time > 0)
            if project_id:
                projects = projects.filter_by(name=project_id)
            if autoqueue_only:
                projects = projects.filter_by(autoqueue=True)
            for project in projects:
                min_time = datetime.datetime.utcnow() - datetime.timedelta(seconds=project.autorelease_time)
                query = session.query(Item) \
                    .filter(Item.datetime_claimed <= min_time, Item.project == project)
                query.update({
                    'datetime_claimed': None,
                    'ip_address': None,
                    'username': None,
                })
    @classmethod
    def delete_all(cls, project_id):
        '''Delete every item belonging to a project.'''
        with new_session() as session:
            session.query(Item).filter_by(project_id=project_id).delete()
class BlockedUser(Base):
    '''Blocked IP addresses or usernames.'''
    __tablename__ = 'blocked_users'
    username = Column(String, primary_key=True)
    note = Column(String)
    @classmethod
    def block_username(cls, username, note=None):
        '''Add *username* (or IP address) to the block list.'''
        with new_session() as session:
            entry = BlockedUser(username=username, note=note)
            session.add(entry)
    @classmethod
    def unblock_username(cls, username):
        '''Remove *username* from the block list (no-op if absent).'''
        with new_session() as session:
            session.query(BlockedUser).filter_by(username=username).delete()
    @classmethod
    def is_username_blocked(cls, *username):
        '''Return True when any of the given names is blocked, else None.'''
        with new_session() as session:
            query = select([BlockedUser.username])\
                .where(BlockedUser.username.in_(username))
            if session.execute(query).first():
                return True
    @classmethod
    def all_blocked_usernames(cls):
        '''Return every blocked name as a list of strings.'''
        with new_session() as session:
            rows = session.query(BlockedUser.username)
            return [row[0] for row in rows]
class Result(Base):
    '''Unshortened URL.'''
    __tablename__ = 'results'
    id = Column(Integer, primary_key=True)
    project_id = Column(Integer, ForeignKey('projects.name'), nullable=False, index=True)
    project = relationship('Project')
    shortcode = Column(String, nullable=False)
    url = Column(String, nullable=False)
    encoding = Column(String, nullable=False)
    datetime = Column(DateTime)
    @classmethod
    def has_results(cls):
        '''Return True when at least one result row exists.'''
        with new_session() as session:
            result = session.query(Result.id).first()
            return bool(result)
    @classmethod
    def get_count(cls):
        '''Approximate the row count as max(id) - min(id); avoids a full
        COUNT scan.'''
        with new_session() as session:
            return (session.query(func.max(Result.id)).scalar() or 0) \
                - (session.query(func.min(Result.id)).scalar() or 0)
    @classmethod
    def get_results(cls, offset_id=0, limit=1000, project_id=None):
        '''Yield result rows as dicts, newest first.

        offset_id == 0 means "start from the newest row".  When project_id
        is given, each dict also carries 'seq_num', the shortcode decoded
        with that project's alphabet.
        '''
        with new_session() as session:
            if int(offset_id) == 0:
                offset_id = session.query(func.max(Result.id)).scalar() or 0
            rows = session.query(
                Result.id, Result.project_id, Result.shortcode,
                Result.url, Result.encoding, Result.datetime
            ) \
                .filter(Result.id <= int(offset_id))
            if project_id is not None and project_id != 'None':
                rows = rows.filter(Result.project_id == project_id)
                alphabet = Project.get_plain(project_id).alphabet
            else:
                alphabet = None
            rows = rows.order_by(Result.id.desc()).limit(int(limit))
            for row in rows:
                ans = {
                    'id': row[0],
                    'project_id': row[1],
                    'shortcode': row[2],
                    'url': row[3],
                    'encoding': row[4],
                    'datetime': row[5]
                }
                if alphabet:
                    ans['seq_num'] = str_to_int(row[2], alphabet)
                yield ans
class ErrorReport(Base):
    '''Error report submitted for a claimed item.'''
    __tablename__ = 'error_reports'
    id = Column(Integer, primary_key=True)
    item_id = Column(Integer, ForeignKey('items.id'), nullable=False)
    item = relationship('Item')
    message = Column(String, nullable=False)
    datetime = Column(DateTime, nullable=False,
                      default=datetime.datetime.utcnow)
    def to_dict(self):
        '''Return the row as a dict, adding the owning project's name.'''
        ans = {x.key:x.value for x in object_state(self).attrs}
        ans.update({
            'project': self.item.project_id if self.item else None,
        })
        return ans
    @classmethod
    def get_count(cls):
        '''Approximate count as max(id) - min(id); avoids a full COUNT.'''
        with new_session() as session:
            min_id = session.query(func.min(ErrorReport.id)).scalar() or 0
            max_id = session.query(func.max(ErrorReport.id)).scalar() or 0
            return max_id - min_id
    @classmethod
    def all_reports(cls, limit=100, offset_id=None, project_id=None):
        '''Return up to *limit* reports as dicts, optionally after
        *offset_id* and/or restricted to one project.'''
        with new_session() as session:
            reports = session.query(ErrorReport)
            if offset_id:
                reports = reports.filter(ErrorReport.id > offset_id)
            if project_id is not None and project_id != 'None':
                reports = reports.join(Item).filter(Item.project_id == project_id)
            reports = reports.limit(limit)
            return list(report.to_dict() for report in reports)
    @classmethod
    def delete_all(cls):
        '''Delete every error report.'''
        with new_session() as session:
            session.query(ErrorReport.id).delete()
    @classmethod
    def delete_one(cls, report_id):
        '''Delete a single error report by id.'''
        with new_session() as session:
            query = delete(ErrorReport).where(ErrorReport.id == report_id)
            session.execute(query)
    @classmethod
    def delete_orphaned(cls):
        '''Delete reports whose referenced item row no longer exists.'''
        with new_session() as session:
            # Correlated EXISTS: keep reports that still have an item.
            subquery = select([ErrorReport.id])\
                .where(ErrorReport.item_id == Item.id)\
                .limit(1)
            query = delete(ErrorReport).where(~exists(subquery))
            session.execute(query)
class Budget(object):
    '''Budget calculator to help manage available items.
    Warning: This class assumes the application is single instance.
    '''
    # In-memory map: project name -> counters and claiming IPs.
    projects = {}
    @classmethod
    def calculate_budgets(cls):
        '''Rebuild per-project claim/item counters from the database.'''
        cls.projects = {}
        with new_session() as session:
            project_rows = session.query(
                Project.name, Project.max_num_items,
                Project.min_client_version, Project.min_version,
                Project.max_num_items
            ).filter_by(enabled=True)
            for (name, max_num_items, min_client_version,
                    min_version, max_num_items) in project_rows:
                cls.projects[name] = {
                    'max_num_items': max_num_items,
                    'min_client_version': min_client_version,
                    'min_version': min_version,
                    'items': 0,
                    'claims': 0,
                    'ip_addresses': set(),
                }
            for project_id, ip_address in session.query(Item.project_id, Item.ip_address):
                info = cls.projects.get(project_id)
                if info is None:
                    continue
                info['items'] += 1
                if ip_address:
                    info['ip_addresses'].add(ip_address)
                    info['claims'] += 1
    @classmethod
    def get_available_project(cls, ip_address, version, client_version):
        '''Pick a random eligible project for this client.

        Returns (name, claims, items, max_num_items) or None when no
        project accepts this IP/version combination.
        '''
        names = list(cls.projects.keys())
        random.shuffle(names)
        for project_id in names:
            info = cls.projects[project_id]
            if ip_address in info['ip_addresses']:
                continue
            if version < info['min_version']:
                continue
            if client_version < info['min_client_version']:
                continue
            if info['claims'] > info['items']:
                continue
            if info['claims'] >= info['max_num_items']:
                continue
            return (project_id, info['claims'], info['items'],
                    info['max_num_items'])
    @classmethod
    def is_client_outdated(cls, version, client_version):
        '''Return the (version, client_version) pair the client must meet,
        or None when the client is current or no projects are enabled.'''
        if not cls.projects:
            return
        needed_version = max(
            info['min_version'] for info in cls.projects.values())
        needed_client_version = max(
            info['min_client_version'] for info in cls.projects.values())
        if version < needed_version or client_version < needed_client_version:
            return needed_version, needed_client_version
    @classmethod
    def is_claims_full(cls, ip_address):
        '''Truthy when every enabled project already has a claim from
        this IP address.'''
        return cls.projects and all(
            ip_address in info['ip_addresses']
            for info in cls.projects.values()
        )
    @classmethod
    def check_out(cls, project_id, ip_address, new_item=False):
        '''Record that *ip_address* claimed an item from *project_id*.'''
        assert project_id
        assert ip_address
        info = cls.projects[project_id]
        info['claims'] += 1
        if new_item:
            info['items'] += 1
        info['ip_addresses'].add(ip_address)
    @classmethod
    def check_in(cls, project_id, ip_address):
        '''Record that *ip_address* returned its claim for *project_id*.'''
        assert project_id
        assert ip_address
        info = cls.projects.get(project_id)
        if info is None:
            # Project was recently disabled but the job hasn't come back
            # yet. Should be safe to ignore.
            return
        info['claims'] -= 1
        info['items'] -= 1
        info['ip_addresses'].remove(ip_address)
def make_hash(plaintext, salt):
    """Return an HMAC-MD5 digest of `plaintext` (ASCII) keyed with `salt`.

    Fix: `hmac.new()` without `digestmod` relied on the implicit MD5
    default, which was removed in Python 3.8 (it now raises TypeError).
    Passing digestmod='md5' explicitly preserves the original digests.
    """
    key = salt
    msg = plaintext.encode('ascii')
    return hmac.new(key, msg, digestmod='md5').digest()
def new_salt():
    """Return 16 cryptographically secure random bytes for hashing."""
    return os.urandom(16)
def new_tamper_key():
    """Return a random 32-character uppercase-hex token."""
    random_bytes = os.urandom(16)
    return base64.b16encode(random_bytes).decode('ascii')
def deadman_checks():
    """Return an HTML alert string when a deadman threshold is exceeded,
    otherwise the empty string."""
    checks = (
        (ErrorReport.get_count, DEADMAN_MAX_ERROR_REPORTS,
         '<div class="alert btn-danger">Too many error reports! Figure out what went wrong.</div>'),
        (Result.get_count, DEADMAN_MAX_RESULTS,
         '<div class="alert btn-danger">Too many results! Run the export script.</div>'),
    )
    for counter, limit, message in checks:
        if counter() > limit:
            return message
    return ''
def checkout_item(username, ip_address, version=-1, client_version=-1):
    """Claim an item for `username` and return it as a dict.
    Raises NoResourcesAvailable, NoItemAvailable, FullClaim or
    UpdateClient when no item can be handed out to this client.
    """
    assert version is not None
    assert client_version is not None
    check_min_version_overrides(version, client_version)
    # Refuse all checkouts while a deadman alert is active.
    if deadman_checks():
        raise NoResourcesAvailable()
    available = Budget.get_available_project(
        ip_address, version, client_version
    )
    if available:
        project_id, num_claims, num_items, max_num_items = available
        with new_session() as session:
            # All existing items are claimed but the project can still
            # grow: auto-generate a fresh item when autoqueue is enabled.
            if num_claims >= num_items and num_items < max_num_items:
                project = session.query(Project).get(project_id)
                if project.autoqueue:
                    item_count = project.num_count_per_item
                    upper_sequence_num = project.lower_sequence_num + item_count - 1
                    item = Item(
                        project=project,
                        lower_sequence_num=project.lower_sequence_num,
                        upper_sequence_num=upper_sequence_num,
                    )
                    new_item = True
                    # Advance the project's sequence window past the new item.
                    project.lower_sequence_num = upper_sequence_num + 1
                    session.add(item)
                else:
                    item = None
                    new_item = None
            else:
                # Reuse an existing unclaimed item from this project.
                item = session.query(Item) \
                    .filter_by(username=None) \
                    .filter_by(project_id=project_id) \
                    .first()
                new_item = False
            if item:
                item.datetime_claimed = datetime.datetime.utcnow()
                item.tamper_key = new_tamper_key()
                item.username = username
                item.ip_address = ip_address
                # Item should be committed now to generate ID for
                # newly generated items
                session.commit()
                Budget.check_out(project_id, ip_address, new_item=new_item)
                return item.to_dict()
            else:
                raise NoItemAvailable()
    else:
        # No project is available: tell the client why.
        if Budget.is_claims_full(ip_address):
            raise FullClaim()
        else:
            outdated = Budget.is_client_outdated(version, client_version)
            if outdated:
                current_version, current_client_version = outdated
                raise UpdateClient(
                    version=version,
                    client_version=client_version,
                    current_version=current_version,
                    current_client_version=current_client_version
                )
            else:
                raise NoItemAvailable()
def checkin_item(item_id, tamper_key, results):
    """Accept `results` for a claimed item and release the claim.
    `results` maps shortcode -> {'url': ..., 'encoding': ...}.  Returns
    a stats dict; raises InvalidClaim if id/tamper_key don't match.
    """
    item_stat = {
        'project': '',
        'username': '',
        'scanned': 0,
        'found': len(results)
    }
    with new_session() as session:
        row = session.query(
            Item.project_id, Item.username, Item.upper_sequence_num,
            Item.lower_sequence_num, Item.ip_address, Item.datetime_claimed
        ) \
            .filter_by(id=item_id, tamper_key=tamper_key).first()
        if not row:
            raise InvalidClaim()
        (project_id, username, upper_sequence_num, lower_sequence_num,
            ip_address, datetime_claimed) = row
        item_stat['project'] = project_id
        item_stat['username'] = username
        item_stat['scanned'] = upper_sequence_num - lower_sequence_num + 1
        item_stat['started'] = datetime_claimed.replace(
            tzinfo=datetime.timezone.utc).timestamp()
        query_args = []
        # tz instead of utcnow() for Unix timestamp in UTC instead of local
        time = datetime.datetime.now(datetime.timezone.utc)
        item_stat['finished'] = time.timestamp()
        for shortcode in results.keys():
            url = results[shortcode]['url']
            encoding = results[shortcode]['encoding']
            query_args.append({
                'project_id': project_id,
                'shortcode': shortcode,
                'url': url,
                'encoding': encoding,
                'datetime': time
            })
        # Bulk-insert all results with a single executemany statement.
        if len(query_args) > 0:
            query = insert(Result)
            session.execute(query, query_args)
        # The item is finished: remove it from the queue.
        session.execute(delete(Item).where(Item.id == item_id))
    Budget.check_in(project_id, ip_address)
    if Stats.instance:
        Stats.instance.update(item_stat)
    return item_stat
def report_error(item_id, tamper_key, message):
    """File an error report against a claimed item.
    Raises InvalidClaim when id/tamper_key don't match a claimed item.
    """
    with new_session() as session:
        claimed_item = session.query(Item) \
            .filter_by(id=item_id, tamper_key=tamper_key).first()
        if not claimed_item:
            raise InvalidClaim()
        session.add(ErrorReport(item_id=item_id, message=message))
def check_min_version_overrides(version, client_version):
    """Raise UpdateClient when either version is below the hard minimum."""
    version_ok = version >= MIN_VERSION_OVERRIDE
    client_ok = client_version >= MIN_CLIENT_VERSION_OVERRIDE
    if version_ok and client_ok:
        return
    raise UpdateClient(
        version=version,
        client_version=client_version,
        current_version=MIN_VERSION_OVERRIDE,
        current_client_version=MIN_CLIENT_VERSION_OVERRIDE
    )
def get_git_hash():
    """Return the current git commit hash (bytes), or the error message
    string when git is unavailable or fails."""
    repo_dir = os.path.dirname(__file__)
    command = ['git', 'rev-parse', 'HEAD']
    try:
        output = subprocess.check_output(command, cwd=repo_dir)
    except (subprocess.CalledProcessError, OSError) as error:
        return str(error)
    return output.strip()
def compare_digest(value_1, value_2):
    """Constant-time equality check for two equal-length sequences.

    Deliberately avoids short-circuiting so the comparison time does
    not leak the position of the first mismatch.
    """
    if len(value_1) != len(value_2):
        return False
    mismatch = False
    for a, b in zip(value_1, value_2):
        mismatch |= a != b
    return not mismatch
| mit |
codervince/flashingredlight | env/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py | 2360 | 3778 | """The match_hostname() function from Python 3.3.3, essential when using SSL."""
# Note: This file is under the PSF license as the code comes from the python
# stdlib. http://docs.python.org/3/license.html
import re
__version__ = '3.4.0.2'
class CertificateError(ValueError):
    """Raised when a certificate does not match the expected hostname."""
    pass
def _dnsname_match(dn, hostname, max_wildcards=1):
    """Matching according to RFC 6125, section 6.4.3
    http://tools.ietf.org/html/rfc6125#section-6.4.3
    """
    if not dn:
        return False
    # Ported from python3-syntax:
    # leftmost, *remainder = dn.split(r'.')
    fragments = dn.split(r'.')
    leftmost, remainder = fragments[0], fragments[1:]
    wildcard_count = leftmost.count('*')
    if wildcard_count > max_wildcards:
        # Issue #17980: refuse more than one wildcard per fragment to
        # avoid denial-of-service via pathological patterns.
        raise CertificateError(
            "too many wildcards in certificate DNS name: " + repr(dn))
    if not wildcard_count:
        # Fast path: no wildcard means a plain case-insensitive compare.
        return dn.lower() == hostname.lower()
    # Translate the leftmost fragment into a regex per RFC 6125 6.4.3.
    if leftmost == '*':
        # When '*' is a fragment by itself, it matches a non-empty
        # dotless fragment.
        first_pat = '[^.]+'
    elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
        # RFC 6125, section 6.4.3, subitem 3: no wildcard expansion
        # inside an A-label/U-label of an internationalized name.
        first_pat = re.escape(leftmost)
    else:
        # Otherwise, '*' matches any dotless string, e.g. www*
        first_pat = re.escape(leftmost).replace(r'\*', '[^.]*')
    # The remaining fragments must match literally (wildcards ignored).
    pats = [first_pat] + [re.escape(frag) for frag in remainder]
    pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
    return pat.match(hostname)
def match_hostname(cert, hostname):
    """Verify that *cert* (in decoded format as returned by
    SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
    rules are followed, but IP addresses are not accepted for *hostname*.
    CertificateError is raised on failure. On success, the function
    returns nothing.
    """
    if not cert:
        raise ValueError("empty or no certificate")
    dnsnames = []
    # Prefer dNSName entries of the subjectAltName extension.
    san = cert.get('subjectAltName', ())
    for key, value in san:
        if key == 'DNS':
            if _dnsname_match(value, hostname):
                return
            dnsnames.append(value)
    if not dnsnames:
        # The subject is only checked when there is no dNSName entry
        # in subjectAltName
        for sub in cert.get('subject', ()):
            for key, value in sub:
                # XXX according to RFC 2818, the most specific Common Name
                # must be used.
                if key == 'commonName':
                    if _dnsname_match(value, hostname):
                        return
                    dnsnames.append(value)
    # Nothing matched: report every candidate name that was tried.
    if len(dnsnames) > 1:
        raise CertificateError("hostname %r "
            "doesn't match either of %s"
            % (hostname, ', '.join(map(repr, dnsnames))))
    elif len(dnsnames) == 1:
        raise CertificateError("hostname %r "
            "doesn't match %r"
            % (hostname, dnsnames[0]))
    else:
        raise CertificateError("no appropriate commonName or "
            "subjectAltName fields were found")
| mit |
arifsetiawan/edx-platform | common/djangoapps/cors_csrf/tests/test_views.py | 150 | 2397 | """Tests for cross-domain request views. """
import json
from django.test import TestCase
from django.core.urlresolvers import reverse, NoReverseMatch
import ddt
from config_models.models import cache
from cors_csrf.models import XDomainProxyConfiguration
@ddt.ddt
class XDomainProxyTest(TestCase):
    """Tests for the xdomain proxy end-point. """
    def setUp(self):
        """Clear model-based config cache. """
        super(XDomainProxyTest, self).setUp()
        try:
            self.url = reverse('xdomain_proxy')
        except NoReverseMatch:
            # The end-point is optional; skip when it isn't routed.
            self.skipTest('xdomain_proxy URL is not configured')
        cache.clear()
    def test_xdomain_proxy_disabled(self):
        # A disabled configuration should 404.
        self._configure(False)
        response = self._load_page()
        self.assertEqual(response.status_code, 404)
    @ddt.data(None, [' '], [' ', ' '])
    def test_xdomain_proxy_enabled_no_whitelist(self, whitelist):
        # Enabled but with a missing/blank whitelist should still 404.
        self._configure(True, whitelist=whitelist)
        response = self._load_page()
        self.assertEqual(response.status_code, 404)
    @ddt.data(
        (['example.com'], ['example.com']),
        (['example.com', 'sub.example.com'], ['example.com', 'sub.example.com']),
        ([' example.com '], ['example.com']),
        ([' ', 'example.com'], ['example.com']),
    )
    @ddt.unpack
    def test_xdomain_proxy_enabled_with_whitelist(self, whitelist, expected_whitelist):
        # Entries should be trimmed and blank entries dropped.
        self._configure(True, whitelist=whitelist)
        response = self._load_page()
        self._check_whitelist(response, expected_whitelist)
    def _configure(self, is_enabled, whitelist=None):
        """Enable or disable the end-point and configure the whitelist. """
        config = XDomainProxyConfiguration.current()
        config.enabled = is_enabled
        if whitelist:
            config.whitelist = "\n".join(whitelist)
        config.save()
        cache.clear()
    def _load_page(self):
        """Load the end-point. """
        return self.client.get(reverse('xdomain_proxy'))
    def _check_whitelist(self, response, expected_whitelist):
        """Verify that the domain whitelist is rendered on the page. """
        rendered_whitelist = json.dumps({
            domain: '*'
            for domain in expected_whitelist
        })
        self.assertContains(response, 'xdomain.min.js')
        self.assertContains(response, rendered_whitelist)
| agpl-3.0 |
adrienbrault/home-assistant | tests/components/homekit_controller/specific_devices/test_koogeek_ls1.py | 5 | 3555 | """Make sure that existing Koogeek LS1 support isn't broken."""
from datetime import timedelta
from unittest import mock
from aiohomekit.exceptions import AccessoryDisconnectedError, EncryptionError
from aiohomekit.testing import FakePairing
import pytest
from homeassistant.components.light import SUPPORT_BRIGHTNESS, SUPPORT_COLOR
from homeassistant.helpers import device_registry as dr, entity_registry as er
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed
from tests.components.homekit_controller.common import (
Helper,
setup_accessories_from_file,
setup_test_accessories,
)
LIGHT_ON = ("lightbulb", "on")
async def test_koogeek_ls1_setup(hass):
    """Test that a Koogeek LS1 can be correctly setup in HA."""
    # Load the canned accessory description and pair with a fake device.
    accessories = await setup_accessories_from_file(hass, "koogeek_ls1.json")
    config_entry, pairing = await setup_test_accessories(hass, accessories)
    entity_registry = er.async_get(hass)
    # Assert that the entity is correctly added to the entity registry
    entry = entity_registry.async_get("light.koogeek_ls1_20833f")
    assert entry.unique_id == "homekit-AAAA011111111111-7"
    helper = Helper(
        hass, "light.koogeek_ls1_20833f", pairing, accessories[0], config_entry
    )
    state = await helper.poll_and_get_state()
    # Assert that the friendly name is detected correctly
    assert state.attributes["friendly_name"] == "Koogeek-LS1-20833F"
    # Assert that all optional features the LS1 supports are detected
    assert state.attributes["supported_features"] == (
        SUPPORT_BRIGHTNESS | SUPPORT_COLOR
    )
    # The device registry metadata should reflect the accessory info.
    device_registry = dr.async_get(hass)
    device = device_registry.async_get(entry.device_id)
    assert device.manufacturer == "Koogeek"
    assert device.name == "Koogeek-LS1-20833F"
    assert device.model == "LS1"
    assert device.sw_version == "2.2.15"
    assert device.via_device_id is None
@pytest.mark.parametrize("failure_cls", [AccessoryDisconnectedError, EncryptionError])
async def test_recover_from_failure(hass, utcnow, failure_cls):
    """
    Test that entity actually recovers from a network connection drop.
    See https://github.com/home-assistant/core/issues/18949
    Parametrized over both failure modes the pairing can raise.
    """
    accessories = await setup_accessories_from_file(hass, "koogeek_ls1.json")
    config_entry, pairing = await setup_test_accessories(hass, accessories)
    helper = Helper(
        hass, "light.koogeek_ls1_20833f", pairing, accessories[0], config_entry
    )
    # Set light state on fake device to off
    helper.characteristics[LIGHT_ON].set_value(False)
    # Test that entity starts off in a known state
    state = await helper.poll_and_get_state()
    assert state.state == "off"
    # Set light state on fake device to on
    helper.characteristics[LIGHT_ON].set_value(True)
    # Test that entity remains in the same state if there is a network error
    next_update = dt_util.utcnow() + timedelta(seconds=60)
    with mock.patch.object(FakePairing, "get_characteristics") as get_char:
        get_char.side_effect = failure_cls("Disconnected")
        state = await helper.poll_and_get_state()
        assert state.state == "off"
        # The poll should still request all four LS1 characteristics.
        chars = get_char.call_args[0][0]
        assert set(chars) == {(1, 8), (1, 9), (1, 10), (1, 11)}
    # Test that entity changes state when network error goes away
    next_update += timedelta(seconds=60)
    async_fire_time_changed(hass, next_update)
    await hass.async_block_till_done()
    state = await helper.poll_and_get_state()
    assert state.state == "on"
| mit |
danielneis/osf.io | tests/framework_tests/test_utils.py | 31 | 1575 | import unittest # noqa
from nose.tools import * # noqa
from modularodm import Q
from tests.base import DbTestCase
from tests import factories
from framework.mongo.utils import get_or_http_error, autoload
from framework.exceptions import HTTPError
from website.models import Node
class MongoUtilsTestCase(DbTestCase):
    """Tests for framework.mongo.utils (get_or_http_error, autoload)."""
    def test_get_or_http_error_by_pk_found(self):
        n = factories.NodeFactory()
        found = get_or_http_error(Node, n._id)
        assert_equal(found, n)
    def test_get_or_http_error_by_pk_not_found(self):
        # An unknown primary key raises HTTPError rather than returning None.
        with assert_raises(HTTPError):
            get_or_http_error(Node, 'blah')
    def test_get_or_http_error_by_query_found(self):
        n = factories.NodeFactory()
        found = get_or_http_error(
            Node,
            (Q('title', 'eq', n.title) & Q('_id', 'eq', n._id))
        )
        assert_equal(found, n)
    def test_get_or_http_error_by_query_not_found(self):
        with assert_raises(HTTPError):
            get_or_http_error(Node, Q('_id', 'eq', 'blah'))
    def test_get_or_http_error_by_query_not_unique(self):
        # Two nodes share the title, so the query result is ambiguous.
        title = 'TITLE'
        factories.NodeFactory(title=title)
        factories.NodeFactory(title=title)
        with assert_raises(HTTPError):
            get_or_http_error(Node, Q('title', 'eq', title))
    def test_autoload(self):
        target = factories.NodeFactory()
        def fn(node, *args, **kwargs):
            return node
        # autoload should resolve node_id into a Node and pass it to fn.
        wrapped = autoload(Node, 'node_id', 'node', fn)
        found = wrapped(node_id=target._id)
        assert_equal(found, target)
| apache-2.0 |
Omhen/rasppi-caldera | cycle.py | 1 | 3169 | import ConfigParser
import logging
import os
import os.path
import threading
import time
from datetime import datetime
from config.flags import Status, Watching
import RPIO
import config.flags as flags
from watcher import Watcher
CONFIG_FILE = os.path.join(os.path.dirname(__file__), 'config/all.conf')
OUT_ENDLESS_FULL_LOAD = getattr(flags, 'OUT_ENDLESS_FULL_LOAD')
OUT_ENDLESS_REGULATED_LOAD = getattr(flags, 'OUT_ENDLESS_REGULATED_LOAD')
OUT_START = getattr(flags, 'OUT_START')
OUT_REGULATED_SPEED = getattr(flags, 'OUT_REGULATED_SPEED')
OUT_CLEANUP_SPEED = getattr(flags, 'OUT_CLEANUP_SPEED')
OUT_ALARM = getattr(flags, 'OUT_ALARM')
logger = logging.getLogger('')
def activate_load():
    # Switch the endless-screw full-load output on.  Outputs appear to
    # be active-low (False = energised) -- see deactivate_start_and_load;
    # TODO confirm against the relay wiring.
    RPIO.output(OUT_ENDLESS_FULL_LOAD, False)
def activate_regulated_load():
    # Switch from full load to regulated load: full-load output off,
    # regulated load, start and regulated speed on (active-low outputs
    # assumed -- confirm wiring).
    RPIO.output(OUT_ENDLESS_FULL_LOAD, True)
    RPIO.output(OUT_ENDLESS_REGULATED_LOAD, False)
    RPIO.output(OUT_START, False)
    RPIO.output(OUT_REGULATED_SPEED, False)
def deactivate_start_and_load():
    # Turn off every load/start related output (True = de-energised,
    # assuming active-low outputs -- confirm wiring).
    RPIO.output(OUT_ENDLESS_FULL_LOAD, True)
    RPIO.output(OUT_ENDLESS_REGULATED_LOAD, True)
    RPIO.output(OUT_START, True)
    RPIO.output(OUT_REGULATED_SPEED, True)
class Cycle(object):
    """State machine driving one load cycle through the RPIO outputs.
    Timing values come from the [Times] section of CONFIG_FILE; a
    Watcher supervises the running cycle.
    """
    def __init__(self):
        self.status = Status.STOPPED
        self.runner = threading.currentThread()
        # Set by start(); used by stop() to report the cycle duration.
        self.start_time = None
        config = ConfigParser.SafeConfigParser()
        config.read(CONFIG_FILE)
        self.CLEANUP_TIME = config.getint('Times', 'cleanup_time')
        self.MAX_START_TIME = config.getint('Times', 'max_start_time')
        self.FULL_LOAD_TIME = config.getint('Times', 'full_load_time')
        self.watcher = Watcher(self)
    def start(self):
        """Run the loading phase: full load, then regulated load."""
        self.runner = threading.current_thread()
        logger.info('Starting cycle on %s' % self.runner.name)
        self.status = Status.RUNNING
        self.start_time = datetime.now()
        activate_load()
        time.sleep(self.FULL_LOAD_TIME)
        # Only continue when nothing stopped/alarmed us while sleeping.
        if self.status == Status.RUNNING:
            activate_regulated_load()
    def cleanup(self):
        """Run the cleanup phase at cleanup speed, then stop."""
        logger.info('Doing cleanup')
        self.status = Status.CLEANING
        deactivate_start_and_load()
        RPIO.output(OUT_CLEANUP_SPEED, False)
        time.sleep(self.CLEANUP_TIME)
        if self.status == Status.CLEANING:
            RPIO.output(OUT_CLEANUP_SPEED, True)
            self.stop()
    def stop(self):
        """Stop the cycle, de-energise outputs and log the duration."""
        logger.info('Stopping cycle on %s' % self.runner.name)
        self.status = Status.STOPPED
        if self.watcher.watching == Watching.YES:
            self.watcher.stop_watch()
        deactivate_start_and_load()
        RPIO.output(OUT_CLEANUP_SPEED, True)
        end_time = datetime.now()
        delta = end_time - self.start_time
        logger.info('Cycle run for %f seconds' % delta.total_seconds())
    def start_watch(self):
        # Delegate supervision to the Watcher helper.
        self.watcher.start_watch()
    def stop_watch(self):
        self.watcher.stop_watch()
    def alarm(self):
        """Stop the cycle and raise the alarm output."""
        logger.info('Alarm being raised')
        self.stop()
        self.status = Status.ALARMED
        RPIO.output(OUT_ALARM, False)
    def deactivate_alarm(self):
        """Clear the alarm output and return to the stopped state."""
        self.status = Status.STOPPED
        RPIO.output(OUT_ALARM, True)
| gpl-3.0 |
prescottprue/PiOpenLighting | python/ola/UIDTest.py | 2 | 2961 | #!/usr/bin/python
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# UIDTest.py
# Copyright (C) 2005-2009 Simon Newton
"""Test cases for the UID class."""
__author__ = 'nomis52@gmail.com (Simon Newton)'
import unittest
from ola.UID import UID, UIDOutOfRangeException
class UIDTest(unittest.TestCase):
    """Tests for construction, parsing, ordering and iteration of UIDs."""
    def testBasic(self):
        # Construction, string form, comparisons, sorting and broadcasts.
        uid = UID(0x707a, 0x12345678)
        self.assertEquals(0x707a, uid.manufacturer_id)
        self.assertEquals(0x12345678, uid.device_id)
        self.assertEquals('707a:12345678', str(uid))
        self.assertTrue(uid > None)
        uid2 = UID(0x707a, 0x12345679)
        self.assertTrue(uid2 > uid)
        uid3 = UID(0x7079, 0x12345678)
        self.assertTrue(uid > uid3)
        uids = [uid, uid2, uid3]
        self.assertEquals([uid3, uid, uid2], sorted(uids))
        vendorcast_uid = UID.VendorcastAddress(0x707a)
        self.assertTrue(vendorcast_uid.IsBroadcast())
        broadcast_uid = UID.AllDevices()
        self.assertTrue(broadcast_uid.IsBroadcast())
    def testFromString(self):
        # Malformed strings should yield None, well-formed ones a UID.
        self.assertEquals(None, UID.FromString(''))
        self.assertEquals(None, UID.FromString('abc'))
        self.assertEquals(None, UID.FromString(':'))
        self.assertEquals(None, UID.FromString('0:1:2'))
        self.assertEquals(None, UID.FromString('12345:1234'))
        uid = UID.FromString('00a0:12345678')
        self.assertTrue(uid)
        self.assertEquals(0x00a0, uid.manufacturer_id)
        self.assertEquals(0x12345678, uid.device_id)
        self.assertEquals('00a0:12345678', str(uid))
    def testSorting(self):
        # Sort by manufacturer id first, then device id.
        u1 = UID(0x4845, 0xfffffffe)
        u2 = UID(0x4845, 0x0000022e)
        u3 = UID(0x4844, 0x0000022e)
        u4 = UID(0x4846, 0x0000022e)
        uids = [u1, u2, u3, u4]
        uids.sort()
        self.assertEquals([u3, u2, u1, u4], uids)
    def testNextAndPrevious(self):
        # NextUID/PreviousUID wrap the device id into the manufacturer id
        # and raise at the extreme ends of the UID space.
        u1 = UID(0x4845, 0xfffffffe)
        u2 = UID.NextUID(u1)
        self.assertEquals('4845:ffffffff', str(u2))
        u3 = UID.NextUID(u2)
        self.assertEquals('4846:00000000', str(u3))
        u4 = UID.PreviousUID(u3)
        self.assertEquals(u2, u4)
        u5 = UID.PreviousUID(u4)
        self.assertEquals(u1, u5)
        first_uid = UID(0, 0)
        self.assertRaises(UIDOutOfRangeException, UID.PreviousUID, first_uid)
        all_uids = UID.AllDevices()
        self.assertRaises(UIDOutOfRangeException, UID.NextUID, all_uids)
if __name__ == '__main__':
unittest.main()
| lgpl-2.1 |
michelts/lettuce | tests/integration/lib/Django-1.2.5/django/contrib/gis/sitemaps/kml.py | 482 | 2481 | from django.core import urlresolvers
from django.contrib.sitemaps import Sitemap
from django.contrib.gis.db.models.fields import GeometryField
from django.db import models
class KMLSitemap(Sitemap):
    """
    A minimal hook to produce KML sitemaps.
    """
    geo_format = 'kml'
    def __init__(self, locations=None):
        # If no locations specified, then we try to build for
        # every model in installed applications.
        self.locations = self._build_kml_sources(locations)
    def _build_kml_sources(self, sources):
        """
        Goes through the given sources and returns a 3-tuple of
        the application label, module name, and field name of every
        GeometryField encountered in the sources.
        If no sources are provided, then all models.
        """
        kml_sources = []
        if sources is None:
            sources = models.get_models()
        for source in sources:
            if isinstance(source, models.base.ModelBase):
                # Model class: collect every geometry field it defines.
                for field in source._meta.fields:
                    if isinstance(field, GeometryField):
                        kml_sources.append((source._meta.app_label,
                                            source._meta.module_name,
                                            field.name))
            elif isinstance(source, (list, tuple)):
                # Already an (app_label, module_name, field_name) triple.
                if len(source) != 3:
                    raise ValueError('Must specify a 3-tuple of (app_label, module_name, field_name).')
                kml_sources.append(source)
            else:
                raise TypeError('KML Sources must be a model or a 3-tuple.')
        return kml_sources
    def get_urls(self, page=1, site=None):
        """
        This method is overridden so the appropriate `geo_format` attribute
        is placed on each URL element.
        """
        urls = Sitemap.get_urls(self, page=page, site=site)
        for url in urls: url['geo_format'] = self.geo_format
        return urls
    def items(self):
        return self.locations
    def location(self, obj):
        # Reverse the KML/KMZ view URL for the (app, model, field) triple.
        return urlresolvers.reverse('django.contrib.gis.sitemaps.views.%s' % self.geo_format,
                                    kwargs={'label' : obj[0],
                                            'model' : obj[1],
                                            'field_name': obj[2],
                                            }
                                    )
class KMZSitemap(KMLSitemap):
    """KML sitemap variant that serves compressed (KMZ) output."""
    geo_format = 'kmz'
| gpl-3.0 |
GonzaloAfa/Termometro | login/views.py | 1 | 2578 | import json
from django.conf import settings
from django.http import HttpResponse, HttpResponseBadRequest
from django.template import RequestContext
from django.shortcuts import render_to_response, redirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth import logout as auth_logout, login
from social.backends.oauth import BaseOAuth1, BaseOAuth2
from social.backends.google import GooglePlusAuth
from social.apps.django_app.utils import strategy
def logout(request):
    """Logs out user"""
    auth_logout(request)
    # After logout, render the public home page.
    return render_to_response('home.html', {}, RequestContext(request))
def home(request):
    """Home view, displays login mechanism"""
    if request.user.is_authenticated():
        # Already logged in: skip straight to the post-login page.
        return redirect('done')
    return render_to_response('home.html', {
        'plus_id': getattr(settings, 'SOCIAL_AUTH_GOOGLE_PLUS_KEY', None)
    }, RequestContext(request))
@login_required
def done(request):
    """Login complete view, displays user data"""
    # Scope string is passed to the template for the Google+ widget.
    scope = ' '.join(GooglePlusAuth.DEFAULT_SCOPE)
    return render_to_response('done.html', {
        'user': request.user,
        'plus_id': getattr(settings, 'SOCIAL_AUTH_GOOGLE_PLUS_KEY', None),
        'plus_scope': scope
    }, RequestContext(request))
def signup_email(request):
    """Render the email signup page."""
    return render_to_response('email_signup.html', {}, RequestContext(request))
def validation_sent(request):
    """Confirmation page shown after a validation email was sent."""
    return render_to_response('validation_sent.html', {
        'email': request.session.get('email_validation_address')
    }, RequestContext(request))
def require_email(request):
    """Ask the user for an email address mid-pipeline.
    On POST, stores the address in the session and resumes the social
    auth pipeline; otherwise renders the email form.
    """
    if request.method == 'POST':
        request.session['saved_email'] = request.POST.get('email')
        backend = request.session['partial_pipeline']['backend']
        return redirect('social:complete', backend=backend)
    return render_to_response('email.html', RequestContext(request))
@strategy('social:complete')
def ajax_auth(request, backend):
    """AJAX authentication endpoint: complete a social login.

    Builds the token the backend expects (OAuth1 needs a token plus
    secret, OAuth2 a single access token), logs the user in, and
    returns the user's id and username as JSON.
    """
    backend = request.strategy.backend
    if isinstance(backend, BaseOAuth1):
        token = {
            'oauth_token': request.REQUEST.get('access_token'),
            'oauth_token_secret': request.REQUEST.get('access_token_secret'),
        }
    elif isinstance(backend, BaseOAuth2):
        token = request.REQUEST.get('access_token')
    else:
        # Bug fix: HttpResponseBadRequest is a response object, not an
        # exception; raising it produced a TypeError. Return it instead.
        return HttpResponseBadRequest('Wrong backend type')
    user = request.strategy.backend.do_auth(token, ajax=True)
    login(request, user)
    data = {'id': user.id, 'username': user.username}
    return HttpResponse(json.dumps(data), mimetype='application/json')
csoriano89/app-python-template | src/application.py | 1 | 2610 | import gi
gi.require_version('Notify', '0.7')
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gio, GLib, Gdk, Notify
from gettext import gettext as _
from usage.log import *
from usage.window import Window
class Application(Gtk.Application):
def __repr__(self):
return '<Application>'
def __init__(self):
Gtk.Application.__init__(self,
application_id='org.gnome.Usage',
flags=Gio.ApplicationFlags.HANDLES_COMMAND_LINE)
GLib.set_application_name(_("Usage"))
GLib.set_prgname('usage')
self._add_command_line_options()
self._window = None
def do_command_line(self, command_line):
options = command_line.get_options_dict()
if(options.contains("debug")):
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s %(levelname)s\t%(message)s",
datefmt="%H:%M:%S")
else:
logging.basicConfig(level=logging.WARN,
format="%(asctime)s %(levelname)s\t%(message)s",
datefmt="%H:%M:%S")
self.do_activate()
return -1
def build_app_menu(self):
actionEntries = [
('about', self.about),
('help', self.help),
('quit', self.quit),
]
for action, callback in actionEntries:
simpleAction = Gio.SimpleAction.new(action, None)
simpleAction.connect('activate', callback)
self.add_action(simpleAction)
def _add_command_line_options(self):
self.add_main_option("debug", b'd', GLib.OptionFlags.NONE, GLib.OptionArg.NONE,
_("Show debug output"), None)
def help(self, action, param):
Gtk.show_uri(None, "help:usage", Gdk.CURRENT_TIME)
def about(self, action, param):
builder = Gtk.Builder()
builder.add_from_resource('/org/gnome/Usage/ui/about-dialog.ui')
about = builder.get_object('about_dialog')
about.set_transient_for(self._window)
about.connect("response", self.about_response)
about.show()
def about_response(self, dialog, response):
dialog.destroy()
def do_startup(self):
Gtk.Application.do_startup(self)
self.build_app_menu()
@log
def quit(self, action=None, param=None):
self._window.destroy()
def do_activate(self):
if not self._window:
self._window = Window(self)
self._window.present()
| gpl-3.0 |
nomedeusuariodesconhecido/info3180-lab4 | venv/lib/python2.7/site-packages/setuptools/command/sdist.py | 385 | 7079 | from glob import glob
from distutils import log
import distutils.command.sdist as orig
import os
import sys
from setuptools.compat import PY3
from setuptools.utils import cs_path_exists
import pkg_resources
READMES = 'README', 'README.rst', 'README.txt'
_default_revctrl = list
def walk_revctrl(dirname=''):
    """Find all files under revision control"""
    # Delegate to installed 'setuptools.file_finders' plugins
    # (e.g. git/svn/hg finder packages) and yield their results.
    for ep in pkg_resources.iter_entry_points('setuptools.file_finders'):
        for item in ep.load()(dirname):
            yield item
class sdist(orig.sdist):
"""Smart sdist that finds anything supported by revision control"""
user_options = [
('formats=', None,
"formats for source distribution (comma-separated list)"),
('keep-temp', 'k',
"keep the distribution tree around after creating " +
"archive file(s)"),
('dist-dir=', 'd',
"directory to put the source distribution archive(s) in "
"[default: dist]"),
]
negative_opt = {}
def run(self):
self.run_command('egg_info')
ei_cmd = self.get_finalized_command('egg_info')
self.filelist = ei_cmd.filelist
self.filelist.append(os.path.join(ei_cmd.egg_info, 'SOURCES.txt'))
self.check_readme()
# Run sub commands
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
# Call check_metadata only if no 'check' command
# (distutils <= 2.6)
import distutils.command
if 'check' not in distutils.command.__all__:
self.check_metadata()
self.make_distribution()
dist_files = getattr(self.distribution, 'dist_files', [])
for file in self.archive_files:
data = ('sdist', '', file)
if data not in dist_files:
dist_files.append(data)
def __read_template_hack(self):
# This grody hack closes the template file (MANIFEST.in) if an
# exception occurs during read_template.
# Doing so prevents an error when easy_install attempts to delete the
# file.
try:
orig.sdist.read_template(self)
except:
_, _, tb = sys.exc_info()
tb.tb_next.tb_frame.f_locals['template'].close()
raise
# Beginning with Python 2.7.2, 3.1.4, and 3.2.1, this leaky file handle
# has been fixed, so only override the method if we're using an earlier
# Python.
has_leaky_handle = (
sys.version_info < (2, 7, 2)
or (3, 0) <= sys.version_info < (3, 1, 4)
or (3, 2) <= sys.version_info < (3, 2, 1)
)
if has_leaky_handle:
read_template = __read_template_hack
def add_defaults(self):
standards = [READMES,
self.distribution.script_name]
for fn in standards:
if isinstance(fn, tuple):
alts = fn
got_it = 0
for fn in alts:
if cs_path_exists(fn):
got_it = 1
self.filelist.append(fn)
break
if not got_it:
self.warn("standard file not found: should have one of " +
', '.join(alts))
else:
if cs_path_exists(fn):
self.filelist.append(fn)
else:
self.warn("standard file '%s' not found" % fn)
optional = ['test/test*.py', 'setup.cfg']
for pattern in optional:
files = list(filter(cs_path_exists, glob(pattern)))
if files:
self.filelist.extend(files)
# getting python files
if self.distribution.has_pure_modules():
build_py = self.get_finalized_command('build_py')
self.filelist.extend(build_py.get_source_files())
# This functionality is incompatible with include_package_data, and
# will in fact create an infinite recursion if include_package_data
# is True. Use of include_package_data will imply that
# distutils-style automatic handling of package_data is disabled
if not self.distribution.include_package_data:
for _, src_dir, _, filenames in build_py.data_files:
self.filelist.extend([os.path.join(src_dir, filename)
for filename in filenames])
if self.distribution.has_ext_modules():
build_ext = self.get_finalized_command('build_ext')
self.filelist.extend(build_ext.get_source_files())
if self.distribution.has_c_libraries():
build_clib = self.get_finalized_command('build_clib')
self.filelist.extend(build_clib.get_source_files())
if self.distribution.has_scripts():
build_scripts = self.get_finalized_command('build_scripts')
self.filelist.extend(build_scripts.get_source_files())
def check_readme(self):
    """Warn if the project ships none of the recognized README files."""
    if not any(os.path.exists(readme) for readme in READMES):
        self.warn(
            "standard file not found: should have one of " +
            ', '.join(READMES)
        )
def make_release_tree(self, base_dir, files):
    """Build the sdist release tree, then rewrite its setup.cfg so the
    tree records the egg_info version options used to create it.
    """
    orig.sdist.make_release_tree(self, base_dir, files)
    # Save any egg_info command line options used to create this sdist
    dest = os.path.join(base_dir, 'setup.cfg')
    if hasattr(os, 'link') and os.path.exists(dest):
        # unlink and re-copy, since it might be hard-linked, and
        # we don't want to change the source version
        os.unlink(dest)
        self.copy_file('setup.cfg', dest)
    self.get_finalized_command('egg_info').save_version_info(dest)
def _manifest_is_not_generated(self):
    """Return True if a manifest file exists and was NOT auto-generated
    by distutils (i.e. it lacks the generated-file marker comment).

    Returns False when the manifest is missing or carries the marker.
    """
    # check for special comment used in 2.7.1 and higher
    if not os.path.isfile(self.manifest):
        return False
    # Mode 'rbU' is invalid on Python 3 ('U' cannot be combined with
    # binary mode and raises ValueError); plain 'rb' is sufficient to
    # compare the marker line.  The with-block guarantees the handle is
    # closed even if readline() fails.
    with open(self.manifest, 'rb') as fp:
        first_line = fp.readline()
    return (first_line !=
            '# file GENERATED by distutils, do NOT edit\n'.encode())
def read_manifest(self):
    """Read the manifest file (named by 'self.manifest') and use it to
    fill in 'self.filelist', the list of files to include in the source
    distribution.

    Comment lines and blank lines are ignored; on Python 3, lines that
    are not valid UTF-8 are skipped with a warning.
    """
    log.info("reading manifest file '%s'", self.manifest)
    # Mode 'rbU' raises ValueError on Python 3 ('U' cannot be combined
    # with 'b'); binary mode is kept so we can decode explicitly as
    # UTF-8.  The with-block closes the file even if an append or
    # decode fails part-way through; trailing '\r' from Windows line
    # endings is removed by strip() below.
    with open(self.manifest, 'rb') as manifest:
        for line in manifest:
            # The manifest must contain UTF-8. See #303.
            if PY3:
                try:
                    line = line.decode('UTF-8')
                except UnicodeDecodeError:
                    log.warn("%r not UTF-8 decodable -- skipping" % line)
                    continue
            # ignore comments and blank lines
            line = line.strip()
            if line.startswith('#') or not line:
                continue
            self.filelist.append(line)
| mit |
balloob/home-assistant | tests/components/mfi/test_sensor.py | 3 | 6305 | """The tests for the mFi sensor platform."""
from mficlient.client import FailedToLogin
import pytest
import requests
import homeassistant.components.mfi.sensor as mfi
import homeassistant.components.sensor as sensor_component
from homeassistant.const import TEMP_CELSIUS
from homeassistant.setup import async_setup_component
import tests.async_mock as mock
# Aliases used by Home Assistant's shared platform-test conventions so
# the tests below read generically for this platform/component pair.
PLATFORM = mfi
COMPONENT = sensor_component
THING = "sensor"

# A complete, valid mFi sensor-platform configuration; individual tests
# copy and tweak it (e.g. deleting "port" or disabling "ssl").
GOOD_CONFIG = {
    "sensor": {
        "platform": "mfi",
        "host": "foo",
        "port": 6123,
        "username": "user",
        "password": "pass",
        "ssl": True,
        "verify_ssl": True,
    }
}
async def test_setup_missing_config(hass):
    """An incomplete configuration must never instantiate the client."""
    with mock.patch("homeassistant.components.mfi.sensor.MFiClient") as client_cls:
        incomplete = {"sensor": {"platform": "mfi"}}
        assert await async_setup_component(hass, "sensor", incomplete)
        assert not client_cls.called
async def test_setup_failed_login(hass):
    """A FailedToLogin from the client makes setup_platform return falsy."""
    with mock.patch("homeassistant.components.mfi.sensor.MFiClient") as client_cls:
        client_cls.side_effect = FailedToLogin
        result = PLATFORM.setup_platform(hass, dict(GOOD_CONFIG), None)
        assert not result
async def test_setup_failed_connect(hass):
    """A connection error from the client makes setup_platform return falsy."""
    with mock.patch("homeassistant.components.mfi.sensor.MFiClient") as client_cls:
        client_cls.side_effect = requests.exceptions.ConnectionError
        result = PLATFORM.setup_platform(hass, dict(GOOD_CONFIG), None)
        assert not result
async def test_setup_minimum(hass):
    """Without an explicit port the client gets the TLS default, 6443."""
    with mock.patch("homeassistant.components.mfi.sensor.MFiClient") as client_cls:
        config = dict(GOOD_CONFIG)
        del config[THING]["port"]
        assert await async_setup_component(hass, COMPONENT.DOMAIN, config)
        await hass.async_block_till_done()
        expected = mock.call(
            "foo", "user", "pass", port=6443, use_tls=True, verify=True
        )
        assert client_cls.call_count == 1
        assert client_cls.call_args == expected
async def test_setup_with_port(hass):
    """An explicit port in the config is passed straight to the client."""
    with mock.patch("homeassistant.components.mfi.sensor.MFiClient") as client_cls:
        config = dict(GOOD_CONFIG)
        config[THING]["port"] = 6123
        assert await async_setup_component(hass, COMPONENT.DOMAIN, config)
        await hass.async_block_till_done()
        expected = mock.call(
            "foo", "user", "pass", port=6123, use_tls=True, verify=True
        )
        assert client_cls.call_count == 1
        assert client_cls.call_args == expected
async def test_setup_with_tls_disabled(hass):
    """Disabling SSL selects port 6080 with TLS and verification off."""
    with mock.patch("homeassistant.components.mfi.sensor.MFiClient") as client_cls:
        config = dict(GOOD_CONFIG)
        del config[THING]["port"]
        config[THING]["ssl"] = False
        config[THING]["verify_ssl"] = False
        assert await async_setup_component(hass, COMPONENT.DOMAIN, config)
        await hass.async_block_till_done()
        expected = mock.call(
            "foo", "user", "pass", port=6080, use_tls=False, verify=False
        )
        assert client_cls.call_count == 1
        assert client_cls.call_args == expected
async def test_setup_adds_proper_devices(hass):
    """Only ports with recognized sensor models become entities."""
    with mock.patch(
        "homeassistant.components.mfi.sensor.MFiClient"
    ) as client_cls, mock.patch(
        "homeassistant.components.mfi.sensor.MfiSensor"
    ) as sensor_cls:
        # One mocked port per supported model, plus one unsupported port.
        ports = {}
        for index, model in enumerate(mfi.SENSOR_MODELS):
            ports[index] = mock.MagicMock(model=model)
        ports["bad"] = mock.MagicMock(model="notasensor")
        client_cls.return_value.get_devices.return_value = [
            mock.MagicMock(ports=ports)
        ]
        assert await async_setup_component(hass, COMPONENT.DOMAIN, GOOD_CONFIG)
        await hass.async_block_till_done()
        for ident, port in ports.items():
            if ident == "bad":
                continue
            sensor_cls.assert_any_call(port, hass)
        assert mock.call(ports["bad"], hass) not in sensor_cls.mock_calls
@pytest.fixture(name="port")
def port_fixture():
    """Return a mocked mFi port object."""
    return mock.MagicMock()
@pytest.fixture(name="sensor")
def sensor_fixture(hass, port):
    """Return an MfiSensor wired to the mocked port."""
    return mfi.MfiSensor(port, hass)
async def test_name(port, sensor):
    """The entity name mirrors the port label."""
    assert sensor.name == port.label
async def test_uom_temp(port, sensor):
    """Temperature ports report degrees Celsius."""
    port.tag = "temperature"
    assert sensor.unit_of_measurement == TEMP_CELSIUS
async def test_uom_power(port, sensor):
    """Active-power ports report Watts."""
    port.tag = "active_pwr"
    uom = sensor.unit_of_measurement
    assert uom == "Watts"
async def test_uom_digital(port, sensor):
    """Digital-input ports report the unit 'State'."""
    port.model = "Input Digital"
    uom = sensor.unit_of_measurement
    assert uom == "State"
async def test_uom_unknown(port, sensor):
    """An unrecognized tag is passed through as the unit verbatim."""
    port.tag = "balloons"
    uom = sensor.unit_of_measurement
    assert uom == "balloons"
async def test_uom_uninitialized(port, sensor):
    """A port whose tag raises falls back to the unit 'State'."""
    type(port).tag = mock.PropertyMock(side_effect=ValueError)
    uom = sensor.unit_of_measurement
    assert uom == "State"
async def test_state_digital(port, sensor):
    """Digital inputs map zero to off and any nonzero value to on."""
    port.model = "Input Digital"
    for raw, expected in (
        (0, mfi.STATE_OFF),
        (1, mfi.STATE_ON),
        (2, mfi.STATE_ON),
    ):
        port.value = raw
        assert sensor.state == expected
async def test_state_digits(port, sensor):
    """The state is rounded per the DIGITS table entry for the tag."""
    port.tag = "didyoucheckthedict?"
    port.value = 1.25
    # No entry for the tag: value rounds to the default (whole) precision.
    with mock.patch.dict(mfi.DIGITS, {}):
        assert sensor.state == 1.0
    # One digit of precision configured for the tag.
    with mock.patch.dict(mfi.DIGITS, {"didyoucheckthedict?": 1}):
        assert sensor.state == 1.2
async def test_state_uninitialized(port, sensor):
    """A port whose tag raises is reported as off."""
    type(port).tag = mock.PropertyMock(side_effect=ValueError)
    assert sensor.state == mfi.STATE_OFF
async def test_update(port, sensor):
    """update() refreshes the underlying port exactly once, with no args."""
    sensor.update()
    port.refresh.assert_called_once_with()
| apache-2.0 |
akirk/youtube-dl | youtube_dl/extractor/youjizz.py | 148 | 2297 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
)
class YouJizzIE(InfoExtractor):
    """Extractor for youjizz.com video pages (adult content, age-restricted)."""

    _VALID_URL = r'https?://(?:\w+\.)?youjizz\.com/videos/[^/#?]+-(?P<id>[0-9]+)\.html(?:$|[?#])'
    _TEST = {
        'url': 'http://www.youjizz.com/videos/zeichentrick-1-2189178.html',
        'md5': '07e15fa469ba384c7693fd246905547c',
        'info_dict': {
            'id': '2189178',
            'ext': 'flv',
            "title": "Zeichentrick 1",
            "age_limit": 18,
        }
    }

    def _real_extract(self, url):
        """Fetch the page, locate the Flash embed, and resolve the video URL.

        Two site layouts are handled: a playlist-based player (pick the
        lowest-bitrate level) and a single-file player.
        """
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # RTA (Restricted To Adults) label on the page sets the age limit.
        age_limit = self._rta_search(webpage)
        video_title = self._html_search_regex(
            r'<title>\s*(.*)\s*</title>', webpage, 'title')
        embed_page_url = self._search_regex(
            r'(https?://www.youjizz.com/videos/embed/[0-9]+)',
            webpage, 'embed page')
        webpage = self._download_webpage(
            embed_page_url, video_id, note='downloading embed page')
        # Get the video URL
        m_playlist = re.search(r'so.addVariable\("playlist", ?"(?P<playlist>.+?)"\);', webpage)
        if m_playlist is not None:
            # Playlist player: fetch the playlist XML and pick the entry
            # with the lowest bitrate.
            playlist_url = m_playlist.group('playlist')
            playlist_page = self._download_webpage(playlist_url, video_id,
                                                   'Downloading playlist page')
            m_levels = list(re.finditer(r'<level bitrate="(\d+?)" file="(.*?)"', playlist_page))
            if len(m_levels) == 0:
                raise ExtractorError('Unable to extract video url')
            videos = [(int(m.group(1)), m.group(2)) for m in m_levels]
            (_, video_url) = sorted(videos)[0]
            # The playlist double-escapes '/' as %252F; undo one level.
            video_url = video_url.replace('%252F', '%2F')
        else:
            # Single-file player: the URL is passed through encodeURIComponent.
            video_url = self._search_regex(r'so.addVariable\("file",encodeURIComponent\("(?P<source>[^"]+)"\)\);',
                                           webpage, 'video URL')
        return {
            'id': video_id,
            'url': video_url,
            'title': video_title,
            'ext': 'flv',
            'format': 'flv',
            'player_url': embed_page_url,
            'age_limit': age_limit,
        }
| unlicense |
tuxfux-hlp-notes/python-batches | archieves/batch-60/modules/myenv/lib/python2.7/site-packages/setuptools/dist.py | 148 | 32599 | __all__ = ['Distribution']
import re
import os
import sys
import warnings
import distutils.log
import distutils.core
import distutils.cmd
from distutils.core import Distribution as _Distribution
from distutils.errors import (DistutilsOptionError, DistutilsPlatformError,
DistutilsSetupError)
from setuptools.depends import Require
from setuptools.compat import numeric_types, basestring
import pkg_resources
def _get_unpatched(cls):
    """Protect against re-patching the distutils if reloaded

    Also ensures that no other distutils extension monkeypatched the distutils
    first.
    """
    # Walk down the (single-inheritance) chain past any setuptools
    # wrapper classes until we reach the original distutils class.
    unpatched = cls
    while unpatched.__module__.startswith('setuptools'):
        unpatched, = unpatched.__bases__
    if not unpatched.__module__.startswith('distutils'):
        raise AssertionError(
            "distutils has already been patched by %r" % unpatched
        )
    return unpatched
# Resolve the pristine distutils Distribution (in case it was already
# patched by a reload), then define which types the include/exclude
# machinery treats as "sequences".
_Distribution = _get_unpatched(_Distribution)

sequence = tuple, list
def check_importable(dist, attr, value):
    """Validate that *value* parses as an importable 'module:attrs' string
    with no extras; raise DistutilsSetupError otherwise.
    """
    try:
        entry_point = pkg_resources.EntryPoint.parse('x=' + value)
        # Extras are not allowed in this context.
        assert not entry_point.extras
    except (TypeError, ValueError, AttributeError, AssertionError):
        raise DistutilsSetupError(
            "%r must be importable 'module:attrs' string (got %r)"
            % (attr, value)
        )
def assert_string_list(dist, attr, value):
    """Verify that value is a string list or None"""
    # Joining proves *value* is an iterable of strings; since a plain
    # string joins back to itself, the inequality also rejects bare
    # strings.  Any failure mode collapses into the same setup error.
    try:
        assert ''.join(value) != value
    except (TypeError, ValueError, AttributeError, AssertionError):
        raise DistutilsSetupError(
            "%r must be a list of strings (got %r)" % (attr, value)
        )
def check_nsp(dist, attr, value):
    """Verify that namespace packages are valid"""
    assert_string_list(dist, attr, value)
    for nsp in value:
        if not dist.has_contents_for(nsp):
            raise DistutilsSetupError(
                "Distribution contains no modules or packages for " +
                "namespace package %r" % nsp
            )
        # A dotted namespace also requires its parent to be declared.
        if '.' not in nsp:
            continue
        parent = nsp.rsplit('.', 1)[0]
        if parent not in value:
            distutils.log.warn(
                "WARNING: %r is declared as a package namespace, but %r"
                " is not: please correct this in setup.py", nsp, parent
            )
def check_extras(dist, attr, value):
    """Verify that extras_require mapping is valid"""
    try:
        for extra, reqs in value.items():
            # 'name:marker' keys carry an environment marker after ':'.
            if ':' in extra:
                extra, marker = extra.split(':', 1)
                if pkg_resources.invalid_marker(marker):
                    raise DistutilsSetupError("Invalid environment marker: " + marker)
            # Force evaluation so malformed specifiers raise here.
            list(pkg_resources.parse_requirements(reqs))
    except (TypeError, ValueError, AttributeError):
        raise DistutilsSetupError(
            "'extras_require' must be a dictionary whose values are "
            "strings or lists of strings containing valid project/version "
            "requirement specifiers."
        )
def assert_bool(dist, attr, value):
    """Verify that value is True, False, 0, or 1"""
    # bool(value) != value holds exactly for values that are not
    # boolean-equivalent (e.g. 2, "yes", []).
    if bool(value) != value:
        raise DistutilsSetupError(
            "%r must be a boolean value (got %r)" % (attr, value)
        )
def check_requirements(dist, attr, value):
    """Verify that install_requires is a valid requirements list"""
    # parse_requirements is lazy; list() forces evaluation so bad
    # specifiers raise immediately.
    try:
        list(pkg_resources.parse_requirements(value))
    except (TypeError, ValueError):
        raise DistutilsSetupError(
            "%r must be a string or list of strings "
            "containing valid project/version requirement specifiers" % (attr,)
        )
def check_entry_points(dist, attr, value):
    """Verify that entry_points map is parseable"""
    try:
        pkg_resources.EntryPoint.parse_map(value)
    except ValueError:
        # sys.exc_info keeps this compatible with both Python 2 and 3
        # (avoids the 'except ... as e' scoping differences).
        e = sys.exc_info()[1]
        raise DistutilsSetupError(e)
def check_test_suite(dist, attr, value):
    """Verify that the test_suite setting is a string."""
    # basestring comes from setuptools.compat for Python 2/3 support.
    if not isinstance(value, basestring):
        raise DistutilsSetupError("test_suite must be a string")
def check_package_data(dist, attr, value):
    """Verify that value is a dictionary of package names to glob lists"""
    valid = isinstance(value, dict)
    if valid:
        for package, patterns in value.items():
            # Keys must be package-name strings.
            if not isinstance(package, str):
                valid = False
                break
            # Values must be iterable (lists of wildcard patterns).
            try:
                iter(patterns)
            except TypeError:
                valid = False
                break
    if not valid:
        raise DistutilsSetupError(
            attr + " must be a dictionary mapping package names to lists of "
            "wildcard patterns"
        )
def check_packages(dist, attr, value):
    """Warn about entries that do not look like dotted package names."""
    for pkgname in value:
        if re.match(r'\w+(\.\w+)*', pkgname):
            continue
        distutils.log.warn(
            "WARNING: %r not a valid package name; please use only"
            ".-separated package names in setup.py", pkgname
        )
class Distribution(_Distribution):
    """Distribution with support for features, tests, and package data

    This is an enhanced version of 'distutils.dist.Distribution' that
    effectively adds the following new optional keyword arguments to 'setup()':

     'install_requires' -- a string or sequence of strings specifying project
        versions that the distribution requires when installed, in the format
        used by 'pkg_resources.require()'.  They will be installed
        automatically when the package is installed.  If you wish to use
        packages that are not available in PyPI, or want to give your users an
        alternate download location, you can add a 'find_links' option to the
        '[easy_install]' section of your project's 'setup.cfg' file, and then
        setuptools will scan the listed web pages for links that satisfy the
        requirements.

     'extras_require' -- a dictionary mapping names of optional "extras" to the
        additional requirement(s) that using those extras incurs. For example,
        this::

            extras_require = dict(reST = ["docutils>=0.3", "reSTedit"])

        indicates that the distribution can optionally provide an extra
        capability called "reST", but it can only be used if docutils and
        reSTedit are installed.  If the user installs your package using
        EasyInstall and requests one of your extras, the corresponding
        additional requirements will be installed if needed.

     'features' **deprecated** -- a dictionary mapping option names to
        'setuptools.Feature'
        objects.  Features are a portion of the distribution that can be
        included or excluded based on user options, inter-feature dependencies,
        and availability on the current system.  Excluded features are omitted
        from all setup commands, including source and binary distributions, so
        you can create multiple distributions from the same source tree.
        Feature names should be valid Python identifiers, except that they may
        contain the '-' (minus) sign.  Features can be included or excluded
        via the command line options '--with-X' and '--without-X', where 'X' is
        the name of the feature.  Whether a feature is included by default, and
        whether you are allowed to control this from the command line, is
        determined by the Feature object.  See the 'Feature' class for more
        information.

     'test_suite' -- the name of a test suite to run for the 'test' command.
        If the user runs 'python setup.py test', the package will be installed,
        and the named test suite will be run.  The format is the same as
        would be used on a 'unittest.py' command line.  That is, it is the
        dotted name of an object to import and call to generate a test suite.

     'package_data' -- a dictionary mapping package names to lists of filenames
        or globs to use to find data files contained in the named packages.
        If the dictionary has filenames or globs listed under '""' (the empty
        string), those names will be searched for in every package, in addition
        to any names for the specific package.  Data files found using these
        names/globs will be installed along with the package, in the same
        location as the package.  Note that globs are allowed to reference
        the contents of non-package subdirectories, as long as you use '/' as
        a path separator.  (Globs are automatically converted to
        platform-specific paths at runtime.)

    In addition to these new keywords, this class also has several new methods
    for manipulating the distribution's contents.  For example, the 'include()'
    and 'exclude()' methods can be thought of as in-place add and subtract
    commands that add or remove packages, modules, extensions, and so on from
    the distribution.  They are used by the feature subsystem to configure the
    distribution for the included and excluded features.
    """

    # Distribution object faked up by patch_missing_pkg_info(), if any.
    _patched_dist = None

    def patch_missing_pkg_info(self, attrs):
        """Inject a version into an already-active dist that lacks PKG-INFO."""
        # Fake up a replacement for the data that would normally come from
        # PKG-INFO, but which might not yet be built if this is a fresh
        # checkout.
        #
        if not attrs or 'name' not in attrs or 'version' not in attrs:
            return
        key = pkg_resources.safe_name(str(attrs['name'])).lower()
        dist = pkg_resources.working_set.by_key.get(key)
        if dist is not None and not dist.has_metadata('PKG-INFO'):
            dist._version = pkg_resources.safe_version(str(attrs['version']))
            self._patched_dist = dist

    def __init__(self, attrs=None):
        """Initialize, consuming setuptools-specific keys from *attrs*
        before delegating the rest to the distutils Distribution.
        """
        have_package_data = hasattr(self, "package_data")
        if not have_package_data:
            self.package_data = {}
        _attrs_dict = attrs or {}
        if 'features' in _attrs_dict or 'require_features' in _attrs_dict:
            Feature.warn_deprecated()
        self.require_features = []
        self.features = {}
        self.dist_files = []
        self.src_root = attrs and attrs.pop("src_root", None)
        self.patch_missing_pkg_info(attrs)
        # Make sure we have any eggs needed to interpret 'attrs'
        if attrs is not None:
            self.dependency_links = attrs.pop('dependency_links', [])
            assert_string_list(self,'dependency_links',self.dependency_links)
        if attrs and 'setup_requires' in attrs:
            self.fetch_build_eggs(attrs.pop('setup_requires'))
        # Give every registered setup keyword a default attribute so the
        # distutils option machinery can see it.
        for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
            if not hasattr(self,ep.name):
                setattr(self,ep.name,None)
        _Distribution.__init__(self,attrs)
        if isinstance(self.metadata.version, numeric_types):
            # Some people apparently take "version number" too literally :)
            self.metadata.version = str(self.metadata.version)

    def parse_command_line(self):
        """Process features after parsing command line options"""
        result = _Distribution.parse_command_line(self)
        if self.features:
            self._finalize_features()
        return result

    def _feature_attrname(self,name):
        """Convert feature name to corresponding option attribute name"""
        return 'with_'+name.replace('-','_')

    def fetch_build_eggs(self, requires):
        """Resolve pre-setup requirements"""
        from pkg_resources import working_set, parse_requirements
        for dist in working_set.resolve(
            parse_requirements(requires), installer=self.fetch_build_egg,
            replace_conflicting=True
        ):
            working_set.add(dist, replace=True)

    def finalize_options(self):
        """Run distutils finalization, then feature options and the
        validator registered for each setuptools keyword in use.
        """
        _Distribution.finalize_options(self)
        if self.features:
            self._set_global_opts_from_features()

        for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
            value = getattr(self,ep.name,None)
            if value is not None:
                ep.require(installer=self.fetch_build_egg)
                ep.load()(self, ep.name, value)
        if getattr(self, 'convert_2to3_doctests', None):
            # XXX may convert to set here when we can rely on set being builtin
            self.convert_2to3_doctests = [os.path.abspath(p) for p in self.convert_2to3_doctests]
        else:
            self.convert_2to3_doctests = []

    def fetch_build_egg(self, req):
        """Fetch an egg needed for building"""
        try:
            # Reuse (and reset the scan queue of) a previously-built fetcher.
            cmd = self._egg_fetcher
            cmd.package_index.to_scan = []
        except AttributeError:
            from setuptools.command.easy_install import easy_install
            dist = self.__class__({'script_args':['easy_install']})
            dist.parse_config_files()
            opts = dist.get_option_dict('easy_install')
            keep = (
                'find_links', 'site_dirs', 'index_url', 'optimize',
                'site_dirs', 'allow_hosts'
            )
            for key in list(opts):
                if key not in keep:
                    del opts[key]   # don't use any other settings
            if self.dependency_links:
                links = self.dependency_links[:]
                if 'find_links' in opts:
                    links = opts['find_links'][1].split() + links
                opts['find_links'] = ('setup', links)
            cmd = easy_install(
                dist, args=["x"], install_dir=os.curdir, exclude_scripts=True,
                always_copy=False, build_directory=None, editable=False,
                upgrade=False, multi_version=True, no_report=True, user=False
            )
            cmd.ensure_finalized()
            self._egg_fetcher = cmd
        return cmd.easy_install(req)

    def _set_global_opts_from_features(self):
        """Add --with-X/--without-X options based on optional features"""
        go = []
        no = self.negative_opt.copy()

        for name,feature in self.features.items():
            self._set_feature(name,None)
            feature.validate(self)

            if feature.optional:
                descr = feature.description
                incdef = ' (default)'
                excdef=''
                if not feature.include_by_default():
                    excdef, incdef = incdef, excdef

                go.append(('with-'+name, None, 'include '+descr+incdef))
                go.append(('without-'+name, None, 'exclude '+descr+excdef))
                no['without-'+name] = 'with-'+name

        self.global_options = self.feature_options = go + self.global_options
        self.negative_opt = self.feature_negopt = no

    def _finalize_features(self):
        """Add/remove features and resolve dependencies between them"""
        # First, flag all the enabled items (and thus their dependencies)
        for name,feature in self.features.items():
            enabled = self.feature_is_included(name)
            if enabled or (enabled is None and feature.include_by_default()):
                feature.include_in(self)
                self._set_feature(name,1)

        # Then disable the rest, so that off-by-default features don't
        # get flagged as errors when they're required by an enabled feature
        for name,feature in self.features.items():
            if not self.feature_is_included(name):
                feature.exclude_from(self)
                self._set_feature(name,0)

    def get_command_class(self, command):
        """Pluggable version of get_command_class()"""
        if command in self.cmdclass:
            return self.cmdclass[command]

        # Entry points allow plugins to provide commands; fall back to
        # distutils when no plugin claims the name.
        for ep in pkg_resources.iter_entry_points('distutils.commands',command):
            ep.require(installer=self.fetch_build_egg)
            self.cmdclass[command] = cmdclass = ep.load()
            return cmdclass
        else:
            return _Distribution.get_command_class(self, command)

    def print_commands(self):
        """Register plugin-provided commands before listing them."""
        for ep in pkg_resources.iter_entry_points('distutils.commands'):
            if ep.name not in self.cmdclass:
                cmdclass = ep.load(False) # don't require extras, we're not running
                self.cmdclass[ep.name] = cmdclass
        return _Distribution.print_commands(self)

    def _set_feature(self,name,status):
        """Set feature's inclusion status"""
        setattr(self,self._feature_attrname(name),status)

    def feature_is_included(self,name):
        """Return 1 if feature is included, 0 if excluded, 'None' if unknown"""
        return getattr(self,self._feature_attrname(name))

    def include_feature(self,name):
        """Request inclusion of feature named 'name'"""
        if self.feature_is_included(name)==0:
            descr = self.features[name].description
            raise DistutilsOptionError(
                descr + " is required, but was excluded or is not available"
            )
        self.features[name].include_in(self)
        self._set_feature(name,1)

    def include(self,**attrs):
        """Add items to distribution that are named in keyword arguments

        For example, 'dist.include(py_modules=["x"])' would add 'x' to
        the distribution's 'py_modules' attribute, if it was not already
        there.

        Currently, this method only supports inclusion for attributes that are
        lists or tuples.  If you need to add support for adding to other
        attributes in this or a subclass, you can add an '_include_X' method,
        where 'X' is the name of the attribute.  The method will be called with
        the value passed to 'include()'.  So, 'dist.include(foo={"bar":"baz"})'
        will try to call 'dist._include_foo({"bar":"baz"})', which can then
        handle whatever special inclusion logic is needed.
        """
        for k,v in attrs.items():
            include = getattr(self, '_include_'+k, None)
            if include:
                include(v)
            else:
                self._include_misc(k,v)

    def exclude_package(self,package):
        """Remove packages, modules, and extensions in named package"""
        pfx = package+'.'
        if self.packages:
            self.packages = [
                p for p in self.packages
                if p != package and not p.startswith(pfx)
            ]

        if self.py_modules:
            self.py_modules = [
                p for p in self.py_modules
                if p != package and not p.startswith(pfx)
            ]

        if self.ext_modules:
            self.ext_modules = [
                p for p in self.ext_modules
                if p.name != package and not p.name.startswith(pfx)
            ]

    def has_contents_for(self,package):
        """Return true if 'exclude_package(package)' would do something"""
        pfx = package+'.'

        for p in self.iter_distribution_names():
            if p==package or p.startswith(pfx):
                return True

    def _exclude_misc(self,name,value):
        """Handle 'exclude()' for list/tuple attrs without a special handler"""
        if not isinstance(value,sequence):
            raise DistutilsSetupError(
                "%s: setting must be a list or tuple (%r)" % (name, value)
            )
        try:
            old = getattr(self,name)
        except AttributeError:
            raise DistutilsSetupError(
                "%s: No such distribution setting" % name
            )
        if old is not None and not isinstance(old,sequence):
            raise DistutilsSetupError(
                name+": this setting cannot be changed via include/exclude"
            )
        elif old:
            setattr(self,name,[item for item in old if item not in value])

    def _include_misc(self,name,value):
        """Handle 'include()' for list/tuple attrs without a special handler"""

        if not isinstance(value,sequence):
            raise DistutilsSetupError(
                "%s: setting must be a list (%r)" % (name, value)
            )
        try:
            old = getattr(self,name)
        except AttributeError:
            raise DistutilsSetupError(
                "%s: No such distribution setting" % name
            )
        if old is None:
            setattr(self,name,value)
        elif not isinstance(old,sequence):
            raise DistutilsSetupError(
                name+": this setting cannot be changed via include/exclude"
            )
        else:
            # Append only the items not already present.
            setattr(self,name,old+[item for item in value if item not in old])

    def exclude(self,**attrs):
        """Remove items from distribution that are named in keyword arguments

        For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
        the distribution's 'py_modules' attribute.  Excluding packages uses
        the 'exclude_package()' method, so all of the package's contained
        packages, modules, and extensions are also excluded.

        Currently, this method only supports exclusion from attributes that are
        lists or tuples.  If you need to add support for excluding from other
        attributes in this or a subclass, you can add an '_exclude_X' method,
        where 'X' is the name of the attribute.  The method will be called with
        the value passed to 'exclude()'.  So, 'dist.exclude(foo={"bar":"baz"})'
        will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
        handle whatever special exclusion logic is needed.
        """
        for k,v in attrs.items():
            exclude = getattr(self, '_exclude_'+k, None)
            if exclude:
                exclude(v)
            else:
                self._exclude_misc(k,v)

    def _exclude_packages(self,packages):
        if not isinstance(packages,sequence):
            raise DistutilsSetupError(
                "packages: setting must be a list or tuple (%r)" % (packages,)
            )
        list(map(self.exclude_package, packages))

    def _parse_command_opts(self, parser, args):
        """Expand command aliases and strip feature options, then parse."""
        # Remove --with-X/--without-X options when processing command args
        self.global_options = self.__class__.global_options
        self.negative_opt = self.__class__.negative_opt

        # First, expand any aliases
        command = args[0]
        aliases = self.get_option_dict('aliases')
        while command in aliases:
            src,alias = aliases[command]
            del aliases[command]    # ensure each alias can expand only once!
            import shlex
            args[:1] = shlex.split(alias,True)
            command = args[0]

        nargs = _Distribution._parse_command_opts(self, parser, args)

        # Handle commands that want to consume all remaining arguments
        cmd_class = self.get_command_class(command)
        if getattr(cmd_class,'command_consumes_arguments',None):
            self.get_option_dict(command)['args'] = ("command line", nargs)
            if nargs is not None:
                return []

        return nargs

    def get_cmdline_options(self):
        """Return a '{cmd: {opt:val}}' map of all command-line options

        Option names are all long, but do not include the leading '--', and
        contain dashes rather than underscores.  If the option doesn't take
        an argument (e.g. '--quiet'), the 'val' is 'None'.

        Note that options provided by config files are intentionally excluded.
        """
        d = {}

        for cmd,opts in self.command_options.items():

            for opt,(src,val) in opts.items():

                if src != "command line":
                    continue

                opt = opt.replace('_','-')

                if val==0:
                    # A zero value means a negative option was used; map
                    # it back to its '--without'-style spelling.
                    cmdobj = self.get_command_obj(cmd)
                    neg_opt = self.negative_opt.copy()
                    neg_opt.update(getattr(cmdobj,'negative_opt',{}))
                    for neg,pos in neg_opt.items():
                        if pos==opt:
                            opt=neg
                            val=None
                            break
                    else:
                        raise AssertionError("Shouldn't be able to get here")

                elif val==1:
                    val = None

                d.setdefault(cmd,{})[opt] = val

        return d

    def iter_distribution_names(self):
        """Yield all packages, modules, and extension names in distribution"""

        for pkg in self.packages or ():
            yield pkg

        for module in self.py_modules or ():
            yield module

        for ext in self.ext_modules or ():
            if isinstance(ext,tuple):
                name, buildinfo = ext
            else:
                name = ext.name
            if name.endswith('module'):
                name = name[:-6]
            yield name

    def handle_display_options(self, option_order):
        """If there were any non-global "display-only" options
        (--help-commands or the metadata display options) on the command
        line, display the requested info and return true; else return
        false.
        """
        import sys

        if sys.version_info < (3,) or self.help_commands:
            return _Distribution.handle_display_options(self, option_order)

        # Stdout may be StringIO (e.g. in tests)
        import io
        if not isinstance(sys.stdout, io.TextIOWrapper):
            return _Distribution.handle_display_options(self, option_order)

        # Don't wrap stdout if utf-8 is already the encoding. Provides
        # workaround for #334.
        if sys.stdout.encoding.lower() in ('utf-8', 'utf8'):
            return _Distribution.handle_display_options(self, option_order)

        # Print metadata in UTF-8 no matter the platform
        encoding = sys.stdout.encoding
        errors = sys.stdout.errors
        newline = sys.platform != 'win32' and '\n' or None
        line_buffering = sys.stdout.line_buffering

        sys.stdout = io.TextIOWrapper(
            sys.stdout.detach(), 'utf-8', errors, newline, line_buffering)
        try:
            return _Distribution.handle_display_options(self, option_order)
        finally:
            sys.stdout = io.TextIOWrapper(
                sys.stdout.detach(), encoding, errors, newline, line_buffering)
# Install it throughout the distutils
# (rebinding Distribution in distutils.dist/core/cmd makes plain
# distutils setup scripts pick up the setuptools-enhanced class)
for module in distutils.dist, distutils.core, distutils.cmd:
    module.Distribution = Distribution
class Feature:
    """
    **deprecated** -- The `Feature` facility was never completely implemented
    or supported, `has reported issues
    <https://bitbucket.org/pypa/setuptools/issue/58>`_ and will be removed in
    a future version.

    A subset of the distribution that can be excluded if unneeded/wanted

    Features are created using these keyword arguments:

    'description' -- a short, human readable description of the feature, to
    be used in error messages, and option help messages.

    'standard' -- if true, the feature is included by default if it is
    available on the current system. Otherwise, the feature is only
    included if requested via a command line '--with-X' option, or if
    another included feature requires it. The default setting is 'False'.

    'available' -- if true, the feature is available for installation on the
    current system. The default setting is 'True'.

    'optional' -- if true, the feature's inclusion can be controlled from the
    command line, using the '--with-X' or '--without-X' options. If
    false, the feature's inclusion status is determined automatically,
    based on 'available', 'standard', and whether any other feature
    requires it. The default setting is 'True'.

    'require_features' -- a string or sequence of strings naming features
    that should also be included if this feature is included. Defaults to
    empty list. May also contain 'Require' objects that should be
    added/removed from the distribution.

    'remove' -- a string or list of strings naming packages to be removed
    from the distribution if this feature is *not* included. If the
    feature *is* included, this argument is ignored. This argument exists
    to support removing features that "crosscut" a distribution, such as
    defining a 'tests' feature that removes all the 'tests' subpackages
    provided by other features. The default for this argument is an empty
    list. (Note: the named package(s) or modules must exist in the base
    distribution when the 'setup()' function is initially called.)

    other keywords -- any other keyword arguments are saved, and passed to
    the distribution's 'include()' and 'exclude()' methods when the
    feature is included or excluded, respectively. So, for example, you
    could pass 'packages=["a","b"]' to cause packages 'a' and 'b' to be
    added or removed from the distribution as appropriate.

    A feature must include at least one 'requires', 'remove', or other
    keyword argument. Otherwise, it can't affect the distribution in any way.
    Note also that you can subclass 'Feature' to create your own specialized
    feature types that modify the distribution in other ways when included or
    excluded. See the docstrings for the various methods here for more detail.
    Aside from the methods, the only feature attributes that distributions look
    at are 'description' and 'optional'.
    """

    @staticmethod
    def warn_deprecated():
        """Emit the deprecation warning for the whole Feature facility."""
        warnings.warn(
            "Features are deprecated and will be removed in a future "
            "version. See http://bitbucket.org/pypa/setuptools/65.",
            DeprecationWarning,
            stacklevel=3,
        )

    def __init__(self, description, standard=False, available=True,
                 optional=True, require_features=(), remove=(), **extras):
        self.warn_deprecated()
        self.description = description
        self.standard = standard
        self.available = available
        self.optional = optional
        # A bare string (or a single Require object) is shorthand for a
        # one-element sequence; normalize before filtering below.
        if isinstance(require_features, (str, Require)):
            require_features = require_features,
        # Plain strings name other features; anything else (Require
        # objects) is forwarded to include()/exclude() via 'extras'.
        self.require_features = [
            r for r in require_features if isinstance(r, str)
        ]
        er = [r for r in require_features if not isinstance(r, str)]
        if er:
            extras['require_features'] = er
        if isinstance(remove, str):
            remove = remove,
        self.remove = remove
        self.extras = extras
        if not remove and not require_features and not extras:
            # Fix: the original raised this message with a literal,
            # never-interpolated '%s' placeholder.
            raise DistutilsSetupError(
                "Feature %s: must define 'require_features', 'remove', or at"
                " least one of 'packages', 'py_modules', etc." % description
            )

    def include_by_default(self):
        """Should this feature be included by default?"""
        return self.available and self.standard

    def include_in(self, dist):
        """Ensure feature and its requirements are included in distribution

        You may override this in a subclass to perform additional operations on
        the distribution. Note that this method may be called more than once
        per feature, and so should be idempotent.
        """
        if not self.available:
            # Fix: the concatenated message previously read "required,but".
            raise DistutilsPlatformError(
                self.description + " is required, "
                "but is not available on this platform"
            )
        dist.include(**self.extras)
        for f in self.require_features:
            dist.include_feature(f)

    def exclude_from(self, dist):
        """Ensure feature is excluded from distribution

        You may override this in a subclass to perform additional operations on
        the distribution. This method will be called at most once per
        feature, and only after all included features have been asked to
        include themselves.
        """
        dist.exclude(**self.extras)
        if self.remove:
            for item in self.remove:
                dist.exclude_package(item)

    def validate(self, dist):
        """Verify that feature makes sense in context of distribution

        This method is called by the distribution just before it parses its
        command line. It checks to ensure that the 'remove' attribute, if any,
        contains only valid package/module names that are present in the base
        distribution when 'setup()' is called. You may override it in a
        subclass to perform any other required validation of the feature
        against a target distribution.
        """
        for item in self.remove:
            if not dist.has_contents_for(item):
                raise DistutilsSetupError(
                    "%s wants to be able to remove %s, but the distribution"
                    " doesn't contain any packages or modules under %s"
                    % (self.description, item, item)
                )
| gpl-3.0 |
tmm1/home-assistant | tests/util/test_dt.py | 28 | 4481 | """
tests.test_util
~~~~~~~~~~~~~~~~~
Tests Home Assistant date util methods.
"""
# pylint: disable=too-many-public-methods
import unittest
from datetime import datetime, timedelta
import homeassistant.util.dt as dt_util
TEST_TIME_ZONE = 'America/Los_Angeles'
class TestDateUtil(unittest.TestCase):
    """ Tests util date methods. """

    def setUp(self):
        # Remember the process-wide default so tests that change it can
        # be undone in tearDown.
        self.orig_default_time_zone = dt_util.DEFAULT_TIME_ZONE

    def tearDown(self):
        # Restore the global default time zone mutated by some tests.
        dt_util.set_default_time_zone(self.orig_default_time_zone)

    def test_get_time_zone_retrieves_valid_time_zone(self):
        """ Test getting a time zone. """
        time_zone = dt_util.get_time_zone(TEST_TIME_ZONE)
        self.assertIsNotNone(time_zone)
        self.assertEqual(TEST_TIME_ZONE, time_zone.zone)

    def test_get_time_zone_returns_none_for_garbage_time_zone(self):
        """ Test getting a non existing time zone. """
        time_zone = dt_util.get_time_zone("Non existing time zone")
        self.assertIsNone(time_zone)

    def test_set_default_time_zone(self):
        """ Test setting default time zone. """
        time_zone = dt_util.get_time_zone(TEST_TIME_ZONE)
        dt_util.set_default_time_zone(time_zone)
        # We cannot compare the timezones directly because of DST
        self.assertEqual(time_zone.zone, dt_util.now().tzinfo.zone)

    def test_utcnow(self):
        """ Test the UTC now method. """
        self.assertAlmostEqual(
            dt_util.utcnow().replace(tzinfo=None),
            datetime.utcnow(),
            delta=timedelta(seconds=1))

    def test_now(self):
        """ Test the now method. """
        dt_util.set_default_time_zone(dt_util.get_time_zone(TEST_TIME_ZONE))
        self.assertAlmostEqual(
            dt_util.as_utc(dt_util.now()).replace(tzinfo=None),
            datetime.utcnow(),
            delta=timedelta(seconds=1))

    def test_as_utc_with_naive_object(self):
        """ A naive datetime is assumed to already be in UTC. """
        utcnow = datetime.utcnow()
        self.assertEqual(utcnow,
                         dt_util.as_utc(utcnow).replace(tzinfo=None))

    def test_as_utc_with_utc_object(self):
        """ A UTC datetime passes through as_utc unchanged. """
        utcnow = dt_util.utcnow()
        self.assertEqual(utcnow, dt_util.as_utc(utcnow))

    def test_as_utc_with_local_object(self):
        """ A localized datetime converts to the same instant in UTC. """
        dt_util.set_default_time_zone(dt_util.get_time_zone(TEST_TIME_ZONE))
        localnow = dt_util.now()
        utcnow = dt_util.as_utc(localnow)
        self.assertEqual(localnow, utcnow)
        self.assertNotEqual(localnow.tzinfo, utcnow.tzinfo)

    def test_as_local_with_naive_object(self):
        """ A naive datetime is assumed UTC and converted to local time. """
        now = dt_util.now()
        self.assertAlmostEqual(
            now, dt_util.as_local(datetime.utcnow()),
            delta=timedelta(seconds=1))

    def test_as_local_with_local_object(self):
        """ An already-local datetime passes through as_local unchanged.

        Fix: this previously asserted ``assertEqual(now, now)``, which is
        trivially true and exercised nothing; it now actually calls
        ``as_local``.
        """
        now = dt_util.now()
        self.assertEqual(now, dt_util.as_local(now))

    def test_as_local_with_utc_object(self):
        """ A UTC datetime converts to the same instant in local time. """
        dt_util.set_default_time_zone(dt_util.get_time_zone(TEST_TIME_ZONE))
        utcnow = dt_util.utcnow()
        localnow = dt_util.as_local(utcnow)
        self.assertEqual(localnow, utcnow)
        self.assertNotEqual(localnow.tzinfo, utcnow.tzinfo)

    def test_utc_from_timestamp(self):
        """ Test utc_from_timestamp method. """
        self.assertEqual(
            datetime(1986, 7, 9, tzinfo=dt_util.UTC),
            dt_util.utc_from_timestamp(521251200))

    def test_datetime_to_str(self):
        """ Test datetime_to_str. """
        self.assertEqual(
            "12:00:00 09-07-1986",
            dt_util.datetime_to_str(datetime(1986, 7, 9, 12, 0, 0)))

    def test_datetime_to_local_str(self):
        """ Test datetime_to_local_str. """
        self.assertEqual(
            dt_util.datetime_to_str(dt_util.now()),
            dt_util.datetime_to_local_str(dt_util.utcnow()))

    def test_str_to_datetime_converts_correctly(self):
        """ Test str_to_datetime converts strings. """
        self.assertEqual(
            datetime(1986, 7, 9, 12, 0, 0, tzinfo=dt_util.UTC),
            dt_util.str_to_datetime("12:00:00 09-07-1986"))

    def test_str_to_datetime_returns_none_for_incorrect_format(self):
        """ Test str_to_datetime returns None if incorrect format. """
        self.assertIsNone(dt_util.str_to_datetime("not a datetime string"))

    def test_strip_microseconds(self):
        """ strip_microseconds zeroes the microsecond component. """
        test_time = datetime(2015, 1, 1, microsecond=5000)
        self.assertNotEqual(0, test_time.microsecond)
        self.assertEqual(0, dt_util.strip_microseconds(test_time).microsecond)
| mit |
chiapas/sumatrapdf-new | ext/freetype2/src/tools/docmaker/formatter.py | 515 | 4962 | # Formatter (c) 2002, 2004, 2007, 2008 David Turner <david@freetype.org>
#
from sources import *
from content import *
from utils import *
# This is the base Formatter class. Its purpose is to convert
# a content processor's data into specific documents (i.e., table of
# contents, global index, and individual API reference indices).
#
# You need to sub-class it to output anything sensible. For example,
# the file tohtml.py contains the definition of the HtmlFormatter sub-class
# used to output -- you guessed it -- HTML.
#
class Formatter:
    """Base driver that turns a content processor's data into documents
    (table of contents, global index, per-section API pages).

    Sub-classes (e.g. the HTML formatter) override the *_enter/*_exit
    hooks below to emit a concrete output format; this base class only
    performs the traversal.  NOTE: Python 2 code (`has_key`,
    `list.sort(cmp)`) — keep it py2-compatible.
    """
    def __init__( self, processor ):
        """Index every block (and enum value) of *processor* by name."""
        self.processor = processor
        self.identifiers = {}
        self.chapters = processor.chapters
        self.sections = processor.sections.values()
        self.block_index = []
        # store all blocks in a dictionary
        self.blocks = []
        for section in self.sections:
            for block in section.blocks.values():
                self.add_identifier( block.name, block )
                # add enumeration values to the index, since this is useful
                for markup in block.markups:
                    if markup.tag == 'values':
                        for field in markup.fields:
                            self.add_identifier( field.name, block )
        # Sorted name list driving index generation (py2: in-place cmp sort).
        self.block_index = self.identifiers.keys()
        self.block_index.sort( index_sort )
    def add_identifier( self, name, block ):
        """Register *name* -> *block*; keep the first definition and warn
        on stderr about duplicates."""
        if self.identifiers.has_key( name ):
            # duplicate name!
            sys.stderr.write( \
                "WARNING: duplicate definition for '" + name + "' in " + \
                block.location() + ", previous definition in " + \
                self.identifiers[name].location() + "\n" )
        else:
            self.identifiers[name] = block
    #
    # Formatting the table of contents
    #
    def toc_enter( self ):
        """Hook: start of the table of contents."""
        pass
    def toc_chapter_enter( self, chapter ):
        """Hook: start of one chapter in the TOC."""
        pass
    def toc_section_enter( self, section ):
        """Hook: start of one section entry in the TOC."""
        pass
    def toc_section_exit( self, section ):
        """Hook: end of one section entry in the TOC."""
        pass
    def toc_chapter_exit( self, chapter ):
        """Hook: end of one chapter in the TOC."""
        pass
    def toc_index( self, index_filename ):
        """Hook: emit a link to the global index from the TOC."""
        pass
    def toc_exit( self ):
        """Hook: end of the table of contents."""
        pass
    def toc_dump( self, toc_filename = None, index_filename = None ):
        """Walk chapters/sections, firing the toc_* hooks; output is
        redirected to *toc_filename* when given."""
        output = None
        if toc_filename:
            output = open_output( toc_filename )
        self.toc_enter()
        for chap in self.processor.chapters:
            self.toc_chapter_enter( chap )
            for section in chap.sections:
                self.toc_section_enter( section )
                self.toc_section_exit( section )
            self.toc_chapter_exit( chap )
        self.toc_index( index_filename )
        self.toc_exit()
        if output:
            close_output( output )
    #
    # Formatting the index
    #
    def index_enter( self ):
        """Hook: start of the global index."""
        pass
    def index_name_enter( self, name ):
        """Hook: start of one index entry."""
        pass
    def index_name_exit( self, name ):
        """Hook: end of one index entry."""
        pass
    def index_exit( self ):
        """Hook: end of the global index."""
        pass
    def index_dump( self, index_filename = None ):
        """Walk the sorted name index, firing the index_* hooks."""
        output = None
        if index_filename:
            output = open_output( index_filename )
        self.index_enter()
        for name in self.block_index:
            self.index_name_enter( name )
            self.index_name_exit( name )
        self.index_exit()
        if output:
            close_output( output )
    #
    # Formatting a section
    #
    def section_enter( self, section ):
        """Hook: start of a section page."""
        pass
    def block_enter( self, block ):
        """Hook: start of one documentation block."""
        pass
    def markup_enter( self, markup, block = None ):
        """Hook: start of one markup element inside a block."""
        pass
    def field_enter( self, field, markup = None, block = None ):
        """Hook: start of one field inside a markup element."""
        pass
    def field_exit( self, field, markup = None, block = None ):
        """Hook: end of one field inside a markup element."""
        pass
    def markup_exit( self, markup, block = None ):
        """Hook: end of one markup element inside a block."""
        pass
    def block_exit( self, block ):
        """Hook: end of one documentation block."""
        pass
    def section_exit( self, section ):
        """Hook: end of a section page."""
        pass
    def section_dump( self, section, section_filename = None ):
        """Walk one section's blocks/markups/fields, firing the hooks."""
        output = None
        if section_filename:
            output = open_output( section_filename )
        self.section_enter( section )
        for name in section.block_names:
            block = self.identifiers[name]
            self.block_enter( block )
            for markup in block.markups[1:]: # always ignore first markup!
                self.markup_enter( markup, block )
                for field in markup.fields:
                    self.field_enter( field, markup, block )
                    self.field_exit( field, markup, block )
                self.markup_exit( markup, block )
            self.block_exit( block )
        self.section_exit( section )
        if output:
            close_output( output )
    def section_dump_all( self ):
        """Dump every section to the default output."""
        for section in self.sections:
            self.section_dump( section )
# eof
| gpl-3.0 |
angvp/django-klingon | runtests.py | 1 | 2338 | #! /usr/bin/env python
from __future__ import print_function
import pytest
import sys
import os
import subprocess
# Canned pytest invocations selectable from the command line
# ('--fast' switches to the quiet run and skips linting).
PYTEST_ARGS = {
    'default': ['tests',],
    'fast': ['tests', '-q'],
}
# Paths flake8 lints; E501 (line length) is deliberately ignored.
FLAKE8_ARGS = ['klingon', 'tests', '--ignore=E501']
# Make the project root importable regardless of the caller's cwd.
sys.path.append(os.path.dirname(__file__))
def exit_on_failure(ret, message=None):
    """Terminate the process with *ret* as the exit status when it is
    truthy; a falsy *ret* means success and returns normally.

    *message* is accepted for call-site symmetry but is currently unused.
    """
    if not ret:
        return
    sys.exit(ret)
def flake8_main(args):
    """Run flake8 over *args* (a list of CLI arguments), report the
    outcome on stdout, and return flake8's exit status."""
    print('Running flake8 code linting')
    ret = subprocess.call(['flake8'] + args)
    outcome = 'flake8 failed' if ret else 'flake8 passed'
    print(outcome)
    return ret
def split_class_and_function(string):
    """Turn 'TestCase.test_name' into the pytest -k expression
    'TestCase and test_name' (splitting on the first dot only)."""
    class_part, function_part = string.split('.', 1)
    return '{0} and {1}'.format(class_part, function_part)
def is_function(string):
    """`True` if the string looks like it names a test function, either
    bare ('test_x') or dotted ('Class.test_x')."""
    if string.startswith('test_'):
        return True
    return '.test_' in string
def is_class(string):
    """`True` if the first character is already uppercase - assume the
    string names a class.  (Deliberately `c == c.upper()`, not
    `c.isupper()`, so digits/underscores also count.)"""
    first = string[0]
    return first == first.upper()
if __name__ == "__main__":
    # Flag parsing works by removing flags from sys.argv; ValueError from
    # list.remove doubles as "flag was absent".  Order matters: what is
    # left in argv afterwards is forwarded to pytest.
    try:
        sys.argv.remove('--nolint')
    except ValueError:
        run_flake8 = True
    else:
        run_flake8 = False
    try:
        sys.argv.remove('--lintonly')
    except ValueError:
        run_tests = True
    else:
        run_tests = False
    try:
        sys.argv.remove('--fast')
    except ValueError:
        style = 'default'
    else:
        style = 'fast'
        # Fast mode skips linting entirely.
        run_flake8 = False
    if len(sys.argv) > 1:
        # Remaining argv selects what to run; a leading flag means "all
        # tests", a TestCase/function name becomes a pytest -k filter.
        pytest_args = sys.argv[1:]
        first_arg = pytest_args[0]
        if first_arg.startswith('-'):
            # `runtests.py [flags]`
            pytest_args = ['tests'] + pytest_args
        elif is_class(first_arg) and is_function(first_arg):
            # `runtests.py TestCase.test_function [flags]`
            expression = split_class_and_function(first_arg)
            pytest_args = ['tests', '-k', expression] + pytest_args[1:]
        elif is_class(first_arg) or is_function(first_arg):
            # `runtests.py TestCase [flags]`
            # `runtests.py test_function [flags]`
            pytest_args = ['tests', '-k', pytest_args[0]] + pytest_args[1:]
    else:
        pytest_args = PYTEST_ARGS[style]
    if run_tests:
        exit_on_failure(pytest.main(pytest_args))
    if run_flake8:
        exit_on_failure(flake8_main(FLAKE8_ARGS))
| lgpl-3.0 |
ywcui1990/htmresearch | projects/capybara/datasets/SyntheticData/generate_synthetic_data.py | 9 | 5156 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Generate synthetic sequences using a pool of sequence motifs
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from sklearn import decomposition
import os
plt.figure()
# Generate a set of sequence motifs
def generateSequenceMotifs(numMotif, motifLength, seed=None):
  """Create `numMotif` motif templates, each of length `motifLength`.

  Random Gaussian data is decomposed with PCA and the leading components
  are kept, then each motif is rescaled in place to span [0, 1].
  `seed`, when given, makes the result reproducible.
  """
  if seed is not None:
    np.random.seed(seed)
  rawData = np.random.randn(motifLength, motifLength)
  pca = decomposition.PCA(n_components=numMotif)
  pca.fit(rawData)
  motifs = pca.components_
  for row in range(numMotif):
    motif = motifs[row, :]
    motifs[row, :] = motif - min(motif)
    motifs[row, :] = motifs[row, :] / max(motifs[row, :])
  return motifs
def generateSequence(sequenceLength, useMotif, currentClass, sequenceMotifs):
  """Generate one synthetic sequence of `sequenceLength` samples.

  The sequence alternates a random-noise gap of 1-10 samples with one
  motif drawn from `useMotif[currentClass]` (rows of `sequenceMotifs`).
  Returns `(sequence, motifState)` where `motifState[t]` is -1 inside a
  gap and the motif index inside a motif.
  """
  motifLength = sequenceMotifs.shape[1]
  # Over-allocate so a motif starting near the end fits, then trim.
  padded = sequenceLength + 20
  sequence = np.zeros((padded,))
  motifState = np.zeros((padded,))
  gapLengths = np.linspace(1, 10, 10).astype('int')
  pos = 0
  while pos < sequenceLength:
    gap = np.random.choice(gapLengths)
    sequence[pos:pos + gap] = np.random.rand(gap)
    motifState[pos:pos + gap] = -1
    pos += gap
    motifIdx = np.random.choice(useMotif[currentClass])
    sequence[pos:pos + motifLength] = sequenceMotifs[motifIdx]
    motifState[pos:pos + motifLength] = motifIdx
    pos += motifLength
  return sequence[:sequenceLength], motifState[:sequenceLength]
def generateSequences(numSeq, numClass, sequenceLength, useMotif, sequenceMotifs):
  """Build a (numSeq, sequenceLength + 1) matrix of labelled sequences.

  Column 0 holds the class label; the remaining columns hold a synthetic
  sequence generated from that class's motif pool via generateSequence.
  Classes are assigned in contiguous, equal-sized groups (numSeq is
  expected to be divisible by numClass).

  Fix: use floor division for the per-class count.  Under Python 3 the
  original true division yields a float, and `[classIdx] * float` raises
  TypeError; `//` is identical to `/` for ints under Python 2.
  """
  trainData = np.zeros((numSeq, sequenceLength + 1))
  numSeqPerClass = numSeq // numClass
  classList = []
  for classIdx in range(numClass):
    classList += [classIdx] * numSeqPerClass
  for seq in range(numSeq):
    currentClass = classList[seq]
    sequence, motifState = generateSequence(sequenceLength, useMotif,
                                            currentClass, sequenceMotifs)
    trainData[seq, 0] = currentClass
    trainData[seq, 1:] = sequence
  return trainData
# ---------------------------------------------------------------------
# Script body: build a pool of motifs, assign disjoint motif subsets to
# each class, plot example sequences, and write train/test CSV files.
# NOTE: Python 2 script (uses xrange below).
# ---------------------------------------------------------------------
numMotif = 5
motifLength = 5
sequenceMotifs = generateSequenceMotifs(numMotif, 5, seed=42)
numTrain = 100
numTest = 100
numClass = 2
motifPerClass = 2
np.random.seed(2)
# Draw motifPerClass motifs per class without replacement, so no motif
# is shared between classes.
useMotif = {}
motifList = set(range(numMotif))
for classIdx in range(numClass):
  useMotifForClass = []
  for _ in range(motifPerClass):
    useMotifForClass.append(np.random.choice(list(motifList)))
    motifList.remove(useMotifForClass[-1])
  useMotif[classIdx] = useMotifForClass
sequenceLength = 100
currentClass = 0
sequence, motifState = generateSequence(sequenceLength, useMotif,
                                        currentClass, sequenceMotifs)
# One plot colour per motif, for the shaded motif-occurrence rectangles.
MotifColor = {}
colorList = ['r','g','b','c','m','y']
i = 0
for c in useMotif.keys():
  for v in useMotif[c]:
    MotifColor[v] = colorList[i]
    i += 1
# Plot two example sequences per class, shading each motif occurrence.
fig, ax = plt.subplots(nrows=4, ncols=1, figsize=(20, 3 * 4))
for plti in xrange(4):
  currentClass = [0 if plti < 2 else 1][0]
  sequence, motifState = generateSequence(sequenceLength, useMotif,
                                          currentClass, sequenceMotifs)
  ax[plti].plot(sequence, 'k-')
  # Scan motifState for contiguous runs of a motif index (>= 0) and draw
  # one rectangle per run; -1 marks random-gap samples.
  startPatch = False
  for t in range(len(motifState)):
    if motifState[t] >= 0 and startPatch is False:
      startPatchAt = t
      startPatch = True
      currentMotif = motifState[t]
    if startPatch and (motifState[t] < 0):
      endPatchAt = t-1
      ax[plti].add_patch(
        patches.Rectangle(
          (startPatchAt, 0),
          endPatchAt-startPatchAt, 1, alpha=0.5,
          color=MotifColor[currentMotif]
        )
      )
      startPatch = False
  ax[plti].set_xlim([0, 100])
  ax[plti].set_ylabel('class {}'.format(currentClass))
outputDir = 'Test1'
if not os.path.exists(outputDir):
  os.makedirs(outputDir)
# Emit UCR-style CSVs: label in column 0, sequence in the rest.
trainData = generateSequences(
  numTrain, numClass, sequenceLength, useMotif, sequenceMotifs)
testData = generateSequences(
  numTest, numClass, sequenceLength, useMotif, sequenceMotifs)
np.savetxt('Test1/Test1_TRAIN', trainData, delimiter=',', fmt='%.10f')
np.savetxt('Test1/Test1_TEST', testData, delimiter=',', fmt='%.10f')
plt.savefig('Test1/Test1.png')
| agpl-3.0 |
wwj718/edx-platform | common/djangoapps/util/tests/test_date_utils.py | 55 | 7800 | # -*- coding: utf-8 -*-
"""
Tests for util.date_utils
"""
from datetime import datetime, timedelta, tzinfo
import unittest
import ddt
from mock import patch
from nose.tools import assert_equals, assert_false # pylint: disable=no-name-in-module
from pytz import UTC
from util.date_utils import (
get_default_time_display, get_time_display, almost_same_datetime,
strftime_localized,
)
def test_get_default_time_display():
    """None renders as an empty string; an aware UTC datetime gets the
    default 'Mon DD, YYYY at HH:MM UTC' rendering."""
    assert_equals("", get_default_time_display(None))
    sample = datetime(1992, 3, 12, 15, 3, 30, tzinfo=UTC)
    assert_equals("Mar 12, 1992 at 15:03 UTC",
                  get_default_time_display(sample))
def test_get_dflt_time_disp_notz():
    """A naive datetime is rendered as if it were UTC."""
    naive = datetime(1992, 3, 12, 15, 3, 30)
    assert_equals("Mar 12, 1992 at 15:03 UTC",
                  get_default_time_display(naive))
def test_get_time_disp_ret_empty():
    """Both a None datetime and an empty format yield an empty string."""
    assert_equals("", get_time_display(None))
    sample = datetime(1992, 3, 12, 15, 3, 30, tzinfo=UTC)
    assert_equals("", get_time_display(sample, ""))
def test_get_time_display():
    """An explicit strftime format string is honoured verbatim."""
    sample = datetime(1992, 3, 12, 15, 3, 30, tzinfo=UTC)
    for fmt, expected in (
            ('dummy text', "dummy text"),
            ('%b %d %Y', "Mar 12 1992"),
            ('%b %d %Y %Z', "Mar 12 1992 UTC"),
            ('%b %d %H:%M', "Mar 12 15:03")):
        assert_equals(expected, get_time_display(sample, fmt))
def test_get_time_pass_through():
    """Missing, None, or malformed format strings fall back to the
    default rendering."""
    sample = datetime(1992, 3, 12, 15, 3, 30, tzinfo=UTC)
    default = "Mar 12, 1992 at 15:03 UTC"
    assert_equals(default, get_time_display(sample))
    assert_equals(default, get_time_display(sample, None))
    assert_equals(default, get_time_display(sample, "%"))
def test_get_time_display_coerce():
    """coerce_tz converts into the named zone (DST-aware: PST in winter,
    PDT in summer) and is ignored when the zone name is unknown."""
    winter = datetime(1992, 1, 12, 15, 3, 30, tzinfo=UTC)
    summer = datetime(1992, 7, 12, 15, 3, 30, tzinfo=UTC)
    assert_equals("Jan 12, 1992 at 07:03 PST",
                  get_time_display(winter, None, coerce_tz="US/Pacific"))
    assert_equals("Jan 12, 1992 at 15:03 UTC",
                  get_time_display(winter, None, coerce_tz="NONEXISTENTTZ"))
    assert_equals("Jan 12 07:03",
                  get_time_display(winter, '%b %d %H:%M', coerce_tz="US/Pacific"))
    assert_equals("Jul 12, 1992 at 08:03 PDT",
                  get_time_display(summer, None, coerce_tz="US/Pacific"))
    assert_equals("Jul 12, 1992 at 15:03 UTC",
                  get_time_display(summer, None, coerce_tz="NONEXISTENTTZ"))
    assert_equals("Jul 12 08:03",
                  get_time_display(summer, '%b %d %H:%M', coerce_tz="US/Pacific"))
class NamelessTZ(tzinfo):
    """Fixed UTC-03:00 timezone that deliberately supplies no tz name.

    Used to exercise rendering of datetimes whose tzinfo cannot provide a
    zone abbreviation, forcing the numeric-offset fallback.
    """

    def utcoffset(self, _dt):
        # Constant offset, independent of the datetime passed in.
        return -timedelta(hours=3)

    def dst(self, _dt):
        # No daylight-saving component.
        return timedelta(hours=0)
def test_get_default_time_display_no_tzname():
    """A tzinfo without a name falls back to the numeric UTC offset."""
    assert_equals("", get_default_time_display(None))
    sample = datetime(1992, 3, 12, 15, 3, 30, tzinfo=NamelessTZ())
    assert_equals("Mar 12, 1992 at 15:03-0300",
                  get_default_time_display(sample))
def test_almost_same_datetime():
    """Datetimes within the tolerance (default: one minute) compare as
    'almost same'; anything further apart does not."""
    base = datetime(2013, 5, 3, 10, 20, 30)
    close = datetime(2013, 5, 3, 10, 21, 29)
    far = datetime(2013, 5, 3, 11, 20, 30)
    # 59 seconds apart: inside the default tolerance.
    assert almost_same_datetime(base, close)
    # 59 minutes apart, explicit one-hour tolerance: still "almost same".
    assert almost_same_datetime(far, close, timedelta(hours=1))
    # 59 minutes apart with the default tolerance: not "almost same".
    assert_false(almost_same_datetime(far, close))
    # ...nor with an explicit ten-minute tolerance.
    assert_false(almost_same_datetime(far, close, timedelta(minutes=10)))
def fake_ugettext(translations):
    """
    Create a fake implementation of ugettext, for testing.

    The returned callable looks *text* up in *translations* and falls
    back to the untranslated text when no entry exists.
    """
    return lambda text: translations.get(text, text)
def fake_pgettext(translations):
    """
    Create a fake implementation of pgettext, for testing.

    *translations* is keyed on (context, text) pairs; unknown pairs fall
    back to the untranslated text.
    """
    return lambda context, text: translations.get((context, text), text)
@ddt.ddt
class StrftimeLocalizedTest(unittest.TestCase):
"""
Tests for strftime_localized.
"""
@ddt.data(
("%Y", "2013"),
("%m/%d/%y", "02/14/13"),
("hello", "hello"),
(u'%Y년 %m월 %d일', u"2013년 02월 14일"),
("%a, %b %d, %Y", "Thu, Feb 14, 2013"),
("%I:%M:%S %p", "04:41:17 PM"),
("%A at %-I%P", "Thursday at 4pm"),
)
def test_usual_strftime_behavior(self, (fmt, expected)):
dtime = datetime(2013, 02, 14, 16, 41, 17)
self.assertEqual(expected, strftime_localized(dtime, fmt))
# strftime doesn't like Unicode, so do the work in UTF8.
self.assertEqual(expected, dtime.strftime(fmt.encode('utf8')).decode('utf8'))
@ddt.data(
("SHORT_DATE", "Feb 14, 2013"),
("LONG_DATE", "Thursday, February 14, 2013"),
("TIME", "04:41:17 PM"),
("DAY_AND_TIME", "Thursday at 4pm"),
("%x %X!", "Feb 14, 2013 04:41:17 PM!"),
)
def test_shortcuts(self, (fmt, expected)):
dtime = datetime(2013, 02, 14, 16, 41, 17)
self.assertEqual(expected, strftime_localized(dtime, fmt))
@patch('util.date_utils.pgettext', fake_pgettext(translations={
("abbreviated month name", "Feb"): "XXfebXX",
("month name", "February"): "XXfebruaryXX",
("abbreviated weekday name", "Thu"): "XXthuXX",
("weekday name", "Thursday"): "XXthursdayXX",
("am/pm indicator", "PM"): "XXpmXX",
}))
@ddt.data(
("SHORT_DATE", "XXfebXX 14, 2013"),
("LONG_DATE", "XXthursdayXX, XXfebruaryXX 14, 2013"),
("DATE_TIME", "XXfebXX 14, 2013 at 16:41"),
("TIME", "04:41:17 XXpmXX"),
("%x %X!", "XXfebXX 14, 2013 04:41:17 XXpmXX!"),
)
def test_translated_words(self, (fmt, expected)):
dtime = datetime(2013, 02, 14, 16, 41, 17)
self.assertEqual(expected, strftime_localized(dtime, fmt))
@patch('util.date_utils.ugettext', fake_ugettext(translations={
"SHORT_DATE_FORMAT": "date(%Y.%m.%d)",
"LONG_DATE_FORMAT": "date(%A.%Y.%B.%d)",
"DATE_TIME_FORMAT": "date(%Y.%m.%d@%H.%M)",
"TIME_FORMAT": "%Hh.%Mm.%Ss",
}))
@ddt.data(
("SHORT_DATE", "date(2013.02.14)"),
("Look: %x", "Look: date(2013.02.14)"),
("LONG_DATE", "date(Thursday.2013.February.14)"),
("DATE_TIME", "date(2013.02.14@16.41)"),
("TIME", "16h.41m.17s"),
("The time is: %X", "The time is: 16h.41m.17s"),
("%x %X", "date(2013.02.14) 16h.41m.17s"),
)
def test_translated_formats(self, (fmt, expected)):
dtime = datetime(2013, 02, 14, 16, 41, 17)
self.assertEqual(expected, strftime_localized(dtime, fmt))
@patch('util.date_utils.ugettext', fake_ugettext(translations={
"SHORT_DATE_FORMAT": "oops date(%Y.%x.%d)",
"TIME_FORMAT": "oops %Hh.%Xm.%Ss",
}))
@ddt.data(
("SHORT_DATE", "Feb 14, 2013"),
("TIME", "04:41:17 PM"),
)
def test_recursion_protection(self, (fmt, expected)):
dtime = datetime(2013, 02, 14, 16, 41, 17)
self.assertEqual(expected, strftime_localized(dtime, fmt))
@ddt.data(
"%",
"Hello%"
"%Y/%m/%d%",
)
def test_invalid_format_strings(self, fmt):
dtime = datetime(2013, 02, 14, 16, 41, 17)
with self.assertRaises(ValueError):
strftime_localized(dtime, fmt)
| agpl-3.0 |
vshymanskyy/Espruino | boards/STM32VLDISCOVERY.py | 8 | 2445 | #!/bin/false
# This file is part of Espruino, a JavaScript interpreter for Microcontrollers
#
# Copyright (C) 2013 Gordon Williams <gw@pur3.co.uk>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# ----------------------------------------------------------------------------------------
# This file contains information for a specific board - the available pins, and where LEDs,
# Buttons, and other in-built peripherals are. It is used to build documentation as well
# as various source and header files for Espruino.
# ----------------------------------------------------------------------------------------
import pinutils;
info = {
'name' : "STM32 VL Discovery",
'link' : [ "http://www.st.com/stm32-discovery" ],
'variables' : 500,
'binary_name' : 'espruino_%v_stm32vldiscovery.bin',
};
chip = {
'part' : "STM32F100RBT6",
'family' : "STM32F1",
'package' : "LQFP64",
'ram' : 8,
'flash' : 128,
'speed' : 24,
'usart' : 3,
'spi' : 2,
'i2c' : 2,
'adc' : 3,
'dac' : 0,
};
# left-right, or top-bottom order
board = {
'left' : [ 'GND', 'NC', '3.3', 'VBAT', 'C13', 'C14', 'C15', 'D0', 'D1', 'RST', 'C0', 'C1', 'C2', 'C3', 'A0', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'C4', 'C5', 'B0', 'B1', 'B2', 'GND' ],
'right' : [ 'GND', 'NC', '5V', 'B9', 'B8', 'BOOT', 'B7', 'B6', 'B5', 'B4', 'B3', 'D2', 'C12', 'C11', 'C10', 'A15', 'A14', 'A13', 'A12', 'A11', 'A10', 'A9', 'A8', 'C9', 'C8', 'C7', 'C6', 'GND' ],
'bottom' : [ 'B10','B11','B12','B13','B14','B15' ],
};
devices = {
'OSC' : { 'pin_1' : 'D0',
'pin_2' : 'D1' },
'OSC_RTC' : { 'pin_1' : 'C14',
'pin_2' : 'C15' },
'LED1' : { 'pin' : 'C9' },
'LED2' : { 'pin' : 'C8' },
'BTN1' : { 'pin' : 'A0' },
'JTAG' : {
'pin_MS' : 'A13',
'pin_CK' : 'A14',
'pin_DI' : 'A15'
},
};
board_css = """
#board {
width: 376px;
height: 750px;
left: 200px;
background-image: url(img/STM32VLDISCOVERY.jpg);
}
#boardcontainer {
height: 950px;
}
#left {
top: 40px;
right: 330px;
}
#right {
top: 40px;
left: 330px;
}
#bottom {
top: 710px;
left: 125px;
}
""";
def get_pins():
  """Return this board's pin list: scan the STM32F103xB pin CSV, fill any
  numbering gaps, then keep only pins present on this chip's package."""
  scanned = pinutils.scan_pin_file([], 'stm32f103xb.csv', 6, 10, 11)
  filled = pinutils.fill_gaps_in_pin_list(scanned)
  return pinutils.only_from_package(filled, chip["package"])
| mpl-2.0 |
austinluong/fit-extract | fit_extract/util.py | 1 | 1626 | #!/usr/bin/env python
import glob
import os
def correctPath(path):
    """Normalize *path* and guarantee exactly one trailing '/'."""
    normalized = os.path.normpath(path)
    return normalized + '/'
def pathToName(filepath):
    """Return the base file name of *filepath* without its extension.

    >>> pathToName('C://Users//User//Folder//Subfolder//foo_C07.fit')
    'foo_C07'
    """
    base = os.path.basename(filepath)
    name, _extension = os.path.splitext(base)
    return name
def nameToChannel(name):
    """Extract the channel number from a file name's trailing '_Cxx' tag.

    A leading zero in the two-digit tag is dropped ('C07' -> '7'); tags
    without one keep all their digits ('C12' -> '12').

    >>> nameToChannel('foo_C07')
    '7'
    >>> nameToChannel('foo_C12')
    '12'
    """
    tag = name.split('_')[-1]
    digits = tag[1:]
    if digits.startswith('0'):
        return digits[-1]
    return digits
def makeCycleSortable(cycle):
    """Format a zero-based cycle number (incremented by one) as a
    zero-padded, lexicographically sortable string.

    >>> makeCycleSortable(2)
    '003'
    >>> makeCycleSortable(43)
    '044'
    >>> makeCycleSortable(152)
    '153'
    """
    label = str(cycle + 1)
    if cycle + 1 < 10:
        return '00' + label
    if cycle + 1 < 100:
        return '0' + label
    return label
def getFitFilePaths(path=''):
    """Return every file matching '<path>*.fit' (plain string-prefix glob,
    so *path* is expected to end with a separator or be empty)."""
    pattern = '{0}*.fit'.format(path)
    return glob.glob(pattern)
def ParamFromSearchParam(searchParam):
    """Strip the trailing ' =' from a search parameter.

    >>> ParamFromSearchParam('R1 =')
    'R1'
    """
    trimmed = searchParam[:len(searchParam) - 2]
    return trimmed
def SearchParamFromParam(param):
    """Append ' =' to form the search key for *param*.

    >>> SearchParamFromParam('R1')
    'R1 ='
    """
    return '{0} ='.format(param)
| unlicense |
luistorresm/sale-workflow | __unported__/pricelist_share_companies/__openerp__.py | 34 | 2999 | # -*- coding: utf-8 -*-
#
#
# Author: Joël Grand-Guillaume
# Copyright 2010 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
{
'name': 'Share pricelist between compagnies, not product',
'version': '1.0',
'category': 'Generic Modules/Projects & Services',
'description':
'''
In OpenERP, product prices (cost, list) are expressed in the currency of
the price_type (by default the same than your company currency).
The idea here is to have the same products between compagnies (with each one
their own currency through different price_type and different costs) but
only one pricelist for all. For that purpose, we add a company_id on price_type
object and a rule to separate them for each company. This way,
the price computation of pricelist will take the right price_type currency as
based price.
Concretely, to have a different cost price for a second company, you have to :
- Create a new standard price on product.template
- Create a new 'Price Type' on this new field, with the desired currency and
assigned to the new currency
- Assign the existing 'Cost Price' to your main company
- On the setup of each company, in the 'Configuration''s Tab, select
the product field used for the cost
The Price Type used is the first one found for the cost field configured on
the company. To ensure the right Price Type
is selected, you have to put the company on the Price Types, and according to
the security rule created, you will have access
only to the right Price Type.
Example:
I create a product A. it has 2 fields for cost prices : Cost Price and
Cost Price CH
Price type Sale company A : Cost Price / EUR
Price type Sale company B : Cost Price CH / CHF
Cost Price of Product A, company A: 60
Cost Price CH of Product A, company B: 70
Product A in company A: The cost price is 60 * currency rate
Product A in company B: The cost price is 70 * currency rate
''',
'author': "Camptocamp,Odoo Community Association (OCA)",
'website': 'http://camptocamp.com',
'depends': ['product', ],
'data': [
'pricelist_view.xml',
'company_view.xml',
'security/pricelist_security.xml',
],
'demo': [],
'test': [],
'installable': False,
'auto_install': False,
'application': False
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
pfnet/chainer | tests/chainer_tests/functions_tests/pooling_tests/test_roi_max_align_2d.py | 4 | 4400 | import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer_tests.functions_tests.pooling_tests import pooling_nd_helper
def _pair(x):
    """Expand a scalar parameter into a 2-tuple.

    Iterable values (e.g. an explicit ``(h, w)`` pair) are returned
    unchanged; a scalar ``x`` becomes ``(x, x)``.
    """
    if not isinstance(x, chainer.utils.collections_abc.Iterable):
        return x, x
    return x
@testing.parameterize(*testing.product({
    'sampling_ratio': [
        None, 1, 2, (None, 3), (1, 2),
        (numpy.int32(1), numpy.int32(2)),
    ],
    'outsize': [
        5, 7, (5, 7),
        (numpy.int32(5), numpy.int32(7)),
    ],
    'spatial_scale': [
        0.6, 1.0, 2.0, numpy.float32(0.6), numpy.int32(2),
    ],
}))
class TestROIMaxAlign2D(unittest.TestCase):
    """Tests for ``functions.roi_max_align_2d``.

    The cross-product of ``sampling_ratio``, ``outsize`` and
    ``spatial_scale`` above is injected by ``testing.parameterize`` as
    instance attributes of the same names.
    """

    def setUp(self):
        # Deterministic feature map of shape (N, C, H, W) = (3, 3, 12, 8).
        N = 3
        n_channels = 3
        self.x = pooling_nd_helper.shuffled_linspace(
            (N, n_channels, 12, 8), numpy.float32)
        # One RoI per row; assumes (min, min, max, max) corner coordinates —
        # TODO(review): confirm coordinate order against the function docs.
        self.rois = numpy.array([
            [1, 1, 6, 6],
            [6, 2, 7, 11],
            [3, 1, 5, 10],
            [3, 3, 3, 3],
            [1.1, 2.2, 3.3, 4.4],
        ], dtype=numpy.float32)
        # Batch index of the image each RoI is taken from.
        self.roi_indices = numpy.array([0, 2, 1, 0, 2], dtype=numpy.int32)
        n_rois = self.rois.shape[0]
        outsize = _pair(self.outsize)
        # Upstream gradient: one value per pooled output element.
        self.gy = numpy.random.uniform(
            -1, 1, (n_rois, n_channels,
                    outsize[0], outsize[1])).astype(numpy.float32)
        self.check_backward_options = {'atol': 5e-4, 'rtol': 5e-3}

    def check_forward(self, x_data, roi_data, roi_index_data):
        """Run the forward pass and check output dtype and shape."""
        x = chainer.Variable(x_data)
        rois = chainer.Variable(roi_data)
        roi_indices = chainer.Variable(roi_index_data)
        y = functions.roi_max_align_2d(
            x, rois, roi_indices, outsize=self.outsize,
            spatial_scale=self.spatial_scale,
            sampling_ratio=self.sampling_ratio,
        )
        self.assertEqual(y.data.dtype, numpy.float32)
        y_data = cuda.to_cpu(y.data)
        self.assertEqual(self.gy.shape, y_data.shape)

    @condition.retry(3)
    def test_forward_cpu(self):
        self.check_forward(self.x, self.rois, self.roi_indices)

    @attr.gpu
    @condition.retry(3)
    def test_forward_gpu(self):
        self.check_forward(
            cuda.to_gpu(self.x), cuda.to_gpu(self.rois),
            cuda.to_gpu(self.roi_indices))

    @attr.gpu
    @condition.retry(3)
    def test_forward_cpu_gpu_equal(self):
        """CPU and GPU forward results must agree elementwise."""
        # cpu
        x_cpu = chainer.Variable(self.x)
        rois_cpu = chainer.Variable(self.rois)
        roi_index_cpu = chainer.Variable(self.roi_indices)
        y_cpu = functions.roi_max_align_2d(
            x_cpu, rois_cpu, roi_index_cpu,
            outsize=self.outsize, spatial_scale=self.spatial_scale,
            sampling_ratio=self.sampling_ratio,
        )
        # gpu
        x_gpu = chainer.Variable(cuda.to_gpu(self.x))
        rois_gpu = chainer.Variable(cuda.to_gpu(self.rois))
        roi_index_gpu = chainer.Variable(cuda.to_gpu(self.roi_indices))
        y_gpu = functions.roi_max_align_2d(
            x_gpu, rois_gpu, roi_index_gpu,
            outsize=self.outsize, spatial_scale=self.spatial_scale,
            sampling_ratio=self.sampling_ratio,
        )
        testing.assert_allclose(y_cpu.data, cuda.to_cpu(y_gpu.data))

    def check_backward(self, x_data, roi_data, roi_index_data, y_grad):
        """Numerically check the gradient w.r.t. x.

        ``no_grads=[False, True, True]`` below restricts the check to x:
        rois and roi_indices are not differentiated.
        """
        def f(x, rois, roi_indices):
            y = functions.roi_max_align_2d(
                x, rois, roi_indices, outsize=self.outsize,
                spatial_scale=self.spatial_scale,
                sampling_ratio=self.sampling_ratio)
            # Replace infinite outputs with zeros so the numerical gradient
            # stays finite (presumably produced by degenerate RoIs — confirm).
            xp = chainer.backend.get_array_module(y)
            y = functions.where(
                xp.isinf(y.array), xp.zeros(y.shape, dtype=y.dtype), y)
            return y

        gradient_check.check_backward(
            f, (x_data, roi_data, roi_index_data), y_grad,
            no_grads=[False, True, True], **self.check_backward_options)

    @condition.retry(3)
    def test_backward_cpu(self):
        self.check_backward(self.x, self.rois, self.roi_indices, self.gy)

    @attr.gpu
    @condition.retry(3)
    def test_backward_gpu(self):
        self.check_backward(
            cuda.to_gpu(self.x), cuda.to_gpu(self.rois),
            cuda.to_gpu(self.roi_indices), cuda.to_gpu(self.gy))
# Execute the tests in this module under Chainer's test runner.
testing.run_module(__name__, __file__)
| mit |
lentinj/u-boot | tools/patman/patchstream.py | 20 | 16308 | # Copyright (c) 2011 The Chromium OS Authors.
#
# See file CREDITS for list of people who contributed to this
# project.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
import os
import re
import shutil
import tempfile
import command
import commit
import gitutil
from series import Series
# Tags that we detect and remove
re_remove = re.compile(r'^BUG=|^TEST=|^Change-Id:|^Review URL:'
                       r'|Reviewed-on:|Reviewed-by:')

# Lines which are allowed after a TEST= line
re_allowed_after_test = re.compile(r'^Signed-off-by:')

# The start of the cover letter
re_cover = re.compile(r'^Cover-letter:')

# Patch series tag: group 1 is the tag name, group 2 the value.
# Raw strings are used for all patterns so escapes like \w and \t are
# passed to the regex engine explicitly instead of relying on Python
# leaving unknown string escapes alone.
re_series = re.compile(r'^Series-(\w*): *(.*)')

# Commit tags that we want to collect and keep
re_tag = re.compile(r'^(Tested-by|Acked-by|Signed-off-by|Cc): (.*)')

# The start of a new commit in the git log
re_commit = re.compile(r'^commit (.*)')

# We detect these since checkpatch doesn't always do it
re_space_before_tab = re.compile(r'^[+].* \t')

# States we can be in - can we use range() and still have comments?
STATE_MSG_HEADER = 0        # Still in the message header
STATE_PATCH_SUBJECT = 1     # In patch subject (first line of log for a commit)
STATE_PATCH_HEADER = 2      # In patch header (after the subject)
STATE_DIFFS = 3             # In the diff part (past --- line)
class PatchStream:
    """Class for detecting/injecting tags in a patch or series of patches

    We support processing the output of 'git log' to read out the tags we
    are interested in. We can also process a patch file in order to remove
    unwanted tags or inject additional ones. These correspond to the two
    phases of processing.
    """
    def __init__(self, series, name=None, is_log=False):
        self.skip_blank = False          # True to skip a single blank line
        self.found_test = False          # Found a TEST= line
        self.lines_after_test = 0        # Number of lines found after TEST=
        self.warn = []                   # List of warnings we have collected
        self.linenum = 1                 # Output line number we are up to
        self.in_section = None           # Name of start...END section we are in
        self.notes = []                  # Series notes
        self.section = []                # The current section...END section
        self.series = series             # Info about the patch series
        self.is_log = is_log             # True if indent like git log
        self.in_change = 0               # Non-zero if we are in a change list
        self.blank_count = 0             # Number of blank lines stored up
        self.state = STATE_MSG_HEADER    # What state are we in?
        self.tags = []                   # Tags collected, like Tested-by...
        self.signoff = []                # Contents of signoff line
        self.commit = None               # Current commit

    def AddToSeries(self, line, name, value):
        """Add a new Series-xxx tag.

        When a Series-xxx tag is detected, we come here to record it, if we
        are scanning a 'git log'.

        Args:
            line: Source line containing tag (useful for debug/error messages)
            name: Tag name (part after 'Series-')
            value: Tag value (part after 'Series-xxx: ')
        """
        if name == 'notes':
            self.in_section = name
            self.skip_blank = False
        if self.is_log:
            self.series.AddTag(self.commit, line, name, value)

    def CloseCommit(self):
        """Save the current commit into our commit list, and reset our state"""
        if self.commit and self.is_log:
            self.series.AddCommit(self.commit)
            self.commit = None

    def FormatTags(self, tags):
        """Sort tag lines and expand Cc: aliases into full email lists.

        Args:
            tags: List of tag lines, e.g. 'Tested-by: ...', 'Cc: a,b'
        Returns:
            Sorted list of output lines with Cc entries expanded.
        """
        out_list = []
        for tag in sorted(tags):
            if tag.startswith('Cc:'):
                # Expand the comma-separated alias list via gitutil
                tag_list = tag[4:].split(',')
                out_list += gitutil.BuildEmailList(tag_list, 'Cc:')
            else:
                out_list.append(tag)
        return out_list

    def ProcessLine(self, line):
        """Process a single line of a patch file or commit log

        This process a line and returns a list of lines to output. The list
        may be empty or may contain multiple output lines.

        This is where all the complicated logic is located. The class's
        state is used to move between different states and detect things
        properly.

        We can be in one of two modes:
            self.is_log == True: This is 'git log' mode, where most output is
                indented by 4 characters and we are scanning for tags
            self.is_log == False: This is 'patch' mode, where we already have
                all the tags, and are processing patches to remove junk we
                don't want, and add things we think are required.

        Args:
            line: text line to process
        Returns:
            list of output lines, or [] if nothing should be output
        """
        # Initially we have no output. Prepare the input line string
        out = []
        line = line.rstrip('\n')
        if self.is_log:
            if line[:4] == '    ':
                line = line[4:]
        # Handle state transition and skipping blank lines
        series_match = re_series.match(line)
        commit_match = re_commit.match(line) if self.is_log else None
        tag_match = None
        if self.state == STATE_PATCH_HEADER:
            tag_match = re_tag.match(line)
        is_blank = not line.strip()
        if is_blank:
            if (self.state == STATE_MSG_HEADER
                    or self.state == STATE_PATCH_SUBJECT):
                self.state += 1
            # We don't have a subject in the text stream of patch files
            # It has its own line with a Subject: tag
            if not self.is_log and self.state == STATE_PATCH_SUBJECT:
                self.state += 1
        elif commit_match:
            self.state = STATE_MSG_HEADER
        # If we are in a section, keep collecting lines until we see END
        if self.in_section:
            if line == 'END':
                if self.in_section == 'cover':
                    self.series.cover = self.section
                elif self.in_section == 'notes':
                    if self.is_log:
                        self.series.notes += self.section
                else:
                    self.warn.append("Unknown section '%s'" % self.in_section)
                self.in_section = None
                self.skip_blank = True
                self.section = []
            else:
                self.section.append(line)
        # Detect the commit subject
        elif not is_blank and self.state == STATE_PATCH_SUBJECT:
            self.commit.subject = line
        # Detect the tags we want to remove, and skip blank lines
        elif re_remove.match(line):
            self.skip_blank = True
            # TEST= should be the last thing in the commit, so remove
            # everything after it
            if line.startswith('TEST='):
                self.found_test = True
        elif self.skip_blank and is_blank:
            self.skip_blank = False
        # Detect the start of a cover letter section
        elif re_cover.match(line):
            self.in_section = 'cover'
            self.skip_blank = False
        # If we are in a change list, keep collecting lines until a blank one
        elif self.in_change:
            if is_blank:
                # Blank line ends this change list
                self.in_change = 0
            else:
                self.series.AddChange(self.in_change, self.commit, line)
            self.skip_blank = False
        # Detect Series-xxx tags
        elif series_match:
            name = series_match.group(1)
            value = series_match.group(2)
            if name == 'changes':
                # value is the version number: e.g. 1, or 2
                try:
                    value = int(value)
                # NOTE(review): 'str' shadows the builtin here — worth
                # renaming when this code is next touched.
                except ValueError as str:
                    raise ValueError("%s: Cannot decode version info '%s'" %
                            (self.commit.hash, line))
                self.in_change = int(value)
            else:
                self.AddToSeries(line, name, value)
                self.skip_blank = True
        # Detect the start of a new commit
        elif commit_match:
            self.CloseCommit()
            # Keep only the abbreviated (7-char) hash
            self.commit = commit.Commit(commit_match.group(1)[:7])
        # Detect tags in the commit message
        elif tag_match:
            # Only allow a single signoff tag
            if tag_match.group(1) == 'Signed-off-by':
                if self.signoff:
                    self.warn.append('Patch has more than one Signed-off-by '
                            'tag')
                self.signoff += [line]
            # Remove Tested-by self, since few will take much notice
            elif (tag_match.group(1) == 'Tested-by' and
                    tag_match.group(2).find(os.getenv('USER') + '@') != -1):
                self.warn.append("Ignoring %s" % line)
            elif tag_match.group(1) == 'Cc':
                self.commit.AddCc(tag_match.group(2).split(','))
            else:
                self.tags.append(line);
        # Well that means this is an ordinary line
        else:
            pos = 1
            # Look for ugly ASCII characters
            for ch in line:
                # TODO: Would be nicer to report source filename and line
                if ord(ch) > 0x80:
                    self.warn.append("Line %d/%d ('%s') has funny ascii char" %
                            (self.linenum, pos, line))
                pos += 1
            # Look for space before tab
            m = re_space_before_tab.match(line)
            if m:
                self.warn.append('Line %d/%d has space before tab' %
                        (self.linenum, m.start()))
            # OK, we have a valid non-blank line
            out = [line]
            self.linenum += 1
            self.skip_blank = False
            if self.state == STATE_DIFFS:
                pass
            # If this is the start of the diffs section, emit our tags and
            # change log
            elif line == '---':
                self.state = STATE_DIFFS
                # Output the tags (signoff first), then change list
                out = []
                if self.signoff:
                    out += self.signoff
                log = self.series.MakeChangeLog(self.commit)
                out += self.FormatTags(self.tags)
                out += [line] + log
            elif self.found_test:
                if not re_allowed_after_test.match(line):
                    self.lines_after_test += 1
        return out

    def Finalize(self):
        """Close out processing of this patch stream"""
        self.CloseCommit()
        if self.lines_after_test:
            self.warn.append('Found %d lines after TEST=' %
                    self.lines_after_test)

    def ProcessStream(self, infd, outfd):
        """Copy a stream from infd to outfd, filtering out unwanted things.

        This is used to process patch files one at a time.

        Args:
            infd: Input stream file object
            outfd: Output stream file object
        """
        # Extract the filename from each diff, for nice warnings
        fname = None
        last_fname = None
        re_fname = re.compile('diff --git a/(.*) b/.*')
        while True:
            line = infd.readline()
            if not line:
                break
            out = self.ProcessLine(line)
            # Try to detect blank lines at EOF
            for line in out:
                match = re_fname.match(line)
                if match:
                    last_fname = fname
                    fname = match.group(1)
                if line == '+':
                    # Defer trailing '+' (blank added) lines: if they turn
                    # out to end the file, warn about them below.
                    self.blank_count += 1
                else:
                    if self.blank_count and (line == '-- ' or match):
                        self.warn.append("Found possible blank line(s) at "
                                "end of file '%s'" % last_fname)
                    outfd.write('+\n' * self.blank_count)
                    outfd.write(line + '\n')
                    self.blank_count = 0
        self.Finalize()
def GetMetaData(start, count):
    """Reads out patch series metadata from the commits

    This does a 'git log' on the relevant commits and pulls out the tags we
    are interested in.

    Args:
        start: Commit to start from: 0=HEAD, 1=next one, etc.
        count: Number of commits to list
    Returns:
        A Series object populated from the scanned commits.
    """
    git_args = ['git', 'log', '--reverse', 'HEAD~%d' % start, '-n%d' % count]
    stdout = command.RunPipe([git_args], capture=True)
    series = Series()
    parser = PatchStream(series, is_log=True)
    for text_line in stdout.splitlines():
        parser.ProcessLine(text_line)
    parser.Finalize()
    return series
def FixPatch(backup_dir, fname, series, commit):
    """Fix up a patch file, by adding/removing as required.

    We remove our tags from the patch file, insert changes lists, etc.
    The patch file is processed in place, and overwritten.

    A backup file is put into backup_dir (if not None).

    Args:
        backup_dir: Directory to receive a copy of the original file, or None
        fname: Filename to patch file to process
        series: Series information about this patch set
        commit: Commit object for this patch file
    Return:
        A list of errors, or [] if all ok.
    """
    # Filter into a temporary file first, then move it over the original
    # once processing has completed.
    handle, tmpname = tempfile.mkstemp()
    outfd = os.fdopen(handle, 'w')
    infd = open(fname, 'r')
    ps = PatchStream(series)
    ps.commit = commit
    ps.ProcessStream(infd, outfd)
    infd.close()
    outfd.close()
    # Create a backup file if required
    if backup_dir:
        shutil.copy(fname, os.path.join(backup_dir, os.path.basename(fname)))
    shutil.move(tmpname, fname)
    return ps.warn
def FixPatches(series, fnames):
"""Fix up a list of patches identified by filenames
The patch files are processed in place, and overwritten.
Args:
series: The series object
fnames: List of patch files to process
"""
# Current workflow creates patches, so we shouldn't need a backup
backup_dir = None #tempfile.mkdtemp('clean-patch')
count = 0
for fname in fnames:
commit = series.commits[count]
commit.patch = fname
result = FixPatch(backup_dir, fname, series, commit)
if result:
print '%d warnings for %s:' % (len(result), fname)
for warn in result:
print '\t', warn
print
count += 1
print 'Cleaned %d patches' % count
return series
def InsertCoverLetter(fname, series, count):
    """Inserts a cover letter with the required info into patch 0

    Args:
        fname: Input / output filename of the cover letter file
        series: Series object
        count: Number of patches in the series
    """
    fd = open(fname, 'r')
    lines = fd.readlines()
    fd.close()
    fd = open(fname, 'w')
    text = series.cover
    prefix = series.GetPatchPrefix()
    for line in lines:
        if line.startswith('Subject:'):
            # TODO: if more than 10 patches this should save 00/xx, not 0/xx
            line = 'Subject: [%s 0/%d] %s\n' % (prefix, count, text[0])
        # Insert our cover letter
        elif line.startswith('*** BLURB HERE ***'):
            # First the blurb text
            line = '\n'.join(text[1:]) + '\n'
            if series.get('notes'):
                line += '\n'.join(series.notes) + '\n'
            # Now the change list
            out = series.MakeChangeLog(None)
            line += '\n' + '\n'.join(out)
        fd.write(line)
    fd.close()
| gpl-2.0 |
CUCWD/edx-platform | lms/djangoapps/instructor_task/models.py | 9 | 12722 | """
WE'RE USING MIGRATIONS!
If you make changes to this model, be sure to create an appropriate migration
file and check it in at the same time as your model changes. To do that,
1. Go to the edx-platform dir
2. ./manage.py schemamigration instructor_task --auto description_of_your_change
3. Add the migration file created in edx-platform/lms/djangoapps/instructor_task/migrations/
ASSUMPTIONS: modules have unique IDs, even across different module_types
"""
import codecs
import csv
import hashlib
import json
import logging
import os.path
from uuid import uuid4
from boto.exception import BotoServerError
from django.conf import settings
from django.contrib.auth.models import User
from django.core.files.base import ContentFile
from django.db import models, transaction
from opaque_keys.edx.django.models import CourseKeyField
from six import text_type
from openedx.core.storage import get_storage
# Module-level logger, used by the report stores below.
logger = logging.getLogger(__name__)
# define custom states used by InstructorTask (stored in task_state in
# addition to the standard celery states; QUEUING is set at creation)
QUEUING = 'QUEUING'
PROGRESS = 'PROGRESS'
class InstructorTask(models.Model):
    """
    Stores information about background tasks that have been submitted to
    perform work by an instructor (or course staff).
    Examples include grading and rescoring.

    `task_type` identifies the kind of task being performed, e.g. rescoring.
    `course_id` uses the course run's unique id to identify the course.
    `task_key` stores relevant input arguments encoded into key value for testing to see
    if the task is already running (together with task_type and course_id).
    `task_input` stores input arguments as JSON-serialized dict, for reporting purposes.
    Examples include url of problem being rescored, id of student if only one student being rescored.
    `task_id` stores the id used by celery for the background task.
    `task_state` stores the last known state of the celery task
    `task_output` stores the output of the celery task.
    Format is a JSON-serialized dict. Content varies by task_type and task_state.
    `requester` stores id of user who submitted the task
    `created` stores date that entry was first created
    `updated` stores date that entry was last modified
    """
    class Meta(object):
        app_label = "instructor_task"

    task_type = models.CharField(max_length=50, db_index=True)
    course_id = CourseKeyField(max_length=255, db_index=True)
    task_key = models.CharField(max_length=255, db_index=True)
    task_input = models.CharField(max_length=255)
    task_id = models.CharField(max_length=255, db_index=True)  # max_length from celery_taskmeta
    task_state = models.CharField(max_length=50, null=True, db_index=True)  # max_length from celery_taskmeta
    task_output = models.CharField(max_length=1024, null=True)
    requester = models.ForeignKey(User, db_index=True, on_delete=models.CASCADE)
    created = models.DateTimeField(auto_now_add=True, null=True)
    updated = models.DateTimeField(auto_now=True)
    subtasks = models.TextField(blank=True)  # JSON dictionary

    def __repr__(self):
        return 'InstructorTask<%r>' % ({
            'task_type': self.task_type,
            'course_id': self.course_id,
            'task_input': self.task_input,
            'task_id': self.task_id,
            'task_state': self.task_state,
            'task_output': self.task_output,
        },)

    def __unicode__(self):
        # Python 2 text representation; delegates to __repr__.
        return unicode(repr(self))

    @classmethod
    def create(cls, course_id, task_type, task_key, task_input, requester):
        """
        Create an instance of InstructorTask.

        Serializes `task_input` to JSON and raises ValueError if the result
        exceeds the 255-character column limit.
        """
        # create the task_id here, and pass it into celery:
        task_id = str(uuid4())
        json_task_input = json.dumps(task_input)
        # check length of task_input, and return an exception if it's too long:
        if len(json_task_input) > 255:
            fmt = 'Task input longer than 255: "{input}" for "{task}" of "{course}"'
            msg = fmt.format(input=json_task_input, task=task_type, course=course_id)
            raise ValueError(msg)
        # create the task, then save it:
        instructor_task = cls(
            course_id=course_id,
            task_type=task_type,
            task_id=task_id,
            task_key=task_key,
            task_input=json_task_input,
            task_state=QUEUING,
            requester=requester
        )
        instructor_task.save_now()
        return instructor_task

    @transaction.atomic
    def save_now(self):
        """
        Writes InstructorTask immediately, ensuring the transaction is committed.
        """
        self.save()

    @staticmethod
    def create_output_for_success(returned_result):
        """
        Converts successful result to output format.

        Raises a ValueError exception if the output is too long.
        """
        # In future, there should be a check here that the resulting JSON
        # will fit in the column. In the meantime, just return an exception.
        json_output = json.dumps(returned_result)
        if len(json_output) > 1023:
            raise ValueError("Length of task output is too long: {0}".format(json_output))
        return json_output

    @staticmethod
    def create_output_for_failure(exception, traceback_string):
        """
        Converts failed result information to output format.

        Traceback information is truncated or not included if it would result in an output string
        that would not fit in the database. If the output is still too long, then the
        exception message is also truncated.

        Truncation is indicated by adding "..." to the end of the value.
        """
        tag = '...'
        task_progress = {'exception': type(exception).__name__, 'message': text_type(exception)}
        if traceback_string is not None:
            # truncate any traceback that goes into the InstructorTask model:
            task_progress['traceback'] = traceback_string
        json_output = json.dumps(task_progress)
        # if the resulting output is too long, then first shorten the
        # traceback, and then the message, until it fits.
        too_long = len(json_output) - 1023
        if too_long > 0:
            if traceback_string is not None:
                if too_long >= len(traceback_string) - len(tag):
                    # remove the traceback entry entirely (so no key or value)
                    del task_progress['traceback']
                    too_long -= (len(traceback_string) + len('traceback'))
                else:
                    # truncate the traceback:
                    task_progress['traceback'] = traceback_string[:-(too_long + len(tag))] + tag
                    too_long = 0
            if too_long > 0:
                # we need to shorten the message:
                task_progress['message'] = task_progress['message'][:-(too_long + len(tag))] + tag
            json_output = json.dumps(task_progress)
        return json_output

    @staticmethod
    def create_output_for_revoked():
        """Creates standard message to store in output format for revoked tasks."""
        return json.dumps({'message': 'Task revoked before running'})
class ReportStore(object):
    """
    Simple abstraction layer that can fetch and store CSV files for reports
    download. Should probably refactor later to create a ReportFile object that
    can simply be appended to for the sake of memory efficiency, rather than
    passing in the whole dataset. Doing that for now just because it's simpler.
    """
    @classmethod
    def from_config(cls, config_name):
        """
        Return one of the ReportStore subclasses depending on django
        configuration. Look at subclasses for expected configuration.
        """
        # Convert old configuration parameters to those expected by
        # DjangoStorageReportStore for backward compatibility
        config = getattr(settings, config_name, {})
        storage_type = config.get('STORAGE_TYPE', '').lower()
        if storage_type == 's3':
            return DjangoStorageReportStore(
                storage_class='openedx.core.storage.S3ReportStorage',
                storage_kwargs={
                    'bucket': config['BUCKET'],
                    'location': config['ROOT_PATH'],
                    'custom_domain': config.get("CUSTOM_DOMAIN", None),
                    'querystring_expire': 300,
                    'gzip': True,
                },
            )
        elif storage_type == 'localfs':
            return DjangoStorageReportStore(
                storage_class='django.core.files.storage.FileSystemStorage',
                storage_kwargs={
                    'location': config['ROOT_PATH'],
                },
            )
        # No legacy STORAGE_TYPE given: use the storage-class based config.
        return DjangoStorageReportStore.from_config(config_name)

    def _get_utf8_encoded_rows(self, rows):
        """
        Given a list of `rows` containing unicode strings, return a
        new list of rows with those strings encoded as utf-8 for CSV
        compatibility.
        """
        # Python 2 csv writers require byte strings, hence the encode.
        for row in rows:
            yield [unicode(item).encode('utf-8') for item in row]
class DjangoStorageReportStore(ReportStore):
    """
    ReportStore implementation that delegates to django's storage api.
    """
    def __init__(self, storage_class=None, storage_kwargs=None):
        # storage_class: import path of the backend; storage_kwargs: its
        # constructor keyword arguments. Both optional.
        if storage_kwargs is None:
            storage_kwargs = {}
        self.storage = get_storage(storage_class, **storage_kwargs)

    @classmethod
    def from_config(cls, config_name):
        """
        By default, the default file storage specified by the `DEFAULT_FILE_STORAGE`
        setting will be used. To configure the storage used, add a dict in
        settings with the following fields::

            STORAGE_CLASS : The import path of the storage class to use. If
                not set, the DEFAULT_FILE_STORAGE setting will be used.
            STORAGE_KWARGS : An optional dict of kwargs to pass to the storage
                constructor. This can be used to specify a
                different S3 bucket or root path, for example.

        Reference the setting name when calling `.from_config`.
        """
        return cls(
            getattr(settings, config_name).get('STORAGE_CLASS'),
            getattr(settings, config_name).get('STORAGE_KWARGS'),
        )

    def store(self, course_id, filename, buff):
        """
        Store the contents of `buff` in a directory determined by hashing
        `course_id`, and name the file `filename`. `buff` can be any file-like
        object, ready to be read from the beginning.
        """
        path = self.path_to(course_id, filename)
        self.storage.save(path, buff)

    def store_rows(self, course_id, filename, rows):
        """
        Given a course_id, filename, and rows (each row is an iterable of
        strings), write the rows to the storage backend in csv format.
        """
        output_buffer = ContentFile('')
        # Adding unicode signature (BOM) for MS Excel 2013 compatibility
        output_buffer.write(codecs.BOM_UTF8)
        csvwriter = csv.writer(output_buffer)
        csvwriter.writerows(self._get_utf8_encoded_rows(rows))
        output_buffer.seek(0)
        self.store(course_id, filename, output_buffer)

    def links_for(self, course_id):
        """
        For a given `course_id`, return a list of `(filename, url)` tuples.
        Calls the `url` method of the underlying storage backend. Returned
        urls can be plugged straight into an href
        """
        course_dir = self.path_to(course_id)
        try:
            _, filenames = self.storage.listdir(course_dir)
        except OSError:
            # Django's FileSystemStorage fails with an OSError if the course
            # dir does not exist; other storage types return an empty list.
            return []
        except BotoServerError as ex:
            logger.error(
                u'Fetching files failed for course: %s, status: %s, reason: %s',
                course_id,
                ex.status,
                ex.reason
            )
            return []
        files = [(filename, os.path.join(course_dir, filename)) for filename in filenames]
        # Most recently modified report first.
        files.sort(key=lambda f: self.storage.modified_time(f[1]), reverse=True)
        return [
            (filename, self.storage.url(full_path))
            for filename, full_path in files
        ]

    def path_to(self, course_id, filename=''):
        """
        Return the full path to a given file for a given course.
        """
        # Hash the course id to get a storage-safe directory name.
        hashed_course_id = hashlib.sha1(text_type(course_id)).hexdigest()
        return os.path.join(hashed_course_id, filename)
| agpl-3.0 |
alien4cloud/alien4cloud-cloudify3-provider | alien4cloud-cloudify3-provider/src/test/resources/outputs/blueprints/openstack/artifact_testOverridden/wrapper/Artifact_Directory_Test/tosca.interfaces.node.lifecycle.Standard/create/_a4c_create.py | 4 | 17400 |
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from cloudify.state import ctx_parameters as inputs
import subprocess
import os
import re
import sys
import time
import threading
import platform
from StringIO import StringIO
from cloudify_rest_client import CloudifyClient
from cloudify import utils
# REST client used by the helper functions below to query the manager.
# NOTE(review): trust_all=True disables TLS certificate verification --
# presumably acceptable inside the manager's network; confirm.
if 'MANAGER_REST_PROTOCOL' in os.environ and os.environ['MANAGER_REST_PROTOCOL'] == "https":
    client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port(), protocol='https', trust_all=True)
else:
    client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port())
def convert_env_value_to_string(envDict):
    """Coerce every key and value of *envDict* to ``str``, in place.

    The original implementation popped entries from the dict while
    iterating over ``items()``, which raises RuntimeError on Python 3
    (and is fragile in general); iterate over a snapshot instead.
    """
    for key, value in list(envDict.items()):
        del envDict[key]
        envDict[str(key)] = str(value)
def get_attribute_user(ctx):
    """Return the 'user' attribute, or cloudify_agent['user'] as fallback.

    NOTE(review): the parameter name shadows the module-level cloudify ctx;
    it is simply the entity to resolve against.
    """
    if not get_attribute(ctx, 'user'):
        return get_attribute(ctx, 'cloudify_agent')['user']
    return get_attribute(ctx, 'user')
def get_attribute_key(ctx):
    """Return the 'key' attribute, or cloudify_agent['key'] as fallback."""
    if not get_attribute(ctx, 'key'):
        return get_attribute(ctx, 'cloudify_agent')['key']
    return get_attribute(ctx, 'key')
def get_host(entity):
    """Return the target of the entity's contained_in relationship, or None."""
    relationships = entity.instance.relationships or []
    for rel in relationships:
        if 'cloudify.relationships.contained_in' in rel.type_hierarchy:
            return rel.target
    return None
def has_attribute_mapping(entity, attribute_name):
    """Tell whether a non-trivial a4c mapping exists for attribute_name."""
    ctx.logger.info('Check if it exists mapping for attribute {0} in {1}'.format(attribute_name, entity.node.properties))
    mapping_configuration = entity.node.properties.get('_a4c_att_' + attribute_name, None)
    if mapping_configuration is None:
        return False
    params = mapping_configuration['parameters']
    # Mapping SELF onto the same attribute name is the identity mapping,
    # which does not count as a real mapping.
    is_identity = params[0] == 'SELF' and params[1] == attribute_name
    return not is_identity
def process_attribute_mapping(entity, attribute_name, data_retriever_function):
    """Follow an a4c attribute mapping and fetch the mapped value.

    Args:
        entity: node entity whose properties hold the mapping configuration
        attribute_name: logical attribute being resolved
        data_retriever_function: callable(entity, name) used to fetch data
    Returns:
        The mapped value, or "" when the mapping cannot be followed.
    """
    # This is where attribute mapping is defined in the cloudify type
    mapping_configuration = entity.node.properties['_a4c_att_' + attribute_name]
    ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
    # If the mapping configuration exist and if it concerns SELF then just get attribute of the mapped attribute name
    # Else if it concerns TARGET then follow the relationship and retrieved the mapped attribute name from the TARGET
    if mapping_configuration['parameters'][0] == 'SELF':
        return data_retriever_function(entity, mapping_configuration['parameters'][1])
    elif mapping_configuration['parameters'][0] == 'TARGET' and entity.instance.relationships:
        for relationship in entity.instance.relationships:
            if mapping_configuration['parameters'][1] in relationship.type_hierarchy:
                return data_retriever_function(relationship.target, mapping_configuration['parameters'][2])
    return ""
def get_nested_attribute(entity, attribute_names):
    """Resolve attribute_names[0] on entity, then walk the remaining keys.

    Returns "" as soon as an intermediate value is None.
    """
    current = get_attribute(entity, attribute_names[0])
    for key in attribute_names[1:]:
        if current is None:
            return ""
        current = current.get(key, None)
    return current
def _all_instances_get_nested_attribute(entity, attribute_names):
    # Multi-instance nested-attribute resolution is not implemented in this
    # generated wrapper; callers always receive None.
    return None
def get_attribute(entity, attribute_name):
    """Resolve attribute_name on entity.

    Resolution order: a4c attribute mapping, runtime attribute, node
    property, then recursively the containing host. Returns "" when
    nothing is found.
    """
    if has_attribute_mapping(entity, attribute_name):
        # First check if any mapping exist for attribute
        mapped_value = process_attribute_mapping(entity, attribute_name, get_attribute)
        ctx.logger.info('Mapping exists for attribute {0} with value {1}'.format(attribute_name, mapped_value))
        return mapped_value
    # No mapping exist, try to get directly the attribute from the entity
    attribute_value = entity.instance.runtime_properties.get(attribute_name, None)
    if attribute_value is not None:
        ctx.logger.info('Found the attribute {0} with value {1} on the node {2}'.format(attribute_name, attribute_value, entity.node.id))
        return attribute_value
    # Attribute retrieval fails, fall back to property
    property_value = entity.node.properties.get(attribute_name, None)
    if property_value is not None:
        return property_value
    # Property retrieval fails, fall back to host instance
    host = get_host(entity)
    if host is not None:
        ctx.logger.info('Attribute not found {0} go up to the parent node {1}'.format(attribute_name, host.node.id))
        return get_attribute(host, attribute_name)
    # Nothing is found
    return ""
def _all_instances_get_attribute(entity, attribute_name):
    """Collect attribute_name from every instance of entity's node.

    Returns:
        dict keyed by '<instance_id>_' mapping to the resolved value, for
        every instance where a value was found.
    """
    result_map = {}
    # get all instances data using cfy rest client
    # we have to get the node using the rest client with node_instance.node_id
    # then we will have the relationships
    node = client.nodes.get(ctx.deployment.id, entity.node.id)
    all_node_instances = client.node_instances.list(ctx.deployment.id, entity.node.id)
    for node_instance in all_node_instances:
        prop_value = __recursively_get_instance_data(node, node_instance, attribute_name)
        if prop_value is not None:
            ctx.logger.info('Found the property/attribute {0} with value {1} on the node {2} instance {3}'.format(attribute_name, prop_value, entity.node.id,
                                                                                                                  node_instance.id))
            result_map[node_instance.id + '_'] = prop_value
    return result_map
def get_property(entity, property_name):
    """Resolve a node property, falling back to the parent host chain.

    Returns the empty string when neither the entity nor any of its host
    ancestors define the property.
    """
    # Try to get the property value on the node
    property_value = entity.node.properties.get(property_name, None)
    if property_value is not None:
        ctx.logger.info('Found the property {0} with value {1} on the node {2}'.format(property_name, property_value, entity.node.id))
        return property_value
    # No property found on the node, fall back to the host
    host = get_host(entity)
    if host is not None:
        ctx.logger.info('Property not found {0} go up to the parent node {1}'.format(property_name, host.node.id))
        return get_property(host, property_name)
    return ""
def get_instance_list(node_id):
    """Return a comma-separated string of all instance ids for *node_id*.

    Queries the Cloudify REST client for every instance of the node in the
    current deployment.
    """
    all_node_instances = client.node_instances.list(ctx.deployment.id, node_id)
    # str.join replaces the original quadratic manual concatenation loop
    # while producing the exact same comma-separated result.
    return ','.join(node_instance.id for node_instance in all_node_instances)
def get_host_node_name(instance):
    """Return the node id of the host containing *instance*, or None.

    The host is the target of the first ``contained_in`` relationship.
    """
    containing_hosts = (
        relationship.target.node.id
        for relationship in instance.relationships
        if 'cloudify.relationships.contained_in' in relationship.type_hierarchy
    )
    return next(containing_hosts, None)
def __get_relationship(node, target_name, relationship_type):
    """Return the node's relationship dict matching the given target id and
    relationship type, or None when no such relationship exists."""
    for relationship in node.relationships:
        if relationship.get('target_id') == target_name and relationship_type in relationship.get('type_hierarchy'):
            return relationship
    return None
def __has_attribute_mapping(node, attribute_name):
    """Return True when the node declares a non-trivial a4c mapping for
    *attribute_name*.

    A SELF mapping onto the same attribute name is the identity mapping and
    is treated as "no mapping" so callers read the attribute directly.
    """
    ctx.logger.info('Check if it exists mapping for attribute {0} in {1}'.format(attribute_name, node.properties))
    mapping_configuration = node.properties.get('_a4c_att_' + attribute_name, None)
    if mapping_configuration is None:
        return False
    parameters = mapping_configuration['parameters']
    # `return not (...)` replaces the original if/else returning literal
    # True/False — same truth table, less code.
    return not (parameters[0] == 'SELF' and parameters[1] == attribute_name)
def __process_attribute_mapping(node, node_instance, attribute_name, data_retriever_function):
    """Follow an a4c attribute mapping and retrieve the mapped value.

    A SELF mapping re-reads another attribute on the same node/instance;
    a TARGET mapping follows the first relationship whose type matches the
    mapping and retrieves the mapped attribute from the relationship target.
    Returns None when the mapping cannot be resolved.
    """
    # This is where attribute mapping is defined in the cloudify type
    mapping_configuration = node.properties['_a4c_att_' + attribute_name]
    ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
    # If the mapping configuration exist and if it concerns SELF then just get attribute of the mapped attribute name
    # Else if it concerns TARGET then follow the relationship and retrieved the mapped attribute name from the TARGET
    if mapping_configuration['parameters'][0] == 'SELF':
        return data_retriever_function(node, node_instance, mapping_configuration['parameters'][1])
    elif mapping_configuration['parameters'][0] == 'TARGET' and node_instance.relationships:
        for rel in node_instance.relationships:
            # rel carries target_name/target_id/type; resolve the full
            # relationship dict to inspect its type hierarchy.
            relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
            if mapping_configuration['parameters'][1] in relationship.get('type_hierarchy'):
                target_instance = client.node_instances.get(rel.get('target_id'))
                target_node = client.nodes.get(ctx.deployment.id, target_instance.node_id)
                return data_retriever_function(target_node, target_instance, mapping_configuration['parameters'][2])
    return None
def __recursively_get_instance_data(node, node_instance, attribute_name):
    """Resolve an attribute for one instance: mapping first, then the
    instance's runtime properties, then (recursively) the containing parent
    instance.  Returns None when nothing resolves.
    """
    if __has_attribute_mapping(node, attribute_name):
        return __process_attribute_mapping(node, node_instance, attribute_name, __recursively_get_instance_data)
    attribute_value = node_instance.runtime_properties.get(attribute_name, None)
    if attribute_value is not None:
        return attribute_value
    elif node_instance.relationships:
        for rel in node_instance.relationships:
            # on rel we have target_name, target_id (instanceId), type
            relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
            if 'cloudify.relationships.contained_in' in relationship.get('type_hierarchy'):
                parent_instance = client.node_instances.get(rel.get('target_id'))
                parent_node = client.nodes.get(ctx.deployment.id, parent_instance.node_id)
                return __recursively_get_instance_data(parent_node, parent_instance, attribute_name)
        return None
    else:
        return None
def download(child_rel_path, child_abs_path, download_dir):
    """Download one blueprint resource and place it under *download_dir*.

    The resource is fetched via the Cloudify context, moved to
    ``download_dir/child_rel_path`` (creating intermediate directories),
    and the final path is returned.
    """
    artifact_downloaded_path = ctx.download_resource(child_abs_path)
    new_file = os.path.join(download_dir, child_rel_path)
    new_file_dir = os.path.dirname(new_file)
    if not os.path.exists(new_file_dir):
        os.makedirs(new_file_dir)
    os.rename(artifact_downloaded_path, new_file)
    ctx.logger.info('Downloaded artifact from path ' + child_abs_path + ', it\'s available now at ' + new_file)
    return new_file
def download_artifacts(artifacts, download_dir):
    """Download every artifact into *download_dir*.

    *artifacts* maps artifact names either to a single path string or to a
    list of {'relative_path', 'absolute_path'} dicts (directory artifacts).
    Returns a dict mapping each artifact name to its local path.
    """
    downloaded_artifacts = {}
    os.makedirs(download_dir)
    for artifact_name, artifact_ref in artifacts.items():
        ctx.logger.info('Download artifact ' + artifact_name)
        # basestring: this generated script targets Python 2.
        if isinstance(artifact_ref, basestring):
            downloaded_artifacts[artifact_name] = download(os.path.basename(artifact_ref), artifact_ref, download_dir)
        else:
            # Directory artifact: download each child under a per-artifact dir.
            child_download_dir = os.path.join(download_dir, artifact_name)
            for child_path in artifact_ref:
                download(child_path['relative_path'], child_path['absolute_path'], child_download_dir)
            downloaded_artifacts[artifact_name] = child_download_dir
    return downloaded_artifacts
# Build the environment passed to the wrapped lifecycle script: node/instance
# identifiers, execution credentials, and the declared artifact locations.
env_map = {}
env_map['NODE'] = ctx.node.id
env_map['INSTANCE'] = ctx.instance.id
env_map['INSTANCES'] = get_instance_list(ctx.node.id)
env_map['HOST'] = get_host_node_name(ctx.instance)
env_map['A4C_EXECUTION_HOST'] = get_attribute(ctx, 'ip_address')
env_map['A4C_EXECUTION_USER'] = get_attribute_user(ctx)
env_map['A4C_EXECUTION_KEY'] = get_attribute_key(ctx)
env_map['REQUIREMENT_PROPERTY'] = ''
env_map['CAPABILITY_PROPERTY'] = r'''It\'s really a great day to begin
to love
'''
# Artifact paths baked in by the Alien4Cloud generator for this blueprint.
node_artifacts = {
 "confs_directory": [
  {
   "relative_path": "settings.properties",
   "absolute_path": "_a4c_artifact/Artifact_Directory_Test/confs_directory/conf/settings.properties"
  }
  ,
  {
   "relative_path": "test/nestedDirTest.txt",
   "absolute_path": "_a4c_artifact/Artifact_Directory_Test/confs_directory/conf/test/nestedDirTest.txt"
  }
  ,
  {
   "relative_path": "log.properties",
   "absolute_path": "_a4c_artifact/Artifact_Directory_Test/confs_directory/conf/log.properties"
  }
 ]
}
relationship_artifacts = {
}
# Merge node and relationship artifacts, download them all, and expose the
# resulting local paths through the script environment.
artifacts = node_artifacts.copy()
artifacts.update(relationship_artifacts)
download_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'downloads')
env_map.update(download_artifacts(artifacts, download_dir))
# Operation-level env vars (if any) override the generated ones.
if inputs.get('process', None) is not None and inputs['process'].get('env', None) is not None:
    ctx.logger.info('Operation is executed with environment variable {0}'.format(inputs['process']['env']))
    env_map.update(inputs['process']['env'])
def parse_output(output):
    """Split a script's stdout into declared outputs and the last plain line.

    Lines of the form ``EXPECTED_OUTPUT_<NAME>=<value>`` are collected into
    the ``outputs`` mapping; by convention the last non-output line is the
    result of the operation and is returned as ``last_output``.
    """
    last_output = None
    outputs = {}
    # Raw string: avoids relying on the deprecated '\w' escape inside a
    # non-raw string literal (a SyntaxWarning/error in newer Pythons).
    pattern = re.compile(r'EXPECTED_OUTPUT_(\w+)=(.*)')
    for line in output.splitlines():
        match = pattern.match(line)
        if match is None:
            last_output = line
        else:
            output_name = match.group(1)
            output_value = match.group(2)
            outputs[output_name] = output_value
    return {'last_output': last_output, 'outputs': outputs}
def execute(script_path, process, outputNames, command_prefix=None, cwd=None):
    """Run a lifecycle script and return its parsed output.

    When *outputNames* is given, the script is run through a platform
    wrapper that echoes the expected outputs; stdout is then parsed with
    parse_output().  Raises NonRecoverableError on a non-zero exit code.
    Note: octal literals (0755) and unicode() make this Python 2 only.
    """
    os.chmod(script_path, 0755)
    on_posix = 'posix' in sys.builtin_module_names
    # Start from the current environment plus the operation's env mapping.
    env = os.environ.copy()
    process_env = process.get('env', {})
    env.update(process_env)
    if outputNames is not None:
        env['EXPECTED_OUTPUTS'] = outputNames
        if platform.system() == 'Windows':
            wrapper_path = ctx.download_resource("scriptWrapper.bat")
        else:
            wrapper_path = ctx.download_resource("scriptWrapper.sh")
            os.chmod(wrapper_path, 0755)
        command = '{0} {1}'.format(wrapper_path, script_path)
    else:
        command = script_path
    if command_prefix is not None:
        command = "{0} {1}".format(command_prefix, command)
    ctx.logger.info('Executing: {0} in env {1}'.format(command, env))
    process = subprocess.Popen(command,
                               shell=True,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               env=env,
                               cwd=cwd,
                               bufsize=1,
                               close_fds=on_posix)
    return_code = None
    # Drain stdout/stderr on background threads to avoid pipe deadlock
    # while polling the process for completion.
    stdout_consumer = OutputConsumer(process.stdout)
    stderr_consumer = OutputConsumer(process.stderr)
    while True:
        return_code = process.poll()
        if return_code is not None:
            break
        time.sleep(0.1)
    stdout_consumer.join()
    stderr_consumer.join()
    parsed_output = parse_output(stdout_consumer.buffer.getvalue())
    if outputNames is not None:
        outputNameList = outputNames.split(';')
        for outputName in outputNameList:
            ctx.logger.info('Ouput name: {0} value : {1}'.format(outputName, parsed_output['outputs'].get(outputName, None)))
    if return_code != 0:
        error_message = "Script {0} encountered error with return code {1} and standard output {2}, error output {3}".format(command, return_code,
                                                                                                                             stdout_consumer.buffer.getvalue(),
                                                                                                                             stderr_consumer.buffer.getvalue())
        # unicode(..., errors='ignore') drops undecodable bytes (Python 2).
        error_message = str(unicode(error_message, errors='ignore'))
        ctx.logger.error(error_message)
        raise NonRecoverableError(error_message)
    else:
        ok_message = "Script {0} executed normally with standard output {1} and error output {2}".format(command, stdout_consumer.buffer.getvalue(),
                                                                                                         stderr_consumer.buffer.getvalue())
        ok_message = str(unicode(ok_message, errors='ignore'))
        ctx.logger.info(ok_message)
    return parsed_output
class OutputConsumer(object):
    """Drain a subprocess pipe on a daemon thread into an in-memory buffer.

    Reading continuously prevents the child process from blocking when the
    OS pipe buffer fills up.
    """
    def __init__(self, out):
        # out: the readable end of a subprocess pipe
        self.out = out
        self.buffer = StringIO()
        self.consumer = threading.Thread(target=self.consume_output)
        self.consumer.daemon = True
        self.consumer.start()
    def consume_output(self):
        # Read line by line until EOF (readline returns b'' at EOF).
        for line in iter(self.out.readline, b''):
            self.buffer.write(line)
        self.out.close()
    def join(self):
        # Wait until the pipe is fully drained.
        self.consumer.join()
# Execute the 'create' lifecycle implementation artifact with the prepared
# environment, then publish its declared outputs as runtime properties.
new_script_process = {'env': env_map}
operationOutputNames = None
convert_env_value_to_string(new_script_process['env'])
parsed_output = execute(ctx.download_resource('_a4c_impl_artifact/Artifact_Directory_Test/tosca.interfaces.node.lifecycle.Standard/create/assertDirectoryCopied.sh'), new_script_process, operationOutputNames)
outputs = parsed_output['outputs'].items()
for k,v in outputs:
    ctx.logger.info('Output name: {0} value: {1}'.format(k, v))
    # Namespaced key so outputs from different operations do not collide.
    ctx.instance.runtime_properties['_a4c_OO:tosca.interfaces.node.lifecycle.Standard:create:{0}'.format(k)] = v
ctx.instance.update()
| apache-2.0 |
edx/edx-platform | openedx/core/djangoapps/oauth_dispatch/views.py | 4 | 4657 | """
Views that dispatch processing of OAuth requests to django-oauth2-provider or
django-oauth-toolkit as appropriate.
"""
import json
from django.conf import settings
from django.utils.decorators import method_decorator
from django.views.generic import View
from edx_django_utils import monitoring as monitoring_utils
from oauth2_provider import views as dot_views
from ratelimit import ALL
from ratelimit.decorators import ratelimit
from openedx.core.djangoapps.auth_exchange import views as auth_exchange_views
from openedx.core.djangoapps.oauth_dispatch import adapters
from openedx.core.djangoapps.oauth_dispatch.dot_overrides import views as dot_overrides_views
from openedx.core.djangoapps.oauth_dispatch.jwt import create_jwt_from_token
class _DispatchingView(View):
    """
    Base class that route views to the appropriate provider view. The default
    behavior routes based on client_id, but this can be overridden by redefining
    `select_backend()` if particular views need different behavior.
    """
    dot_adapter = adapters.DOTAdapter()
    def get_adapter(self, request):
        """
        Returns the appropriate adapter based on the OAuth client linked to the request.
        """
        client_id = self._get_client_id(request)
        # Recorded for observability; the DOT adapter is always returned.
        monitoring_utils.set_custom_attribute('oauth_client_id', client_id)
        return self.dot_adapter
    def dispatch(self, request, *args, **kwargs):
        """
        Dispatch the request to the selected backend's view.
        """
        backend = self.select_backend(request)
        view = self.get_view_for_backend(backend)
        return view(request, *args, **kwargs)
    def select_backend(self, request):
        """
        Return the backend of the adapter chosen for this request.
        django-oauth-toolkit (DOT) is currently the only supported provider,
        so this always resolves to the DOT backend.
        """
        return self.get_adapter(request).backend
    def get_view_for_backend(self, backend):
        """
        Return the appropriate view from the requested backend.
        """
        if backend == self.dot_adapter.backend:
            return self.dot_view.as_view()  # lint-amnesty, pylint: disable=no-member
        else:
            raise KeyError(f'Failed to dispatch view. Invalid backend {backend}')
    def _get_client_id(self, request):
        """
        Return the client_id from the provided request
        """
        if request.method == 'GET':
            return request.GET.get('client_id')
        else:
            return request.POST.get('client_id')
@method_decorator(
    ratelimit(
        key='openedx.core.djangoapps.util.ratelimit.real_ip', rate=settings.RATELIMIT_RATE,
        method=ALL, block=True
    ), name='dispatch'
)
class AccessTokenView(_DispatchingView):
    """
    Handle access token requests.

    Rate-limited per real client IP.  When the client asks for
    ``token_type=jwt`` the opaque DOT token in a successful response is
    replaced by a JWT built from the same token data.
    """
    dot_view = dot_views.TokenView
    def dispatch(self, request, *args, **kwargs):
        response = super().dispatch(request, *args, **kwargs)
        # token_type may arrive as a POST field or the X-Token-Type header.
        token_type = request.POST.get('token_type',
                                      request.META.get('HTTP_X_TOKEN_TYPE', 'no_token_type_supplied')).lower()
        monitoring_utils.set_custom_attribute('oauth_token_type', token_type)
        monitoring_utils.set_custom_attribute('oauth_grant_type', request.POST.get('grant_type', ''))
        # Only rewrite successful token responses.
        if response.status_code == 200 and token_type == 'jwt':
            response.content = self._build_jwt_response_from_access_token_response(request, response)
        return response
    def _build_jwt_response_from_access_token_response(self, request, response):
        """ Builds the content of the response, including the JWT token. """
        token_dict = json.loads(response.content.decode('utf-8'))
        jwt = create_jwt_from_token(token_dict, self.get_adapter(request))
        token_dict.update({
            'access_token': jwt,
            'token_type': 'JWT',
        })
        return json.dumps(token_dict)
class AuthorizationView(_DispatchingView):
    """
    Part of the authorization flow.

    Dispatches to the edX-customized DOT authorization view.
    """
    dot_view = dot_overrides_views.EdxOAuth2AuthorizationView
class AccessTokenExchangeView(_DispatchingView):
    """
    Exchange a third party auth token.

    Dispatches to the DOT-backed token-exchange view.
    """
    dot_view = auth_exchange_views.DOTAccessTokenExchangeView
class RevokeTokenView(_DispatchingView):
    """
    Dispatch to the RevokeTokenView of django-oauth-toolkit
    """
    dot_view = dot_views.RevokeTokenView
| agpl-3.0 |
Justin-Yuan/Image2Music-Generator | library/jython2.5.3/Lib/wsgiref/headers.py | 104 | 5916 | """Manage HTTP Response Headers
Much of this module is red-handedly pilfered from email.Message in the stdlib,
so portions are Copyright (C) 2001,2002 Python Software Foundation, and were
written by Barry Warsaw.
"""
from types import ListType, TupleType
# Regular expression that matches `special' characters in parameters, the
# existance of which force quoting of the parameter value.
import re
tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
def _formatparam(param, value=None, quote=1):
    """Convenience function to format and return a key=value pair.

    This will quote the value if needed or if quote is true; a missing or
    empty value yields just the bare parameter name.
    """
    # Guard clause: no (or empty) value means a bare parameter.
    if value is None or len(value) == 0:
        return param
    if not quote and tspecials.search(value) is None:
        return '%s=%s' % (param, value)
    # Escape backslashes and double quotes, then wrap in quotes.
    escaped = value.replace('\\', '\\\\').replace('"', r'\"')
    return '%s="%s"' % (param, escaped)
class Headers:
    """Manage a collection of HTTP response headers"""
    def __init__(self,headers):
        if type(headers) is not ListType:
            raise TypeError("Headers must be a list of name/value tuples")
        self._headers = headers
    def __len__(self):
        """Return the total number of headers, including duplicates."""
        return len(self._headers)
    def __setitem__(self, name, val):
        """Set the value of a header."""
        del self[name]
        self._headers.append((name, val))
    def __delitem__(self,name):
        """Delete all occurrences of a header, if present.
        Does *not* raise an exception if the header is missing.
        """
        name = name.lower()
        # PEP 8: '!=' replaces the deprecated '<>' comparison operator.
        self._headers[:] = [kv for kv in self._headers if kv[0].lower() != name]
    def __getitem__(self,name):
        """Get the first header value for 'name'
        Return None if the header is missing instead of raising an exception.
        Note that if the header appeared multiple times, exactly which
        occurrence gets returned is undefined.  Use get_all() to get all
        the values matching a header field name.
        """
        return self.get(name)
    def has_key(self, name):
        """Return true if the message contains the header."""
        return self.get(name) is not None
    __contains__ = has_key
    def get_all(self, name):
        """Return a list of all the values for the named field.
        These will be sorted in the order they appeared in the original header
        list or were added to this instance, and may contain duplicates. Any
        fields deleted and re-inserted are always appended to the header list.
        If no fields exist with the given name, returns an empty list.
        """
        name = name.lower()
        return [kv[1] for kv in self._headers if kv[0].lower()==name]
    def get(self,name,default=None):
        """Get the first header value for 'name', or return 'default'"""
        name = name.lower()
        for k,v in self._headers:
            if k.lower()==name:
                return v
        return default
    def keys(self):
        """Return a list of all the header field names.
        These will be sorted in the order they appeared in the original header
        list, or were added to this instance, and may contain duplicates.
        Any fields deleted and re-inserted are always appended to the header
        list.
        """
        return [k for k, v in self._headers]
    def values(self):
        """Return a list of all header values.
        These will be sorted in the order they appeared in the original header
        list, or were added to this instance, and may contain duplicates.
        Any fields deleted and re-inserted are always appended to the header
        list.
        """
        return [v for k, v in self._headers]
    def items(self):
        """Get all the header fields and values.
        These will be sorted in the order they were in the original header
        list, or were added to this instance, and may contain duplicates.
        Any fields deleted and re-inserted are always appended to the header
        list.
        """
        return self._headers[:]
    def __repr__(self):
        # '%r' replaces the removed backtick-repr syntax; output is identical.
        return "Headers(%r)" % (self._headers,)
    def __str__(self):
        """str() returns the formatted headers, complete with end line,
        suitable for direct HTTP transmission."""
        return '\r\n'.join(["%s: %s" % kv for kv in self._headers]+['',''])
    def setdefault(self,name,value):
        """Return first matching header value for 'name', or 'value'
        If there is no header named 'name', add a new header with name 'name'
        and value 'value'."""
        result = self.get(name)
        if result is None:
            self._headers.append((name,value))
            return value
        else:
            return result
    def add_header(self, _name, _value, **_params):
        """Extended header setting.
        _name is the header field to add. keyword arguments can be used to set
        additional parameters for the header field, with underscores converted
        to dashes. Normally the parameter will be added as key="value" unless
        value is None, in which case only the key will be added.
        Example:
        h.add_header('content-disposition', 'attachment', filename='bud.gif')
        Note that unlike the corresponding 'email.Message' method, this does
        *not* handle '(charset, language, value)' tuples: all values must be
        strings or None.
        """
        parts = []
        if _value is not None:
            parts.append(_value)
        for k, v in _params.items():
            if v is None:
                # Parameter with no value: emit the bare (dashed) name.
                parts.append(k.replace('_', '-'))
            else:
                parts.append(_formatparam(k.replace('_', '-'), v))
        self._headers.append((_name, "; ".join(parts)))
#
| gpl-2.0 |
alexsavio/SandS-EDB | src/settings.py | 1 | 2867 | import socket
import models
import os
import sys
basedir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(basedir))
from keys import *
#References:
#http://python-eve.org/config.html
# We want to run seamlessly our API both locally and on Heroku, so:
hn = socket.gethostname()
# Every deployment host shares the same MongoDB settings; only the API entry
# point differs: the production box ('gicSands') uses the public server name,
# the development machines use the localhost name.  Deduplicating the four
# previously byte-identical if/elif branches keeps them from drifting apart.
_KNOWN_HOSTS = ('gicSands', 'buccaneer', 'corsair', 'ns3366758')
if hn in _KNOWN_HOSTS:
    # Please note that MONGO_HOST and MONGO_PORT could very well be left
    # out as they already default to a bare bones local 'mongod' instance.
    MONGO_HOST = mongo_host
    MONGO_PORT = 27017
    MONGO_USERNAME = mongo_user
    MONGO_PASSWORD = mongo_password
    MONGO_DBNAME = mongo_sands_db
    # let's not forget the API entry point
    SERVER_NAME = server_name if hn == 'gicSands' else localhost_server_name
    URL_PREFIX = 'api'
#http://python-eve.org/features.html#hateoas-feature
HATEOAS = False
# Roles allowed to access role-protected endpoints.
ALLOWED_ROLES = ['superuser', 'admin']
# Enable reads (GET), inserts (POST) and DELETE for resources/collections
# (if you omit this line, the API will default to ['GET'] and provide
# read-only access to the endpoint).
RESOURCE_METHODS = ['GET', 'POST', 'DELETE']
# Enable reads (GET), edits (PATCH), replacements (PUT) and deletes of
# individual items (defaults to read-only item access).
ITEM_METHODS = ['GET', 'PATCH', 'PUT', 'DELETE']
# Resource/schema definitions live in the models module.
DOMAIN = models.DOMAIN
| gpl-3.0 |
UBERMALLOW/external_skia | tools/find_bad_images_in_skps.py | 172 | 7405 | #!/usr/bin/env python
# Copyright 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script will take as an argument either a list of skp files or a
set of directories that contains skp files. It will then test each
skp file with the `render_pictures` program. If that program either
spits out any unexpected output or doesn't return 0, I will flag that
skp file as problematic. We then extract all of the embedded images
inside the skp and test each one of them against the
SkImageDecoder::DecodeFile function. Again, we consider any
extraneous output or a bad return value an error. In the event of an
error, we retain the image and print out information about the error.
The output (on stdout) is formatted as a csv document.
A copy of each bad image is left in a directory created by
tempfile.mkdtemp().
"""
import glob
import os
import re
import shutil
import subprocess
import sys
import tempfile
import threading
import test_rendering # skia/trunk/tools. reuse FindPathToProgram()
USAGE = """
Usage:
{command} SKP_FILE [SKP_FILES]
{command} SKP_DIR [SKP_DIRS]\n
Environment variables:
To run multiple worker threads, set NUM_THREADS.
To use a different temporary storage location, set TMPDIR.
"""
def execute_program(args, ignores=None):
    """
    Execute a process and waits for it to complete. Returns all
    output (stderr and stdout) after (optional) filtering.
    @param args is passed into subprocess.Popen().
    @param ignores (optional) is a list of regular expression strings
    that will be ignored in the output.
    @returns a tuple (returncode, output)
    """
    if ignores is None:
        ignores = []
    else:
        # Compile once up front; each pattern is matched per output line.
        ignores = [re.compile(ignore) for ignore in ignores]
    # stderr is folded into stdout so a single stream is filtered.
    proc = subprocess.Popen(
        args,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT)
    # Keep only lines that match none of the ignore patterns.
    output = ''.join(
        line for line in proc.stdout
        if not any(bool(ignore.match(line)) for ignore in ignores))
    returncode = proc.wait()
    return (returncode, output)
def list_files(paths):
    """
    Accepts a list of directories or filenames on the command line.
    We do not choose to recurse into directories beyond one level.
    """
    class NotAFileException(Exception):
        pass
    for path in paths:
        # Expand globs ourselves; useful on win32 where the shell does not.
        for globbedpath in glob.iglob(path):
            if os.path.isfile(globbedpath):
                yield globbedpath
                continue
            if not os.path.isdir(globbedpath):
                raise NotAFileException('{} is not a file'.format(globbedpath))
            # Directory: yield its plain files, one level deep only.
            for filename in os.listdir(globbedpath):
                candidate = os.path.join(globbedpath, filename)
                if os.path.isfile(candidate):
                    yield candidate
class BadImageFinder(object):
    # Runs render_pictures over .skp files; when that fails, extracts the
    # embedded images and tests each with test_image_decoder.  Bad images
    # are kept in saved_image_dir and one CSV row per failure is written
    # to stdout.
    def __init__(self, directory=None):
        # Locate the two Skia helper binaries; both must exist.
        self.render_pictures = test_rendering.FindPathToProgram(
            'render_pictures')
        self.test_image_decoder = test_rendering.FindPathToProgram(
            'test_image_decoder')
        assert os.path.isfile(self.render_pictures)
        assert os.path.isfile(self.test_image_decoder)
        if directory is None:
            self.saved_image_dir = tempfile.mkdtemp(prefix='skia_skp_test_')
        else:
            assert os.path.isdir(directory)
            self.saved_image_dir = directory
        self.bad_image_count = 0
    def process_files(self, skp_files):
        # Sequentially test every given .skp file.
        for path in skp_files:
            self.process_file(path)
    def process_file(self, skp_file):
        """Test one .skp; on failure, extract and test its embedded images."""
        assert self.saved_image_dir is not None
        assert os.path.isfile(skp_file)
        args = [self.render_pictures, '--readPath', skp_file]
        # Known-noisy render_pictures lines that do not indicate failure.
        ignores = ['^process_in', '^deserializ', '^drawing...', '^Non-defaul']
        returncode, output = execute_program(args, ignores)
        if (returncode == 0) and not output:
            return
        # The skp failed: dump its encoded images and test them one by one.
        temp_image_dir = tempfile.mkdtemp(prefix='skia_skp_test___')
        args = [ self.render_pictures, '--readPath', skp_file,
                 '--writePath', temp_image_dir, '--writeEncodedImages']
        subprocess.call(args, stderr=open(os.devnull,'w'),
                        stdout=open(os.devnull,'w'))
        for image_name in os.listdir(temp_image_dir):
            image_path = os.path.join(temp_image_dir, image_name)
            assert(os.path.isfile(image_path))
            args = [self.test_image_decoder, image_path]
            returncode, output = execute_program(args, [])
            if (returncode == 0) and not output:
                # Image decodes cleanly; discard it.
                os.remove(image_path)
                continue
            try:
                shutil.move(image_path, self.saved_image_dir)
            except (shutil.Error,):
                # If this happens, don't stop the entire process,
                # just warn the user.
                os.remove(image_path)
                sys.stderr.write('{0} is a repeat.\n'.format(image_name))
            self.bad_image_count += 1
            # Translate well-known return codes into readable labels.
            if returncode == 2:
                returncode = 'SkImageDecoder::DecodeFile returns false'
            elif returncode == 0:
                returncode = 'extra verbosity'
                assert output
            elif returncode == -11:
                returncode = 'segmentation violation'
            else:
                returncode = 'returncode: {}'.format(returncode)
            # Normalize the output into a single CSV-safe field.
            output = output.strip().replace('\n',' ').replace('"','\'')
            suffix = image_name[-3:]
            output_line = '"{0}","{1}","{2}","{3}","{4}"\n'.format(
                returncode, suffix, skp_file, image_name, output)
            sys.stdout.write(output_line)
            sys.stdout.flush()
        os.rmdir(temp_image_dir)
        return
def main(main_argv):
    """Distribute the given .skp files/dirs over worker threads and report
    bad embedded images as CSV on stdout.  Returns a process exit code.
    Note: xrange makes this Python 2 only.
    """
    if not main_argv or main_argv[0] in ['-h', '-?', '-help', '--help']:
        sys.stderr.write(USAGE.format(command=__file__))
        return 1
    # NUM_THREADS env var controls the worker count (minimum 1).
    if 'NUM_THREADS' in os.environ:
        number_of_threads = int(os.environ['NUM_THREADS'])
        if number_of_threads < 1:
            number_of_threads = 1
    else:
        number_of_threads = 1
    # Silence decoder warnings so only real failures reach the output.
    os.environ['skia_images_png_suppressDecoderWarnings'] = 'true'
    os.environ['skia_images_jpeg_suppressDecoderWarnings'] = 'true'
    temp_dir = tempfile.mkdtemp(prefix='skia_skp_test_')
    sys.stderr.write('Directory for bad images: {}\n'.format(temp_dir))
    sys.stdout.write('"Error","Filetype","SKP File","Image File","Output"\n')
    sys.stdout.flush()
    # All finders share the same output directory for bad images.
    finders = [
        BadImageFinder(temp_dir) for index in xrange(number_of_threads)]
    arguments = [[] for index in xrange(number_of_threads)]
    for index, item in enumerate(list_files(main_argv)):
        ## split up the given targets among the worker threads
        arguments[index % number_of_threads].append(item)
    threads = [
        threading.Thread(
            target=BadImageFinder.process_files, args=(finder,argument))
        for finder, argument in zip(finders, arguments)]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
    number = sum(finder.bad_image_count for finder in finders)
    sys.stderr.write('Number of bad images found: {}\n'.format(number))
    return 0
# LocalWords: skp stdout csv
| bsd-3-clause |
camilonos77/bootstrap-form-python-generator | enviroment/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/dom.py | 1229 | 1457 | from __future__ import absolute_import, division, unicode_literals
from xml.dom import Node
import gettext
_ = gettext.gettext
from . import _base
class TreeWalker(_base.NonRecursiveTreeWalker):
    # Tree walker over xml.dom trees: translates DOM node types into the
    # html5lib token tuples expected by NonRecursiveTreeWalker.
    def getNodeDetails(self, node):
        """Return the html5lib token tuple describing *node*."""
        if node.nodeType == Node.DOCUMENT_TYPE_NODE:
            return _base.DOCTYPE, node.name, node.publicId, node.systemId
        elif node.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
            return _base.TEXT, node.nodeValue
        elif node.nodeType == Node.ELEMENT_NODE:
            # Attributes are keyed by (namespaceURI or None, local name).
            attrs = {}
            for attr in list(node.attributes.keys()):
                attr = node.getAttributeNode(attr)
                if attr.namespaceURI:
                    attrs[(attr.namespaceURI, attr.localName)] = attr.value
                else:
                    attrs[(None, attr.name)] = attr.value
            return (_base.ELEMENT, node.namespaceURI, node.nodeName,
                    attrs, node.hasChildNodes())
        elif node.nodeType == Node.COMMENT_NODE:
            return _base.COMMENT, node.nodeValue
        elif node.nodeType in (Node.DOCUMENT_NODE, Node.DOCUMENT_FRAGMENT_NODE):
            return (_base.DOCUMENT,)
        else:
            return _base.UNKNOWN, node.nodeType
    def getFirstChild(self, node):
        return node.firstChild
    def getNextSibling(self, node):
        return node.nextSibling
    def getParentNode(self, node):
        return node.parentNode
| gpl-2.0 |
marcelm/xopen | tests/test_xopen.py | 1 | 12338 | import io
import os
import random
import shutil
import signal
import sys
import time
import pytest
from pathlib import Path
from xopen import xopen, PipedCompressionWriter, PipedGzipReader, \
PipedGzipWriter, _MAX_PIPE_SIZE, _can_read_concatenated_gz
# Compression extensions to parametrize the tests over; .xz is included
# only when the lzma module is available on this interpreter.
extensions = ["", ".gz", ".bz2"]
try:
    import lzma
    extensions.append(".xz")
except ImportError:
    lzma = None
try:
    import fcntl
    # Provide F_GETPIPE_SZ on Linux builds whose fcntl lacks the constant.
    if not hasattr(fcntl, "F_GETPIPE_SZ") and sys.platform == "linux":
        setattr(fcntl, "F_GETPIPE_SZ", 1032)
except ImportError:
    fcntl = None
# Pre-made fixture files: tests/file.txt plus one compressed copy per ext.
base = "tests/file.txt"
files = [base + ext for ext in extensions]
CONTENT_LINES = ['Testing, testing ...\n', 'The second line.\n']
CONTENT = ''.join(CONTENT_LINES)
# Parametrized fixtures: run a test once per compression extension / file.
@pytest.fixture(params=extensions)
def ext(request):
    return request.param
@pytest.fixture(params=files)
def fname(request):
    return request.param
@pytest.fixture
def lacking_pigz_permissions(tmp_path):
    """
    Set PATH to a directory that contains a pigz binary with permissions set to 000.
    If no suitable pigz binary could be found, PATH is set to an empty directory
    """
    pigz_path = shutil.which("pigz")
    if pigz_path:
        shutil.copy(pigz_path, str(tmp_path))
        os.chmod(str(tmp_path / "pigz"), 0)
    path = os.environ["PATH"]
    os.environ["PATH"] = str(tmp_path)
    yield
    # Restore the original PATH after the test.
    os.environ["PATH"] = path
@pytest.fixture
def large_gzip(tmpdir):
    # Gzip file whose decompressed payload exceeds the OS pipe buffer size.
    path = str(tmpdir.join("large.gz"))
    random_text = ''.join(random.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZ\n') for _ in range(1024))
    # Make the text a lot bigger in order to ensure that it is larger than the
    # pipe buffer size.
    random_text *= 1024
    with xopen(path, 'w') as f:
        f.write(random_text)
    return path
@pytest.fixture
def truncated_gzip(large_gzip):
    # Corrupt the large gzip by chopping off its final 10 bytes.
    with open(large_gzip, 'a') as f:
        f.truncate(os.stat(large_gzip).st_size - 10)
    return large_gzip
# Basic read tests: xopen in text and binary mode, with and without the
# context-manager protocol, across every supported compression format.
def test_xopen_text(fname):
    with xopen(fname, 'rt') as f:
        lines = list(f)
        assert len(lines) == 2
        assert lines[1] == 'The second line.\n', fname
def test_xopen_binary(fname):
    with xopen(fname, 'rb') as f:
        lines = list(f)
        assert len(lines) == 2
        assert lines[1] == b'The second line.\n', fname
def test_no_context_manager_text(fname):
    f = xopen(fname, 'rt')
    lines = list(f)
    assert len(lines) == 2
    assert lines[1] == 'The second line.\n', fname
    f.close()
    assert f.closed
def test_no_context_manager_binary(fname):
    f = xopen(fname, 'rb')
    lines = list(f)
    assert len(lines) == 2
    assert lines[1] == b'The second line.\n', fname
    f.close()
    assert f.closed
def test_readinto(fname):
    # Test whether .readinto() works
    content = CONTENT.encode('utf-8')
    with xopen(fname, 'rb') as f:
        b = bytearray(len(content) + 100)
        length = f.readinto(b)
        assert length == len(content)
        assert b[:length] == content
def test_pipedgzipreader_readinto():
    # Test whether PipedGzipReader.readinto works
    content = CONTENT.encode('utf-8')
    with PipedGzipReader("tests/file.txt.gz", "rb") as f:
        b = bytearray(len(content) + 100)
        length = f.readinto(b)
        assert length == len(content)
        assert b[:length] == content
def test_pipedgzipreader_textiowrapper():
    # The piped reader must be wrappable by io.TextIOWrapper.
    with PipedGzipReader("tests/file.txt.gz", "rb") as f:
        wrapped = io.TextIOWrapper(f)
        assert wrapped.read() == CONTENT
# Format detection by content: the .test files lack a telling extension,
# so xopen must sniff gzip/bz2 magic bytes from the file contents.
def test_detect_gzip_file_format_from_content():
    with xopen("tests/file.txt.gz.test", "rb") as fh:
        assert fh.readline() == CONTENT_LINES[0].encode("utf-8")
def test_detect_bz2_file_format_from_content():
    with xopen("tests/file.txt.bz2.test", "rb") as fh:
        assert fh.readline() == CONTENT_LINES[0].encode("utf-8")
def test_readline(fname):
first_line = CONTENT_LINES[0].encode('utf-8')
with xopen(fname, 'rb') as f:
assert f.readline() == first_line
def test_readline_text(fname):
with xopen(fname, 'r') as f:
assert f.readline() == CONTENT_LINES[0]
def test_readline_pipedgzipreader():
first_line = CONTENT_LINES[0].encode('utf-8')
with PipedGzipReader("tests/file.txt.gz", "rb") as f:
assert f.readline() == first_line
def test_readline_text_pipedgzipreader():
with PipedGzipReader("tests/file.txt.gz", "r") as f:
assert f.readline() == CONTENT_LINES[0]
@pytest.mark.parametrize("threads", [None, 1, 2])
def test_pipedgzipreader_iter(threads):
with PipedGzipReader("tests/file.txt.gz", mode="r", threads=threads) as f:
lines = list(f)
assert lines[0] == CONTENT_LINES[0]
def test_next(fname):
with xopen(fname, "rt") as f:
_ = next(f)
line2 = next(f)
assert line2 == 'The second line.\n', fname
def test_xopen_has_iter_method(ext, tmpdir):
    # Even writer objects must be iterable (file-object protocol).
    path = str(tmpdir.join("out" + ext))
    with xopen(path, mode='w') as f:
        assert hasattr(f, '__iter__')
def test_pipedgzipwriter_has_iter_method(tmpdir):
    with PipedGzipWriter(str(tmpdir.join("out.gz"))) as f:
        assert hasattr(f, '__iter__')
def test_iter_without_with(fname):
    f = xopen(fname, "rt")
    it = iter(f)
    assert CONTENT_LINES[0] == next(it)
    f.close()
def test_pipedgzipreader_iter_without_with():
    it = iter(PipedGzipReader("tests/file.txt.gz"))
    assert CONTENT_LINES[0] == next(it)
@pytest.mark.parametrize("mode", ["rb", "rt"])
def test_pipedgzipreader_close(large_gzip, mode):
    # Closing after a partial read must not deadlock on the pipe.
    with PipedGzipReader(large_gzip, mode=mode) as f:
        f.readline()
        time.sleep(0.2)
    # The subprocess should be properly terminated now
def test_partial_gzip_iteration_closes_correctly(large_gzip):
    class LineReader:
        def __init__(self, file):
            self.file = xopen(file, "rb")
        def __iter__(self):
            wrapper = io.TextIOWrapper(self.file)
            yield from wrapper
    f = LineReader(large_gzip)
    next(iter(f))
    f.file.close()
def test_nonexisting_file(ext):
    with pytest.raises(IOError):
        with xopen('this-file-does-not-exist' + ext):
            pass  # pragma: no cover
def test_write_to_nonexisting_dir(ext):
    with pytest.raises(IOError):
        with xopen('this/path/does/not/exist/file.txt' + ext, 'w'):
            pass  # pragma: no cover
def test_invalid_mode():
    with pytest.raises(ValueError):
        with xopen("tests/file.txt.gz", mode="hallo"):
            pass  # pragma: no cover
def test_filename_not_a_string():
    with pytest.raises(TypeError):
        with xopen(123, mode="r"):
            pass  # pragma: no cover
def test_invalid_compression_level(tmpdir):
    # gzip compression levels outside 1-9 must be rejected with a clear message.
    path = str(tmpdir.join("out.gz"))
    with pytest.raises(ValueError) as e:
        with xopen(path, mode="w", compresslevel=17) as f:
            f.write("hello")  # pragma: no cover
    assert "between 1 and 9" in e.value.args[0]
@pytest.mark.parametrize("ext", extensions)
def test_append(ext, tmpdir):
    """Opening an existing compressed file in 'ab' mode must append to it."""
    chunk = b"AB"
    path = str(tmpdir.join("the-file" + ext))
    for _ in range(2):
        with xopen(path, "ab") as handle:
            handle.write(chunk)
    expected = (chunk + chunk).decode("utf-8")
    with xopen(path, "r") as handle:
        for appended in handle:
            pass
    assert appended == expected
@pytest.mark.parametrize("ext", extensions)
def test_append_text(ext, tmpdir):
    """Same as test_append, but staying in text mode throughout."""
    chunk = "AB"
    path = str(tmpdir.join("the-file" + ext))
    for _ in range(2):
        with xopen(path, "at") as handle:
            handle.write(chunk)
    with xopen(path, "rt") as handle:
        for appended in handle:
            pass
    assert appended == chunk + chunk
class TookTooLongError(Exception):
    """Raised by the `timeout` context manager when the alarm fires."""
    pass
class timeout:
    # copied from https://stackoverflow.com/a/22348885/715090
    """Raise TookTooLongError if the managed block runs longer than `seconds`.

    Uses SIGALRM, so it works only on the main thread of POSIX systems.
    """
    def __init__(self, seconds=1):
        self.seconds = seconds
    def handle_timeout(self, signum, frame):
        raise TookTooLongError()  # pragma: no cover
    def __enter__(self):
        # Remember the previously installed SIGALRM handler so that nesting
        # or outer users (e.g. pytest plugins) are not clobbered permanently.
        self._old_handler = signal.signal(signal.SIGALRM, self.handle_timeout)
        signal.alarm(self.seconds)
    def __exit__(self, type, value, traceback):
        signal.alarm(0)
        # BUG FIX: restore the handler that was active before __enter__;
        # the original left handle_timeout installed forever.
        signal.signal(signal.SIGALRM, self._old_handler)
def test_truncated_gz(truncated_gzip):
    # A truncated gzip stream must fail loudly within the timeout, not hang.
    with timeout(seconds=2):
        with pytest.raises((EOFError, IOError)):
            f = xopen(truncated_gzip, "r")
            f.read()
            f.close()  # pragma: no cover
def test_truncated_gz_iter(truncated_gzip):
    with timeout(seconds=2):
        with pytest.raises((EOFError, IOError)):
            f = xopen(truncated_gzip, 'r')
            for line in f:
                pass
            f.close()  # pragma: no cover
def test_truncated_gz_with(truncated_gzip):
    with timeout(seconds=2):
        with pytest.raises((EOFError, IOError)):
            with xopen(truncated_gzip, 'r') as f:
                f.read()
def test_truncated_gz_iter_with(truncated_gzip):
    with timeout(seconds=2):
        with pytest.raises((EOFError, IOError)):
            with xopen(truncated_gzip, 'r') as f:
                for line in f:
                    pass
def test_bare_read_from_gz():
    with xopen('tests/hello.gz', 'rt') as f:
        assert f.read() == 'hello'
def test_read_piped_gzip():
    with PipedGzipReader('tests/hello.gz', 'rt') as f:
        assert f.read() == 'hello'
def test_write_pigz_threads(tmpdir):
    # threads > 0 selects the external pigz writer when available.
    path = str(tmpdir.join('out.gz'))
    with xopen(path, mode='w', threads=3) as f:
        f.write('hello')
    with xopen(path) as f:
        assert f.read() == 'hello'
def test_read_gzip_no_threads():
    # threads=0 must bypass external tools and use the stdlib gzip module.
    import gzip
    with xopen("tests/hello.gz", "rb", threads=0) as f:
        assert isinstance(f, gzip.GzipFile), f
def test_write_gzip_no_threads(tmpdir):
    import gzip
    path = str(tmpdir.join("out.gz"))
    with xopen(path, "wb", threads=0) as f:
        assert isinstance(f, gzip.GzipFile), f
def test_write_stdout():
    # xopen('-') maps to stdout, which must NOT be closed by f.close().
    f = xopen('-', mode='w')
    print("Hello", file=f)
    f.close()
    # ensure stdout is not closed
    print("Still there?")
def test_write_stdout_contextmanager():
    # Do not close stdout
    with xopen('-', mode='w') as f:
        print("Hello", file=f)
    # ensure stdout is not closed
    print("Still there?")
def test_read_pathlib(fname):
    # xopen must accept pathlib.Path objects, not just str paths.
    path = Path(fname)
    with xopen(path, mode='rt') as f:
        assert f.read() == CONTENT
def test_read_pathlib_binary(fname):
    path = Path(fname)
    with xopen(path, mode='rb') as f:
        assert f.read() == bytes(CONTENT, 'ascii')
def test_write_pathlib(ext, tmpdir):
    path = Path(str(tmpdir)) / ('hello.txt' + ext)
    with xopen(path, mode='wt') as f:
        f.write('hello')
    with xopen(path, mode='rt') as f:
        assert f.read() == 'hello'
def test_write_pathlib_binary(ext, tmpdir):
    path = Path(str(tmpdir)) / ('hello.txt' + ext)
    with xopen(path, mode='wb') as f:
        f.write(b'hello')
    with xopen(path, mode='rb') as f:
        assert f.read() == b'hello'
# lzma doesn’t work on PyPy3 at the moment
if lzma is not None:
    def test_detect_xz_file_format_from_content():
        with xopen("tests/file.txt.xz.test", "rb") as fh:
            assert fh.readline() == CONTENT_LINES[0].encode("utf-8")
def test_concatenated_gzip_function():
    # gzip/pigz can decompress concatenated members; xz cannot.
    assert _can_read_concatenated_gz("gzip") is True
    assert _can_read_concatenated_gz("pigz") is True
    assert _can_read_concatenated_gz("xz") is False
@pytest.mark.skipif(
    not hasattr(fcntl, "F_GETPIPE_SZ") or _MAX_PIPE_SIZE is None,
    reason="Pipe size modifications not available on this platform.")
def test_pipesize_changed(tmpdir):
    # The writer should enlarge its pipe to _MAX_PIPE_SIZE for throughput.
    path = Path(str(tmpdir), "hello.gz")
    with xopen(path, "wb") as f:
        assert isinstance(f, PipedCompressionWriter)
        assert fcntl.fcntl(f._file.fileno(),
                           fcntl.F_GETPIPE_SZ) == _MAX_PIPE_SIZE
def test_xopen_falls_back_to_gzip_open(lacking_pigz_permissions):
    # With pigz unusable (see fixture), xopen must still read gzip files.
    with xopen("tests/file.txt.gz", "rb") as f:
        assert f.readline() == CONTENT_LINES[0].encode("utf-8")
def test_open_many_gzip_writers(tmp_path):
    # Regression test: opening many threaded writers must not exhaust
    # file descriptors or pipes.
    files = []
    for i in range(1, 61):
        path = tmp_path / "{:03d}.txt.gz".format(i)
        f = xopen(path, "wb", threads=2)
        f.write(b"hello")
        files.append(f)
    for f in files:
        f.close()
| mit |
valhallasw/pywikibot-core | tests/archivebot_tests.py | 5 | 3438 | # -*- coding: utf-8 -*-
"""Tests for archivebot scripts."""
#
# (C) Pywikibot team, 2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id$'
from datetime import datetime
import sys
import pywikibot
import pywikibot.page
from pywikibot.textlib import TimeStripper
from scripts import archivebot
from tests.aspects import unittest, TestCase
if sys.version_info[0] > 2:
basestring = (str,)
# Minimum number of discussion threads expected on the test talk page of
# each language wiki; keys also define which sites TestArchiveBot runs on.
THREADS = {
    'als': 4, 'ar': 1, 'bar': 0, 'bg': 0, 'bjn': 1, 'bs': 0, 'ca': 5, 'ckb': 2,
    'cs': 0, 'de': 1, 'en': 25, 'eo': 1, 'es': 13, 'fa': 2, 'fr': 25, 'frr': 2,
    'hi': 0, 'hr': 2, 'hu': 5, 'id': 3, 'it': 25, 'ja': 4, 'la': 0, 'lt': 1,
    'nl': 9, 'nn': 0, 'no': 0, 'pdc': 25, 'pfl': 3, 'pl': 8, 'pt': 0, 'ro': 1,
    'ru': 20, 'scn': 2, 'simple': 1, 'sr': 0, 'sv': 5, 'th': 1, 'tr': 7,
    'ug': 0, 'uk': 1, 'uz': 1, 'vi': 1, 'zh': 4, 'zh-yue': 2,
}
class TestArchiveBot(TestCase):
    """Test archivebot script on 40+ Wikipedia sites."""
    family = 'wikipedia'
    # One cached test site per language code listed in THREADS.
    sites = dict([(code, {'family': 'wikipedia', 'code': code})
                  for code in THREADS])
    cached = True
    def test_archivebot(self, code=None):
        """Test archivebot for one site."""
        site = self.get_site(code)
        if code != 'de':  # bug 67663
            page = pywikibot.Page(site, 'user talk:xqt')
        else:
            page = pywikibot.Page(site, 'user talk:ladsgroup')
        talk = archivebot.DiscussionPage(page, None)
        # Sanity-check the parsed talk page structure ...
        self.assertIsInstance(talk.archives, dict)
        self.assertIsInstance(talk.archived_threads, int)
        self.assertTrue(talk.archiver is None)
        self.assertIsInstance(talk.header, basestring)
        self.assertIsInstance(talk.timestripper, TimeStripper)
        self.assertIsInstance(talk.threads, list)
        self.assertGreaterEqual(
            len(talk.threads), THREADS[code],
            u'%d Threads found on %s,\n%d or more expected'
            % (len(talk.threads), talk, THREADS[code]))
        # ... and every parsed discussion thread on it.
        for thread in talk.threads:
            self.assertIsInstance(thread, archivebot.DiscussionThread)
            self.assertIsInstance(thread.title, basestring)
            self.assertIsInstance(thread.now, datetime)
            self.assertEqual(thread.now, talk.now)
            self.assertIsInstance(thread.ts, TimeStripper)
            self.assertEqual(thread.ts, talk.timestripper)
            self.assertIsInstance(thread.code, basestring)
            self.assertEqual(thread.code, talk.timestripper.site.code)
            self.assertIsInstance(thread.content, basestring)
            try:
                self.assertIsInstance(thread.timestamp, datetime)
            except AssertionError:
                if thread.code not in self.expected_failures:
                    pywikibot.output('code %s: %s' % (thread.code, thread.content))
                raise
    expected_failures = ['ar', 'pdc', 'th']
    # expected failures - should be fixed
    # 'ar': Uses Arabic acronym for TZ
    # 'pdc': changed month name setting in wiki over time (?)
    #     in old posts in talk page, February is "Feb.", site message gives
    #     <message name="feb" xml:space="preserve">Han.</message>.
    #     for new entries it should work
    # 'th': year is 2552 while regex assumes 19..|20.., might be fixed
if __name__ == '__main__':
    # Swallow SystemExit so unittest.main() does not abort the caller.
    try:
        unittest.main()
    except SystemExit:
        pass
| mit |
ketjow4/NOV | Lib/site-packages/numpy/distutils/fcompiler/nag.py | 94 | 1332 | import sys
from numpy.distutils.fcompiler import FCompiler
compilers = ['NAGFCompiler']
class NAGFCompiler(FCompiler):
    """numpy.distutils FCompiler subclass for the NAGWare Fortran 95 compiler."""

    compiler_type = 'nag'
    description = 'NAGWare Fortran 95 Compiler'
    version_pattern = r'NAGWare Fortran 95 compiler Release (?P<version>[^\s]*)'

    executables = {
        'version_cmd'  : ["<F90>", "-V"],
        'compiler_f77' : ["f95", "-fixed"],
        'compiler_fix' : ["f95", "-fixed"],
        'compiler_f90' : ["f95"],
        'linker_so'    : ["<F90>"],
        'archiver'     : ["ar", "-cr"],
        'ranlib'       : ["ranlib"]
        }

    @staticmethod
    def _version_tuple(version):
        """Parse the leading dotted-number part of a version string.

        '5.1b' -> (5, 1); returns (0,) when nothing numeric can be parsed,
        so comparisons never raise.
        """
        parts = []
        for piece in str(version).split('.'):
            digits = ''
            for ch in piece:
                if not ch.isdigit():
                    break
                digits += ch
            if not digits:
                break
            parts.append(int(digits))
        return tuple(parts) if parts else (0,)

    def get_flags_linker_so(self):
        # macOS needs a flat-namespace bundle instead of a plain shared object.
        if sys.platform=='darwin':
            return ['-unsharedf95','-Wl,-bundle,-flat_namespace,-undefined,suppress']
        return ["-Wl,-shared"]
    def get_flags_opt(self):
        return ['-O4']
    def get_flags_arch(self):
        # BUG FIX: the original compared version strings lexicographically,
        # so '10.0' < '5.1' evaluated True and releases >= 10 were
        # misclassified.  Compare numeric tuples instead.
        version = self.get_version()
        if version and self._version_tuple(version) < (5, 1):
            return ['-target=native']
        else:
            return ['']
    def get_flags_debug(self):
        return ['-g','-gline','-g90','-nan','-C']
if __name__ == '__main__':
    # Smoke test: instantiate the NAG compiler wrapper and print its version.
    from distutils import log
    log.set_verbosity(2)
    from numpy.distutils.fcompiler import new_fcompiler
    compiler = new_fcompiler(compiler='nag')
    compiler.customize()
    print(compiler.get_version())
| gpl-3.0 |
kingvuplus/ee | mytest.py | 2 | 16731 | import sys, os
if os.path.isfile("/usr/lib/enigma2/python/enigma.zip"):
sys.path.append("/usr/lib/enigma2/python/enigma.zip")
from Tools.Profile import profile, profile_final
profile("PYTHON_START")
import Tools.RedirectOutput
import enigma
import eConsoleImpl
import eBaseImpl
enigma.eTimer = eBaseImpl.eTimer
enigma.eSocketNotifier = eBaseImpl.eSocketNotifier
enigma.eConsoleAppContainer = eConsoleImpl.eConsoleAppContainer
from traceback import print_exc
profile("SimpleSummary")
from Screens import InfoBar
from Screens.SimpleSummary import SimpleSummary
from sys import stdout, exc_info
profile("Bouquets")
from Components.config import config, configfile, ConfigText, ConfigYesNo, ConfigInteger, NoSave
config.misc.load_unlinked_userbouquets = ConfigYesNo(default=True)
def setLoadUnlinkedUserbouquets(configElement):
    # Config notifier: propagate the setting to the DVB database backend.
    enigma.eDVBDB.getInstance().setLoadUnlinkedUserbouquets(configElement.value)
config.misc.load_unlinked_userbouquets.addNotifier(setLoadUnlinkedUserbouquets)
enigma.eDVBDB.getInstance().reloadBouquets()
profile("ParentalControl")
import Components.ParentalControl
Components.ParentalControl.InitParentalControl()
profile("LOAD:Navigation")
from Navigation import Navigation
profile("LOAD:skin")
from skin import readSkin
profile("LOAD:Tools")
from Tools.Directories import InitFallbackFiles, resolveFilename, SCOPE_PLUGINS, SCOPE_CURRENT_SKIN
InitFallbackFiles()
profile("config.misc")
# Global runtime configuration defaults; NoSave entries are volatile and
# reset on every restart.
config.misc.radiopic = ConfigText(default = resolveFilename(SCOPE_CURRENT_SKIN, "radio.mvi"))
config.misc.blackradiopic = ConfigText(default = resolveFilename(SCOPE_CURRENT_SKIN, "black.mvi"))
config.misc.useTransponderTime = ConfigYesNo(default=True)
config.misc.startCounter = ConfigInteger(default=0) # number of e2 starts...
config.misc.standbyCounter = NoSave(ConfigInteger(default=0)) # number of standby
config.misc.DeepStandby = NoSave(ConfigYesNo(default=False)) # detect deepstandby
config.misc.RestartUI = ConfigYesNo(default=False) # detect user interface restart
config.misc.epgcache_filename = ConfigText(default = "/hdd/epg.dat")
def setEPGCachePath(configElement):
    # Config notifier: point the EPG cache singleton at the configured file.
    enigma.eEPGCache.getInstance().setCacheFile(configElement.value)
#demo code for use of standby enter leave callbacks
#def leaveStandby():
# print "!!!!!!!!!!!!!!!!!leave standby"
#def standbyCountChanged(configElement):
# print "!!!!!!!!!!!!!!!!!enter standby num", configElement.value
# from Screens.Standby import inStandby
# inStandby.onClose.append(leaveStandby)
#config.misc.standbyCounter.addNotifier(standbyCountChanged, initial_call = False)
####################################################
def useTransponderTimeChanged(configElement):
    # Config notifier: toggle syncing the clock from the DVB transponder.
    enigma.eDVBLocalTimeHandler.getInstance().setUseDVBTime(configElement.value)
config.misc.useTransponderTime.addNotifier(useTransponderTimeChanged)
profile("Twisted")
# Prefer the Twisted reactor (via the e2reactor integration) as the main
# loop; fall back to enigma's built-in mainloop when Twisted is missing.
try:
    import twisted.python.runtime
    import e2reactor
    e2reactor.install()
    from twisted.internet import reactor
    def runReactor():
        reactor.run(installSignalHandlers=False)
except ImportError:
    print "twisted not available"
    def runReactor():
        enigma.runMainloop()
profile("LOAD:Plugin")
# initialize autorun plugins and plugin menu entries
from Components.PluginComponent import plugins
profile("LOAD:Wizard")
from Screens.Wizard import wizardManager
from Screens.DefaultWizard import *
from Screens.StartWizard import *
from Screens.TutorialWizard import *
import Screens.Rc
from Tools.BoundFunction import boundFunction
from Plugins.Plugin import PluginDescriptor
profile("misc")
had = dict()
# Debug helper: recursively print an object graph (dicts and __dict__
# attributes).  `had` records already-visited values so cycles are marked
# "(cycle)" instead of recursing forever.
def dump(dir, p = ""):
    if isinstance(dir, dict):
        for (entry, val) in dir.items():
            dump(val, p + "(dict)/" + entry)
    if hasattr(dir, "__dict__"):
        for name, value in dir.__dict__.items():
            if not had.has_key(str(value)):
                had[str(value)] = 1
                dump(value, p + "/" + str(name))
            else:
                print p + "/" + str(name) + ":" + str(dir.__class__) + "(cycle)"
    else:
        print p + ":" + str(dir)
# + ":" + str(dir.__class__)
# display
profile("LOAD:ScreenGlobals")
from Screens.Globals import Globals
from Screens.SessionGlobals import SessionGlobals
from Screens.Screen import Screen
profile("Screen")
Screen.global_screen = Globals()
# Session.open:
# * push current active dialog ('current_dialog') onto stack
# * call execEnd for this dialog
# * clear in_exec flag
# * hide screen
# * instantiate new dialog into 'current_dialog'
# * create screens, components
# * read, apply skin
# * create GUI for screen
# * call execBegin for new dialog
# * set in_exec
# * show gui screen
# * call components' / screen's onExecBegin
# ... screen is active, until it calls 'close'...
# Session.close:
# * assert in_exec
# * save return value
# * start deferred close handler ('onClose')
# * execEnd
# * clear in_exec
# * hide screen
# .. a moment later:
# Session.doClose:
# * destroy screen
class Session:
    # Dialog ("screen") manager: keeps the stack of open dialogs and the
    # matching LCD summary screens, and drives their exec/show lifecycle
    # (see the open/close protocol described in the comments above).
    def __init__(self, desktop = None, summary_desktop = None, navigation = None):
        self.desktop = desktop
        self.summary_desktop = summary_desktop
        self.nav = navigation
        # delay_timer defers the final part of close() to the next
        # mainloop iteration (see processDelay).
        self.delay_timer = enigma.eTimer()
        self.delay_timer.callback.append(self.processDelay)
        self.current_dialog = None
        self.dialog_stack = [ ]
        self.summary_stack = [ ]
        self.summary = None
        self.in_exec = False
        self.screen = SessionGlobals(self)
        for p in plugins.getPlugins(PluginDescriptor.WHERE_SESSIONSTART):
            try:
                p(reason=0, session=self)
            except:
                print "Plugin raised exception at WHERE_SESSIONSTART"
                import traceback
                traceback.print_exc()
    def processDelay(self):
        # Deferred tail of close(): destroy/pop the dialog, then invoke
        # its callback with the saved return value.
        callback = self.current_dialog.callback
        retval = self.current_dialog.returnValue
        if self.current_dialog.isTmp:
            self.current_dialog.doClose()
            # dump(self.current_dialog)
            del self.current_dialog
        else:
            del self.current_dialog.callback
        self.popCurrent()
        if callback is not None:
            callback(*retval)
    def execBegin(self, first=True, do_show = True):
        assert not self.in_exec
        self.in_exec = True
        c = self.current_dialog
        # when this is an execbegin after a execend of a "higher" dialog,
        # popSummary already did the right thing.
        if first:
            self.instantiateSummaryDialog(c)
        c.saveKeyboardMode()
        c.execBegin()
        # when execBegin opened a new dialog, don't bother showing the old one.
        if c == self.current_dialog and do_show:
            c.show()
    def execEnd(self, last=True):
        assert self.in_exec
        self.in_exec = False
        self.current_dialog.execEnd()
        self.current_dialog.restoreKeyboardMode()
        self.current_dialog.hide()
        if last:
            self.current_dialog.removeSummary(self.summary)
            self.popSummary()
    def instantiateDialog(self, screen, *arguments, **kwargs):
        return self.doInstantiateDialog(screen, arguments, kwargs, self.desktop)
    def deleteDialog(self, screen):
        screen.hide()
        screen.doClose()
    def instantiateSummaryDialog(self, screen, **kwargs):
        # Create the LCD summary screen for `screen` (SimpleSummary if the
        # screen does not provide its own) and register it.
        self.pushSummary()
        summary = screen.createSummary() or SimpleSummary
        arguments = (screen,)
        self.summary = self.doInstantiateDialog(summary, arguments, kwargs, self.summary_desktop)
        self.summary.show()
        screen.addSummary(self.summary)
    def doInstantiateDialog(self, screen, arguments, kwargs, desktop):
        # create dialog
        dlg = screen(self, *arguments, **kwargs)
        if dlg is None:
            return
        # read skin data
        readSkin(dlg, None, dlg.skinName, desktop)
        # create GUI view of this dialog
        dlg.setDesktop(desktop)
        dlg.applySkin()
        return dlg
    def pushCurrent(self):
        if self.current_dialog is not None:
            self.dialog_stack.append((self.current_dialog, self.current_dialog.shown))
            self.execEnd(last=False)
    def popCurrent(self):
        if self.dialog_stack:
            (self.current_dialog, do_show) = self.dialog_stack.pop()
            self.execBegin(first=False, do_show=do_show)
        else:
            self.current_dialog = None
    def execDialog(self, dialog):
        # Activate a caller-owned (non-temporary) dialog.
        self.pushCurrent()
        self.current_dialog = dialog
        self.current_dialog.isTmp = False
        self.current_dialog.callback = None # would cause re-entrancy problems.
        self.execBegin()
    def openWithCallback(self, callback, screen, *arguments, **kwargs):
        dlg = self.open(screen, *arguments, **kwargs)
        dlg.callback = callback
        return dlg
    def open(self, screen, *arguments, **kwargs):
        if self.dialog_stack and not self.in_exec:
            raise RuntimeError("modal open are allowed only from a screen which is modal!")
            # ...unless it's the very first screen.
        self.pushCurrent()
        dlg = self.current_dialog = self.instantiateDialog(screen, *arguments, **kwargs)
        dlg.isTmp = True
        dlg.callback = None
        self.execBegin()
        return dlg
    def close(self, screen, *retval):
        if not self.in_exec:
            print "close after exec!"
            return
        # be sure that the close is for the right dialog!
        # if it's not, you probably closed after another dialog
        # was opened. this can happen if you open a dialog
        # onExecBegin, and forget to do this only once.
        # after close of the top dialog, the underlying will
        # gain focus again (for a short time), thus triggering
        # the onExec, which opens the dialog again, closing the loop.
        assert screen == self.current_dialog
        self.current_dialog.returnValue = retval
        self.delay_timer.start(0, 1)
        self.execEnd()
    def pushSummary(self):
        # Always push (possibly None) so the stack stays balanced with
        # popSummary.
        if self.summary is not None:
            self.summary.hide()
        self.summary_stack.append(self.summary)
        self.summary = None
    def popSummary(self):
        if self.summary is not None:
            self.summary.doClose()
        self.summary = self.summary_stack.pop()
        if self.summary is not None:
            self.summary.show()
profile("Standby,PowerKey")
import Screens.Standby
from Screens.Menu import MainMenu, mdom
from GlobalActions import globalActionMap
class PowerKey:
    """ PowerKey stuff - handles the powerkey press and powerkey release actions"""
    def __init__(self, session):
        self.session = session
        globalActionMap.actions["power_down"]=self.powerdown
        globalActionMap.actions["power_up"]=self.powerup
        globalActionMap.actions["power_long"]=self.powerlong
        globalActionMap.actions["deepstandby"]=self.shutdown # frontpanel long power button press
        globalActionMap.actions["discrete_off"]=self.standby
        # While set, a power_up release is ignored (it belonged to a long
        # press or an explicit action already handled).
        self.standbyblocked = 1
    def MenuClosed(self, *val):
        self.session.infobar = None
    def shutdown(self):
        print "PowerOff - Now!"
        if not Screens.Standby.inTryQuitMainloop and self.session.current_dialog and self.session.current_dialog.ALLOW_SUSPEND:
            self.session.open(Screens.Standby.TryQuitMainloop, 1)
    def powerlong(self):
        if Screens.Standby.inTryQuitMainloop or (self.session.current_dialog and not self.session.current_dialog.ALLOW_SUSPEND):
            return
        self.doAction(action = config.usage.on_long_powerpress.value)
    def doAction(self, action):
        # Dispatch a configured power-button action ("shutdown",
        # "show_menu" or "standby").
        self.standbyblocked = 1
        if action == "shutdown":
            self.shutdown()
        elif action == "show_menu":
            print "Show shutdown Menu"
            root = mdom.getroot()
            for x in root.findall("menu"):
                y = x.find("id")
                if y is not None:
                    id = y.get("val")
                    if id and id == "shutdown":
                        self.session.infobar = self
                        menu_screen = self.session.openWithCallback(self.MenuClosed, MainMenu, x)
                        menu_screen.setTitle(_("Standby / restart"))
                        return
        elif action == "standby":
            self.standby()
    def powerdown(self):
        self.standbyblocked = 0
    def powerup(self):
        if self.standbyblocked == 0:
            self.doAction(action = config.usage.on_short_powerpress.value)
    def standby(self):
        if not Screens.Standby.inStandby and self.session.current_dialog and self.session.current_dialog.ALLOW_SUSPEND and self.session.in_exec:
            self.session.open(Screens.Standby.Standby)
profile("Scart")
from Screens.Scart import Scart
class AutoScartControl:
    # Switches between TV and the SCART input based on the VCR
    # slow-blanking signal, honouring the config.av.vcrswitch setting.
    def __init__(self, session):
        self.force = False
        self.current_vcr_sb = enigma.eAVSwitch.getInstance().getVCRSlowBlanking()
        if self.current_vcr_sb and config.av.vcrswitch.value:
            self.scartDialog = session.instantiateDialog(Scart, True)
        else:
            self.scartDialog = session.instantiateDialog(Scart, False)
        config.av.vcrswitch.addNotifier(self.recheckVCRSb)
        enigma.eAVSwitch.getInstance().vcr_sb_notifier.get().append(self.VCRSbChanged)
    def recheckVCRSb(self, configElement):
        # Config changed: re-evaluate with the last known slow-blanking value.
        self.VCRSbChanged(self.current_vcr_sb)
    def VCRSbChanged(self, value):
        #print "vcr sb changed to", value
        self.current_vcr_sb = value
        if config.av.vcrswitch.value or value > 2:
            if value:
                self.scartDialog.showMessageBox()
            else:
                self.scartDialog.switchToTV()
profile("Load:CI")
from enigma import eDVBCIInterfaces
from Screens.Ci import CiHandler
profile("Load:VolumeControl")
from Components.VolumeControl import VolumeControl
def runScreenTest():
    # Main session bootstrap: builds Navigation and Session, runs the startup
    # wizards and the InfoBar, enters the main loop, and on exit programs the
    # frontprocessor wakeup time and shuts the navigation down.
    config.misc.startCounter.value += 1
    config.misc.startCounter.save()
    profile("readPluginList")
    plugins.readPluginList(resolveFilename(SCOPE_PLUGINS))
    profile("Init:Session")
    nav = Navigation()
    session = Session(desktop = enigma.getDesktop(0), summary_desktop = enigma.getDesktop(1), navigation = nav)
    CiHandler.setSession(session)
    screensToRun = [ p.__call__ for p in plugins.getPlugins(PluginDescriptor.WHERE_WIZARD) ]
    profile("wizards")
    screensToRun += wizardManager.getWizards()
    screensToRun.append((100, InfoBar.InfoBar))
    screensToRun.sort()
    enigma.ePythonConfigQuery.setQueryFunc(configfile.getResolvedKey)
    # eDVBCIInterfaces.getInstance().setDescrambleRules(0 # Slot Number
    #	,( ["1:0:1:24:4:85:C00000:0:0:0:"], #service_list
    #		["PREMIERE"], #provider_list,
    #		[] #caid_list
    #	));
    def runNextScreen(session, screensToRun, *result):
        # Chain the startup screens: each screen's close result triggers
        # the next one; a non-empty result quits the main loop.
        if result:
            enigma.quitMainloop(*result)
            return
        screen = screensToRun[0][1]
        args = screensToRun[0][2:]
        if screensToRun:
            session.openWithCallback(boundFunction(runNextScreen, session, screensToRun[1:]), screen, *args)
        else:
            session.open(screen, *args)
    config.misc.epgcache_filename.addNotifier(setEPGCachePath)
    runNextScreen(session, screensToRun)
    profile("Init:VolumeControl")
    vol = VolumeControl(session)
    profile("Init:PowerKey")
    power = PowerKey(session)
    # we need session.scart to access it from within menu.xml
    session.scart = AutoScartControl(session)
    profile("Init:Trashcan")
    import Tools.Trashcan
    Tools.Trashcan.init(session)
    profile("RunReactor")
    profile_final()
    runReactor()
    profile("wakeup")
    from time import time, strftime, localtime
    from Tools.StbHardware import setFPWakeuptime, getFPWakeuptime, setRTCtime
    #get currentTime
    nowTime = time()
    # Candidate wakeup events: (timestamp, type[, auto-flag]); -1 means none.
    wakeupList = [
        x for x in ((session.nav.RecordTimer.getNextRecordingTime(), 0, session.nav.RecordTimer.isNextRecordAfterEventActionAuto()),
                    (session.nav.RecordTimer.getNextZapTime(), 1),
                    (plugins.getNextWakeupTime(), 2))
        if x[0] != -1
    ]
    wakeupList.sort()
    if wakeupList:
        from time import strftime
        startTime = wakeupList[0]
        if (startTime[0] - nowTime) < 270: # no time to switch box back on
            wptime = nowTime + 30 # so switch back on in 30 seconds
        else:
            wptime = startTime[0] - 240
        if not config.misc.useTransponderTime.value:
            print "dvb time sync disabled... so set RTC now to current linux time!", strftime("%Y/%m/%d %H:%M", localtime(nowTime))
            setRTCtime(nowTime)
        print "set wakeup time to", strftime("%Y/%m/%d %H:%M", localtime(wptime))
        setFPWakeuptime(wptime)
    profile("stopService")
    session.nav.stopService()
    profile("nav shutdown")
    session.nav.shutdown()
    profile("configfile.save")
    configfile.save()
    # from Screens import InfoBarGenerics
    # InfoBarGenerics.saveResumePoints()
    return 0
profile("Init:skin")
import skin
skin.loadSkinData(enigma.getDesktop(0))
profile("InputDevice")
import Components.InputDevice
Components.InputDevice.InitInputDevices()
import Components.InputHotplug
profile("SetupDevices")
import Components.SetupDevices
Components.SetupDevices.InitSetupDevices()
profile("AVSwitch")
import Components.AVSwitch
Components.AVSwitch.InitAVSwitch()
profile("RecordingConfig")
import Components.RecordingConfig
Components.RecordingConfig.InitRecordingConfig()
profile("UsageConfig")
import Components.UsageConfig
Components.UsageConfig.InitUsageConfig()
profile("keymapparser")
import keymapparser
keymapparser.readKeymap(config.usage.keymap.value)
profile("Network")
import Components.Network
Components.Network.InitNetwork()
profile("LCD")
import Components.Lcd
Components.Lcd.InitLcd()
profile("RFMod")
import Components.RFmod
Components.RFmod.InitRFmod()
profile("Init:CI")
import Screens.Ci
Screens.Ci.InitCiConfig()
profile("RcModel")
import Components.RcModel
#from enigma import dump_malloc_stats
#t = eTimer()
#t.callback.append(dump_malloc_stats)
#t.start(1000)
# first, setup a screen
# Top-level startup: any uncaught exception is printed and converted into
# mainloop exit code 5 (restart request) instead of killing the process.
try:
    runScreenTest()
    plugins.shutdown()
    Components.ParentalControl.parentalControl.save()
except:
    print 'EXCEPTION IN PYTHON STARTUP CODE:'
    print '-'*60
    print_exc(file=stdout)
    enigma.quitMainloop(5)
    print '-'*60
| gpl-2.0 |
sabi0/intellij-community | python/lib/Lib/quopri.py | 424 | 6969 | #! /usr/bin/env python
"""Conversions to/from quoted-printable transport encoding as per RFC 1521."""
# (Dec 1991 version).
__all__ = ["encode", "decode", "encodestring", "decodestring"]
ESCAPE = '='          # quoted-printable escape character
MAXLINESIZE = 76      # maximum encoded line length per RFC 1521
HEX = '0123456789ABCDEF'
EMPTYSTRING = ''
# Prefer the C implementations from binascii; the pure-Python code paths
# below are used only when they are unavailable.
try:
    from binascii import a2b_qp, b2a_qp
except ImportError:
    a2b_qp = None
    b2a_qp = None
def needsquoting(c, quotetabs, header):
    """Decide whether a particular character needs to be quoted.

    The 'quotetabs' flag indicates whether embedded tabs and spaces should
    be quoted.  Note that line-ending tabs and spaces are always encoded,
    as per RFC 1521.
    """
    if c in ' \t':
        # Embedded whitespace is quoted only on request.
        return quotetabs
    if c == '_':
        # In header encoding (RFC 1522) '_' stands for a space, so a
        # literal underscore must itself be escaped.
        return header
    if c == ESCAPE:
        return True
    # Anything outside the printable ASCII range must be quoted.
    return not ' ' <= c <= '~'
def quote(c):
    """Return the quoted-printable escape (e.g. '=3D') for one character."""
    code = ord(c)
    high, low = divmod(code, 16)
    return ESCAPE + HEX[high] + HEX[low]
def encode(input, output, quotetabs, header = 0):
    """Read 'input', apply quoted-printable encoding, and write to 'output'.

    'input' and 'output' are files with readline() and write() methods.
    The 'quotetabs' flag indicates whether embedded tabs and spaces should be
    quoted.  Note that line-ending tabs and spaces are always encoded, as per
    RFC 1521.
    The 'header' flag indicates whether we are encoding spaces as _ as per
    RFC 1522.
    """
    if b2a_qp is not None:
        # Fast path: delegate to the C implementation in binascii.
        data = input.read()
        odata = b2a_qp(data, quotetabs = quotetabs, header = header)
        output.write(odata)
        return
    def write(s, output=output, lineEnd='\n'):
        # RFC 1521 requires that the line ending in a space or tab must have
        # that trailing character encoded.
        if s and s[-1:] in ' \t':
            output.write(s[:-1] + quote(s[-1]) + lineEnd)
        elif s == '.':
            output.write(quote(s) + lineEnd)
        else:
            output.write(s + lineEnd)
    prevline = None
    while 1:
        line = input.readline()
        if not line:
            break
        outline = []
        # Strip off any readline induced trailing newline
        stripped = ''
        if line[-1:] == '\n':
            line = line[:-1]
            stripped = '\n'
        # Calculate the un-length-limited encoded line
        for c in line:
            if needsquoting(c, quotetabs, header):
                c = quote(c)
            if header and c == ' ':
                outline.append('_')
            else:
                outline.append(c)
        # First, write out the previous line
        if prevline is not None:
            write(prevline)
        # Now see if we need any soft line breaks because of RFC-imposed
        # length limitations. Then do the thisline->prevline dance.
        thisline = EMPTYSTRING.join(outline)
        while len(thisline) > MAXLINESIZE:
            # Don't forget to include the soft line break `=' sign in the
            # length calculation!
            write(thisline[:MAXLINESIZE-1], lineEnd='=\n')
            thisline = thisline[MAXLINESIZE-1:]
        # Write out the current line
        prevline = thisline
    # Write out the last line, without a trailing newline
    if prevline is not None:
        write(prevline, lineEnd=stripped)
def encodestring(s, quotetabs = 0, header = 0):
    # String-to-string convenience wrapper around encode().
    if b2a_qp is not None:
        return b2a_qp(s, quotetabs = quotetabs, header = header)
    from cStringIO import StringIO
    infp = StringIO(s)
    outfp = StringIO()
    encode(infp, outfp, quotetabs, header)
    return outfp.getvalue()
def decode(input, output, header = 0):
    """Read 'input', apply quoted-printable decoding, and write to 'output'.
    'input' and 'output' are files with readline() and write() methods.
    If 'header' is true, decode underscore as space (per RFC 1522)."""
    if a2b_qp is not None:
        # Fast path: delegate to the C implementation in binascii.
        data = input.read()
        odata = a2b_qp(data, header = header)
        output.write(odata)
        return
    new = ''
    while 1:
        line = input.readline()
        if not line: break
        i, n = 0, len(line)
        # 'partial' stays true when the line has no trailing newline (or a
        # soft line break), meaning the logical line continues.
        if n > 0 and line[n-1] == '\n':
            partial = 0; n = n-1
            # Strip trailing whitespace
            while n > 0 and line[n-1] in " \t\r":
                n = n-1
        else:
            partial = 1
        while i < n:
            c = line[i]
            if c == '_' and header:
                new = new + ' '; i = i+1
            elif c != ESCAPE:
                new = new + c; i = i+1
            elif i+1 == n and not partial:
                # Trailing '=' is a soft line break: keep accumulating.
                partial = 1; break
            elif i+1 < n and line[i+1] == ESCAPE:
                new = new + ESCAPE; i = i+2
            elif i+2 < n and ishex(line[i+1]) and ishex(line[i+2]):
                new = new + chr(unhex(line[i+1:i+3])); i = i+3
            else: # Bad escape sequence -- leave it in
                new = new + c; i = i+1
        if not partial:
            output.write(new + '\n')
            new = ''
    if new:
        output.write(new)
def decodestring(s, header = 0):
    """Decode a single quoted-printable string.

    Uses binascii's C implementation (a2b_qp) when available, otherwise
    falls back to the pure-Python file-based decode() over in-memory
    buffers. If 'header' is true, underscores decode as spaces.
    """
    if a2b_qp is not None:
        # Fast path: delegate to the C implementation.
        return a2b_qp(s, header = header)
    # Slow path: wrap the string in file-like objects and reuse decode().
    from cStringIO import StringIO
    infp = StringIO(s)
    outfp = StringIO()
    decode(infp, outfp, header = header)
    return outfp.getvalue()
# Other helper functions
def ishex(c):
    """Return true if the character 'c' is a hexadecimal digit."""
    # A hex digit falls in exactly one of these three inclusive ranges.
    for low, high in (('0', '9'), ('a', 'f'), ('A', 'F')):
        if low <= c <= high:
            return True
    return False
def unhex(s):
    """Get the integer value of a hexadecimal number.

    Parsing stops silently at the first non-hex character, returning the
    value accumulated so far (0 for an empty or non-hex string).
    """
    value = 0
    for ch in s:
        if '0' <= ch <= '9':
            digit = ord(ch) - ord('0')
        elif 'a' <= ch <= 'f':
            digit = ord(ch) - ord('a') + 10
        elif 'A' <= ch <= 'F':
            digit = ord(ch) - ord('A') + 10
        else:
            break
        value = value * 16 + digit
    return value
def main():
    """Command-line driver: encode (default) or decode (-d) each named
    file (or stdin when no file / '-' is given) to stdout.  With -t,
    tabs are also quoted when encoding."""
    import sys
    import getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'td')
    except getopt.error, msg:
        # Bad options: print usage to stderr and exit with status 2.
        sys.stdout = sys.stderr
        print msg
        print "usage: quopri [-t | -d] [file] ..."
        print "-t: quote tabs"
        print "-d: decode; default encode"
        sys.exit(2)
    deco = 0
    tabs = 0
    for o, a in opts:
        if o == '-t': tabs = 1
        if o == '-d': deco = 1
    if tabs and deco:
        # -t only affects encoding, so combining it with -d is an error.
        sys.stdout = sys.stderr
        print "-t and -d are mutually exclusive"
        sys.exit(2)
    if not args: args = ['-']
    sts = 0
    for file in args:
        if file == '-':
            fp = sys.stdin
        else:
            try:
                fp = open(file)
            except IOError, msg:
                # Report the unopenable file but keep processing the rest.
                sys.stderr.write("%s: can't open (%s)\n" % (file, msg))
                sts = 1
                continue
        if deco:
            decode(fp, sys.stdout)
        else:
            encode(fp, sys.stdout, tabs)
        if fp is not sys.stdin:
            fp.close()
    if sts:
        # Exit non-zero if any input file could not be opened.
        sys.exit(sts)
if __name__ == '__main__':
main()
| apache-2.0 |
wnt-zhp/hufce | django/utils/numberformat.py | 94 | 1775 | from django.conf import settings
from django.utils.safestring import mark_safe
def format(number, decimal_sep, decimal_pos=None, grouping=0, thousand_sep='',
        force_grouping=False):
    """
    Gets a number (as a number or string), and returns it as a string,
    using formats defined as arguments:

    * decimal_sep: Decimal separator symbol (for example ".")
    * decimal_pos: Number of decimal positions
    * grouping: Number of digits in every group limited by thousand separator
    * thousand_sep: Thousand separator symbol (for example ",")
    * force_grouping: apply grouping even when the locale settings are off
    """
    # Grouping requires it to be enabled (by settings or force_grouping)
    # AND a positive group size.
    use_grouping = settings.USE_L10N and settings.USE_THOUSAND_SEPARATOR
    use_grouping = use_grouping or force_grouping
    use_grouping = use_grouping and grouping > 0
    # Make the common case fast
    if isinstance(number, int) and not use_grouping and not decimal_pos:
        return mark_safe(unicode(number))
    # sign
    if float(number) < 0:
        sign = '-'
    else:
        sign = ''
    # Work on the absolute value; the sign is re-attached at the end.
    str_number = unicode(number)
    if str_number[0] == '-':
        str_number = str_number[1:]
    # decimal part
    if '.' in str_number:
        int_part, dec_part = str_number.split('.')
        if decimal_pos is not None:
            # Truncate (not round) to the requested precision.
            dec_part = dec_part[:decimal_pos]
    else:
        int_part, dec_part = str_number, ''
    if decimal_pos is not None:
        # Zero-pad the decimal part up to the requested precision.
        dec_part = dec_part + ('0' * (decimal_pos - len(dec_part)))
    if dec_part:
        dec_part = decimal_sep + dec_part
    # grouping
    if use_grouping:
        # Walk the digits right-to-left, inserting the separator every
        # `grouping` digits, then restore the original order.
        int_part_gd = ''
        for cnt, digit in enumerate(int_part[::-1]):
            if cnt and not cnt % grouping:
                int_part_gd += thousand_sep
            int_part_gd += digit
        int_part = int_part_gd[::-1]
    return sign + int_part + dec_part
| gpl-3.0 |
radiasoft/crossbarexamples | django/realtimemonitor/django_app/models.py | 9 | 1331 | # -*- coding: utf-8 -*-
import requests
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.forms.models import model_to_dict
class Client(models.Model):
    """ Our client configuration """

    # Client unique identifier
    ip = models.GenericIPAddressField()

    # What data to send to the dashboard
    show_cpus = models.BooleanField(default=True)
    show_memory = models.BooleanField(default=True)
    show_disk = models.BooleanField(default=True)

    # Stop sending data
    disabled = models.BooleanField(default=False)

    # Data refresh frequency (seconds between reports, presumably --
    # TODO confirm the unit against the client implementation)
    frequency = models.IntegerField(default=1)

    def __unicode__(self):
        # Represent clients by their IP address in the admin / shell.
        return self.ip
@receiver(post_save, sender=Client, dispatch_uid="server_post_save")
def notify_server_config_changed(sender, instance, **kwargs):
    """ Notifies a client that its config has changed.

    This function is executed when we save a Client model, and it
    makes a POST request on the WAMP-HTTP bridge, allowing us to
    make a WAMP publication from Django.
    """
    # NOTE(review): assumes the Crossbar HTTP bridge listens on
    # 127.0.0.1:8080; the request carries no timeout, so a hung bridge
    # would block the save() caller -- confirm this is acceptable.
    requests.post("http://127.0.0.1:8080/notify",
                  json={
                      'topic': 'clientconfig.' + instance.ip,
                      'args': [model_to_dict(instance)]
                  })
mandeepdhami/nova | nova/scheduler/filters/json_filter.py | 60 | 4799 | # Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import operator
from oslo_serialization import jsonutils
import six
from nova.scheduler import filters
class JsonFilter(filters.BaseHostFilter):
    """Host Filter to allow simple JSON-based grammar for
    selecting hosts.
    """

    def _op_compare(self, args, op):
        """Returns True if the specified operator can successfully
        compare the first item in the args with all the rest. Will
        return False if only one item is in the list.
        """
        if len(args) < 2:
            return False
        if op is operator.contains:
            # 'in': membership test of args[0] within the remaining items.
            bad = args[0] not in args[1:]
        else:
            # Collect every comparison that fails; all must hold for True.
            bad = [arg for arg in args[1:]
                   if not op(args[0], arg)]
        return not bool(bad)

    def _equals(self, args):
        """First term is == all the other terms."""
        return self._op_compare(args, operator.eq)

    def _less_than(self, args):
        """First term is < all the other terms."""
        return self._op_compare(args, operator.lt)

    def _greater_than(self, args):
        """First term is > all the other terms."""
        return self._op_compare(args, operator.gt)

    def _in(self, args):
        """First term is in set of remaining terms."""
        return self._op_compare(args, operator.contains)

    def _less_than_equal(self, args):
        """First term is <= all the other terms."""
        return self._op_compare(args, operator.le)

    def _greater_than_equal(self, args):
        """First term is >= all the other terms."""
        return self._op_compare(args, operator.ge)

    def _not(self, args):
        """Flip each of the arguments."""
        # Note: returns a list of booleans, not a single boolean.
        return [not arg for arg in args]

    def _or(self, args):
        """True if any arg is True."""
        return any(args)

    def _and(self, args):
        """True if all args are True."""
        return all(args)

    # Maps an operator token in the JSON query to its handler.  The values
    # are plain (unbound) functions, hence the explicit `self` argument at
    # the call site in _process_filter().
    commands = {
        '=': _equals,
        '<': _less_than,
        '>': _greater_than,
        'in': _in,
        '<=': _less_than_equal,
        '>=': _greater_than_equal,
        'not': _not,
        'or': _or,
        'and': _and,
    }

    def _parse_string(self, string, host_state):
        """Strings prefixed with $ are capability lookups in the
        form '$variable' where 'variable' is an attribute in the
        HostState class.  If $variable is a dictionary, you may
        use: $variable.dictkey
        """
        if not string:
            return None
        if not string.startswith("$"):
            # Plain literal: return unchanged.
            return string

        path = string[1:].split(".")
        obj = getattr(host_state, path[0], None)
        if obj is None:
            return None
        # Walk the remaining dotted path through nested dictionaries.
        for item in path[1:]:
            obj = obj.get(item, None)
            if obj is None:
                return None
        return obj

    def _process_filter(self, query, host_state):
        """Recursively parse the query structure."""
        if not query:
            return True
        # The first element names the operator; the rest are its operands.
        cmd = query[0]
        method = self.commands[cmd]
        cooked_args = []
        for arg in query[1:]:
            if isinstance(arg, list):
                # Nested sub-expression: evaluate it first.
                arg = self._process_filter(arg, host_state)
            elif isinstance(arg, six.string_types):
                arg = self._parse_string(arg, host_state)
            # None operands (e.g. unresolved $lookups) are dropped.
            if arg is not None:
                cooked_args.append(arg)
        result = method(self, cooked_args)
        return result

    def host_passes(self, host_state, filter_properties):
        """Return a list of hosts that can fulfill the requirements
        specified in the query.
        """
        try:
            query = filter_properties['scheduler_hints']['query']
        except KeyError:
            query = None
        if not query:
            # No query supplied: accept every host.
            return True

        # NOTE(comstud): Not checking capabilities or service for
        # enabled/disabled so that a provided json filter can decide

        result = self._process_filter(jsonutils.loads(query), host_state)
        if isinstance(result, list):
            # If any succeeded, include the host
            result = any(result)
        if result:
            # Filter it out.
            return True
        return False
| apache-2.0 |
mxOBS/deb-pkg_trusty_chromium-browser | third_party/cython/src/Cython/Compiler/CodeGeneration.py | 99 | 1097 | from Cython.Compiler.Visitor import VisitorTransform
from Cython.Compiler.Nodes import StatListNode
class ExtractPxdCode(VisitorTransform):
    """
    Finds nodes in a pxd file that should generate code, and
    returns them in a StatListNode.

    The result is a tuple (StatListNode, ModuleScope), i.e.
    everything that is needed from the pxd after it is processed.

    A purer approach would be to separately compile the pxd code,
    but the result would have to be slightly more sophisticated
    than pure strings (functions + wanted interned strings +
    wanted utility code + wanted cached objects) so for now this
    approach is taken.
    """

    def __call__(self, root):
        # Collect function definitions while walking the tree, then wrap
        # them in a single StatListNode, returned with the module scope.
        self.funcs = []
        self.visitchildren(root)
        return (StatListNode(root.pos, stats=self.funcs), root.scope)

    def visit_FuncDefNode(self, node):
        self.funcs.append(node)
        # Do not visit children, nested funcdefnodes will
        # also be moved by this action...
        return node

    def visit_Node(self, node):
        # Default case: keep walking the tree.
        self.visitchildren(node)
        return node
| bsd-3-clause |
dpassante/ansible | lib/ansible/plugins/inventory/ini.py | 16 | 17610 | # Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
inventory: ini
version_added: "2.4"
short_description: Uses an Ansible INI file as inventory source.
description:
- INI file based inventory, sections are groups or group related with special `:modifiers`.
- Entries in sections C([group_1]) are hosts, members of the group.
- Hosts can have variables defined inline as key/value pairs separated by C(=).
- The C(children) modifier indicates that the section contains groups.
- The C(vars) modifier indicates that the section contains variables assigned to members of the group.
- Anything found outside a section is considered an 'ungrouped' host.
- Values passed in the INI format using the ``key=value`` syntax are interpreted differently depending on where they are declared within your inventory.
- When declared inline with the host, INI values are processed by Python's ast.literal_eval function
(U(https://docs.python.org/2/library/ast.html#ast.literal_eval)) and interpreted as Python literal structures
(strings, numbers, tuples, lists, dicts, booleans, None). Host lines accept multiple C(key=value) parameters per line.
Therefore they need a way to indicate that a space is part of a value rather than a separator.
- When declared in a C(:vars) section, INI values are interpreted as strings. For example C(var=FALSE) would create a string equal to C(FALSE).
Unlike host lines, C(:vars) sections accept only a single entry per line, so everything after the C(=) must be the value for the entry.
- Do not rely on types set during definition, always make sure you specify type with a filter when needed when consuming the variable.
- See the Examples for proper quoting to prevent changes to variable type.
notes:
- Whitelisted in configuration by default.
- Consider switching to YAML format for inventory sources to avoid confusion on the actual type of a variable.
The YAML inventory plugin processes variable values consistently and correctly.
'''
EXAMPLES = '''
example1: |
# example cfg file
[web]
host1
host2 ansible_port=222 # defined inline, interpreted as an integer
[web:vars]
http_port=8080 # all members of 'web' will inherit these
myvar=23 # defined in a :vars section, interpreted as a string
[web:children] # child groups will automatically add their hosts to parent group
apache
nginx
[apache]
tomcat1
tomcat2 myvar=34 # host specific vars override group vars
tomcat3 mysecret="'03#pa33w0rd'" # proper quoting to prevent value changes
[nginx]
jenkins1
[nginx:vars]
has_java = True # vars in child groups override same in parent
[all:vars]
has_java = False # 'all' is 'top' parent
example2: |
# other example config
host1 # this is 'ungrouped'
# both hosts have same IP but diff ports, also 'ungrouped'
host2 ansible_host=127.0.0.1 ansible_port=44
host3 ansible_host=127.0.0.1 ansible_port=45
[g1]
host4
[g2]
host4 # same host as above, but member of 2 groups, will inherit vars from both
# inventory hostnames are unique
'''
import ast
import re
from ansible.inventory.group import to_safe_group_name
from ansible.plugins.inventory import BaseFileInventoryPlugin
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils._text import to_bytes, to_text
from ansible.utils.shlex import shlex_split
class InventoryModule(BaseFileInventoryPlugin):
    """
    Takes an INI-format inventory file and builds a list of groups and subgroups
    with their associated hosts and variable settings.
    """

    NAME = 'ini'

    # Characters that introduce a comment, in text and bytes form.
    _COMMENT_MARKERS = frozenset((u';', u'#'))
    b_COMMENT_MARKERS = frozenset((b';', b'#'))

    def __init__(self):

        super(InventoryModule, self).__init__()

        # Compiled regexes, filled in by _compile_patterns().
        self.patterns = {}
        # Remembered for error messages emitted by _raise_error().
        self._filename = None

    def parse(self, inventory, loader, path, cache=True):
        """Read the INI inventory file at `path` and populate `inventory`."""

        super(InventoryModule, self).parse(inventory, loader, path)

        self._filename = path

        try:
            # Read in the hosts, groups, and variables defined in the inventory file.
            if self.loader:
                (b_data, private) = self.loader._get_file_contents(path)
            else:
                b_path = to_bytes(path, errors='surrogate_or_strict')
                with open(b_path, 'rb') as fh:
                    b_data = fh.read()

            try:
                # Faster to do to_text once on a long string than many
                # times on smaller strings
                data = to_text(b_data, errors='surrogate_or_strict').splitlines()
            except UnicodeError:
                # Handle non-utf8 in comment lines: https://github.com/ansible/ansible/issues/17593
                data = []
                for line in b_data.splitlines():
                    if line and line[0] in self.b_COMMENT_MARKERS:
                        # Replace is okay for comment lines
                        # data.append(to_text(line, errors='surrogate_then_replace'))
                        # Currently we only need these lines for accurate lineno in errors
                        data.append(u'')
                    else:
                        # Non-comment lines still have to be valid uf-8
                        data.append(to_text(line, errors='surrogate_or_strict'))

            self._parse(path, data)
        except Exception as e:
            raise AnsibleParserError(e)

    def _raise_error(self, message):
        # Prefix parse errors with the file name and current line number.
        raise AnsibleError("%s:%d: " % (self._filename, self.lineno) + message)

    def _parse(self, path, lines):
        '''
        Populates self.groups from the given array of lines. Raises an error on
        any parse failure.
        '''

        self._compile_patterns()

        # We behave as though the first line of the inventory is '[ungrouped]',
        # and begin to look for host definitions. We make a single pass through
        # each line of the inventory, building up self.groups and adding hosts,
        # subgroups, and setting variables as we go.

        # Forward references to groups not yet declared: name -> details.
        pending_declarations = {}
        groupname = 'ungrouped'
        state = 'hosts'
        self.lineno = 0
        for line in lines:
            self.lineno += 1

            line = line.strip()
            # Skip empty lines and comments
            if not line or line[0] in self._COMMENT_MARKERS:
                continue

            # Is this a [section] header? That tells us what group we're parsing
            # definitions for, and what kind of definitions to expect.
            m = self.patterns['section'].match(line)
            if m:
                (groupname, state) = m.groups()

                groupname = to_safe_group_name(groupname)

                state = state or 'hosts'
                if state not in ['hosts', 'children', 'vars']:
                    title = ":".join(m.groups())
                    self._raise_error("Section [%s] has unknown type: %s" % (title, state))

                # If we haven't seen this group before, we add a new Group.
                if groupname not in self.inventory.groups:
                    # Either [groupname] or [groupname:children] is sufficient to declare a group,
                    # but [groupname:vars] is allowed only if the group is declared elsewhere.
                    # We add the group anyway, but make a note in pending_declarations to check at the end.
                    #
                    # It's possible that a group is previously pending due to being defined as a child
                    # group, in that case we simply pass so that the logic below to process pending
                    # declarations will take the appropriate action for a pending child group instead of
                    # incorrectly handling it as a var state pending declaration
                    if state == 'vars' and groupname not in pending_declarations:
                        pending_declarations[groupname] = dict(line=self.lineno, state=state, name=groupname)

                    self.inventory.add_group(groupname)

                # When we see a declaration that we've been waiting for, we process and delete.
                if groupname in pending_declarations and state != 'vars':
                    if pending_declarations[groupname]['state'] == 'children':
                        self._add_pending_children(groupname, pending_declarations)
                    elif pending_declarations[groupname]['state'] == 'vars':
                        del pending_declarations[groupname]

                continue
            elif line.startswith('[') and line.endswith(']'):
                self._raise_error("Invalid section entry: '%s'. Please make sure that there are no spaces" % line + " " +
                                  "in the section entry, and that there are no other invalid characters")

            # It's not a section, so the current state tells us what kind of
            # definition it must be. The individual parsers will raise an
            # error if we feed them something they can't digest.

            # [groupname] contains host definitions that must be added to
            # the current group.
            if state == 'hosts':
                hosts, port, variables = self._parse_host_definition(line)
                self._populate_host_vars(hosts, variables, groupname, port)

            # [groupname:vars] contains variable definitions that must be
            # applied to the current group.
            elif state == 'vars':
                (k, v) = self._parse_variable_definition(line)
                self.inventory.set_variable(groupname, k, v)

            # [groupname:children] contains subgroup names that must be
            # added as children of the current group. The subgroup names
            # must themselves be declared as groups, but as before, they
            # may only be declared later.
            elif state == 'children':
                child = self._parse_group_name(line)
                if child not in self.inventory.groups:
                    if child not in pending_declarations:
                        pending_declarations[child] = dict(line=self.lineno, state=state, name=child, parents=[groupname])
                    else:
                        pending_declarations[child]['parents'].append(groupname)
                else:
                    self.inventory.add_child(groupname, child)
            else:
                # This can happen only if the state checker accepts a state that isn't handled above.
                self._raise_error("Entered unhandled state: %s" % (state))

        # Any entries in pending_declarations not removed by a group declaration above mean that there was an unresolved reference.
        # We report only the first such error here.
        for g in pending_declarations:
            decl = pending_declarations[g]
            if decl['state'] == 'vars':
                raise AnsibleError("%s:%d: Section [%s:vars] not valid for undefined group: %s" % (path, decl['line'], decl['name'], decl['name']))
            elif decl['state'] == 'children':
                raise AnsibleError("%s:%d: Section [%s:children] includes undefined group: %s" % (path, decl['line'], decl['parents'].pop(), decl['name']))

    def _add_pending_children(self, group, pending):
        # Attach `group` to every parent that was waiting for it, recursing
        # into parents that are themselves pending child declarations.
        for parent in pending[group]['parents']:
            self.inventory.add_child(parent, group)
            if parent in pending and pending[parent]['state'] == 'children':
                self._add_pending_children(parent, pending)
        del pending[group]

    def _parse_group_name(self, line):
        '''
        Takes a single line and tries to parse it as a group name. Returns the
        group name if successful, or raises an error.
        '''

        m = self.patterns['groupname'].match(line)
        if m:
            return m.group(1)

        self._raise_error("Expected group name, got: %s" % (line))

    def _parse_variable_definition(self, line):
        '''
        Takes a string and tries to parse it as a variable definition. Returns
        the key and value if successful, or raises an error.
        '''

        # TODO: We parse variable assignments as a key (anything to the left of
        # an '='"), an '=', and a value (anything left) and leave the value to
        # _parse_value to sort out. We should be more systematic here about
        # defining what is acceptable, how quotes work, and so on.
        if '=' in line:
            (k, v) = [e.strip() for e in line.split("=", 1)]
            return (k, self._parse_value(v))

        self._raise_error("Expected key=value, got: %s" % (line))

    def _parse_host_definition(self, line):
        '''
        Takes a single line and tries to parse it as a host definition. Returns
        a list of Hosts if successful, or raises an error.
        '''

        # A host definition comprises (1) a non-whitespace hostname or range,
        # optionally followed by (2) a series of key="some value" assignments.
        # We ignore any trailing whitespace and/or comments. For example, here
        # are a series of host definitions in a group:
        #
        # [groupname]
        # alpha
        # beta:2345 user=admin      # we'll tell shlex
        # gamma sudo=True user=root # to ignore comments

        try:
            tokens = shlex_split(line, comments=True)
        except ValueError as e:
            self._raise_error("Error parsing host definition '%s': %s" % (line, e))

        (hostnames, port) = self._expand_hostpattern(tokens[0])

        # Try to process anything remaining as a series of key=value pairs.
        variables = {}
        for t in tokens[1:]:
            if '=' not in t:
                self._raise_error("Expected key=value host variable assignment, got: %s" % (t))
            (k, v) = t.split('=', 1)
            variables[k] = self._parse_value(v)

        return hostnames, port, variables

    def _expand_hostpattern(self, hostpattern):
        '''
        do some extra checks over normal processing
        '''
        # specification?

        hostnames, port = super(InventoryModule, self)._expand_hostpattern(hostpattern)

        if hostpattern.strip().endswith(':') and port is None:
            raise AnsibleParserError("Invalid host pattern '%s' supplied, ending in ':' is not allowed, this character is reserved to provide a port." %
                                     hostpattern)
        for pattern in hostnames:
            # some YAML parsing prevention checks
            if pattern.strip() == '---':
                raise AnsibleParserError("Invalid host pattern '%s' supplied, '---' is normally a sign this is a YAML file." % hostpattern)

        return (hostnames, port)

    @staticmethod
    def _parse_value(v):
        '''
        Attempt to transform the string value from an ini file into a basic python object
        (int, dict, list, unicode string, etc).
        '''
        try:
            v = ast.literal_eval(v)
        # Using explicit exceptions.
        # Likely a string that literal_eval does not like. We will then just set it.
        except ValueError:
            # For some reason this was thought to be malformed.
            pass
        except SyntaxError:
            # Is this a hash with an equals at the end?
            pass
        return to_text(v, nonstring='passthru', errors='surrogate_or_strict')

    def _compile_patterns(self):
        '''
        Compiles the regular expressions required to parse the inventory and
        stores them in self.patterns.
        '''

        # Section names are square-bracketed expressions at the beginning of a
        # line, comprising (1) a group name optionally followed by (2) a tag
        # that specifies the contents of the section. We ignore any trailing
        # whitespace and/or comments. For example:
        #
        # [groupname]
        # [somegroup:vars]
        # [naughty:children] # only get coal in their stockings

        self.patterns['section'] = re.compile(
            to_text(r'''^\[
                    ([^:\]\s]+)             # group name (see groupname below)
                    (?::(\w+))?             # optional : and tag name
                    \]
                    \s*                     # ignore trailing whitespace
                    (?:\#.*)?               # and/or a comment till the
                    $                       # end of the line
                    ''', errors='surrogate_or_strict'), re.X
        )

        # FIXME: What are the real restrictions on group names, or rather, what
        # should they be? At the moment, they must be non-empty sequences of non
        # whitespace characters excluding ':' and ']', but we should define more
        # precise rules in order to support better diagnostics.

        self.patterns['groupname'] = re.compile(
            to_text(r'''^
                    ([^:\]\s]+)
                    \s*                     # ignore trailing whitespace
                    (?:\#.*)?               # and/or a comment till the
                    $                       # end of the line
                    ''', errors='surrogate_or_strict'), re.X
        )
| gpl-3.0 |
spxtr/contrib | mungegithub/issue-labeler/simple_app.py | 20 | 4662 | #!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import simplejson
import logging
from logging.handlers import RotatingFileHandler
import numpy as np
from flask import Flask, request
from sklearn.feature_extraction import FeatureHasher
from sklearn.externals import joblib
from sklearn.linear_model import SGDClassifier
from nltk.tokenize import RegexpTokenizer
from nltk.stem.porter import PorterStemmer
app = Flask(__name__)

# Parameters
team_fn = "./models/trained_teams_model.pkl"            # pickled team classifier
component_fn = "./models/trained_components_model.pkl"  # pickled component classifier
logFile = "/tmp/issue-labeler.log"
logSize = 1024*1024*100   # rotate the log after ~100 MB
numFeatures = 262144      # hashing-trick feature-space dimensionality
# SGDClassifier hyper-parameters shared by both models.
myLoss = 'hinge'
myAlpha = .1
myPenalty = 'l2'
myHasher = FeatureHasher(input_type="string", n_features=numFeatures, non_negative=True)
myStemmer = PorterStemmer()
tokenizer = RegexpTokenizer(r'\w+')
# Load the stopword list once at import time.
#
# Bug fix: the original guard was ``if not stopwords:`` inside the try,
# which referenced the name before it was ever assigned.  That raised
# NameError on every startup, the bare ``except`` swallowed it, and the
# stopword file was therefore never actually read.  Load it directly and
# only fall back to "remove nothing" when the file is missing/unreadable.
stop_fn = "./stopwords.txt"
try:
    with open(stop_fn, 'r') as f:
        stopwords = set(word.strip() for word in f)
except IOError:
    # don't remove any stopwords
    stopwords = set()
@app.errorhandler(500)
def internal_error(exception):
    # Surface the exception text to the caller as plain text instead of
    # Flask's default HTML error page.
    return str(exception), 500
@app.route("/", methods = ["POST"])
def get_labels():
    """
    The request should contain 2 form-urlencoded parameters
      1) title : title of the issue
      2) body: body of the issue
    It returns a team/<label> and a component/<label>
    """
    title = request.form.get('title', "")
    body = request.form.get('body', "")
    tokens = tokenize_stem_stop(" ".join([title, body]))
    # Models are re-loaded from disk on every request so updates made via
    # /update_models are picked up without restarting the server.
    team_mod = joblib.load(team_fn)
    comp_mod = joblib.load(component_fn)
    vec = myHasher.transform([tokens])
    tlabel = team_mod.predict(vec)[0]
    clabel = comp_mod.predict(vec)[0]
    # Response is the two predicted labels, comma-separated.
    return ",".join([tlabel, clabel])
def tokenize_stem_stop(inputString):
    """Lower-case and tokenize the text, drop stopwords, stem the rest."""
    normalized = inputString.encode('utf-8').decode('utf-8').lower()
    words = tokenizer.tokenize(normalized)
    kept = filter(lambda word: word not in stopwords, words)
    return map(myStemmer.stem, kept)
@app.route("/update_models", methods = ["PUT"])
def update_model():
    """
    data should contain three fields
      titles: list of titles
      bodies: list of bodies
      labels: list of list of labels

    Incrementally updates (partial_fit) the persisted team/component
    classifiers when they already exist on disk, otherwise trains new ones.
    """
    data = request.json
    titles = data.get('titles')
    bodies = data.get('bodies')
    labels = data.get('labels')

    tTokens = []
    cTokens = []
    team_labels = []
    component_labels = []
    for (title, body, label_list) in zip(titles, bodies, labels):
        # Split the issue's labels into the team/* and component/* families.
        # list()-wrapping keeps the truthiness checks below correct on
        # Python 3, where filter() returns an (always truthy) iterator.
        tLabel = list(filter(lambda x: x.startswith('team'), label_list))
        cLabel = list(filter(lambda x: x.startswith('component'), label_list))
        tokens = tokenize_stem_stop(" ".join([title, body]))
        if tLabel:
            team_labels += tLabel
            tTokens += [tokens]
        if cLabel:
            component_labels += cLabel
            cTokens += [tokens]
    tVec = myHasher.transform(tTokens)
    cVec = myHasher.transform(cTokens)

    if team_labels:
        if os.path.isfile(team_fn):
            team_model = joblib.load(team_fn)
            team_model.partial_fit(tVec, np.array(team_labels))
        else:
            # no team model stored so build a new one
            team_model = SGDClassifier(loss=myLoss, penalty=myPenalty, alpha=myAlpha)
            team_model.fit(tVec, np.array(team_labels))
        # Bug fix: persist inside the branch. The original dumped both
        # models unconditionally, raising NameError whenever a request
        # carried labels for only one of the two families.
        joblib.dump(team_model, team_fn)

    if component_labels:
        if os.path.isfile(component_fn):
            component_model = joblib.load(component_fn)
            component_model.partial_fit(cVec, np.array(component_labels))
        else:
            # no comp model stored so build a new one
            component_model = SGDClassifier(loss=myLoss, penalty=myPenalty, alpha=myAlpha)
            component_model.fit(cVec, np.array(component_labels))
        joblib.dump(component_model, component_fn)

    return ""
def configure_logger():
    """Attach a size-rotated file handler to the Flask app's logger."""
    formatter = logging.Formatter('%(asctime)-20s %(levelname)-10s %(message)s')
    handler = RotatingFileHandler(logFile, maxBytes=logSize, backupCount=3)
    handler.setFormatter(formatter)
    app.logger.addHandler(handler)
if __name__ == "__main__":
configure_logger()
app.run(host="0.0.0.0")
| apache-2.0 |
AsgerPetersen/QGIS | python/ext-libs/owslib/wcs.py | 30 | 1528 | # -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2004, 2006 Sean C. Gillies
# Copyright (c) 2007 STFC <http://www.stfc.ac.uk>
#
# Authors :
# Dominic Lowe <d.lowe@rl.ac.uk>
#
# Contact email: d.lowe@rl.ac.uk
# =============================================================================
"""
Web Coverage Server (WCS) methods and metadata. Factory function.
"""
import urllib2
import etree
from coverage import wcs100, wcs110, wcsBase
def WebCoverageService(url, version=None, xml=None, cookies=None, timeout=30):
    ''' wcs factory function, returns a version specific WebCoverageService object

    If no version is given, the service's GetCapabilities document is
    fetched (unless `xml` is supplied) and the version is read from its
    root element.  Returns None for unsupported versions (original
    behaviour preserved).
    '''
    if version is None:
        if xml is None:
            # No capabilities document supplied: fetch it from the service.
            reader = wcsBase.WCSCapabilitiesReader()
            request = reader.capabilities_url(url)
            if cookies is None:
                xml = urllib2.urlopen(request, timeout=timeout).read()
            else:
                req = urllib2.Request(request)
                req.add_header('Cookie', cookies)
                # Bug fix: read the response body. The original stored the
                # file-like response object itself, which etree.fromstring
                # cannot parse (the cookie-less branch already read()).
                xml = urllib2.urlopen(req, timeout=timeout).read()
        # Sniff the negotiated version from the capabilities root element.
        capabilities = etree.etree.fromstring(xml)
        version = capabilities.get('version')
        del capabilities

    if version == '1.0.0':
        return wcs100.WebCoverageService_1_0_0.__new__(wcs100.WebCoverageService_1_0_0, url, xml, cookies)
    elif version == '1.1.0':
        return wcs110.WebCoverageService_1_1_0.__new__(wcs110.WebCoverageService_1_1_0, url, xml, cookies)
| gpl-2.0 |
linjoahow/W16_test1 | static/Brython3.1.1-20150328-091302/Lib/xml/sax/handler.py | 925 | 13922 | """
This module contains the core classes of version 2.0 of SAX for Python.
This file provides only default classes with absolutely minimum
functionality, from which drivers and applications can be subclassed.
Many of these classes are empty and are included only as documentation
of the interfaces.
$Id$
"""
version = '2.0beta'
#============================================================================
#
# HANDLER INTERFACES
#
#============================================================================
# ===== ERRORHANDLER =====
class ErrorHandler:
    """Basic interface for SAX error handlers.

    If you create an object that implements this interface, then
    register the object with your XMLReader, the parser will call the
    methods in your object to report all warnings and errors. There
    are three levels of errors available: warnings, (possibly)
    recoverable errors, and unrecoverable errors. All methods take a
    SAXParseException as the only parameter."""

    def error(self, exception):
        "Handle a recoverable error."
        # Default policy: abort parsing by re-raising the exception.
        raise exception

    def fatalError(self, exception):
        "Handle a non-recoverable error."
        raise exception

    def warning(self, exception):
        "Handle a warning."
        # Warnings are merely reported; parsing continues.
        print(exception)
# ===== CONTENTHANDLER =====
class ContentHandler:
    """Receiver for logical document content events.

    This is the principal SAX callback interface, and the one most
    applications care about.  The parser reports the document to the
    application by invoking these methods; the callback order mirrors
    the order of the information in the document.  Every event method
    here is a no-op stub intended to be overridden.
    """

    def __init__(self):
        # Locator handed over by the parser (see setDocumentLocator);
        # None until/unless the parser supplies one.
        self._locator = None

    def setDocumentLocator(self, locator):
        """Accept a locator object from the parser.

        Parsers are strongly encouraged (though not strictly required)
        to provide a locator; when they do, they must call this method
        before any other method of the DocumentHandler interface.

        The locator reports the end position of the event currently
        being delivered, even when no error is being raised — typically
        used by applications to report their own errors (for instance,
        character content violating business rules).  Its precision is
        not sufficient for search-engine use, and it is only valid
        while an event callback of this interface is executing; do not
        consult it at any other time.
        """
        self._locator = locator

    def startDocument(self):
        """Report the start of the document.

        Called exactly once, before every other callback of this
        interface or of DTDHandler — setDocumentLocator excepted.
        """

    def endDocument(self):
        """Report the end of the document.

        Called exactly once, as the final callback of the parse, only
        after the parser has reached end of input or given up because
        of an unrecoverable error.
        """

    def startPrefixMapping(self, prefix, uri):
        """Open the scope of a prefix-URI namespace mapping.

        Ordinary namespace processing does not need this event: with
        the http://xml.org/sax/features/namespaces feature enabled
        (the default) the reader expands prefixes in element and
        attribute names automatically.  Applications that must expand
        prefixes appearing in character data or attribute values —
        where automatic expansion is unsafe — can use the
        start/endPrefixMapping pair to do so themselves.

        These events are not guaranteed to nest properly with respect
        to each other: every startPrefixMapping precedes the matching
        startElement and every endPrefixMapping follows the matching
        endElement, but no further ordering is promised.
        """

    def endPrefixMapping(self, prefix):
        """Close the scope of a prefix-URI mapping.

        See startPrefixMapping for details.  Always delivered after
        the corresponding endElement event; the relative order of
        endPrefixMapping events is otherwise unspecified.
        """

    def startElement(self, name, attrs):
        """Report an element start tag in non-namespace mode.

        *name* is the raw XML 1.0 element-type name as a string;
        *attrs* is an Attributes instance holding the element's
        attributes.
        """

    def endElement(self, name):
        """Report an element end tag in non-namespace mode.

        *name* is the element-type name, exactly as delivered to the
        matching startElement event.
        """

    def startElementNS(self, name, qname, attrs):
        """Report an element start tag in namespace mode.

        *name* is a (uri, localname) tuple naming the element type —
        uri is None for elements outside any namespace; *qname* is the
        raw XML 1.0 name as written in the source document; *attrs* is
        an Attributes instance holding the element's attributes.
        """

    def endElementNS(self, name, qname):
        """Report an element end tag in namespace mode.

        *name* is the element-type name, exactly as delivered to the
        matching startElementNS event.
        """

    def characters(self, content):
        """Report a chunk of character data.

        The parser calls this once per chunk.  A parser may deliver
        contiguous character data as a single chunk or split it over
        several calls, but all characters of one event come from the
        same external entity so that the Locator stays meaningful.
        """

    def ignorableWhitespace(self, whitespace):
        """Report ignorable whitespace in element content.

        Validating parsers must report each chunk of ignorable
        whitespace this way (W3C XML 1.0 recommendation, section
        2.10); non-validating parsers may do so too if they parse and
        use content models.  Chunking rules match those of
        characters(): contiguous whitespace may arrive whole or split,
        but each event's characters come from a single external
        entity, keeping the Locator useful.
        """

    def processingInstruction(self, target, data):
        """Report a processing instruction.

        Called once per processing instruction; note these may appear
        before or after the main document element.  Parsers should
        never report an XML declaration (XML 1.0, section 2.8) or a
        text declaration (XML 1.0, section 4.3.1) through this method.
        """

    def skippedEntity(self, name):
        """Report an entity that the parser skipped.

        Called once per skipped entity.  Non-validating processors may
        skip entities whose declarations they never saw (for example,
        entities declared in an external DTD subset); any processor
        may skip external entities, depending on the
        http://xml.org/sax/features/external-general-entities and
        http://xml.org/sax/features/external-parameter-entities
        properties.
        """
# ===== DTDHandler =====
class DTDHandler:
    """Receiver for DTD events.

    Covers only the DTD events required for basic parsing: notation
    and unparsed-entity declarations.  Both methods are no-op stubs
    intended to be overridden.
    """

    def notationDecl(self, name, publicId, systemId):
        """Report a notation declaration event."""

    def unparsedEntityDecl(self, name, publicId, systemId, ndata):
        """Report an unparsed (NDATA) entity declaration event."""
# ===== ENTITYRESOLVER =====
class EntityResolver:
    """Basic interface for resolving external entities.

    Register an object implementing this interface with your parser
    and the parser will call it to resolve every external entity.
    Note that DefaultHandler already implements this interface with
    the default behaviour.
    """

    def resolveEntity(self, publicId, systemId):
        """Map an entity to an input source.

        Returns either a system identifier string to read from, or an
        InputSource object.  The default implementation hands back the
        given system identifier unchanged.
        """
        return systemId
#============================================================================
#
# CORE FEATURES
#
# Standard SAX2 feature names, passed to XMLReader.setFeature()/
# getFeature() to toggle parser behaviour.
#
#============================================================================

feature_namespaces = "http://xml.org/sax/features/namespaces"
# true: Perform Namespace processing (default).
# false: Optionally do not perform Namespace processing
#        (implies namespace-prefixes).
# access: (parsing) read-only; (not parsing) read/write

feature_namespace_prefixes = "http://xml.org/sax/features/namespace-prefixes"
# true: Report the original prefixed names and attributes used for Namespace
#       declarations.
# false: Do not report attributes used for Namespace declarations, and
#        optionally do not report original prefixed names (default).
# access: (parsing) read-only; (not parsing) read/write

feature_string_interning = "http://xml.org/sax/features/string-interning"
# true: All element names, prefixes, attribute names, Namespace URIs, and
#       local names are interned using the built-in intern function.
# false: Names are not necessarily interned, although they may be (default).
# access: (parsing) read-only; (not parsing) read/write

feature_validation = "http://xml.org/sax/features/validation"
# true: Report all validation errors (implies external-general-entities and
#       external-parameter-entities).
# false: Do not report validation errors.
# access: (parsing) read-only; (not parsing) read/write

feature_external_ges = "http://xml.org/sax/features/external-general-entities"
# true: Include all external general (text) entities.
# false: Do not include external general entities.
# access: (parsing) read-only; (not parsing) read/write

feature_external_pes = "http://xml.org/sax/features/external-parameter-entities"
# true: Include all external parameter entities, including the external
#       DTD subset.
# false: Do not include any external parameter entities, even the external
#        DTD subset.
# access: (parsing) read-only; (not parsing) read/write

# Every standard feature name defined above, in one list.
all_features = [feature_namespaces,
                feature_namespace_prefixes,
                feature_string_interning,
                feature_validation,
                feature_external_ges,
                feature_external_pes]
#============================================================================
#
# CORE PROPERTIES
#
# Standard SAX2 property names, passed to XMLReader.setProperty()/
# getProperty().  The last two (python.org namespace) are
# Python-specific extensions.
#
#============================================================================

property_lexical_handler = "http://xml.org/sax/properties/lexical-handler"
# data type: xml.sax.sax2lib.LexicalHandler
# description: An optional extension handler for lexical events like comments.
# access: read/write

property_declaration_handler = "http://xml.org/sax/properties/declaration-handler"
# data type: xml.sax.sax2lib.DeclHandler
# description: An optional extension handler for DTD-related events other
#              than notations and unparsed entities.
# access: read/write

property_dom_node = "http://xml.org/sax/properties/dom-node"
# data type: org.w3c.dom.Node
# description: When parsing, the current DOM node being visited if this is
#              a DOM iterator; when not parsing, the root DOM node for
#              iteration.
# access: (parsing) read-only; (not parsing) read/write

property_xml_string = "http://xml.org/sax/properties/xml-string"
# data type: String
# description: The literal string of characters that was the source for
#              the current event.
# access: read-only

property_encoding = "http://www.python.org/sax/properties/encoding"
# data type: String
# description: The name of the encoding to assume for input data.
# access: write: set the encoding, e.g. established by a higher-level
#                protocol. May change during parsing (e.g. after
#                processing a META tag)
#         read:  return the current encoding (possibly established through
#                auto-detection.
# initial value: UTF-8
#

property_interning_dict = "http://www.python.org/sax/properties/interning-dict"
# data type: Dictionary
# description: The dictionary used to intern common strings in the document
# access: write: Request that the parser uses a specific dictionary, to
#                allow interning across different documents
#         read:  return the current interning dictionary, or None
#

# Every standard property name defined above, in one list.
all_properties = [property_lexical_handler,
                  property_dom_node,
                  property_declaration_handler,
                  property_xml_string,
                  property_encoding,
                  property_interning_dict]
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.